text stringlengths 38 1.54M |
|---|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Boolean, VARCHAR, Integer, String, DateTime, ForeignKey, UniqueConstraint, PrimaryKeyConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.sqlite import TEXT
engine = create_engine('sqlite:///mynah.db', encoding="utf-8")
Base = declarative_base()
DBSession = sessionmaker(bind=engine)
session = DBSession()
class User(Base):
    """SQLAlchemy model for an application user.

    Also implements the four methods Flask-Login expects on a user object
    (is_active / get_id / is_authenticated / is_anonymous).
    """
    __tablename__ = 'user'
    userid = Column(VARCHAR(36), primary_key=True)   # UUID string primary key
    email = Column(VARCHAR(256),unique=True)         # login identity, must be unique
    password = Column(VARCHAR(256))                  # NOTE(review): presumably a hash — confirm it is never stored in plain text
    apitoken= Column(VARCHAR(12))                    # short token for API access

    def is_active(self):
        """True, as all users are active."""
        return True

    def get_id(self):
        """Return the email address to satisfy Flask-Login's requirements."""
        # NOTE(review): actually returns userid, not the email — the docstring
        # and behavior disagree; Flask-Login only needs a unique id, so the
        # code is fine, the docstring wording is stale.
        return self.userid

    def is_authenticated(self):
        """Return True if the user is authenticated."""
        return True

    def is_anonymous(self):
        """False, as anonymous users aren't supported."""
        return False
class Filedetails(Base):
    """Metadata for an uploaded file, owned by a user."""
    __tablename__='filedetails'
    fileid=Column(VARCHAR(36),primary_key=True)                 # UUID of the file
    userid=Column(VARCHAR(36),ForeignKey('user.userid'))        # owner
    filename=Column(VARCHAR(128))
    # NOTE(review): parentid also references user.userid — if this is meant to
    # model a parent folder/file it should likely reference filedetails.fileid;
    # confirm the intended relationship.
    parentid=Column(VARCHAR(36),ForeignKey('user.userid'), nullable = False)
    fileextension=Column(VARCHAR(128))
    filetype=Column(VARCHAR(5))
    fileuploadedon=Column(DateTime)   # upload timestamp
class Transcriptiondata(Base):
    """Transcription result attached to an uploaded file."""
    __tablename__='transcriptiondata'
    id=Column(VARCHAR(36),primary_key=True)
    fileid=Column(VARCHAR(36),ForeignKey('filedetails.fileid'), nullable = False)
    response=Column(TEXT)      # raw transcription payload
    status=Column(Boolean)     # NOTE(review): presumably "completed" flag — confirm semantics
if __name__ == "__main__":
Base.metadata.create_all(bind=engine)
|
#finds all "lucky tickets" in range between two inputted numbers
#"lucky" are tickets which sum of first three digits equals to sum of three last
import sys


def is_lucky(ticket):
    """Return True if the 6-digit (zero-padded) ticket has equal digit sums
    in its first and last three digits."""
    digits = str(ticket).zfill(6)  # replaces the manual '0'-padding loop
    return sum(map(int, digits[:3])) == sum(map(int, digits[3:]))


def count_lucky(first, last):
    """Count lucky tickets in the inclusive range [first, last]."""
    return sum(1 for ticket in range(first, last + 1) if is_lucky(ticket))


if __name__ == "__main__":
    # Python 3 fix: the original used Python 2 `print "..."` statements.
    # CLI behaviour is unchanged: two arguments, prints error1/error2 or count.
    if len(sys.argv) != 3:  # script name + two bounds expected
        print("error1")
        sys.exit()
    a1 = int(sys.argv[1])
    a2 = int(sys.argv[2])
    if a1 < 0 or a1 > a2 or a2 > 999999:  # bounds must be valid 6-digit tickets
        print("error2")
        sys.exit()
    print(count_lucky(a1, a2))
|
#Arrays/Lists
import rhinoscriptsyntax as rs
# Because rs coerces sequences of three numbers to points, every list below
# can be used anywhere a point is expected.
# [ ] brackets define lists
# ( ) parentheses define tuples
arrPt1 = [1,3,9]
arrPt2 = [4,5,6]
arrPt3 = [-1,-2,-3]
arrPt4 = [7,8,9]
# Add each coordinate triple to the Rhino document as a point object.
rs.AddPoint(arrPt1)
rs.AddPoint (arrPt2)
rs.AddPoint (arrPt3)
rs.AddPoint (arrPt4)
# Collect the same coordinates into a list of points for a polyline.
points = []
points.append(arrPt1)
points.append(arrPt2)
points.append(arrPt3)
points.append(arrPt4)
print (points)
print(points[1])
rs.AddPolyline(points)
#print (arrPt1[2])
|
# Generated by Django 3.2.5 on 2021-07-17 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give note.title a default ('Untitled Note') so the field can be
    saved without an explicit title."""

    dependencies = [
        ('notes_app', '0003_alter_note_title'),
    ]

    operations = [
        migrations.AlterField(
            model_name='note',
            name='title',
            field=models.CharField(default='Untitled Note', max_length=128),
        ),
    ]
|
import pdb;
def x_traverse(x_forward, y_forward, result, depth, start_idx, start_jdx, matrix):
    """Consume one horizontal run of a spiral walk over a square matrix.

    Appends len(matrix) - depth elements from row start_idx to result
    (left-to-right when x_forward is True, right-to-left otherwise), then
    hands off to y_traverse for the following vertical run.  The mutual
    recursion terminates once result holds all len(matrix)**2 elements.
    Assumes matrix is square — TODO confirm with callers.
    """
    if len(result) == len(matrix) * len(matrix):
        return result
    if x_forward:
        # left-to-right along the current top row of the ring
        for jdx in range(0, len(matrix) - depth):
            result.append(matrix[start_idx][start_jdx + jdx])
        # step down one row and park on the last visited column
        start_idx += 1
        start_jdx += len(matrix) - depth - 1
    else:
        # right-to-left along the current bottom row of the ring
        for jdx in range(0, len(matrix) - depth):
            result.append(matrix[start_idx][start_jdx - jdx])
        start_idx -= 1
        start_jdx -= (len(matrix) - depth - 1)
    x_forward = not x_forward
    depth += 1  # each horizontal pass moves one ring deeper
    return y_traverse(x_forward, y_forward, result, depth, start_idx, start_jdx, matrix)
def y_traverse(x_forward, y_forward, result, depth, start_idx, start_jdx, matrix):
    """Consume one vertical run of the spiral walk (see x_traverse).

    Appends len(matrix) - depth elements from column start_jdx to result
    (top-down when y_forward is True, bottom-up otherwise), then hands off
    back to x_traverse.  Note: depth is only incremented in x_traverse —
    one increment per horizontal+vertical pair of runs.
    """
    if len(result) == len(matrix) * len(matrix):
        return result
    if y_forward:
        # top-down along the current right column
        for idx in range(0, len(matrix) - depth):
            result.append(matrix[start_idx + idx][start_jdx])
        # park on the last visited row, step one column left
        start_idx += len(matrix) - depth - 1
        start_jdx -= 1
    else:
        # bottom-up along the current left column
        for idx in range(0, len(matrix) - depth):
            result.append(matrix[start_idx - idx][start_jdx])
        start_idx -= (len(matrix) - depth - 1)
        start_jdx += 1
    y_forward = not y_forward
    return x_traverse(x_forward, y_forward, result, depth, start_idx, start_jdx, matrix)
def main():
    """Demo: print the clockwise spiral traversal of a hard-coded 4x4 matrix."""
    matrix = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16]
    ]
    result = []
    x_forward = True
    y_forward = True
    depth = 0
    # Python 3 fix: `print expr` was Python 2 statement syntax.
    print(x_traverse(x_forward, y_forward, result, depth, 0, 0, matrix))
    return None


if __name__ == "__main__":
    main()
|
"""
root
/ \
a | a
/ \ | / \
b c | c b
/ \ | / \
d e | e d return true
root
/ \
a | a
/ \ | / \
b c | c c
/ \ | / \
d e | e d return false
struct Node {
int data;
Node *left;
Node *right;
} Node;
bool isSelfMirrorTree(root) {
return true or false;
}
"""
def isMirror(src, dest):
    """Return True when the trees rooted at src and dest are mirror images.

    Two trees mirror each other when their roots carry the same data and
    each one's left subtree mirrors the other's right subtree.
    """
    # Both empty -> mirrors; exactly one empty -> not mirrors.
    if src is None or dest is None:
        return src is dest
    if src.data != dest.data:
        return False
    return isMirror(src.left, dest.right) and isMirror(src.right, dest.left)
def isSelfMirrorTree(root):
    """Return True when the tree is symmetric around its root.

    An empty tree is trivially its own mirror; otherwise the right subtree
    must mirror the left subtree.
    """
    return True if root is None else isMirror(root.right, root.left)
# time complexity is O(n) — every node is visited once
# * This class will be given a list of words (such as might be tokenized
# * from a paragraph of text), and will provide a method that takes two
# * words and returns the shortest distance (in words) between those two
# * words in the provided text.
# * Example:
# * WordDistanceFinder finder = new WordDistanceFinder(Arrays.asList("the", "quick", "brown", "fox", "quick"));
# * assert(finder.distance("fox", "the") == 3);
# * assert(finder.distance("quick", "fox") == 1);
# *
# * "quick" appears twice in the input. There are two possible distance values for "quick" and "fox":
# * (3 - 1) = 2 and (4 - 3) = 1.
# * Since we have to return the shortest distance between the two words we return 1.
import math
class WordDistanceFinder(object):
    """Finds the shortest distance (in words) between two words in a text.

    Given a tokenized word list, wd(word1, word2) returns the minimum
    absolute index difference over ALL occurrence pairs of the two words.
    """

    def __init__(self, words):
        self.words = words

    def wd(self, word1, word2):
        """Return the shortest distance between word1 and word2.

        Returns 65535 (the original sentinel) when either word is absent.

        Bug fixes versus the original:
        - iterated the module-level global `words` instead of self.words;
        - Python 2 `xrange`;
        - only remembered the LAST occurrence of each word, so the minimum
          over all occurrence pairs could be missed (e.g. the two "quick"
          occurrences in the header example).  A single pass that updates
          the running minimum whenever either word is seen is O(n).
        """
        retVal = 65535
        pos1 = None
        pos2 = None
        for i, word in enumerate(self.words):
            if word == word1:
                pos1 = i
                if pos2 is not None:
                    retVal = min(retVal, pos1 - pos2)
            if word == word2:
                pos2 = i
                if pos1 is not None:
                    retVal = min(retVal, pos2 - pos1)
        return retVal
# tests — module-level smoke run; expected output is 1 then 3.
# Python 3 fix: `print expr` was Python 2 statement syntax.
words = ["the", "quick", "brown", "fox", ""]
obj = WordDistanceFinder(words)
print(obj.wd("the", "quick"))
print(obj.wd("the", "fox"))
"""
# Optimized version
class WordDistanceFinderOptimized(object):
def __init__(self, words):
self.words = words
self.d = dict()
self.storeInDict()
def storeInDict(self):
for i in xrange(len(words)):
try:
self.d[words[i]].append(i)
except KeyError:
self.d[words[i]] = [i]
def wd(self, word1, word2):
retVal = 65535
pos1 = None
pos2 = None
if word1 in self.d:
pos1 = self.d[word1]
if word2 in self.d:
pos2 = self.d[word2]
# GEt min
retVal = min(retVal, min(min(pos1), min(pos2)), max(max(pos1), max(pos2)))
return retVal
"""
# O(N)
words = ["the", "quick", "brown", "fox", "the"]
obj = WordDistanceFinder(words)
print obj.wd("the", "quick")
print obj.wd("the", "fox")
"""
# O(log(pos)) binary search to find min delta between 2 lists of positions.
a: 1,2,3
b: 4,5,6
"""
|
from game import *
from math import sin, cos, tan, pi
W, H = CANVAS_WIDTH, CANVAS_HEIGHT
PINCH_MIN, PINCH_MAX, PINCH_SPEED = -50, 50, 2.5
init_state = {
"pos": (0,0),
"pinch": 0
}
# Per-pixel colour function: chooses one of two RGB formulas depending on the
# sign of the current "pinch" state; rangebounce folds each channel into the
# 0..255 range.  Signature (x, y, t, st) — presumably pixel coords, time and
# state dict; confirm against the game library's mask() contract.
colourmod = lambda x, y, t, st: (
    rangebounce(0,255,x),
    rangebounce(0,255,y),
    rangebounce(0,255,x*y)
) if st["pinch"] >= 0 else (
    rangebounce(0,255,x*sin(x)),
    rangebounce(0,255,y*cos(y)),
    rangebounce(0,255,x*y*tan(x*y))
)
# Scene graph: a colour-modulated, pinch-distorted 5x5 tiled cross pattern,
# centred on the canvas, over a black background.  All combinators
# (layers/translate/pinch/mask/tile/compose/line/rectangle) come from `game`.
scene = layers([
    # follow(point(0, 0, grey), "pos"),
    translate(
        pinch(
            mask(
                tile(
                    compose(
                        line(2, 0, 2, 5, 1, green),
                        line(0, 2, 5, 2, 1, green)),
                    5, 5),
                colourmod),
            "pinch"),
        W/2, H/2),
    rectangle(0, 0, W, H, black)
])
def rangebounce(lo, hi, x):
    """Fold an arbitrary value x into [lo, hi) by "bouncing" it back and
    forth (a triangle wave), mirroring the pinch logic in update().

    Bug fix: the boundary test must compare the phase against `span`
    (the offset within one half-cycle), not against `hi` — the original
    `phase < hi` only behaved correctly when lo == 0.
    """
    span = hi - lo
    cycle = 2 * span
    phase = abs(x) % cycle
    return (lo + phase) if phase < span else (lo + cycle - phase - 1)
def update(state, time_elapsed, events):
    """Per-frame update callback for the game loop.

    Tracks the last mouse position in state["pos"] and advances
    state["pinch"] along a triangle wave between PINCH_MIN and PINCH_MAX
    at PINCH_SPEED units per second.  Mutates `state` in place.
    Assumes Python 3 true division for `time_elapsed / 1000` — confirm.
    """
    seconds = time_elapsed / 1000
    for event in events:
        if event[0] == "mousemove": state["pos"] = event[1]
    # Triangle wave: ramp up for one span, then back down (same folding
    # scheme as rangebounce, driven by wall-clock time).
    pinch_range = PINCH_MAX - PINCH_MIN
    pinch_cycle = 2 * pinch_range
    pinch_phase = (seconds * PINCH_SPEED) % pinch_cycle
    state["pinch"] = (PINCH_MIN + pinch_phase if pinch_phase < pinch_range
                      else PINCH_MIN + pinch_cycle - pinch_phase - 1)
run(scene, update, init_state)
|
import os
import numpy as np
from fparam import *
from default_functions_proc import *
class offset_class:
    """Container for a 2-component offset/velocity field (vx, vy) read from
    a GAMMA-style binary offset file, plus an optional SNR field."""

    def __init__(self): # {{{
        # All fields are populated by load().
        self.vx = None
        self.vy = None
        self.snr = None
        # }}}

    def load(self,filename,par_file=None,snr_file=None,init=False): # {{{
        """Load the offset field from `filename`.

        The parameter file (default: same basename with .par) provides the
        record/pixel grid shape.  The binary is read either as one
        complex64 file (real -> vx, imag -> vy) or as two float32 files
        (.vx / .vy), depending on which format validates.
        snr_file and init are currently unused (see TODO below).
        """
        if par_file is None:
            par_file = os.path.splitext(filename)[0]+'.par'
        p = off_param()
        p.load(par_file)
        # Probe the on-disk datatype via the project helper.
        if check_if_binary_is_good(filename,par_file,datatype='complex',file_type='off'):
            dtype = np.complex64
        elif check_if_binary_is_good(filename,par_file,datatype='float',file_type='off'):
            dtype = np.float32
        else:
            # NOTE(review): silently falls back to complex64 even though
            # neither validation passed — confirm this is intended rather
            # than raising an error.
            dtype = np.complex64
        if dtype == np.complex64:
            # byteswap(): file is stored in the opposite endianness — TODO confirm big-endian source
            off=np.fromfile(filename,dtype=dtype).reshape(p.nrec,p.npix).byteswap()
            self.vx = off.real
            self.vy = off.imag
        elif dtype == np.float32:
            vx_filename = os.path.splitext(filename)[0]+'.vx'
            self.vx = np.fromfile(vx_filename,dtype=dtype).reshape(p.nrec,p.npix).byteswap()
            vy_filename = os.path.splitext(filename)[0]+'.vy'
            self.vy = np.fromfile(vy_filename,dtype=dtype).reshape(p.nrec,p.npix).byteswap()
        #TODO add SNR file
        # }}}
# def plot(self,minimum=None,maximum=None):
#
# import matplotlib.pyplot as plt
# v = np.sqrt( self.vx**2 + self.vy**2)
#
# if minimum is None:
# minimum = np.min(v)
# if maximum is None:
# maximum = np.max(v)
# v = np.clip(v,minimum,maximum)
# fig = plt.imshow(v)
# plt.show(block=False)
|
#!/usr/bin/python
# python-deps - find the dependencies of a given python script.
import os, sys
from modulefinder import ModuleFinder
# pylint: disable=wildcard-import
from distutils.sysconfig import *
sitedir = get_python_lib()
libdir = get_config_var('LIBDEST')
# A couple helper functions...
def moduledir(pyfile):
    '''Given a python file, return the module dir it belongs to, or None.

    Checks the site-packages dir and the stdlib dir: if pyfile lives in a
    package subdirectory of either (not directly in it, not above it, and
    not in site-packages itself), return that package directory.
    '''
    for topdir in sitedir, libdir:
        relpath = os.path.relpath(pyfile, topdir)
        # Bug fix: use os.sep instead of a hard-coded '/' so the check also
        # works on Windows paths.  No separator means the file sits directly
        # in topdir, i.e. it is not part of a package.
        if os.sep not in relpath:
            continue
        modname = relpath.split(os.sep)[0]
        if modname not in ('..', 'site-packages'):
            return os.path.join(topdir, modname)
    return None  # explicit: not inside any known module directory
# pylint: disable=redefined-outer-name
def pyfiles(moddir):
    '''Yield every *.py file anywhere under moddir
    (equivalent to: find $moddir -type f -name "*.py").'''
    for dirpath, _subdirs, filenames in os.walk(moddir):
        yield from (os.path.join(dirpath, name)
                    for name in filenames
                    if name.endswith(".py"))
# OK. Use modulefinder to find all the modules etc. this script uses!
mods = []
deps = []
for script in sys.argv[1:]:
    finder = ModuleFinder()
    finder.run_script(script) # parse the script
    # Python 3 fix: dict.iteritems() no longer exists; items() works.
    for name, mod in finder.modules.items():
        if not mod.__file__: # this module is builtin, so we can skip it
            continue
        if mod.__file__ not in deps: # grab the file itself
            deps.append(mod.__file__)
        moddir = moduledir(mod.__file__) # if it's part of a module...
        if moddir and moddir not in mods: #
            deps += list(pyfiles(moddir)) # ...get the whole module
            mods.append(moddir)
# Include some bits that the python install itself needs
print(get_makefile_filename())
print(get_config_h_filename())
print(os.path.join(libdir,'site.py'))
print(os.path.join(libdir,'sysconfig.py'))
# And print the list of deps.
for d in deps:
print(d)
|
#!/usr/bin/env python
import numpy as np
from collections import deque
N, M = map(int, input().split())
G = [[] for _ in range(N)]
for _ in range(M):
a, b = map(int, input().split())
a -= 1
b -= 1
G[a].append(b)
G[b].append(a)
def dfs(graph, v):
    """Breadth-first scan of the component containing v.

    A vertex is collected only when every vertex collected so far is among
    its neighbours, so the result grows as a mutually-adjacent set.
    (Name kept for compatibility; the traversal is actually BFS.)
    """
    visited = {v}
    collected = set()
    queue = deque([v])
    while queue:
        current = queue.popleft()
        if collected <= set(graph[current]):
            collected.add(current)
        for neighbour in graph[current]:
            if neighbour in visited:
                continue
            visited.add(neighbour)
            queue.append(neighbour)
    return collected
ALL = set(range(N))
A = dfs(G, 0)
print(A)
B = dfs(G, list(ALL-A)[0])
print(B)
|
from matplotlib.pyplot import*
import numpy as np
import matplotlib.pyplot as plt
def f(x):
    """Sample quadratic x^2 - 85x + 400 (roots at x = 5 and x = 80)."""
    return x * x - 85 * x + 400
def falsi( a , b , f , kmax ):
    """Regula falsi (false position) root finding for f on [a, b].

    Iterates until the estimate moves less than 1e-7 or kmax iterations.
    Assumes f(a) and f(b) differ in sign; if f(b) == f(a) at any step the
    interpolation divides by zero — TODO confirm callers guarantee a
    bracketing interval.

    Improvement: the original only printed the result and returned None;
    it now also returns the final estimate (backward compatible — callers
    that ignored the return value are unaffected).
    """
    i = 0
    dif = 10000000
    x = 1
    while( i < kmax and dif > 0.0000001 ):
        ant = x
        # Interpolation point of the secant through (a, f(a)) and (b, f(b)).
        x = ( (f(b) * a - f(a) * b) / ( f(b) - f(a)))
        if( f(a) * f(x) < 0 ):
            b = x
        else:
            a = x
        dif = abs( x - ant)
        i = i + 1
        print('Iteração: ' , i , ' xn: ' , x)
    print('result: ' , x)
    return x
falsi( -10000 , 50 , f , 5000)
|
from server.models import User, Location
from server import app, db, twilio_client
from datetime import datetime, timedelta
import json
from server.constants import *
from server.sms.msg_templates import *
#TODO: Option for neighboring counties
def filter_users_to_alert(users):
    """Return the users whose SMS frequency window has elapsed.

    A user is due when last_sms_timestamp plus their configured frequency
    (settings["freqValue"], in days) is at or before the current UTC time.
    """
    due = []
    for user in users:
        wait = timedelta(days=int(user.settings["freqValue"]))
        if user.last_sms_timestamp + wait <= datetime.utcnow():
            due.append(user)
        else:
            print(f"User {user.phone_number}: Not time yet")
    return due
def filter_locations(user):
    """Return the user's locations that are worth reporting.

    When the user only wants change reports (settings["reportChangesValue"]),
    locations that have not changed since the last SMS are dropped;
    otherwise every location is kept.
    """
    changes_only = user.settings["reportChangesValue"]
    wanted = []
    for loc in user.locations:
        if changes_only and loc.last_change_time <= user.last_sms_timestamp:
            print(f"Location {loc.id}: not changed")
            continue
        wanted.append(loc)
    return wanted
def calculate_stat_diffs(user, loc):
    """Compute the change in Confirmed/Deaths counts for one location since
    the user's last SMS.

    Returns (new_confirmed, new_deaths, time_since_str), where
    time_since_str is a human-readable elapsed time ("N hours"/"N days"),
    or None when less than ~half an hour has passed (callers then skip the
    "new since" line).  Assumes user.prev_stats[str(loc.id)] exists —
    the caller checks this before calling.
    """
    prev_stats = user.prev_stats[str(loc.id)]
    prev_time = user.last_sms_timestamp
    # if prev_stats is None:
    #   # grab most recent previous stat of location
    #   if loc.prev_stats:
    #     prev_stats, prev_time = loc.prev_stats[0], loc.prev_stats[0]["timestamp"]
    #     prev_time = datetime.fromtimestamp(prev_time)
    #   else:
    #     prev_time, prev_stats = datetime.now(), loc.stats
    new_confirmed = int(loc.stats["Confirmed"]) - int(prev_stats["Confirmed"]) #TODO: don't assume always increase!
    new_deaths = int(loc.stats["Deaths"]) - int(prev_stats["Deaths"])
    print(f"Calculating stats diff:\nNOW: {datetime.now()}\nPREV: {prev_time}")
    # NOTE(review): datetime.now() here vs datetime.utcnow() in
    # filter_users_to_alert — mixed clock bases (see TODO below); confirm
    # which timezone last_sms_timestamp is stored in.
    time_diff = datetime.now() - prev_time
    print(f"Time Diff: {time_diff}")
    time_since_str = f"{round(time_diff.seconds / 3600)} hours" if time_diff.days < 1 else f"{time_diff.days} days"
    # TODO: fix issue where server calls to datetime.now() is in UTC but mine aren't
    if round(time_diff.total_seconds() / 3600) == 0:
        time_since_str = None
    return new_confirmed, new_deaths, time_since_str
def build_alert_msg(user, locs=None, update_stats=True):
    """Compose the SMS alert body for a user.

    For each location: a header line, the new-case delta since the last SMS
    (when previous stats exist and enough time has passed), the running
    totals, and a source/last-updated footer.  The message templates
    (ALERT_MSG_*) come from server.sms.msg_templates.

    Args:
        user: the recipient; provides places, prev_stats and update_stats().
        locs: locations to include; defaults to all of the user's locations.
        update_stats: when True, persist the reported stats as the user's
            new baseline for the next diff.
    Returns:
        The assembled message string.
    """
    locs = user.locations if locs is None else locs
    print(f"LOCs TO SEND: {locs}")
    msg = ""
    for loc in locs:
        place = [p for p in user.places if p["location_id"] == loc.id][0] # TODO: Ignores places with same location
        county = loc.name  # NOTE(review): unused local
        msg += ALERT_MSG_LOCATION % (place["data"]["description"], loc.name)
        if user.prev_stats and str(loc.id) in user.prev_stats:
            new_confirmed, new_deaths, time_since = calculate_stat_diffs(user, loc)
            if time_since is not None:
                msg += ALERT_MSG_NEW % (new_confirmed, new_deaths, time_since)
        total_confirmed, total_deaths = loc.stats["Confirmed"], loc.stats["Deaths"]
        msg += ALERT_MSG_TOTAL % (total_confirmed, total_deaths)
        last_updated = loc.last_update_time.strftime(TIME_DISPLAY_STR)
        source = "JHU CSSE" # TODO: make this general
        msg += ALERT_SOURCE % (last_updated, source)
    if update_stats:
        # Snapshot the just-reported stats as the baseline for next time.
        stats = {loc.id:loc.stats for loc in locs}
        user.update_stats(stats)
    print(msg)
    return msg
def build_starter_msg(user):
    """Build the first-ever message for a user: the STARTER greeting
    (from msg_templates) followed by a full alert for all their locations."""
    msg = STARTER + build_alert_msg(user, user.locations)
    return msg
def send_msg(user, msg, update_stats=True):
    """Send `msg` to the user's phone number via the Twilio client.

    Note: update_stats is currently unused here — stats persistence happens
    in build_alert_msg().  Returns None; logs the Twilio message SID.
    """
    message = twilio_client.messages \
        .create(
            body=msg,
            from_=app.config["TWILIO_NUMBER"],
            to=user.phone_number
        )
    print(f"Sent Message. User: {user.phone_number}, ID: {message.sid}")
def run_alerts():
    """Entry point for the periodic alert job.

    Selects the users whose frequency window has elapsed, builds and sends
    a per-user alert covering their (possibly filtered) locations.  Errors
    for one user are logged and do not stop the rest of the batch.
    """
    all_users = User.query.all()
    all_locations = Location.query.all()  # NOTE(review): unused — consider removing
    users_to_alert = filter_users_to_alert(all_users)
    print(f"Users to Alert: {[u.phone_number for u in users_to_alert]}")
    for user in users_to_alert:
        print(f"Checking number: {user.phone_number}")
        try:
            locs = filter_locations(user)
            if len(locs) == 0:
                print(f"User {user.phone_number}: No locations updated!")
                continue
            msg = build_alert_msg(user, locs)
            send_msg(user, msg)
        except Exception as e:
            # Best-effort batch: log and continue with the next user.
            print(f"Could not send message to {user.phone_number}!")
            print(f"Error: {e}")
"""Initialization module."""
from .celery import CELERY_APP as celery_app
__all__ = ['celery_app']
|
_base_ = './mask-rcnn_s50_fpn_syncbn-backbone+head_ms-1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
import subprocess
from eth_utils import (
to_text,
)
def get_version_from_git() -> str:
    """Return the current version string as reported by `git describe`.

    Raises subprocess.CalledProcessError when not run inside a git
    checkout (or when no tag is reachable).
    """
    version = subprocess.check_output(["git", "describe"]).strip()
    return to_text(version)
|
"""
Operations on containers
Interacts directly with Docker and Docker-Compose. In the future it will be possible to integrate also Docker Swarm.
:author: Blackandred <riotkit .. riseup.net>
"""
import os
import re
import subprocess
from time import time
from contextlib import contextmanager
from typing import Optional
from typing import Dict
from typing import List
from collections import OrderedDict
from json import loads as json_loads
from rkd.api.contract import ExecutionContext
from .interface import HarborTaskInterface
from .service import ServiceDeclaration
from .exception import ServiceNotReadyException
from .exception import ServiceNotCreatedException
class InspectedContainer(object):
    """Running or stopped container model built from `docker inspect` output."""

    name: str
    inspection: dict

    def __init__(self, name: str, inspection: dict):
        self.name = name
        self.inspection = inspection

    def get_id(self) -> str:
        """Container identifier as reported by the docker daemon."""
        return self.inspection['Id']

    def get_health_status(self) -> str:
        """Health-check status; a container without a health check counts
        as healthy while running and unhealthy otherwise."""
        state = self.inspection['State']
        status = state.get('Health', {}).get('Status')
        if status is not None:
            return status
        return 'healthy' if state['Status'] == 'running' else 'unhealthy'

    def has_health_check(self):
        """Whether the container defines a health check at all."""
        health = self.inspection.get('State', {}).get('Health', {})
        return 'Status' in health

    def get_health_check_command(self) -> Optional[str]:
        """The health-check command line, or None when not defined."""
        test = self.inspection.get('Config', {}).get('Healthcheck', {}).get('Test')
        return None if test is None else ' '.join(test[1:])

    def get_name(self) -> str:
        """Container name this model was created with."""
        return self.name

    def get_image(self) -> Optional[str]:
        """Image name from the container config, or None when absent."""
        return self.inspection.get('Config', {}).get('Image')

    def get_start_time(self) -> str:
        """Start timestamp truncated to second precision (YYYY-MM-DDTHH:MM:SS)."""
        return self.inspection['State']['StartedAt'][:19]

    def to_dict(self) -> dict:
        """Raw inspection payload."""
        return self.inspection
def build_compose_files_list(src_root: str, is_dev: bool) -> list:
    """Collect the docker-compose YAML files to include as arguments.

    The base list always starts with 'docker-compose.yml'; every *.yml or
    *.yaml found under <src_root>/conf (and <src_root>/conf.dev when
    is_dev is True) is appended.
    """
    yamls = {
        'conf': ['docker-compose.yml'],
        'conf.dev': []
    }
    for env_type, found in yamls.items():
        for root, _subdirs, files in os.walk(src_root + '/' + env_type):
            found.extend(root + '/' + file
                         for file in files
                         if file.endswith('.yml') or file.endswith('.yaml'))
    return yamls['conf'] + yamls['conf.dev'] if is_dev else yamls['conf']
class ComposeDriver(object):
    """Performs container operations using docker-compose
    """
    scope: HarborTaskInterface
    ctx: ExecutionContext
    project_name: str

    # lazy — cached by get_compose_args() on first use
    _compose_args: str = None

    def __init__(self, scope: HarborTaskInterface, ctx: ExecutionContext, project_name: str):
        self.scope = scope
        self.project_name = project_name
        self.ctx = ctx

    def get_last_container_name_for_service(self, service_name: str) -> str:
        """Gets full container name of last deployed service (last instance name)

        Raises:
            ServiceNotCreatedException: when no running instance matches
        """
        service_name = self.project_name + '_' + service_name + '_'
        try:
            ps = self.scope.sh('docker ps --format=\'{{ .Names }}\' | grep "%s"' % service_name, capture=True).split("\n")
        except subprocess.CalledProcessError:
            # grep exits non-zero when nothing matches — treat as "no instances"
            ps = ''
        instance_numbers = []
        for instance in ps:
            matches = re.findall(service_name + '([0-9]+)', instance)
            if matches:
                instance_numbers.append(int(matches[0]))
        if not instance_numbers:
            raise ServiceNotCreatedException(service_name)
        # highest replica number == most recently spawned instance
        return service_name + str(max(instance_numbers))

    def inspect_container(self, container_name: str):
        """Inspects a running/stopped container"""
        out = self.scope.sh('docker inspect %s' % container_name, capture=True)
        as_json = json_loads(out)
        if not as_json:
            raise Exception('Cannot inspect container, unknown docker inspect output: %s' % out)
        return InspectedContainer(container_name, as_json[0])

    def inspect_containers(self, names: list):
        """Inspect multiple containers by name at once (does same as inspect_container()
        but has better performance for multiple containers at once)
        """
        out = self.scope.sh('docker inspect %s' % ' '.join(names), capture=True)
        as_json = json_loads(out)
        if not as_json:
            raise Exception('Cannot inspect container, unknown docker inspect output: %s' % out)
        containers = []
        num = 0
        # pairs results with `names` positionally — assumes docker inspect
        # preserves the input order (it does for existing containers)
        for sub_json in as_json:
            containers.append(InspectedContainer(names[num], sub_json))
            num += 1
        return containers

    @contextmanager
    def service_discovery_stopped(self):
        """Stops a service discovery for a moment; the finally-block
        guarantees it is restarted even if the wrapped code raises"""
        try:
            self.scope.io().info('Suspending service discovery')
            self.compose(['stop', 'gateway_proxy_gen'])
            yield
        finally:
            self.scope.io().info('Starting service discovery')
            self.compose(['up', '-d', '--no-recreate', 'gateway_proxy_gen'])

    #
    # Methods to spawn processes in shell
    #
    def compose(self, arguments: list, capture: bool = False) -> Optional[str]:
        """Makes a call to docker-compose with all prepared arguments that should be"""
        cmd = 'IS_DEBUG_ENVIRONMENT=%s docker-compose %s %s' % (
            self.scope.is_dev_env,
            self.get_compose_args(),
            ' '.join(arguments)
        )
        self.scope.io().debug('Calling compose: %s' % cmd)
        return self.scope.sh(cmd, capture=capture)

    def exec_in_container(self, service_name: str, command: str, instance_num: int = None, capture: bool = True) -> str:
        """Executes a command in given container"""
        if instance_num is None:
            # default to the most recently spawned replica of the service
            instance_num = int(self.get_last_container_name_for_service(service_name).split('_')[-1])
        return self.compose([
            'exec', '-T',
            '--index=%i' % instance_num if instance_num else '',
            service_name,
            'sh', '-c', '"', command.replace('"', '\\"'), '"'
        ], capture=capture)

    def exec_in_container_passthrough(self, command: str, service: ServiceDeclaration, instance_num: int = None,
                                      shell: str = '/bin/sh', tty: bool = True, interactive: bool = True):
        """Run a command in the container attached directly to the current
        terminal (no output capturing); returns the process exit code"""
        container_name = self.find_container_name(service, instance_num)
        opts = []
        if tty:
            opts.append('-t')
        if interactive:
            opts.append('-i')
        return subprocess.call(['docker', 'exec'] + opts + [container_name, shell, '-c', command])

    #
    # Basics - compose arguments present in all commands
    #
    def get_compose_args(self):
        """Gets arguments to use with docker-compose"""
        if not self._compose_args:
            # computed once, then cached for the lifetime of this driver
            self._compose_args = self.create_compose_arguments(self.ctx.get_env('APPS_PATH'),
                                                               is_dev=self.scope.is_dev_env)
            self.scope.io().debug('Compose args: %s' % self._compose_args)
        return self._compose_args

    def create_compose_arguments(self, src_root: str, is_dev: bool) -> str:
        """Internal method: Builds list of docker-compose arguments
        """
        yamls = build_compose_files_list(src_root, is_dev)
        args = ' --project-directory=%s -p %s ' % (os.getcwd(), self.project_name)
        for yaml_path in yamls:
            args += ' -f %s ' % yaml_path
        return args

    #
    # Domain specific methods
    #
    def up(self, service: ServiceDeclaration, norecreate: bool = False, force_recreate: bool = False,
           extra_args: str = '', capture: bool = False):
        """Bring up the service at its declared replica count"""
        if norecreate and force_recreate:
            raise Exception('Logic exception, cannot set --no-recreate and --force-recreate at one time')
        self.compose([
            'up', '-d',
            '--no-recreate' if norecreate else '',
            '--force-recreate' if force_recreate else '',
            '--scale %s=%i' % (service.get_name(), service.get_desired_replicas_count()),
            service.get_name(),
            extra_args
        ], capture=capture)

    def find_container_name(self, service: ServiceDeclaration, instance_num: int = None) -> str:
        """Resolve the docker container name of a service replica.

        Raises:
            ServiceNotCreatedException: when the service or replica is absent
        """
        if not instance_num:
            return self.get_last_container_name_for_service(service.get_name())
        containers = self.get_created_containers(only_running=False)
        if not service.get_name() in containers:
            raise ServiceNotCreatedException(service.get_name(), instance_num=instance_num)
        matches = dict(containers[service.get_name()]).keys()
        if instance_num not in matches:
            raise ServiceNotCreatedException(service.get_name(), instance_num=instance_num)
        return self.create_container_name(service, instance_num)

    def create_container_name(self, service: ServiceDeclaration, instance_num: int) -> str:
        """Build the conventional "<project>_<service>_<num>" container name"""
        return self.project_name + '_' + service.get_name() + '_' + str(instance_num)

    def get_logs(self, service: ServiceDeclaration, instance_num: int = None, raw: bool = False,
                 follow: bool = False) -> str:
        """Gets logs from given container

        Args:
            service: Service declaration
            instance_num: Replica number
            raw: Do not return result, pass it directly to stdout and to stderr
            follow: Follow the output

        Returns:
            Logs in text format (empty string in raw mode)
        """
        args = ''
        if follow:
            args += ' --follow '
        command = 'docker logs %s "%s" 2>&1' % (args, self.find_container_name(service, instance_num))
        if raw:
            subprocess.call(command, shell=True)
            return ''
        return self.scope.sh(command, capture=True)

    def wait_for_log_message(self, text: str, service: ServiceDeclaration, instance_num: int = None,
                             timeout: int = 300) -> bool:
        """Waits for a text to appear in docker log

        Args:
            text: Text to wait for
            service: Service declaration
            instance_num: Replica number
            timeout: Timeout in seconds

        Raises:
            ServiceNotReadyException
        """
        timeout_at = time() + timeout
        # NOTE(review): tight polling loop with no sleep between iterations —
        # consider a short delay to avoid hammering the docker daemon
        while time() < timeout_at:
            logs = self.get_logs(service, instance_num)
            if text in logs:
                return True
        raise ServiceNotReadyException(service.get_name(), text, instance_num)

    def rm(self, service: ServiceDeclaration, extra_args: str = '', capture: bool = False):
        """Stop and force-remove all containers of the service"""
        self.compose(['rm', '--stop', '--force', service.get_name(), extra_args], capture=capture)

    def kill_older_replica_than(self, service: ServiceDeclaration, project_name: str,
                                existing_containers: Dict[int, bool],
                                already_killed: int = 0):
        """Kill first old replica on the list"""
        # selects from the tail of the instances mapping; each additional
        # already_killed pushes the selector one entry further back
        instance_index = (already_killed * -1) - 1
        previous_instance_num = list(existing_containers.items())[instance_index][0]
        service_full_name = project_name + '_' + service.get_name() + '_' + str(previous_instance_num)
        self.scope.io().debug('Instances: ' + str(list(existing_containers.items())))
        self.scope.io().debug('Previous instance selector: %i' % instance_index)
        self.scope.io().info('Replica "%s" was spawned, killing older instance' % service_full_name)
        self.scope.io().info('Killing replica num=%i' % previous_instance_num)
        self.scope.sh('docker rm -f "%s"' % service_full_name)

    def scale_one_up(self, service: ServiceDeclaration) -> Dict[int, bool]:
        """Scale up and return last instance name (docker container name)"""
        desired_replicas = service.get_desired_replicas_count()
        self.scope.io().info('Scaling up to %i' % (desired_replicas + 1))
        try:
            self.compose(
                ['up', '-d', '--no-deps',
                 '--scale %s=%i' % (service.get_name(), desired_replicas + 1), service.get_name(), '2>&1'],
                capture=True
            )
        except subprocess.CalledProcessError as e:
            # surface compose's combined output before re-raising
            self.scope.io().err(e.output.decode('utf-8'))
            raise e
        self.scope.io().info('Finding last instance name...')
        instances: Dict[int, bool] = self.get_created_containers(only_running=False)[service.get_name()]
        self.scope.io().info('OK, it is "%s"' % max(instances.keys()))
        return instances

    def scale_to_desired_state(self, service: ServiceDeclaration):
        """Scale to declared state - eg. in case of a failure"""
        self.compose(
            ['up', '-d', '--no-deps',
             '--scale %s=%i' % (service.get_name(), service.get_desired_replicas_count()),
             service.get_name(),
             '2>&1']
        )

    def rm_image(self, img_to_remove: str, capture: bool = False):
        """Remove a docker image by name/id"""
        self.scope.sh('docker rmi %s 2>&1' % img_to_remove, capture=capture)

    def restart(self, service_name: str, extra_args: str = ''):
        """Restart all containers of a service"""
        self.compose(['restart', service_name, extra_args])

    def stop(self, service_name: str, extra_args: str = '', capture: bool = False):
        """Stop all containers of a service"""
        self.compose(['stop', service_name, extra_args], capture=capture)

    def ps(self, params: list):
        """Pass-through to `docker-compose ps`"""
        self.compose(['ps'] + params)

    def pull(self, services_names: list):
        """Pull images for the given services"""
        self.compose(['pull'] + services_names)

    def get_created_containers(self, only_running: bool) -> Dict[str, Dict[int, bool]]:
        """Gets all running services

        Returns a mapping: service name -> {replica number -> is_up},
        with replica numbers sorted ascending."""
        # @todo: Cover with a test
        instances = self.scope.sh('docker ps -a --format="{{ .Names }}|{{ .Status }}"', capture=True).strip().split("\n")
        counted = {}
        for instance in instances:
            try:
                name, status = instance.split('|')
            except ValueError:
                # malformed/empty line — skip
                continue
            if not name.startswith(self.project_name + '_'):
                continue
            # NOTE(review): the "-2" slice assumes a single-digit replica
            # suffix ("_N"); a name like "..._12" would be cut wrongly — confirm
            service_name = name[len(self.project_name + '_'):-2]
            is_up = status.upper().startswith('UP')
            service_num = name.split('_')[-1]
            if service_name not in counted:
                counted[service_name] = OrderedDict()
            if only_running and not is_up:
                continue
            counted[service_name][int(service_num)] = is_up
        counted_and_sorted = {}
        for service in counted:
            counted_and_sorted[service] = OrderedDict(sorted(counted[service].items()))
        return counted_and_sorted

    def find_all_container_names_for_service(self, service: ServiceDeclaration) -> List[str]:
        """Finds all created container names for given service name

        Args:
            service: Service declaration object

        Raises:
            ServiceNotCreatedException: when the service has no containers
        """
        created = self.get_created_containers(only_running=False)
        if not service.get_name() in created:
            raise ServiceNotCreatedException(service.get_name())
        return list(
            map(
                lambda instance_num: self.create_container_name(service, instance_num),
                created[service.get_name()].keys()
            )
        )
|
# -*- coding: utf-8 -*-
"""
The Django URLs for the server.
"""
from django.conf.urls.defaults import patterns
from slumber.server.http import view_handler
from slumber.server.meta import applications
# Build the URL-regex -> view mapping for everything the Slumber server
# exposes: the application list, each app's model list, each model, and
# every operation a model declares.
_urls = {'^$': 'slumber.server.views.get_applications'}
for app in applications():
    _urls['^(%s)/$' % app.path] = 'slumber.server.views.get_models'
    for model in app.models.values():
        _urls['^(%s)/(%s)/$' % (app.path, model.name)] = \
            'slumber.server.views.get_model'
        for op in model.operations():
            _urls['^(%s)/(%s)/%s/%s$' %
                  (app.path, model.name, op.name, op.regex)] = \
                view_handler(op.operation)
# The name urlpatterns is defined by Django and we can't change it
# pylint: disable=C0103
urlpatterns = patterns('', *_urls.items())
|
#!/usr/bin/python3
def flip(stack, count):
    """Invert the first `count` pancakes of `stack` in place ('+' <-> '-')."""
    invert = {"+": "-", "-": "+"}
    for idx in range(count):
        stack[idx] = invert[stack[idx]]
def pack(stack):
    """Collapse consecutive runs of equal symbols into a single symbol.

    E.g. ['+', '+', '-', '-', '+'] -> ['+', '-', '+'].  Assumes a
    non-empty input (like the original).
    """
    packed = [stack[0]]
    for symbol in stack[1:]:
        if symbol != packed[-1]:
            packed.append(symbol)
    return packed
def pancakes(stack):
    """Count the greedy sign-flips needed until every pancake shows '+'.

    The stack is first collapsed into runs (pack); each step flips
    everything above (and including) the deepest remaining '-'.
    """
    groups = pack(list(stack))
    size = len(groups)
    flips = 0
    while not all(symbol == "+" for symbol in groups):
        flip(groups, size - groups[::-1].index('-'))
        flips += 1
    return flips
def codejammer():
    """Read Code Jam style input from stdin: the first line holds the number
    of test cases, each following line a pancake stack of '+'/'-' chars.
    Prints one "Case #i: answer" line per case."""
    Rounds = int(input())
    for r in range(1, Rounds + 1):
        print("Case #{}: {}".format(r, pancakes(input())))


if __name__ == '__main__':
    codejammer()
|
# -*- coding: utf-8 -*-
# Key parameters
DATA_DIR = '../data/'
TRAIN_DATA_DIR = '../traindata/'
PCON = 'BB' # contract (instrument) short name
PERIOD = '1MINUTE' # time resolution of the contract data
# Backtesting parameters
CAPITAL = 1000000 # initial capital
RISK = 0.01 # risk fraction taken per trade
CLEVEL = 0 # machine-learning prediction confidence threshold
OPCYCLE = 4 # operation cycle = OPCYCLE * smallest data time unit
# Strategy parameters
MAXUNIT = 4 # maximum number of position units
TT = { # Turtle-trading windows (multiples of the base time unit)
    'IN': 100, # market-entry breakout window
    'OUT': 80, # market-exit breakout window
    'N': 40 # volatility (N / ATR) window
}
# Machine-learning parameters
CLOSE_IDX = 1
PRED_RANGE = 5
# unused for now
SPLIT_RATIO = 0.2
class ML_Default(object):
    """Read-only accessors for the machine-learning configuration values
    defined at module level."""
    # Add machine learning para below!
    def __init__(self):
        pass

    @classmethod
    def get_close_idx(cls):
        # Column index of the close price in the training data.
        return CLOSE_IDX

    @classmethod
    def get_pred_range(cls):
        # Number of bars ahead that predictions target.
        return PRED_RANGE

    @classmethod
    def get_split_ratio(cls):
        # Train/test split ratio (currently unused — see module comment).
        return SPLIT_RATIO
class Default(object):
    """Read-only accessors for path, naming and backtest configuration.

    The get_tt/get_maxunit/get_capital accessors sanity-check the values
    with asserts before returning them.
    """
    def __init__(self):
        pass

    @classmethod
    def get_datadir(cls):
        # Full training-data CSV for the configured contract.
        data_dir = TRAIN_DATA_DIR + PERIOD + '/SHFE/' + PCON + '_full.csv'
        return data_dir

    @classmethod
    def get_traindir(cls):
        # NOTE(review): uses a relative './traindata/' prefix unlike the
        # other accessors, which use the module constants — confirm intended.
        train_dir = './traindata/' + PERIOD + '/SHFE/' + PCON + '.train.csv'
        return train_dir

    @classmethod
    def get_testdir(cls):
        # Test-data CSV.
        test_dir = DATA_DIR + PERIOD + '/SHFE/' + PCON + '.csv'
        return test_dir

    @classmethod
    def get_sadir(cls):
        # "sa" companion CSV for the contract.
        sa_dir = DATA_DIR + PERIOD + '/SHFE/' + PCON + '.sa.csv'
        return sa_dir

    @classmethod
    def get_name(cls):
        # e.g. 'BB.SHFE-1.MINUTE' for PCON='BB', PERIOD='1MINUTE'.
        name = PCON + '.SHFE-' + PERIOD[0] + '.' + PERIOD[1:]
        return name

    @classmethod
    def get_risk(cls):
        return RISK

    @classmethod
    def get_clevel(cls):
        return CLEVEL

    @classmethod
    def get_opcycle(cls):
        return OPCYCLE

    @classmethod
    def get_capital(cls):
        assert CAPITAL > 0
        return CAPITAL

    @classmethod
    def get_tt(cls):
        # Turtle windows must be positive ints no longer than 120 bars.
        assert 0 < TT['IN'] <= 120
        assert 0 < TT['OUT'] <= 120
        assert 0 < TT['N'] <= 120
        assert isinstance(TT['IN'], int)
        assert isinstance(TT['OUT'], int)
        assert isinstance(TT['N'], int)
        return TT

    @classmethod
    def get_maxunit(cls):
        assert MAXUNIT > 0
        return MAXUNIT
if __name__ == '__main__':
    # Smoke test: print the period part of the instrument name,
    # e.g. 'BB.SHFE-1.MINUTE' -> 'MINUTE'.
    # The parenthesized print works on both Python 2 and Python 3;
    # the old `print expr` statement form is a SyntaxError under Python 3.
    print(Default.get_name().split('-')[1][2:])
|
#!/usr/bin/env python
"""
This file contains a health-monitoring plugin which is still
being developed and is still in the alpha testing stage.
----------------------------------------------------------------
check-disk
DESCRIPTION
Check Disk Usage by filesystem, by analysing the output from the "df -x tmpfs" command.
OUTPUT
plain text
PLATFORMS:
Linux
DEPENDENCIES:
Python 2.7+
USAGE:
check-disk.py -w 80 -c 90
ARGUMENTS:
  --warning: Percent filesystem usage beyond which an exit code of 1 is returned
-w: alias for --warning
--critical: Percent filesystem usage beyond which an exit code of 2 is returned
-c: alias for --critical
NOTES:
After researching several nagios disk checks, found it very complicated. This
is a simple version useful for network switches - Stanley K.
TODO:
LICENSE:
Copyright 2015 Cumulus Networks
Original Author: Stanley Karunditu <stanleyk@cumulusnetworks.com>
  Released under the MIT license. See LICENSE for details.
"""
import argparse
import sys
import subprocess
def check_disk(_args):
    """Check each filesystem's usage against the warning/critical thresholds.

    Parses the output of ``df -x tmpfs``. For the first filesystem whose
    usage percentage exceeds a threshold, prints a message and exits with
    code 2 (critical) or 1 (warning). Falls through (implicit exit 0) when
    nothing is over threshold.

    :param _args: argparse namespace with integer `warning` and `critical`
        percent thresholds (either may be None).
    """
    cmd = '/bin/df -x tmpfs'
    df_output = None
    try:
        # universal_newlines=True makes check_output return text on
        # Python 3 (bytes would break the .split('\n') below).
        df_output = subprocess.check_output(cmd.split(),
                                            universal_newlines=True)
    except (OSError, subprocess.CalledProcessError) as e:
        # The original caught only IOError, which misses a non-zero df
        # exit status (CalledProcessError).
        print("failed to execute df %s" % (e))
        exit(2)
    for _entry in df_output.split('\n'):
        _entry_details = _entry.split()
        _filesystem = None
        _percent_used = None
        _msg = None
        _code = 0
        # Skip the header row.
        if len(_entry_details) > 0 and _entry_details[0] == 'Filesystem':
            continue
        try:
            _filesystem = _entry_details[-1]
            _percent_used = int(_entry_details[-2].split('%')[0])
        except (ValueError, IndexError):
            # Bug fix: `except ValueError and IndexError` evaluated to
            # IndexError only, so a malformed numeric field crashed here.
            continue
        if _percent_used and _args.critical and _percent_used > _args.critical:
            _msg = "CRITICAL: Filesystem '%s' Usage Passed Threshold " % (_filesystem) + \
                   "Current:%s%% Threshold:%s%%" % (_percent_used, _args.critical)
            _code = 2
        elif _percent_used and _args.warning and _percent_used > _args.warning:
            _msg = "WARNING: Filesystem '%s' Usage Passed Threshold " % (_filesystem) + \
                   "Current:%s%% Threshold:%s%%" % (_percent_used, _args.warning)
            _code = 1
        if _msg:
            print(_msg)
            exit(_code)
def print_help(parser):
    """Print CLI usage and exit with status 2 (bad invocation)."""
    parser.print_help()
    exit(2)
if __name__ == "__main__":
    # Build the CLI: both thresholds are optional integer percentages.
    parser = argparse.ArgumentParser(
        description="Check Disk Filesystem Usage")
    parser.add_argument('-c', '--critical',
                        type=int,
                        metavar='PERCENT',
                        help='Percent Filesystem Usage Critical Threshold')
    parser.add_argument('-w', '--warning',
                        type=int,
                        metavar='PERCENT',
                        help='Percent Filesystem Usage Warning Threshold')
    # Require at least one CLI argument; otherwise show usage and exit(2).
    if (len(sys.argv) < 2):
        print_help(parser)
    _args = parser.parse_args()
    check_disk(_args)
|
from ._affine_transform import affine_transform
from ._AffineTransform3D import AffineTransform3D
from ._apply_vector_field import apply_vector_field
from ._deskew_y import deskew_y
from ._deskew_x import deskew_x
from ._rigid_transform import rigid_transform
from ._rotate import rotate
from ._scale import scale
from ._translate import translate
|
from sklearn import *
from clean_train import *
import time, re, amueller_mlp
from sklearn.metrics import *
import numpy as np
def logRange(limit, n=10, start_at_one=False):
    """Return `n` logarithmically spaced integer indices in [0, limit-1].

    Values are increasing, unique, and returned as a np.uint64 array.

    :param limit: upper bound; the last index is round(limit) - 1
    :param n: number of indices requested
    :param start_at_one: if truthy, drop index 0 so the range starts at the
        second sample. (Was a mutable default `[]` used purely as a boolean
        flag; `False` is safer and truthy-compatible with existing callers
        that pass 1 or [].)
    :raises Exception: when n > limit (n distinct integers are impossible)
    """
    if start_at_one:
        n = n + 1
    if n > limit:
        raise Exception("n>limit!")
    result = [1]
    if n > 1:  # avoid ZeroDivisionError when only one value is requested
        ratio = (float(limit) / result[-1]) ** (1.0 / (n - len(result)))
        while len(result) < n:
            next_value = result[-1] * ratio
            if next_value - result[-1] >= 1:
                # Safe zone: next_value rounds to a new integer.
                result.append(next_value)
            else:
                # Same integer: bump by one and re-derive the ratio so the
                # remaining values still scale up to `limit`.
                result.append(result[-1] + 1)
                ratio = (float(limit) / result[-1]) ** (1.0 / (n - len(result)))
    # Round, shift to 0-based indexing and convert. A list comprehension is
    # used because Python 3's map() is lazy and np.array(map(...), ...)
    # would produce a 0-d object array.
    indices = np.array([round(x) - 1 for x in result], dtype=np.uint64)
    if start_at_one:
        return np.delete(indices, 0)
    else:
        return indices
def sort_results_csv(input_file='../../results/classifier_results.csv',output_file=''):
    """
    Sort the results CSV file and write it back out.

    Sorts on classifier id first, then on the feature columns; when the
    file has `dop_features`/`word_features` columns those are used as
    tie-breakers. Writing defaults to overwriting `input_file`.

    NOTE(review): np.recfromcsv is deprecated in recent NumPy releases —
    confirm the pinned NumPy version before upgrading.
    """
    if output_file =='': output_file = input_file
    #import header first (recfromcsv consumes it, so re-read it separately)
    with open(input_file, 'r') as f:
        header = f.readline()
    #load csv into table (automatically with correct datatypes)
    table = np.recfromcsv(input_file,delimiter=',')
    #only sort if we have more then one element (to prevent bugs)
    if np.size(table) > 1:
        #sort on features; column set differs between result-file formats
        if 'word_features' in table.dtype.names:
            table.sort(order=['classifier_id','features','dop_features','word_features'])
        else:
            table.sort(order=['classifier_id','features'])
    #store sorted file: header first, then one serialized row per record
    with open(output_file,'w') as fd:
        fd.write(header)
        if 'word_features' in table.dtype.names:
            [fd.write(settings_to_string(tup[0],tup[1],tup[2],tup[3],tup[4],tup[5],tup[6],tup[7],tup[8],tup[9]) + "\n") for tup in table]
        else:
            [fd.write(settings_to_string(tup[0],tup[1],tup[2],tup[3],tup[4],tup[5],tup[6],tup[7]) + "\n") for tup in table]
def findRun(classifier_id,features=None,resultsfile = '../../results/classifier_results.csv',word_features=None,dop_features=None):
    """
    Return the list of row indices where the classifier/features
    combination occurs in the results file; empty list if it never occurs.

    When both `dop_features` and `word_features` are given (>= 0) they are
    matched instead of the plain `features` column.

    NOTE(review): the `dop_features>=0` test relies on the Python 2
    behaviour that `None >= 0` is False; under Python 3 this raises
    TypeError when the defaults are used — confirm target interpreter.
    """
    table = np.recfromcsv(resultsfile,delimiter=',')
    #make sure table is allways iterable (np collapses single rows to 0-d)
    if np.size(table)==1: table=list(table.flatten())
    if dop_features>=0 and word_features>=0:
        # ids are stored quoted in the csv, hence the added '"' wrapping
        return [n for n,tup in enumerate(table) if tup['classifier_id']=='"' + classifier_id + '"'
            and tup['dop_features']==dop_features and tup['word_features']==word_features]
    else:
        return [n for n,tup in enumerate(table) if tup['classifier_id']=='"' + classifier_id + '"' and tup['features']==features]
def settings_to_string(classifier_id,train_accuracy,test_accuracy,fit_time,score_time,
        features,train_conf_matrix='', test_conf_matrix='',dop_features=None,word_features=None):
    """Serialize one experiment record as a CSV line (also useful for regexps).

    The classifier id gets wrapped in double quotes unless it already is.
    When both dop_features and word_features are supplied (>= 0) they are
    appended as two extra columns.
    """
    # Add quotation marks around the classifier id if they are missing.
    if classifier_id == "" or not classifier_id[0] == '"':
        classifier_id = '"' + classifier_id + '"'
    fields = [classifier_id, train_accuracy, test_accuracy, fit_time,
              score_time, features, train_conf_matrix, test_conf_matrix]
    if dop_features >= 0 and word_features >= 0:
        fields.extend([dop_features, word_features])
    return ",".join("{0}".format(field) for field in fields)
def batch_run(test_settings,method=2):
    """
    Run each (classifier, feature-count) experiment and append the results
    to ../../results/M{method}_classifier_results.csv, skipping
    combinations already present in that file.

    Provide settings as an iterable of tuples:
    0: classifier object (any sklearn-style estimator)
    1: number of features (left after feature deduction)
    """
    resultsfile = '../../results/M{0}_classifier_results.csv'.format(method)
    training, test, y,r = getProcessedData(method)
    #initialize: sentinel that never equals a feature count, so the first
    #iteration always builds the feature vectors
    last_features = [];
    for settings in test_settings:
        #import parameters
        classifier = settings[0]
        features = settings[1]
        #convert the class name and properties into a string; replace
        #punctuation that would corrupt the CSV
        classifier_id = str(classifier)
        classifier_id = classifier_id.replace('\n', ' ').replace('"',"'").replace(',',';')
        classifier_id = ' '.join(classifier_id.split())
        #check if an experiment with the current settings was already conducted
        if findRun(classifier_id,features,resultsfile=resultsfile):
            print "Experiment with current settings was allready conducted, skipping"
        else:
            #open the csv file to append the results. Done inside the loop so
            #the file is updated live
            fd = open(resultsfile,'a')
            #re-run feature deduction only when the feature count changed
            if not last_features == features:
                X, Xtest = feature2vector(training,test,features)
                last_features = features
            #fit classifier
            print "Fitting " + classifier_id
            t0 = time.time()
            classifier.fit(X, y)
            fit_time = time.time() - t0
            #Predict labels on both the train (X) and test (Xtest) sets
            print "Fit classifier, calculating scores"
            t0 = time.time()
            y_pred = classifier.predict(X)
            r_pred = classifier.predict(Xtest)
            score_time = time.time()- t0
            #calculate performances; confusion matrices are flattened to one line
            train_accuracy = accuracy_score(y,y_pred)
            test_accuracy = accuracy_score(r,r_pred)
            train_conf_matrix = np.array_str(confusion_matrix(y,y_pred) ).replace("\n",' ')
            test_conf_matrix = np.array_str(confusion_matrix(r,r_pred) ).replace("\n",' ')
            #store results
            fd.write(settings_to_string(classifier_id,train_accuracy,
                test_accuracy,fit_time,score_time,features,
                train_conf_matrix, test_conf_matrix) + "\n")
            #close and re-sort the csv file after every experiment
            fd.close()
            sort_results_csv(input_file=resultsfile)
def main():
    """Run the full grid of (classifier, feature-count) experiments.

    Fixes over the original:
    - `settings` was a generator expression, so the first batch_run call
      exhausted it and the four subsequent calls iterated over nothing;
      it is now materialized as a list.
    - `sklearn` itself was never imported (only `from sklearn import *`),
      so `sklearn.linear_model.Perceptron` raised NameError; the
      star-imported `linear_model` submodule is used instead.
    """
    # Classifiers to test; their settings string is derived from repr()
    # inside batch_run. The estimator objects are shared across methods
    # on purpose (matches the original behaviour).
    classifiers = [amueller_mlp.MLPClassifier(n_hidden=200),
                   amueller_mlp.MLPClassifier(n_hidden=400),
                   amueller_mlp.MLPClassifier(n_hidden=800),
                   ensemble.RandomForestClassifier(),
                   linear_model.Perceptron(n_iter=60),
                   svm.SVC(kernel='linear'),
                   tree.DecisionTreeClassifier(),
                   ]
    # Maximum number of features: 261396
    features_set = logRange(261396, 15, 1)
    # Materialize the combinatorial grid once so every batch_run call
    # sees the complete set of settings.
    settings = [(classifier, features)
                for features in features_set
                for classifier in classifiers]
    batch_run(settings, method=4)
    batch_run(settings, method=1)
    batch_run(settings, method=3)
    batch_run(settings, method=2)
    batch_run(settings, method=0)
if __name__ == '__main__':
    main()
import random
HangMan = ['''
________
|/ |
|
|
|
|
|
|___''', '''
________
|/ |
| (_)
|
|
|
|
|___''', '''
________
|/ |
| (_)
| |
| |
|
|
|___''', '''
________
|/ |
| (_)
| \|
| |
|
|
|___''', '''
________
|/ |
| (_)
| \|/
| |
|
|
|___''', '''
________
|/ |
| (_)
| \|/
| |
| /
|
|___''', '''
________
|/ |
| (_)
| \|/
| |
| / \
|
|___''' ]
#print HangMan[0]
# Load the word list once; the context manager guarantees the file handle
# is closed (the original left the open file object `f` leaked).
with open("wordlist.txt", 'r') as f:
    words = f.read()
#while (playAgain):
word = random.choice(words.split())  # secret word for this round
lives = 6   # wrong guesses allowed before the game is lost
guess = ''  # letters guessed so far (read by blanks/allLetters)
def play():
    """Main game loop: prompt for guesses until the word is guessed or
    lives reach zero.

    NOTE(review): `lives` and `guess` handling relies on module globals;
    verify the guess-processing functions actually rebind the module-level
    names (as written with no `global` declarations the losing branch can
    never trigger).
    """
    #word = pick_a_word()
    while True:
        guess = getGuess(word)
        if processGuess(guess, word):
            print('You win! Well Done!')
            break
        if lives == 0:
            print('Game Over!')
            print('The word was: ' + word)
            break
def getGuess(word):
    """Show the masked word, then prompt for a letter or whole-word guess."""
    blanks(word)
    #print('Lives Remaining: ' + str(lives_remaining))
    guess = raw_input(' Guess a letter or whole word?')  # Python 2 input
    return guess
def blanks(word, guessed=None):
    """Print and return `word` with unguessed letters masked by '-'.

    Fixes the original, which immediately overwrote the `word` parameter
    with '' so the loop never ran and nothing was displayed.

    :param word: the secret word
    :param guessed: letters guessed so far; defaults to the module-level
        `guess` string, so the existing `blanks(word)` call sites keep
        working unchanged.
    """
    if guessed is None:
        guessed = guess
    masked = ''
    for letter in word:
        if guessed.find(letter) > -1:
            # letter found
            masked = masked + letter
        else:
            # letter not found
            masked = masked + '-'
    print(masked)
    return masked
def processGuess(guess, word):
    """Dispatch a guess: whole-word when its length matches, else letter."""
    is_word_guess = len(guess) > 1 and len(guess) == len(word)
    if is_word_guess:
        return wordGuess(guess, word)
    return letterGuess(guess, word)
def wordGuess(guess, word):
    """Return True on a correct (case-insensitive) whole-word guess.

    A wrong guess now costs exactly one life: the original did
    `lives =- 1` (i.e. `lives = -1`) into a *local* variable, which both
    mangled the value and never touched the module-level counter.
    """
    global lives
    if guess.lower() == word.lower():
        return True
    else:
        lives -= 1
        return False
def letterGuess(guess, word):
    """Process a single-letter guess; return True when the word is complete.

    Fixes over the original:
    - a wrong letter raised UnboundLocalError (`lives = lives - 1` with no
      `global lives`); the module counter is now properly decremented.
    - the guessed letter was appended to the local parameter only; it is
      now recorded in the module-level `guess` string (via globals(),
      because the parameter of the same name shadows the global).
    """
    global lives
    if word.find(guess) == -1:
        lives = lives - 1
    globals()['guess'] = globals().get('guess', '') + guess.lower()
    if allLetters(word):
        return True
    return False
def allLetters(word, guessed=None):
    """Return True when every letter of `word` (lower-cased) appears in
    the guessed-letters string.

    :param guessed: letters guessed so far; defaults to the module-level
        `guess` string so existing `allLetters(word)` call sites keep
        working unchanged.
    """
    if guessed is None:
        guessed = guess
    for letter in word:
        if guessed.find(letter.lower()) == -1:
            return False
    return True
play()
|
"""A module that defines constants of expression's variable names.
"""
from typing_extensions import Final
DOCUMENT: Final[str] = 'document'
DISPLAY_OBJECT: Final[str] = 'do'
PARENT: Final[str] = 'parent'
GRAPHICS: Final[str] = 'g'
POINT2D: Final[str] = 'p2d'
RECTANGLE: Final[str] = 'rect'
CIRCLE: Final[str] = 'circle'
ELLIPSE: Final[str] = 'ellipse'
LINE: Final[str] = 'line'
POLYLINE: Final[str] = 'pline'
POLYGON: Final[str] = 'poly'
SPRITE: Final[str] = 'sp'
ARRAY: Final[str] = 'arr'
DICTIONARY: Final[str] = 'dct'
INDEX: Final[str] = 'idx'
BOOLEAN: Final[str] = 'b'
INT: Final[str] = 'i'
NUMBER: Final[str] = 'n'
STRING: Final[str] = 's'
SNAPSHOT: Final[str] = 'snapshot'
ANY: Final[str] = 'any'
EVENT: Final[str] = 'evt'
MOUSE_EVENT: Final[str] = 'mevt'
WHEEL_EVENT: Final[str] = 'wevt'
|
import os
def renomear_arquivos(pasta="/home/victorshgo/Desktop/Python Projects/rename-files/images-for-example"):
    """Strip every digit from the names of all files in `pasta`.

    Fixes over the original:
    - `str.translate(None, "0123456789")` is the Python 2 API and raises
      TypeError on Python 3; a table built with str.maketrans works on 3.
    - the folder is now a parameter (defaulting to the original hard-coded
      path), and the working directory is restored even on errors.
    """
    # Translation table that deletes the characters 0-9.
    tabela = str.maketrans('', '', "0123456789")
    # Get the file names inside the folder.
    lista_arquivos = os.listdir(pasta)
    # Remember the current directory so we can return to it afterwards.
    pasta_atual = os.getcwd()
    print("A pasta atual e: " + pasta_atual)
    # Enter the target folder so os.rename works on bare file names.
    os.chdir(pasta)
    try:
        for nome_arquivo in lista_arquivos:
            print("Nome anterior: " + nome_arquivo)
            print("Nome atual: " + nome_arquivo.translate(tabela))
            print("")
            # Remove all digits from the file name.
            os.rename(nome_arquivo, nome_arquivo.translate(tabela))
    finally:
        os.chdir(pasta_atual)
renomear_arquivos() |
#MergeRankSummaries.py
#
# Description: Merges rank summaries and attaches values as
# an attribute table to the patch raster
#
# Inputs: <Patch raster> <Geometry CSV> <Connectivity CSV>
# Outputs: <Patch composite>
#
# June 2012, John.Fay@duke.edu
#
import sys, os, arcpy
import arcpy.sa as sa
# Check out a Spatial Analyst license and allow overwriting outputs.
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
#Input variables (script-tool arguments, in positional order)
patchRaster = sys.argv[1]          # patch raster whose table receives the scores
geometryTbl = sys.argv[2]          # geometry rank-summary CSV
geometryWt = float(sys.argv[3])    # weight (percent) for geometry
connectivityTbl = sys.argv[4]      # connectivity rank-summary CSV
connectivityWt = float(sys.argv[5])
efficiencyTbl = sys.argv[6]
efficiencyWt = float(sys.argv[7])
threatTbl = sys.argv[8]
threatWt = float(sys.argv[9])
biodiversityTbl = sys.argv[10]
biodiversityWt = float(sys.argv[11])
# Parallel tuples: attribute-group field prefix, its table, its weight.
prefixes = ("geom", "conn","effic", "vuln", "bio")
inputTbls = (geometryTbl, connectivityTbl,efficiencyTbl, threatTbl, biodiversityTbl)
inputWts = (geometryWt, connectivityWt,efficiencyWt, threatWt, biodiversityWt)
#Output variables
outputRaster = sys.argv[12]
##-FUNCTIONS-
def msg(txt):print txt; arcpy.AddMessage(txt); return
##-PROCESSING-
#Create the output raster as a copy of the patch raster
msg("Creating output raster: %s" %outputRaster)
outRaster = arcpy.CopyRaster_management(patchRaster,outputRaster)
#Initiate the calcString - the field-calculator expression accumulated
#below and used for calculating the final weighted score/rank
calcString = ''
#Loop through each attribute group
for prefix in prefixes:
    msg("Working on %s" %prefix)
    # Convert the percent weight to a 0-1 fraction.
    wt = float(inputWts[prefixes.index(prefix)] / 100.0)
    tbl = inputTbls[prefixes.index(prefix)]
    # Add the per-group fields to the output raster's table
    arcpy.AddField_management(outRaster,prefix+"_Wt","DOUBLE",10,2) #Weight value
    arcpy.AddField_management(outRaster,prefix+"_Rank","DOUBLE",10,2) #Rank
    # Join the summary table to the outRaster, copy the joined columns
    # into the prefixed fields, then drop the temporary joined columns.
    arcpy.JoinField_management(outRaster,"VALUE",tbl,"PatchID",["WtdScore","Rank"])
    arcpy.CalculateField_management(outRaster,prefix+"_Wt","[WtdScore]")
    arcpy.CalculateField_management(outRaster,prefix+"_Rank","[Rank]")
    arcpy.DeleteField_management(outRaster,"WtdScore")
    arcpy.DeleteField_management(outRaster,"Rank")
    # Update the calcString with this group's weighted term
    calcString += "([%s_Wt] * %s) + " %(prefix,wt)
# Add final ranking; [:-3] strips the trailing " + " from the expression
arcpy.AddField_management(outRaster,"FinalWt","DOUBLE",10,2)
arcpy.AddField_management(outRaster,"FinalRank","DOUBLE",10,2)
arcpy.CalculateField_management(outRaster,"FinalWt",calcString[:-3])
arcpy.CalculateField_management(outRaster,"FinalRank",calcString.replace("Wt","Rank")[:-3])
|
"""Markdown extension to render content similar to the source.
This will render Markdown such that the spacing and alignment in the source
text and the rendered content looks roughly the same. It's meant to help ensure
that what's typed is very close to what's viewed when rendered.
"""
import re
from collections import OrderedDict
from xml.etree.ElementTree import SubElement
import markdown
from markdown.blockprocessors import BlockProcessor, OListProcessor
from markdown.postprocessors import RawHtmlPostprocessor
from markdown.treeprocessors import Treeprocessor
class SmartEmptyBlockProcessor(BlockProcessor):
    """Handles empty blocks in a more specialized way.
    By default, Python-Markdown will trim away excess empty blocks, with
    the idea being that it doesn't matter how much whitespace exists
    between tags when rendered to HTML. However, in our case, we need to
    preserve whitespace in order to better match the resulting render to
    the input text.
    We replace any empty lines with paragraph tags, which will safely stick
    around.
    This is invoked before EmptyBlockProcessor.
    """
    def test(self, parent, block):
        """Test that the processor should apply to the given block.

        Matches blocks that are empty or start with a blank line.
        """
        return not block or block.startswith('\n')
    def run(self, parent, blocks):
        """Run the processor on the given blocks.

        Emits one empty <p> per leading blank line, then re-queues any
        remaining block content for the other processors.
        """
        block = blocks.pop(0)
        # We'll enter this while loop at least once, given that test() must
        # pass before run() is called.
        #
        # We know that blocks are separated by 2 blank lines. At a minimum,
        # we'll have 2 resulting paragraphs here.
        #
        # For odd-numbered sequences of newlines, we'll end up with a block
        # starting with a newline. We'll add a paragraph in this case, and
        # continue on.
        while self.test(parent, block):
            SubElement(parent, 'p')
            if block:
                block = block[1:]
            else:
                SubElement(parent, 'p')
                return
        # Add remaining lines to master blocks for later.
        #
        # We know there's a block here, given that test() passes if block
        # is None. We'll only exit the while loop to here if there's a block
        # without any leading newlines.
        blocks.insert(0, block)
class PreserveStartOListBlockProcessor(OListProcessor):
    """Applies CSS styles to any <ol> with a start= attribute.
    This allows for CSS tricks to be performed that ensure that ordered list
    item numbers and item contents line up between source text and rendered
    text. It basically turns off the <li>'s native counter value and instead
    creates its own using :before and CSS counters. These tricks end up causing
    the start= attribute on the <ol> to be ignored.
    This block processor extends the standard OListProcessor to also apply
    a CSS style to set the displayed counter value to the intended start
    value.
    This replaces OListProcessor.
    """
    # Force <ol> tags to have a start= attribute.
    LAZY_OL = False
    def run(self, parent, blocks):
        """Run the processor on the given blocks.

        Delegates to OListProcessor, then patches the produced <ol>.
        """
        # The base BlockProcessor class does not inherit from object, so
        # we can't use super() here.
        OListProcessor.run(self, parent, blocks)
        list_el = self.lastChild(parent)
        # The element should always be an <ol>, but better safe than sorry.
        if list_el.tag == 'ol' and 'start' in list_el.attrib:
            try:
                start = int(list_el.attrib['start'])
            except ValueError:
                # A non-numeric start= attribute falls back to 1 (no style).
                start = 1
            if start > 1:
                # Set a style= attribute to force the text to render with the
                # particular start value.
                list_el.attrib['style'] = 'counter-reset: li %s' % (start - 1)
class TrimTrailingEmptyParagraphs(Treeprocessor):
    """Removes empty paragraphs from the end of the tree.

    This strips any trailing empty <p> elements formerly added by
    SmartEmptyBlockProcessor. The step has to wait until every block has
    been processed (we need the final state of the tree), so it is
    performed right before the tree is prettified.
    """

    def run(self, root):
        """Run the processor on the root of the tree."""
        total_children = len(root)
        keep_count = total_children

        # Walk the children backwards, counting trailing <p> elements that
        # have neither text nor sub-elements.
        for element in reversed(root):
            if element.tag != 'p' or element.text or len(element) > 0:
                break

            keep_count -= 1

        if keep_count < total_children:
            # Drop all trailing empty paragraphs in one slice assignment.
            root[keep_count:] = []
class TrimmedRawHtmlPostprocessor(RawHtmlPostprocessor):
    """Post-processes raw HTML placeholders, without adding extra newlines.
    Python-Markdown's RawHtmlPostprocessor had a nasty habit of adding an
    extra newline after replacing a placeholder with stored raw HTML. That
    would cause extra newlines to appear in our output.
    This version more efficiently replaces the raw HTML placeholders and
    ensures there are no trailing newlines in the resulting HTML. Not only does
    it prevent the newline normally added by the original function, but it
    strips trailing newlines from stashed HTML that may have been generated
    by other extensions, keeping spacing consistent and predictable.
    """
    def run(self, text):
        """Run the processor on the HTML.
        Args:
            text (unicode):
                The text to process.
        Returns:
            unicode:
                The processed text.
        """
        html_stash = self.md.htmlStash
        # Nothing was stashed, so there are no placeholders to replace.
        if html_stash.html_counter == 0:
            return text
        replacements = OrderedDict()
        for i in range(html_stash.html_counter):
            html = html_stash.rawHtmlBlocks[i]
            placeholder = html_stash.get_placeholder(i)
            # Help control whitespace by getting rid of any trailing newlines
            # we may find.
            html = html.rstrip('\n')
            # For block-level HTML, register the <p>-wrapped form first so
            # the replacement consumes the surrounding paragraph tags too.
            if self.isblocklevel(html):
                replacements['<p>%s</p>' % placeholder] = html
            replacements[placeholder] = html
        # One regex pass over the text, replacing each placeholder (or its
        # <p>-wrapped form) with the corresponding stashed HTML.
        return re.sub(
            '|'.join(re.escape(key) for key in replacements),
            lambda m: replacements[m.group(0)],
            text)
class WysiwygFormattingExtension(markdown.Extension):
    """Provides a series of WYSIWYG formatting rules for Markdown rendering.
    We have a lot of specific rendering concerns that Python-Markdown doesn't
    really address, or generally need to care about. We try very hard to match
    up newlines around various code blocks, and we have special ways we do
    ordered lists. The resulting rendered output is meant to look identical
    to the input text, as much as possible.
    This extension renders a series of processors that ensures that the HTML
    output is in the format required for our rendering.
    This is meant to be used with the following Markdown configuration
    and extensions:
    .. code-block:: python
       {
           'lazy_ol': False,
           'extensions': [
               'sane_lists', 'nl2br', 'djblets.markdown.extentions.wysiwyg',
           ],
       }
    """
    def extendMarkdown(self, md):
        """Extend the list of Markdown processors.
        Each processor in this file will be registered in the order
        necessary for the smarter formatting.
        Args:
            md (markdown.Markdown):
                The Markdown instance.
        """
        # Make this the highest priority. Registering under the built-in
        # 'olist' name replaces the stock OListProcessor.
        md.parser.blockprocessors.register(
            PreserveStartOListBlockProcessor(md.parser),
            'olist',
            priority=999)
        # Place this 5 higher than "empty"'s priority.
        md.parser.blockprocessors.register(
            SmartEmptyBlockProcessor(md.parser),
            'smart-empty',
            priority=105)
        # Place this 5 higher than "prettify"'s priority.
        md.treeprocessors.register(
            TrimTrailingEmptyParagraphs(md),
            'trim_empty_p',
            priority=15)
        # Make this the highest priority. The 'raw_html' name replaces the
        # stock RawHtmlPostprocessor.
        md.postprocessors.register(
            TrimmedRawHtmlPostprocessor(md),
            'raw_html',
            priority=999)
# Module-level hook that Python-Markdown looks for when the extension is
# referenced by module path.
def makeExtension(*args, **kwargs):
    """Create and return an instance of this extension.
    Args:
        *args (tuple):
            Positional arguments for the extension.
        **kwargs (dict):
            Keyword arguments for the extension.
    Returns:
        WysiwygFormattingExtension:
            The extension instance.
    """
    return WysiwygFormattingExtension(*args, **kwargs)
|
import datetime
import json
import logging
import os
import threading
import time
from abc import ABC, abstractmethod
import pika
from tools.mongo_dao import MongoDB
class StopCondition(ABC):
    """Base class for BRISE Stop Condition microservices.

    A Stop Condition periodically evaluates whether the running Experiment
    should be stopped; concrete subclasses implement the actual check in
    `is_finish`, which flips `self.decision`.
    """
    def __init__(self, stop_condition_parameters: dict, experiment_description: dict, experiment_id: str):
        """Read service endpoints from the environment and set up state.

        :param stop_condition_parameters: this condition's parameters; its
            "Name" is used as the type identifier and logger name.
        :param experiment_description: Experiment Description; only the
            StopConditionTriggerLogic inspection parameters are read here.
        :param experiment_id: unique id of the running Experiment.
        """
        self.event_host = os.getenv("BRISE_EVENT_SERVICE_HOST")
        self.event_port = os.getenv("BRISE_EVENT_SERVICE_AMQP_PORT")
        self.database = MongoDB(os.getenv("BRISE_DATABASE_HOST"),
                                os.getenv("BRISE_DATABASE_PORT"),
                                os.getenv("BRISE_DATABASE_NAME"),
                                os.getenv("BRISE_DATABASE_USER"),
                                os.getenv("BRISE_DATABASE_PASS"))
        self.experiment_id = experiment_id
        self.stop_condition_type = stop_condition_parameters["Name"]
        self.decision = False
        self.logger = logging.getLogger(stop_condition_parameters["Name"])
        # e.g. {"seconds": 5} -> 5.0; TimeUnit must be a valid
        # datetime.timedelta keyword argument name.
        self.repetition_interval = datetime.timedelta(**{
            experiment_description["StopConditionTriggerLogic"]["InspectionParameters"]["TimeUnit"]:
                experiment_description["StopConditionTriggerLogic"]["InspectionParameters"]["RepetitionPeriod"]}).total_seconds()
    def start_threads(self):
        """
        Start 2 threads.
        One thread listens event to shut down Stop Condition.
        Second thread run the functionality of Stop Condition (`self_evaluation` method).
        """
        self.listen_thread = EventServiceConnection(self)
        self.listen_thread.start()
        self.thread_is_active = True
        self.thread = threading.Thread(target=self.self_evaluation, args=())
        self.thread.start()
    def stop_threads(self, ch, method, properties, body):
        """
        This function stops Stop Condition microservice.
        :param ch: pika.Channel
        :param method: pika.spec.Basic.GetOk
        :param properties: pika.spec.BasicProperties
        :param body: empty
        """
        self.listen_thread.stop()
        self.thread_is_active = False
    @abstractmethod
    def is_finish(self):
        """
        Main logic of Stop Condition should be overridden in this method.
        Later, this method will be called in `self_evaluation` method with defined in Experiment Description period.
        When the Stop Condition is triggered to stop BRISE,
        it changes internal state of variable 'self.decision' to True.
        :return: None
        """
    def update_expression(self, stop_condition_type: str, decision: bool) -> None:
        """
        This function sends event to Stop Condition Validator with command to check StopConditionTriggerLogic expression,
        since this particular Stop Condition was triggered.
        :param stop_condition_type: Stop Condition identificator
        :param decision: Stop Condition decision (boolean)
        """
        dictionary_dump = {"experiment_id": self.experiment_id,
                           "stop_condition_type": stop_condition_type,
                           "decision": decision
                           }
        body = json.dumps(dictionary_dump)
        # A fresh short-lived connection per event; both connection and
        # channel are context managers and are closed on exit.
        with pika.BlockingConnection(
                pika.ConnectionParameters(host=self.event_host,
                                          port=self.event_port)) as connection:
            with connection.channel() as channel:
                channel.basic_publish(exchange='',
                                      routing_key='check_stop_condition_expression_queue',
                                      body=body)
    def self_evaluation(self):
        """
        This function performs self-evaluation of Stop Condition periodically according to user-defined repetition interval.

        Sleeps in tenths of the repetition interval so a stop request can
        be honored quickly; the actual check runs on every 10th wake-up.
        """
        counter = 0
        listen_interval = self.repetition_interval/10
        previous_decision = self.decision # for sending the update only when decision changes
        while self.thread_is_active:
            # time.sleep blocks thread execution for whole time specified in function argument
            # and stop message from main-node could be delivered only after this timer ends.
            # This code decision is designed to accelerate stopping process.
            time.sleep(listen_interval)
            counter = counter + 1
            if counter % 10 == 0:
                counter = 0
                numb_of_measured_configurations = 0
                try:
                    numb_of_measured_configurations = \
                        self.database.get_last_record_by_experiment_id("Experiment_state", self.experiment_id)["Number_of_measured_configs"]
                except TypeError:
                    # get_last_record_by_experiment_id returned None (no state yet).
                    self.logger.warning(f"No Experiment state is yet available for the experiment {self.experiment_id}")
                if numb_of_measured_configurations > 0:
                    search_space_size = \
                        self.database.get_last_record_by_experiment_id("Search_space", self.experiment_id)["Search_space_size"]
                    # The whole search space was measured: nothing left to decide.
                    if numb_of_measured_configurations >= search_space_size:
                        break
                self.is_finish()
                if previous_decision != self.decision:
                    msg = f"{self.__class__.__name__} Stop Condition decision: " \
                          f"{ 'stop' if self.decision else 'continue'} running Experiment."
                    self.logger.info(msg)
                    previous_decision = self.decision
                    self.update_expression(self.stop_condition_type, self.decision)
    def stop_experiment_due_to_failed_sc_creation(self):
        """
        This function sends stop_experiment message to main node. It could be triggered only if
        Stop Condition initialization fails.
        """
        with pika.BlockingConnection(
                pika.ConnectionParameters(host=self.event_host,
                                          port=self.event_port)) as connection:
            with connection.channel() as channel:
                channel.basic_publish(exchange='',
                                      routing_key='stop_experiment_queue',
                                      body="Stop condition is not able to initialize.")
class EventServiceConnection(threading.Thread):
    """
    This class is responsible for listening `stop_brise_components` queue
    for shutting down Stop Condition (in case of BRISE Experiment termination).
    """
    def __init__(self, stop_condition: StopCondition):
        """
        The function for initializing consumer thread
        :param stop_condition: an instance of Stop Condition object
        """
        super(EventServiceConnection, self).__init__()
        self.stop_condition: StopCondition = stop_condition
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.stop_condition.event_host,
                                                                            port=self.stop_condition.event_port))
        self.consume_channel = self.connection.channel()
        # Declare an exclusive, anonymous queue and bind it to the
        # termination fan-out exchange; messages trigger stop_threads.
        self.termination_result = self.consume_channel.queue_declare(queue='', exclusive=True)
        self.termination_queue_name = self.termination_result.method.queue
        self.consume_channel.queue_bind(exchange='brise_termination_sender', queue=self.termination_queue_name)
        self._is_interrupted = False
        self.consume_channel.basic_consume(queue=self.termination_queue_name, auto_ack=True,
                                           on_message_callback=self.stop_condition.stop_threads)
    def stop(self):
        """
        The function for thread stop
        """
        self._is_interrupted = True
    def run(self):
        """
        Point of entry to tasks results consumers functionality,
        listening of queue with task result
        """
        try:
            # Poll in 1-second slices so stop() is noticed promptly.
            while not self._is_interrupted:
                self.consume_channel.connection.process_data_events(time_limit=1)  # 1 second
        finally:
            if self.connection.is_open:
                self.connection.close()
|
# NOTE(review): mode 'a+' opens for append+read with the stream positioned
# at the END of the file, so the first read() returns '' — presumably the
# point of this demo is to contrast it with the read after seek(0).
with open('D:/python_udemy/pycharm/src/files', mode='a+') as my_file:
    print(my_file.read())  # empty: position starts at EOF in 'a+' mode
    print('---------------------')
    my_file.seek(0)  # rewind to the beginning of the file
    print(my_file.read())  # now prints the whole file contents
|
import argparse
import random
from crowd_dataset import CrowdDataset
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import cv2
import numpy as np
import os
import random, string
import math
import pickle
from collections import OrderedDict
import torch
from torch import nn as nn, optim as optim
from torch.autograd import Variable
import datetime
import scipy.stats as ss
from pdb import set_trace as bp
from models import BSDR_Net
from models import load_rot_model_blocks, check_BN_no_gradient_change
from models import check_conv_no_gradient_change, set_batch_norm_to_eval
from models import load_net
from noisy_gts import create_noisy_gt
from models import NRN
import warnings
warnings.filterwarnings("ignore")
# Command-line interface for the BSDR test run.
parser = argparse.ArgumentParser(description='PyTorch BSDR Testing')
parser.add_argument('--gpu', default=1, type=int,
                    help='GPU number')
parser.add_argument('--dataset', default="parta", type=str,
                    help='dataset to train on')
parser.add_argument('--model-name', default="", type=str,
                    help='name of model file')
def log(f, txt, do_print=1):
    """Write a timestamp-prefixed line to the file-like `f`; echo it to
    stdout when do_print == 1."""
    line = str(datetime.datetime.now()) + ': ' + txt
    if do_print == 1:
        print(line)
    f.write(line + '\n')
# Get the filename for the model stored after 'epochs_over' epochs got over
def get_filename(net_name, epochs_over):
    """Checkpoint filename for `net_name` after `epochs_over` epochs."""
    return "{}_epoch_{}.pth".format(net_name, epochs_over)
def save_checkpoint(state, fdir, name='checkpoint.pth'):
    """Serialize `state` with torch.save into directory `fdir` as `name`."""
    torch.save(state, os.path.join(fdir, name))
def print_graph(maps, title, save_path):
    """Plot the given maps side-by-side and save the figure as a PNG.

    :param maps: iterable of (array, imshow_kwargs) pairs; a (3, H, W)
        array is shown as an RGB image, anything else as a single-channel
        map with a colorbar.
    :param title: figure suptitle.
    :param save_path: output path without the '.png' extension.

    NOTE(review): the loop variable `map` shadows the builtin of the same
    name inside this function.
    """
    fig = plt.figure()
    st = fig.suptitle(title)
    for i, (map, args) in enumerate(maps):
        plt.subplot(1, len(maps), i + 1)
        if len(map.shape) > 2 and map.shape[0] == 3:
            # bp()
            # Channels-first RGB image: move channels last for imshow.
            plt.imshow(map.transpose((1, 2, 0)).astype(np.uint8),aspect='equal', **args)
            # bp()
        else:
            # bp()
            plt.imshow(map, aspect='equal', **args)
            plt.colorbar()
            # bp()
        plt.axis('off')
    plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches = 0)
    # Release figure resources explicitly.
    fig.clf()
    plt.clf()
    plt.close()
excluded_layers = ['conv4_1', 'conv4_2', 'conv5_1']
@torch.no_grad()
def test_function(X, Y, network):
    """
    Evaluation of network on test and valid set
    Parameters
    ----------
    X : input images (B,3,h,w)
    Y : ground truth (B,1,h/8,w/8)
    network : BSDR object

    Returns
    -------
    (mse_loss: float, predicted density maps: np.ndarray,
     per-sample absolute count error: np.ndarray)

    NOTE(review): requires a CUDA device (.cuda() calls below);
    nn.MSELoss(size_average=True) uses a deprecated argument — confirm the
    pinned torch version still accepts it.
    """
    X = torch.autograd.Variable(torch.from_numpy(X)).cuda()
    Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda()
    network = network.cuda()
    network.eval()
    output = network(X)  # (B,1,h,w)
    loss = 0.0
    loss_criterion = nn.MSELoss(size_average=True)
    # bp()
    loss = loss_criterion(output, Y)
    # Absolute difference between GT and predicted people counts per sample.
    count_error = torch.abs(torch.sum(Y.view(Y.size(0), -1), dim=1) - torch.sum(output.view(output.size(0), -1), dim=1))
    # Restore training mode, but keep BatchNorm layers frozen in eval mode.
    network.train()
    network = set_batch_norm_to_eval(network)
    return loss.item(), output.cpu().detach().numpy(), count_error.cpu().detach().numpy()
def test_network(dataset, set_name, network, print_output=False):
    """
    Main loop for evaluation of BSDR network
    Parameters
    ----------
    dataset : dataset object for retrieving data from test/valid set
    set-name : choose the test / valid set
    network : BSDR object
    print_output : False, True (dump under model_save_dir/dump), or a
        directory path string to dump prediction plots into
    Returns
    -------
    ({'loss1': mse, 'new_mae': mae, 'mse': rmse}, mae)

    NOTE(review): reads module-level globals `model_save_dir` and
    `output_downscale` set in the __main__ block.
    """
    if isinstance(print_output, str):
        print_path = print_output
    elif isinstance(print_output, bool) and print_output:
        print_path = model_save_dir + '/dump'
    else:
        print_path = None
    loss_list = []
    count_error_list = []
    for idx, data in enumerate(dataset.test_get_data(set_name)):
        image_name, Xs, Ys = data
        # First image of the batch, HWC, resized to the prediction resolution.
        image = Xs[0].transpose((1, 2, 0))
        image = cv2.resize(image, (image.shape[1] // output_downscale, image.shape[0] // output_downscale))
        loss, pred_dmap, count_error = test_function(Xs, Ys, network)
        # bp()
        # Shared color scale so prediction and GT maps are visually comparable.
        max_val = max(np.max(pred_dmap[0, 0].reshape(-1)), np.max(Ys[0, 0].reshape(-1)))
        maps = [(np.transpose(image, (2, 0, 1)), {}),
                (pred_dmap[0, 0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val}),
                (Ys[0, 0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val})]
        # bp()
        loss_list.append(loss)
        count_error_list.append(count_error)
        # -- Plotting boxes
        if print_path:
            print_graph(maps, "Gt:{},Pred:{}".format(np.sum(Ys), np.sum(pred_dmap)), os.path.join(print_path, image_name))
    loss = np.mean(loss_list)
    mae = np.mean(count_error_list)
    mse = np.sqrt(np.mean(np.square(count_error_list)))
    return {'loss1': loss, 'new_mae': mae, 'mse': mse}, mae
def train_network():
    """
    Despite the name, this only evaluates: it builds a BSDR network, loads
    the checkpoint named by --model-name, runs the test set once and logs
    the result. Creates model/dump directories and the log file as a side
    effect; reads globals `model_save_dir`, `args`, `dataset`.
    """
    network = BSDR_Net()
    model_save_path = os.path.join(model_save_dir, 'train2')
    if not os.path.exists(model_save_path):
        os.makedirs(model_save_path)
        os.makedirs(os.path.join(model_save_path, 'snapshots'))
        os.makedirs(os.path.join(model_save_dir, 'dump'))
        os.makedirs(os.path.join(model_save_dir, 'dump_test'))
    global f
    snapshot_path = os.path.join(model_save_path, 'snapshots')
    f = open(os.path.join(model_save_path, 'train0.log'), 'w')
    # -- Logging Parameters
    log(f, 'args: ' + str(args))
    log(f, 'model: ' + str(network), False)
    # Load pretrained weights from the fixed training snapshot directory.
    network = load_net(network, 'models_BSDR/train2/snapshots', str(args.model_name))
    log(f, 'Testing...')
    epoch_test_losses, mae = test_network(dataset, 'test', network, False)
    log(f, 'TEST epoch: ' + str(-1) + ' test loss1, mae:' + str(epoch_test_losses))
    return
if __name__ == '__main__':
    args = parser.parse_args()
    # -- Assign GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    # -- Assertions
    assert (args.dataset)
    # -- Setting seeds for reproducability
    np.random.seed(11)
    random.seed(11)
    torch.manual_seed(11)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.cuda.manual_seed(11)
    torch.cuda.manual_seed_all(11)
    # -- Dataset paths and per-dataset hyperparameters
    if args.dataset == "parta":
        validation_set = 60
        path = '../dataset/ST_partA/'
        output_downscale = 8
        density_map_sigma = 1
        blur_sigma = 1
        image_size_min = 256
        image_crop_size = 256
        network_output_downscale = 4
    elif args.dataset == "ucfqnrf":
        validation_set = 240
        output_downscale = 8
        path = '../dataset/UCF-QNRF_ECCV18/'
        # NOTE(review): output_downscale is assigned twice in this branch;
        # harmless but redundant.
        output_downscale = 8
        density_map_sigma = 1
        blur_sigma = 1
        image_size_min = 256
        image_crop_size = 256
        network_output_downscale = 4
    else:
        # Fallback: derived part-A variants; other hyperparameters
        # (density_map_sigma etc.) are NOT set here — TODO confirm these
        # datasets are actually supported by CrowdDataset below.
        validation_set = 0
        output_downscale = 8
        path = '../../dataset/ST_partA_' + args.dataset.replace('parta_', '') + '/'
    model_save_dir = './models_BSDR_test'
    dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set,
                           gt_downscale_factor=output_downscale, density_map_sigma=density_map_sigma,
                           image_size_multiple=output_downscale * network_output_downscale,
                           image_size_min=image_size_min, image_crop_size=image_crop_size)
    # print(dataset.data_files['test_valid'], len(dataset.data_files['test_valid']))
    print(dataset.data_files['train'], len(dataset.data_files['train']))
    # -- Train the model
    train_network()
|
"""
Test attrs that previously caused bugs.
"""
import pdir
from pdir._internal_utils import category_match
from pdir.attr_category import AttrCategory
def test_dataframe():
    """DataFrame's 'columns'/'index' attrs must be categorized as properties."""
    from pandas import DataFrame
    inspected = pdir(DataFrame)
    targets = ('columns', 'index')
    for pattr in inspected.pattrs:
        if pattr.name in targets:
            assert category_match(pattr.category, AttrCategory.PROPERTY)
def test_type():
    """type's __abstractmethods__ must be categorized as an abstract-class attr."""
    for pattr in pdir(type).pattrs:
        if pattr.name == '__abstractmethods__':
            assert category_match(pattr.category, AttrCategory.ABSTRACT_CLASS)
def test_list():
    """list.append must be categorized as a function."""
    for pattr in pdir(list).pattrs:
        if pattr.name == 'append':
            assert category_match(pattr.category, AttrCategory.FUNCTION)
# Minimal data descriptor (defines __get__/__set__/__delete__) exercised by
# test_descriptor below. Its docstring is asserted verbatim there — do not edit.
class D(object):
    """this is D"""
    def __init__(self):
        pass
    # Descriptor protocol hooks; all intentionally no-ops.
    def __get__(self, instance, type=None):
        pass
    def __set__(self, instance, value):
        pass
    def __delete__(self, obj):
        pass
# Data descriptor that prints on get/set; docstring is asserted verbatim in
# test_descriptor — do not edit.
class RevealAccess(object):
    """this is R"""
    def __init__(self, initval=None, name='var'):
        self.val = initval
        self.name = name
    def __get__(self, obj, objtype):
        print('Retrieving', self.name)
        return self.val
    def __set__(self, obj, val):
        print('Updating', self.name)
        self.val = val
    def __delete__(self, obj):
        pass
def test_descriptor():
    # Exercises pdir's descriptor/property categorization and its synthesized
    # doc strings ("class X with getter, setter, deleter, <docstring>").
    class T(object):
        # Adjacent string literals concatenate: 'var ' r'' == 'var '.
        r = RevealAccess(10, 'var ' r'')
        def __init__(self):
            self.d = D()
        @property
        def p(self):
            'this is p'
            return 1
        # NOTE(review): the setter lacks a value parameter — harmless here,
        # since it is only inspected, never invoked.
        @p.setter
        def p(self):
            pass
        @p.deleter
        def p(self):
            pass
    t = T()
    pattrs = pdir(t).pattrs
    for pattr in pattrs:
        if pattr.name == 'd':
            assert category_match(pattr.category, AttrCategory.DESCRIPTOR)
            assert pattr.doc == ('class D with getter, setter, deleter, ' 'this is D')
        if pattr.name == 'r':
            assert category_match(pattr.category, AttrCategory.DESCRIPTOR)
            assert pattr.doc == (
                'class RevealAccess with getter, setter, ' 'deleter, this is R'
            )
        if pattr.name == 'p':
            assert category_match(pattr.category, AttrCategory.DESCRIPTOR)
            assert pattr.doc == ('@property with getter, setter, ' 'deleter, this is p')
|
# First attempt: log in to a WordPress demo site and post a comment.
import requests
url = 'https://wordpress-edu-3autumn.localprod.forc.work/wp-login.php'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
}
data = {
    'log': 'spiderman', # account name
    'pwd': 'crawler334566', # password
    'wp-submit': '登录',
    'redirect_to': 'https://wordpress-edu-3autumn.localprod.forc.work/wp-admin/',
    'testcookie': '1'
}
login_in = requests.post(url,headers=headers,data=data)
# POST the login request (URL, headers and login form fields); the response
# object is bound to login_in.
cookies = login_in.cookies
# Grab the session cookies from the login response via the .cookies attribute.
url_1 = 'https://wordpress-edu-3autumn.localprod.forc.work/wp-comments-post.php'
data_1 = {
    'comment': input('请输入你想要发表的评论:'),
    'submit': '发表评论',
    'comment_post_ID': '13',
    'comment_parent': '0'
}
# Comment form fields packed into a dict.
comment = requests.post(url_1,headers=headers,data=data_1,cookies=cookies)
# POST the comment; passing cookies=cookies makes the server treat the
# request as coming from the logged-in user.
print(comment.status_code)
# A 200 status code means the comment was accepted.
# Although the comment was posted successfully, the code can still be
# improved. The key concept is the "session": the server-side record of one
# user's visit (e.g. which items sit in a shopping cart). Cookies and
# sessions are tightly linked — the cookie stores the session's encoded id,
# so on a repeat visit the server recognizes the user from the cookie and
# restores the matching session. That is why a shopping cart survives
# re-login. Since they are so closely related, the natural next step is to
# create a requests Session object and let it manage cookies automatically —
# see the requests documentation for Session usage.
|
import numpy as np
from largescale.src.support.geometry import gen_coordinates
def make_gabor(size=(0,0), orientation=0, scale=0, period=0, phase=0, peak=1.0):
    """Build a Gabor filter: a Gaussian envelope modulating a cosine grating.

    size        : (rows, cols) of the output array
    orientation : grating orientation in degrees
    scale       : Gaussian std-dev; a (y, x) tuple allows anisotropy
    period      : wavelength of the cosine carrier
    phase       : phase offset of the carrier
    peak        : output amplitude
    """
    if isinstance(scale, tuple):
        yscale, xscale = scale[0], scale[1]
    else:
        yscale = xscale = scale
    coorx, coory = gen_coordinates(size, center_zero=True)
    theta = orientation / 180.0 * np.pi
    envelope = np.exp(-((coorx / xscale) ** 2 + (coory / yscale) ** 2) / 2)
    # Project each coordinate onto the grating axis, then evaluate the carrier.
    axis_proj = coorx * np.sin(theta) + coory * np.cos(theta)
    carrier = np.cos(2 * np.pi * axis_proj / period - phase)
    return peak * carrier * envelope
|
# coding:utf-8
import sys
import redis
import time
import sqlite3
import re
import collections
import logging
# Log to redis.log with timestamp, file and line number on each entry.
logging.basicConfig(filename ='redis.log',level=logging.DEBUG,format='%(asctime)s %(filename)s[line:%(lineno)d] %(message)s',datefmt='%m/%d/%Y %H:%M:%S %p')
logging.info('beginning!')
# Module-level accumulators shared by analysis() below.
key_set = set()
key_list = list()
sequence_key = list()
key_tuple = tuple()
# Pooled connection to the redis instance under inspection.
pool = redis.ConnectionPool(host='192.168.174.128',port=6379,db=0)
r = redis.Redis(connection_pool=pool)
print( "占用内存:",r.info('memory')["used_memory_human"])
print("key的总个数:",r.dbsize())
def analysis():
    """Rank redis key prefixes by total memory usage.

    Reads key names from the sqlite 'memory' table in pages of 100, strips
    trailing digits from each name to obtain a prefix, then sums
    size_in_bytes and row count per prefix and writes the ranking (largest
    first) plus the elapsed time to an output file.
    """
    begin_time = time.time()
    line = 1
    conn = sqlite3.connect(r'E:\File\memory.db')
    c = conn.cursor()
    # while 1 <= line <= (endlines-1000):
    while 1 <= line <= 202:
        cursor = c.execute("select (key) from memory limit 100 offset %s;" % line)
        line = line + 100
        print("读入行数:%s" % (line - 1))
        results = cursor.fetchall()
        for row in results:
            # ret = re.findall(r'^[\D]',row[0])[0]
            # ret = re.findall(r'(.*)[_|:]\d{4}',row[0])[0]
            # ret = re.findall(r'[\D]*\d?\D*\d?\D*',row[0])[0]
            ret = re.findall(r'(.*)_?\D', row[0])[0]
            key_list.append(ret)
    # Deduplicate prefixes (local name shadows the module-level key_set).
    key_set = set(key_list)
    print(key_set)
    print("集合元素一共有:", len(key_set))
    logging.info('set is done')
    for item in key_set:
        key_size = c.execute("select sum(size_in_bytes),count(*) from memory where key like '%s%%';" % item)
        key_size = key_size.fetchall()
        key_tuple = (key_size[0][0], key_size[0][1], item)
        sequence_key.append(key_tuple)
    # Sort by aggregated byte size, biggest consumers first.
    zipped = sorted(sequence_key, key=lambda s: s[0], reverse=True)
    print(zipped)
    f = open(r'C:\Users\Administrator\Desktop\python\output.txt', 'a')
    for zipped_order in zipped:
        print(zipped_order)
        s = str(zipped_order)
        f.write(s)
        f.write('\n')
    end_time = time.time()
    print(str(end_time - begin_time))
    f.write(str(end_time - begin_time))
    f.close()  # BUG FIX: was 'f.close' (attribute access only) — file never closed
    logging.info("match done!")
    conn.close()
analysis()
# Interactive cleanup: list keys by prefix, then incrementally delete the
# chosen key in small batches (sleeping every 3 items to throttle redis).
while True:
    conn = sqlite3.connect(r'E:\File\memory.db')
    c = conn.cursor()
    key_type = input('请输入要清理的key的类别:')
    key_select = c.execute("select * from memory where key like '%s%%' order by size_in_bytes;"%key_type)
    keys = 0
    for key in key_select:
        keys = keys + 1
        print(key)
    print("总共:%s个"%keys)
    conn.close()
    g = input("请输入要清理的key(按q退出程序):")
    if g == 'q':
        sys.exit()
    # Redefined each iteration; closes over the chosen key name g.
    def delete_value():
        try:
            # Hash deletion: r.type() returns bytes (e.g. b'hash').
            if (r.type(g)) == b'hash':
                while r.type(g) != b'none':
                    list_value = r.hscan(g,0,count=15)
                    count = 0
                    youbiao = list_value[0]  # scan cursor
                    print(youbiao)
                    print (list_value[1])
                    for item in list_value[1]:
                        if count%3 == 0:
                            time.sleep(3)  # throttle to limit redis load
                        item = item.decode('utf-8')
                        print (item)
                        r.hdel(g,item)
                        count = count + 1
            # List deletion
            elif (r.type(g)) == b'list':
                while r.type(g) != b'none':
                    list_value = r.lrange(g,0,-1)
                    count = 0
                    for item in list_value:
                        if count%3 == 0:
                            time.sleep(3)
                        item = item.decode('utf-8')
                        print (item)
                        r.lpop(g)
                        count = count + 1
            # Set deletion
            elif (r.type(g)) == b'set':
                while r.type(g) != b'none':
                    list_value = r.sscan(g,0)
                    count = 0
                    for item in list_value[1]:
                        if count%3 == 0:
                            time.sleep(3)
                        item = item.decode('utf-8')
                        print(item)
                        r.spop(g)
                        count = count + 1
            # Sorted-set deletion: zscan yields (member, score) pairs.
            elif (r.type(g)) == b'zset':
                while r.type(g) != b'none':
                    list_value = r.zscan(g,0)
                    count = 0
                    for item in list_value[1]:
                        if count%3 == 0:
                            time.sleep(3)
                        item = item[0].decode('utf-8')
                        print(item)
                        r.zrem(g,item)
                        count = count + 1
            # String deletion
            elif (r.type(g)) == b'string':
                r.delete(g)
            else:
                print("继续删除")
        except Exception as err:
            print('delete error!',err)
    delete_value()
# NOTE(review): unreachable — the loop above only exits via sys.exit().
logging.info('finish')
def gcd(n, m):
    """Return the greatest common divisor of n and m (Euclid's algorithm).

    Base case: gcd(0, m) == m.
    """
    if n == 0:
        return m
    # BUG FIX: the original returned the tuple (n, m % n) instead of recursing.
    return gcd(m % n, n)
def iterative_gcd(n1, n2):
    """Return gcd(n1, n2) by trying every candidate divisor up to min(n1, n2)."""
    smaller = n2 if n2 < n1 else n1  # avoids shadowing the builtin min
    largest_factor = 1
    for cand in range(1, smaller + 1):
        if n1 % cand == 0 and n2 % cand == 0:
            largest_factor = cand
    return largest_factor
def main():
    """Print gcd(a, b) for every pair (a, b) in 1..100 x 1..100."""
    for a in range(1, 101):
        for b in range(1, 101):
            print("gcd", ",(", a, ", ", b, ")=", iterative_gcd(a, b), sep=" ")
main()
|
# coding=utf-8
__author__ = 'F.Marouane'
import sys
from PyQt4.QtGui import *
from PyQt4 import QtCore
from DPricer.presentation.PyuicFiles.Echeancier import Ui_Dialog
class EcheancierDialog(QDialog, Ui_Dialog):
    """Schedule (échéancier) dialog: a table the user can add/remove rows from."""
    def __init__(self):
        # NOTE(review): super(Ui_Dialog, self).__init__() followed by
        # QDialog.__init__(self) is an unusual double initialization; it
        # appears to work with PyQt4's MRO but is worth confirming.
        super(Ui_Dialog, self).__init__()
        QDialog.__init__(self)
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
    @QtCore.pyqtSignature("")
    def on_pushButtonAjouter_clicked(self):
        # Append an empty row (two blank cells) at the bottom of the table.
        num = self.ui.tableWidget.rowCount()
        dd = QTableWidgetItem()
        ff = QTableWidgetItem()
        self.ui.tableWidget.insertRow(num)
        self.ui.tableWidget.setItem(num, 0, dd)
        self.ui.tableWidget.setItem(num, 1, ff)
    @QtCore.pyqtSignature("")
    def on_pushButtonSupprimer_clicked(self):
        # Remove the row containing the currently selected cell.
        itm = self.ui.tableWidget.currentItem()
        idx = self.ui.tableWidget.indexFromItem(itm)
        self.ui.tableWidget.removeRow(idx.row())
if __name__ == '__main__':
    # Standalone demo entry point: show the dialog and run the Qt event loop.
    # sip.setapi('Qstring',2 )
    # YM = YieldManager()
    # YM.import_auto()
    # form = Calculette()
    ap = QApplication(sys.argv)
    form = EcheancierDialog()
    form.show()
    ap.exec_()
# Class-label vocabulary (LEGO brick/plate part names); a label's index in
# this list is its class id, so the ordering is load-bearing for any model
# trained against it — do not reorder.
CLASSES = [
    'Brick 1x1',
    'Brick 1x2',
    'Brick 1x3',
    'Brick 1x4',
    'Brick 2x2',
    'Brick 2x2 L',
    'Brick 2x2 Slope',
    'Brick 2x3',
    'Brick 2x4',
    'Plate 1x1',
    'Plate 1x1 Round',
    'Plate 1x1 Slope',
    'Plate 1x2',
    'Plate 1x2 Grill',
    'Plate 1x3',
    'Plate 1x4',
    'Plate 2x2',
    'Plate 2x2 L',
    'Plate 2x3',
    'Plate 2x4',
]
|
# Print the odd numbers strictly inside [lo, hi), after bumping an odd lower
# bound up to the next even number (so lo itself is never printed when odd).
lo, hi = [int(tok) for tok in input().split()]
if lo % 2 != 0:
    lo += 1
# lo is now even, so the odd values in range(lo, hi) are lo+1, lo+3, ...
for value in range(lo + 1, hi, 2):
    print(value)
|
'''
@author: xilh
@since: 20200126
'''
# Demonstrates mutating a (unordered) set: add a single element, then merge
# a second set in with update(). Print order of a set is not deterministic.
print("== 无序列表修改 ==")
set1 = {'a', 'b', 'c'}
print("set1 : ", set1)
set1.add(1)
print("set1 add : ", set1)
set2 = {'computer', 'calulator'}
set1.update(set2)
print("set1 update: ", set1)
|
#ThreadLocal
"""
在多线程环境下,每个线程都有自己的数据。一个线程使用自己的局部变量比使用全局变量好,因为局部变量只有线程自己能看见,不会影响其他线程,而全局变量的修改必须加锁。
但是局部变量也有问题,就是在函数调用的时候,传递起来很麻烦:
"""
class Student(object):
    # Placeholder data holder for the ThreadLocal tutorial below.
    # NOTE(review): defines no __init__, yet process_student calls
    # Student(name) — this illustrative code would raise TypeError if run.
    pass
def process_student(name):
    # Illustrates the pain point: std is a local that every helper needs,
    # so it must be passed down through each call explicitly.
    std = Student(name)
    # std is a local variable, but every function uses it, so it must be
    # passed in:
    do_task_1(std)
    do_task_2(std)
def do_subtask_1(std):
    # No-op leaf task (tutorial stub).
    pass
def do_subtask_2(std):
    # No-op leaf task (tutorial stub).
    pass
def do_task_1(std):
    # Must forward std to every subtask by hand.
    do_subtask_1(std)
    do_subtask_2(std)
def do_task_2(std):
    # NOTE(review): calls do_subtask_2 twice; do_subtask_1 may have been meant.
    do_subtask_2(std)
    do_subtask_2(std)
"""
每个函数一层一层调用都这么传参数那还得了?用全局变量?也不行,因为每个线程处理不同的
Student对象,不能共享。
如果用一个全局dict存放所有的Student对象,然后以thread自身作为key获得
线程对应的Student对象如何?
"""
import threading
# Global registry mapping each thread object to its Student instance —
# the manual precursor to threading.local().
global_dict = {}
def std_thread(name):
    std = Student(name)
    # Store std in the global dict keyed by the current thread:
    global_dict[threading.current_thread()] = std
    # These call the no-arg do_task_1/do_task_2 redefined below.
    do_task_1()
    do_task_2()
# Redefines the earlier do_task_1 with no parameter (tutorial progression).
def do_task_1():
    # Instead of receiving std, look it up by the current thread:
    std = global_dict[threading.current_thread()]
    ...  # Ellipsis placeholder — body intentionally omitted
def do_task_2():
    # Any function can recover the current thread's std the same way:
    std = global_dict[threading.current_thread()]
"""
这种方式理论上是可行的,它最大的优点是消除了std对象在每层函数中的传递问题,但是,每个
函数获取std的代码有点丑。
有没有更简单的方式?
ThreadLocal应运而生,不用查找dict,ThreadLocal帮你自动做这件事
"""
# Create the global ThreadLocal object; each thread sees its own attributes.
local_school = threading.local()
# Redefines process_student: the student now comes from thread-local storage.
def process_student():
    # Fetch the student bound to the current thread:
    std = local_school.student
    print('Hello, %s (in %s)' % (std, threading.current_thread().name))
def process_thread(name):
    # Bind this thread's student on the shared ThreadLocal object:
    local_school.student = name
    process_student()
# Run two threads concurrently; each sees only its own local_school.student.
t1 = threading.Thread(target= process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target= process_thread, args=('Bob',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
"""
全局变量local_school就是一个ThreadLocal对象,每个Thread对它都可以读写student属性,但互不影响。你可以把local_school看成全局变量,但每个属性如local_school.student都是线程的局部变量,可以任意读写而互不干扰,也不用管理锁的问题,ThreadLocal内部会处理。
可以理解为全局变量local_school是一个dict,不但可以用local_school.student,还可以绑定其他变量,如local_school.teacher等等。
ThreadLocal最常用的地方就是为每个线程绑定一个数据库连接,HTTP请求,用户身份信息等,这样一个线程的所有调用到的处理函数都可以非常方便地访问这些资源。
"""
# 小结
"""
一个ThreadLocal变量虽然是全局变量,但每个线程都只能读写自己线程的独立副本,互
不干扰。ThreadLocal解决了参数在一个线程中各个函数之间互相传递的问题。
"""
|
#!/usr/bin/env python
# Copyright (c) 2020-2021 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
import logging
import os
import sqlite3
import unittest
import uuid
from lydian.apps.rules import RulesApp
from lydian.traffic.core import TrafficRule
# Scratch sqlite file used by the tests; created/removed per test run.
DB_NAME = './rules_test.db'
log = logging.getLogger(__name__)
class RulesAppTest(unittest.TestCase):
    """Exercises RulesApp add/persist/enable-disable behavior end to end."""
    # Template rule; copies of it (with fresh ruleids) are bulk-added below.
    DUMMY_RULE = {
        'ruleid': '%s' % uuid.uuid4(),
        'src': '127.0.0.1',
        'dst': '127.0.0.1',
        'protocol': 'TCP',
        'port': 9465,
        'connected': False
    }
    def setUp(self):
        # Start from a clean database file.
        if os.path.exists(DB_NAME):
            os.remove(DB_NAME)
    def _get_new_app(self):
        # Fresh RulesApp bound to the scratch database.
        return RulesApp(DB_NAME)
    def _get_trule(self, rule):
        # Build a TrafficRule from a plain dict of fields.
        trule = TrafficRule()
        for k, v in rule.items():
            setattr(trule, k, v)
        trule.fill()
        return trule
    def test_add(self):
        self.rulesApp = self._get_new_app()
        trule = self._get_trule(self.DUMMY_RULE)
        self.rulesApp.add(trule)
        # TEST : Add one rule
        self.rulesApp.add(trule)
        # TEST : Adding same rule would lead to a warning
        # but no assertion / exception to break the run.
        self.rulesApp.add(trule)
        trules = []
        for x in range(5000):
            # Shallow copy with a unique ruleid per rule.
            rule = {k: v for k, v in self.DUMMY_RULE.items()}
            rule['ruleid'] = '%s' % uuid.uuid4()
            trule = self._get_trule(rule)
            trules.append(trule)
        # TEST : Adding multiple rules.
        self.rulesApp.add_rules(trules)
        self.rulesApp.close()
        # Get New instance of rulesApp()
        # If persistence works, all the rules must be brought up fine.
        self.rulesApp = self._get_new_app()
        self.rulesApp.load_from_db()
        # TEST : All the rules must be present. (trule still refers to the
        # last rule created by the loop above.)
        assert self.rulesApp.get(trule.ruleid)
        for trule in trules:
            assert self.rulesApp.get(trule.ruleid)
        # TEST : Disable / Enable of rules should work.
        self.rulesApp.disable(trules[0].ruleid)
        # disable and enable the rule
        self.rulesApp.disable(trules[1].ruleid)
        self.rulesApp.enable(trules[1].ruleid)
        assert not self.rulesApp.is_enabled(trules[0].ruleid)
        assert self.rulesApp.is_enabled(trules[1].ruleid)
    def tearDown(self):
        os.remove(DB_NAME)
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2017--, Evguenia Kopylova
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
"""
Split FASTA file into two files with WGS and RNA-Seq reads.
"""
import click
from os.path import join, basename
import skbio
def process_file(input_fasta_fp):
    """Stream a FASTA file and return (min_len, max_len, num_reads, avg_len).

    Prints a progress line every million reads. Raises ZeroDivisionError if
    the file contains no sequences.
    """
    # float('inf') replaces the original magic sentinel 10000000000000,
    # which would have been wrong for absurdly long sequences.
    min_len = float('inf')
    max_len = 0
    tot_len = 0
    num_str = 0
    num_M = 0
    for seq in skbio.io.read(input_fasta_fp, format='fasta'):
        num_str += 1
        if num_str % 1000000 == 0:
            num_M += 1
            print("%sM reads processed" % num_M)
        # (removed unused seq_id = seq.metadata['id'])
        str_len = len(str(seq))
        if str_len < min_len:
            min_len = str_len
        if str_len > max_len:
            max_len = str_len
        tot_len = tot_len + str_len
    avg_len = tot_len / num_str
    return min_len, max_len, num_str, avg_len
@click.command()
@click.option('--input-fasta-fp', required=True,
              type=click.Path(resolve_path=True, readable=True, exists=True,
                              file_okay=True),
              help='FASTA file containing WGS and RNA-Seq reads')
def main(input_fasta_fp):
    # CLI entry point: compute and report read-length statistics.
    min_len, max_len, num_str, avg_len = process_file(input_fasta_fp=input_fasta_fp)
    print("Total reads: %s" % num_str)
    print("Minimum length read: %s" % min_len)
    print("Maximum length read: %s" % max_len)
    print("Average length read: %s" % avg_len)
if __name__ == "__main__":
    main()
import datetime
from peewee import CharField, DateTimeField, ForeignKeyField
from model_base import *
from model_table import *
class ColumnModel(PostgresqlModel):
    """Peewee model for the 'columns' table (regular connection)."""
    class Meta:
        db_table = 'columns'
    # Owning table; reverse accessor is table.columns.
    table = ForeignKeyField(TableModel, related_name='columns')
    name = CharField()
    created_date = DateTimeField(default=datetime.datetime.now)
class AdminColumnModel(AdminModel):
    """Same 'columns' table mapped through the admin connection/base model."""
    class Meta:
        db_table = 'columns'
    table = ForeignKeyField(AdminTableModel, related_name='columns')
    name = CharField()
    created_date = DateTimeField(default=datetime.datetime.now)
|
"""
PointNet分类
"""
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from torch.autograd import Variable
from pointnet1_utils import PointNetEncoder, feature_transform_regularizer, TN3d, TNkd
class PointNetCls(nn.Module):
    """
    PointNet classification network.

    Encodes an input point cloud into a 1024-d global feature via
    PointNetEncoder, then classifies with a 3-layer MLP head.
    k: number of classes; channel: per-point input dimensionality.
    """
    def __init__(self, k=40, channel=3):
        super(PointNetCls, self).__init__()
        self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k)
        # Dropout applied to fc2's output before its batch-norm (see forward).
        self.dropout = nn.Dropout(p=0.4)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.relu = nn.ReLU()
    def forward(self, x):
        # trans / trans_feat are the input- and feature-space alignment
        # matrices produced by the encoder; trans_feat feeds the regularizer.
        x, trans, trans_feat = self.feat(x)
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.relu(self.bn2(self.dropout(self.fc2(x))))
        x = self.fc3(x)
        # Log-probabilities over the k classes (pairs with F.nll_loss).
        x = F.log_softmax(x, dim=1)
        return x, trans_feat
class PointNetClsLoss(torch.nn.Module):
    """NLL classification loss plus a scaled feature-transform regularizer."""
    def __init__(self, mat_diff_loss_scale=0.001):
        super(PointNetClsLoss, self).__init__()
        # Weight of the orthogonality regularizer on the feature transform.
        self.mat_diff_loss_scale = mat_diff_loss_scale
    def forward(self, pred, target, trans_feat):
        # pred must be log-probabilities (PointNetCls applies log_softmax).
        loss = F.nll_loss(pred, target)
        mat_diff_loss = feature_transform_regularizer(trans_feat)
        total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale
        return total_loss
if __name__ == '__main__':
    # Smoke test: 32 clouds of 2500 3-d points through the transform nets
    # and the classifier; prints output shapes and regularizer values.
    sim_data = Variable(torch.rand(32, 3, 2500))
    trans = TN3d()
    out = trans(sim_data)
    print('stn', out.size())
    print('loss', feature_transform_regularizer(out))
    sim_data_64d = Variable(torch.rand(32, 64, 2500))
    trans = TNkd(k=64)
    out = trans(sim_data_64d)
    print('stn64d', out.size())
    print('loss', feature_transform_regularizer(out))
    model = PointNetCls(k=10)
    out, _ = model(sim_data)
    print("class", out.size())
|
import hashlib, binascii, os
from functools import wraps
from flask import session, redirect
def hash_password(password):
    """Hash a password for storing.

    Returns a 192-char ascii string: a 64-char hex salt followed by the
    128-char hex PBKDF2-HMAC-SHA512 digest (100000 iterations).
    https://www.vitoshacademy.com/hashing-passwords-in-python/
    """
    salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    digest = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt, 100000)
    return (salt + binascii.hexlify(digest)).decode('ascii')
def verify_password(stored_password, provided_password):
    """Verify a stored password against one provided by user.

    stored_password is the salt+digest string produced by hash_password.
    """
    salt, expected = stored_password[:64], stored_password[64:]
    candidate = hashlib.pbkdf2_hmac('sha512',
                                    provided_password.encode('utf-8'),
                                    salt.encode('ascii'),
                                    100000)
    return binascii.hexlify(candidate).decode('ascii') == expected
def password_check(password):
    """
    Verify the strength of 'password'.

    Returns a dict of boolean failure flags plus 'password_ok', which is
    True only when every criterion passes:
        8 characters length or more
        1 digit or more
        1 symbol or more
        1 uppercase letter or more
        1 lowercase letter or more
    """
    import re  # BUG FIX: 're' was used here but never imported in this module

    # calculating the length
    length_error = len(password) < 8
    # searching for digits
    digit_error = re.search(r"\d", password) is None
    # searching for uppercase
    uppercase_error = re.search(r"[A-Z]", password) is None
    # searching for lowercase
    lowercase_error = re.search(r"[a-z]", password) is None
    # searching for symbols
    symbol_error = re.search(r"\W", password) is None
    # overall result
    password_ok = not (length_error or digit_error or uppercase_error
                       or lowercase_error or symbol_error)
    return {
        'password_ok': password_ok,
        'length_error': length_error,
        'digit_error': digit_error,
        'uppercase_error': uppercase_error,
        'lowercase_error': lowercase_error,
        'symbol_error': symbol_error,
    }
def login_required(f):
    """
    Decorate routes to require login.
    http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Logged-in users pass straight through; everyone else is redirected.
        if session.get("user_id") is not None:
            return f(*args, **kwargs)
        return redirect("/login")
    return decorated_function
|
#!/usr/bin/python
#coding=utf-8
import psycopg2
import time
from datetime import datetime
# Module-level connection; compa() below commits on and closes it.
# NOTE(review): hard-coded credentials in source — move to config/env.
conn = psycopg2.connect(database="cyy_insurdb", user="cyyuser",password="kkz9c7H2", host="10.10.1.210")
print "connect success"
def log(file, context):  # append a timestamped entry to *file*
    """Append str(now) + str(context) + newline to *file*.

    BUG FIX: the original opened with 'wb+', which truncated the log on
    every call (and mixes str with a binary handle); append in text mode
    so history is preserved, and close via a context manager.
    """
    current_time = datetime.now()
    with open(file, "a") as f:
        f.write(str(current_time) + str(context) + '\n')
    return
def compa():
cur = conn.cursor()
result=cur.execute("SELECT count(1) from car_service.insurance_orders where created_dt > current_timestamp - interval '5 min';")
print result
conn.commit()
conn.close()
return
compa()
|
# NOTE: study notes, not a runnable script — names like 'lanche' and
# 'valores' are assumed to exist before the early snippets.
# REMOVE AN ITEM FROM THE LIST BY POSITION
del lanche[3]
# OR
lanche.pop(3)
# REMOVE ONLY THE LAST ELEMENT
lanche.pop()
# REMOVE AN ITEM FROM THE LIST BY ITS VALUE
lanche.remove('pizza')
# SORTING VALUES
valores.sort()  # ascending order
valores.sort(reverse=True)  # descending order
# PERFORM AN ACTION ONLY UNDER A CONDITION
if 'pizza' in lanche:
    lanche.remove('pizza')  # if there is pizza in the snack list, remove it
# BUILD A LIST FROM A RANGE
valores = list(range(4,11))  # list from 4 up to 11 (last number excluded)
# LENGTH OF A LIST
valores = [5,6,8,4,1,9,2,10]
len(valores)
### LISTS PART 2 ####
# List inside a list
pessoas = [['Pedro',25], ['Maria',19], ['João',32]]
# APPENDING TO A LIST
teste = list()
teste.append('Thayna')
teste.append(24)
print(teste)
# PUTTING A LIST INSIDE ANOTHER LIST
teste = list()
teste.append('Thayna')
teste.append(24)
galera = list()
galera.append(teste)
print(galera)
##
teste = list()
teste.append('Thayna')
teste.append(24)
galera = list()
galera.append(teste[:])  # [:] copies, so later edits to teste don't leak in
teste[0] = 'Rubem'
teste[1] = 31
galera.append(teste[:])
print(galera)
# NESTED STRUCTURES IN LISTS: printing the whole list or parts of it
galera = [['Joao', 19],['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
print(galera[0])
galera = [['Joao', 19],['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
print(galera[0][1])
galera = [['Joao', 19],['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
print(galera[3])
galera = [['Joao', 19],['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
print(galera[3][1])
# PRINT A SPECIFIC SLOT OF EVERY INNER LIST
galera = [['Joao', 19],['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
for p in galera:
    print(p[0])
galera = [['Joao', 19], ['Ana', 33], ['Joaquim', 13], ['Maria', 45]]
for p in galera:
    print(f'{p[0]} tem {p[1]} anos de idade.')
# BUILDING LISTS FROM USER INPUT — NOTE: [:] copies the collected data
pessoas = list()
dado = list()
for c in range(0, 3):
    dado.append(str(input('Nome: ')))
    dado.append(int(input('Idade: ')))
    pessoas.append(dado[:])
    dado.clear()
print(pessoas)
# APPLYING CONDITIONS
pessoas = list()
dado = list()
totalmaior = totalmenor = 0
for c in range(0, 3):
    dado.append(str(input('Nome: ')))
    dado.append(int(input('Idade: ')))
    pessoas.append(dado[:])
    dado.clear()
for p in pessoas:
    if p[1] >= 21:
        print(f'{p[0]} é maior de idade')
        totalmaior += 1
    else:
        print(f'{p[0]} é menor de idade.')
        totalmenor += 1
print(f'Temos {totalmaior} maiores e {totalmenor} menores de idade')
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Sum,F,FloatField
from work_data.views import JSONQueryView
from stats.helpers import day_of_year,week_of_year,filter_keys
MONTHS = ['January','Febuary','March','April','May','June','July','August','September','October','November','December']
class DailyRecord(JSONQueryView):
    """Per-day totals of price*quantity, newest day first.

    Subclasses bind `price` and `quant` to model field names; `self.model`
    is presumably provided by JSONQueryView — TODO confirm.
    """
    accept_arg = True
    def make_query(self, ask=None):
        data = []
        # Distinct days that have at least one record.
        record = self.model.objects.all().dates('timestamp', 'day')
        if 'start' in ask.keys() and 'end' in ask.keys():
            start = ask['start']
            end = ask['end']
            record = record.filter(timestamp__range=(start, end))
        for day in sorted(list(record), reverse=True):
            records = self.model.objects.filter(timestamp__date=day)
            records = filter_keys(ask, records, rangeless=True)
            # Day total: sum of price*quantity over the matching rows.
            records = records.aggregate(record=Sum(F(self.price)*F(self.quant), output_field=FloatField()))
            if not records['record']:
                # Skip days whose filtered total is empty/zero.
                continue
            data.append({
                'month': MONTHS[day.month-1],
                'year': day.year,
                'day': day_of_year(day.month, day.day),
                'date': day.isoformat(),
                'record': records['record'],
            })
        return data
class WeeklyRecord(JSONQueryView):
    """Per-week totals of price*quantity, from the current week backwards.

    NOTE(review): timestamp__week matches the week number across ALL years;
    month/year in the output are derived by stepping `today` back one week
    per iteration — confirm this is the intended semantics.
    """
    accept_arg = True
    def make_query(self, ask=None):
        data = []
        today = datetime.datetime.today()
        this_week = week_of_year(today.month, today.day)
        for week in range(this_week, 0, -1):
            records = self.model.objects.filter(timestamp__week=week)
            records = filter_keys(ask, records)
            records = records.aggregate(record=Sum(F(self.price)*F(self.quant), output_field=FloatField()))
            if not records['record']:
                pass
            else:
                data.append({
                    'week': week,
                    'year': today.year,
                    'month': MONTHS[today.month-1],
                    'record': records['record'],
                })
            today = today - datetime.timedelta(weeks=1)
        # raise Exception('break')
        return data
class MonthlyRecord(JSONQueryView):
    """Per-month totals of price*quantity, newest month first."""
    accept_arg = True
    def make_query(self, ask=None):
        data = []
        # Distinct (year, month) dates that have at least one record.
        record = self.model.objects.all().dates('timestamp', 'month')
        if 'start' in ask.keys() and 'end' in ask.keys():
            start = ask['start']
            end = ask['end']
            record = record.filter(timestamp__range=(start, end))
        for month in sorted(list(record), reverse=True):
            records = self.model.objects.filter(timestamp__year=month.year, timestamp__month=month.month)
            records = filter_keys(ask, records, rangeless=True)
            records = records.aggregate(record=Sum(F(self.price)*F(self.quant), output_field=FloatField()))
            if not records['record']:
                continue
            data.append({
                'year': month.year,
                'month': month.month,
                'mname': MONTHS[month.month-1],
                'record': records['record'],
            })
        return data
class YearlyRecord(JSONQueryView):
    """Per-year totals of price*quantity, newest year first."""
    accept_arg = True
    def make_query(self, ask=None):
        data = []
        # Distinct years that have at least one record.
        record = self.model.objects.all().dates('timestamp', 'year')
        if 'start' in ask.keys() and 'end' in ask.keys():
            start = ask['start']
            end = ask['end']
            record = record.filter(timestamp__range=(start, end))
        for year in sorted(list(record), reverse=True):
            records = self.model.objects.filter(timestamp__year=year.year)
            records = filter_keys(ask, records, rangeless=True)
            records = records.aggregate(record=Sum(F(self.price)*F(self.quant), output_field=FloatField()))
            if not records['record']:
                continue
            data.append({
                'year': year.year,
                'record': records['record'],
            })
        return data
# Concrete record views: each binds the price column and the quantity column.
# NOTE(review): 'sp'/'cp' presumably mean selling/cost price — confirm
# against the model definitions.
class DailySaleRecord(DailyRecord):
    price = 'sp'
    quant = 'quantity'
class DailyPurchaseRecord(DailyRecord):
    price = 'cp'
    quant = 'quantity'
class WeeklySaleRecord(WeeklyRecord):
    price = 'sp'
    quant = 'quantity'
class WeeklyPurchaseRecord(WeeklyRecord):
    price = 'cp'
    quant = 'quantity'
class MonthlySaleRecord(MonthlyRecord):
    price = 'sp'
    quant = 'quantity'
class MonthlyPurchaseRecord(MonthlyRecord):
    price = 'cp'
    quant = 'quantity'
class YearlySaleRecord(YearlyRecord):
    price = 'sp'
    quant = 'quantity'
class YearlyPurchaseRecord(YearlyRecord):
    price = 'cp'
    quant = 'quantity'
|
from peacemakr.exception.peacemakr import PeacemakrError
class ServerError(PeacemakrError):
    """PeacemakrError subtype for server-originating failures."""
    pass
|
from ..users.models import Users
from django.shortcuts import render
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework import status
from django.conf import settings
from django.contrib import auth
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken, OutstandingToken, BlacklistedToken
from drf_yasg.utils import swagger_auto_schema
from rest_framework.permissions import IsAuthenticated
from core.response import ResponseInfo
from .serializers import RefreshTokenSerializer, UserSerializer, LoginSerializer, LogoutSerializer, OTPSerializer
from .schemas import LoginPostSchema, RegisterPostSchema, RegisterSchema, LoginSchema, UsersSchema, VerifyOTPPostSchema
from core.utils import Util
from core.hashing import Hash
from .emails import RegistrationEmail
# Create your views here.
class RegisterAPIView(GenericAPIView):
    """Register a new user, attach a hashed OTP, and return a JWT pair."""

    def __init__(self, **kwargs):
        # Shared response envelope (status_code / data / status / message).
        self.response_format = ResponseInfo().response
        super(RegisterAPIView, self).__init__(**kwargs)

    # NOTE(review): declared after __init__ but still a plain class attribute;
    # used for swagger schema generation.
    serializer_class = RegisterPostSchema

    @swagger_auto_schema(tags=["Authorization"])
    def post(self, request):
        """Create the user from the request payload.

        Flow: generate OTP -> bcrypt-hash it into the payload -> validate and
        save the user -> authenticate with phone/password -> issue JWT pair.
        Custom envelope codes: 200 ok, 106 auth failed after save,
        102 validation error, 101 unexpected exception.
        """
        try:
            user_data = request.data
            otp = Util.random_number()
            # Store only the bcrypt hash; the plain value is kept for the
            # (currently disabled) registration e-mail.
            user_data.update({"otp": Hash.bcrypt({"key": otp})})
            user_data.update({"plain_otp": otp})
            # return Response(RegistrationEmail.send(user_data), status=status.HTTP_200_OK)
            serializer = UserSerializer(data=user_data)
            if serializer.is_valid():
                serializer.save()
                # RegistrationEmail.send(user_data)
                data = request.data
                phone = data.get('phone', '')
                password = data.get('password', '')
                user = auth.authenticate(username=phone, password=password)
                if user:
                    refresh = RefreshToken.for_user(user)
                    serializer = RegisterSchema(user)
                    # NOTE(review): the plain OTP is returned to the client --
                    # confirm this is intended outside of development.
                    data = {'user': serializer.data, 'errors': {}, 'token': str(
                        refresh.access_token), 'refresh': str(refresh), 'otp': otp}
                    self.response_format['status_code'] = 200
                    self.response_format["data"] = data
                    self.response_format["status"] = True
                    return Response(self.response_format, status=status.HTTP_201_CREATED)
                else:
                    # User was saved but authentication failed (envelope code 106).
                    self.response_format['status_code'] = 106
                    data = {'user': serializer.data, 'errors': {}, 'token': '', 'refresh': '', 'otp': ''}
                    self.response_format["data"] = data
                    self.response_format["status"] = True
                    return Response(self.response_format, status=status.HTTP_201_CREATED)
            else:
                # Payload failed serializer validation (envelope code 102).
                self.response_format['status_code'] = 102
                data = {'user': {}, 'errors': serializer.errors,
                        'token': '', 'refresh': '', 'otp': ''}
                self.response_format["data"] = data
                self.response_format["status"] = False
                return Response(self.response_format, status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # Unexpected failure: envelope code 101, delivered with HTTP 200
            # per this API's envelope convention.
            self.response_format['status_code'] = 101
            self.response_format['status'] = False
            self.response_format['message'] = str(e)
            return Response(self.response_format, status=status.HTTP_200_OK)
class LoginAPIView(GenericAPIView):
    """Authenticate by phone/password and return the user plus a JWT pair."""

    def __init__(self, **kwargs):
        # Shared response envelope (status_code / data / status / message).
        self.response_format = ResponseInfo().response
        super(LoginAPIView, self).__init__(**kwargs)

    serializer_class = LoginPostSchema

    @swagger_auto_schema(tags=["Authorization"])
    def post(self, request):
        """Log a user in.

        Custom envelope codes: 200 ok, 107 account suspended,
        106 bad credentials, 101 unexpected exception.
        """
        try:
            data = request.data
            phone = data.get('phone', '')
            password = data.get('password', '')
            user = auth.authenticate(username=phone, password=password)
            if user:
                serializer = LoginSchema(user)
                if not user.is_active:
                    # Suspended account: reported via the envelope with HTTP 200.
                    data = {'user': {}, 'token': '', 'refresh': ''}
                    self.response_format['status_code'] = 107
                    self.response_format["data"] = data
                    self.response_format["status"] = True
                    # BUGFIX: corrected the misspelled "Account Temparary suspended" message.
                    self.response_format["message"] = 'Account temporarily suspended, contact admin'
                    return Response(self.response_format, status=status.HTTP_200_OK)
                else:
                    refresh = RefreshToken.for_user(user)
                    data = {'user': serializer.data, 'token': str(
                        refresh.access_token), 'refresh': str(refresh)}
                    self.response_format['status_code'] = 200
                    self.response_format["data"] = data
                    self.response_format["status"] = True
                    return Response(self.response_format, status=status.HTTP_200_OK)
            else:
                # Bad credentials (envelope code 106).
                self.response_format['status_code'] = 106
                self.response_format["data"] = {'detail': 'Invalid credentials'}
                self.response_format["status"] = True
                return Response(self.response_format, status=status.HTTP_401_UNAUTHORIZED)
        except Exception as e:
            self.response_format['status_code'] = 101
            self.response_format['status'] = False
            self.response_format['message'] = str(e)
            return Response(self.response_format, status=status.HTTP_200_OK)
class VerifyOTP(GenericAPIView):
    """Verify the one-time password of the authenticated user."""
    permission_classes = (IsAuthenticated,)
    serializer_class = VerifyOTPPostSchema

    def __init__(self, **kwargs):
        # Shared response envelope (status_code / data / status / message).
        self.response_format = ResponseInfo().response
        super(VerifyOTP, self).__init__(**kwargs)

    @swagger_auto_schema(tags=["Authorization"])
    def post(self, request):
        """Check the submitted OTP against the stored bcrypt hash.

        Custom envelope codes: 200 verified, 104 wrong OTP, 105 already
        verified, 103 no OTP supplied, 101 unexpected exception. Every path
        returns HTTP 200 per this API's envelope convention.
        """
        try:
            otp = request.data.get('otp', '')
            if(otp):
                user_data = Users.objects.get(id=request.user.id)
                if (user_data.is_verified == 0):
                    if(Hash.verify(user_data.otp, int(otp))):
                        # Mark the account verified and clear the stored hash.
                        user_data.is_verified = 1
                        user_data.otp = ''
                        user_data.save()
                        serializer = UsersSchema(user_data)
                        data = {'user' : serializer.data, 'errors': {}}
                        self.response_format['data'] = data
                        self.response_format['status_code'] = 200
                        return Response(self.response_format, status=status.HTTP_200_OK)
                    else:
                        # Hash mismatch (envelope code 104).
                        data = {'user': {}, 'errors': {'message': 'Invalid OTP'}}
                        self.response_format['status_code'] = 104
                        self.response_format['data'] = data
                        return Response(self.response_format, status=status.HTTP_200_OK)
                else:
                    # Account already verified (envelope code 105).
                    serializer = UsersSchema(user_data)
                    data = {'user': serializer.data, 'errors': {}}
                    self.response_format['message'] = 'Already Verified'
                    self.response_format['status_code'] = 105
                    self.response_format['data'] = data
                    return Response(self.response_format, status=status.HTTP_200_OK)
            else:
                # Empty/missing OTP in the request (envelope code 103).
                data = {'user': {}, 'errors': {'message': 'Invalid OTP'}}
                self.response_format['status_code'] = 103
                self.response_format['data'] = data
                return Response(self.response_format, status=status.HTTP_200_OK)
        except Exception as e:
            self.response_format['status_code'] = 101
            self.response_format['status'] = False
            self.response_format['message'] = str(e)
            return Response(self.response_format, status=status.HTTP_200_OK)
class LogoutAPIView(GenericAPIView):
    """Log the user out by handing the refresh token to LogoutSerializer."""
    serializer_class = LogoutSerializer
    permission_classes = (IsAuthenticated,)

    def __init__(self, **kwargs):
        # Shared response envelope (status_code / data / status / message).
        self.response_format = ResponseInfo().response
        super(LogoutAPIView, self).__init__(**kwargs)

    @swagger_auto_schema(tags=["Authorization"])
    def post(self, request):
        """Validate and save the logout serializer; always respond HTTP 200,
        with envelope code 200 on success or 101 on any failure."""
        try:
            serializer = self.serializer_class(data=request.data)
            serializer.is_valid(raise_exception=True)
            serializer.save()
        except Exception as e:
            self.response_format['status'] = False
            self.response_format['status_code'] = 101
            self.response_format['message'] = str(e)
        else:
            self.response_format['status'] = True
            self.response_format['status_code'] = 200
        return Response(self.response_format, status=status.HTTP_200_OK)
class LogoutAllView(GenericAPIView):
    """Placeholder endpoint: the disabled code below was meant to blacklist
    every outstanding token of the current user."""
    pass
    # permission_classes = (IsAuthenticated,)
    # def __init__(self, **kwargs):
    #     self.response_format = ResponseInfo().response
    #     super(LogoutAllView, self).__init__(**kwargs)
    # @swagger_auto_schema(tags=["Authorization"])
    # def post(self, request):
    #     pass
    #     tokens = OutstandingToken.objects.filter(user_id=request.user.id)
    #     for token in tokens:
    #         t, _ = BlacklistedToken.objects.get_or_create(token=token)
    #     return Response(status=status.HTTP_205_RESET_CONTENT)
class RefreshTokenView(GenericAPIView):
    """Issue a brand-new JWT pair for the already-authenticated user."""
    permission_classes = (IsAuthenticated,)
    serializer_class = RefreshTokenSerializer

    def __init__(self, **kwargs):
        # Shared response envelope (status_code / data / status / message).
        self.response_format = ResponseInfo().response
        super(RefreshTokenView, self).__init__(**kwargs)

    @swagger_auto_schema(tags=["Authorization"])
    def post(self, request):
        """Mint a fresh refresh/access pair.

        NOTE(review): the previous refresh token is not blacklisted here, and
        the success path returns HTTP 201 -- confirm both are intended.
        """
        try:
            user = Users.objects.get(id=request.user.id)
            refresh = RefreshToken.for_user(user)
            data = {'token': str(
                refresh.access_token), 'refresh': str(refresh)}
            self.response_format['status_code'] = 200
            self.response_format["data"] = data
            self.response_format["status"] = True
            return Response(self.response_format, status=status.HTTP_201_CREATED)
        except Exception as e:
            self.response_format['status_code'] = 101
            self.response_format['status'] = False
            self.response_format['message'] = str(e)
            return Response(self.response_format, status=status.HTTP_200_OK)
|
#!/usr/bin/env python
import json
import os
from os.path import join
import argparse
def write_doc_and_decl(op, layout, type_sig, type_map, operator, out_as_operand, compound_assignment):
    """Build the doxygen comment and C++ declaration for one operation overload.

    Returns a (doc, decl) pair of strings. `operator` selects an
    `operatorX` name instead of the lower-cased opcode; `out_as_operand`
    makes the output the first parameter; `compound_assignment` makes the
    function return void and take in1 by value.
    """
    indexed = list(enumerate(zip(layout, type_sig)))
    result_cpp = type_map[type_sig[0]]['cpp']
    doc_lines = ["/** %s" % op['doc']]
    # Function name: either an operator overload or the opcode sans "BH_".
    if operator is None:
        func_name = op['opcode'][3:].lower()
    else:
        func_name = "operator%s" % operator
    if out_as_operand:
        decl = "void %s(BhArray<%s> &out" % (func_name, result_cpp)
        doc_lines.append("* @param out Output array.")
        if len(indexed) > 1:
            decl += ", "
    else:
        # Compound assignment (e.g. "+=") returns nothing.
        prefix = "void " if compound_assignment else "BhArray<%s> " % result_cpp
        decl = prefix + "%s(" % func_name
    # Emit one parameter per input operand (signature position 0 is the output).
    for i, (symbol, t) in indexed[1:]:
        operand_cpp = type_map[t]['cpp']
        if symbol == "A":
            if i == 1 and compound_assignment:
                decl += "BhArray<%s> in%d" % (operand_cpp, i)
            else:
                decl += "const BhArray<%s> &in%d" % (operand_cpp, i)
            doc_lines.append("* @param in%d Array input." % i)
        else:
            decl += "%s in%d" % (operand_cpp, i)
            if i == 2 and ("REDUCE" in op['opcode'] or "ACCUMULATE" in op['opcode']):
                doc_lines.append("* @param in%d The axis to run over." % i)
            else:
                doc_lines.append("* @param in%d Scalar input." % i)
        if i < len(layout) - 1:
            decl += ", "
    decl += ")"
    if not out_as_operand:
        doc_lines.append("* @return Output array.")
    doc = "\n".join(doc_lines) + "\n*/\n"
    return (doc, decl)
def get_array_inputs(layout, ignore_ops=None):
    """Return names of the array operands ("out" / "inN") in *layout*.

    An operand is an array when its layout symbol is "A"; position 0 is the
    output. Positions listed in *ignore_ops* are skipped.

    BUGFIX: replaced the mutable default argument `ignore_ops=[]` with the
    None sentinel (backward-compatible; the default is still "ignore none").
    """
    if ignore_ops is None:
        ignore_ops = ()
    return ["out" if i == 0 else "in%d" % i
            for i, symbol in enumerate(layout)
            if i not in ignore_ops and symbol == "A"]
def write_broadcasted_shape(array_inputs):
    """Emit the C++ statement that computes the broadcast shape of the operands."""
    shape_calls = ", ".join("%s.shape()" % name for name in array_inputs)
    return "const Shape shape = broadcasted_shape<%d>({%s});" % (len(array_inputs), shape_calls)
def write_broadcast_and_enqueue(op, layout, array_inputs):
    """Emit broadcast_to statements plus the Runtime::enqueue call for one overload."""
    broadcast_lines = ["\tauto _{0} = broadcast_to({0}, shape);\n".format(name)
                       for name in array_inputs]
    call = "\tRuntime::instance().enqueue(%s, out" % op['opcode']
    for idx in range(1, len(layout)):
        operand = "in%d" % idx
        # Array operands were broadcast above; refer to their "_" alias.
        if operand in array_inputs:
            operand = "_" + operand
        call += ", %s" % operand
    call += ");\n"
    return "".join(broadcast_lines) + call
def main(args):
    """Generate array_operations.hpp/.cpp for the Bohrium CXX bridge.

    Reads opcodes.json and types.json from the core codegen directory and
    emits one C++ function per (opcode, type signature, layout) combination,
    writing the results into args.inc_output / args.src_output.
    """
    prefix = os.path.abspath(os.path.dirname(__file__))
    # Let's read the opcode and type files
    with open(join(prefix, '..', '..', 'core', 'codegen', 'opcodes.json')) as f:
        opcodes = json.loads(f.read())
    with open(join(prefix, '..', '..', 'core', 'codegen', 'types.json')) as f:
        types = json.loads(f.read())
    type_map = {}
    # Map type enum -> C++/BHC spellings. The final entry of types.json is
    # skipped -- presumably a placeholder type; confirm against types.json.
    for t in types[:-1]:
        type_map[t['enum']] = {
            'cpp': t['cpp'],
            'bhc': t['bhc'],
            'name': t['union'],
            'bhc_ary': "bhc_ndarray_%s_p" % t['union']
        }
    # Let's generate the header and implementation of all array operations
    head = ""
    impl = ""
    for op in opcodes:
        # BH_RANDOM gets a hand-written wrapper below.
        if op['opcode'] in ["BH_RANDOM"]:
            continue
        # Generate functions that takes no operands
        if len(op['types']) == 0:
            continue
        # Operand 0 (the output) is never an "input"; BH_GATHER's operand 1
        # is also excluded from broadcasting.
        ignore_ops = [0]
        if op['opcode'] == "BH_GATHER":
            ignore_ops.append(1)
        # Generate a function for each type signature
        head += "#ifndef DOXYGEN_SHOULD_SKIP_THIS\n\n"
        for type_sig in op['types']:
            for layout in op['layout']:
                array_inputs = get_array_inputs(layout, ignore_ops)
                (doc, decl) = write_doc_and_decl(op, layout, type_sig, type_map, None, True, False)
                head += "%s%s;\n\n" % (doc, decl)
                impl += decl
                impl += " {\n"
                # Same-type identity with one array input may be a cheap reset.
                if op['opcode'] == "BH_IDENTITY" and len(array_inputs) == 1 \
                        and type_map[type_sig[0]]['cpp'] == type_map[type_sig[1]]['cpp']:
                    impl += "\tif (is_same_array(out, in1)) { out.reset(in1); return; }\n"
                if len(array_inputs) > 0:
                    impl += "\t%s\n" % write_broadcasted_shape(array_inputs)
                else:
                    impl += "\tconst Shape &shape = out.shape();\n"
                impl += "\tShape out_shape = shape;\n"
                # Reductions drop the reduced axis from the output shape.
                if "REDUCE" in op['opcode']:
                    impl += "\tif (out_shape.size() == 1) { out_shape = {1}; } else " \
                            "{ out_shape.erase(out_shape.begin() + in2); }\n"
                impl += "\tif (!out.base()) { out.reset(BhArray<%s>{out_shape}); }\n" % type_map[type_sig[0]]['cpp']
                if op['opcode'] not in ['BH_SCATTER', 'BH_COND_SCATTER']:
                    impl += "\tif(out_shape != out.shape()) { " \
                            "throw std::runtime_error(\"Output shape miss match\"); }\n"
                for op_var in get_array_inputs(layout):
                    impl += "\tif(!%s.base()) { throw std::runtime_error(\"Operands not initiated\"); }\n" % op_var
                # Guard against partially-overlapping views of the same base.
                if len(array_inputs) > 1:
                    for op_var in array_inputs:
                        impl += '\tif(out.base() == {0}.base() && ' \
                                '!is_same_array(out, {0}) && may_share_memory(out, {0})) '.format(op_var)
                        impl += '{ throw std::runtime_error("When output and input uses the same base array, ' \
                                'they must be identical"); }\n'
                impl += write_broadcast_and_enqueue(op, layout, array_inputs)
                impl += "}\n"
        head += "#endif /* DOXYGEN_SHOULD_SKIP_THIS */\n"
        # Generate a function that returns its output for each type signature
        for type_sig in op['types']:
            if len(type_sig) > 1 and op['opcode'] != "BH_IDENTITY":
                for layout in op['layout']:
                    array_inputs = get_array_inputs(layout, ignore_ops)
                    if len(array_inputs) > 0:
                        (doc, decl) = write_doc_and_decl(op, layout, type_sig, type_map, None, False, False)
                        head += "%s%s;\n\n" % (doc, decl)
                        impl += decl
                        impl += " {\n"
                        impl += "\tBhArray<%s> out;\n" % type_map[type_sig[0]]['cpp']
                        impl += "\t%s(out" % op['opcode'][3:].lower()
                        for i in range(1, len(type_sig)):
                            impl += ", in%s" % i
                        impl += ");\n"
                        impl += "\treturn out;\n"
                        impl += "}\n"
        # Generate an operator overload for each type signature
        operator = {"BH_ADD": "+", "BH_SUBTRACT": "-", "BH_MULTIPLY": "*", "BH_DIVIDE": "/", "BH_MOD": "%",
                    "BH_BITWISE_AND": "&", "BH_BITWISE_OR": "|", "BH_BITWISE_XOR": "^"}
        if op['opcode'] in operator:
            head += "#ifndef DOXYGEN_SHOULD_SKIP_THIS\n\n"
            for type_sig in op['types']:
                for layout in op['layout']:
                    array_inputs = get_array_inputs(layout, ignore_ops)
                    if len(array_inputs) > 0:
                        (doc, decl) = write_doc_and_decl(op, layout, type_sig, type_map, operator[op['opcode']], False,
                                                         False)
                        head += "%s%s;\n\n" % (doc, decl)
                        impl += decl
                        impl += " {\n"
                        impl += "\tBhArray<%s> out;\n" % type_map[type_sig[0]]['cpp']
                        impl += "\t%s(out" % op['opcode'][3:].lower()
                        for i in range(1, len(type_sig)):
                            impl += ", in%s" % i
                        impl += ");\n"
                        impl += "\treturn out;\n"
                        impl += "}\n"
            head += "#endif /* DOXYGEN_SHOULD_SKIP_THIS */\n"
        # Generate += operator overload for each type signature
        if op['opcode'] in operator:
            head += "#ifndef DOXYGEN_SHOULD_SKIP_THIS\n\n"
            for type_sig in op['types']:
                for layout in op['layout']:
                    # Compound assignment needs an array as its first input.
                    if layout[1] == "A":
                        (doc, decl) = write_doc_and_decl(op, layout, type_sig, type_map, "%s=" % operator[op['opcode']],
                                                         False, True)
                        head += "%s%s;\n\n" % (doc, decl)
                        impl += decl
                        impl += " {\n"
                        impl += "\t%s(in1" % op['opcode'][3:].lower()
                        for i in range(1, len(type_sig)):
                            impl += ", in%s" % i
                        impl += ");\n"
                        impl += "}\n"
            head += "#endif /* DOXYGEN_SHOULD_SKIP_THIS */\n"
        impl += "\n\n"
        head += "\n\n"
    # Let's handle random
    doc = """
/*Fill out with random data.
The returned result is a deterministic function of the key and counter,
i.e. a unique (seed, indexes) tuple will always produce the same result.
The result is highly sensitive to small changes in the inputs, so that the sequence
of values produced by simply incrementing the counter (or key) is effectively
indistinguishable from a sequence of samples of a uniformly distributed random variable.
random123(out, seed, key) where: 'out' is the array to fill with random data
'seed' is the seed of a random sequence
'key' is the index in the random sequence */
"""
    impl += doc
    head += doc
    decl = "void random123(BhArray<uint64_t> &out, uint64_t seed, uint64_t key)"
    head += "%s;\n" % decl
    impl += "%s\n" % decl
    impl += """
{
\tRuntime::instance().enqueueRandom(out, seed, key);
}
"""
    # Let's add header and footer
    head = """/* Bohrium CXX Bridge: array operation functions. Auto generated! */
#pragma once
#include <cstdint>
#include <complex>
namespace bhxx {
template<typename T> class BhArray;
%s
} // namespace bhxx
""" % head
    impl = """/* Bohrium C Bridge: array operation functions. Auto generated! */
#include <bhxx/Runtime.hpp>
#include <bhxx/array_operations.hpp>
#include <bhxx/util.hpp>
namespace bhxx {
%s
} // namespace bhxx
""" % impl
    if not os.path.exists(args.inc_output):
        os.makedirs(args.inc_output)
    if not os.path.exists(args.src_output):
        os.makedirs(args.src_output)
    # Finally, let's write the files
    with open(join(args.inc_output, 'array_operations.hpp'), 'w') as f:
        f.write(head)
    with open(join(args.src_output, 'array_operations.cpp'), 'w') as f:
        f.write(impl)
if __name__ == "__main__":
    # CLI: <inc_output> <src_output> -- header and source output directories.
    parser = argparse.ArgumentParser(
        description='Generates the array operation source files for the Bohrium CXX bridge.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        'inc_output',
        help='Path to the header output directory.'
    )
    parser.add_argument(
        'src_output',
        help='Path to the source output directory.'
    )
    args = parser.parse_args()
    main(args)
|
#! -*-encoding=utf-8-*-
import pandas as pd
import hypertools as hyp
from hypertools.tools import cluster
data = pd.read_csv('F:\\mushrooms.csv')
#print data.head()
'''
Now let’s plot the high-dimensional data in a low dimensional space by passing it to HyperTools.
To handle text columns, HyperTools will first convert each text column into
a series of binary ‘dummy’ variables before performing the dimensionality reduction.
For example, if the ‘cap size’ column contained ‘big’ and ‘small’ labels,
this single column would be turned into two binary columns:
one for ‘big’ and one for ‘small’, where 1s represents the presence of that feature and 0s represents the absence
(for more on this, see the documentation for the get_dummies function in pandas).
'''
hyp.plot(data,'o') # Visualize the high-dimensional data in a low-dimensional space (via HyperTools)
# From the plot above, similar samples fall into neighbouring clusters, and several
# clearly distinct feature clusters are visible.
# Note: the clusters are not necessarily equal; we can later colour the plot by
# whatever features of the data we prefer.
#hyp.plot(data,'o',group=class_labels.legend=lisk(set(class_labels)))
# (The disabled line above needs class labels prepared beforehand.)
hyp.plot(data,'o',n_clusters=50) # Colour the plot by the requested number of clusters
# To obtain the cluster labels themselves, the clustering tool can be called
# directly via hyp.tools.cluster and its result passed to plot() as the grouping.
# [Note: a subpackage is not visible unless the parent is imported with * or explicitly!]
cluster_labels = cluster(data,n_clusters=50)
hyp.plot(data,'o',group=cluster_labels)
# [Note: HyperTools reduces dimensionality with PCA by default; to use another
# algorithm, reduce manually as below.]
from sklearn.manifold import TSNE
from hypertools.tools import df2mat
TSNE_model = TSNE(n_components=3)
reduced_data_t = TSNE_model.fit_transform(df2mat(data))
hyp.plot(reduced_data_t,'o')
from Class_object.Concluding_task_1 import Student
from Class_object.Concluding_task_2 import Course
# Interactive builder for a Course: collects subjects, enrolls students,
# applies a grade factor, then expels the student(s) with the lowest average.
user_num = int(input("Enter a number of the course: "))
user_name_course = input("Enter a name of the course: ")
user_num_subject = int(input("Enter the number of subjects: "))
user_capacity = int(input("Enter a max number of students: "))
user_student_list = []
user_subject_dict = {}
# NOTE(review): user_student_list and user_subject_dict are collected but never
# passed to Course -- confirm whether Course should receive them.
for i in range(user_num_subject):
    user_teacher = input("Enter a name of teacher: ")
    user_subject = input("Enter a name of subject:")
    user_subject_dict.update({user_teacher: user_subject})
course_1 = Course(user_num, user_name_course, user_capacity)
user_id = int(input("Enter the id of student.If '0' - End of input: "))
while user_id != 0:
    # Stop enrolling once the course is full.
    if not course_1.check_space():
        print("There is no place")
        break
    user_student_name = input("Enter the name of student: ")
    user_grade_dict = {}
    for i in range(int(input("Enter the number of subjects: "))):
        user_student_subject = input("Enter the name of subject: ")
        user_grade = int(input("Enter the grade: "))
        user_grade_dict.update({user_student_subject: user_grade})
    student_1 = Student(user_id, user_grade_dict, user_student_name)
    course_1.add_student(student_1)
    print(course_1)
    user_id = int(input("Enter the id of student.If '0' - End of input: "))
user_factor_subject = input("Enter the name of subject you gonna do factor: ")
user_num_factor = float(input("Enter the percent you want: "))
course_1.add_factor(user_factor_subject, user_num_factor)
print(course_1)
# Find the lowest average grade. Seeded at 100 as in the original code, so
# students all averaging above 100 are never expelled. Renamed from `min`,
# which shadowed the builtin.
lowest_avg = 100
for student in course_1.student_list:
    average = student.avarage()
    if average < lowest_avg:
        lowest_avg = average
# BUGFIX: iterate over a snapshot of the list -- deleting from
# course_1.student_list while iterating it could skip students that share
# the same lowest average.
for student in list(course_1.student_list):
    if student.avarage() == lowest_avg:
        course_1.del_student(student)
print(course_1)
|
from django.shortcuts import render
from annoying.decorators import render_to
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_protect
from .poll_api import Api
# Create your views here.
@csrf_protect
def api(request, method_name):
    """Dispatch the poll API call named *method_name* and return its JSON result."""
    backend = Api()
    backend.method = method_name
    # Optional POST parameters forwarded to the API object.
    backend.team_name = request.POST.get('team_name', None)
    backend.flag = request.POST.get('flag', None)
    payload = backend.select_method()
    return JsonResponse(payload)
import os
import sys
import struct
import zlib
import time
"""
#define IMAGE_MAGIC 0xAEAE
struct image_header
{
uint16_t magic;
uint8_t encrypt_algo;
uint8_t resv[1];
uint8_t version[VERSION_SZ];
uint32_t crc32;
uint32_t size;
//uint8_t padding[4060];
};
"""
def insert(original, new, pos):
    """Return a copy of *original* with *new* spliced in at index *pos*."""
    head, tail = original[:pos], original[pos:]
    return head + new + tail
def pack_image_lzma(filename, version):
try:
import pylzma
except ImportError:
print 'could import pylzma module'
exit(0)
fname = os.path.splitext(filename)
print 'firmware:', filename
f = file(filename, 'rb')
data = f.read()
f.close()
magic = 0xAEAE
encrypt_algo = 1
rescv = 0
#ISOTIMEFORMAT='%Y-%m-%d %X'
#version = time.strftime( ISOTIMEFORMAT, time.localtime() )
crc32 = zlib.crc32(data, 0) & 0xFFFFFFFF
crc32 ^= 0xFFFFFFFF
size = len(data)
lzma_data = pylzma.compress(data, dictionary=12, eos=0)
uncompressed_size = struct.pack("LL", size&0xffffffff, size>>32)
lzma_data = insert(lzma_data, uncompressed_size, 5)
f = file(fname[0] + '.bin.lzma', "wb")
f.write(lzma_data)
f.close()
lzma_crc32 = zlib.crc32(lzma_data, 0) & 0xFFFFFFFF
lzma_crc32 ^= 0xFFFFFFFF
lzma_size = len(lzma_data)
print ' size:', lzma_size
print ' version:', version
print ' crc32: %08x' % lzma_crc32
print ' uncompress size:', size
print ' uncompress crc32: %08x' % crc32
header = struct.pack("<HBB16sLLLL", magic, encrypt_algo, rescv, version, crc32, size, lzma_crc32, lzma_size)
f = file(fname[0] + '_otapackage_lzma.bin', "wb")
f.write(header)
f.write(lzma_data)
f.close()
def pack_image(filename, version):
    """Wrap *filename* in an uncompressed OTA image header.

    Writes <name>_otapackage.bin = header + raw firmware; the header layout
    matches struct image_header in the module docstring. Python 2 only.
    """
    fname = os.path.splitext(filename)
    print 'firmware:', filename
    f = file(filename, 'rb')
    data = f.read()
    f.close()
    magic = 0xAEAE
    encrypt_algo = 0  # 0 == plain (uncompressed) payload
    rescv = 0
    #ISOTIMEFORMAT='%Y-%m-%d %X'
    #version = time.strftime( ISOTIMEFORMAT, time.localtime() )
    # Inverted CRC32 of the payload.
    crc32 = zlib.crc32(data, 0) & 0xFFFFFFFF
    crc32 ^= 0xFFFFFFFF
    size = len(data)
    print ' size:', size
    print ' version:', version
    print ' crc32: %08x' % crc32
    # NOTE(review): version field is 24 bytes here but 16 in pack_image_lzma()
    # -- confirm which matches VERSION_SZ in the C header.
    header = struct.pack("<HBB24sLL", magic, encrypt_algo, rescv, version, crc32, size)
    f = file(fname[0] + '_otapackage.bin', "wb")
    f.write(header)
    f.write(data)
    f.close()
if __name__ == "__main__":
if len(sys.argv) < 3:
print sys.argv[0], "filename"
exit(0)
if len(sys.argv) == 3:
pack_image(sys.argv[1], sys.argv[2])
elif sys.argv[3] == 'lzma':
pack_image_lzma(sys.argv[1], sys.argv[2])
elif sys.argv[3] == 'LZMA':
pack_image_lzma(sys.argv[1], sys.argv[2]) |
import os
import cv2
import json
import sys
import tensorflow as tf
# Wagon segmentation with the help of Tensorflow
class App:
    """Segment wagons in a frame stream by detecting buffers with a TF classifier.

    Reads JSON lines {"frameNr", "leftEdge"} from stdin, classifies a fixed
    crop of each frame as buffer / non-buffer using a retrained graph, and
    writes JSON lines {"wagonStart", "wagonEnd"} to stdout.
    """

    def __init__(self):
        # Config
        self.debug = False
        # Global variables
        self.tf_files_dir = './tf_files'
        self.frames_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "output", "frames"))
        self.crop = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "output", "crop"))
        self.output_dir_stitched = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "..", "..", "output", "stiched"))

    def run(self):
        """Main loop: classify a crop of each incoming frame and report wagons."""
        self.print_debug('Loading tensorflow')
        self.create_output_dirs()
        # Loads label file, strips off carriage return
        label_lines = [line.rstrip() for line
                       in tf.gfile.GFile(self.tf_files_dir + '/retrained_labels.txt')]
        # Unpersists graph from file
        with tf.gfile.FastGFile(self.tf_files_dir + '/retrained_graph.pb', 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name='')
        with tf.Session() as sess:
            # Feed the image_data as input to the graph and get first prediction
            softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
            count = 1
            data = self.read_input()
            # Rolling windows of the last two buffer scores / left edges.
            previous_buffer_score = [0, 0]
            previous_left_edge = [0, 0]
            previous_buffer_left_edge = 0
            buffers_left_edges = []
            while (data):
                frame_nr = data["frameNr"]
                frame_path = os.path.join(self.frames_dir, '%05d.png' % frame_nr)
                leftedge = data["leftEdge"]
                # Skip frames whose left edge did not move since the last one.
                if(leftedge != previous_left_edge[-1]):
                    #Read frame
                    frame = cv2.imread(frame_path, cv2.IMREAD_COLOR)  # trainImage
                    self.print_debug('Processing frame %d' % frame_nr)
                    #Create crop (fixed window; assumes a fixed camera setup -- TODO confirm)
                    p1 = (290, 200)
                    p2 = (390, 440)
                    crop = frame[p1[0]:p2[0], p1[1]:p2[1]]
                    outputpath = "%s/%s_%05d.jpg" % (self.crop, frame_nr, count)
                    cv2.imwrite(outputpath, crop)
                    # Read in the image_data
                    image_data = tf.gfile.FastGFile(outputpath, 'rb').read()
                    #Feed the image to tensorflow
                    predictions = sess.run(softmax_tensor, \
                                           {'DecodeJpeg/contents:0': image_data})
                    #Fill the bufferscore with the prediction of the crop being a buffer
                    buffer_score = predictions[0][0]
                    threshold = 0.90
                    # A local maximum of the score above the threshold marks a buffer.
                    if (buffer_score < previous_buffer_score[1] and previous_buffer_score[0] < previous_buffer_score[1] and
                            previous_buffer_score[1] > threshold):
                        buffer_left_edge = self.calculate_buffer_left_edge(p1, p2, previous_left_edge)
                        self.write_output(previous_buffer_left_edge, buffer_left_edge)
                        buffers_left_edges.append(buffer_left_edge)
                        previous_buffer_left_edge = buffer_left_edge
                    #Update the buffer scores and left edges
                    self.update_buffer_scores(buffer_score, leftedge, previous_buffer_score, previous_left_edge)
                    if (self.debug):
                        self.write_classified_images(count, frame_nr, label_lines, predictions, crop)
                count += 1
                data = self.read_input()
            #self.draw_separation_lines(buffers_left_edges)
            self.print_debug('Finished writing frames to')

    def update_buffer_scores(self, buffer_score, leftedge, previous_buffer_score, previous_left_edge):
        """Shift the two-element history windows, appending the newest values (in place)."""
        del previous_buffer_score[0]
        previous_buffer_score.append(buffer_score)
        del previous_left_edge[0]
        previous_left_edge.append(leftedge)

    def calculate_buffer_left_edge(self, p1, p2, previous_left_edge):
        """Return the previous frame's left edge offset by the crop's vertical center."""
        return int(previous_left_edge[1] + round(p1[0] + (p2[0] - p1[0]) / 2))

    def write_classified_images(self, count, frame_nr, label_lines, predictions, crop):
        """Debug helper: save the crop into the directory of its top-scoring label."""
        # Sort to show labels of first prediction in order of confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        outputfile = "%s/%s_%05d_%03f.jpg" % (
            os.path.join(self.crop, label_lines[top_k[0]]), frame_nr, count, predictions[0][top_k[0]])
        cv2.imwrite(outputfile, crop)

    def draw_separation_lines(self, buffers_left_edges):
        """Draw a vertical line at each detected buffer on the stitched image.

        NOTE(review): '\\stitched.jpg' contains a literal backslash (Windows-only
        separator), and shape[1] (width) is used as the line's y-extent --
        verify both before re-enabling this method.
        """
        stitched_image = cv2.imread(self.output_dir_stitched + '\stitched.jpg', cv2.IMREAD_COLOR)  # trainImage
        for buffer_left_edge in buffers_left_edges:
            if(buffer_left_edge > 0):
                cv2.line(stitched_image, (buffer_left_edge, 0), (buffer_left_edge, stitched_image.shape[1]), (255, 0, 0), 5)
            else:
                # Negative edges are counted from the right-hand side.
                cv2.line(stitched_image, ( stitched_image.shape[0] + buffer_left_edge, 0), ( stitched_image.shape[0] + buffer_left_edge, stitched_image.shape[1]), (255, 0, 0), 5)
        cv2.imwrite(self.output_dir_stitched + '/stitched_wagons.jpg', stitched_image)

    def create_output_dirs(self):
        """Create the crop output directory (plus per-label dirs in debug mode)."""
        if not os.path.exists(self.crop):
            os.makedirs(self.crop)
        if(self.debug):
            if not os.path.exists(self.crop + '/buffer'):
                os.makedirs(self.crop + '/buffer')
            if not os.path.exists(self.crop + '/non buffer'):
                os.makedirs(self.crop + '/non buffer')

    def read_input(self):
        """Read one JSON object per stdin line; None on EOF."""
        line = sys.stdin.readline()
        if not line:
            return
        return json.loads(line)

    def write_output(self, wagonStart, wagonEnd):
        """Emit one wagon span as a JSON line on stdout."""
        outData = {"wagonStart": wagonStart, "wagonEnd": wagonEnd}
        outLine = json.dumps(outData)
        print(outLine)

    def print_debug(self, message):
        # Debug output goes to stderr so stdout stays machine-readable.
        if (self.debug):
            sys.stderr.write(message)
def main():
    """Run the wagon-segmentation app, then release any OpenCV windows."""
    App().run()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
|
from task2.data_util import *
from task2.config import Config
from task2.NER_model import NERModel
from task2.utils import *
if __name__ == "__main__":
    config = Config()
    # Three named splits share a single Config instance.
    train_dataset = Dataset(config=config, name="train")
    validate_dataset = Dataset(config=config, name="valid")
    test_dataset = Dataset(config=config, name="test")
    '''while True:
    dataset.cur_idx = 0
    has_one_epoch, batch_data, batch_label = dataset.get_one_batch()
    sentences_length, padded_sentences_word_lv, padded_word_lengths, padded_sentences_char_lv, padded_label = \
    dataset.batch_padding(batch_data, batch_label)'''
    model = NERModel(config, train_dataset, validate_dataset, test_dataset)
    # model.train(5)
    # Evaluate only (training above is disabled) and print the row-normalized
    # confusion matrix.
    confusion_mat = model.test()
    print(normalize_confusion_mat(confusion_mat))
    # model.predict_sentence()
    # print(dataset.get_one_batch())
    '''a = np.array([[1, 2, 3], [4, 5, 6]])
    a[1] = np.append(a[1], 4)
    print(a)'''
'''
test.py:
total accuracy: 0.957447
[[0.9915 0.0009 0.0015 0.0008 0.0002 0.0012 0.0006 0.0017 0.0016]
[0.0927 0.6787 0.0156 0.0343 0.0006 0.127 0.0006 0.0481 0.0024]
[0.1198 0.0228 0.7102 0.0024 0.0287 0.0108 0.0814 0.0048 0.0192]
[0.0465 0.0379 0. 0.8588 0.0037 0.0403 0.0031 0.0079 0.0018]
[0.0188 0.0017 0.0214 0.0103 0.9247 0. 0.0205 0. 0.0026]
[0.0183 0.0348 0.003 0.0059 0.0012 0.9191 0.0041 0.013 0.0006]
[0.0192 0. 0.0538 0. 0.0154 0.0308 0.8808 0. 0. ]
[0.0886 0.0408 0. 0.0141 0. 0.0675 0.0042 0.7764 0.0084]
[0.1689 0.0046 0.0411 0. 0.0091 0.0046 0.0639 0.0365 0.6712]]
train:
total accuracy: 0.995408
[[0.9992 0. 0.0003 0. 0. 0.0001 0.0001 0. 0.0001]
[0.0245 0.9362 0.0021 0.0059 0. 0.0229 0.0009 0.0071 0.0003]
[0.0076 0.0022 0.9754 0.0003 0.0013 0.0016 0.0113 0. 0.0003]
[0.0064 0.0071 0.0003 0.9758 0.0008 0.007 0.0005 0.002 0.0003]
[0.0015 0. 0.0011 0.0002 0.9951 0. 0.0015 0. 0.0004]
[0.0018 0.0018 0.0001 0.0004 0. 0.994 0.001 0.0008 0. ]
[0.0026 0. 0.0017 0. 0. 0.0009 0.9948 0. 0. ]
[0.0122 0.002 0.0003 0.0009 0. 0.0052 0.0006 0.9721 0.0067]
[0.0147 0. 0.0009 0. 0.0009 0. 0.0078 0.0113 0.9645]]
'''
|
import pprint as pp
import json, math, re, csv, itertools, os
from datetime import datetime
import numpy as np
# COUNTING UNIQUE TURKERS per HIT title across all batch-result CSVs (Python 2).
# NOTE(review): the exit() below makes the whole script a no-op -- remove it to run.
exit()
path_to_batch_results = "./batch_results"
batch_files = os.listdir(path_to_batch_results)
dict_turkers = {}
for bf_name in batch_files:
    print bf_name
    with open(path_to_batch_results+"/"+bf_name, mode='r') as bf:
        reader = csv.DictReader(bf)
        for row in reader:
            # Group survey codes by HIT title, then by worker id.
            if row['Title'] not in dict_turkers: dict_turkers[row['Title']]={}
            if row['WorkerId'] not in dict_turkers[row['Title']]:
                dict_turkers[row['Title']][row['WorkerId']]=[]
            dict_turkers[row['Title']][row['WorkerId']].append(row['Answer.surveycode'])
# pp.pprint(dict_turkers)
# Each HIT apparently represented 5 word-labeling tasks -- TODO confirm the factor of 5.
tasks_per_turker = [5*len(hitlist) for tid, hitlist in dict_turkers['Word labeling tasks'].iteritems()]
print tasks_per_turker
print len(tasks_per_turker)
print np.mean(tasks_per_turker), np.std(tasks_per_turker), np.max(tasks_per_turker), np.min(tasks_per_turker)
# Number of distinct workers per HIT title.
for k,d in dict_turkers.iteritems():
    print len(d.keys())
# tasks_per_turker = [len(hitlist) for tid, hitlist in dict_turkers['Pick the best label for a set of documents'].iteritems()]
# print tasks_per_turker
# print np.mean(tasks_per_turker)
# print np.std(tasks_per_turker)
# print np.max(tasks_per_turker)
# print np.min(tasks_per_turker)
# with open('turkers.json','w') as outfile:
#     json.dump(dict_turkers, outfile, indent = 4)
# file_topicJSON = open("nyt-50-topics-documents.json","r")
# topicJSON = json.loads(file_topicJSON.read())
import torch
import torchvision
from torchvision import transforms
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import os
import json
import tqdm
import random
# Class ids are the directory names under Train; a class's label is its
# index in this list. NOTE: populated at import time via filesystem I/O.
class_id_list = list()
for classdir in os.listdir(os.path.join("../image_exp/Classification/Data", "Train")):
    class_id_list.append(classdir)
ndata=14463 # number of data
val_id=[] # validation data indexes, 1000 in aggregate
# One integer index per line in val_id.txt.
with open("val_id.txt") as f:
    for line in f.readlines():
        val_id.append(int(line))
class TrafficDataset(Dataset):
    """Traffic-sign classification dataset; all images are loaded and
    transformed eagerly at construction time.

    mode="train"/"val": reads Train/<class>/ and splits samples via the
    module-level ``val_id`` index list.  mode="test": reads Test/ with labels
    looked up in test.json.  Labels are indices into ``class_id_list``.
    """
    def __init__(self, data_path, mode="train", resize=224,maxdata=99999):
        super(TrafficDataset, self).__init__()
        self.data_path = data_path
        self.mode = mode
        self.resize = resize
        self.data = []  # pre-transformed image tensors
        self.labels = []  # int class ids (indices into class_id_list)
        self.maxdata=maxdata  # hard cap on the number of samples loaded
        self.train_transform = transforms.Compose([lambda x: Image.open(x).convert('RGB'),
                                             transforms.Resize((self.resize, self.resize)),
                                             # transforms.RandomRotation(10),  # data augmentation
                                             # transforms.ColorJitter(0.2,0.2,0.2)
                                             transforms.ToTensor(),
                                             transforms.Normalize(
                                                 (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
                                             ])
        self.test_transform=transforms.Compose([lambda x: Image.open(x).convert('RGB'),
                                             transforms.Resize((self.resize, self.resize)),
                                             transforms.ToTensor(),
                                             transforms.Normalize(
                                                 (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
                                             ])
        if mode == "train" or mode=="val":
            cnt=0
            data_path = os.path.join(data_path, "Train")
            for classdir in tqdm.tqdm(os.listdir(data_path)):
                class_data_path = os.path.join(data_path, classdir)
                for img_name in os.listdir(class_data_path):
                    if cnt==self.maxdata:
                        break
                    # XOR split: "train" mode skips indices listed in val_id,
                    # "val" mode skips indices NOT listed in val_id.
                    if (mode=="train") ^ (cnt not in val_id):
                        cnt=cnt+1
                        continue
                    img_path = os.path.join(class_data_path, img_name)
                    try:
                        if mode=="train":
                            img = self.train_transform(img_path)
                        else:
                            img=self.test_transform(img_path)
                        self.data.append(img)
                        self.labels.append(class_id_list.index(classdir))
                    except(OSError, IOError):
                        print("Cannot open file "+img_path)
                    cnt=cnt+1
        elif mode == "test":
            json_path = os.path.join(data_path, "test.json")
            self.names=[]  # image file names; returned instead of labels in test mode
            with open(json_path, "r") as f:
                json_data = json.load(f)
            data_path = os.path.join(data_path, "Test")
            cnt=0
            for img_name in tqdm.tqdm(os.listdir(data_path)):
                img_path = os.path.join(data_path, img_name)
                try:
                    img = self.test_transform(img_path)
                    self.data.append(img)
                    self.labels.append(
                        class_id_list.index(json_data[img_name]))
                    self.names.append(img_name)
                except(OSError, IOError):
                    print("Cannot open file "+img_path)
                cnt=cnt+1
                if cnt==self.maxdata:
                    break
    def __getitem__(self, idx):
        # Train/val return (image, label); test returns (image, file name).
        if self.mode=="train":
            return self.data[idx],self.labels[idx]
        elif self.mode=="val":
            return self.data[idx], self.labels[idx]
        else:
            return self.data[idx], self.names[idx]
    def __len__(self):
        return len(self.data)
|
import numpy as np
import cv2
##import keras
##from keras.models import load_model
##model = load_model('savedmodel.h5')
# Locate digit-like regions in a photo: grayscale -> blur -> inverse
# threshold -> contours -> bounding boxes, then draw the boxes.
image=cv2.imread('photo1.jpg')
##print(image)
##cv2.imshow('image',image)
##cv2.waitKey()
##cv2.destroyAllWindows()
grayimage=cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #convert to grayscale to match dataset
##print(grayimage)
##cv2.imshow('gray',grayimage)
##cv2.waitKey()
##cv2.destroyAllWindows()
blurredgray=cv2.GaussianBlur(grayimage,(5,5),0) #we make the iamge blurred, because it gets thick, which is easier to see
##print(blurredgray)
##cv2.imshow('blur',blurredgray)
##cv2.waitKey()
##cv2.destroyAllWindows()
#Threshold the image
ret, im_th = cv2.threshold(blurredgray, 90, 255, cv2.THRESH_BINARY_INV) #makes dark parts black and light parts white and the '_INV' makes the blacks to white and whites to black
##print(im_th)
##cv2.imshow('im_th',im_th)
##cv2.waitKey()
##cv2.destroyAllWindows()
# NOTE(review): the 2-tuple unpacking below requires OpenCV 2.x or 4.x;
# OpenCV 3.x returns (image, contours, hierarchy).
ctrs, hier = cv2.findContours(im_th.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, ctrs, -1,(255,0,0),3)
##cv2.imshow('image',image)
##cv2.waitKey()
##cv2.destroyAllWindows()
# Bounding rectangles (x, y, w, h) for every external contour.
rectangles=[]
for eachContour in ctrs:
    rectangles.append(cv2.boundingRect(eachContour))
##print(rectangles)
for eachRectangle in rectangles:
    # Crop with a 10px margin around the box.
    # NOTE(review): x-10 / y-10 can go negative for boxes near the border,
    # which Python interprets as wrap-around slicing -- verify intent.
    ROI = im_th[eachRectangle[1]-10:eachRectangle[1]+eachRectangle[3]+10,eachRectangle[0]-10:eachRectangle[0]+eachRectangle[2]+10]
    if ROI.any():
        # Resize to the 28x28 MNIST-style shape and scale to [0, 1].
        imgarray=cv2.resize(ROI,(28,28))
        dilatedimg=cv2.dilate(imgarray,(3,3)) #this is to thicken
        dilatedlist=[dilatedimg]
        dilatedarray=np.array(dilatedlist)
        dilatedarray=dilatedarray/255
        print(dilatedarray.shape)
        print('yes')
        cv2.rectangle(im_th,(eachRectangle[0]-10,eachRectangle[1]-10),(eachRectangle[0]+eachRectangle[2]+10,eachRectangle[1]+eachRectangle[3]+10),(255,255,255),2)
cv2.imshow('image',im_th)
cv2.waitKey()
cv2.destroyAllWindows()
|
import numpy as np
import tensorflow as tf
class BiLSTM:
    """Wrap a forward/backward LSTM cell pair into one bidirectional layer.

    The per-direction outputs are concatenated along the last axis, so each
    timestep yields 2 * hidden_size features.
    """
    def __init__(self, hparams, scope):
        self.scope = scope
        self.hidden_nums = int(hparams['global']['hidden_size'])
    def __call__(self, hs, seq_len):
        # AUTO_REUSE lets the same variables be shared across repeated calls.
        scope_name = 'LSTM_Variables_%s' % self.scope
        with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
            self.fw_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_nums, name='fw_lstm_cell_%s' % self.scope)
            self.bw_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_nums, name='bw_lstm_cell_%s' % self.scope)
            direction_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
                self.fw_cell,
                self.bw_cell,
                hs,
                sequence_length=seq_len,
                dtype=tf.float32,
            )
            # direction_outputs == (forward, backward); fuse the feature axes.
            return tf.concat(list(direction_outputs), axis=-1)
|
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test T2Ramsey experiment
"""
import numpy as np
from qiskit.utils import apply_prefix
from qiskit.providers import BackendV1
from qiskit.providers.options import Options
from qiskit.providers.models import QasmBackendConfiguration
from qiskit.result import Result
from qiskit.test import QiskitTestCase
from qiskit_experiments.framework import ParallelExperiment
from qiskit_experiments.library import T2Ramsey
from qiskit_experiments.test.utils import FakeJob
class T2RamseyBackend(BackendV1):
    """
    A simple and primitive backend, to be run by the T2Ramsey tests
    """
    def __init__(
        self,
        p0=None,
        initial_prob_plus=None,
        readout0to1=None,
        readout1to0=None,
        conversion_factor=1,
    ):
        """
        Initialize the T2Ramsey backend

        p0 holds per-qubit lists of the Ramsey decay-curve parameters
        A * exp(-t/T2star) * cos(2*pi*f*t + phi) + B.
        readout0to1 / readout1to0 are per-qubit readout-error probabilities;
        conversion_factor converts delay units to seconds.
        """
        dt_factor_in_ns = conversion_factor * 1e9 if conversion_factor is not None else None
        configuration = QasmBackendConfiguration(
            backend_name="T2Ramsey_simulator",
            backend_version="0",
            n_qubits=int(1e6),
            basis_gates=["barrier", "h", "p", "delay", "measure"],
            gates=[],
            local=True,
            simulator=True,
            conditional=False,
            open_pulse=False,
            memory=False,
            max_shots=int(1e6),
            coupling_map=None,
            dt=dt_factor_in_ns,
        )
        self._t2ramsey = p0["T2star"]
        self._a_param = p0["A"]
        self._freq = p0["f"]
        self._phi = p0["phi"]
        self._b_param = p0["B"]
        self._initial_prob_plus = initial_prob_plus
        self._readout0to1 = readout0to1
        self._readout1to0 = readout1to0
        self._dt_factor = conversion_factor
        # Fixed seed so simulated shot noise is reproducible across runs.
        self._rng = np.random.default_rng(0)
        super().__init__(configuration)
    @classmethod
    def _default_options(cls):
        """Default options of the test backend."""
        return Options(shots=1024)
    def run(self, run_input, **options):
        """
        Run the T2Ramsey backend

        Analytically tracks the probability of measuring |+> per qubit
        through each circuit's delay instructions, then samples shot counts
        (with readout error) and returns them wrapped in a FakeJob.
        """
        self.options.update_options(**options)
        shots = self.options.get("shots")
        result = {
            "backend_name": "T2Ramsey backend",
            "backend_version": "0",
            "qobj_id": 0,
            "job_id": 0,
            "success": True,
            "results": [],
        }
        for circ in run_input:
            nqubits = circ.num_qubits
            qubit_indices = {bit: idx for idx, bit in enumerate(circ.qubits)}
            clbit_indices = {bit: idx for idx, bit in enumerate(circ.clbits)}
            counts = dict()
            if self._readout0to1 is None:
                ro01 = np.zeros(nqubits)
            else:
                ro01 = self._readout0to1
            if self._readout1to0 is None:
                ro10 = np.zeros(nqubits)
            else:
                ro10 = self._readout1to0
            for _ in range(shots):
                if self._initial_prob_plus is None:
                    prob_plus = np.ones(nqubits)
                else:
                    prob_plus = self._initial_prob_plus.copy()
                clbits = np.zeros(circ.num_clbits, dtype=int)
                for op, qargs, cargs in circ.data:
                    qubit = qubit_indices[qargs[0]]
                    if op.name == "delay":
                        delay = op.params[0]
                        # Evaluate the Ramsey curve at this delay (converted
                        # to the backend's time units via _dt_factor).
                        t2ramsey = self._t2ramsey[qubit] * self._dt_factor
                        freq = self._freq[qubit] / self._dt_factor
                        prob_plus[qubit] = (
                            self._a_param[qubit]
                            * np.exp(-delay / t2ramsey)
                            * np.cos(2 * np.pi * freq * delay + self._phi[qubit])
                            + self._b_param[qubit]
                        )
                    if op.name == "measure":
                        # we measure in |+> basis which is the same as measuring |0>
                        # Mix in both readout-error channels before sampling.
                        meas_res = self._rng.binomial(
                            1,
                            (1 - prob_plus[qubit]) * (1 - ro10[qubit])
                            + prob_plus[qubit] * ro01[qubit],
                        )
                        clbit = clbit_indices[cargs[0]]
                        clbits[clbit] = meas_res
                # Build the counts key MSB-first (Qiskit bit ordering).
                clstr = ""
                for clbit in clbits[::-1]:
                    clstr = clstr + str(clbit)
                if clstr in counts:
                    counts[clstr] += 1
                else:
                    counts[clstr] = 1
            result["results"].append(
                {
                    "shots": shots,
                    "success": True,
                    "header": {"metadata": circ.metadata},
                    "data": {"counts": counts},
                }
            )
        return FakeJob(self, result=Result.from_dict(result))
class TestT2Ramsey(QiskitTestCase):
    """Test T2Ramsey experiment"""
    def test_t2ramsey_run_end2end(self):
        """
        Run the T2Ramsey backend on all possible units

        For each supported delay unit, runs the experiment against the
        simulated backend and checks the fitted T2* and frequency fall
        within 3 units of the values the backend was configured with.
        """
        for unit in ["s", "ms", "us", "ns", "dt"]:
            if unit in ("s", "dt"):
                dt_factor = 1
            else:
                # e.g. "ms" -> 1e-3, "us" -> 1e-6, "ns" -> 1e-9
                dt_factor = apply_prefix(1, unit)
            osc_freq = 0.1
            estimated_t2ramsey = 20
            estimated_freq = 0.11
            # Set up the circuits
            qubit = 0
            if unit == "dt":  # dt requires integer values for delay
                delays = list(range(1, 46))
            else:
                delays = np.append(
                    (np.linspace(1.0, 15.0, num=15)).astype(float),
                    (np.linspace(16.0, 45.0, num=59)).astype(float),
                )
            exp = T2Ramsey(qubit, delays, unit=unit, osc_freq=osc_freq)
            default_p0 = {
                "A": 0.5,
                "T2star": estimated_t2ramsey,
                "f": estimated_freq,
                "phi": 0,
                "B": 0.5,
            }
            # Run once with an explicit initial guess and once letting the
            # analysis pick its own.
            for user_p0 in [default_p0, None]:
                exp.set_analysis_options(user_p0=user_p0, plot=True)
                backend = T2RamseyBackend(
                    p0={
                        "A": [0.5],
                        "T2star": [estimated_t2ramsey],
                        "f": [estimated_freq],
                        "phi": [0.0],
                        "B": [0.5],
                    },
                    initial_prob_plus=[0.0],
                    readout0to1=[0.02],
                    readout1to0=[0.02],
                    conversion_factor=dt_factor,
                )
                expdata = exp.run(backend=backend, shots=2000)
                expdata.block_for_results()  # Wait for job/analysis to finish.
                result = expdata.analysis_results()
                # result[0] is T2*, result[1] is the oscillation frequency.
                self.assertAlmostEqual(
                    result[0].value.value,
                    estimated_t2ramsey * dt_factor,
                    delta=3 * dt_factor,
                )
                self.assertAlmostEqual(
                    result[1].value.value,
                    estimated_freq / dt_factor,
                    delta=3 / dt_factor,
                )
                for res in result:
                    self.assertEqual(res.quality, "good", "Result quality bad for unit " + str(unit))
    def test_t2ramsey_parallel(self):
        """
        Test parallel experiments of T2Ramsey using a simulator.

        Runs two T2Ramsey experiments on qubits 0 and 2 in parallel; the
        backend's p0 lists use None for the unused middle qubit (index 1).
        """
        t2ramsey = [30, 25]
        estimated_freq = [0.1, 0.12]
        delays = [list(range(1, 60)), list(range(1, 50))]
        dt_factor = 1e-6
        osc_freq = 0.1
        exp0 = T2Ramsey(0, delays[0], osc_freq=osc_freq)
        exp2 = T2Ramsey(2, delays[1], osc_freq=osc_freq)
        par_exp = ParallelExperiment([exp0, exp2])
        p0 = {
            "A": [0.5, None, 0.5],
            "T2star": [t2ramsey[0], None, t2ramsey[1]],
            "f": [estimated_freq[0], None, estimated_freq[1]],
            "phi": [0, None, 0],
            "B": [0.5, None, 0.5],
        }
        backend = T2RamseyBackend(p0)
        expdata = par_exp.run(backend=backend, shots=1000)
        expdata.block_for_results()
        for i in range(2):
            sub_res = expdata.component_experiment_data(i).analysis_results()
            self.assertAlmostEqual(sub_res[0].value.value, t2ramsey[i], delta=3)
            self.assertAlmostEqual(
                sub_res[1].value.value,
                estimated_freq[i] / dt_factor,
                delta=3 / dt_factor,
            )
            for res in sub_res:
                self.assertEqual(
                    res.quality,
                    "good",
                    "Result quality bad for experiment on qubit " + str(i),
                )
    def test_t2ramsey_concat_2_experiments(self):
        """
        Concatenate the data from 2 separate experiments

        Verifies that feeding a first run's ExperimentData into a second run
        combines the data and does not degrade the T2* error estimate.
        """
        unit = "s"
        dt_factor = 1
        estimated_t2ramsey = 30
        estimated_freq = 0.09
        # First experiment
        qubit = 0
        delays0 = list(range(1, 60, 2))
        osc_freq = 0.08
        exp0 = T2Ramsey(qubit, delays0, unit=unit, osc_freq=osc_freq)
        default_p0 = {
            "A": 0.5,
            "T2star": estimated_t2ramsey,
            "f": estimated_freq,
            "phi": 0,
            "B": 0.5,
        }
        exp0.set_analysis_options(user_p0=default_p0)
        backend = T2RamseyBackend(
            p0={
                "A": [0.5],
                "T2star": [estimated_t2ramsey],
                "f": [estimated_freq],
                "phi": [0.0],
                "B": [0.5],
            },
            initial_prob_plus=[0.0],
            readout0to1=[0.02],
            readout1to0=[0.02],
            conversion_factor=1,
        )
        # run circuits
        expdata0 = exp0.run(backend=backend, shots=1000)
        expdata0.block_for_results()
        results0 = expdata0.analysis_results()
        # second experiment
        delays1 = list(range(2, 65, 2))
        exp1 = T2Ramsey(qubit, delays1, unit=unit)
        exp1.set_analysis_options(user_p0=default_p0)
        # Pass the first experiment's data so the second run appends to it.
        expdata1 = exp1.run(backend=backend, experiment_data=expdata0, shots=1000)
        expdata1.block_for_results()
        results1 = expdata1.analysis_results()
        self.assertAlmostEqual(
            results1[0].value.value,
            estimated_t2ramsey * dt_factor,
            delta=3 * dt_factor,
        )
        self.assertAlmostEqual(
            results1[1].value.value, estimated_freq / dt_factor, delta=3 / dt_factor
        )
        # More data should tighten (or at least not worsen) the error bar.
        self.assertLessEqual(results1[0].value.stderr, results0[0].value.stderr)
        self.assertEqual(len(expdata1.data()), len(delays0) + len(delays1))
|
import numpy as np
from tools import Indicators
class MovingAverage:
    """Backtests for simple moving-average crossover trading strategies.

    A position is opened when the fast MA crosses above the slow MA and
    closed on the opposite cross (the *_sltp variant also exits on
    stop-loss/take-profit).  ``balance`` is tracked per candle in quote
    currency; while a position is open it is the mark-to-market value net
    of the exit fee.
    """
    def __init__(self):
        pass
    def _aligned_mas(self, open_prices, fast_h, slow_h, family):
        """Build the fast/slow moving averages and align them with
        open_prices so index i refers to the same candle in all series."""
        if family == "sma":
            ma_fast = Indicators().sma(open_prices, fast_h)
            # Trim the fast MA so both MAs start at the same candle.
            ma_fast = ma_fast[slow_h-fast_h:]
            ma_slow = Indicators().sma(open_prices, slow_h)
            open_prices = open_prices[slow_h-1:]
        elif family == "ema":
            # EMAs are defined from the first candle; no trimming needed.
            ma_fast = Indicators().ema(open_prices, fast_h)
            ma_slow = Indicators().ema(open_prices, slow_h)
        return ma_fast, ma_slow, open_prices
    def ma_cross_strategy(self, open_prices,
                          fast_h=6, slow_h=13, family="sma",
                          start_balance=1000, fee=0.1):
        """Run a plain MA-cross backtest.

        :param open_prices: sequence of candle open prices
        :param fast_h: fast MA window
        :param slow_h: slow MA window
        :param family: "sma" or "ema"
        :param start_balance: initial quote-currency balance
        :param fee: trade fee in percent (0.1 means 0.1%)
        :return: np.ndarray of per-candle balances
        """
        ma_fast, ma_slow, open_prices = self._aligned_mas(open_prices, fast_h, slow_h, family)
        balance = np.ones(len(open_prices))
        balance[0] = start_balance
        hold_btc = 0
        for i in range(1, len(open_prices)):
            if hold_btc == 0:
                if (ma_fast[i] > ma_slow[i] and ma_fast[i-1] < ma_slow[i-1]): #buy btc
                    hold_btc = balance[i-1] / open_prices[i] * (1.0 - fee/100.0)
                # BUGFIX: carry the cash balance forward on every flat candle;
                # previously it was only set when a buy fired, leaving the
                # np.ones placeholder (1.0) in balance[i] while out of market.
                balance[i] = balance[i-1]
            else:
                balance[i] = hold_btc * open_prices[i] * (1.0 - fee/100.0)
                if (ma_fast[i] < ma_slow[i] and ma_fast[i-1] > ma_slow[i-1]): #sell btc
                    hold_btc = 0
        return balance
    def ma_cross_strategy_sltp(self, open_prices,
                               fast_h=6, slow_h=13, family="sma",
                               start_balance=1000, fee=0.1,
                               stop_loss=0.05, take_profit=0.07):
        """MA-cross backtest with additional stop-loss/take-profit exits.

        NOTE(review): ``change = 1 - price/buy_price`` is positive when the
        price has FALLEN, so ``change > take_profit`` triggers on a drop and
        ``-change > stop_loss`` on a rise -- the thresholds look swapped
        relative to their names.  Logic preserved as-is; confirm intent.

        :return: np.ndarray of per-candle balances
        """
        ma_fast, ma_slow, open_prices = self._aligned_mas(open_prices, fast_h, slow_h, family)
        balance = np.ones(len(open_prices))
        balance[0] = start_balance
        hold_btc = 0
        buy_price = 0
        for i in range(1, len(open_prices)):
            if hold_btc == 0:
                if (ma_fast[i] > ma_slow[i] and ma_fast[i-1] < ma_slow[i-1]): #buy btc
                    hold_btc = balance[i-1] / open_prices[i] * (1.0 - fee/100.0)
                    buy_price = open_prices[i]
                # BUGFIX: same carry-forward fix as in ma_cross_strategy.
                balance[i] = balance[i-1]
            else:
                balance[i] = hold_btc * open_prices[i] * (1.0 - fee/100.0)
                if (ma_fast[i] < ma_slow[i] and ma_fast[i-1] > ma_slow[i-1]): #sell btc
                    hold_btc = 0
                change = 1 - open_prices[i] / buy_price
                if (change > take_profit or -change > stop_loss):
                    hold_btc = 0
        return balance
def get_max_value(data_list, capacity):
    """Fractional knapsack: greedily pack items by value/weight ratio.

    :param data_list: iterable of (weight, value) pairs; weights must be > 0
    :param capacity: total weight budget
    :return: (total_value, details) where details is a list of
        [weight, value, fraction_taken] in the order the items were packed
    """
    by_ratio = sorted(data_list, key=lambda item: item[1] / item[0], reverse=True)
    total_value = 0
    details = list()
    for weight, value in by_ratio:
        if capacity - weight >= 0:
            # Item fits entirely.
            capacity -= weight
            total_value += value
            details.append([weight, value, 1])
        else:
            # Take the fraction that still fits, then stop: every remaining
            # item has a lower ratio and capacity is now exhausted.
            # (Debug prints removed from the original.)
            fraction = capacity / weight
            total_value += value * fraction
            details.append([weight, value, fraction])
            break
    return total_value, details
if __name__ == "__main__":
    # Demo run on a small (weight, value) inventory with capacity 85.
    items = [(10, 5), (20, 10), (30, 20), (40, 30)]
    total, picked = get_max_value(items, 85)
    print(total, picked)
|
#!/usr/bin/env micropython
from ev3dev2.motor import LargeMotor, MoveSteering, MoveTank, OUTPUT_C, OUTPUT_B
from ev3dev2.sensor import INPUT_1, INPUT_4
from ev3dev2.sensor.lego import ColorSensor
from time import sleep
# Drive motors on ports C and B; color sensors on ports 4 (left) and 1 (right).
motor = LargeMotor(OUTPUT_C)
tank_pair = MoveTank(OUTPUT_C, OUTPUT_B, motor_class=LargeMotor)
steer_pair = MoveSteering(OUTPUT_C, OUTPUT_B, motor_class=LargeMotor)
csl = ColorSensor(INPUT_4)  # left color sensor
csr = ColorSensor(INPUT_1)  # right color sensor
# Drive forward until a sensor reports color code 1.
# NOTE(review): with `and` the loop exits as soon as EITHER sensor sees
# color 1; if the intent was to stop only when BOTH see it, use `or`.
while csl.color != 1 and csr.color != 1:
    tank_pair.on(70,70)
"""motor.on(speed=30)
motor.wait_until_not_moving()
tank_pair.on_for_degrees(-100,100,720, brake=True, block=True)
tank_pair.on_for_degrees(60,60,90, brake=True, block=True)
steer_pair.on_for_degrees(60,60,90, brake=True, block=True) """
|
import numpy as np
import pandas as pd
from models import model_dt
# Module-level hyper-parameter store for the extraction procedure;
# populated by set_params() and read by fit()/predict().
ext_params = {}
def set_params(max_round, p_value):
    """Store the extraction hyper-parameters in the module-level ext_params.

    :param max_round: number of extraction rounds to run in fit()
    :param p_value: fraction of remaining samples extracted per round
    """
    global ext_params
    new_params = dict(max_round=max_round, p_value=p_value)
    ext_params = new_params
    print('Extraction params:', ext_params)
def fit(x, y, t, **kwargs):
    """Iteratively fit uplift trees while extracting high-|uplift| groups.

    Each round fits a tree on the remaining data, removes the top
    ``p_value`` fraction of samples (ranked by absolute uplift), and records
    that round's uplift threshold (u value).  The final round reuses the
    first round's model on whatever data remains.

    :param x: pd.DataFrame of features (the index is used for extraction)
    :param y: outcome values aligned with x
    :param t: treatment indicator aligned with x
    :return: zip of (fitted model, u value), one pair per round
    """
    kwargs.update({'method': 'ed'})
    fit_list = []
    u_list = []
    rest = len(y)  # number of samples not yet extracted
    full_x, full_y, full_t = x, y, t
    for idx in range(ext_params['max_round']):
        # ext_list is filled by model_dt.fit with per-group stats
        # (abs_uplift, n_samples, idx_list).
        ext_list = []
        p_value = ext_params['p_value']
        kwargs.update({'ext_list': ext_list})
        if idx == ext_params['max_round'] - 1:
            # Last round: fall back to the first model (or a full fit when
            # max_round == 1) and claim everything that is left.
            if idx == 0:
                fit_list.append(model_dt.fit(full_x, full_y, full_t, **kwargs))
            else:
                fit_list.append(fit_list[0])
            ext_idx_list = x.index.tolist()
            u_value = 0
            rest = 0
        else:
            fit_list.append(model_dt.fit(x, y, t, **kwargs))
            df_ext = pd.DataFrame(ext_list).sort_values('abs_uplift', ascending=False)
            if len(df_ext) == 1:
                # If there is only one group after building tree, it should be halted.
                u_value = 0
                rest = 0
                if idx > 0:
                    # Replace this round's model with the first round's.
                    fit_list.pop()
                    fit_list.append(fit_list[0])
                ext_idx_list = x.index.tolist()
                u_list.append(u_value)
                print('Before max round, tree has only one group.')
                print('Train) Round, u value, rest, number of extraction:', idx, u_value, rest, len(ext_idx_list))
                break
            # Take whole groups (highest uplift first) until the cumulative
            # sample count reaches p_value of the remaining data.
            df_ext['n_cumsum_samples'] = df_ext['n_samples'].cumsum()
            cut_len = rest * p_value
            cut_len_upper = df_ext[df_ext['n_cumsum_samples'] > cut_len]['n_cumsum_samples'].iloc[0]
            df_cut_ext = df_ext[df_ext['n_cumsum_samples'] <= cut_len_upper]
            if len(df_cut_ext) == len(df_ext):
                # Should not extract all data from training set
                df_cut_ext = df_ext.iloc[: -1]
            # Threshold = lowest |uplift| among the extracted groups.
            u_value = df_cut_ext.iloc[-1]['abs_uplift']
            ext_idx_list = df_cut_ext['idx_list'].sum()
            x = x.drop(ext_idx_list)
            y = y.drop(ext_idx_list)
            t = t.drop(ext_idx_list)
            rest -= len(ext_idx_list)
        u_list.append(u_value)
        print('Train) Round, u value, rest, number of extraction:', idx, u_value, rest, len(ext_idx_list))
    return zip(fit_list, u_list)
def predict(obj, newdata, **kwargs):
    """Score newdata with the round models produced by fit().

    Each sample is assigned the prediction of the FIRST round whose
    |uplift| estimate meets that round's u threshold; later rounds only
    fill samples no earlier round claimed.

    :param obj: iterable of (model, u value) pairs, as returned by fit()
    :param newdata: pd.DataFrame of samples to score
    :return: DataFrame of predictions (rows never claimed stay None)
    """
    kwargs.update({'method': 'ed'})
    meet_list = []  # per-round boolean masks of claimed samples
    final_pred = None
    rest = len(newdata)
    for idx, (model_fit, u_value) in enumerate(obj):
        pred = model_dt.predict(model_fit, newdata, **kwargs)
        # A sample "meets" this round when its |uplift| clears the threshold.
        meet = pd.Series(np.abs(pred['pr_y1_t1'] - pred['pr_y1_t0']) >= u_value)
        if idx == 0:
            final_pred = pred
            final_pred[~meet] = None
        else:
            # Drop samples already claimed by an earlier round.
            for prev_idx in range(idx):
                prev_meet = meet_list[prev_idx]
                meet[prev_meet] = False
            final_pred[meet] = pred[meet]
        meet_list.append(meet)
        if idx == ext_params['max_round'] - 1:
            rest = 0
        else:
            rest -= meet.sum()
        print('Prediction) Round, u value, rest, meet count:', idx, u_value, rest, meet.sum())
    return final_pred
|
import sqlite3
import csv
class SqliteReader():
    """Extract player-transfer statistics from the European Soccer sqlite DB
    and write league-to-league / team-to-team transfer-count matrices as CSV.
    """
    def __init__ (self, dbName):
        self.conn = sqlite3.connect(dbName)
        # Fixed league-name order used for the transfer matrix axes.
        self.league = ["England Premier League","Germany 1. Bundesliga", "Switzerland Super League", "Netherlands Eredivisie", "France Ligue 1", "Scotland Premier League", "Portugal Liga ZON Sagres", "Poland Ekstraklasa", "Italy Serie A", "Spain LIGA BBVA", "Belgium Jupiler League"]
        self.teams = []
    def readTeam(self):
        """Populate self.teams with every team_long_name in the DB."""
        cursor = self.conn.execute("SELECT team_long_name FROM Team")
        for row in cursor:
            self.teams.append(row[0])
        print (self.teams)
    def read(self, year):
        """Return player rows (dicts) for home-side players in the given season.

        NOTE(review): range(1,11) covers home_player_1..10 only -- if the
        schema has home_player_11, that slot is never queried; confirm.
        NOTE(review): the SQL is assembled by string concatenation; here the
        inputs are internal constants, but parameterized queries would be safer.
        The dict is keyed by player name, so only the last matching row per
        player is kept.
        """
        outputDict = dict()
        for i in range(1,11):
            cursor = self.conn.execute("SELECT player_name, match.id, season, team_long_name, League.name FROM Match JOIN Player ON home_player_"+str(i)+" = player_api_id JOIN Team on home_team_api_id = team_api_id JOIN League on league.id = league_id and season = '"+year+"'")
            for row in cursor:
                outputDict[row[0]] = row[1:]
        return self.reformated(outputDict)
        #self.writeFile(self.reformated(outputDict))
    def process(self):
        """Compare two consecutive seasons, find players who changed team,
        and write league-level and team-level transfer matrices to CSV.

        NOTE(review): the output file names say 2014-2015 / 2013-2014 while
        the data compared is 2014/2015 vs 2015/2016 -- labels look stale.
        """
        self.readTeam()
        list1 = self.read("2014/2015")
        list2 = self.read("2015/2016")
        # Players present in both seasons but at a different team.
        filterList = []
        for item1 in list1:
            for item2 in list2:
                if((item1["player_name"] == item2["player_name"]) and (item1["team_long_name"] != item2["team_long_name"])):
                    filterList.append(item1)
        #print (len(list1))
        #print (len(filterList))
        #print (filterList)
        #transfer between league
        # transferList[i][j] = number of movers from league i to league j.
        transferList = []
        for i in range(len(self.league)):
            sets = set()
            for item in filterList:
                if( item["league"] == self.league[i]):
                    sets.add(item["player_name"])
            tmpList = []
            for j in range(len(self.league)):
                count = 0
                for item in list2:
                    if( item["league"] == self.league[j]):
                        if item["player_name"] in sets:
                            print (item["player_name"])
                            count += 1
                tmpList.append(count)
            transferList.append(tmpList)
        self.writeFile_transform(self.reformated_transfer(transferList, self.league), 'transfer2014-2015.csv', self.league)
        # between teams
        # Same matrix, but with one row/column per team instead of per league.
        teamTransferList = []
        for i in range(len(self.teams)):
            sets = set()
            for item in filterList:
                if (item["team_long_name"] == self.teams[i]):
                    sets.add(item["player_name"])
                    print (sets)
            tmpList = []
            for j in range(len(self.teams)):
                count = 0
                for item in list2:
                    if (item["team_long_name"] == self.teams[j]):
                        if item["player_name"] in sets:
                            print ("count+1")
                            count+=1
                tmpList.append(count)
            teamTransferList.append(tmpList)
        # writefile
        self.writeFile_transform(self.reformated_transfer(teamTransferList,self.teams), "team_transfer2013-2014.csv",self.teams)
    def reformated_transfer(self, transferList, fieldList):
        """Turn a count matrix into a list of row dicts keyed by fieldList."""
        formatedList = []
        for item in transferList:
            tmpDict = dict()
            for i in range(len(fieldList)):
                tmpDict[fieldList[i]] = item[i]
            formatedList.append(tmpDict)
        return formatedList
    def writeFile_transform(self, transferList, outputFilename, fieldnames):
        """Write a list of row dicts to CSV with the given header fields."""
        with open(outputFilename, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for item in transferList:
                writer.writerow(item)
    def reformated(self, outputList):
        """Flatten the {player: (match_id, season, team, league)} dict into a
        list of dicts (the match id, v[0], is intentionally dropped)."""
        formatedList = []
        for k,v in outputList.items():
            tmpDict = dict()
            tmpDict["player_name"] = k
            tmpDict["season"] = v[1]
            tmpDict["team_long_name"] = v[2]
            tmpDict["league"] = v[3]
            formatedList.append(tmpDict)
        return formatedList
    def writeFile(self, resultList):
        """Write player rows to season2009.csv, sorted by team name."""
        def team_name(s):
            return s["team_long_name"]
        resultList = sorted(resultList, key = team_name)
        with open('season2009.csv', 'w') as csvfile:
            fieldnames = ['player_name', 'season', 'team_long_name', 'league']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for item in resultList:
                writer.writerow(item)
if __name__ == '__main__':
    # Run the full extraction against the bundled sqlite database.
    lite = SqliteReader("database v2-2.sqlite")
    lite.process()
|
from __future__ import print_function
class Node:
    """A stack entry that carries a value plus the running minimum at or
    below its position (Stack.push lowers ``min`` when appropriate)."""
    def __init__(self, item):
        # Until pushed, a node's running minimum is simply its own value.
        self.item = self.min = item
    def __str__(self):
        return str(self.item)
# Unbounded stack that tracks the running minimum in O(1) via per-node mins.
class Stack:
    def __init__(self):
        self.nodes = []
        self.top = -1  # index of the top node; -1 means empty
    def push(self, node):
        """Push node, propagating the smaller of its value and the current
        minimum into node.min."""
        if self.top >= 0:
            current_min = self.nodes[self.top].min
            if current_min < node.item:
                node.min = current_min
        self.nodes.append(node)
        self.top += 1
    def pop(self):
        """Remove and return the top node; on an empty stack return the
        sentinel string 'Stack empty' instead of raising."""
        if self.top < 0:
            return 'Stack empty'
        removed = self.nodes.pop(self.top)
        self.top -= 1
        return removed
    def min(self):
        """Return the minimum item currently on the stack (None if empty)."""
        if self.top >= 0:
            return self.nodes[self.top].min
# Demo: exercise the min-stack and show the tracked minimum after each change.
s = Stack()
s.push(Node(1))
s.push(Node(2))
print('Stack: %s' % str([n.item for n in s.nodes]))
print('Minimum: %d' % s.min())
s.push(Node(3))
s.push(Node(-1))
print('Stack: %s' % str([n.item for n in s.nodes]))
print('Minimum: %d' % s.min())
s.pop()
print('Stack: %s' % str([n.item for n in s.nodes]))
print('Minimum: %d' % s.min())
s.pop()
print('Stack: %s' % str([n.item for n in s.nodes]))
print('Minimum: %d' % s.min())
s.pop()
print('Stack: %s' % str([n.item for n in s.nodes]))
print('Minimum: %d' % s.min())
# Final pop empties the stack; Stack.pop returns the 'Stack empty' sentinel.
s.pop()
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
# Drop columns not used as features.
del train['Name']
del train['Ticket']
del train['Fare']
del train['Embarked']
train = train.fillna(value=0.0)
# Encode Sex as 1 (male) / 0 (female or missing).
for i in range(train.shape[0]):
    if train.at[i, 'Sex'] == 'male':
        train.at[i, 'Sex'] = 1
    else:
        train.at[i, 'Sex'] = 0
# Bucket Age into decades: Age_group = floor(age/10), capped at 7.
train['Age_group'] = 0
for i in range(train.shape[0]):
    for j in range(70, 0, -10):
        if train.at[i, 'Age'] > j:
            train.at[i, 'Age_group'] = int(j/10)
            break
del train['Age'] # it's unnecessary anymore
print(list(set(train['Cabin'].values))[:10]) # sample of 'Cabin' values
# Cabin_section = first letter of the cabin code ('0' when missing).
train['Cabin_section'] = '0'
for i in range(train.shape[0]):
    if train.at[i, 'Cabin'] != 0:
        train.at[i, 'Cabin_section'] = train.at[i, 'Cabin'][0]
CABIN_SECTION = list(set(train['Cabin_section'].values)) # will be reused for test data
print(CABIN_SECTION) # 'Cabin_Section' values
for i in range(train.shape[0]):
    train.at[i, 'Cabin_section'] = CABIN_SECTION.index(train.at[i, 'Cabin_section'])
del train['Cabin'] # it's unnecessary anymore
# One-hot encode the categorical columns via identity-matrix indexing.
pclass = np.eye(train['Pclass'].values.max()+1)[train['Pclass'].values]
age_group = np.eye(train['Age_group'].values.max()+1)[train['Age_group'].values]
cabin_section = np.eye(train['Cabin_section'].values.max()+1) \
                    [train['Cabin_section'].values.astype(int)] # prevent IndexError
X = train[['Sex', 'SibSp', 'Parch']].values
X = np.concatenate([X, age_group], axis=1)
X = np.concatenate([X, pclass], axis=1)
X = np.concatenate([X, cabin_section], axis=1)
X = X.astype(float)
y = train['Survived'].values
y = y.astype(float).reshape(-1, 1)
X_train, X_dev, y_train, y_dev = train_test_split(X, y, test_size=0.1, random_state=0)
# Same preprocessing for the test set (single pass per row).
del test['Name']
del test['Ticket']
del test['Fare']
del test['Embarked']
test = test.fillna(value=0.0)
test['Age_group'] = 0
test['Cabin_section'] = '0'
for i in range(test.shape[0]):
    if test.at[i, 'Sex'] == 'male':
        test.at[i, 'Sex'] = 1
    else:
        test.at[i, 'Sex'] = 0
    for j in range(70, 0, -10):
        if test.at[i, 'Age'] > j:
            test.at[i, 'Age_group'] = int(j/10)
            break
    if test.at[i, 'Cabin'] != 0:
        test.at[i, 'Cabin_section'] = test.at[i, 'Cabin'][0]
    # NOTE(review): .index raises ValueError for a cabin section that never
    # appeared in the training data -- confirm the test set cannot do that.
    test.at[i, 'Cabin_section'] = CABIN_SECTION.index(test.at[i, 'Cabin_section'])
del test['Cabin'] # it's unnecessary anymore
del test['Age'] # it's unnecessary anymore
pclass_test = np.eye(test['Pclass'].values.max()+1)[test['Pclass'].values]
age_group_test = np.eye(test['Age_group'].values.max()+1)[test['Age_group'].values]
cabin_section_test = np.eye(test['Cabin_section'].values.max()+1) \
                    [test['Cabin_section'].values.astype(int)] # prevent IndexError
X_test = test[['Sex', 'SibSp', 'Parch']].values
X_test = np.concatenate([X_test, age_group_test], axis=1)
X_test = np.concatenate([X_test, pclass_test], axis=1)
X_test = np.concatenate([X_test, cabin_section_test], axis=1)
X_test = X_test.astype(float)
id_test = test['PassengerId'].values
id_test = id_test.reshape(-1, 1)
seed = 7 # for reproducible purpose
input_size = X_train.shape[1] # number of features
learning_rate = 0.001 # most common value for Adam
epochs = 8500 # I've tested previously that this is the best epochs to avoid overfitting
# Build and train a 3-hidden-layer binary classifier (TF1 graph mode).
graph = tf.Graph()
with graph.as_default():
    tf.set_random_seed(seed)
    np.random.seed(seed)
    X_input = tf.placeholder(dtype=tf.float32, shape=[None, input_size], name='X_input')
    y_input = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='y_input')
    #W1 = tf.Variable(tf.random_normal(shape=[input_size, input_size], seed=seed), name='W1')
    W1 = tf.get_variable("W1", shape=[input_size, input_size], initializer=tf.contrib.layers.xavier_initializer())
    b1 = tf.Variable(tf.random_normal(shape=[input_size], seed=seed), name='b1')
    L1 = tf.add(tf.matmul(X_input, W1), b1)
    L1 = tf.nn.dropout(L1, keep_prob=0.7)
    W2 = tf.get_variable("W2", shape=[input_size, input_size], initializer=tf.contrib.layers.xavier_initializer())
    b2 = tf.Variable(tf.random_normal(shape=[input_size], seed=seed), name='b2')
    L2 = tf.add(tf.matmul(L1, W2), b2)
    L2 = tf.nn.dropout(L2, keep_prob=0.7)  # BUGFIX: was dropout(L1), which discarded layer 2's output
    W3 = tf.get_variable("W3", shape=[input_size, input_size], initializer=tf.contrib.layers.xavier_initializer())
    b3 = tf.Variable(tf.random_normal(shape=[input_size], seed=seed), name='b3')
    L3 = tf.add(tf.matmul(L2, W3), b3)
    L3 = tf.nn.dropout(L3, keep_prob=0.7)  # BUGFIX: was dropout(L1), which discarded layers 2-3
    #W2 = tf.Variable(tf.random_normal(shape=[input_size, 1], seed=seed), name='W2')
    W4 = tf.get_variable("W4", shape=[input_size, 1], initializer=tf.contrib.layers.xavier_initializer())
    b4 = tf.Variable(tf.random_normal(shape=[1], seed=seed), name='b4')
    # BUGFIX: sigmoid_cross_entropy_with_logits expects raw logits; the
    # original fed it the already-sigmoided output (a double sigmoid).
    logits = tf.add(tf.matmul(L3, W4), b4)
    sigm = tf.nn.sigmoid(logits, name='prob')
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_input,
                                                                  logits=logits, name='loss'))
    train_steps = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    pred = tf.cast(tf.greater_equal(sigm, 0.5), tf.float32, name='pred') # 1 if >= 0.5
    acc = tf.reduce_mean(tf.cast(tf.equal(pred, y_input), tf.float32), name='acc')
    init_var = tf.global_variables_initializer()
train_feed_dict = {X_input: X_train, y_input: y_train}
dev_feed_dict = {X_input: X_dev, y_input: y_dev}
test_feed_dict = {X_input: X_test} # no y_input since the goal is to predict it
sess = tf.Session(graph=graph)
sess.run(init_var)
# Report the untrained baseline before the training loop starts.
cur_loss = sess.run(loss, feed_dict=train_feed_dict)
train_acc = sess.run(acc, feed_dict=train_feed_dict)
test_acc = sess.run(acc, feed_dict=dev_feed_dict)
print('step 0: loss {0:.5f}, train_acc {1:.2f}%, test_acc {2:.2f}%'.format(
    cur_loss, 100*train_acc, 100*test_acc))
for step in range(1, epochs+1):
    sess.run(train_steps, feed_dict=train_feed_dict)
    cur_loss = sess.run(loss, feed_dict=train_feed_dict)
    train_acc = sess.run(acc, feed_dict=train_feed_dict)
    test_acc = sess.run(acc, feed_dict=dev_feed_dict)
    if step%100 != 0: # print result every 100 steps
        continue
    print('step {3}: loss {0:.5f}, train_acc {1:.2f}%, test_acc {2:.2f}%'.format(
        cur_loss, 100*train_acc, 100*test_acc, step))
'''
step 8500: loss 0.63441, train_acc 79.78%, test_acc 78.89%
L1 step 8500: loss 0.62886, train_acc 80.77%, test_acc 75.56%
initial value step 8500: loss 0.62095, train_acc 82.77%, test_acc 80.00%
initial value
'''
# Predict survival for the test set and pair it with the passenger ids.
y_pred = sess.run(pred, feed_dict=test_feed_dict).astype(int)
prediction = pd.DataFrame(np.concatenate([id_test, y_pred], axis=1),
                          columns=['PassengerId', 'Survived'])
print(prediction.head())
|
from HNL.Tools.ROC import ROC
from HNL.Tools.mergeFiles import merge
import os
import glob
from HNL.Tools.helpers import makePathTimeStamped
#
# Argument parser and logging
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('bkgr',     action='store',      default=None,   help='Select bkgr')
argParser.add_argument('--onlyReco', action='store_true', default=False,  help='only have reco efficiency')
argParser.add_argument('--wp', action='store', default='tight',  help='only have reco efficiency')
args = argParser.parse_args()
#Merges subfiles if needed
input_file_path = os.getcwd()+'/data/compareTauID/includeReco/'
if args.onlyReco:
    input_file_path += 'onlyReco/'
merge_files = glob.glob(input_file_path + '*')
for mf in merge_files:
    # Skip already-produced results and (unless requested) onlyReco dirs.
    if "Results" in mf: continue
    if not args.onlyReco and 'onlyReco' in mf: continue
    merge(mf)
# Per-sample efficiency histograms and normalized gen-tau pt distributions,
# split into signal (HNL) and background samples.
list_of_bkgr_eff = {}
list_of_signal_eff = {}
list_of_bkgr_pt = {}
list_of_signal_pt = {}
inputfiles_eff = glob.glob(input_file_path + '*/*root')
# Sample name = parent directory of each ROOT file.
samples = {f.split('/')[-2] for f in inputfiles_eff}
from HNL.Tools.efficiency import Efficiency
from HNL.Tools.helpers import getObjFromFile
for sample in samples:
    eff = Efficiency('efficiency_pt', None, None, input_file_path + sample+'/efficiency.root', subdirs = ['deeptauVSjets-none-none-'+args.wp, 'efficiency_pt'])
    if 'HNL' in sample:
        list_of_signal_eff[sample] = eff.getEfficiency()
        list_of_signal_pt[sample] = getObjFromFile(os.path.expandvars('$CMSSW_BASE/src/HNL/Test/data/plotTau/gen/' + sample + '/variables.root'), 'pt/'+sample+'-pt')
        # Normalize the pt shape to unit area for overlaying.
        list_of_signal_pt[sample].Scale(1./list_of_signal_pt[sample].GetSumOfWeights())
    else:
        list_of_bkgr_eff[sample] = eff.getEfficiency()
        list_of_bkgr_pt[sample] = getObjFromFile(os.path.expandvars('$CMSSW_BASE/src/HNL/Test/data/plotTau/gen/' + sample + '/variables.root'), 'pt/'+sample+'-pt')
        list_of_bkgr_pt[sample].Scale(1./list_of_bkgr_pt[sample].GetSumOfWeights())
from HNL.Plotting.plot import Plot
output_dir = os.getcwd()+'/data/Results/compareTauID/includeReco/'
if args.onlyReco: output_dir += 'onlyReco/'
output_dir = makePathTimeStamped(output_dir)
# One plot per signal sample: signal/background efficiency overlaid on the
# corresponding pt distributions.
for sample in list_of_signal_eff.keys():
    legend_names = ['efficiency in '+sample, 'efficiency in '+args.bkgr, 'tau distribution in '+sample, 'tau distribution in '+args.bkgr]
    p = Plot([list_of_signal_eff[sample], list_of_bkgr_eff[args.bkgr]], legend_names, sample, bkgr_hist = [list_of_signal_pt[sample], list_of_bkgr_pt[args.bkgr]])
    final_dir = output_dir
    if not args.onlyReco:
        final_dir += '/'+args.wp+'/'
    p.drawHist(output_dir = final_dir, draw_option = 'EP', bkgr_draw_option = 'EHist')
|
import unittest
def monge_min(A):
    '''
    Return the minimum value of every row of a Monge matrix.

    :param A: list[list[num]] a Monge matrix (rows of equal length)
    :return: list[num] the minimum of each row; [] for an empty matrix
    '''
    # The original body was an unfinished stub whose `while ... pass` loop
    # never advanced `i` and therefore never terminated. A plain row-wise
    # scan satisfies the documented contract for any matrix (Monge or not)
    # in O(rows * cols).
    return [min(row) for row in A]
class TestSolution(unittest.TestCase):
    # NOTE(review): these fixtures look like matrix-multiplication cases
    # ([[1,2],[3,4]] @ [[4,3],[2,1]] == [[8,5],[20,13]]), yet monge_min takes
    # a single matrix, so `monge_min(*first)` unpacks two arguments and will
    # raise TypeError. Confirm which function these examples were written for.
    def test_case(self):
        examples = (
            (([[1]], [[2]]), [[2]]),
            (([[1, 2],
               [3, 4]], [[4, 3],
                         [2, 1]]), [[8, 5],
                                    [20, 13]]),
        )
        for first, second in examples:
            self.assert_function(first, second)

    def assert_function(self, first, second):
        # Helper so every example reports its inputs on failure.
        self.assertEqual(monge_min(*first), second,
                         msg="first: {}; second: {}".format(first, second))
# Guard the test run so importing this module no longer executes (and exits
# via) the test suite; behaviour when run as a script is unchanged.
if __name__ == "__main__":
    unittest.main()
|
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
# Command-line configuration for the cross-stitch multi-task experiment.
parser = argparse.ArgumentParser(description='Multi-task: Cross')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()  # parsed options, read throughout the script below
class SegNet(nn.Module):
    """Cross-stitch SegNet with three task branches (semantic segmentation,
    depth, surface normals) that exchange information through learned
    cross-stitch units after every encoder/decoder stage."""

    def __init__(self):
        super(SegNet, self).__init__()
        # initialise network parameters
        filt = [64, 128, 256, 512, 512]  # channel width per encoder stage
        self.class_nb = 13  # number of semantic classes

        # define encoder/decoder layers: one ModuleList of 5 stages per task branch
        self.encoder_block_t = nn.ModuleList([nn.ModuleList([self.conv_layer([3, filt[0], filt[0]], bottle_neck=True)])])
        self.decoder_block_t = nn.ModuleList([nn.ModuleList([self.conv_layer([filt[0], filt[0], filt[0]], bottle_neck=True)])])
        for j in range(3):
            if j < 2:
                # create the remaining two task branches
                self.encoder_block_t.append(nn.ModuleList([self.conv_layer([3, filt[0], filt[0]], bottle_neck=True)]))
                self.decoder_block_t.append(nn.ModuleList([self.conv_layer([filt[0], filt[0], filt[0]], bottle_neck=True)]))
            for i in range(4):
                if i == 0:
                    self.encoder_block_t[j].append(self.conv_layer([filt[i], filt[i + 1], filt[i + 1]], bottle_neck=True))
                    self.decoder_block_t[j].append(self.conv_layer([filt[i + 1], filt[i], filt[i]], bottle_neck=True))
                else:
                    self.encoder_block_t[j].append(self.conv_layer([filt[i], filt[i + 1], filt[i + 1]], bottle_neck=False))
                    self.decoder_block_t[j].append(self.conv_layer([filt[i + 1], filt[i], filt[i]], bottle_neck=False))

        # define cross-stitch units: one mixing weight per (stage, branch)
        self.cs_unit_encoder = nn.Parameter(data=torch.ones(4, 3))
        self.cs_unit_decoder = nn.Parameter(data=torch.ones(5, 3))

        # define task-specific prediction heads
        self.pred_task1 = self.conv_layer([filt[0], self.class_nb], bottle_neck=True, pred_layer=True)
        self.pred_task2 = self.conv_layer([filt[0], 1], bottle_neck=True, pred_layer=True)
        self.pred_task3 = self.conv_layer([filt[0], 3], bottle_neck=True, pred_layer=True)

        # define pooling and unpooling functions
        self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
        self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)

        # learned log-variances for uncertainty weighting of the 3 task losses
        self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0)
            # FIX: removed the original `elif isinstance(m, nn.Parameter)` branch.
            # self.modules() only yields nn.Module instances, never nn.Parameter,
            # so the branch was dead; had it ever fired it would have crashed
            # (nn.Parameter has no `.weight`, and `nn.init.constant` is the
            # removed non-underscore initialiser). The cross-stitch parameters
            # are already initialised to ones above.

    def conv_layer(self, channel, bottle_neck, pred_layer=False):
        """Build one conv stage.

        channel: [in, mid, out] channel sizes ([in, out] for a prediction head).
        bottle_neck: True -> 2-conv stage (or 1x1 head), False -> 3-conv stage.
        pred_layer: True -> task prediction head without BN/ReLU.
        """
        if bottle_neck:
            if not pred_layer:
                conv_block = nn.Sequential(
                    nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
                    nn.BatchNorm2d(channel[1]),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1),
                    nn.BatchNorm2d(channel[2]),
                    nn.ReLU(inplace=True),
                )
            else:
                # prediction head: 3x3 conv followed by a 1x1 projection
                conv_block = nn.Sequential(
                    nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
                    nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
                )
        else:
            conv_block = nn.Sequential(
                nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
                nn.BatchNorm2d(channel[1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels=channel[1], out_channels=channel[1], kernel_size=3, padding=1),
                nn.BatchNorm2d(channel[1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=3, padding=1),
                nn.BatchNorm2d(channel[2]),
                nn.ReLU(inplace=True),
            )
        return conv_block

    def forward(self, x):
        """Run the three cross-stitched branches.

        Returns ([seg_logprobs, depth, unit_normals], logsigma)."""
        # per-branch (3) x per-stage (5) activation holders
        encoder_conv_t, decoder_conv_t, encoder_samp_t, decoder_samp_t, indices_t = ([0] * 3 for _ in range(5))
        for i in range(3):
            encoder_conv_t[i], decoder_conv_t[i], encoder_samp_t[i], decoder_samp_t[i], indices_t[i] = ([0] * 5 for _ in range(5))

        # encoder: after stage 0, every branch consumes the cross-stitch
        # combination of all branches' pooled features
        for i in range(5):
            for j in range(3):
                if i == 0:
                    encoder_conv_t[j][i] = self.encoder_block_t[j][i](x)
                    encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i])
                else:
                    encoder_cross_stitch = self.cs_unit_encoder[i - 1][0] * encoder_samp_t[0][i - 1] + \
                                           self.cs_unit_encoder[i - 1][1] * encoder_samp_t[1][i - 1] + \
                                           self.cs_unit_encoder[i - 1][2] * encoder_samp_t[2][i - 1]
                    encoder_conv_t[j][i] = self.encoder_block_t[j][i](encoder_cross_stitch)
                    encoder_samp_t[j][i], indices_t[j][i] = self.down_sampling(encoder_conv_t[j][i])

        # decoder: mirror of the encoder with max-unpooling, again cross-stitched
        for i in range(5):
            for j in range(3):
                if i == 0:
                    decoder_cross_stitch = self.cs_unit_decoder[i][0] * encoder_samp_t[0][-1] + \
                                           self.cs_unit_decoder[i][1] * encoder_samp_t[1][-1] + \
                                           self.cs_unit_decoder[i][2] * encoder_samp_t[2][-1]
                    decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1])
                    decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i])
                else:
                    decoder_cross_stitch = self.cs_unit_decoder[i][0] * decoder_conv_t[0][i - 1] + \
                                           self.cs_unit_decoder[i][1] * decoder_conv_t[1][i - 1] + \
                                           self.cs_unit_decoder[i][2] * decoder_conv_t[2][i - 1]
                    decoder_samp_t[j][i] = self.up_sampling(decoder_cross_stitch, indices_t[j][-i - 1])
                    decoder_conv_t[j][i] = self.decoder_block_t[j][-i - 1](decoder_samp_t[j][i])

        # task heads: log-softmax segmentation, raw depth, L2-normalised normals
        t1_pred = F.log_softmax(self.pred_task1(decoder_conv_t[0][-1]), dim=1)
        t2_pred = self.pred_task2(decoder_conv_t[1][-1])
        t3_pred = self.pred_task3(decoder_conv_t[2][-1])
        t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)

        return [t1_pred, t2_pred, t3_pred], self.logsigma
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_CROSS = SegNet().to(device)
optimizer = optim.Adam(SegNet_CROSS.parameters(), lr=1e-4)
# halve the learning rate every 100 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
# NOTE(review): 24981069 is presumably the baseline model's parameter count,
# making REL the relative model size — confirm against the companion scripts.
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_CROSS),
                                                         count_parameters(SegNet_CROSS) / 24981069))
print('LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
    nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
    print('Applying data augmentation on NYUv2.')
else:
    nyuv2_train_set = NYUv2(root=dataset_path, train=True)
    print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(
    dataset=nyuv2_train_set,
    batch_size=batch_size,
    shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(
    dataset=nyuv2_test_set,
    batch_size=batch_size,
    shuffle=False)
# Train and evaluate multi-task network (200 epochs)
multi_task_trainer(nyuv2_train_loader,
                   nyuv2_test_loader,
                   SegNet_CROSS,
                   device,
                   optimizer,
                   scheduler,
                   opt,
                   200)
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
import time
from celery.utils.log import get_task_logger
from artproject.celery import app
from art.utils.send_mail import send_email, pack_html
@app.task
def add(x, y):
    """Celery smoke-test task: return the sum of *x* and *y*."""
    result = x + y
    return result
@app.task
def tsend_email():
    """Build an HTML notification for a fixed URL and mail it to the
    hard-coded test recipient; returns True on completion."""
    target = "http://www.baidu.com"
    recipient = 'pythontest666@163.com'
    body = pack_html(target, recipient)
    send_email(recipient, body)
    print('tsend_email ok.')
    return True
|
from django.shortcuts import render
from initialise.models import User, Bandit
import random
def get_game(order, game_id):
real_game_id = ((order / pow(10, game_id - 1)) % 10)
game = Bandit.objects.get(id=real_game_id)
return game.p, game.reward
# Create your views here.
def playgame(request):
    """Play one $1 round of the bandit encoded in the URL path /<uid>/<game_id>/.

    Renders playgame.html with a status message; the game ends when the
    balance drops below 0 or reaches 1000.
    """
    items = request.path.split('/')  # ['', uid, game_id, ...]
    uid = items[1]
    game_id = int(items[2])
    content = ''
    if game_id < 1 or game_id > 5:
        content = "Invalid game id. Game id should be in [1,5]."
    elif not User.objects.filter(uid=uid).exists():
        content = "Invalid user name."
    else:
        user = User.objects.get(uid=uid)
        if user.balance < 0:
            content = "Sorry, you lost all your money. Game over!"
        elif user.balance >= 1000:
            content = "You win too much money. We have gone bankrupt! Game over!"
        else:
            content = "You bet on game {0} with $1. ".format(game_id)
            p, reward = get_game(user.order, game_id)
            win = True if random.random() < p else False
            if win:
                user.balance += float(reward)
                # NOTE(review): the message reports reward+1 while the balance
                # only increases by reward — confirm whether the $1 stake
                # should also be returned on a win.
                content += "Congrats! You win ${0}. Your current balance is ${1}.".format((reward+1), user.balance)
            else:
                user.balance -= 1
                content += "Ooops you lose. Your current balance is ${0}.".format(user.balance)
            user.save()
            # re-check the terminal conditions after this round's settlement
            if user.balance < 0:
                content += "Sorry, you lost all your money. Game over!"
            elif user.balance >= 1000:
                content += "You win too much money. We have gone bankrupt! Game over!"
    return render(request, "playgame.html", {"message":content})
# Python 2 script: read a value and print True/False per element length.
# NOTE(review): in Python 2, input() eval()s whatever the user types (a code
# injection hazard); raw_input() is presumably intended — confirm before use.
lis = input("Enter a input: ")
# Iterates the entered value's elements; prints True (and stops) at the first
# element of length >= 3, otherwise False for each shorter element.
for i in lis:
    if len(i) >= 3:
        print "True"
        break
    elif len(i) <= 2:
        print "False"
"""
if len(lis) >= 2:
	print "True"
elif len(lis) <= 2:
	print "False"
else:
	pass
"""
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
# Request the URL and decode the response as UTF-8.
response = urlopen('https://en.wikipedia.org/wiki/Main_Page').read().decode('utf-8')
# Parse the page with BeautifulSoup.
soup = BeautifulSoup(response, 'html.parser')
# Collect every <a> tag whose href starts with /wiki/.
# FIX: raw strings for the regex patterns — the original '\.(jpg|JPG)'
# contains the invalid escape sequence '\.', which raises a
# DeprecationWarning (SyntaxWarning on Python >= 3.12).
listUrls = soup.findAll('a', href=re.compile(r'^/wiki/'))
# Print every entry's title and absolute URL.
for url in listUrls:
    # Skip links ending in .jpg or .JPG (image files).
    if not re.search(r'\.(jpg|JPG)', url['href']):
        print(url.get_text(), '<---->', 'https://en.wikipedia.org' + url['href'])
|
#!/usr/bin/env python3
import sys
import unittest
#Add your test here
# Default unit-test modules run when no test names are given on the CLI.
tests = [
    'unittests.test_simple_mangling',
    'unittests.test_kooc_file',
    'unittests.test_import',
    'unittests.test_class',
#    'unittests.test_module',
    'unittests.test_kooccall',
    ]
# NOTE(review): the commented entry below sits outside the list literal —
# presumably it was meant to live inside it like 'unittests.test_module'.
#    'unittests.test_newtest',
def main():
    """Build a unittest suite from CLI-given test names (file paths accepted)
    or fall back to the default `tests` list, then run it."""
    sys.tracebacklimit = 0
    # normalise CLI arguments like unittests/test_foo.py -> unittests.test_foo
    if len(sys.argv) > 1:
        names = [arg.replace('.py', '').replace('/', '.') for arg in sys.argv[1:]]
    else:
        names = list(tests)
    suite = unittest.TestSuite()
    for name in names:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromName(name))
    unittest.TextTestRunner().run(suite)
# Run the suite only when executed as a script, not on import.
if __name__ == '__main__':
    main()
## TEST OF OVERLOADS WITH DIFFERENTS STORAGES, QUALIFIERS OR SPECIFIER
# Little reminder :
# Storages = meta.enum('AUTO', 'REGISTER', 'TYPEDEF',
# 'STATIC', 'EXTERN', 'INLINE',
# 'VIRTUAL', 'EXPLICIT',
# 'FORCEINLINE', 'THREAD')
# Qualifiers = meta.enum('AUTO', 'CONST', 'VOLATILE', 'RESTRICT',
# 'W64', 'STDCALL', 'CDECL',
# 'PTR32', 'PTR64', 'FASTCALL')
# Specifiers = meta.enum('AUTO', 'STRUCT', 'UNION', 'ENUM', 'LONG',
# 'LONGLONG', 'SHORT')
|
import numpy as np
import matplotlib.pyplot as plt
from uncertainties import ufloat
from scipy.optimize import curve_fit
from scipy.stats import sem
import importlib
# Execute the two analysis scripts in this interpreter's namespace so their
# plotting calls share this module's globals.
# NOTE(review): exec(open(...).read()) is fragile (no module isolation, no
# traceback file names) — consider importing them as modules; kept as-is
# since the scripts may rely on the shared namespace.
exec(open('python/u_hall_iqconst_pos.py').read())
exec(open('python/u_hall_iqconst_neg.py').read())
plt.tight_layout()
# Save the combined figure assembled by the executed scripts.
plt.savefig("build/u_hall_i.pdf")
# "ohne fehler bis jetzt" = no error propagation so far
#
# Copyright 2019-2020 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from logging import NOTSET, getLogger
from logging.config import dictConfig
from logging.handlers import MemoryHandler
from os import getenv
from sys import maxsize
from typing import TYPE_CHECKING, ClassVar, Mapping, Optional
if TYPE_CHECKING:
    # Only needed for type annotations; avoids a hard runtime pytest dependency.
    from _pytest.config import Config
else:
    Config = object  # runtime placeholder so annotations still resolve
from nasty_utils.settings import Settings
# Default `logging.config.dictConfig` configuration: a colored console handler
# at INFO level plus a JSON-lines file handler that writes timestamped logs
# under {XDG_DATA_HOME}/logs and maintains a "-current" symlink.
DEFAULT_LOGGING_SETTINGS: Mapping[str, object] = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "colored": {
            "()": "nasty_utils.ColoredArgumentsFormatter",
            "format": "{log_color}{message}",
            "style": "{",
            "arg_color": "reset",
        },
        "json": {
            "()": "jsonlog.JSONFormatter",
            "keys": [
                "timestamp",
                "levelno",
                "level",
                "message",
                "name",
                "pathname",
                "lineno",
                "thread",
                "threadName",
                "process",
                "processName",
                "traceback",
            ],
            "timespec": "milliseconds",
        },
    },
    "handlers": {
        "console": {
            "class": "nasty_utils.TqdmAwareStreamHandler",
            "level": "INFO",
            "formatter": "colored",
        },
        "file": {
            "class": "nasty_utils.TqdmAwareFileHandler",
            "formatter": "json",
            "filename": "{XDG_DATA_HOME}/logs/{argv0}-{asctime:%Y%m%d-%H%M%S}.log",
            "encoding": "UTF-8",
            "symlink": "{XDG_DATA_HOME}/logs/{argv0}-current.log",
        },
    },
    "root": {"level": "DEBUG", "handlers": ["console", "file"]},
}
class LoggingSettings(Settings):
    """Settings model that owns the process-wide logging configuration."""

    # Buffers records emitted before setup_logging() runs so they can be
    # replayed through the real handlers afterwards.
    memory_handler: ClassVar[Optional[MemoryHandler]] = None

    # dictConfig-style configuration (copied so instances may mutate it).
    logging: dict = dict(DEFAULT_LOGGING_SETTINGS)  # type: ignore

    @classmethod
    def setup_memory_logging_handler(cls) -> None:
        """Install a record-buffering handler on the root logger."""
        # NOTE(review): this guard uses `and`, so repeated calls outside
        # pytest install duplicate handlers, and a first call under pytest
        # still installs one; the inverse guard in
        # remove_memory_logging_handler uses `or` — confirm whether `or`
        # was intended here as well.
        if cls._pytest_active() and cls.memory_handler:
            return
        cls.memory_handler = MemoryHandler(capacity=maxsize, flushLevel=maxsize)
        root = getLogger()
        root.setLevel(NOTSET)
        root.addHandler(cls.memory_handler)

    @classmethod
    def remove_memory_logging_handler(cls) -> None:
        """Detach and forget the buffering handler installed above."""
        if cls._pytest_active() or not cls.memory_handler:
            return
        root = getLogger()
        root.removeHandler(cls.memory_handler)
        cls.memory_handler = None

    def setup_logging(self) -> None:
        """Apply `self.logging` via dictConfig and replay buffered records
        through the newly configured handlers."""
        if self._pytest_active():
            return
        root = getLogger()
        dictConfig(dict(self.logging))
        if self.memory_handler:
            # replay pre-configuration records, respecting handler levels
            for record in self.memory_handler.buffer:
                for handler in root.handlers:
                    if record.levelno >= handler.level:
                        handler.handle(record)

    @classmethod
    def _pytest_active(cls) -> bool:
        # Use this to check if the current code is executed as part of a pytest test
        # run. In this case, all logging operations should become no-ops, in order to
        # not modify the pytest logging configuration.
        return bool(getenv("PYTEST_CURRENT_TEST"))

    @classmethod
    def setup_pytest_logging(
        cls,
        pytest_config: Config,
        *,
        level: str = "DEBUG",
        format_: str = (
            # NOTE(review): `%(msecs)03.f` formats msecs with zero decimal
            # places; the conventional spelling is `%(msecs)03d` — confirm.
            "%(asctime)s,%(msecs)03.f %(levelname).1s [ %(name)-42s ] %(message)s"
        ),
    ) -> None:
        """Configure pytest's own logging options (level, formats)."""
        pytest_config.option.log_level = level
        pytest_config.option.log_format = format_
        pytest_config.option.log_date_format = "%Y-%m-%dT%H:%M:%S"
        # When running pytest from PyCharm enable live cli logging so that we can click
        # a test case and see (only) its log output. When not using PyCharm, this
        # functionality is available via the html report.
        if pytest_config.pluginmanager.hasplugin(
            "teamcity.pytest_plugin"
        ):  # pragma: no cover
            pytest_config.option.log_cli_level = level
|
# -*- coding:utf-8 -*-
"""
Boston Housing Example.
"""
# Copyright 2017 Authors NJU PASA BigData Laboratory.
# Authors: Qiu Hu <huqiu00#163.com>
# License: Apache-2.0
from __future__ import print_function
from keras.datasets import boston_housing
from forestlayer.estimators.estimator_configs import ExtraRandomForestConfig, RandomForestConfig, GBDTConfig
from forestlayer.layers.layer import AutoGrowingCascadeLayer
# Load the Boston housing regression data (75/25 train/test split).
(x_train, y_train), (x_test, y_test) = boston_housing.load_data(test_split=0.25)
print("x_train: {}".format(x_train.shape))
print("x_test: {}".format(x_test.shape))
# One estimator of each flavour per cascade layer.
est_configs = [
    RandomForestConfig(),
    ExtraRandomForestConfig(),
    GBDTConfig()
]
# Cascade grows until 3 rounds pass without improvement; intermediate
# estimators are not kept in memory.
cascade = AutoGrowingCascadeLayer(task='regression',
                                  est_configs=est_configs,
                                  early_stopping_rounds=3,
                                  keep_in_mem=False)
cascade.fit_transform(x_train, y_train, x_test, y_test)
|
"""Distance functions."""
import numpy as np
from scipy import linalg as la
from typing import List, Callable, Union
import logging
from .scale import standard_deviation, span
from .base import Distance, to_distance
from ..storage import save_dict_to_json
logger = logging.getLogger("ABC.Distance")
class PNormDistance(Distance):
    """
    Weighted p-norm distance between sets of summary statistics:

    .. math::
        d(x, y) = \\left[ \\sum_{i} \\left| w_i ( x_i-y_i ) \\right|^{p} \\right]^{1/p}

    E.g. p=2 yields a (weighted) Euclidean distance.

    Parameters
    ----------
    p: float, optional (default = 2)
        Exponent of the norm. Must satisfy p >= 1; p = np.inf gives the
        maximum (infinity) norm.
    weights: dict, optional (default = 1)
        Weights, indexed by time point; each entry maps summary statistic
        labels to numeric weights. If None, every statistic gets weight 1.
        For a time point without an entry, the largest available time point
        is used. A plain label-indexed dict may also be passed when the
        weights are constant in time.
    factors: dict, optional (default = 1)
        Scaling factors multiplied onto the weights; same structure as
        `weights`. If None, every statistic gets factor 1. In this class the
        factors are redundant (weights alone suffice), but subclasses keep
        the factors static while adapting the weights over time.
    """

    def __init__(self,
                 p: float = 2,
                 weights: dict = None,
                 factors: dict = None):
        super().__init__()
        if p < 1:
            raise ValueError("It must be p >= 1")
        self.p = p
        self.weights = weights
        self.factors = factors

    def initialize(self,
                   t: int,
                   get_all_sum_stats: Callable[[], List[dict]],
                   x_0: dict = None):
        super().initialize(t, get_all_sum_stats, x_0)
        self.format_weights_and_factors(t, x_0.keys())

    def format_weights_and_factors(self, t, sum_stat_keys):
        # bring both dictionaries into the canonical {time: {label: value}} form
        self.weights = PNormDistance.format_dict(self.weights, t, sum_stat_keys)
        self.factors = PNormDistance.format_dict(self.factors, t, sum_stat_keys)

    def __call__(self,
                 x: dict,
                 x_0: dict,
                 t: int = None,
                 par: dict = None) -> float:
        # make sure everything is formatted correctly
        self.format_weights_and_factors(t, x_0.keys())

        # weights and factors applicable at time t
        w = PNormDistance.get_for_t_or_latest(self.weights, t)
        f = PNormDistance.get_for_t_or_latest(self.factors, t)

        def term(key):
            # weighted absolute deviation; 0 if the statistic is missing
            if key in x and key in x_0:
                return abs((f[key] * w[key]) * (x[key] - x_0[key]))
            return 0

        if self.p == np.inf:
            # maximum absolute deviation
            return max(term(key) for key in w)
        # weighted p-norm
        return pow(sum(term(key) ** self.p for key in w), 1 / self.p)

    def get_config(self) -> dict:
        return {"name": self.__class__.__name__,
                "p": self.p,
                "weights": self.weights,
                "factors": self.factors}

    @staticmethod
    def format_dict(w, t, sum_stat_keys, default_val=1.):
        """Normalize a weight/factor dict to the {time: {label: value}} form."""
        if w is None:
            # default: identical weight for every statistic at time t
            return {t: dict.fromkeys(sum_stat_keys, default_val)}
        if not isinstance(next(iter(w.values())), dict):
            # time-independent dict: wrap it for time t
            return {t: w}
        return w

    @staticmethod
    def get_for_t_or_latest(w, t):
        """Values for time t, falling back to the latest available time."""
        if t not in w:
            t = max(w)
        return w[t]
class AdaptivePNormDistance(PNormDistance):
    """
    In the p-norm distance, adapt the weights for each generation, based on
    the previous simulations. This class is motivated by [#prangle]_.

    Parameters
    ----------
    p:
        p for p-norm. Required p >= 1, p = np.inf allowed (infinity-norm).
        Default: p=2.
    initial_weights:
        Weights to be used in the initial iteration. Dictionary with
        observables as keys and weights as values.
    factors:
        As in PNormDistance.
    adaptive:
        True: Adapt distance after each iteration.
        False: Adapt distance only once at the beginning in initialize().
        This corresponds to a pre-calibration.
    scale_function:
        (data: list, x_0: float) -> scale: float. Computes the scale (i.e.
        inverse weight s = 1 / w) for a given summary statistic. Here, data
        denotes the list of simulated summary statistics, and x_0 the observed
        summary statistic. Implemented are absolute_median_deviation,
        standard_deviation (default), centered_absolute_median_deviation,
        centered_standard_deviation.
    normalize_weights:
        Whether to normalize the weights to have mean 1. This just possibly
        smoothes the decrease of epsilon and might aid numeric stability, but
        is not strictly necessary.
    max_weight_ratio:
        If not None, large weights will be bounded by the ratio times the
        smallest non-zero absolute weight. In practice usually not necessary,
        it is theoretically required to ensure convergence.
    log_file:
        A log file to store weights for each time point in. Weights are
        currently not stored in the database. The data are saved in json
        format and can be retrieved via `pyabc.storage.load_dict_from_json`.

    .. [#prangle] Prangle, Dennis. "Adapting the ABC Distance Function".
        Bayesian Analysis, 2017. doi:10.1214/16-BA1002.
    """

    def __init__(self,
                 p: float = 2,
                 initial_weights: dict = None,
                 factors: dict = None,
                 adaptive: bool = True,
                 scale_function: Callable = None,
                 normalize_weights: bool = True,
                 max_weight_ratio: float = None,
                 log_file: str = None):
        # call p-norm constructor
        super().__init__(p=p, weights=None, factors=factors)
        self.initial_weights = initial_weights
        self.factors = factors
        self.adaptive = adaptive
        if scale_function is None:
            scale_function = standard_deviation
        self.scale_function = scale_function
        self.normalize_weights = normalize_weights
        self.max_weight_ratio = max_weight_ratio
        self.log_file = log_file
        self.x_0 = None  # observed summary statistics, set in initialize()

    def configure_sampler(self,
                          sampler):
        """
        Make the sampler return also rejected particles,
        because these are needed to get a better estimate of the summary
        statistic variabilities, avoiding a bias to accepted ones only.

        Parameters
        ----------
        sampler: Sampler
            The sampler employed.
        """
        if self.adaptive:
            sampler.sample_factory.record_rejected = True

    def requires_calibration(self) -> bool:
        # calibration only needed when no initial weights were supplied
        return self.initial_weights is None

    def is_adaptive(self) -> bool:
        return self.adaptive

    def initialize(self,
                   t: int,
                   get_all_sum_stats: Callable[[], List[dict]],
                   x_0: dict = None):
        """
        Initialize weights.
        """
        super().initialize(t, get_all_sum_stats, x_0)
        self.x_0 = x_0

        # initial weights pre-defined
        if not self.requires_calibration():
            self.weights[t] = self.initial_weights
            return

        # execute function
        all_sum_stats = get_all_sum_stats()

        # update weights from samples
        self._update(t, all_sum_stats)

    def update(self,
               t: int,
               get_all_sum_stats: Callable[[], List[dict]]):
        """
        Update weights.
        """
        if not self.is_adaptive():
            return False

        # execute function
        all_sum_stats = get_all_sum_stats()

        self._update(t, all_sum_stats)

        return True

    def _update(self,
                t: int,
                all_sum_stats: List[dict]):
        """
        Here the real update of weights happens.
        """
        # retrieve keys
        keys = self.x_0.keys()

        # number of samples
        n_samples = len(all_sum_stats)

        # to-be-filled-and-appended weights dictionary
        w = {}

        for key in keys:
            # prepare list for key
            current_list = []
            for j in range(n_samples):
                if key in all_sum_stats[j]:
                    current_list.append(all_sum_stats[j][key])

            # compute scaling
            scale = self.scale_function(data=current_list, x_0=self.x_0[key])

            # compute weight (inverted scale)
            if np.isclose(scale, 0):
                # This means that either the summary statistic is not in the
                # samples, or that all simulations were identical. In either
                # case, it should be safe to ignore this summary statistic.
                w[key] = 0
            else:
                w[key] = 1 / scale

        # normalize weights to have mean 1
        w = self._normalize_weights(w)

        # bound weights
        w = self._bound_weights(w)

        # add to w attribute, at time t
        self.weights[t] = w

        # logging
        self.log(t)

    def _normalize_weights(self, w):
        """
        Normalize weights to have mean 1.

        This has just the effect that eps will decrease more smoothly, but is
        not important otherwise.
        """
        if not self.normalize_weights:
            return w

        mean_weight = np.mean(list(w.values()))
        for key in w:
            w[key] /= mean_weight

        return w

    def _bound_weights(self, w):
        """
        Bound all weights to self.max_weight_ratio times the minimum
        non-zero absolute weight, if self.max_weight_ratio is not None.

        While this is usually not required in practice, it is theoretically
        necessary that the ellipses are not arbitrarily eccentric, in order
        to ensure convergence.
        """
        if self.max_weight_ratio is None:
            return w

        # find minimum weight != 0
        w_arr = np.array(list(w.values()))
        min_abs_weight = np.min(np.abs(w_arr[w_arr != 0]))
        # can be assumed to be != 0

        for key, value in w.items():
            # bound too large weights
            if abs(value) / min_abs_weight > self.max_weight_ratio:
                w[key] = np.sign(value) * self.max_weight_ratio \
                    * min_abs_weight

        return w

    def get_config(self) -> dict:
        return {"name": self.__class__.__name__,
                "p": self.p,
                "factors": self.factors,
                "adaptive": self.adaptive,
                "scale_function": self.scale_function.__name__,
                "normalize_weights": self.normalize_weights,
                "max_weight_ratio": self.max_weight_ratio}

    def log(self, t: int) -> None:
        # emit the current weights and optionally persist them as JSON
        logger.debug(f"Weights[{t}] = {self.weights[t]}")

        if self.log_file:
            save_dict_to_json(self.weights, self.log_file)
class AggregatedDistance(Distance):
    """
    Aggregates a list of distance functions, all of which may work on subparts
    of the summary statistics. Then computes and returns the weighted sum of
    the distance values generated by the various distance functions.

    All class functions are propagated to the children and the obtained
    results aggregated appropriately.
    """

    def __init__(
            self,
            distances: List[Distance],
            weights: Union[List, dict] = None,
            factors: Union[List, dict] = None):
        """
        Parameters
        ----------
        distances: List
            The distance functions to apply.
        weights: Union[List, dict], optional (default = [1,...])
            The weights to apply to the distances when taking the sum. Can be
            a list with entries in the same order as the distances, or a
            dictionary of lists, with the keys being the single time points
            (if the weights should be iteration-specific).
        factors: Union[List, dict], optional (default = [1,...])
            Scaling factors that the weights are multiplied with. The same
            structure applies as to weights.
            If None is passed, a factor of 1 is considered for every summary
            statistic.
            Note that in this class, factors are superfluous as everything can
            be achieved with weights alone, however in subclasses the factors
            can remain static while weights adapt over time, allowing for
            greater flexibility.
        """
        super().__init__()
        if not isinstance(distances, list):
            distances = [distances]
        # normalize every entry to a Distance instance
        self.distances = [to_distance(distance) for distance in distances]
        self.weights = weights
        self.factors = factors

    def requires_calibration(self) -> bool:
        return any(d.requires_calibration() for d in self.distances)

    def is_adaptive(self) -> bool:
        return any(d.is_adaptive() for d in self.distances)

    def initialize(
            self,
            t: int,
            get_all_sum_stats: Callable[[], List[dict]],
            x_0: dict = None):
        super().initialize(t, get_all_sum_stats, x_0)
        for distance in self.distances:
            distance.initialize(t, get_all_sum_stats, x_0)
        self.format_weights_and_factors(t)

    def configure_sampler(
            self,
            sampler):
        """
        Note: `configure_sampler` is applied by all distances sequentially,
        so care must be taken that they perform no contradictory operations
        on the sampler.
        """
        for distance in self.distances:
            distance.configure_sampler(sampler)

    def update(
            self,
            t: int,
            get_all_sum_stats: Callable[[], List[dict]]) -> bool:
        """
        The `sum_stats` are passed on to all distance functions, each of
        which may then update using these. If any update occurred, a value
        of True is returned indicating that e.g. the distance may need to
        be recalculated since the underlying distances changed.
        """
        return any(distance.update(t, get_all_sum_stats)
                   for distance in self.distances)

    def __call__(
            self,
            x: dict,
            x_0: dict,
            t: int = None,
            par: dict = None) -> float:
        """
        Applies all distance functions and computes the weighted sum of all
        obtained values.
        """
        values = np.array([
            distance(x, x_0, t, par) for distance in self.distances
        ])
        self.format_weights_and_factors(t)
        weights = AggregatedDistance.get_for_t_or_latest(self.weights, t)
        factors = AggregatedDistance.get_for_t_or_latest(self.factors, t)
        # element-wise weight*factor, then weighted sum of distance values
        return float(np.dot(weights * factors, values))

    def get_config(self) -> dict:
        """
        Return configuration of the distance.

        Returns
        -------
        config: dict
            Dictionary describing the distance.
        """
        config = {}
        for j, distance in enumerate(self.distances):
            config[f'Distance_{j}'] = distance.get_config()
        return config

    def format_weights_and_factors(self, t):
        # bring both containers into the canonical {time: array} form
        self.weights = AggregatedDistance.format_dict(
            self.weights, t, len(self.distances))
        self.factors = AggregatedDistance.format_dict(
            self.factors, t, len(self.distances))

    @staticmethod
    def format_dict(w, t, n_distances, default_val=1.):
        """
        Normalize weight or factor dictionary to the employed format.
        """
        if w is None:
            # use default
            w = {t: default_val * np.ones(n_distances)}
        elif not isinstance(w, dict):
            # w is not time-dependent
            # so just create one for time t
            w = {t: np.array(w)}
        return w

    @staticmethod
    def get_for_t_or_latest(w, t):
        """
        Extract values from dict for given time point.
        """
        # take last time point for which values exist
        if t not in w:
            t = max(w)
        # extract values for time point
        return w[t]
class AdaptiveAggregatedDistance(AggregatedDistance):
    """
    Adapt the weights of `AggregatedDistances` automatically over time.

    Parameters
    ----------
    distances:
        As in AggregatedDistance.
    initial_weights:
        Weights to be used in the initial iteration. List with
        a weight for each distance function.
    factors:
        As in AggregatedDistance.
    adaptive:
        True: Adapt weights after each iteration.
        False: Adapt weights only once at the beginning in initialize().
        This corresponds to a pre-calibration.
    scale_function:
        Function that takes a list of floats, namely the values obtained
        by applying one of the distances passed to a set of samples,
        and returns a single float, namely the weight to apply to this
        distance function. Default: scale_span.
    log_file:
        A log file to store weights for each time point in. Weights are
        currently not stored in the database. The data are saved in json
        format and can be retrieved via `pyabc.storage.load_dict_from_json`.
    """

    def __init__(
            self,
            distances: List[Distance],
            initial_weights: List = None,
            factors: Union[List, dict] = None,
            adaptive: bool = True,
            scale_function: Callable = None,
            log_file: str = None):
        super().__init__(distances=distances)
        self.initial_weights = initial_weights
        self.factors = factors
        self.adaptive = adaptive
        self.x_0 = None  # observed summary statistics, set in initialize()
        if scale_function is None:
            scale_function = span
        self.scale_function = scale_function
        self.log_file = log_file

    def requires_calibration(self) -> bool:
        # calibrate when no initial weights given or any child needs it
        return (self.initial_weights is None
                or any(d.requires_calibration() for d in self.distances))

    def is_adaptive(self) -> bool:
        return (self.adaptive
                or any(d.is_adaptive() for d in self.distances))

    def initialize(self,
                   t: int,
                   get_all_sum_stats: Callable[[], List[dict]],
                   x_0: dict = None):
        """
        Initialize weights.
        """
        super().initialize(t, get_all_sum_stats, x_0)
        self.x_0 = x_0

        if self.initial_weights is not None:
            self.weights[t] = self.initial_weights
            return

        # execute function
        all_sum_stats = get_all_sum_stats()

        # update weights from samples
        self._update(t, all_sum_stats)

    def update(self,
               t: int,
               get_all_sum_stats: Callable[[], List[dict]]):
        """
        Update weights based on all simulations.
        """
        super().update(t, get_all_sum_stats)

        if not self.adaptive:
            return False

        # execute function
        all_sum_stats = get_all_sum_stats()

        self._update(t, all_sum_stats)

        return True

    def _update(self,
                t: int,
                sum_stats: List[dict]):
        """
        Here the real update of weights happens.
        """
        # to-be-filled-and-appended weights dictionary
        w = []

        for distance in self.distances:
            # apply distance to all samples
            current_list = [
                distance(sum_stat, self.x_0)
                for sum_stat in sum_stats
            ]
            # compute scaling
            scale = self.scale_function(current_list)

            # compute weight (inverted scale)
            if np.isclose(scale, 0):
                # This means that either the summary statistic is not in the
                # samples, or that all simulations were identical. In either
                # case, it should be safe to ignore this summary statistic.
                w.append(0)
            else:
                w.append(1 / scale)

        # add to w attribute, at time t
        self.weights[t] = np.array(w)

        # logging
        self.log(t)

    def log(self, t: int) -> None:
        # emit the current weights and optionally persist them as JSON
        logger.debug(f"updated weights[{t}] = {self.weights[t]}")

        if self.log_file:
            save_dict_to_json(self.weights, self.log_file)
class DistanceWithMeasureList(Distance):
    """
    Base class for distances that operate on a configurable subset of
    summary statistics ("measures"). Not functional on its own.

    Parameters
    ----------
    measures_to_use: Union[str, List[str]].
        * "all" (default): use every measure found in the observed data.
        * A list restricts the distance to the listed measures.
    """

    def __init__(self,
                 measures_to_use='all'):
        super().__init__()
        # which summary statistics enter the distance computation
        self.measures_to_use = measures_to_use

    def initialize(self,
                   t: int,
                   get_all_sum_stats: Callable[[], List[dict]],
                   x_0: dict = None):
        # resolve "all" to the concrete keys of the observed data
        if self.measures_to_use == 'all':
            self.measures_to_use = x_0.keys()

    def get_config(self):
        config = super().get_config()
        config["measures_to_use"] = self.measures_to_use
        return config
class ZScoreDistance(DistanceWithMeasureList):
    """
    Mean absolute relative deviation over the selected measures, with
    the measured data as reference:

    .. math::
        d(x, y) = \
        \\sum_{i \\in \\text{measures}} \\left| \\frac{x_i-y_i}{y_i} \\right|
    """

    def __call__(self,
                 x: dict,
                 x_0: dict,
                 t: int = None,
                 par: dict = None) -> float:
        total = 0.0
        for key in self.measures_to_use:
            if x_0[key] != 0:
                total += abs((x[key] - x_0[key]) / x_0[key])
            elif x[key] != 0:
                # reference is zero but simulation is not: infinite z-score
                total += np.inf
            # both zero contributes nothing
        return total / len(self.measures_to_use)
class PCADistance(DistanceWithMeasureList):
    """
    Distance in whitened coordinates.

    A whitening transformation :math:`W` is estimated from an initial
    sample; the distance is the euclidean distance in the transformed
    space:

    .. math::
        d(x,y) = \\| Wx - Wy \\|
    """

    def __init__(self, measures_to_use='all'):
        super().__init__(measures_to_use)
        self._whitening_transformation_matrix = None

    def _dict_to_vect(self, x):
        # fixed component ordering given by measures_to_use
        return np.asarray([x[measure] for measure in self.measures_to_use])

    def _calculate_whitening_transformation_matrix(self, sum_stats):
        # rows are samples, columns are features
        samples = np.asarray([self._dict_to_vect(s) for s in sum_stats])
        centered = samples - samples.mean(axis=0)
        covariance = centered.T.dot(centered)
        w, v = la.eigh(covariance)
        # W = V diag(1/sqrt(lambda)) V^T
        self._whitening_transformation_matrix = (
            v.dot(np.diag(1. / np.sqrt(w))).dot(v.T))

    def requires_calibration(self) -> bool:
        return True

    def initialize(self,
                   t: int,
                   get_all_sum_stats: Callable[[], List[dict]],
                   x_0: dict = None):
        super().initialize(t, get_all_sum_stats, x_0)
        # estimate the whitening matrix from an initial sample
        self._calculate_whitening_transformation_matrix(get_all_sum_stats())

    def __call__(self,
                 x: dict,
                 x_0: dict,
                 t: int = None,
                 par: dict = None) -> float:
        diff = self._dict_to_vect(x) - self._dict_to_vect(x_0)
        return la.norm(self._whitening_transformation_matrix.dot(diff), 2)
class RangeEstimatorDistance(DistanceWithMeasureList):
    """
    Abstract base class for distances that normalize each measure by an
    estimated range.

    The template methods ``lower`` and ``upper`` define the range:

    .. math::
        d(x, y) = \
        \\sum_{i \\in \\text{measures}} \\left | \\frac{x_i - y_i}{u_i - l_i}\
        \\right |

    where :math:`l_i` and :math:`u_i` are the lower and upper
    margin for measure :math:`i`.
    """

    @staticmethod
    def lower(parameter_list: List[float]):
        """
        Calculate the lower margin from a list of parameter values.

        Parameters
        ----------
        parameter_list: List[float]
            List of values of a parameter.

        Returns
        -------
        lower_margin: float
            The lower margin of the range calculated from these parameters.
        """

    @staticmethod
    def upper(parameter_list: List[float]):
        """
        Calculate the upper margin from a list of parameter values.

        Parameters
        ----------
        parameter_list: List[float]
            List of values of a parameter.

        Returns
        -------
        upper_margin: float
            The upper margin of the range calculated from these parameters.
        """

    def __init__(self, measures_to_use='all'):
        super().__init__(measures_to_use)
        # measure -> (upper - lower); filled in initialize()
        self.normalization = None

    def get_config(self):
        config = super().get_config()
        config["normalization"] = self.normalization
        return config

    def _calculate_normalization(self, sum_stats):
        """Estimate the per-measure range over all given samples."""
        values = {measure: [sample[measure] for sample in sum_stats]
                  for measure in self.measures_to_use}
        self.normalization = {
            measure: self.upper(values[measure]) - self.lower(values[measure])
            for measure in self.measures_to_use}

    def requires_calibration(self) -> bool:
        return True

    def initialize(self,
                   t: int,
                   get_all_sum_stats: Callable[[], List[dict]],
                   x_0: dict = None):
        super().initialize(t, get_all_sum_stats, x_0)
        # estimate the normalization constants from an initial sample
        self._calculate_normalization(get_all_sum_stats())

    def __call__(self,
                 x: dict,
                 x_0: dict,
                 t: int = None,
                 par: dict = None) -> float:
        return sum(
            abs((x[measure] - x_0[measure]) / self.normalization[measure])
            for measure in self.measures_to_use)
class MinMaxDistance(RangeEstimatorDistance):
    """
    Range estimator using the min and max of the observed parameter
    values as margins. Works surprisingly well for normalization in
    simple cases.
    """

    @staticmethod
    def upper(parameter_list):
        # upper margin: largest observed value
        return max(parameter_list)

    @staticmethod
    def lower(parameter_list):
        # lower margin: smallest observed value
        return min(parameter_list)
class PercentileDistance(RangeEstimatorDistance):
    """
    Range estimator using the 20th and 80th percentiles of the observed
    parameter values as lower and upper margins.
    """

    # percentile (in %) used for both margins
    PERCENTILE = 20

    @staticmethod
    def upper(parameter_list):
        # upper margin: (100 - PERCENTILE)th percentile
        return np.percentile(parameter_list,
                             100 - PercentileDistance.PERCENTILE)

    @staticmethod
    def lower(parameter_list):
        # lower margin: PERCENTILEth percentile
        return np.percentile(parameter_list,
                             PercentileDistance.PERCENTILE)

    def get_config(self):
        config = super().get_config()
        config["PERCENTILE"] = self.PERCENTILE
        return config
|
# Uses python3
import sys
"""
def optimal_sequence(n):
sequence = []
while n >= 1:
sequence.append(n)
if n % 3 == 0:
n = n // 3
elif n % 2 == 0:
n = n // 2
else:
n = n - 1
return reversed(sequence)
"""
"""
9 8 7 6 5 4 3 2 1
0 x x x x x x x x
0 1 2 3 4 5 6 7 8
"""
def optimal_sequence(n):
    """Return an optimal 1 -> n sequence using the operations
    *3, *2 and +1 (a minimum-length chain).

    DP runs over numbers n, n-1, ..., 1, where index i corresponds to
    the number n - i; all inverse transitions (/3, /2, -1) move from a
    larger number to a smaller one, i.e. from a lower to a higher index,
    so a single left-to-right pass relaxes everything correctly.
    """
    best = [n] * n          # best[i]: min ops to reach number n-i from n
    best[0] = 0             # reaching n itself costs nothing
    prev = [999] * n        # predecessor number on an optimal path
    for i in range(n - 1):
        value = n - i
        cost = best[i] + 1  # invariant within the iteration (only j > i change)
        if value % 3 == 0:
            j = n - value // 3
            if cost < best[j]:
                best[j] = cost
                prev[j] = value
        if value % 2 == 0:
            j = n - value // 2
            if cost < best[j]:
                best[j] = cost
                prev[j] = value
        if cost < best[i + 1]:
            best[i + 1] = cost
            prev[i + 1] = value
    # reconstruct the chain upward from 1 to n
    node = 1
    path = [node]
    while node != n:
        node = prev[n - node]
        path.append(node)
    return path
# print(optimal_sequence(96234))
# FIX: the original bound the whole of stdin to the name `input`,
# shadowing the builtin input() function.
data = sys.stdin.read()
n = int(data)
sequence = list(optimal_sequence(n))
# number of operations = number of intermediate values
print(len(sequence) - 1)
for x in sequence:
    print(x, end=' ')
|
import re
# Full party name (as it appears in the CSV) -> short abbreviation used
# in the generated output files.
partyDict = {
    'Latvijas Krievu savienība': 'LKS',
    'Jaunā konservatīvā partija': 'JKP',
    'Rīcības partija': 'Ricib',
    'Nacionālā apvienība Visu Latvijai!-Tēvzemei un Brīvībai/LNNK': 'NA',
    'PROGRESĪVIE': 'Progr',
    'Latvijas centriskā partija': 'Centr',
    'LSDSP/KDS/GKL': 'LSDSP',
    'No sirds Latvijai': 'NSL',
    'Saskaņa sociāldemokrātiskā partija': 'Sask',
    'Attīstībai/Par!': 'A-PAR',
    'Latvijas Reģionu Apvienība': 'LRA',
    'Latviešu Nacionālisti': 'LNacio',
    'Jaunā VIENOTĪBA': 'JV',
    'Par Alternatīvu': 'Altern',
    'Politiskā partija KPV LV': 'KPV',
    'Zaļo un Zemnieku savienība': 'ZZS'
}

# Running total of candidate slots over all parsed party headers.
# NOTE(review): mutated by parseParties() and read by readRecords();
# it accumulates across files because it is never reset — verify that
# this is intended when processing several regions.
candidateTotals = 0
def parseParties(ll):
    """Parse the party header line of the CSV.

    The line alternates party cells of the form '"N. Party name"' with
    runs of empty/newline cells; the length of the empty run encodes the
    number of candidate slots for the preceding party — presumably two
    cells per candidate (TODO confirm against the source file format).

    Returns a list of (party number, party name, candidate count)
    tuples. Side effect: adds every candidate count to the module-level
    candidateTotals counter.
    """
    global candidateTotals
    # '"N.  Name"' party header cell
    p = re.compile(r'^"(\d+)\.\s+(.*)"$')
    rQuotes = re.compile(r'"')
    result = []
    items = ll.split(',')
    # count the number of empty slots
    partyNum = 0
    cc = 0
    for item in items:
        if len(item) > 0:
            if item == '\n':
                cc = cc + 1
                if partyNum > 0:
                    # two empty cells per candidate (plus/minus columns)
                    partyCount = int((cc + 1) / 2);
                    result.append((partyNum, partyName, partyCount))
                    candidateTotals = candidateTotals + partyCount
                cc = 0
        if len(item) > 0 and item != '\n':
            if p.match(item):
                # start of a new party: reset the empty-slot counter
                cc = 0
                m = p.match(item)
                partyNum = int(m.group(1))
                partyName = m.group(2)
                # strip stray quotes embedded in the name
                partyName = rQuotes.sub('', partyName)
            else:
                print('Does not match11 %s (%d)' % (item, cc))
        else:
            cc = cc + 1
    print("partyCountTotals %s" % candidateTotals)
    return result
def parseCandidates(ll):
    """Parse a comma-separated line of candidate cells '"N. Name"'.

    Returns a list of (number, name) string tuples; cells that do not
    match the pattern are reported on stdout and skipped.
    """
    cell_re = re.compile(r'^"(\d+)\.\s+(.*)"$')
    parsed = []
    for cell in ll.split(","):
        if not cell or cell == '\n':
            continue
        m = cell_re.match(cell)
        if m:
            parsed.append((m.group(1), m.group(2)))
        else:
            print('Does not match22 %s' % cell)
    return parsed
def readRecords(inFile):
    """Read one region's plus/strike CSV and aggregate per-candidate totals.

    Line 1: party header (parsed by parseParties, which also grows the
    global candidateTotals). Line 2: candidate list. Line 3: skipped
    (used only to size the plus/minus accumulators). Remaining lines:
    per-polling-station rows of the form '"a","b",<counts>' where
    <counts> alternates plus and minus columns, two per candidate.

    Returns a list of (party number, party abbreviation, candidate
    number, candidate name, pluses, minuses) tuples.
    NOTE(review): relies on the global candidateTotals accumulated by
    parseParties — calling this twice without resetting it will
    mis-size the accumulators; verify against main()'s usage.
    """
    # '"...","...",rest' row pattern; group 3 is the counts payload
    p = re.compile(r'^"(.*)","(.*)",(.*)$')
    outRecords = []
    with open(inFile, encoding='utf-8') as f:
        content = f.readlines()
    # Line count
    count = 0
    for line in content:
        count = count + 1
        if count == 1:
            parties = parseParties(line)
        elif count == 2:
            candidates = parseCandidates(line)
        elif count == 3:
            # initialize plus and minus lists
            lPlus = [0] * candidateTotals
            lMinus = [0] * candidateTotals
        else:
            if p.match(line):
                m = p.match(line)
                line2 = m.group(3)
            else:
                print('Does not match33 %s' % line)
            # accumulate: even columns are pluses, odd columns minuses
            mur = line2.split(",")
            for x in range(candidateTotals):
                lPlus[x] = lPlus[x] + int(mur[2 * x])
                lMinus[x] = lMinus[x] + int(mur[2 * x + 1])
    # flatten party/candidate structure into output tuples
    offset = 0
    for party in parties:
        partyNum = party[0]
        partyAbbr = partyDict[party[1]]
        partyCount = party[2]
        for x in range(partyCount):
            candNum = int(candidates[offset + x][0])
            candName = candidates[offset + x][1]
            candPlus = lPlus[offset + x]
            candMinus = lMinus[offset + x]
            outRecords.append((partyNum, partyAbbr, candNum, candName, candPlus, candMinus))
        offset = offset + partyCount
    return outRecords
def getParties(inFile):
    """Parse and return the party list from the first line of inFile.

    Note: parseParties has the side effect of growing the global
    candidateTotals counter each time this is called.
    """
    with open(inFile, encoding='utf-8') as f:
        for lineno, line in enumerate(f, start=1):
            if lineno == 1:
                parties = parseParties(line)
    return parties
def main():
    """Convert each region's raw plus/strike CSV into a per-candidate CSV."""
    ROOT = 'candidates'
    # electoral regions; one input/output file pair per region
    apgabali = ['riga', 'vidzeme', 'latgale', 'kurzeme', 'zemgale']
    # apgabali = ['zemgale']
    for apgabals in apgabali:
        inFile = '%s/%s-plusi-svitrojumi.csv' % (ROOT, apgabals)
        outFile = '%s/%s-data.csv' % (ROOT, apgabals)
        theRecords = readRecords(inFile)
        with open(outFile, encoding='utf8', mode='w+') as fOutFile:
            fOutFile.write('"ListNo","ShortName","CandNo","CandName","Pluses","Minuses"\n')
            for record in theRecords:
                fOutFile.write('%d,"%s",%d,"%s",%d,%d\n' % record)
        parties = getParties(inFile)
        print('Parties are %s' % parties)


if __name__ == '__main__':
    main()
|
from math import *
def sieve(n):
    """Sieve of Eratosthenes: return a Boolean list of length n + 1
    where index i is True iff i is prime."""
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    # only primes up to sqrt(n) need their multiples crossed out
    for p in range(2, int(floor(sqrt(n))) + 1):
        if is_prime[p]:
            # cross out 2p, 3p, ... up to n
            for multiple in range(2 * p, n + 1, p):
                is_prime[multiple] = False
    return is_prime
def compute_quadratic(a, b, n):
    """Evaluate the quadratic n^2 + a*n + b at integer n."""
    return n * n + a * n + b
# Project Euler 27: find a, b (|a|, |b| <= 1000) whose quadratic
# n^2 + a*n + b produces the longest run of primes for n = 0, 1, 2, ...
prime_sieve = sieve(1000000)
m = -1          # best (longest) run length found so far, as a max index n
product = 0     # a * b of the best pair
for a in range(-1000, 1001):
    for b in range(-1000, 1001):
        n = -1
        while True:
            value = compute_quadratic(a, b, n + 1)
            # FIX: values below 2 are never prime; the original indexed
            # prime_sieve[value] directly, so a negative value silently
            # wrapped around to the END of the sieve (Python negative
            # indexing) and could be miscounted as prime.
            if value < 2 or not prime_sieve[value]:
                break
            n += 1
        if n > m:
            m = n
            product = a * b
print(m + 1)
print(product)
# TF-IDF é uma medida estatística que indica a importância de uma palavra TF - IDF = TF*IDF
# TF = Numero de vezes que uma palavra ocorre no documento / Número total de palavras no documento
import math
# One {word: value} dict per processed document (counts, later TF, then TF-IDF).
DicWords = []
# Scratch word counter for the document currently being processed.
DicWord = {}
# word -> total number of occurrences across all documents.
DicWordAllDocument = {}
def includeWordsOfDocuments(Document):
    """Split every document (a string) into its list of words."""
    return [text.split(" ") for text in Document]
def printAllDocument():
    """Print every document's word weights, with documents numbered from 1."""
    for number, document in enumerate(DicWords, start=1):
        # The index is the number of Document
        print("Document: ", number)
        for word, weight in document.items():
            print(word, "=>", weight)
def setTFinAllWords():
    """Replace each word's count by its term frequency.

    TF here divides by the number of distinct words with a positive
    count in the document (not the total word count).
    """
    for document in DicWords:
        positive = sum(1 for word in document if document.get(word, "") > 0)
        for word in document:
            document[word] = float(document.get(word, "") / positive)
def setTF(NumberWordDocument):
    """Multiply each document's TF values by the matching IDF values
    taken from NumberWordDocument, yielding TF-IDF."""
    for document in DicWords:
        for word in document:
            document[word] = (document.get(word, "")
                              * NumberWordDocument.get(word, ""))
def getPoints(AllDocuments):
    """Compute TF-IDF weights for the given documents.

    AllDocuments is either a list of already-split documents (lists of
    words) or a single raw string. Mutates and returns the module-level
    DicWords list (one {word: tf-idf} dict per document).
    NOTE(review): results accumulate in module globals across calls —
    calling this twice mixes the corpora; verify callers expect that.
    """
    # If given a list, split each document into its words; a plain string
    # is treated as a single pre-split document.
    if type(AllDocuments) == list:
        Documents = includeWordsOfDocuments(AllDocuments)
    elif type(AllDocuments) == str:
        Documents = [AllDocuments]
    # Count each word per document (DicWord) and over all documents
    # (DicWordAllDocument).
    for words in Documents:
        for word in words:
            if word in DicWord:
                DicWord[word] += 1
            else:
                DicWord[word] = 1
            if word in DicWordAllDocument:
                DicWordAllDocument[word] += 1
            else:
                DicWordAllDocument[word] = 1
        DicWords.append(DicWord.copy())
        DicWord.clear()
    # Document frequency: for each word, in how many documents it occurs.
    # Words absent from a document get an explicit 0 entry there.
    # NOTE(review): the `> index` guard presumably prevents counting a
    # word more than once per document — confirm; it compares a count
    # against a document index, which looks fragile.
    NumberWordDocument = {}
    for index, Document in enumerate(DicWords):
        for word in DicWordAllDocument:
            if word in Document and Document.get(word, "") > 0:
                if word in NumberWordDocument:
                    if not (NumberWordDocument[str(word)] > index):
                        NumberWordDocument[str(word)] += 1
                else:
                    NumberWordDocument[str(word)] = 1
            else:
                Document[word] = 0
    # counts -> term frequencies
    setTFinAllWords()
    # IDF = log(total documents / document frequency)
    NumberOfDocuments = len(DicWords)
    for word in NumberWordDocument:
        x = NumberOfDocuments / NumberWordDocument.get(word, " ")
        NumberWordDocument[word] = math.log(x)
    # TF * IDF
    setTF(NumberWordDocument)
    return DicWords
|
# coding: utf-8

# In[1]:

import sys

# dictionary file: whitespace-separated list of known words
with open(sys.argv[1]) as f:
    m = f.read()

dictionary = m.split()

# sentences file: one sentence per line
sentences = []
with open(sys.argv[2]) as d:
    sentences = d.readlines()

# remove punctuation and newlines before tokenizing
punct = ["「", "」", "。", "、", "!", "?", "“", "\n"]
for i in range(len(sentences)):
    for elem in punct:
        sentences[i] = sentences[i].replace(elem, "")

# FIX: the original `for sentence in sentences: sentence = sentence.strip()`
# only rebound the loop variable and never modified the list.
sentences = [sentence.strip() for sentence in sentences]

print(sentences)

#tokens = []
def tokenize(text, dictionary, tokens):
    """Greedy longest-prefix-match tokenizer.

    Repeatedly takes the longest prefix of `text` found in `dictionary`,
    appends it to `tokens`, and continues on the remainder; stops when
    no prefix matches. Mutates and returns `tokens`.

    FIXES vs the original: the recursion is now a loop (no recursion
    depth limit on long sentences), the unreachable second `return` is
    gone, the list is converted to a set once (O(1) lookups instead of
    an O(n) list scan per prefix), and the empty prefix is never tested
    (the original recursed forever if "" was in the dictionary).
    """
    words = set(dictionary)
    while text:
        # try prefixes from longest to shortest (greedy)
        for i in range(len(text), 0, -1):
            if text[:i] in words:
                tokens.append(text[:i])
                text = text[i:]
                break
        else:
            # no dictionary word matches any prefix: give up, keeping
            # whatever tokens were already found (original behavior)
            break
    return tokens
# Write the tokens, one per line, with a blank line after each sentence.
with open("max.txt", "w", encoding="utf-8") as final:
    for sentence in sentences:
        tokens = []
        tokenize(sentence, dictionary, tokens)
        final.write("\n".join(tokens))
        final.write("\n")
# FIX: removed the redundant final.close() that sat inside the with
# block — the with statement already closes the file on exit.
|
#======================== puzzle.builder.fromMask ========================
#
# @brief Create digital puzzles from a puzzle partition mask and a
# source iamge with a similar aspect ratio.
#
# The mask is white or "true" where the puzzle piece regions are and is
# black or "false" where the boundaries are. For an image and mask pair
# that are not the same aspect ratio, the image or the mask, or both can
# be warped to fit. If the image is bigger, then it can be cropped to
# fit.
#
# In addition, the puzzle pieces are automatically extracted and stored
# into a puzzle piece template class instance.
#
#======================== puzzle.builder.fromMask ========================
#
# @file fromMask.py
#
# @author Patricio A. Vela, pvela@gatech.edu
# @date 2021/07/25 [started]
#
#!NOTE:
#! Indent is set to 2 spaces.
#! Tab is set to 4 spaces with conversion to spaces.
#
#======================== puzzle.builder.fromMask ========================
# Imports go here. Aim for the bare minimum.
# Make sure to include in dependencies for this package.
# Delete this comment when done.
#
#======================== puzzle.builder.fromMask ========================
#
class fromMask:
  """Mask-based digital puzzle builder.

  The mask is white/True over puzzle-piece regions and black/False on
  the boundaries between pieces; pieces are to be extracted from a
  source image of similar aspect ratio using the mask.

  FIXES vs the original stub: added the missing colons on def/if
  lines, replaced the non-Python `false` with False, replaced mutable
  list default arguments with None sentinels, and turned the
  free-standing pseudocode (which made the module un-importable) into
  comments with explicit NotImplementedError placeholders.
  """

  #============================== fromMask =============================
  #
  # @brief  Constructor for mask-based puzzle builder. Can pass contents
  #         at instantiation time or delay until later.
  #
  # @param[in] theMask   The puzzle template mask. (optional)
  # @param[in] theImage  The puzzle image source. (optional)
  #
  def __init__(self, theMask=None, theImage=None):
    self.pieces = []    # @< The puzzle pieces.
    # None sentinels avoid the shared-mutable-default pitfall; keep []
    # as the effective "empty" value the original intended.
    self.mask = [] if theMask is None else theMask
    self.image = [] if theImage is None else theImage

    # Remainder of this constructor needs to be coded up.
    # Is processing automatic or triggered by calling scope?
    # If automatic, then need proper logic in member functions.

  #============================== setMask ==============================
  #
  # @brief  Provide the mask to use.
  #
  # @param[in] theMask   The puzzle template mask.
  #
  def setMask(self, theMask):
    self.mask = theMask
    # Should more be done?
    # Is processing automatic or triggered by calling scope?

  #============================== setImage =============================
  #
  # @param[in] theImage  The puzzle image source.
  #
  def setImage(self, theImage):
    self.image = theImage
    # Should more be done?
    # Is processing automatic or triggered by calling scope?

  #========================== setMaskAndImage ==========================
  #
  # @brief  Specify the mask and the image to use.
  #
  # @param[in] theMask   The puzzle template mask.
  # @param[in] theImage  The puzzle image source.
  # @param[in] doParse   Perform follow-up parsing? (optional boolean)
  #
  def setMaskAndImage(self, theMask, theImage, doParse=False):
    self.mask = theMask
    self.image = theImage
    if doParse:
      self.process()
    # Should more be done?
    # Is processing automatic or triggered by calling scope?
    # If automatic, then not need for flag. Remove it.

  #============================== process ==============================
  #
  # @brief  Parse the mask and apply to source image.
  #
  # When parsing is done, the pieces member variable is populated with
  # the puzzle piece information.
  #
  def process(self):
    # TODO: extract puzzle pieces from self.image using self.mask and
    # populate self.pieces.
    raise NotImplementedError("fromMask.process is not implemented yet")

  #=========================== explodedPuzzle ==========================
  #
  # @brief  Create an exploded version of the puzzle. It is an image
  #         with no touching pieces.
  #
  # The value of an exploded puzzle image is that it can be used to
  # generate a simulated puzzle scenario that can be passed to a puzzle
  # solver. It can also be used to define a quasi-puzzle problem, where
  # the objective is to place the pieces in grid ordering like the
  # exploded view (without needing to interlock). Doing so keeps puzzle
  # pieces well separated for simple puzzle interpretation algorithms to
  # rapidly parse.
  #
  # @param[in]  bgColor  The background color to use.
  #
  # @param[out] epImage  Exploded puzzle image.
  #
  def explodedPuzzle(self, bgColor):
    # PSEUDOCODE (original design notes, kept verbatim as comments):
    #--[1] First figure out how big the exploded image should be based
    #      on the puzzle image dimensions, the number of puzzle pieces
    #      across rows and columns, and the chosen spacing.
    #   [nr, nc] = image size
    #   dr = 0   # additional rows to add; accumulate
    #   dc = 0   # additional columns to add; accumulate
    #   nr = nr + dr ; nc = nc + dc
    #   epImage = image of size nr x nc filled with bgColor
    #--[2] Place image data into the exploded puzzle image:
    #   for each piece ii:
    #     x  = self.pieces[ii].r
    #     p  = self.pieces[ii].p
    #     dr = x + [dx, dy] .* p
    #     self.pieces[ii].placeInImage(epImage, x)
    raise NotImplementedError("fromMask.explodedPuzzle is not implemented yet")
#======================== puzzle.builder.fromMask ========================
|
#!/usr/bin/env python3
import sys
from orthrus.serialdevice import SerialDevice
import _thread
from orthrus.util.logger import DataLogger
from orthrus.util.argparser import ArgParser
from orthrus.ui.plotter import DataPlotter
def print_tof(distance):
    """Print a time-of-flight distance reading in meters."""
    print("Distance from transmitter: {}m".format(distance))
def get_distance_value(data):
    """Extract the distance from a line like 'distance: 1.23, ...'.

    Takes the first comma-separated field after the first ':' and
    converts it to float.
    """
    # FIX: the original shadowed the builtin `list` and used the
    # unidiomatic str.split(data, ...) unbound-method form.
    fields = data.split(":")[1].split(",")
    return float(fields[0])
def read_data_thread(thread_name, dev, isVerbose, logger=None, plotter=None):
    """Continuously read lines from dev; for each line containing a
    distance sample, forward it to the optional logger and plotter and
    print it when verbose."""
    distance_label = "distance"
    print("Starting " + thread_name + " Thread")
    print("Receiving receptor signal...")
    while True:
        line = dev.read_line()
        if distance_label not in line:
            continue
        value = get_distance_value(line)
        if logger is not None:
            logger.write(value)
        if plotter is not None:
            plotter.update_data(value)
        if isVerbose is True:
            print_tof(value)
if __name__ == '__main__':
    import time  # local import: only needed for the main-loop sleep below

    isVerbose = False
    plotter = logger = device = None
    # parse command-line options for the TOF demo
    arg = ArgParser(ArgParser.TOF_DEMO_TYPE)
    device = SerialDevice()
    device.open()
    if device.port is not None:
        isVerbose = arg.do_verbose()
        if arg.do_log() is True:
            try:
                logger = DataLogger(arg.get_file_name(), DataLogger.TOF_LOG_DATA_TYPE)
            except SystemExit:
                sys.exit()
        if arg.do_show() == "xyplot":
            plotter = DataPlotter()
        try:
            # reader runs in the background; plotter (if any) owns the
            # foreground, otherwise the main thread just idles
            _thread.start_new_thread(read_data_thread, ("Read_Data", device, isVerbose, logger, plotter))
            if plotter is not None:
                plotter.start()
            while True:
                # FIX: sleep instead of the original `pass` busy-loop,
                # which pinned a CPU core at 100%
                time.sleep(0.5)
        except KeyboardInterrupt as e:
            _thread.exit()
            device.close()
|
import shutil
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
import ansible.constants as C
from ansible.executor import playbook_executor # a class to execute
# Ansible CLI-style options bundle expected by the executor API.
Options = namedtuple(
    'Options',
    [
        'remote_user',
        'ack_pass',
        'sudo_user',
        'forks',
        'sudo_pass',
        'ask_sudo_pass',
        'verbosity',
        'module_path',
        'become',
        'become_method',
        'become_user',
        'check',
        'diff',
        'listhosts',
        'listtasks',
        'listtags',
        'syntax'
    ]
)
# FIX: namedtuple fields have no defaults, so the original bare
# `Options()` call raised a TypeError (17 missing arguments). Default
# every field to None; callers can still override via keyword args,
# e.g. Options(forks=5).
Options.__new__.__defaults__ = (None,) * len(Options._fields)
options = Options()
loader = DataLoader()   # parses inventory/vars file formats
passwords = dict()      # key-based auth: no passwords needed
inventory = InventoryManager(loader=loader, sources='myansible/hosts')
variable_mamager = VariableManager(loader=loader,)
def runpb(pb_path):
    """Execute the playbook(s) at pb_path and return the run result.

    pb_path should be a list of playbook file paths, as expected by
    PlaybookExecutor's `playbooks` argument — TODO confirm with callers.
    """
    # FIX: `playbook_executor` is a module, not a class; the original
    # called the module itself, which raises TypeError. Instantiate the
    # executor class it contains instead.
    pbex = playbook_executor.PlaybookExecutor(
        playbooks=pb_path,
        inventory=inventory,
        loader=loader,
        options=options,
        passwords=passwords
    )
    # FIX: return the result so callers can inspect the exit status
    # (the original computed it and dropped it).
    return pbex.run()
|
# coding: utf-8
from snowballing.models import *
from snowballing import dbindex
# Register this module as the citation index's most recent citation file.
dbindex.last_citation_file = dbindex.this_file(__file__)
from ..work.y2016 import michaelides2016a
from ..work.y2016 import correndo2016a
from ..work.y2017 import correndo2017a
from ..work.y2018 import moreau2018a
from ..work.y2018 import lerner2018a
from ..work.y2018 import moreau2018b

# Each record declares "<citing work> cites michaelides2016a".
# `ref` and `contexts` are intentionally empty placeholders, to be
# filled in during snowballing review.
DB(Citation(
    moreau2018a, michaelides2016a, ref="",
    contexts=[
    ],
))

DB(Citation(
    correndo2017a, michaelides2016a, ref="",
    contexts=[
    ],
))

DB(Citation(
    lerner2018a, michaelides2016a, ref="",
    contexts=[
    ],
))

DB(Citation(
    correndo2016a, michaelides2016a, ref="",
    contexts=[
    ],
))

DB(Citation(
    moreau2018b, michaelides2016a, ref="",
    contexts=[
    ],
))
|
import os, sys
#import intan module
# NOTE(review): this inserts the absolute path '/Intan/' — presumably it
# was meant to be relative to the project directory; verify on the
# deployment machine.
sys.path.insert(0, '/Intan/')
print(os.getcwd())
from load_intan_rhd_format import read_data
#Read .rhd file and returns numpy array of amplitude data
#Adapted from Esther's Concentrate RAW code
def rhd_to_numpy(file_name):
    """Load an Intan .rhd file and return its amplifier data.

    Returns the "amplifier_data" array from the parsed recording —
    presumably channels x samples; confirm against the Intan loader.
    """
    recording = read_data(file_name)              # parse the .rhd file
    amplitudes = recording["amplifier_data"]      # extract the amplitude array
    print(amplitudes, amplitudes.shape)
    return amplitudes
|
from keystone import *
# Assembly source to encode (keystone assembles text -> opcode bytes).
CODE = (
    " jmp esp;"
)
# Initialize engine in 32bit
ks = Ks(KS_ARCH_X86, KS_MODE_32)
encoding, count = ks.asm(CODE)
# ks.asm() already yields a list of ints, so the original int() cast was
# redundant and its rstrip("\n") was a no-op ({:02x} never emits newlines);
# the += loop was also quadratic.  Build the \xNN escape string in one join.
egghunter = "".join("\\x{0:02x}".format(byte) for byte in encoding)
print("egghunter = (\"" + egghunter + "\")")
# ----------------------------------------------------------------------
# Auto-generated RMG-Py species definitions (chemical kinetics input file).
# Each species(...) call declares one molecule or radical: a SMILES
# structure, ground-state energy E0, vibrational/rotational modes, spin
# multiplicity, molecular weight, collision/energy-transfer models, and a
# two-range NASA-polynomial thermochemistry fit.  All numeric values are
# machine-generated (see the comment="..." provenance strings) — do not
# hand-edit the data.
# ----------------------------------------------------------------------
species(
    label = 'C#CC([CH2])C(=C)[CH]C(26577)',
    structure = SMILES('C#CC([CH2])C([CH2])=CC'),
    E0 = (478.231,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,750,770,3400,2100,273.559],'cm^-1')),
        HinderedRotor(inertia=(0.20589,'amu*angstrom^2'), symmetry=1, barrier=(10.9289,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.2534,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.25263,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.205847,'amu*angstrom^2'), symmetry=1, barrier=(10.929,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.25256,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.119174,0.0887879,-8.75458e-05,4.76961e-08,-1.05787e-11,57667.9,29.155], Tmin=(100,'K'), Tmax=(1086.04,'K')), NASAPolynomial(coeffs=[14.3672,0.0354333,-1.38548e-05,2.46114e-09,-1.65913e-13,54521.3,-41.9294], Tmin=(1086.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(478.231,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl) + radical(Allyl_P)"""),
)

species(
    label = 'CH3CHCCH2(18175)',
    structure = SMILES('C=C=CC'),
    E0 = (145.615,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
        HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (54.0904,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)

species(
    label = 'CH2CHCCH(26391)',
    structure = SMILES('C#CC=C'),
    E0 = (274.188,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,750,770,3400,2100,3010,987.5,1337.5,450,1655,2175,525],'cm^-1')),
        HinderedRotor(inertia=(1.46338,'amu*angstrom^2'), symmetry=1, barrier=(33.6459,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (52.0746,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(2968.28,'J/mol'), sigma=(5.18,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.87083,0.0182042,1.06711e-05,-2.72492e-08,1.19478e-11,33023.8,11.2934], Tmin=(100,'K'), Tmax=(955.249,'K')), NASAPolynomial(coeffs=[8.52653,0.0108962,-3.56564e-06,6.31243e-10,-4.51891e-14,31196.2,-19.6435], Tmin=(955.249,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(274.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(178.761,'J/(mol*K)'), label="""CH2CHCCH""", comment="""Thermo library: DFT_QCI_thermo"""),
)

species(
    label = 'C#CC1CC1([CH2])[CH]C(28161)',
    structure = SMILES('C#CC1CC1([CH2])[CH]C'),
    E0 = (586.371,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.579915,0.0597992,-9.59294e-07,-4.24954e-08,2.08555e-11,70661,29.7444], Tmin=(100,'K'), Tmax=(979.017,'K')), NASAPolynomial(coeffs=[16.7225,0.0300351,-1.08047e-05,1.96693e-09,-1.40153e-13,65765.9,-56.6494], Tmin=(979.017,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(586.371,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsCs) + group(Cs-CtCsCsH) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Ct-CtCs) + group(Ct-CtH) + ring(Cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""),
)

species(
    label = '[CH]=C1CC1C([CH2])=CC(28188)',
    structure = SMILES('[CH]=C1CC1C([CH2])=CC'),
    E0 = (582.161,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.145889,0.0792655,-6.11168e-05,2.4292e-08,-3.88012e-12,70177,27.9595], Tmin=(100,'K'), Tmax=(1488.97,'K')), NASAPolynomial(coeffs=[18.2896,0.0297397,-1.12238e-05,1.95292e-09,-1.29321e-13,64687,-68.3197], Tmin=(1488.97,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(582.161,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(419.881,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Cds_P) + radical(Allyl_P)"""),
)

species(
    label = '[CH]=C1CC(=CC)C1[CH2](27973)',
    structure = SMILES('[CH]=C1CC(=CC)C1[CH2]'),
    E0 = (578.719,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.675307,0.0598105,-6.09802e-06,-3.33792e-08,1.68688e-11,69735.4,28.5336], Tmin=(100,'K'), Tmax=(985.123,'K')), NASAPolynomial(coeffs=[14.787,0.0331075,-1.20261e-05,2.15999e-09,-1.51096e-13,65470.4,-46.8712], Tmin=(985.123,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(578.719,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(Cyclobutane) + radical(Cds_P) + radical(Isobutyl)"""),
)

# Atomic hydrogen (SMILES '[H]'); values from the BurkeH2O2 thermo library.
species(
    label = 'H(3)',
    structure = SMILES('[H]'),
    E0 = (211.792,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (1.00794,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)

species(
    label = 'C#CC(=C)C([CH2])=CC(28189)',
    structure = SMILES('C#CC(=C)C([CH2])=CC'),
    E0 = (401.291,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2175,525,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,750,770,3400,2100,325,375,415,465,420,450,1700,1750,3000,3100,440,815,1455,1000,180],'cm^-1')),
        HinderedRotor(inertia=(1.2066,'amu*angstrom^2'), symmetry=1, barrier=(27.7422,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.20834,'amu*angstrom^2'), symmetry=1, barrier=(27.7822,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.20664,'amu*angstrom^2'), symmetry=1, barrier=(27.7429,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.20778,'amu*angstrom^2'), symmetry=1, barrier=(27.7692,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (105.157,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0056467,0.0751701,-4.24627e-05,-4.06682e-09,8.35668e-12,48419.8,26.6058], Tmin=(100,'K'), Tmax=(994.465,'K')), NASAPolynomial(coeffs=[19.1877,0.0256074,-9.39261e-06,1.71028e-09,-1.21159e-13,43235.7,-72.7558], Tmin=(994.465,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(401.291,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Ct) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH) + radical(Allyl_P)"""),
)

species(
    label = 'C=[C][CH]C(18176)',
    structure = SMILES('[CH2][C]=CC'),
    E0 = (361.056,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),
        HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (54.0904,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""),
)

species(
    label = 'C2H(33)',
    structure = SMILES('[C]#C'),
    E0 = (557.301,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([750,770,3400,2100],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (25.0293,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(1737.73,'J/mol'), sigma=(4.1,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.5, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.89868,0.0132988,-2.80733e-05,2.89485e-08,-1.07502e-11,67061.6,6.18548], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.6627,0.00382492,-1.36633e-06,2.13455e-10,-1.23217e-14,67168.4,3.92206], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(557.301,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(62.3585,'J/(mol*K)'), label="""C2H""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)

species(
    label = '[CH2]C(C=C)=CC(24210)',
    structure = SMILES('[CH2]C(C=C)=CC'),
    E0 = (171.804,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([350,440,435,1725,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),
        HinderedRotor(inertia=(0.902009,'amu*angstrom^2'), symmetry=1, barrier=(20.739,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.902046,'amu*angstrom^2'), symmetry=1, barrier=(20.7398,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.901955,'amu*angstrom^2'), symmetry=1, barrier=(20.7377,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (81.1357,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.17187,0.0502492,-6.01379e-06,-2.94563e-08,1.53252e-11,20775.8,20.8331], Tmin=(100,'K'), Tmax=(976.501,'K')), NASAPolynomial(coeffs=[14.417,0.0239289,-8.49401e-06,1.53256e-09,-1.08637e-13,16857.1,-49.5715], Tmin=(976.501,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(171.804,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(Allyl_P)"""),
)

species(
    label = '[CH]=[C]C=C(4699)',
    structure = SMILES('[CH]=C=C[CH2]'),
    E0 = (451.584,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3120,650,792.5,1650,540,610,2055,3000,3100,440,815,1455,1000,180,1024.85,1025.53,1026.61],'cm^-1')),
        HinderedRotor(inertia=(0.00938781,'amu*angstrom^2'), symmetry=1, barrier=(7.01846,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (52.0746,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.76805,0.020302,8.75519e-06,-2.87666e-08,1.37354e-11,54363.7,13.5565], Tmin=(100,'K'), Tmax=(915.031,'K')), NASAPolynomial(coeffs=[9.46747,0.00887314,-1.78262e-06,2.38534e-10,-1.6263e-14,52390.1,-22.2544], Tmin=(915.031,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(451.584,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(C=C=CJ) + radical(Allyl_P)"""),
)

species(
    label = 'C#C[C](C)C([CH2])=CC(28190)',
    structure = SMILES('C#CC(C)=C([CH2])[CH]C'),
    E0 = (395.054,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.433095,0.0746052,-5.68201e-05,2.37605e-08,-4.16177e-12,47645.5,25.2835], Tmin=(100,'K'), Tmax=(1324.72,'K')), NASAPolynomial(coeffs=[12.6988,0.0375688,-1.4883e-05,2.65558e-09,-1.78847e-13,44395.8,-37.3403], Tmin=(1324.72,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(395.054,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCtCs) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH) + radical(CTCC=CCJ) + radical(Allyl_S)"""),
)

species(
    label = 'C#C[C]([CH2])C(C)=CC(28191)',
    structure = SMILES('C#CC([CH2])=C(C)[CH]C'),
    E0 = (425.137,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0652111,0.0779655,-5.89652e-05,2.30559e-08,-3.6375e-12,51288.1,27.7197], Tmin=(100,'K'), Tmax=(1501.02,'K')), NASAPolynomial(coeffs=[17.7225,0.0305637,-1.15955e-05,2.01693e-09,-1.33366e-13,45948.1,-65.3199], Tmin=(1501.02,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(425.137,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCtCs) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH) + radical(Allyl_S) + radical(Allyl_P)"""),
)

species(
    label = 'C#CC([CH2])C(C)=[C]C(28192)',
    structure = SMILES('C#CC([CH2])C(C)=[C]C'),
    E0 = (564.574,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,750,770,3400,2100,217.043],'cm^-1')),
        HinderedRotor(inertia=(0.324075,'amu*angstrom^2'), symmetry=1, barrier=(10.8268,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.323976,'amu*angstrom^2'), symmetry=1, barrier=(10.827,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.323962,'amu*angstrom^2'), symmetry=1, barrier=(10.8268,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.323935,'amu*angstrom^2'), symmetry=1, barrier=(10.8266,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.97991,'amu*angstrom^2'), symmetry=1, barrier=(66.1633,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.144112,0.0957746,-0.000120541,9.01514e-08,-2.77085e-11,68047.4,29.4398], Tmin=(100,'K'), Tmax=(823.938,'K')), NASAPolynomial(coeffs=[10.489,0.04168,-1.75569e-05,3.18119e-09,-2.14291e-13,66379.2,-19.29], Tmin=(823.938,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(564.574,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl) + radical(Cds_S)"""),
)

species(
    label = 'C#CC(C)C([CH2])=[C]C(28193)',
    structure = SMILES('C#CC(C)C([CH2])=[C]C'),
    E0 = (510.991,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,750,770,3400,2100,214.155],'cm^-1')),
        HinderedRotor(inertia=(0.465745,'amu*angstrom^2'), symmetry=1, barrier=(15.1577,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.465749,'amu*angstrom^2'), symmetry=1, barrier=(15.1577,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.533594,'amu*angstrom^2'), symmetry=1, barrier=(17.3658,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.21014,'amu*angstrom^2'), symmetry=1, barrier=(71.9291,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.465747,'amu*angstrom^2'), symmetry=1, barrier=(15.1577,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0602892,0.0888894,-8.95969e-05,5.0727e-08,-1.19088e-11,61598,27.2103], Tmin=(100,'K'), Tmax=(1016.84,'K')), NASAPolynomial(coeffs=[12.5549,0.039738,-1.70901e-05,3.18921e-09,-2.21058e-13,59057.1,-33.2775], Tmin=(1016.84,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(510.991,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Allyl_P) + radical(Cds_S)"""),
)

species(
    label = 'C#CC([CH2])[C](C)C=C(27304)',
    structure = SMILES('C#CC([CH2])C(C)=C[CH2]'),
    E0 = (478.231,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,750,770,3400,2100,273.549],'cm^-1')),
        HinderedRotor(inertia=(0.205842,'amu*angstrom^2'), symmetry=1, barrier=(10.9286,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.25261,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.25479,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.205694,'amu*angstrom^2'), symmetry=1, barrier=(10.9292,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(2.25325,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.11917,0.0887879,-8.75457e-05,4.7696e-08,-1.05786e-11,57667.9,29.155], Tmin=(100,'K'), Tmax=(1086.06,'K')), NASAPolynomial(coeffs=[14.3673,0.0354333,-1.38548e-05,2.46114e-09,-1.65912e-13,54521.3,-41.9295], Tmin=(1086.06,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(478.231,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Allyl_P) + radical(Isobutyl)"""),
)

species(
    label = '[C]#CC(C)C([CH2])=CC(28194)',
    structure = SMILES('[C]#CC(C)C([CH2])=CC'),
    E0 = (610.293,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,272.945,272.959],'cm^-1')),
        HinderedRotor(inertia=(0.199624,'amu*angstrom^2'), symmetry=1, barrier=(10.5537,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.199622,'amu*angstrom^2'), symmetry=1, barrier=(10.5537,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.199618,'amu*angstrom^2'), symmetry=1, barrier=(10.5537,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.551799,'amu*angstrom^2'), symmetry=1, barrier=(29.1704,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.49121,'amu*angstrom^2'), symmetry=1, barrier=(78.8337,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0356133,0.0888033,-9.08766e-05,5.30193e-08,-1.27982e-11,73542.7,27.5658], Tmin=(100,'K'), Tmax=(993.307,'K')), NASAPolynomial(coeffs=[12.296,0.039431,-1.63184e-05,2.97846e-09,-2.0362e-13,71107.1,-31.501], Tmin=(993.307,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(610.293,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Acetyl) + radical(Allyl_P)"""),
)

species(
    label = '[C]#CC([CH2])C(C)=CC(28195)',
    structure = SMILES('[C]#CC([CH2])C(C)=CC'),
    E0 = (663.876,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,256.105,256.106],'cm^-1')),
        HinderedRotor(inertia=(0.184878,'amu*angstrom^2'), symmetry=1, barrier=(8.60541,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.184881,'amu*angstrom^2'), symmetry=1, barrier=(8.60543,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.184882,'amu*angstrom^2'), symmetry=1, barrier=(8.60541,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.184888,'amu*angstrom^2'), symmetry=1, barrier=(8.60541,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.41244,'amu*angstrom^2'), symmetry=1, barrier=(65.7405,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.135906,0.0953045,-0.000120467,9.06027e-08,-2.77377e-11,79990.6,29.6769], Tmin=(100,'K'), Tmax=(867.038,'K')), NASAPolynomial(coeffs=[10.2997,0.0412444,-1.67065e-05,2.95102e-09,-1.95189e-13,78403.4,-17.8977], Tmin=(867.038,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(663.876,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Acetyl) + radical(Isobutyl)"""),
)

species(
    label = 'C#CC(C)[C]([CH2])C=C(27307)',
    structure = SMILES('C#CC(C)C([CH2])=C[CH2]'),
    E0 = (424.648,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.420985,0.0878918,-7.7617e-05,3.56373e-08,-6.53844e-12,51240.4,28.7396], Tmin=(100,'K'), Tmax=(1313.17,'K')), NASAPolynomial(coeffs=[18.6246,0.0298784,-1.13508e-05,1.99585e-09,-1.33912e-13,46238.3,-68.3337], Tmin=(1313.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(424.648,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Allyl_P) + radical(Allyl_P)"""),
)

species(
    label = 'C#CC([CH2])[C]1CC1C(28196)',
    structure = SMILES('C#CC([CH2])[C]1CC1C'),
    E0 = (580.354,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.669857,0.064943,-2.93956e-05,-6.52415e-09,7.46031e-12,69927.8,30.4812], Tmin=(100,'K'), Tmax=(959.768,'K')), NASAPolynomial(coeffs=[12.4246,0.0349174,-1.21084e-05,2.05553e-09,-1.37177e-13,66797.9,-30.2962], Tmin=(959.768,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(580.354,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-CsCsCsH) + group(Cs-CtCsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Ct-CtCs) + group(Ct-CtH) + ring(Cyclopropane) + radical(Isobutyl) + radical(Tertalkyl)"""),
)

species(
    label = '[CH]=[C]C1CC(C)C1=C(28127)',
    structure = SMILES('[CH]=[C]C1CC(C)C1=C'),
    E0 = (625.241,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[10.9244,-0.001505,0.000117043,-1.16756e-07,2.67166e-11,74817.7,-18.6413], Tmin=(100,'K'), Tmax=(1755.11,'K')), NASAPolynomial(coeffs=[83.1059,0.0264574,-7.13474e-05,1.72839e-08,-1.2761e-12,19836.3,-491.931], Tmin=(1755.11,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(625.241,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(methylenecyclobutane) + radical(Cds_P) + radical(Cds_S)"""),
)

species(
    label = '[CH2]C(=CC)C1[C]=CC1(28197)',
    structure = SMILES('[CH2]C(=CC)C1[C]=CC1'),
    E0 = (545.074,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.448191,0.0672177,-3.16541e-05,-2.52403e-09,4.55203e-12,65694.3,27.01], Tmin=(100,'K'), Tmax=(1100.31,'K')), NASAPolynomial(coeffs=[14.4049,0.0354718,-1.42665e-05,2.62744e-09,-1.82514e-13,61473.3,-46.8815], Tmin=(1100.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(545.074,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(419.881,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(Cyclobutene) + radical(Allyl_P) + radical(cyclobutene-vinyl)"""),
)

species(
    label = '[CH2]C1[C]=CCC1=CC(27951)',
    structure = SMILES('[CH2]C1[C]=CCC1=CC'),
    E0 = (508.364,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.873141,0.0601265,-2.56443e-05,-1.26391e-10,2.11445e-12,61261.7,28.1928], Tmin=(100,'K'), Tmax=(1226.28,'K')), NASAPolynomial(coeffs=[11.0884,0.0393858,-1.56625e-05,2.81296e-09,-1.90323e-13,57810.5,-27.0305], Tmin=(1226.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(508.364,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(4-Methylenecyclopentene) + radical(cyclopentene-vinyl) + radical(Isobutyl)"""),
)

species(
    label = 'C#CC(=C)C(C)=CC(28198)',
    structure = SMILES('C#CC(=C)C(C)=CC'),
    E0 = (249.792,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (106.165,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0629506,0.0783113,-5.31144e-05,1.17514e-08,1.42569e-12,30198.7,26.5329], Tmin=(100,'K'), Tmax=(1062.22,'K')), NASAPolynomial(coeffs=[17.4482,0.0307437,-1.18891e-05,2.16216e-09,-1.50029e-13,25442,-63.8846], Tmin=(1062.22,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.792,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Ct) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH)"""),
)

# Singlet methylene, CH2(S); values from the Klippenstein_Glarborg2016 library.
species(
    label = 'CH2(S)(23)',
    structure = SMILES('[CH2]'),
    E0 = (419.862,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (14.0266,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)

species(
    label = 'C#CC([CH2])C([CH2])=C(28199)',
    structure = SMILES('C#CC([CH2])C([CH2])=C'),
    E0 = (514.257,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2175,525,2950,3100,1380,975,1025,1650,750,770,3400,2100,350,440,435,1725,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,1380,1390,370,380,2900,435,414.588],'cm^-1')),
        HinderedRotor(inertia=(3.33367,'amu*angstrom^2'), symmetry=1, barrier=(76.6477,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.361083,'amu*angstrom^2'), symmetry=1, barrier=(14.3649,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.629114,'amu*angstrom^2'), symmetry=1, barrier=(76.6461,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.194461,'amu*angstrom^2'), symmetry=1, barrier=(23.7057,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (92.1384,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.333188,0.0755357,-7.5244e-05,4.0144e-08,-8.53195e-12,61987.2,25.0291], Tmin=(100,'K'), Tmax=(1146.09,'K')), NASAPolynomial(coeffs=[14.8868,0.0247408,-8.76231e-06,1.47157e-09,-9.60599e-14,58651.3,-47.1677], Tmin=(1146.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(514.257,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl) + radical(Allyl_P)"""),
)
species(
label = 'C#C[CH]CC(=C)[CH]C(26576)',
structure = SMILES('C#C[CH]CC([CH2])=CC'),
E0 = (452.432,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2175,525,750,770,3400,2100,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,280.958,4000],'cm^-1')),
HinderedRotor(inertia=(0.31491,'amu*angstrom^2'), symmetry=1, barrier=(17.4021,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.69638,'amu*angstrom^2'), symmetry=1, barrier=(93.7549,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.69715,'amu*angstrom^2'), symmetry=1, barrier=(93.852,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.69066,'amu*angstrom^2'), symmetry=1, barrier=(93.8213,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.68589,'amu*angstrom^2'), symmetry=1, barrier=(93.8855,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3617.13,'J/mol'), sigma=(6.33257,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=564.99 K, Pc=32.32 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0773253,0.0821768,-7.17924e-05,3.45969e-08,-6.76576e-12,54568.1,30.1996], Tmin=(100,'K'), Tmax=(1230.35,'K')), NASAPolynomial(coeffs=[15.1429,0.032694,-1.14643e-05,1.90791e-09,-1.23502e-13,50822.9,-46.3841], Tmin=(1230.35,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(452.432,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CtCsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Allyl_P) + radical(Sec_Propargyl)"""),
)
species(
label = 'C#CC[CH]C([CH2])=CC(28163)',
structure = SMILES('C#CC[CH]C([CH2])=CC'),
E0 = (447.335,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0596728,0.0780646,-6.02488e-05,2.46605e-08,-4.08945e-12,53957.4,29.2035], Tmin=(100,'K'), Tmax=(1434.13,'K')), NASAPolynomial(coeffs=[16.7108,0.0312897,-1.13261e-05,1.91866e-09,-1.25094e-13,49147.2,-57.7514], Tmin=(1434.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(447.335,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CtCsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Allyl_S) + radical(Allyl_P)"""),
)
species(
label = 'C#CC([CH2])C[C]=CC(26585)',
structure = SMILES('C#CC([CH2])C[C]=CC'),
E0 = (603.796,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2175,525,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,1685,370,1380,1390,370,380,2900,435,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,750,770,3400,2100,218.855,219.126],'cm^-1')),
HinderedRotor(inertia=(0.00350253,'amu*angstrom^2'), symmetry=1, barrier=(0.119786,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.161609,'amu*angstrom^2'), symmetry=1, barrier=(5.50317,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.9251,'amu*angstrom^2'), symmetry=1, barrier=(101.848,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.170685,'amu*angstrom^2'), symmetry=1, barrier=(5.50478,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(3.01599,'amu*angstrom^2'), symmetry=1, barrier=(102.07,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3619.53,'J/mol'), sigma=(6.34117,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=565.36 K, Pc=32.21 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.301188,0.0802179,-7.47905e-05,4.08001e-08,-9.26938e-12,72754.1,32.1875], Tmin=(100,'K'), Tmax=(1051.89,'K')), NASAPolynomial(coeffs=[11.4468,0.0378349,-1.43523e-05,2.4957e-09,-1.65688e-13,70409.3,-22.1473], Tmin=(1051.89,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(603.796,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CtCsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl) + radical(Cds_S)"""),
)
species(
label = 'C#CC1CCC1=CC(28109)',
structure = SMILES('C#CC1CCC1=CC'),
E0 = (277.58,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[10.7388,-0.000635209,0.000116702,-1.17013e-07,2.68619e-11,33013.3,-20.3185], Tmin=(100,'K'), Tmax=(1749.67,'K')), NASAPolynomial(coeffs=[82.4395,0.0271591,-7.14814e-05,1.73124e-08,-1.27893e-12,-21422.2,-490.203], Tmin=(1749.67,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(277.58,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + ring(methylenecyclobutane)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'C#C[CH]C([CH2])=CC(28200)',
structure = SMILES('C#CC=C([CH2])[CH]C'),
E0 = (435.51,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2175,525,750,770,3400,2100,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.417901,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.292485,'amu*angstrom^2'), symmetry=1, barrier=(20.3659,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.418421,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.421287,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.951004,0.0577526,-2.77367e-05,-4.57001e-09,5.78378e-12,52497.7,21.8521], Tmin=(100,'K'), Tmax=(1022.91,'K')), NASAPolynomial(coeffs=[13.4773,0.0273447,-1.03855e-05,1.87403e-09,-1.29861e-13,48963.2,-43.6142], Tmin=(1022.91,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(435.51,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCtH) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH) + radical(CTCC=CCJ) + radical(Allyl_S)"""),
)
species(
label = 'C#CC([CH2])[C]=CC(28201)',
structure = SMILES('C#CC([CH2])[C]=CC'),
E0 = (603.629,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2175,525,3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,1685,370,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,750,770,3400,2100,180],'cm^-1')),
HinderedRotor(inertia=(0.508653,'amu*angstrom^2'), symmetry=1, barrier=(11.6949,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.571988,'amu*angstrom^2'), symmetry=1, barrier=(13.1511,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.84644,'amu*angstrom^2'), symmetry=1, barrier=(65.4453,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.508352,'amu*angstrom^2'), symmetry=1, barrier=(11.688,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (92.1384,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.673053,0.0760649,-9.08781e-05,6.39507e-08,-1.85024e-11,72716.9,25.751], Tmin=(100,'K'), Tmax=(838.771,'K')), NASAPolynomial(coeffs=[9.75295,0.0327664,-1.34507e-05,2.41384e-09,-1.62102e-13,71193.7,-16.4584], Tmin=(838.771,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(603.629,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Cds_S) + radical(Isobutyl)"""),
)
species(
label = '[CH]=C1C([CH2])C(=C)C1C(28062)',
structure = SMILES('[CH]=C1C([CH2])C(=C)C1C'),
E0 = (581.175,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.396928,0.0684962,-3.10124e-05,-7.426e-09,7.68343e-12,70038.3,25.9269], Tmin=(100,'K'), Tmax=(1006.96,'K')), NASAPolynomial(coeffs=[14.7201,0.0338243,-1.24707e-05,2.21695e-09,-1.52469e-13,66027,-48.8681], Tmin=(1006.96,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(581.175,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(Cyclobutane) + radical(Isobutyl) + radical(Cds_P)"""),
)
species(
label = 'C#CC([CH2])C(=C)C=C(27302)',
structure = SMILES('C#CC([CH2])C(=C)C=C'),
E0 = (452.02,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2175,525,3010,987.5,1337.5,450,1655,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,750,770,3400,2100,180,180],'cm^-1')),
HinderedRotor(inertia=(1.03125,'amu*angstrom^2'), symmetry=1, barrier=(23.7105,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.03144,'amu*angstrom^2'), symmetry=1, barrier=(23.7148,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.03087,'amu*angstrom^2'), symmetry=1, barrier=(23.7018,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.03116,'amu*angstrom^2'), symmetry=1, barrier=(23.7085,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (105.157,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.254927,0.0862255,-7.86394e-05,3.29108e-08,-3.8019e-12,54525.2,28.2262], Tmin=(100,'K'), Tmax=(957.701,'K')), NASAPolynomial(coeffs=[18.0421,0.0256535,-8.59123e-06,1.42886e-09,-9.44497e-14,50293.8,-63.05], Tmin=(957.701,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(452.02,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl)"""),
)
species(
label = 'C#CC([CH2])C(=C)C[CH2](28202)',
structure = SMILES('C#CC([CH2])C(=C)C[CH2]'),
E0 = (545.359,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2175,525,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,750,770,3400,2100,180,3134.32],'cm^-1')),
HinderedRotor(inertia=(0.357705,'amu*angstrom^2'), symmetry=1, barrier=(16.3837,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0278674,'amu*angstrom^2'), symmetry=1, barrier=(16.3818,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(3.47977,'amu*angstrom^2'), symmetry=1, barrier=(80.0067,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.712543,'amu*angstrom^2'), symmetry=1, barrier=(16.3828,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(3.47974,'amu*angstrom^2'), symmetry=1, barrier=(80.006,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0685429,0.0881258,-8.77578e-05,4.84677e-08,-1.09054e-11,65739.2,31.5861], Tmin=(100,'K'), Tmax=(1070.9,'K')), NASAPolynomial(coeffs=[14.0556,0.0353696,-1.38626e-05,2.46576e-09,-1.66326e-13,62714.1,-37.5223], Tmin=(1070.9,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(545.359,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl) + radical(RCCJ)"""),
)
species(
label = 'C#C[C]([CH2])C(=C)CC(28203)',
structure = SMILES('C#CC([CH2])=C([CH2])CC'),
E0 = (405.441,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0383366,0.0795971,-6.48939e-05,2.85317e-08,-5.11843e-12,48912.1,28.0908], Tmin=(100,'K'), Tmax=(1324.78,'K')), NASAPolynomial(coeffs=[15.3594,0.0333369,-1.2515e-05,2.17303e-09,-1.44251e-13,44852.8,-50.1334], Tmin=(1324.78,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(405.441,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCtCs) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH) + radical(Allyl_P) + radical(CTCC=CCJ)"""),
)
species(
label = '[CH]=C(CC)C([CH2])C#C(28204)',
structure = SMILES('[CH]=C(CC)C([CH2])C#C'),
E0 = (587.209,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,2175,525,1380,1390,370,380,2900,435,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,750,770,3400,2100,296.774],'cm^-1')),
HinderedRotor(inertia=(0.176524,'amu*angstrom^2'), symmetry=1, barrier=(11.1472,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.1761,'amu*angstrom^2'), symmetry=1, barrier=(11.1657,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.14745,'amu*angstrom^2'), symmetry=1, barrier=(73.406,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.165531,'amu*angstrom^2'), symmetry=1, barrier=(11.1453,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.35132,'amu*angstrom^2'), symmetry=1, barrier=(22.1888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.182989,0.0909487,-9.40559e-05,5.36951e-08,-1.24226e-11,70776.4,30.614], Tmin=(100,'K'), Tmax=(1045.61,'K')), NASAPolynomial(coeffs=[14.4735,0.0348822,-1.36277e-05,2.41706e-09,-1.627e-13,67711.3,-40.7491], Tmin=(1045.61,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(587.209,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Cds_P) + radical(Isobutyl)"""),
)
species(
label = '[CH]=C([CH]C)C(C)C#C(26415)',
structure = SMILES('[CH]C(=CC)C(C)C#C'),
E0 = (492.334,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2175,525,750,770,3400,2100,350,440,435,1725,1380,1390,370,380,2900,435,200,800,1200,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.0939173,0.0867805,-7.35219e-05,3.52309e-08,-7.18591e-12,59354,27.4382], Tmin=(100,'K'), Tmax=(1140.2,'K')), NASAPolynomial(coeffs=[11.8233,0.0456323,-1.93896e-05,3.58051e-09,-2.46327e-13,56679.2,-30.6885], Tmin=(1140.2,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(492.334,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Ct-CtCs) + group(Ct-CtH) + radical(AllylJ2_triplet)"""),
)
species(
label = '[C]#CC([CH2])C(=C)CC(28205)',
structure = SMILES('[C]#CC([CH2])C(=C)CC'),
E0 = (677.256,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2175,525,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,200,800,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0955444,0.0910583,-0.000100214,6.34548e-08,-1.64035e-11,81601.7,30.6407], Tmin=(100,'K'), Tmax=(938.16,'K')), NASAPolynomial(coeffs=[12.4815,0.0374338,-1.44751e-05,2.52746e-09,-1.67591e-13,79241.8,-29.2332], Tmin=(938.16,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(677.256,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Acetyl) + radical(Isobutyl)"""),
)
species(
label = '[CH]=[C]C1CCC1=CC(28110)',
structure = SMILES('[CH]=[C]C1CCC1=CC'),
E0 = (620.968,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[11.4585,-0.00407591,0.000119164,-1.16947e-07,2.65163e-11,74276.4,-19.3863], Tmin=(100,'K'), Tmax=(1769.07,'K')), NASAPolynomial(coeffs=[84.8065,0.0254037,-7.14488e-05,1.72963e-08,-1.27453e-12,17760.4,-501.475], Tmin=(1769.07,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(620.968,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(methylenecyclobutane) + radical(Cds_S) + radical(Cds_P)"""),
)
species(
label = '[CH2]C1[C]=CC(C)C1=C(28074)',
structure = SMILES('[CH2]C1[C]=CC(C)C1=C'),
E0 = (510.82,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.890564,0.0655118,-3.97256e-05,1.25488e-08,-1.67154e-12,61551.2,24.5099], Tmin=(100,'K'), Tmax=(1645.37,'K')), NASAPolynomial(coeffs=[11.8064,0.0389747,-1.55333e-05,2.74659e-09,-1.82185e-13,57959,-33.5884], Tmin=(1645.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(510.82,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(4-Methylenecyclopentene) + radical(Isobutyl) + radical(cyclopentene-vinyl)"""),
)
species(
label = 'C#CC(=C)C(=C)CC(28206)',
structure = SMILES('C#CC(=C)C(=C)CC'),
E0 = (263.172,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.00405388,0.0739208,-3.27625e-05,-1.48196e-08,1.21474e-11,31809,27.4258], Tmin=(100,'K'), Tmax=(989.432,'K')), NASAPolynomial(coeffs=[19.1617,0.0277024,-1.00903e-05,1.83893e-09,-1.30637e-13,26486,-72.5674], Tmin=(989.432,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(263.172,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Ct) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Ct-Ct(Cds-Cds)) + group(Ct-CtH)"""),
)
species(
label = 'C#CC(C)C(=C)C=C(27316)',
structure = SMILES('C#CC(C)C(=C)C=C'),
E0 = (246.937,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.331557,0.0852241,-6.84851e-05,2.33815e-08,-1.42364e-12,29864.4,26.7198], Tmin=(100,'K'), Tmax=(1025.41,'K')), NASAPolynomial(coeffs=[18.5959,0.028449,-1.03867e-05,1.83279e-09,-1.25409e-13,25085.9,-69.442], Tmin=(1025.41,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(246.937,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(415.724,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH)"""),
)
species(
label = 'C#CC([CH2])C(C)[C]=C(26581)',
structure = SMILES('C#CC([CH2])C(C)[C]=C'),
E0 = (608.07,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([750,770,3400,2100,2175,525,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,1380,1383.33,1386.67,1390,370,373.333,376.667,380,2800,3000,430,440,3000,3100,440,815,1455,1000,1685,370,346.654,3567.82],'cm^-1')),
HinderedRotor(inertia=(0.156181,'amu*angstrom^2'), symmetry=1, barrier=(13.1159,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.976502,'amu*angstrom^2'), symmetry=1, barrier=(82.4596,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.979884,'amu*angstrom^2'), symmetry=1, barrier=(82.5209,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156716,'amu*angstrom^2'), symmetry=1, barrier=(13.0809,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.993785,'amu*angstrom^2'), symmetry=1, barrier=(82.5432,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3560.51,'J/mol'), sigma=(6.30174,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=556.14 K, Pc=32.28 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0301223,0.0807078,-7.08754e-05,3.45879e-08,-6.83665e-12,73285.7,33.5674], Tmin=(100,'K'), Tmax=(1221.04,'K')), NASAPolynomial(coeffs=[14.9207,0.0317301,-1.07075e-05,1.73688e-09,-1.10546e-13,69634.6,-41.5471], Tmin=(1221.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(608.07,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(411.566,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-CtCsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Cds_S) + radical(Isobutyl)"""),
)
species(
label = 'C#CC1CC(C)C1=C(28126)',
structure = SMILES('C#CC1CC(C)C1=C'),
E0 = (281.854,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (106.165,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[10.2139,0.00184847,0.000114806,-1.17026e-07,2.71212e-11,33554.2,-19.6082], Tmin=(100,'K'), Tmax=(1735.08,'K')), NASAPolynomial(coeffs=[80.7972,0.0281619,-7.13661e-05,1.72989e-08,-1.28054e-12,-19393.6,-481.024], Tmin=(1735.08,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(281.854,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(424.038,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + ring(methylenecyclobutane)"""),
)
species(
label = 'CHCH3(T)(95)',
structure = SMILES('[CH]C'),
E0 = (343.893,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),
HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'C#CC([CH2])[C]=C(27503)',
structure = SMILES('C#CC([CH2])[C]=C'),
E0 = (639.655,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,2175,525,2950,3100,1380,975,1025,1650,750,770,3400,2100,3000,3100,440,815,1455,1000,1685,370,180],'cm^-1')),
HinderedRotor(inertia=(5.20297,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(5.20297,'amu*angstrom^2'), symmetry=1, barrier=(119.627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.564352,'amu*angstrom^2'), symmetry=1, barrier=(12.9756,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (78.1118,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.2595,0.0612351,-7.30673e-05,4.92288e-08,-1.33647e-11,77030.5,21.837], Tmin=(100,'K'), Tmax=(899.573,'K')), NASAPolynomial(coeffs=[9.94523,0.0226112,-8.6597e-06,1.494e-09,-9.79377e-14,75467.9,-19.1468], Tmin=(899.573,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(639.655,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CtCsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Isobutyl) + radical(Cds_S)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (478.231,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (586.371,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (582.161,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (578.719,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (631.074,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (652.933,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (751.051,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (629.226,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (576.699,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (680.807,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (726.495,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (555.299,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (628.019,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (706.334,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (703.775,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (533.879,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (812.64,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (709.448,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (627.307,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (608.912,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (536.807,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (541.632,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (934.119,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (638.167,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (638.167,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS26',
E0 = (773.834,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS27',
E0 = (486.516,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS28',
E0 = (817.073,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS29',
E0 = (985.192,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS30',
E0 = (581.175,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS31',
E0 = (663.812,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS32',
E0 = (659.164,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS33',
E0 = (687.431,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS34',
E0 = (732.394,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS35',
E0 = (536.643,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS36',
E0 = (720.661,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS37',
E0 = (623.333,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS38',
E0 = (536.389,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS39',
E0 = (541.632,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS40',
E0 = (503.205,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS41',
E0 = (702.545,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS42',
E0 = (486.516,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS43',
E0 = (983.547,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['CH3CHCCH2(18175)', 'CH2CHCCH(26391)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC1CC1([CH2])[CH]C(28161)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(1.68e+09,'s^-1'), n=0.84, Ea=(108.139,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 104.1 to 108.1 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH]=C1CC1C([CH2])=CC(28188)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(1.881e+08,'s^-1'), n=1.062, Ea=(103.929,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 18 used for R4_S_T;triplebond_intra_H;radadd_intra_cs2H
Exact match found for rate rule [R4_S_T;triplebond_intra_H;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Exocyclic
Ea raised from 102.0 to 103.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction4',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH]=C1CC(=CC)C1[CH2](27973)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(1.46159e+06,'s^-1'), n=1.55572, Ea=(100.488,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_cs2H] for rate rule [R5_SS_T;triplebond_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Exocyclic
Ea raised from 96.4 to 100.5 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction5',
reactants = ['H(3)', 'C#CC(=C)C([CH2])=CC(28189)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(1.36e+08,'cm^3/(mol*s)'), n=1.64, Ea=(17.9912,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2645 used for Cds-CdCt_Cds-HH;HJ
Exact match found for rate rule [Cds-CdCt_Cds-HH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['C=[C][CH]C(18176)', 'CH2CHCCH(26391)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(0.00294841,'m^3/(mol*s)'), n=2.48333, Ea=(17.6885,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-CtH_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction7',
reactants = ['C2H(33)', '[CH2]C(C=C)=CC(24210)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(0.00684716,'m^3/(mol*s)'), n=2.49, Ea=(21.946,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-CdH_Cds-HH;CJ] for rate rule [Cds-CdH_Cds-HH;CtJ_Ct]
Euclidian distance = 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH]=[C]C=C(4699)', 'CH3CHCCH2(18175)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction9',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#C[C](C)C([CH2])=CC(28190)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(58.4615,'s^-1'), n=3.15787, Ea=(98.4673,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_noH]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#C[C]([CH2])C(C)=CC(28191)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(5.4947e+07,'s^-1'), n=1.58167, Ea=(202.575,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_2Cd;C_rad_out_2H;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['C#CC([CH2])C(C)=[C]C(28192)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H
Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['C#CC(C)C([CH2])=[C]C(28193)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['C#CC([CH2])[C](C)C=C(27304)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(800000,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction14',
reactants = ['[C]#CC(C)C([CH2])=CC(28194)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.39293e+07,'s^-1'), n=1.32074, Ea=(96.0416,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_RSS;Y_rad_out;Cs_H_out_2H] for rate rule [R4H_TSS;Ct_rad_out;Cs_H_out_2H]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction15',
reactants = ['[C]#CC([CH2])C(C)=CC(28195)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(263079,'s^-1'), n=1.73643, Ea=(39.8993,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_RSSR;Y_rad_out;Cs_H_out_2H] for rate rule [R5H_TSSS;Ct_rad_out;Cs_H_out_2H]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction16',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC(C)[C]([CH2])C=C(27307)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(121000,'s^-1'), n=1.9, Ea=(55.6472,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 92 used for R5H_SSMS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R5H_SSMS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction17',
reactants = ['[CH]=[C]C=C(4699)', 'C=[C][CH]C(18176)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction18',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC([CH2])[C]1CC1C(28196)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(3.473e+12,'s^-1'), n=0.247, Ea=(231.216,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3_D;doublebond_intra_secNd;radadd_intra_cs] for rate rule [R3_D;doublebond_intra_secNd_HNd;radadd_intra_cs2H]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction19',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH]=[C]C1CC(C)C1=C(28127)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.51071e+08,'s^-1'), n=0.996667, Ea=(149.076,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_Cs_RR_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction20',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH2]C(=CC)C1[C]=CC1(28197)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(3.27074e+08,'s^-1'), n=0.924088, Ea=(130.68,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S;multiplebond_intra;radadd_intra_cs2H] for rate rule [R4_S_T;triplebond_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction21',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH2]C1[C]=CCC1=CC(27951)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(3.47e+11,'s^-1'), n=0.15, Ea=(58.576,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS_T;triplebond_intra_H;radadd_intra] for rate rule [R5_SS_T;triplebond_intra_H;radadd_intra_cs2H]
Euclidian distance = 2.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction22',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC(=C)C(C)=CC(28198)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad]
Euclidian distance = 0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction23',
reactants = ['CH2(S)(23)', 'C#CC([CH2])C([CH2])=C(28199)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri
Exact match found for rate rule [carbene;Cd_pri]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: 1,2_Insertion_carbene
Ea raised from -3.9 to 0 kJ/mol."""),
)
reaction(
label = 'reaction22',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#C[CH]CC(=C)[CH]C(26576)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction25',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC[CH]C([CH2])=CC(28163)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction29',
reactants = ['C#CC([CH2])C[C]=CC(26585)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS26',
kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction27',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC1CCC1=CC(28109)'],
transitionState = 'TS27',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction28',
reactants = ['CH2(19)', 'C#C[CH]C([CH2])=CC(28200)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS28',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/TwoDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction29',
reactants = ['CH2(19)', 'C#CC([CH2])[C]=CC(28201)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS29',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/NonDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction30',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH]=C1C([CH2])C(=C)C1C(28062)'],
transitionState = 'TS30',
kinetics = Arrhenius(A=(2.13771e+06,'s^-1'), n=1.58803, Ea=(102.944,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_csHNd] for rate rule [R5_SS_T;triplebond_intra_H;radadd_intra_csHNd]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Exocyclic
Ea raised from 100.0 to 102.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction31',
reactants = ['H(3)', 'C#CC([CH2])C(=C)C=C(27302)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS31',
kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ
Exact match found for rate rule [Cds-HH_Cds-CdH;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond
Ea raised from -2.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction32',
reactants = ['C#CC([CH2])C(=C)C[CH2](28202)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS32',
kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction33',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#C[C]([CH2])C(=C)CC(28203)'],
transitionState = 'TS33',
kinetics = Arrhenius(A=(6.18083e+09,'s^-1'), n=1.04667, Ea=(209.2,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS_2Cd;C_rad_out_H/NonDeC;XH_out]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction34',
reactants = ['[CH]=C(CC)C([CH2])C#C(28204)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS34',
kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction35',
reactants = ['[CH]=C([CH]C)C(C)C#C(26415)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS35',
kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction36',
reactants = ['[C]#CC([CH2])C(=C)CC(28205)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS36',
kinetics = Arrhenius(A=(366176,'s^-1'), n=1.54456, Ea=(43.4053,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_RSSR;Y_rad_out;Cs_H_out_H/NonDeC] for rate rule [R5H_TSSS;Ct_rad_out;Cs_H_out_H/NonDeC]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction37',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH]=[C]C1CCC1=CC(28110)'],
transitionState = 'TS37',
kinetics = Arrhenius(A=(1.51071e+08,'s^-1'), n=0.996667, Ea=(145.101,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_Cs_RR_D;doublebond_intra;radadd_intra_cs2H]
Euclidian distance = 0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction38',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['[CH2]C1[C]=CC(C)C1=C(28074)'],
transitionState = 'TS38',
kinetics = Arrhenius(A=(4.64e+06,'s^-1'), n=1.15, Ea=(58.1576,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5_SS;multiplebond_intra;radadd_intra_csHCs] for rate rule [R5_SS_T;triplebond_intra_H;radadd_intra_csHCs]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction39',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC(=C)C(=C)CC(28206)'],
transitionState = 'TS39',
kinetics = Arrhenius(A=(7.437e+08,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad_NDe;XH_Rrad]
Euclidian distance = 0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction40',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC(C)C(=C)C=C(27316)'],
transitionState = 'TS40',
kinetics = Arrhenius(A=(6.37831e+09,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction27',
reactants = ['C#CC([CH2])C(C)[C]=C(26581)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS41',
kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C
Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C]
Euclidian distance = 0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction42',
reactants = ['C#CC([CH2])C(=C)[CH]C(26577)'],
products = ['C#CC1CC(C)C1=C(28126)'],
transitionState = 'TS42',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]
Euclidian distance = 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction43',
reactants = ['CHCH3(T)(95)', 'C#CC([CH2])[C]=C(27503)'],
products = ['C#CC([CH2])C(=C)[CH]C(26577)'],
transitionState = 'TS43',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/NonDe;Birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
# Pressure-dependent reaction network #4549: a single unimolecular isomer
# well, entered through the bimolecular CH3CHCCH2 + CH2CHCCH channel, with
# an equimolar N2/Ne bath gas. (Auto-generated RMG-Py input — the species
# labels refer to the species(...) definitions above.)
network(
label = '4549',
isomers = [
'C#CC([CH2])C(=C)[CH]C(26577)',
],
reactants = [
('CH3CHCCH2(18175)', 'CH2CHCCH(26391)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
# Master-equation / k(T,P) calculation settings for network 4549.
# NOTE(review): this block is presumably auto-generated by RMG — confirm
# before hand-editing any of the grids below.
pressureDependence(
label = '4549',
# Temperature grid: 8 points spanning 300-2000 K (Tlist values lie inside
# the [Tmin, Tmax] interval).
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
# Pressure grid: 5 points spanning 0.01-100 bar.
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
# Energy-grain resolution for the master equation.
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
# Solve with the modified strong collision approximation.
method = 'modified strong collision',
# Fit the resulting k(T,P) surface to Chebyshev polynomials — presumably a
# 6-term T by 4-term P basis; confirm against the RMG documentation.
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
|
###
# Time Complexity: O(n)
# Space Complexity: O(1)
###
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Reverse a singly-linked list k nodes at a time (LeetCode 25 style).

    Time complexity O(n), space complexity O(1): each node is visited a
    constant number of times and only a handful of pointers are kept.
    """

    def reverseKGroup(self, head, k):
        """
        Reverse the nodes of the list in groups of k. A trailing group with
        fewer than k nodes is left in its original order.

        :type head: ListNode
        :type k: int
        :rtype: ListNode  (new head of the rearranged list)
        """
        if not head or not head.next or k < 2:
            # Nothing to reverse: empty list, single node, or group size < 2.
            return head
        seen = 0
        dummy = ListNode(0)  # sentinel so the first group needs no special case
        dummy.next = head
        group_prev = dummy   # node immediately before the group being reversed
        curr = head
        while curr:
            seen += 1
            if seen % k == 0:
                # curr is the last node of a complete group; reverse the run
                # (group_prev, curr.next) and continue after its new tail.
                group_prev = self.reverse(group_prev, curr.next)
                curr = group_prev.next
            else:
                curr = curr.next
        return dummy.next

    def reverse(self, l, r):
        """
        Reverse the sublist strictly between l and r, i.e. the nodes from
        l.next up to (but not including) r. Splices the reversed run back in
        and returns its new tail (the node that was l.next), whose .next is r.
        """
        curr = tail = l.next
        prev = None
        while curr != r:
            nxt = curr.next  # 'nxt' avoids shadowing the builtin next()
            curr.next = prev
            prev = curr
            curr = nxt
        l.next = prev   # l now points at the new front of the reversed run
        tail.next = r   # old front becomes the tail, reattached to r
        return tail
|
#!/usr/bin/python3
"""Fetch a JSON array of objects from a URL and print each key/value pair."""
import argparse
import json
import urllib.error
import urllib.request

# Set up argparser, add one argument option, parse it, and assign it to arguments
argparser = argparse.ArgumentParser()
argparser.add_argument("-u", "--url", action="store", dest="url", help="URL to JSON data")
arguments = argparser.parse_args()

# User-Agent header is required by most sites
http_header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64)"}

# Verify that there was a URL passed to the script
if not arguments.url:
    print("Please pass a JSON URL for parsing using the -u / --url flags.")
    exit()

# Try to set up the HTTP request. Catch exceptions with the URL formatting
try:
    http_request = urllib.request.Request(arguments.url, None, http_header)
except ValueError:
    print("There was an issue with the URL provided. Please check the URL and try again")
    exit()

# Try to initiate the HTTP request and report any errors received.
try:
    # 'with' closes the connection deterministically once the body is read
    # (the original leaked the response object).
    with urllib.request.urlopen(http_request) as http_response:
        json_posts = http_response.read().decode('utf8')
except urllib.error.HTTPError as error:
    # HTTPError must be caught before URLError (it is a subclass).
    print("An error code was received from the server: {}".format(error.code))
    exit()
except urllib.error.URLError as error:
    print("A URL error was received from the server: {}".format(error.args))
    exit()

# Pass the data to the JSON parser which returns a list of JSON objects
json_list = json.loads(json_posts)

# Loop through the list, then loop through the key:value pairs for each dict, printing them out
for element in json_list:
    for key in element.keys():
        print("{}: {}".format(str(key), str(element[key])))
    print("\n")
|
'''
Name: Sung Joon Park
ID: 01514170
'''
def referenceSpectra(spectra):
    '''
    Parse a list of 'Element w1,w2,...' strings into a dictionary mapping
    each element symbol to a sorted tuple of its reference wavelengths.

    >>> spectra = ['H 486.135,434.0472,656.279,410.1734', 'He 501.56783,667.8151,587.5621,471.31457,492.19313,504.7738,447.14802,438.79296,402.61914,412.08154', 'Li 610.354,670.791,413.259,610.365,670.776', 'Hg 404.6565,407.7837,434.74945,435.8335,535.4034,546.075,567.581,576.961,579.067,580.3782,585.9254,671.634,690.746']
    >>> referenceSpectrum = referenceSpectra(spectra)
    >>> referenceSpectrum['H']
    (410.1734, 434.0472, 486.135, 656.279)
    >>> referenceSpectrum['Li']
    (413.259, 610.354, 610.365, 670.776, 670.791)
    '''
    referenceSpectrum = {}  # element symbol -> sorted tuple of wavelengths
    for entry in spectra:
        # Each entry is the element symbol, a single space, then the
        # comma-separated wavelength values (replaces the original
        # character-by-character ASCII-code parser).
        element, _, wavelengths = entry.partition(' ')
        referenceSpectrum[element] = tuple(sorted(float(w) for w in wavelengths.split(',')))
    return referenceSpectrum
def referenceLines(spectrum, referenceSpectrum, eps = None):
    '''
    Count how many lines of referenceSpectrum have at least one measured
    line in spectrum within eps nanometers (default tolerance 0.1). Each
    reference line is counted at most once.

    >>> spectra = ['H 486.135,434.0472,656.279,410.1734', 'He 501.56783,667.8151,587.5621,471.31457,492.19313,504.7738,447.14802,438.79296,402.61914,412.08154', 'Li 610.354,670.791,413.259,610.365,670.776', 'Hg 404.6565,407.7837,434.74945,435.8335,535.4034,546.075,567.581,576.961,579.067,580.3782,585.9254,671.634,690.746']
    >>> referenceSpectrum = referenceSpectra(spectra)
    >>> referenceSpectrum['H']
    (410.1734, 434.0472, 486.135, 656.279)
    >>> referenceSpectrum['Li']
    (413.259, 610.354, 610.365, 670.776, 670.791)
    >>> spectrum1 = (410.1055, 434.1126, 434.1427, 486.3071, 656.224)
    >>> referenceLines(spectrum1, referenceSpectrum['H'])
    3
    >>> spectrum2 = (410.1875, 434.0906, 486.2315, 524.7571, 656.2779)
    >>> referenceLines(spectrum2, referenceSpectrum['H'], eps=0.1)
    4
    >>> referenceLines(spectrum2, referenceSpectrum['H'], eps=0.025)
    2
    '''
    if eps is None:
        # Default tolerance; folding this in removes the original's two
        # duplicated loop bodies.
        eps = 0.1
    count = 0
    for reference in referenceSpectrum:
        for measured in spectrum:
            if abs(reference - measured) < eps:
                count += 1
                break  # count each reference line at most once
    return count
def decomposition(spectrum, referenceSpectrum, eps=None, minimum=None):
    '''
    Return the sorted list of element symbols whose reference lines are
    matched by the measured spectrum. Without a minimum, every reference
    line of an element must be matched; otherwise at least `minimum` lines.

    >>> spectra = ['H 486.135,434.0472,656.279,410.1734', 'He 501.56783,667.8151,587.5621,471.31457,492.19313,504.7738,447.14802,438.79296,402.61914,412.08154', 'Li 610.354,670.791,413.259,610.365,670.776', 'Hg 404.6565,407.7837,434.74945,435.8335,535.4034,546.075,567.581,576.961,579.067,580.3782,585.9254,671.634,690.746']
    >>> referenceSpectrum = referenceSpectra(spectra)
    >>> spectrum = (402.5579, 410.1914, 413.162, 434.1243, 486.0598, 504.7387, 610.157, 610.562, 656.354, 670.578, 670.991)
    >>> decomposition(spectrum, referenceSpectrum)
    ['H']
    >>> decomposition(spectrum, referenceSpectrum, eps=0.2)
    ['H', 'Li']
    >>> decomposition(spectrum, referenceSpectrum, minimum=2)
    ['H', 'He']
    >>> decomposition(spectrum, referenceSpectrum, eps=0.2, minimum=2)
    ['H', 'He', 'Li']
    '''
    # referenceLines already applies the default tolerance when eps is None,
    # so it can be forwarded unchanged.
    matched = []  # element symbols satisfying the match criterion
    for element, reference in referenceSpectrum.items():
        count = referenceLines(spectrum, reference, eps)
        if minimum is None:
            # No explicit minimum: every reference line must be matched.
            if count == len(reference):
                matched.append(element)
        elif count >= minimum:
            matched.append(element)
    return sorted(matched)
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above when the module
    # is executed directly as a script.
    import doctest
    doctest.testmod()
|
import tkinter as tk
from tkinter import *
from tkinter import Menu

# Root window setup.
mw = tk.Tk()
# Application-wide default button colours.
mw.option_add("*Button.Background", "black")
mw.option_add("*Button.Foreground", "red")
mw.title('Screen Timer Program')
mw.geometry("500x500")  # Selects the size of the window
mw.resizable(0, 0)  # Doesn't allow resizing

# Menu bar with File and Help cascades.
menu = Menu(mw)
mw.config(menu=menu)
file_menu = Menu(menu, tearoff=0)
file_menu.add_command(label='New User')
file_menu.add_command(label='Edit User')
file_menu.add_command(label='Exit Program', command=mw.destroy)
menu.add_cascade(label='File', menu=file_menu)
help_menu = Menu(menu, tearoff=0)
help_menu.add_command(label='Help information')
help_menu.add_command(label='About Screen Timer')
menu.add_cascade(label='Help', menu=help_menu)

# Black background frame filling the whole window.
back = tk.Frame(master=mw, bg='black')
back.pack_propagate(0)  # Don't allow the widgets inside to determine the frame's width / height
back.pack(fill=tk.BOTH, expand=1)  # Expand the frame to fill the root window

# Bug fix: the original bound the name `go` to two different buttons,
# silently discarding the reference to the first; each button now has
# its own name so both remain addressable.
admin_button = tk.Button(master=back, text='Admin Settings')
admin_button.pack()
cmd_button = tk.Button(master=back, text='CMD')
cmd_button.pack()
close_button = tk.Button(master=back, text='Close', command=mw.destroy)
close_button.pack()

mw.mainloop()
|
import networks
import tensorflow as tf
import time
import os
import sys
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base,'../../'))
from basemodels.GanLosses import GanLoss
from basemodels.GanOptimizers import Adam
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
from PIL import Image
import datetime
###############global parameters###################
"""
Records parameters that cannot be passed through the model definition,
especially those required inside @tf.function() input signatures.
Learning rates and loss-function coefficients should instead be
controlled during model training.
"""
# All shapes are [batch, height, width, channels] for 1x128x128x1 inputs.
global_input_X_shape = [1,128,128,1]
global_input_Y_shape = [1,128,128,1]
global_mask_X_shape = [1,128,128,1]
global_mask_Y_shape = [1,128,128,1]
################################################
class CycleGAN(tf.keras.Model):
    """
    The model is only responsible for the operations performed once the
    training set and the test (validation) set have been supplied.
    """
    def __init__(self,
                 train_set,
                 test_set,
                 loss_name="WGAN-GP",
                 mixed_precision=False,
                 learning_rate=2e-4,
                 tmp_path=None,
                 out_path=None):
        super(CycleGAN,self).__init__()
        # Receive the datasets and related parameters.
        self.train_set = train_set
        self.test_set = test_set
        self.tmp_path = tmp_path
        self.out_path = out_path
        # Define the sub-models: G maps X->Y, F maps Y->X, Dx/Dy discriminate.
        self.G = networks.Generator(name="G_X2Y")
        self.F = networks.Generator(name="G_Y2X")
        if loss_name in ["WGAN-SN","WGAN-GP-SN"]:
            # Spectrally-normalised discriminators; strip the "-SN" suffix
            # so gan_loss sees the base loss name.
            self.Dy = networks.Discriminator(name="If_is_real_Y",use_sigmoid=False,sn=True)
            self.Dx = networks.Discriminator(name="If_is_real_X",use_sigmoid=False,sn=True)
            self.loss_name = loss_name[:-3]
        elif loss_name in ["WGAN","WGAN-GP"]:
            self.Dy = networks.Discriminator(name="If_is_real_Y",use_sigmoid=False,sn=False)
            self.Dx = networks.Discriminator(name="If_is_real_X",use_sigmoid=False,sn=False)
            self.loss_name = loss_name
        elif loss_name in ["Vanilla","LSGAN"]:
            # Sigmoid outputs for the classic/least-squares formulations.
            self.Dy = networks.Discriminator(name="If_is_real_Y",use_sigmoid=True,sn=False)
            self.Dx = networks.Discriminator(name="If_is_real_X",use_sigmoid=True,sn=False)
            self.loss_name = loss_name
        else:
            raise ValueError("Do not support the loss "+loss_name)
        self.model_list=[self.G,self.F,self.Dy,self.Dx]
        # Define loss function, optimizers, metrics, checkpointing, etc.
        self.gan_loss = GanLoss(self.loss_name)
        self.optimizers_list = self.optimizers_config(mixed_precision=mixed_precision,learning_rate=learning_rate)
        self.mixed_precision = mixed_precision
        self.matrics_list = self.matrics_config()
        self.checkpoint_config()
        self.get_seed()
    def build(self,X_shape,Y_shape):
        """
        input_shape must be sliced (copied): the lower layers treat it as
        each layer's output shape and would otherwise mutate the caller's
        list in place.
        """
        self.G.build(input_shape=X_shape[:])#G X->Y
        self.Dy.build(input_shape=Y_shape[:])#Dy Y or != Y
        self.F.build(input_shape=Y_shape[:])#F Y->X
        self.Dx.build(input_shape=X_shape[:])#Dx X or != X
        self.built = True
    def optimizers_config(self,mixed_precision=False,learning_rate=2e-4):
        # NOTE(review): the learning_rate argument is ignored; TTUR-style
        # rates are hard-coded (generators 1e-4, discriminators 4e-4) —
        # confirm this is intended.
        self.G_optimizer = Adam(learning_rate=1e-4,beta_1=0.0,beta_2=0.9)
        self.Dy_optimizer = Adam(learning_rate=4e-4,beta_1=0.0,beta_2=0.9)
        self.F_optimizer = Adam(learning_rate=1e-4,beta_1=0.0,beta_2=0.9)
        self.Dx_optimizer = Adam(learning_rate=4e-4,beta_1=0.0,beta_2=0.9)
        if mixed_precision:
            # Wrap each optimizer with loss scaling for mixed precision.
            self.G_optimizer=self.G_optimizer.get_mixed_precision()
            self.Dy_optimizer=self.Dy_optimizer.get_mixed_precision()
            self.F_optimizer=self.F_optimizer.get_mixed_precision()
            self.Dx_optimizer=self.Dx_optimizer.get_mixed_precision()
        return [self.G_optimizer,self.Dy_optimizer,self.F_optimizer,self.Dx_optimizer]
    def matrics_config(self):
        # NOTE(review): "matrics" is a misspelling of "metrics"; kept as-is
        # because __init__ calls it under this name.
        # Creates a timestamped summary writer plus mean PSNR/SSIM trackers
        # for both translation directions.
        current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        train_logdir = self.tmp_path+"/logs/" + current_time
        self.train_summary_writer = tf.summary.create_file_writer(train_logdir)
        self.m_psnr_X2Y = tf.keras.metrics.Mean('psnr_y', dtype=tf.float32)
        self.m_psnr_Y2X = tf.keras.metrics.Mean('psnr_x', dtype=tf.float32)
        self.m_ssim_X2Y = tf.keras.metrics.Mean('ssim_y', dtype=tf.float32)
        self.m_ssim_Y2X = tf.keras.metrics.Mean('ssim_x', dtype=tf.float32)
        return [self.m_psnr_X2Y,self.m_psnr_Y2X,self.m_ssim_X2Y,self.m_ssim_Y2X]
        # return None
    def checkpoint_config(self):
        # Checkpoint tracks the global step, all optimizers, all sub-models
        # and the training dataset iterator state; keeps the 3 newest.
        self.ckpt = tf.train.Checkpoint(step=tf.Variable(1),optimizer=self.optimizers_list,model=self.model_list,dataset=self.train_set)
        self.manager = tf.train.CheckpointManager(self.ckpt,self.tmp_path+'/tf_ckpts', max_to_keep=3)
    def pix_gradient(self,x):
        # Pixel gradients over batch and channels; for a 2D single-channel
        # image the reshape is not strictly necessary.
        x = tf.reshape(x,shape=[1,64,64,1])
        dx,dy = tf.image.image_gradients(x)
        return dx,dy
    @tf.function(input_signature=[tf.TensorSpec(shape=global_input_X_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_input_Y_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_mask_X_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_mask_Y_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=[4],dtype=tf.int32),
                                  tf.TensorSpec(shape=[1],dtype=tf.uint32)])
    def train_step_D(self,trainX,trainY,maskX,maskY,wgp_shape,step):
        """One discriminator update (Dy and Dx) with WGAN-GP gradient
        penalties; returns (Dy_loss, Dx_loss)."""
        with tf.GradientTape(persistent=True) as D_tape:
            GeneratedY = self.G(trainX)
            GeneratedY = tf.multiply(GeneratedY,maskY)
            Dy_real_out = self.Dy(trainY)
            Dy_fake_out = self.Dy(GeneratedY)
            GeneratedX = self.F(trainY)
            GeneratedX = tf.multiply(GeneratedX,maskX)
            Dx_real_out = self.Dx(trainX)
            Dx_fake_out = self.Dx(GeneratedX)
            # Gradient penalty on a random interpolation between real and
            # generated Y (coefficient 10, squared norm-minus-one form).
            e = tf.random.uniform(shape=wgp_shape,minval=0.0,maxval=1.0)
            mid_Y = e*trainY+(1-e)*GeneratedY
            with tf.GradientTape() as gradient_penaltyY:
                gradient_penaltyY.watch(mid_Y)
                inner_loss = self.Dy(mid_Y)
            penalty = gradient_penaltyY.gradient(inner_loss,mid_Y)
            penalty_normY = 10.0*tf.math.square(tf.norm(tf.reshape(penalty,shape=[wgp_shape[0],-1]),ord=2,axis=-1)-1)
            # Same penalty for the X-domain discriminator.
            e = tf.random.uniform(shape=wgp_shape,minval=0.0,maxval=1.0)
            mid_X = e*trainX+(1-e)*GeneratedX
            with tf.GradientTape() as gradient_penaltyX:
                gradient_penaltyX.watch(mid_X)
                inner_loss = self.Dx(mid_X)
            penalty = gradient_penaltyX.gradient(inner_loss,mid_X)
            penalty_normX = 10.0*tf.math.square(tf.norm(tf.reshape(penalty,shape=[wgp_shape[0],-1]),ord=2,axis=-1)-1)
            Dy_loss = self.gan_loss.DiscriminatorLoss(Dy_real_out,Dy_fake_out)+tf.reduce_mean(penalty_normY)
            Dx_loss = self.gan_loss.DiscriminatorLoss(Dx_real_out,Dx_fake_out)+tf.reduce_mean(penalty_normX)
            if self.mixed_precision:
                # Scale losses inside the tape so scaling is recorded.
                scaled_Dy_loss = self.Dy_optimizer.get_scaled_loss(Dy_loss)
                scaled_Dx_loss = self.Dx_optimizer.get_scaled_loss(Dx_loss)
        if self.mixed_precision:
            scaled_gradients_of_Dy=D_tape.gradient(scaled_Dy_loss,self.Dy.trainable_variables)
            scaled_gradients_of_Dx=D_tape.gradient(scaled_Dx_loss,self.Dx.trainable_variables)
            gradients_of_Dy = self.Dy_optimizer.get_unscaled_gradients(scaled_gradients_of_Dy)
            gradients_of_Dx = self.Dx_optimizer.get_unscaled_gradients(scaled_gradients_of_Dx)
        else:
            gradients_of_Dy = D_tape.gradient(Dy_loss,self.Dy.trainable_variables)
            gradients_of_Dx = D_tape.gradient(Dx_loss,self.Dx.trainable_variables)
        self.Dy_optimizer.apply_gradients(zip(gradients_of_Dy,self.Dy.trainable_variables))
        self.Dx_optimizer.apply_gradients(zip(gradients_of_Dx,self.Dx.trainable_variables))
        return Dy_loss,Dx_loss
    @tf.function(input_signature=[tf.TensorSpec(shape=global_input_X_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_input_Y_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_mask_X_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_mask_Y_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=[4],dtype=tf.int32),
                                  tf.TensorSpec(shape=[1],dtype=tf.uint32)])
    def train_step_G(self,trainX,trainY,maskX,maskY,wgp_shape,step):
        """One generator update (G and F) with adversarial plus
        cycle-consistency losses; returns (G_loss, F_loss)."""
        with tf.GradientTape(persistent=True) as G_tape:
            GeneratedY = self.G(trainX)
            GeneratedY = tf.multiply(GeneratedY,maskY)
            # Dy_real_out = self.Dy(trainY)
            Dy_fake_out = self.Dy(GeneratedY)
            GeneratedX = self.F(trainY)
            GeneratedX = tf.multiply(GeneratedX,maskX)
            # Dx_real_out = self.Dx(trainX)
            Dx_fake_out = self.Dx(GeneratedX)
            cycle_consistent_loss_X2Y = tf.reduce_mean(tf.abs(self.F(GeneratedY)-trainX))
            cycle_consistent_loss_Y2X = tf.reduce_mean(tf.abs(self.G(GeneratedX)-trainY))
            cycle_consistent = cycle_consistent_loss_X2Y+cycle_consistent_loss_Y2X
            # Pixel-gradient and reconstruction losses are not used yet.
            # NOTE(review): step is uint32, so step>=0 is always true and
            # both branches set the same value — placeholder for scheduling.
            if step>=0:
                cycle_l = 10.0
            else:
                cycle_l = 10.0
            G_loss = self.gan_loss.GeneratorLoss(Dy_fake_out)+cycle_l*(cycle_consistent)
            F_loss = self.gan_loss.GeneratorLoss(Dx_fake_out)+cycle_l*(cycle_consistent)
            if self.mixed_precision:
                # Scale losses inside the tape so scaling is recorded.
                scaled_G_loss = self.G_optimizer.get_scaled_loss(G_loss)
                scaled_F_loss = self.F_optimizer.get_scaled_loss(F_loss)
        if self.mixed_precision:
            scaled_gradients_of_G=G_tape.gradient(scaled_G_loss,self.G.trainable_variables)
            scaled_gradients_of_F=G_tape.gradient(scaled_F_loss,self.F.trainable_variables)
            gradients_of_G = self.G_optimizer.get_unscaled_gradients(scaled_gradients_of_G)
            gradients_of_F = self.F_optimizer.get_unscaled_gradients(scaled_gradients_of_F)
        else:
            gradients_of_G = G_tape.gradient(G_loss,self.G.trainable_variables)
            gradients_of_F = G_tape.gradient(F_loss,self.F.trainable_variables)
        self.G_optimizer.apply_gradients(zip(gradients_of_G,self.G.trainable_variables))
        self.F_optimizer.apply_gradients(zip(gradients_of_F,self.F.trainable_variables))
        return G_loss,F_loss
    @tf.function(input_signature=[tf.TensorSpec(shape=global_input_X_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_input_Y_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_mask_X_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=global_mask_Y_shape,dtype=tf.float32),
                                  tf.TensorSpec(shape=[4],dtype=tf.int32),
                                  tf.TensorSpec(shape=[1],dtype=tf.uint32)])
    def train_step(self,trainX,trainY,maskX,maskY,wgp_shape,step):
        """Combined single-step update of all four networks, used for the
        Vanilla/LSGAN losses (no gradient penalty, no loss scaling)."""
        with tf.GradientTape(persistent=True) as cycle_type:
            GeneratedY = self.G(trainX)
            GeneratedY = tf.multiply(GeneratedY,maskY)
            Dy_real_out = self.Dy(trainY)
            Dy_fake_out = self.Dy(GeneratedY)
            GeneratedX = self.F(trainY)
            GeneratedX = tf.multiply(GeneratedX,maskX)
            Dx_real_out = self.Dx(trainX)
            Dx_fake_out = self.Dx(GeneratedX)
            cycle_consistent_loss_X2Y = tf.reduce_mean(tf.abs(self.F(GeneratedY)-trainX))
            cycle_consistent_loss_Y2X = tf.reduce_mean(tf.abs(self.G(GeneratedX)-trainY))
            cycle_consistent = cycle_consistent_loss_X2Y+cycle_consistent_loss_Y2X
            # Pixel-gradient and reconstruction losses are not used yet
            # (see the same placeholder in train_step_G).
            if step>=0:
                cycle_l = 10.0
            else:
                cycle_l = 10.0
            Dy_loss = self.gan_loss.DiscriminatorLoss(Dy_real_out,Dy_fake_out)
            Dx_loss = self.gan_loss.DiscriminatorLoss(Dx_real_out,Dx_fake_out)
            G_loss = self.gan_loss.GeneratorLoss(Dy_fake_out)+cycle_l*(cycle_consistent)
            F_loss = self.gan_loss.GeneratorLoss(Dx_fake_out)+cycle_l*(cycle_consistent)
        gradients_of_Dy = cycle_type.gradient(Dy_loss,self.Dy.trainable_variables)
        gradients_of_Dx = cycle_type.gradient(Dx_loss,self.Dx.trainable_variables)
        gradients_of_G = cycle_type.gradient(G_loss,self.G.trainable_variables)
        gradients_of_F = cycle_type.gradient(F_loss,self.F.trainable_variables)
        self.Dy_optimizer.apply_gradients(zip(gradients_of_Dy,self.Dy.trainable_variables))
        self.Dx_optimizer.apply_gradients(zip(gradients_of_Dx,self.Dx.trainable_variables))
        self.G_optimizer.apply_gradients(zip(gradients_of_G,self.G.trainable_variables))
        self.F_optimizer.apply_gradients(zip(gradients_of_F,self.F.trainable_variables))
        return G_loss,Dy_loss,F_loss,Dx_loss
    def train(self,epoches):
        """Runs the training loop for `epoches` epochs, restoring the latest
        checkpoint first and saving weights/summaries every 100 steps."""
        self.ckpt.restore(self.manager.latest_checkpoint)
        for _ in range(epoches):
            start = time.time()
            for trainX,trainY,maskX,maskY in self.train_set:
                self.ckpt.step.assign_add(1)
                step = int(self.ckpt.step)
                if self.loss_name in ["WGAN","WGAN-GP"]:
                    # Separate D and G updates; the range(1) loops keep the
                    # n-critic ratio easy to tune.
                    for __ in range(1):
                        Dy_loss,Dx_loss = self.train_step_D(trainX,trainY,
                                                            maskX,maskY,
                                                            tf.constant([trainY.shape[0],1,1,1],shape=[4],dtype=tf.int32),
                                                            tf.constant(step,shape=[1],dtype=tf.uint32))
                    for __ in range(1):
                        G_loss,F_loss = self.train_step_G(trainX,trainY,
                                                          maskX,maskY,
                                                          tf.constant([trainY.shape[0],1,1,1],shape=[4],dtype=tf.int32),
                                                          tf.constant(step,shape=[1],dtype=tf.uint32))
                elif self.loss_name in ["Vanilla","LSGAN"]:
                    G_loss,Dy_loss,F_loss,Dx_loss = self.train_step(trainX,trainY,
                                                                    maskX,maskY,
                                                                    tf.constant([trainY.shape[0],1,1,1],shape=[4],dtype=tf.int32),
                                                                    tf.constant(step,shape=[1],dtype=tf.uint32))
                else:
                    raise ValueError("Inner Error")
                if step % 100 == 0:
                    # Periodic checkpoint, weight export and summary write.
                    save_path = self.manager.save()
                    print("Saved checkpoint for step {}: {}".format(step,save_path))
                    self.G.save_weights(self.tmp_path+'/weights_saved/G.ckpt')
                    self.Dy.save_weights(self.tmp_path+'/weights_saved/Dy.ckpt')
                    self.F.save_weights(self.tmp_path+'/weights_saved/F.ckpt')
                    self.Dx.save_weights(self.tmp_path+'/weights_saved/Dx.ckpt')
                    self.wirte_summary(step=step,
                                       seed=self.seed,
                                       G=self.G,
                                       F=self.F,
                                       G_loss=G_loss,
                                       Dy_loss=Dy_loss,
                                       F_loss=F_loss,
                                       Dx_loss=Dx_loss,
                                       out_path=self.out_path)
                    print ('Time to next 100 step {} is {} sec'.format(step,time.time()-start))
                    start = time.time()
    def get_seed(self):
        """Grabs one fixed sample from the test set, used to visualise
        training progress in the summaries."""
        seed_get = iter(self.test_set)
        testX,testY,maskX,maskY = next(seed_get)
        print(testX.shape,testY.dtype,maskX.dtype,maskY.shape)
        # NOTE(review): plt.show() is a no-op under the 'agg' backend
        # selected at import time; these calls only help interactive runs.
        plt.imshow(testX[0,:,:,0],cmap='gray')
        plt.show()
        plt.imshow(testY[0,:,:,0],cmap='gray')
        plt.show()
        plt.imshow(maskX[0,:,:,0],cmap='gray')
        plt.show()
        plt.imshow(maskY[0,:,:,0],cmap='gray')
        plt.show()
        self.seed = testX,testY,maskX,maskY
    def wirte_summary(self,step,seed,G,F,G_loss,Dy_loss,F_loss,Dx_loss,out_path):
        # NOTE(review): method name is a misspelling of "write_summary";
        # kept as-is because train() calls it under this name.
        # Renders a 2x2 real/fake comparison figure, saves it, and logs
        # losses plus PSNR/SSIM metrics to TensorBoard.
        testX,testY,maskX,maskY = seed
        GeneratedY = G(testX)
        GeneratedY = tf.multiply(GeneratedY,maskX)
        GeneratedX = F(testY)
        GeneratedX = tf.multiply(GeneratedX,maskY)# masks are swapped at test time: only the source modality and its own mask are known
        plt.figure(figsize=(5,5))# figure must be large enough to hold the pixels
        plt.subplot(2,2,1)
        plt.title('real X')
        plt.imshow(testX[0,:,:,0],cmap='gray')
        plt.axis('off')
        plt.subplot(2,2,2)
        plt.title('fake Y')
        plt.imshow(GeneratedY[0,:,:,0],cmap='gray')
        plt.axis('off')
        plt.subplot(2,2,3)
        plt.title('fake X')
        plt.imshow(GeneratedX[0,:,:,0],cmap='gray')
        plt.axis('off')
        plt.subplot(2,2,4)
        plt.title('real Y')
        plt.imshow(testY[0,:,:,0],cmap='gray')
        plt.axis('off')
        plt.savefig(out_path+'/image_at_{}.png'.format(step))
        plt.close()
        # Re-read the saved PNG so it can be logged as an image summary.
        img = Image.open(out_path+'/image_at_{}.png'.format(step))
        img = tf.reshape(np.array(img),shape=(1,500,500,4))
        with self.train_summary_writer.as_default():
            ##########################
            self.m_psnr_X2Y(tf.image.psnr(GeneratedY,testY,1.0,name=None))
            self.m_psnr_Y2X(tf.image.psnr(GeneratedX,testX,1.0,name=None))
            self.m_ssim_X2Y(tf.image.ssim(GeneratedY,testY,1, filter_size=11,filter_sigma=1.5,k1=0.01,k2=0.03))
            self.m_ssim_Y2X(tf.image.ssim(GeneratedX,testX,1, filter_size=11,filter_sigma=1.5,k1=0.01,k2=0.03))
            tf.summary.scalar('G_loss',G_loss,step=step)
            tf.summary.scalar('Dy_loss',Dy_loss,step=step)
            tf.summary.scalar('F_loss',F_loss,step=step)
            tf.summary.scalar('Dx_loss',Dx_loss,step=step)
            tf.summary.scalar('test_psnr_y', self.m_psnr_X2Y.result(), step=step)
            tf.summary.scalar('test_psnr_x', self.m_psnr_Y2X.result(), step=step)
            tf.summary.scalar('test_ssim_y', self.m_ssim_X2Y.result(), step=step)
            tf.summary.scalar('test_ssim_x', self.m_ssim_Y2X.result(), step=step)
            tf.summary.image("img",img,step=step)
            ##########################
        # Reset the running means so each summary reflects only this step.
        self.m_psnr_X2Y.reset_states()
        self.m_psnr_Y2X.reset_states()
        self.m_ssim_X2Y.reset_states()
        self.m_ssim_Y2X.reset_states()
    def test(self):
        """Stitches four 128x128 patch predictions per subject onto 240x240
        boards, saves a comparison figure and logs PSNR/SSIM summaries."""
        self.ckpt.restore(self.manager.latest_checkpoint)
        step = 0
        black_board_X = np.zeros(shape=[240,240],dtype=np.float32)
        black_board_Y = np.zeros(shape=[240,240],dtype=np.float32)
        black_board_rX = np.zeros(shape=[240,240],dtype=np.float32)
        black_board_rY = np.zeros(shape=[240,240],dtype=np.float32)
        for i,(testX,testY,maskX,maskY) in enumerate(self.test_set):
            GeneratedY = self.G(testX)
            GeneratedY = tf.multiply(GeneratedY,maskX)
            GeneratedX = self.F(testY)
            GeneratedX = tf.multiply(GeneratedX,maskY)# masks are swapped at test time: only the source modality and its own mask are known
            if (i+1)%4==1:
                # Top-left patch.
                black_board_Y[48:175+1,22:149+1]+=GeneratedY.numpy()[0,:,:,0]
                black_board_X[48:175+1,22:149+1]+=GeneratedX.numpy()[0,:,:,0]
                black_board_rY[48:175+1,22:149+1]+=testY.numpy()[0,:,:,0]
                black_board_rX[48:175+1,22:149+1]+=testX.numpy()[0,:,:,0]
            elif (i+1)%4==2:
                # Top-right patch.
                black_board_Y[48:175+1,90:217+1]+=GeneratedY.numpy()[0,:,:,0]
                black_board_X[48:175+1,90:217+1]+=GeneratedX.numpy()[0,:,:,0]
                black_board_rY[48:175+1,90:217+1]+=testY.numpy()[0,:,:,0]
                black_board_rX[48:175+1,90:217+1]+=testX.numpy()[0,:,:,0]
            elif (i+1)%4==3:
                # Bottom-left patch.
                black_board_Y[64:191+1,22:149+1]+=GeneratedY.numpy()[0,:,:,0]
                black_board_X[64:191+1,22:149+1]+=GeneratedX.numpy()[0,:,:,0]
                black_board_rY[64:191+1,22:149+1]+=testY.numpy()[0,:,:,0]
                black_board_rX[64:191+1,22:149+1]+=testX.numpy()[0,:,:,0]
            elif (i+1)%4==0:
                # Bottom-right patch (last of the four).
                black_board_Y[64:191+1,90:217+1]+=GeneratedY.numpy()[0,:,:,0]
                black_board_X[64:191+1,90:217+1]+=GeneratedX.numpy()[0,:,:,0]
                black_board_rY[64:191+1,90:217+1]+=testY.numpy()[0,:,:,0]
                black_board_rX[64:191+1,90:217+1]+=testX.numpy()[0,:,:,0]
                #norm: halve the doubly-covered overlap bands between patches
                black_board_Y[64:175+1,:]=black_board_Y[64:175+1,:]/2.0
                black_board_Y[:,90:149+1]=black_board_Y[:,90:149+1]/2.0
                black_board_X[64:175+1,:]=black_board_X[64:175+1,:]/2.0
                black_board_X[:,90:149+1]=black_board_X[:,90:149+1]/2.0
                black_board_rY[64:175+1,:]=black_board_rY[64:175+1,:]/2.0
                black_board_rY[:,90:149+1]=black_board_rY[:,90:149+1]/2.0
                black_board_rX[64:175+1,:]=black_board_rX[64:175+1,:]/2.0
                black_board_rX[:,90:149+1]=black_board_rX[:,90:149+1]/2.0
            else:
                # NOTE(review): unreachable — (i+1)%4 is always in 0..3.
                raise ValueError("inner error")
            out_path = self.out_path
            if (i+1)%4==0:
                # All four patches collected: save the figure and log metrics.
                step += 1
                plt.figure(figsize=(10,10))# figure must be large enough to hold the pixels
                plt.subplot(2,2,1)
                plt.title('real X')
                plt.imshow(black_board_rX,cmap='gray')
                plt.axis('off')
                plt.subplot(2,2,2)
                plt.title('fake Y')
                plt.imshow(black_board_Y,cmap='gray')
                plt.axis('off')
                plt.subplot(2,2,3)
                plt.title('fake X')
                plt.imshow(black_board_X,cmap='gray')
                plt.axis('off')
                plt.subplot(2,2,4)
                plt.title('real Y')
                plt.imshow(black_board_rY,cmap='gray')
                plt.axis('off')
                plt.savefig(out_path+'/test/image_at_{}.png'.format(step))
                plt.close()
                img = Image.open(out_path+'/test/image_at_{}.png'.format(step))
                img = tf.reshape(np.array(img),shape=(1,1000,1000,4))
                with self.train_summary_writer.as_default():
                    ##########################
                    black_board_Y = tf.reshape(tf.constant(black_board_Y,dtype=tf.float32),shape=[1,240,240,1])
                    black_board_X = tf.reshape(tf.constant(black_board_X,dtype=tf.float32),shape=[1,240,240,1])
                    black_board_rY = tf.reshape(tf.constant(black_board_rY,dtype=tf.float32),shape=[1,240,240,1])
                    black_board_rX = tf.reshape(tf.constant(black_board_rX,dtype=tf.float32),shape=[1,240,240,1])
                    self.m_psnr_X2Y(tf.image.psnr(black_board_Y,black_board_rY,1.0,name=None))
                    self.m_psnr_Y2X(tf.image.psnr(black_board_X,black_board_rX,1.0,name=None))
                    self.m_ssim_X2Y(tf.image.ssim(black_board_Y,black_board_rY,1, filter_size=11,filter_sigma=1.5,k1=0.01,k2=0.03))
                    self.m_ssim_Y2X(tf.image.ssim(black_board_X,black_board_rX,1, filter_size=11,filter_sigma=1.5,k1=0.01,k2=0.03))
                    tf.summary.scalar('test_psnr_y', self.m_psnr_X2Y.result(), step=step)
                    tf.summary.scalar('test_psnr_x', self.m_psnr_Y2X.result(), step=step)
                    tf.summary.scalar('test_ssim_y', self.m_ssim_X2Y.result(), step=step)
                    tf.summary.scalar('test_ssim_x', self.m_ssim_Y2X.result(), step=step)
                    tf.summary.image("img",img,step=step)
                    ##########################
                self.m_psnr_X2Y.reset_states()
                self.m_psnr_Y2X.reset_states()
                self.m_ssim_X2Y.reset_states()
                self.m_ssim_Y2X.reset_states()
                # Start fresh boards for the next group of four patches.
                black_board_X = np.zeros(shape=[240,240],dtype=np.float32)
                black_board_Y = np.zeros(shape=[240,240],dtype=np.float32)
                black_board_rX = np.zeros(shape=[240,240],dtype=np.float32)
                black_board_rY = np.zeros(shape=[240,240],dtype=np.float32)
|
# Parallel arrays: station names and their personnel counts share an index.
fire_station = ["ALPHA", "BETA", "THETA", "CENTER", "RAILWAY", "HARBOR", "SUBURB"]
personnel = [12, 13, 23, 44, 23, 11, 43]
# Cloning the fire stations — a real copy, not an alias, so mutating the
# duty roster cannot corrupt the master list (the original `= fire_station`
# only created a second name for the same list).
fire_duty = fire_station[:]
# Finding the understaffed station: the one with the fewest personnel.
# (The original source had a bare `p` here, which raised a NameError and
# left `understaffed` undefined.)
understaffed = fire_station[personnel.index(min(personnel))]
# Main Loop
i = 0
loop = 0
# 1 year = 52 weeks
while i < 52:
    # Mayor's input
    input_device = input("true / false: ")
    # Input is true
    if input_device == "true":
        # Pick the station on duty this week
        station_on_duty = fire_duty[loop]
        if station_on_duty == understaffed:  # Check if it is understaffed
            print("this station is understaffed")  # Warn about understaffing
        print(station_on_duty)  # Printing the station on duty
        loop += 1  # Prepare for next week
        if loop >= len(fire_duty):  # Resets if there are no more stations left
            loop = 0
    elif input_device == "false":  # If mayor's input is false
        i = 53  # Make i = 53 to end the loop
        print("Emergency stop of procedure")
    i += 1
|
from __future__ import annotations
from . edge import Edge
from typing import List, Mapping, Any, Collection, Set, Dict
from . algorithm_ordering import AlgorithmOrdering
""" Module that contains the definition of a vertex in the context of a
directed graph """
class Vertex():
    """ Class to represent details about a vertex in the context of a directed
        graph: its indegree, outdegree and the edges leading to its successor
        (head) vertices. """

    def __init__(self, label: str,
                 algorithm_ordering=AlgorithmOrdering.NATURAL, **attrs):
        """ Initialises the vertex by adding some specifics

        Args:
            label(str): the label of the vertex
            algorithm_ordering: ordering applied when iterating this vertex's
                edges in algorithms (natural set order, or sorted by head
                label ascending/descending)
            **attrs: additional attributes that define the vertex
        """
        self._label = label
        self._algorithm_ordering: AlgorithmOrdering = algorithm_ordering
        self._attrs: Dict[str, Any] = attrs
        self._edges: Set[Edge] = set()
        self._indegree: int = 0

    def add_edge(self, head_vertex: Vertex):
        """ This method adds an edge to the set of edges maintained by the
            vertex

        Args:
            head_vertex: the head vertex to be added
        """
        self._edges.add(Edge(self, head_vertex))

    def set_attr(self, attr: str, value: Any):
        """ Sets (or overwrites) a single attribute on the vertex """
        self._attrs[attr] = value

    def get_attr(self, attr: str):
        """ Returns the value of the given attribute, or None when unset """
        return self._attrs.get(attr)

    def get_attrs(self) -> Mapping[str, Any]:
        """ Returns the mapping of all attributes set on this vertex """
        return self._attrs

    def reset_attrs(self):
        """ Removes all attributes from the vertex """
        self._attrs = dict()

    def has_enabled_attr(self, attr: str) -> bool:
        """ Returns True when the attribute is present and truthy """
        return attr in self.get_attrs() and self.get_attrs()[attr]

    def get_label(self) -> str:
        """ Returns the label of this vertex """
        return self._label

    def increase_indegree(self):
        """ This method increases the indegree for the incumbent vertex """
        self._indegree += 1

    def decrease_indegree(self):
        """ This method decreases the indegree for the incumbent vertex """
        self._indegree -= 1

    def get_edge_heads(self) -> List[Vertex]:
        """ Returns the head vertices of the edges of the target vertex """
        return [e.get_head() for e in self.get_edges()]

    def get_edges(self) -> Collection[Edge]:
        """ Returns the edges of this vertex, ordered according to the
            algorithm ordering chosen at construction time

        Returns:
            the edge set as-is for NATURAL ordering; otherwise a list of
            edges sorted by head-vertex label (reversed for DESC) """
        if self._algorithm_ordering == AlgorithmOrdering.NATURAL:
            return self._edges
        else:
            return sorted(self._edges,
                          key=lambda edge: edge.get_head().get_label(),
                          reverse=self._algorithm_ordering ==
                          AlgorithmOrdering.DESC)

    def remove_edges(self):
        """ Discards all outgoing edges of the vertex """
        self._edges = set()

    def get_indegree(self) -> int:
        """ Returns the number of incoming edges recorded for this vertex """
        return self._indegree

    def get_outdegree(self) -> int:
        """ Returns the number of outgoing edges of this vertex """
        return len(self._edges)

    def __str__(self):
        return str(self.get_label()) + ", outdegree: {}".format(
            self.get_outdegree()) + \
            ", indegree: {}".format(self.get_indegree()) + \
            ", heads: " + ",".join([str(tail.get_label())
                                    for tail in self.get_edge_heads()])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.