# I pledge my honor that I have abided by the Stevens Honor Code
# Riley Sikorski
#This program will accept a list of numbers
#It will return the sum of the numbers in the list
def main():
print("This program will accept a list of numbers and squares them all. ")
list = []
n = eval(input("Please enter the number of numbers in your list:"))
for i in range(0 , n):
item = eval(input("Enter the number:"))
list.append(item)
print("The list is", list)
sum = 0
for i in range(0,n):
sum = sum + list[i]
print("The sum of the elements in the list is ", sum)
main()
|
import os, random, socket, urllib
from Game import host, Status, Power, Game, Mail, View, Time, TimeZone
from Map import Map
class Page:
# ----------------------------------------------------------------------
def __init__(self, form = {}):
# ---------------------------------------------------
# Parameters that may appear in the url (as GET).
# If they also appear in the form (as POST),
# they end up as a list of 2 nearly identical values,
# which is undesirable.
# ---------------------------------------------------
self.variant = self.game = self.power = self.password = self.user = self.page = ''
for key in form.keys():
if type(form) is dict or type(form[key]) != list:
vars(self)[key] = unicode(form.get(key), 'latin-1')
elif key in vars(self) and type(vars(self)[key]) == str:
vars(self)[key] = unicode(form[key][0].value, 'latin-1')
else: vars(self)[key] = [unicode(x.value, 'latin-1')
for x in form[key]]
self.wireless = 'UPG1' in os.environ.get('HTTP_USER_AGENT', '')
# -----------------------------------------------------------------
# Add banner message if one is provided.
# NOTE: if testing a new DPPD, though, you don't want this text to
# show, at least for the "page=whois" poor-man's Web Service
# invocations because this header will be included in that response,
# and bin/mail.py will be confused about a JOIN (etc.)'ing player's
# DPPD status.
# -----------------------------------------------------------------
if host.bannerHtml and (
not self.page or self.page[0].lower() != self.page[0]):
self.write(self.adaptToHTML(host.bannerHtml))
if self.game:
game = self.game.lower().replace('%23', '#')
self.game = Status().load(game)
if self.game and not self.game.name: self.game = None
if self.game: self.variant = self.game.status[0]
else:
self.write('<script>'
'alert("No such game (%s) can be found on this DPjudge.");'
"window.location.replace('%s%s');</script>" % (game,
host.dpjudgeURL, '/index.cgi' * host.needIndexWWW))
raise SystemExit
elif not self.variant:
dir = os.path.dirname(os.environ.get('SCRIPT_NAME',''))
dir = dir.split('/')[-1]
if dir and os.path.isdir(host.packageDir + '/' + dir):
self.variant = dir
if self.variant and not self.game:
module = __import__('DPjudge.variants.' + self.variant,
globals(), locals(), `self.variant`)
globals().update(vars(module))
if not self.page:
if not self.game: self.page = 'Index'
elif self.power and self.password: self.page = 'Status'
else: self.page = 'Login'
if self.power:
self.power = self.power.upper().replace('%23', '#')
if self.game:
try: self.power = [x for x in self.game.powers if x.name in
(self.power, '_' + self.power)][0]
except:
if self.power in ('MASTER', 'JUDGEKEEPER'):
self.power = Power(self.game, self.power)
if self.user:
try: self.user = int(self.user)
except: self.user = -1
# ---------------------------------------------------------------
# Values for pwdFlag:
# 0: Bad or no password or power
# 1: Valid DPPD user, but no power specified or not in control
# 2: Good enough to provide read-only access (omniscient)
# 3: Valid password for power (player is or controls power)
# 4: Game password (Master)
# 5: Host password (Judgekeeper)
# ---------------------------------------------------------------
self.pwdFlag = 0
if not self.password or not hasattr(self.power, 'name'): pass
elif self.password == host.judgePassword: self.pwdFlag = 5
elif self.game:
if self.password == self.game.password: self.pwdFlag = 4
elif self.power:
self.pwdFlag = self.power.isValidPassword(self.password)
if self.include(): raise SystemExit
self.write("<script>window.location.replace('%s');</script>" %
host.dpjudgeURL)
# ----------------------------------------------------------------------
def setdefault(self, var, val = ''):
return vars(self).setdefault(var, val)
# ----------------------------------------------------------------------
def get(self, var, val = ''):
return urllib.unquote(vars(self).get(var, val))
# ----------------------------------------------------------------------
def getint(self, var, val = None):
if not var in vars(self): return None
try: return vars(self).get(var)
except: return val
# ----------------------------------------------------------------------
def has(self, var):
return var in vars(self)
# ----------------------------------------------------------------------
def write(self, text = ''):
if not text:
print
return
# ------------------------------------------------------------
# Strip spaces in front for texts surrounded by triple quotes.
# ------------------------------------------------------------
lines = text.split('\n')
if not lines[-1].strip() and not lines[0].strip():
text = '\n'.join([x.strip() for x in lines[1:-1]])
try: print text.encode('latin-1')
except UnicodeDecodeError: print text
# ----------------------------------------------------------------------
def silent(self):
self.write = lambda x,y=0: 0
# ----------------------------------------------------------------------
def apprise(self, option, value):
try: value = value.name
except: pass
self.write('<input type=hidden name="%s" value="%s">' % (option, value))
# ----------------------------------------------------------------------
def comprise(self, options):
for option in options:
self.write('<input type=hidden name="%s">' % option)
# ----------------------------------------------------------------------
def surprise(self, option, fallBack):
try: self.apprise(option, vars(self)[option])
except: self.apprise(option, fallBack)
# ----------------------------------------------------------------------
def reprise(self, options):
for option in options:
try: self.apprise(option, vars(self)[option])
except: pass
# ----------------------------------------------------------------------
def convertPlainTextToHTML(self, text):
        return text.replace('-', '&minus;').replace(
            '<', '&lt;').replace('>', '&gt;')
# ----------------------------------------------------------------------
def isolateHTMLTag(self, text):
tag = ''
while 1:
slashes = text.split('>', 1)
if len(slashes) == 1: return [tag + text]
cuts = text.split('<', 1)
if len(cuts) == 1 or len(cuts[0]) > len(slashes[0]):
return [tag + slashes[0], slashes[1]]
slashes = self.isolateHTMLTag(cuts[1])
if len(slashes) == 1: return [tag + text]
tag += cuts[0] + '<' + slashes[0] + '>'
text = slashes[1]
# ----------------------------------------------------------------------
def adaptToHTML(self, text):
html = ''
while 1:
cuts = text.split('<', 1)
if len(cuts) == 1: break
slashes = self.isolateHTMLTag(cuts[1])
html += self.convertPlainTextToHTML(cuts[0])
if len(slashes) == 1:
html += '<'
text = cuts[1]
else:
html += '<' + slashes[0] + '>'
text = slashes[1]
return html + self.convertPlainTextToHTML(text)
# ----------------------------------------------------------------------
def addURLParam(self, url, param):
if not url: return ''
return url + ('?' in url and '&' or '?') + param
# ----------------------------------------------------------------------
def include(self, fileName = None, lims = ('<:', ':>'), data = None):
global page
if not (fileName or data): fileName = self.page
if self.variant: variant = '/variants/' + self.variant
else: variant = ''
if fileName:
# this next line causes problems for the DPPD urllib playerchecker
# print '<!-- including', fileName, '-->'
for subdir in (host.hostDir,
host.packageDir + variant, host.packageDir):
try: file = open(subdir + '/pages/' + fileName)
except: continue
data = file.read()
file.close()
break
else: return
pageURL = self.addURLParam(host.dpjudgeURL, 'page=')
if not host.dppdURL: dppdURL = ''
else:
dppdURL = host.dppdURL.split(',')[0]
if dppdURL not in (host.dpjudgeURL + host.dppdSubURL,
os.path.join(host.dpjudgeURL, host.dppdSubURL)):
dppdURL = self.addURLParam(dppdURL, 'dpjudge=' + host.dpjudgeID)
page, inCode = self, 0
while data:
where = data.find(lims[inCode])
if where < 0: stuff, data = data, ''
else: stuff, data = data[:where], data[where + len(lims[inCode]):]
stuff = (stuff
.replace('<URL>', host.dpjudgeURL)
.replace('<MAP>', host.gameMapURL)
.replace('<PAGE>', pageURL)
.replace('<WEB>', host.dpjudgeDir)
.replace('<ID>', host.dpjudgeID)
.replace('<MAIL>', host.dpjudge)
.replace('<KEEPER>', host.judgekeeper)
.replace('<PKG>', host.packageDir)
.replace('<DPPD>', dppdURL)
.replace('<POUCH>', 'http://www.diplom.org'))
inCode = not inCode
if inCode:
stuff = stuff.strip()
if stuff: self.write(stuff)
elif stuff[:1] != '=':
try:
exec stuff in globals()
except:
                    print('<!-- Exception while executing:\n' +
                        stuff.replace('<', '&lt;') + '-->')
raise
else: self.write(eval(stuff[1:]))
# --------------
# Template shown
# --------------
return 1
# ----------------------------------------------------------------------
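# ----------------------------------------------------------------------
# Illustrative sketch (not from the original source) of a template page
# that include() above could render. Text outside the '<:' ':>' delimiters
# is written out after the <URL>, <PAGE>, <MAIL>, etc. substitutions; code
# between the delimiters is exec'd (so it must start in column 0), and a
# segment beginning '<:=' is eval'd and its value written. The file name
# and markup below are hypothetical.
#
#   <a href="<PAGE>Login">Log in</a>
#   <:
#   if page.game: page.write('<h2>Game %s</h2>' % page.game.name)
#   :>
#   Questions? Mail <:='<a href="mailto:%s">%s</a>' % (host.judgekeeper, host.judgekeeper):>
# ----------------------------------------------------------------------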
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Artist.description'
db.delete_column('artists_artist', 'description')
# Deleting field 'Artist.birth'
db.delete_column('artists_artist', 'birth')
# Deleting field 'Artist.story'
db.delete_column('artists_artist', 'story')
# Deleting field 'Artist.death'
db.delete_column('artists_artist', 'death')
# Deleting field 'Artist.granted'
db.delete_column('artists_artist', 'granted')
# Deleting field 'Artist.externals'
db.delete_column('artists_artist', 'externals')
# Changing field 'Artist.slug'
db.alter_column('artists_artist', 'slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100))
# Adding unique constraint on 'Artist', fields ['slug']
db.create_unique('artists_artist', ['slug'])
def backwards(self, orm):
# Removing unique constraint on 'Artist', fields ['slug']
db.delete_unique('artists_artist', ['slug'])
# Adding field 'Artist.description'
db.add_column('artists_artist', 'description', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'Artist.birth'
db.add_column('artists_artist', 'birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'Artist.story'
db.add_column('artists_artist', 'story', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Adding field 'Artist.death'
db.add_column('artists_artist', 'death', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'Artist.granted'
db.add_column('artists_artist', 'granted', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'Artist.externals'
db.add_column('artists_artist', 'externals', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
# Changing field 'Artist.slug'
db.alter_column('artists_artist', 'slug', self.gf('django.db.models.fields.SlugField')(max_length=100, null=True))
models = {
'artists.artist': {
'Meta': {'ordering': "['lastname', 'firstname']", 'object_name': 'Artist'},
'display': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'firstname': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastname': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "'void'", 'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'artists.band': {
'Meta': {'ordering': "['name']", 'object_name': 'Band'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'externals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['artists.Artist']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['artists']
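# ----------------------------------------------------------------------
# Hedged usage note (not part of the generated migration): with South
# installed, a migration like this is applied with the standard management
# command, and rolled back by migrating to an earlier migration number
# (the '0003' below is a hypothetical example):
#
#   python manage.py migrate artists
#   python manage.py migrate artists 0003
# ----------------------------------------------------------------------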
|
import time
from pyspark.sql import SQLContext
from pyspark import SparkContext, SparkConf
import sys
from glob import glob
from pyspark.sql.types import *
from decimal import *
from pyspark.sql import *
# Receive as parameter the Scale Factor
SF=sys.argv[1]
conf = SparkConf().set('spark.memory.fraction', '1.0').set('spark.memory.storageFraction', '0.0').set('spark.sql.exchange.reuse', False)
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
sqlContext.clearCache()
# Ericsson
basePath = f"hdfs://IP:8020/tpch/tbl/tpc_h_SF_{SF}/"
# Local
#basePath = f"hdfs://192.168.213.23:9000/tpc_data/tpc_h_SF_{SF}/"
# Create table views
########################## CUSTOMER BEGIN
tablename = "customer"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[StructField("C_CUSTKEY", IntegerType(), True),
StructField("C_NAME", StringType(),True),
StructField("C_ADDRESS", StringType(),True),
StructField("C_NATIONKEY", IntegerType(),True),
StructField("C_PHONE", StringType(),True),
StructField("C_ACCTBAL", FloatType(),True),
StructField("C_MKTSEGMENT", StringType(),True),
StructField("C_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'C_CUSTKEY':int(x[0]),
'C_NAME':x[1],
'C_ADDRESS':x[2],
'C_NATIONKEY':int(x[3]),
'C_PHONE':x[4],
'C_ACCTBAL':float(x[5]),
'C_MKTSEGMENT':x[6],
'C_COMMENT':x[7]
})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## CUSTOMER END
########################## LINEITEM BEGIN
tablename = "lineitem"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[
StructField("L_ORDERKEY", IntegerType(), True),
StructField("L_PARTKEY", IntegerType(),True),
StructField("L_SUPPKEY", IntegerType(),True),
StructField("L_LINENUMBER", IntegerType(),True),
StructField("L_QUANTITY", FloatType(),True),
StructField("L_EXTENDEDPRICE", FloatType(),True),
StructField("L_DISCOUNT", FloatType(),True),
StructField("L_TAX", FloatType(),True),
StructField("L_RETURNFLAG", StringType(),True),
StructField("L_LINESTATUS", StringType(),True),
StructField("L_SHIPDATE", StringType(),True),
StructField("L_COMMITDATE", StringType(),True),
StructField("L_RECEIPTDATE", StringType(),True),
StructField("L_SHIPINSTRUCT", StringType(),True),
StructField("L_SHIPMODE", StringType(),True),
StructField("L_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'L_ORDERKEY':int(x[0]),
'L_PARTKEY': int(x[1]),
'L_SUPPKEY': int(x[2]),
'L_LINENUMBER': int(x[3]),
'L_QUANTITY':float(x[4]),
'L_EXTENDEDPRICE':float(x[5]),
'L_DISCOUNT':float(x[6]),
'L_TAX':float(x[7]),
'L_RETURNFLAG':x[8],
'L_LINESTATUS':x[9],
'L_SHIPDATE':x[10],
'L_COMMITDATE':x[11],
'L_RECEIPTDATE':x[12],
'L_SHIPINSTRUCT':x[13],
'L_SHIPMODE':x[14],
'L_COMMENT':x[15]
})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## LINEITEM END
########################## NATION BEGIN
tablename = "nation"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[
StructField("N_NATIONKEY", IntegerType(), True),
StructField("N_NAME", StringType(), True),
StructField("N_REGIONKEY", IntegerType(), True),
StructField("N_COMMENT", StringType(), True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'N_NATIONKEY':int(x[0]),
'N_NAME':x[1],
'N_REGIONKEY':int(x[2]),
'N_COMMENT':x[3]})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## NATION END
########################## ORDERS BEGIN
tablename = "orders"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[StructField("O_ORDERKEY", IntegerType(), True),
StructField("O_CUSTKEY", IntegerType(),True),
StructField("O_ORDERSTATUS", StringType(),True),
StructField("O_TOTALPRICE", FloatType(),True),
StructField("O_ORDERDATE", StringType(),True),
StructField("O_ORDERPRIORITY", StringType(),True),
StructField("O_CLERK", StringType(),True),
StructField("O_SHIPPRIORITY", IntegerType(),True),
StructField("O_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'O_ORDERKEY':int(x[0]),
'O_CUSTKEY':int(x[1]),
'O_ORDERSTATUS':x[2],
'O_TOTALPRICE':float(x[3]),
'O_ORDERDATE': x[4],
'O_ORDERPRIORITY':x[5],
'O_CLERK':x[6],
'O_SHIPPRIORITY':int(x[7]),
'O_COMMENT':x[8],
})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## ORDERS END
########################## PART BEGIN
tablename = "part"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[
StructField("P_PARTKEY", IntegerType(), True),
StructField("P_NAME", StringType(),True),
StructField("P_MFGR", StringType(),True),
StructField("P_BRAND", StringType(),True),
StructField("P_TYPE", StringType(),True),
StructField("P_SIZE", IntegerType(),True),
StructField("P_CONTAINER", StringType(),True),
StructField("P_RETAILPRICE", FloatType(),True),
StructField("P_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'P_PARTKEY':int(x[0]),
'P_NAME':x[1],
'P_MFGR':x[2],
'P_BRAND':x[3],
'P_TYPE':x[4],
'P_SIZE':int(x[5]),
'P_CONTAINER':x[6],
'P_RETAILPRICE':float(x[7]),
'P_COMMENT':x[8],
})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## PART END
########################## PARTSUPP BEGIN
tablename = "partsupp"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[
StructField("PS_PARTKEY", IntegerType(), True),
StructField("PS_SUPPKEY", IntegerType(),True),
StructField("PS_AVAILQTY", IntegerType(),True),
StructField("PS_SUPPLYCOST", FloatType(),True),
StructField("PS_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'PS_PARTKEY':int(x[0]),
'PS_SUPPKEY':int(x[1]),
'PS_AVAILQTY':int(x[2]),
'PS_SUPPLYCOST':float(x[3]),
'PS_COMMENT':x[4]
})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## PARTSUPP END
########################## REGION BEGIN
tablename = "region"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[StructField("R_REGIONKEY", IntegerType(), True),
StructField("R_NAME", StringType(),True),
StructField("R_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'R_REGIONKEY':int(x[0]),
'R_NAME':x[1],
'R_COMMENT':x[2]})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## REGION END
########################## SUPPLIER BEGIN
tablename = "supplier"
rdd = sc.textFile(basePath + f'{tablename}/*')
fields=[StructField("S_SUPPKEY", IntegerType(), True),
StructField("S_NAME", StringType(),True),
StructField("S_ADDRESS", StringType(),True),
StructField("S_NATIONKEY", IntegerType(),True),
StructField("S_PHONE", StringType(),True),
StructField("S_ACCTBAL", FloatType(),True),
StructField("S_COMMENT", StringType(),True)]
schema=StructType(fields)
df = rdd.\
map(lambda x: x.split("|")).\
map(lambda x: {
'S_SUPPKEY':int(x[0]),
'S_NAME':x[1],
'S_ADDRESS':x[2],
'S_NATIONKEY': int(x[3]),
'S_PHONE':x[4],
'S_ACCTBAL':float(x[5]),
'S_COMMENT':x[6],
})\
.toDF(schema)
df.createOrReplaceTempView(tablename)
########################## SUPPLIER END
# Start and time the query
start = time.time()
dataframe = sqlContext.sql("""
select
l_returnflag,
l_linestatus,
sum(l_quantity) as sum_qty,
sum(l_extendedprice) as sum_base_price,
sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
avg(l_quantity) as avg_qty,
avg(l_extendedprice) as avg_price,
avg(l_discount) as avg_disc,
count(*) as count_order
from
lineitem
where
l_shipdate <= date '1998-12-01' - interval '120' day
group by
l_returnflag,
l_linestatus
order by
l_returnflag,
l_linestatus
limit 1
""")
count_ = dataframe.count()
end = time.time()
query_ex_time=end-start
print("TIEMPO DE EJECUCION DE CONSULTA SQL ==========> ", query_ex_time," ",count_)
|
from setuptools import setup, find_packages
from patchworkdocker.meta import VERSION, DESCRIPTION, PACKAGE_NAME, EXECUTABLE_NAME
setup(
name=PACKAGE_NAME,
version=VERSION,
author="Colin Nolan",
author_email="cn580@alumni.york.ac.uk",
packages=find_packages(exclude=["tests"]),
install_requires=open("requirements.txt", "r").readlines(),
url="https://github.com/colin-nolan/patchwork-docker",
license="MIT",
description=DESCRIPTION,
long_description=open("README.md", "r").read(),
long_description_content_type="text/markdown",
entry_points={
"console_scripts": [
f"{EXECUTABLE_NAME}={PACKAGE_NAME}.cli:entrypoint"
]
},
zip_safe=True
)
|
"""
================
Disease Observer
================
This module contains tools for observing disease incidence and prevalence
in the simulation.
"""
from collections import Counter
import pandas as pd
from .utilities import (get_age_bins, get_prevalent_cases, get_state_person_time,
get_transition_count, TransitionString)
class DiseaseObserver:
"""Observes disease counts, person time, and prevalent cases for a cause.
By default, this observer computes aggregate susceptible person time
and counts of disease cases over the entire simulation. It can be
configured to bin these into age_groups, sexes, and years by setting
the ``by_age``, ``by_sex``, and ``by_year`` flags, respectively.
It also can record prevalent cases on a particular sample date each year,
though by default this is disabled. These will also be binned based on the
flags set for the observer. Additionally, the sample date is configurable
and defaults to July 1st of each year.
In the model specification, your configuration for this component should
be specified as, e.g.:
.. code-block:: yaml
configuration:
metrics:
{YOUR_DISEASE_NAME}_observer:
by_age: True
by_year: False
by_sex: True
sample_prevalence:
sample: True
date:
month: 4
day: 10
"""
configuration_defaults = {
'metrics': {
'disease_observer': {
'by_age': False,
'by_year': False,
'by_sex': False,
'sample_prevalence': {
'sample': False,
'date': {
'month': 7,
'day': 1,
}
},
}
}
}
def __init__(self, disease: str):
self.disease = disease
self.configuration_defaults = {
'metrics': {f'{disease}_observer': DiseaseObserver.configuration_defaults['metrics']['disease_observer']}
}
@property
def name(self):
return f'disease_observer.{self.disease}'
def setup(self, builder):
self.config = builder.configuration['metrics'][f'{self.disease}_observer']
self.clock = builder.time.clock()
self.age_bins = get_age_bins(builder)
self.counts = Counter()
self.person_time = Counter()
self.prevalence = Counter()
comp = builder.components.get_component(f'disease_model.{self.disease}')
self.states = comp.state_names
self.transitions = comp.transition_names
self.previous_state_column = f'previous_{self.disease}'
builder.population.initializes_simulants(self.on_initialize_simulants,
creates_columns=[self.previous_state_column])
columns_required = ['alive', f'{self.disease}', self.previous_state_column]
if self.config.by_age:
columns_required += ['age']
if self.config.by_sex:
columns_required += ['sex']
self.population_view = builder.population.get_view(columns_required)
builder.value.register_value_modifier('metrics', self.metrics)
# FIXME: The state table is modified before the clock advances.
# In order to get an accurate representation of person time we need to look at
# the state table before anything happens.
builder.event.register_listener('time_step__prepare', self.on_time_step_prepare)
builder.event.register_listener('collect_metrics', self.on_collect_metrics)
def on_initialize_simulants(self, pop_data):
self.population_view.update(pd.Series('', index=pop_data.index, name=self.previous_state_column))
def on_time_step_prepare(self, event):
pop = self.population_view.get(event.index)
for state in self.states:
# noinspection PyTypeChecker
state_person_time_this_step = get_state_person_time(pop, self.config, self.disease,
state, self.clock().year, event.step_size,
self.age_bins)
self.person_time.update(state_person_time_this_step)
# This enables tracking of transitions between states
prior_state_pop = self.population_view.get(event.index)
prior_state_pop[self.previous_state_column] = prior_state_pop[self.disease]
self.population_view.update(prior_state_pop)
if self._should_sample(event.time):
point_prevalence = get_prevalent_cases(pop, self.config.to_dict(), self.disease, event.time, self.age_bins)
self.prevalence.update(point_prevalence)
def on_collect_metrics(self, event):
pop = self.population_view.get(event.index)
for transition in self.transitions:
# noinspection PyTypeChecker
transition_counts_this_step = get_transition_count(pop, self.config, self.disease,
TransitionString(transition), event.time, self.age_bins)
self.counts.update(transition_counts_this_step)
def _should_sample(self, event_time: pd.Timestamp) -> bool:
"""Returns true if we should sample on this time step."""
should_sample = self.config.sample_prevalence.sample
if should_sample:
sample_date = pd.Timestamp(year=event_time.year, **self.config.sample_prevalence.date.to_dict())
should_sample &= self.clock() <= sample_date < event_time
return should_sample
def metrics(self, index, metrics):
metrics.update(self.counts)
metrics.update(self.person_time)
metrics.update(self.prevalence)
return metrics
def __repr__(self):
return "DiseaseObserver()"
|
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import stat
import shutil
from urlgrabber import progress
from mic import kickstart, msger
from mic.utils import fs_related, runner, misc
from mic.utils.partitionedfs import PartitionedMount
from mic.utils.errors import CreatorError, MountError
from baseimager import BaseImageCreator
class RawImageCreator(BaseImageCreator):
"""Installs a system into a file containing a partitioned disk image.
ApplianceImageCreator is an advanced ImageCreator subclass; a sparse file
is formatted with a partition table, each partition loopback mounted
    and the system installed into a virtual disk. The disk image can
subsequently be booted in a virtual machine or accessed with kpartx
"""
def __init__(self, creatoropts=None, pkgmgr=None, compress_image=None):
"""Initialize a ApplianceImageCreator instance.
This method takes the same arguments as ImageCreator.__init__()
"""
BaseImageCreator.__init__(self, creatoropts, pkgmgr)
self.__instloop = None
self.__imgdir = None
self.__disks = {}
self.__disk_format = "raw"
self._diskinfo = []
self.vmem = 512
self.vcpu = 1
self.checksum = False
self.appliance_version = None
self.appliance_release = None
self.compress_image = compress_image
#self.getsource = False
#self.listpkg = False
self._dep_checks.extend(["sync", "kpartx", "parted", "extlinux"])
def configure(self, repodata = None):
import subprocess
def chroot():
os.chroot(self._instroot)
os.chdir("/")
if os.path.exists(self._instroot + "/usr/bin/Xorg"):
subprocess.call(["/bin/chmod", "u+s", "/usr/bin/Xorg"],
preexec_fn = chroot)
BaseImageCreator.configure(self, repodata)
def _get_fstab(self):
s = ""
for mp in self.__instloop.mountOrder:
p = None
for p1 in self.__instloop.partitions:
if p1['mountpoint'] == mp:
p = p1
break
s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
'device': "UUID=%s" % p['uuid'],
'mountpoint': p['mountpoint'],
'fstype': p['fstype'],
'fsopts': "defaults,noatime" if not p['fsopts'] else p['fsopts']}
if p['mountpoint'] == "/":
for subvol in self.__instloop.subvolumes:
if subvol['mountpoint'] == "/":
continue
s += "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0\n" % {
'device': "/dev/%s%-d" % (p['disk'], p['num']),
'mountpoint': subvol['mountpoint'],
'fstype': p['fstype'],
'fsopts': "defaults,noatime" if not subvol['fsopts'] else subvol['fsopts']}
s += "devpts /dev/pts devpts gid=5,mode=620 0 0\n"
s += "tmpfs /dev/shm tmpfs defaults 0 0\n"
s += "proc /proc proc defaults 0 0\n"
s += "sysfs /sys sysfs defaults 0 0\n"
return s
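    # Illustrative output (values are hypothetical, not from the source): each
    # partition line follows the "%(device)s %(mountpoint)s %(fstype)s %(fsopts)s 0 0"
    # template above, e.g.
    #   UUID=3e6be9de-8139-11d1-9106-a43f08d823a6 / ext3 defaults,noatime 0 0
    # followed by the fixed devpts/tmpfs/proc/sysfs entries.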
def _create_mkinitrd_config(self):
"""write to tell which modules to be included in initrd"""
mkinitrd = ""
mkinitrd += "PROBE=\"no\"\n"
mkinitrd += "MODULES+=\"ext3 ata_piix sd_mod libata scsi_mod\"\n"
mkinitrd += "rootfs=\"ext3\"\n"
mkinitrd += "rootopts=\"defaults\"\n"
msger.debug("Writing mkinitrd config %s/etc/sysconfig/mkinitrd" \
% self._instroot)
os.makedirs(self._instroot + "/etc/sysconfig/",mode=644)
cfg = open(self._instroot + "/etc/sysconfig/mkinitrd", "w")
cfg.write(mkinitrd)
cfg.close()
def _get_parts(self):
if not self.ks:
raise CreatorError("Failed to get partition info, "
"please check your kickstart setting.")
# Set a default partition if no partition is given out
if not self.ks.handler.partition.partitions:
partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
args = partstr.split()
pd = self.ks.handler.partition.parse(args[1:])
if pd not in self.ks.handler.partition.partitions:
self.ks.handler.partition.partitions.append(pd)
# partitions list from kickstart file
return kickstart.get_partitions(self.ks)
def get_diskinfo(self):
if self._diskinfo:
return self._diskinfo
#get partition info from ks handler
parts = self._get_parts()
for i in range(len(parts)):
if parts[i].disk:
disk = parts[i].disk
else:
raise CreatorError("Failed to create disks, no --ondisk "
"specified in partition line of ks file")
if not parts[i].fstype:
raise CreatorError("Failed to create disks, no --fstype "
"specified in partition line of ks file")
size = parts[i].size * 1024L * 1024L
# If we have alignment set for partition we need to enlarge the
# drive, so that the alignment changes fits there as well
if parts[i].align:
size += parts[i].align * 1024L
found = False
for j in range(len(self._diskinfo)):
if self._diskinfo[j]['name'] == disk:
self._diskinfo[j]['size'] = self._diskinfo[j]['size'] + size
found = True
break
else:
found = False
if not found:
self._diskinfo.append({ 'name': disk, 'size': size })
return self._diskinfo
#
    # Actual implementation
#
def _mount_instroot(self, base_on = None):
self.__imgdir = self._mkdtemp()
parts = self._get_parts()
#create disk
for item in self.get_diskinfo():
msger.debug("Adding disk %s as %s/%s-%s.raw with size %s bytes" %
(item['name'], self.__imgdir, self.name, item['name'],
item['size']))
disk = fs_related.SparseLoopbackDisk("%s/%s-%s.raw" % (
self.__imgdir,
self.name,
item['name']),
item['size'])
self.__disks[item['name']] = disk
self.__instloop = PartitionedMount(self.__disks, self._instroot)
for p in parts:
self.__instloop.add_partition(int(p.size),
p.disk,
p.mountpoint,
p.fstype,
p.label,
fsopts = p.fsopts,
boot = p.active,
align = p.align)
self.__instloop.mount()
self._create_mkinitrd_config()
def _get_required_packages(self):
required_packages = BaseImageCreator._get_required_packages(self)
if not self.target_arch or not self.target_arch.startswith("arm"):
required_packages += ["syslinux", "syslinux-extlinux"]
return required_packages
def _get_excluded_packages(self):
return BaseImageCreator._get_excluded_packages(self)
def _get_syslinux_boot_config(self):
bootdevnum = None
rootdevnum = None
rootdev = None
for p in self.__instloop.partitions:
if p['mountpoint'] == "/boot":
bootdevnum = p['num'] - 1
elif p['mountpoint'] == "/" and bootdevnum is None:
bootdevnum = p['num'] - 1
if p['mountpoint'] == "/":
rootdevnum = p['num'] - 1
rootdev = "/dev/%s%-d" % (p['disk'], p['num'])
prefix = ""
if bootdevnum == rootdevnum:
prefix = "/boot"
return (bootdevnum, rootdevnum, rootdev, prefix)
def _create_syslinux_config(self):
#Copy splash
splash = "%s/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg" \
% self._instroot
if os.path.exists(splash):
shutil.copy(splash, "%s%s/splash.jpg" \
% (self._instroot, "/boot/extlinux"))
splashline = "menu background splash.jpg"
else:
splashline = ""
(bootdevnum, rootdevnum, rootdev, prefix) = \
self._get_syslinux_boot_config()
options = self.ks.handler.bootloader.appendLine
#XXX don't hardcode default kernel - see livecd code
syslinux_conf = ""
syslinux_conf += "prompt 0\n"
syslinux_conf += "timeout 1\n"
syslinux_conf += "\n"
syslinux_conf += "default vesamenu.c32\n"
syslinux_conf += "menu autoboot Starting %s...\n" % self.distro_name
syslinux_conf += "menu hidden\n"
syslinux_conf += "\n"
syslinux_conf += "%s\n" % splashline
syslinux_conf += "menu title Welcome to %s!\n" % self.distro_name
syslinux_conf += "menu color border 0 #ffffffff #00000000\n"
syslinux_conf += "menu color sel 7 #ffffffff #ff000000\n"
syslinux_conf += "menu color title 0 #ffffffff #00000000\n"
syslinux_conf += "menu color tabmsg 0 #ffffffff #00000000\n"
syslinux_conf += "menu color unsel 0 #ffffffff #00000000\n"
syslinux_conf += "menu color hotsel 0 #ff000000 #ffffffff\n"
syslinux_conf += "menu color hotkey 7 #ffffffff #ff000000\n"
syslinux_conf += "menu color timeout_msg 0 #ffffffff #00000000\n"
syslinux_conf += "menu color timeout 0 #ffffffff #00000000\n"
syslinux_conf += "menu color cmdline 0 #ffffffff #00000000\n"
versions = []
kernels = self._get_kernel_versions()
symkern = "%s/boot/vmlinuz" % self._instroot
if os.path.lexists(symkern):
v = os.path.realpath(symkern).replace('%s-' % symkern, "")
syslinux_conf += "label %s\n" % self.distro_name.lower()
syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
syslinux_conf += "\tlinux /vmlinuz\n"
syslinux_conf += "\tappend ro root=%s %s\n" % (rootdev, options)
syslinux_conf += "\tmenu default\n"
else:
for kernel in kernels:
for version in kernels[kernel]:
versions.append(version)
footlabel = 0
for v in versions:
shutil.copy("%s/boot/vmlinuz-%s" %(self._instroot, v),
"%s%s/vmlinuz-%s" % (self._instroot,
"/boot/extlinux/", v))
syslinux_conf += "label %s%d\n" \
% (self.distro_name.lower(), footlabel)
syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
syslinux_conf += "\tlinux vmlinuz-%s\n" % v
syslinux_conf += "\tappend ro root=%s %s\n" \
% (rootdev, options)
if footlabel == 0:
syslinux_conf += "\tmenu default\n"
                footlabel += 1
msger.debug("Writing syslinux config %s/boot/extlinux/extlinux.conf" \
% self._instroot)
cfg = open(self._instroot + "/boot/extlinux/extlinux.conf", "w")
cfg.write(syslinux_conf)
cfg.close()
def _install_syslinux(self):
i = 0
for name in self.__disks.keys():
loopdev = self.__disks[name].device
i =i+1
msger.debug("Installing syslinux bootloader to %s" % loopdev)
(bootdevnum, rootdevnum, rootdev, prefix) = \
self._get_syslinux_boot_config()
#Set MBR
mbrsize = os.stat("%s/usr/share/syslinux/mbr.bin" \
% self._instroot)[stat.ST_SIZE]
rc = runner.show(['dd',
'if=%s/usr/share/syslinux/mbr.bin' % self._instroot,
'of=' + loopdev])
if rc != 0:
raise MountError("Unable to set MBR to %s" % loopdev)
#Set Bootable flag
parted = fs_related.find_binary_path("parted")
rc = runner.quiet([parted,
"-s",
loopdev,
"set",
"%d" % (bootdevnum + 1),
"boot",
"on"])
#XXX disabled return code check because parted always fails to
#reload part table with loop devices. Annoying because we can't
#distinguish this failure from real partition failures :-(
if rc != 0 and 1 == 0:
raise MountError("Unable to set bootable flag to %sp%d" \
% (loopdev, (bootdevnum + 1)))
#Ensure all data is flushed to disk before doing syslinux install
runner.quiet('sync')
fullpathsyslinux = fs_related.find_binary_path("extlinux")
rc = runner.show([fullpathsyslinux,
"-i",
"%s/boot/extlinux" % self._instroot])
if rc != 0:
raise MountError("Unable to install syslinux bootloader to %sp%d" \
% (loopdev, (bootdevnum + 1)))
def _create_bootconfig(self):
#If syslinux is available do the required configurations.
if os.path.exists("%s/usr/share/syslinux/" % (self._instroot)) \
and os.path.exists("%s/boot/extlinux/" % (self._instroot)):
self._create_syslinux_config()
self._install_syslinux()
def _unmount_instroot(self):
if not self.__instloop is None:
self.__instloop.cleanup()
def _resparse(self, size = None):
return self.__instloop.resparse(size)
def _stage_final_image(self):
"""Stage the final system image in _outdir.
write meta data
"""
self._resparse()
if self.compress_image:
for imgfile in os.listdir(self.__imgdir):
if imgfile.endswith('.raw') or imgfile.endswith('bin'):
imgpath = os.path.join(self.__imgdir, imgfile)
misc.compressing(imgpath, self.compress_image)
if self.pack_to:
dst = os.path.join(self._outdir, self.pack_to)
msger.info("Pack all raw images to %s" % dst)
misc.packing(dst, self.__imgdir)
else:
msger.debug("moving disks to stage location")
for imgfile in os.listdir(self.__imgdir):
src = os.path.join(self.__imgdir, imgfile)
dst = os.path.join(self._outdir, imgfile)
msger.debug("moving %s to %s" % (src,dst))
shutil.move(src,dst)
self._write_image_xml()
def _write_image_xml(self):
imgarch = "i686"
if self.target_arch and self.target_arch.startswith("arm"):
imgarch = "arm"
xml = "<image>\n"
name_attributes = ""
if self.appliance_version:
name_attributes += " version='%s'" % self.appliance_version
if self.appliance_release:
name_attributes += " release='%s'" % self.appliance_release
xml += " <name%s>%s</name>\n" % (name_attributes, self.name)
xml += " <domain>\n"
# XXX don't hardcode - determine based on the kernel we installed for
# grub baremetal vs xen
xml += " <boot type='hvm'>\n"
xml += " <guest>\n"
xml += " <arch>%s</arch>\n" % imgarch
xml += " </guest>\n"
xml += " <os>\n"
xml += " <loader dev='hd'/>\n"
xml += " </os>\n"
i = 0
for name in self.__disks.keys():
xml += " <drive disk='%s-%s.%s' target='hd%s'/>\n" \
% (self.name,name, self.__disk_format,chr(ord('a')+i))
i = i + 1
xml += " </boot>\n"
xml += " <devices>\n"
xml += " <vcpu>%s</vcpu>\n" % self.vcpu
xml += " <memory>%d</memory>\n" %(self.vmem * 1024)
for network in self.ks.handler.network.network:
xml += " <interface/>\n"
xml += " <graphics/>\n"
xml += " </devices>\n"
xml += " </domain>\n"
xml += " <storage>\n"
if self.checksum is True:
for name in self.__disks.keys():
diskpath = "%s/%s-%s.%s" \
% (self._outdir,self.name,name, self.__disk_format)
disk_size = os.path.getsize(diskpath)
meter_ct = 0
meter = progress.TextMeter()
meter.start(size=disk_size,
text="Generating disk signature for %s-%s.%s" \
% (self.name,
name,
self.__disk_format))
xml += " <disk file='%s-%s.%s' use='system' format='%s'>\n"\
% (self.name,
name,
self.__disk_format,
self.__disk_format)
try:
import hashlib
m1 = hashlib.sha1()
m2 = hashlib.sha256()
except:
import sha
m1 = sha.new()
m2 = None
f = open(diskpath,"r")
while 1:
chunk = f.read(65536)
if not chunk:
break
m1.update(chunk)
if m2:
m2.update(chunk)
meter.update(meter_ct)
meter_ct = meter_ct + 65536
sha1checksum = m1.hexdigest()
xml += " <checksum type='sha1'>%s</checksum>\n" \
% sha1checksum
if m2:
sha256checksum = m2.hexdigest()
xml += " <checksum type='sha256'>%s</checksum>\n" \
% sha256checksum
xml += " </disk>\n"
else:
for name in self.__disks.keys():
xml += " <disk file='%s-%s.%s' use='system' format='%s'/>\n"\
%(self.name,
name,
self.__disk_format,
self.__disk_format)
xml += " </storage>\n"
xml += "</image>\n"
msger.debug("writing image XML to %s/%s.xml" %(self._outdir, self.name))
cfg = open("%s/%s.xml" % (self._outdir, self.name), "w")
cfg.write(xml)
cfg.close()
|
import logging
import os
from sklearn import tree
class DecisionTree():
"""This class represents a decision tree classifier for IoT devices. It is
used to train a DT using the number of unique ips as a feature between an
IoT device and a non-IoT.
It parses the .dat files produced in other analysis.
It expects the following input parameters:
- sampling_seconds: the sampling frequency
- memory_seconds: the length of the memory
- features_lag: the number of consecutive time slots used to generate the
features
- dt_depth: the maximum depth of the decision tree
DAT files must be organized using the following directory structure:
trace/IoT/TIMESTAMP_IP_(sampling_seconds)_(memory_seconds).dat
trace/non-IoT/TIMESTAMP_IP_(sampling_seconds)_(memory_seconds).dat
It is not mandatory to provide traffic traces. In that case, the default
answer is having a 50% probability for being IoT and 50% for not being
as such.
"""
IOT_CLASS_ID = 1
NON_IOT_CLASS_ID = 0
DEFAULT_IOT_TRACE_PATH = "trace/IoT"
DEFAULT_NON_IOT_TRACE_PATH = "trace/non-IoT"
def __init__(self, sampling_seconds, memory_seconds, features_lag,
dt_depth=None):
print "Training the decision tree. Please wait..."
self._logger = logging.getLogger(__name__)
self._dt_depth = dt_depth
self.sampling_seconds = sampling_seconds
self.memory_seconds = memory_seconds
self.features_lag = features_lag
self._num_iot_definitions = 0
self._num_non_iot_definitions = 0
self._dt = None
self._train()
self._iot_class = None
if (self._dt is not None):
for i in range(len(self._dt.classes_)):
if (self._dt.classes_[i] == DecisionTree.IOT_CLASS_ID):
self._iot_class = i
break
def _train(self):
(X, Y) = self._generate_data_samples()
info = ("Training decision tree. Sampling seconds %f"
" - Memory seconds %f - Features lag %f - Max depth %s. "
"Number of samples: %d")
self._logger.info(info % (self.sampling_seconds, self.memory_seconds,
self.features_lag, self._dt_depth, len(X)))
if (len(X) > 0):
self._dt = tree.DecisionTreeClassifier(max_depth=self._dt_depth)
self._dt.fit(X, Y)
def _generate_data_samples(self):
X_iot = self._parse_dat_files(DecisionTree.DEFAULT_IOT_TRACE_PATH)
X_non_iot = self._parse_dat_files(DecisionTree.DEFAULT_NON_IOT_TRACE_PATH)
self._num_iot_definitions = len(X_iot)
self._num_non_iot_definitions = len(X_non_iot)
self._logger.info("Loaded %d definitions for IoT devices" % self._num_iot_definitions)
self._logger.info("Loaded %d definitions for non-IoT devices" % self._num_non_iot_definitions)
Y_iot = [DecisionTree.IOT_CLASS_ID] * len(X_iot)
Y_non_iot = [DecisionTree.NON_IOT_CLASS_ID] * len(X_non_iot)
X = X_iot
Y = Y_iot
X.extend(X_non_iot)
Y.extend(Y_non_iot)
return (X, Y)
def _parse_dat_files(self, folder):
files = self._get_dat_files_in_folder(folder)
features_array = []
for f in files:
feature = [-1] * self.features_lag
with open(f, 'rb') as dat_file:
for l in dat_file.readlines():
feature.append(int(l))
feature.pop(0)
features_array.append(list(feature))
return features_array
def _get_dat_files_in_folder(self, folder):
file_paths = []
file_name_ending = "_%d_%d.dat" % (self.sampling_seconds, self.memory_seconds)
for root, subdirs, files in os.walk(folder):
for f in files:
if (f.endswith(file_name_ending)):
file_paths.append(os.path.join(root, f))
return file_paths
def is_iot(self, data):
return self.iot_probability(data) >= 0.5
def iot_probability(self, data):
if (self._dt is not None):
if (self._iot_class is None):
return 0
else:
return self._dt.predict_proba(data)[0][self._iot_class]
else:
return 0.5
def get_num_iot_definitions(self):
return self._num_iot_definitions
def get_num_non_iot_definitions(self):
return self._num_non_iot_definitions
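# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module), assuming .dat files
# laid out as described in the class docstring (trace/IoT/..., trace/non-IoT/...).
# The parameter values and the sample feature vector are hypothetical.
#
#   dt = DecisionTree(sampling_seconds=60, memory_seconds=600, features_lag=3)
#   sample = [[4, 7, 5]]                 # unique-IP counts for 3 consecutive slots
#   print dt.iot_probability(sample)     # probability of being an IoT device
#   print dt.is_iot(sample)              # True when that probability >= 0.5
# ---------------------------------------------------------------------------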
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
from bs4 import BeautifulSoup
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/documents', "https://www.googleapis.com/auth/drive"]
def read_paragraph_element(element):
"""Returns the text in the given ParagraphElement.
Args:
element: a ParagraphElement from a Google Doc.
"""
text_run = element.get('textRun')
if not text_run:
return ''
return text_run.get('content')
def read_strucutural_elements(elements):
"""Recurses through a list of Structural Elements to read a document's text where text may be
in nested elements.
Args:
elements: a list of Structural Elements.
"""
text = ''
for value in elements:
if 'paragraph' in value:
elements = value.get('paragraph').get('elements')
for elem in elements:
text += read_paragraph_element(elem)
elif 'table' in value:
# The text in table cells are in nested Structural Elements and tables may be
# nested.
table = value.get('table')
for row in table.get('tableRows'):
cells = row.get('tableCells')
for cell in cells:
text += read_strucutural_elements(cell.get('content'))
elif 'tableOfContents' in value:
# The text in the TOC is also in a Structural Element.
toc = value.get('tableOfContents')
text += read_strucutural_elements(toc.get('content'))
return text
def main():
"""Shows basic usage of the Docs API.
Prints the title of a sample document.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('docs', 'v1', credentials=creds)
vols = [
# 1,
# 2,
3,
# 4,
# 5, 6, 7, 8, 9, 10, 11,
# 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
# 16
]
with open("data/ids.json") as f:
ids = json.load(f)
itaiji = {}
import csv
with open('../001_scripto/data2/itaiji.csv', 'r') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
key = row[1]
text = row[2]
elements = text.split(" ")
for e in elements:
itaiji[e] = key
for vol in vols:
print(vol)
vol = str(vol).zfill(2)
        with open('/Users/nakamurasatoru/git/d_genji/kouigenjimonogatari.github.io/tei/'+vol+'.xml') as doc:
            soup = BeautifulSoup(doc, "xml")  # specify the parser as the second argument
gid = ids[vol]
document = service.documents().get(documentId=gid).execute()
doc_content = document.get('body').get('content')
text = read_strucutural_elements(doc_content)
for key in itaiji:
text = text.replace(key, itaiji[key])
text = text.replace("(か)", "")
text = text.replace("(む)", "")
# print(text)
texts = text.split("\n")
#-----------
for text in texts:
tmp = text.split(" ")
id = tmp[0]
uri = "https://w3id.org/kouigenjimonogatari/api/items/"+id+".json"
if len(tmp) != 2:
print("text", text)
continue
t = tmp[1]
t = t.replace("[", "<anchor corresp='https://genji.dl.itc.u-tokyo.ac.jp/data/tei/yosano/"+vol+".xml#").replace("]", "'/>")
ans = '<seg corresp="'+uri+'">'+t+'</seg>'
soup.find("seg", {"corresp" : uri}).replace_with(BeautifulSoup(ans,'xml'))
f = open("data/gd/"+vol+".xml", "w")
f.write(soup.prettify())
f.close()
if __name__ == '__main__':
main()
|
import urllib.request
# urllib.request.urlretrieve('http://www.hellobi.com',filename='1.html') # fetch the page and save it to a file
# urllib.request.urlcleanup() # clear the cache left by urlretrieve
# file=urllib.request.urlopen('http://www.hellobi.com',timeout=1) # open the target URL; timeout is the limit in seconds
# print(file.getcode()) # get the HTTP status code of the response
# print(file.geturl()) # get the URL that was actually fetched
for i in range(10):
try:
file=urllib.request.urlopen('http://yum.iqianyue.com',timeout=1)
        data = file.read()
        print(len(data))
except Exception as e:
print(str(e))
|
from datetime import datetime
from constants import IDLE
import logging
from utils import clamp
from rules_manager import RulesManager
from history_manager import HistoryManager
from application_definition import ApplicationDefinition
class AutoScaler(object):
"""
The source of the scaling decision.
"""
def __init__(self, marathon_client, logger=None, dd_client=None, cli_args=None):
self.marathon_client = marathon_client
self.logger = logger or logging.getLogger(__name__)
self.dd_client = dd_client
self.enforce_version_match = False
self.hm = HistoryManager(dd_client=dd_client)
if cli_args is not None:
self.enforce_version_match = cli_args.enforce_version_match
def scale(self, app_def, rule_manager):
""" Take scale action
:param app_def: dict of marathon application settings
:param rule_manager: object of scaling properties.
:return: marathon response
"""
if not app_def.is_app_participating:
return
scale_factor = int(rule_manager.last_triggered_criteria.get("scale_factor"))
min_instances = int(rule_manager.min_instances)
max_instances = int(rule_manager.max_instances)
scale_to = app_def.instances + scale_factor
scale_to_size = clamp(scale_to, min_instances, max_instances)
if app_def.instances == scale_to_size:
msg = "{app_name}: application already scaled to {size}"
self.logger.info(msg.format(app_name=app_def.app_name,
size=scale_to_size))
return
self.marathon_client.scale_app(app_def.id, scale_to_size)
msg = "{app_name}: scaled to {size}"
self.logger.info(msg.format(app_name=app_def.app_name,
size=scale_to_size))
def decide(self, app_metrics_summary):
"""
The decision-maker of the autoscaler.
:param app_metrics_summary: dict of app definitions and metrics
:return: None
"""
self.logger.info("Decision process beginning.")
app_scale_recommendations = {}
for app, metrics_summary in app_metrics_summary.items():
app_def = ApplicationDefinition(metrics_summary.get("application_definition"))
rm = RulesManager(app_def)
if rm.is_app_participating():
vote = 0
scale_factor = 0
cpu = metrics_summary.get("cpu_avg_usage")
mem = metrics_summary.get("memory_avg_usage")
metrics = dict(cpu=cpu,
mem=mem)
rm.trigger_rules(metrics)
if rm.last_triggered_criteria:
scale_factor = int(rm.last_triggered_criteria.get("scale_factor"))
vote = 1 if scale_factor > 0 else -1
app_scale_recommendations[app] = dict(vote=vote,
checksum=app_def.version,
timestamp=datetime.now(),
rule=rm.last_triggered_rule)
info_msg = "{app_name}: vote: {vote} ; scale_factor requested: {scale_factor}"
self.logger.info(info_msg.format(app_name=app_def.app_name,
vote=vote,
scale_factor=scale_factor))
# Check if app is participating
# Check if app is ready
# Check if app instances is greater than or equal to min and less than max
if (rm.is_app_ready() and
rm.is_app_within_min_or_max() and
rm.last_triggered_criteria):
tolerance_reached = self.hm.tolerance_reached(app,
rm.last_triggered_criteria.get("tolerance"),
vote)
within_backoff = self.hm.within_backoff(app,
rm.last_triggered_criteria.get("backoff"),
vote)
                    if vote != IDLE and tolerance_reached and not within_backoff:
self.logger.info("{app}: Decision made: Scale.".format(app=app_def.app_name))
app_scale_recommendations[app]["decision"] = vote
self.scale(app_def, rm)
elif vote == IDLE:
app_scale_recommendations[app]["decision"] = IDLE
self.logger.info("{app}: Decision made: No Change.".format(app=app_def.app_name))
self.hm.add_to_perf_tail(app_scale_recommendations)
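# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original module) of the bounding
# logic in scale() above: with app_def.instances == 4, scale_factor == +2,
# min_instances == 2 and max_instances == 5, scale_to is 6 and
# clamp(6, 2, 5) yields 5, so the app is scaled to 5 instances; a repeat run
# at 5 instances then hits the "already scaled" branch and returns early.
# ---------------------------------------------------------------------------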
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 17:31:36 2020
@author: white
"""
import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
import scipy.constants as sc
import scipy.special as scp
import timeit
start = timeit.default_timer()
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot2grid((1,3), (0,0), colspan=2,rowspan=2)
ax2 = plt.subplot2grid((1,3), (0,2) )
ax1.set_axisbelow(True)
# Turn on the minor TICKS, which are required for the minor GRID
ax1.minorticks_on()
# Customize the major grid
ax1.grid(which='major', linestyle='-', linewidth='0.5', color='red')
# Customize the minor grid
ax1.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
E0 = 900
Beta = 0.55
RF = 5e6 # EOM's RF input
AOM = 335e6 # "AOM"'s detuning
'''
I M P O R T E D C S V O F C A V I T Y
'''
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
''' E O M & F r e q D u b t i m e '''
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
G = 38.11e6
wab = 2*np.pi*384.23e12 # frequency of the transition
beta = np.linspace(0,4,80)
def EOM(RF, Beta): # [][1=Freq 0=E],AOM
' [0,1,2,3,4] = Center High1stSideBand Low1stSideBand High2ndSideBand... '
Ei0 = scp.jv(0,Beta)
Ei1 = scp.jv(1,Beta)
Ei2 = scp.jv(2,Beta)
Ei3 = scp.jv(3,Beta)
F0 = wab/2
Fp1, Fm1 = wab/2+RF, wab/2-RF
Fp2, Fm2 = wab/2+2*RF, wab/2-2*RF
Fp3, Fm3 = wab/2+3*RF, wab/2-3*RF
return [Ei0,F0],[Ei1,Fp1],[-Ei1,Fm1],[Ei2,Fp2],[Ei2,Fm2],[Ei3,Fp3],[-Ei3,Fm3]
# -- EOM[band][E,F] --
ax2.plot(beta, abs(EOM(RF, beta)[0][0]),linewidth=9,c='darkviolet')
ax2.plot(beta, abs(EOM(RF, beta)[2][0]),linewidth=7,c='mediumorchid')
ax2.plot(beta, abs(EOM(RF, beta)[4][0]),linewidth=5,c='violet')
ax2.plot(beta, abs(EOM(RF, beta)[6][0]),linewidth=3,c='indigo')
ax2.axvline(Beta,c='k', linestyle='dashed')
ax2.axhline(abs(EOM(RF, Beta)[0][0]),linewidth=8,c='darkviolet', linestyle='dotted')
ax2.axhline(abs(EOM(RF, Beta)[2][0]),linewidth=6,c='mediumorchid', linestyle='dotted')
ax2.axhline(abs(EOM(RF, Beta)[4][0]),linewidth=4,c='violet', linestyle='dotted')
ax2.axhline(abs(EOM(RF, Beta)[6][0]),linewidth=3,c='indigo', linestyle='dotted')
# Freq and Efield together
A0 = [EOM(RF, Beta)[0][1],EOM(RF, Beta)[0][0]]
A1p = [EOM(RF, Beta)[1][1],EOM(RF, Beta)[1][0]]
A1m = [EOM(RF, Beta)[2][1],EOM(RF, Beta)[2][0]]
A2p = [EOM(RF, Beta)[3][1],EOM(RF, Beta)[3][0]]
A2m = [EOM(RF, Beta)[4][1],EOM(RF, Beta)[4][0]]
A3p = [EOM(RF, Beta)[5][1],EOM(RF, Beta)[5][0]]
A3m = [EOM(RF, Beta)[6][1],EOM(RF, Beta)[6][0]]
# Adding and multiplying all permutations
# abcde = 1st thing 0,1p.. = 2nd thing
e0 = [A0[0]+A0[0], A0[1]*A0[1]]
e1p = [A0[0]+A1p[0], A0[1]*A1p[1]]
e1m = [A0[0]+A1m[0], A0[1]*A1m[1]]
e2p = [A0[0]+A2p[0], A0[1]*A2p[1]]
e2m = [A0[0]+A2m[0], A0[1]*A2m[1]]
e3p = [A0[0]+A3p[0], A0[1]*A3p[1]]
e3m = [A0[0]+A3m[0], A0[1]*A3m[1]]
EE = [e0,e1p,e1m,e2p,e2m,e3p,e3m]
a0 = [A1p[0]+A0[0], A1p[1]*A0[1]]
a1p = [A1p[0]+A1p[0], A1p[1]*A1p[1]]
a1m = [A1p[0]+A1m[0], A1p[1]*A1m[1]]
a2p = [A1p[0]+A2p[0], A1p[1]*A2p[1]]
a2m = [A1p[0]+A2m[0], A1p[1]*A2m[1]]
a3p = [A1p[0]+A3p[0], A1p[1]*A3p[1]]
a3m = [A1p[0]+A3m[0], A1p[1]*A3m[1]]
AA = [a0,a1p,a1m,a2p,a2m,a3p,a3m]
b0 = [A1m[0]+A0[0], A1m[1]*A0[1]]
b1p = [A1m[0]+A1p[0], A1m[1]*A1p[1]]
b1m = [A1m[0]+A1m[0], A1m[1]*A1m[1]]
b2p = [A1m[0]+A2p[0], A1m[1]*A2p[1]]
b2m = [A1m[0]+A2m[0], A1m[1]*A2m[1]]
b3p = [A1m[0]+A3p[0], A1m[1]*A3p[1]]
b3m = [A1m[0]+A3m[0], A1m[1]*A3m[1]]
BB = [b0,b1p,b1m,b2p,b2m,b3p,b3m]
c0 = [A2p[0]+A0[0], A2p[1]*A0[1]]
c1p = [A2p[0]+A1p[0], A2p[1]*A1p[1]]
c1m = [A2p[0]+A1m[0], A2p[1]*A1m[1]]
c2p = [A2p[0]+A2p[0], A2p[1]*A2p[1]]
c2m = [A2p[0]+A2m[0], A2p[1]*A2m[1]]
c3p = [A2p[0]+A3p[0], A2p[1]*A3p[1]]
c3m = [A2p[0]+A3m[0], A2p[1]*A3m[1]]
CC = [c0,c1p,c1m,c2p,c2m,c3p,c3m]
d0 = [A2m[0]+A0[0], A2m[1]*A0[1]]
d1p = [A2m[0]+A1p[0], A2m[1]*A1p[1]]
d1m = [A2m[0]+A1m[0], A2m[1]*A1m[1]]
d2p = [A2m[0]+A2p[0], A2m[1]*A2p[1]]
d2m = [A2m[0]+A2m[0], A2m[1]*A2m[1]]
d3p = [A2m[0]+A3p[0], A2m[1]*A3p[1]]
d3m = [A2m[0]+A3m[0], A2m[1]*A3m[1]]
DD = [d0,d1p,d1m,d2p,d2m,d3p,d3m]
f0 = [A3m[0]+A0[0], A3p[1]*A0[1]]
f1p = [A3m[0]+A1p[0], A3p[1]*A1p[1]]
f1m = [A3m[0]+A1m[0], A3p[1]*A1m[1]]
f2p = [A3m[0]+A2p[0], A3p[1]*A2p[1]]
f2m = [A3m[0]+A2m[0], A3p[1]*A2m[1]]
f3p = [A3m[0]+A3p[0], A3p[1]*A3p[1]]
f3m = [A3m[0]+A3m[0], A3p[1]*A3m[1]]
FF = [f0,f1p,f1m,f2p,f2m,f3p,f3m]
g0 = [A3m[0]+A0[0], A3m[1]*A0[1]]
g1p = [A3m[0]+A1p[0], A3m[1]*A1p[1]]
g1m = [A3m[0]+A1m[0], A3m[1]*A1m[1]]
g2p = [A3m[0]+A2p[0], A3m[1]*A2p[1]]
g2m = [A3m[0]+A2m[0], A3m[1]*A2m[1]]
g3p = [A3m[0]+A3p[0], A3m[1]*A3p[1]]
g3m = [A3m[0]+A3m[0], A3m[1]*A3m[1]]
GG = [g0,g1p,g1m,g2p,g2m,g3p,g3m]
for j in range(7):
ax1.scatter(EE[j][0]-AOM ,EE[j][1],s=100,alpha=0.3)
ax1.scatter(AA[j][0]-AOM ,AA[j][1],s=100,alpha=0.3)
ax1.scatter(BB[j][0]-AOM ,BB[j][1],s=100,alpha=0.3)
ax1.scatter(CC[j][0]-AOM ,CC[j][1],s=100,alpha=0.3)
ax1.scatter(DD[j][0]-AOM ,DD[j][1],s=100,alpha=0.3)
ax1.scatter(FF[j][0]-AOM ,FF[j][1],s=100,alpha=0.3)
ax1.scatter(GG[j][0]-AOM ,GG[j][1],s=100,alpha=0.3)
BBig = np.array([a0,a1p,a1m,a2p,a2m,b0,b1p,b1m,b2p,b2m,c0,c1p,c1m,c2p,c2m,d0,d1p,d1m,d2p,d2m,e0,e1p,e1m,e2p,e2m,f0,f1p,f1m,f2p,f2m,f3p,f3m,g0,g1p,g1m,g2p,g2m,g3p,g3m])
Flist = BBig[:,0]
XX = [] # Index of frequency values
for l in range(len(BBig)):
'''test for dups - get index symm - choose 1st'''
x = np.where(BBig[:,0] == Flist[l])[0]
xx = x[0]
XX.append(x[0])
XX_0 = list(dict.fromkeys(XX)) # Dup Prepended list
XX_ = np.sort(XX_0)
Fplot_ = []
for i in range(len(XX_)):
'''Takes the 1st index & gives freq'''
Fplot_.append(Flist[XX_[i]])
Fplot = np.subtract(Fplot_,AOM)
print('Unique frequency indices:', XX_)
Jsum = []
X0 = []
X0_ = []
for I in range(len(Flist)):
x0 = np.where(BBig[:,0] == Flist[I])[0]
X0.append(x0)
u =[]
for i in range(len(X0)):
U=[]
u = (X0[i])
for j in range(len(X0[i])):
U.append(u[j])
X0_.append(U)
X0_U = np.unique(X0_) # De-duplicated list of index groups, one per distinct frequency
Yy=[]
for j in range(len(Fplot)):
for l in range(len(X0_U[j])):
y = BBig[X0_U[j][l]][1]
Yy.append([y])
Y = np.sum(Yy)
Jsum.append(Y)
Yy=[]
ax1.scatter(Fplot,np.square(Jsum),s=1500,c='y',alpha=0.4)
ax1.scatter(Fplot,Jsum,s=200,c='G',alpha=0.3)
ax1.set_xlabel('MHz',size=10)
ax1.set_ylabel('Electric Field / E0',size=15)
ax2.set_xlabel(r'$ \beta $',size=22)
ax2.set_ylabel('Bessel Value',size=13)
ax1.grid(which='major')
#ax1.plot(Fplot,np.square(Jsum),c='y')
for i in range(len(Fplot)):
ax1.axvline(Fplot[i],alpha=0.3)
#ax1.axvline(wab, c='pink',linewidth=9)
#ax1.set_xlim(left=min(Fplot)-2*RF, right=wab+RF*2)
ax1.set_xlim(left=2*RF+min(Fplot)-5*RF, right=2*RF+min(Fplot)+RF*12)
####################################################
import scipy.constants as sc
v = np.linspace(0,1000,50)
Xi = 25
E0 = 130
""" Physical & Atomic Constants """
kb=sc.Boltzmann
mu0 = sc.mu_0
muB = 9.2740099*10**-24
u=sc.proton_mass
hbar=sc.hbar
c=sc.c
pi=np.pi
e=sc.e
M=87*u
wab=2*pi*384.23e12
G=38.11e6
Z =337
dip= 3.485e-29
##d = G*Xi
''' Derived quantities '''
Rabiii = dip*max(Jsum)/hbar
IoIs = 2*Rabiii**2/G**2
IrE = c*8.85e-12/2*E0**2/10000 # This is W/cm^2
d = np.subtract(wab, Fplot)
w = wab - d
i=0
zs=[]
vs=[]
ts=[]
a=0
L = len(Fplot)
#print(mu0)
#print()
#Ir=power/(a**2*pi)
v = np.linspace(0,600,100)
def dv(v,F,D):
Lambda=2*pi*c/F
k = 2*pi/Lambda
'Incremental Acceleration'
O = F/(2*pi*c)
c1 =1+IoIs+4*D**2/G**2
c2 = O*8/G**2*D
c3 = 4*O**2/G**2
rhoaa = -IoIs**2/(c1+c2*v+c3*v**2) + IoIs**2/(c1-c2*v+c3*v**2)
return rhoaa*hbar*k*G/M
for i in range(L):
ax1.plot(v, dv(v,Fplot[i],d[i]))
S = []
for i in range(L):
S.append(dv(v,Fplot[i],d[i]))
S = np.reshape(S,(len(Fplot),len(v)))
#print(S)
Ss = []
for i in range(L):
Sim = np.argmax(S[i])
Ss = np.append(Ss,Sim)
Vv = []
for i in range(L):
Vv.append(v[int(Ss[i])])
for i in range(L):
ax1.axvline(Vv[i])
ax1.grid()
plt.title('Force on Rb Atom v Velocity for different detunings', fontsize=12)
plt.ylabel('∝ Force', fontsize=12)
plt.xlabel('Velocity m/s', fontsize=12)
ax1.axhline(0,c='k')
print(Fplot)
plt.show()
|
# Python script that builds time-series Prophet models (72) for each merchant user
# and predicts the total number of product sales. Tests results for the previous month
# and predicts for the coming month.
# The script packages the results in a CSV file and sends it via email.
import warnings
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from fbprophet import Prophet
from psycopg2 import connect
import copy
from snap_con import get_db_connection
import matplotlib.pyplot as plt
import smtplib
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
warnings.filterwarnings('ignore')
pd.options.mode.chained_assignment = None
pd.set_option('display.float_format', lambda x: '%.3f' % x)
def query_data(db, query):
con = connect(get_db_connection(db))
result = pd.read_sql(query, con)
# print(f"Data has {result.shape[0]} rows & {result.shape[1]} columns.")
return result
# Completed end date: where to truncate data (so we don't have incomplete months)
def get_data(compl_end_dt):
# moving_window_query = """
# SELECT ca.ae_user,DATE_TRUNC('months',complete_ts::DATE)::DATE AS month,COUNT(*) AS origs
# FROM snapanalytics.curated_applications ca
# LEFT JOIN snapanalytics.merchant m ON m.merchant_id = ca.merchant_id
# WHERE complete_ts < '{}'
# AND complete_ts::DATE > (m.application_signed_dt::DATE + INTERVAL '90 Days')
# AND merchant_status != 'INACTIVE'
# GROUP BY ca.ae_user,month
# order by ae_user, month
# """.format(compl_end_dt)
fixed_window_query = """
SELECT ca.ae_user,DATE_TRUNC('months',complete_ts::DATE)::DATE AS month,COUNT(*) AS origs
FROM snapanalytics.curated_applications ca
LEFT JOIN snapanalytics.merchant m ON m.merchant_id = ca.merchant_id
WHERE complete_ts < '{}'
AND complete_ts::DATE >= (DATE_TRUNC('months',m.application_signed_dt::DATE + INTERVAL '90 Days')::DATE + INTERVAL '1 month')
AND merchant_status != 'INACTIVE'
GROUP BY ca.ae_user,month
""".format(compl_end_dt)
curated_apps = pd.DataFrame(query_data('aws', fixed_window_query))
curated_apps['month'] = pd.to_datetime(curated_apps['month'], utc=True).values.astype('datetime64[M]')
curated_apps.rename(axis=1, mapper={'month': 'ds', 'origs': 'y'}, inplace=True)
curated_apps.sort_values(by=['ae_user', 'ds'], axis=0, inplace=True)
return curated_apps
def months_between(d1, d2):
# Inaccurate as distance gets larger between months
d1 = datetime.strptime(d1, "%Y-%m-%d")
d2 = datetime.strptime(d2, "%Y-%m-%d")
return int(np.floor((abs((d2 - d1).days)+2)/30))
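# Quick illustration of the approximation noted above (not part of the pipeline):
#   months_between('2020-01-01', '2020-03-01')  -> 2    (exact)
#   months_between('2010-01-01', '2020-01-01')  -> 121  (true gap is 120 months)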
def firstDay(curr_dt=None):
    curr_dt = curr_dt or datetime.now()  # evaluate "now" at call time, not at import time
    return str(curr_dt.year) + '-' + str(curr_dt.month) + '-' + '01'
def reformatDateString(dt, ft):
return datetime.strftime(datetime.strptime(dt, '%Y-%m-%d'), ft)
def holidays():
# Tax Season (Feb) and Holiday Season (Dec)
holidays = pd.DataFrame({'holiday': 'Holiday_Season'
, 'ds': pd.period_range('2012-12-01', '2050-12-01', freq='M')
, 'lower_window': 0
, 'upper_window': 0
})
holidays['month'] = getattr(holidays['ds'].dt, 'month')
holidays = holidays[(holidays['month'] == 12) | (holidays['month'] == 2)]
holidays[holidays['month'] == 2] = holidays[holidays['month'] == 2].replace({'Holiday_Season': 'Tax Season'})
holidays.drop(['month'], axis=1, inplace=True)
holidays['ds'] = holidays['ds'].values.astype('datetime64[M]')
return holidays
# Returns data(from df) for a given name
def get_name_data(df, name):
return df[df['ae_user'] == name]
# Returns valid names for prediction and a separate list for names that failed the 1st and 2nd conditions
def get_names(df, test_dt, min_datapoints=10, max_month_before_test=0):
names = np.unique(df['ae_user'])
dat_inds = np.array([get_name_data(df, x).shape[0] for x in names]) >= min_datapoints
mon_inds = np.array([months_between(get_name_data(df, x).iloc[-1, np.where(df.columns == 'ds')[0][0]].
strftime('%Y-%m-%d'), test_dt) for x in names]) <= max_month_before_test
return names[dat_inds * mon_inds], [names[~dat_inds], names[~mon_inds]]
# Computes a single forecast from a given df and model
def compute_forecast(model, df, test_dt='2019-07-01', trn_beg_dt='2010-01-01',
ret_metrics=False, verbose=False, plotit=False):
# Training Data
collections_data = df[(df['ds'] < test_dt) & (df['ds'] >= trn_beg_dt)]
collections_data = collections_data[collections_data['y'] > 0]
trn_end_dt = collections_data['ds'].iloc[-1]
# Model Fitting / Months at a time
model.fit(collections_data)
future = model.make_future_dataframe(periods=months_between(test_dt, trn_end_dt.strftime('%Y-%m-%d')), freq='M')
# Forecasting
forecast = model.predict(future)
predictions = forecast[['ds', 'yhat']]
predictions = predictions[predictions['ds'] > trn_end_dt]
# print(predictions)
pred_val = predictions['yhat'].values[-1]
pred_val = round(pred_val * (pred_val > 0) + (pred_val <= 0), 0) # Positive predictions & round to integer count
test_set = df[df['ds'] == test_dt]
test_exists = len(test_set) != 0
if verbose:
if test_exists:
print(f"\nError: ${pred_val - test_set['y'].values[0]}")
print(f"Predicted: ${round(predictions['yhat'].values[0],1)}")
print(f"Actual: ${test_set['y'].values[0]}")
else:
print('\n No Testing data available')
# Visual of model results
if plotit:
model.plot(forecast)
if test_exists:
plt.scatter(datetime.strptime(test_dt, "%Y-%m-%d"), test_set['y'].values[0], color='r')
# model.plot_components(forecast)
# If you want to return the prediction accuracy metrics
if ret_metrics:
test_set = df[df['ds'] == test_dt]
if test_exists:
metric_df = pd.Series([test_set['y'].values[0],
round(pred_val, 1),
pred_val - test_set['y'].values[0]],
index=['y', 'yhat', 'Error'])
else:
metric_df = pd.Series([float('nan'),
round(pred_val, 1),
float('nan')],
index=['y', 'yhat', 'Error'])
return metric_df
return pred_val
# df is a dataframe containing all the data (test+train). Make sure it is sorted by user, then ascending date order
# prediction_dt is the date you wish to predict for (this will become the "test set")
# test_metrics determines if you want to generate testing set metrics. Leave false for predictions only!
def forecast_all_users(df, prediction_dt, test_metrics=False, print_loading=False):
names = get_names(df, prediction_dt, max_month_before_test=1-int(test_metrics))[0]
preds = pd.DataFrame(np.zeros([len(names), 1 + test_metrics*2]))
h = holidays()
# The Model---------------------------------------------------
model = Prophet(seasonality_mode='multiplicative'
, holidays=h
, daily_seasonality=False, weekly_seasonality=False
,seasonality_prior_scale=10
,yearly_seasonality=5
,holidays_prior_scale=1
,changepoint_prior_scale=.15
,n_changepoints=10
,changepoint_range=.85)
# model_full.add_seasonality(name='quarterly', period=91, fourier_order=8)
# -------------------------------------------------------------
for i in range(len(names)):
dat = get_name_data(df, names[i])
cf = compute_forecast(copy.deepcopy(model), df=dat, test_dt=prediction_dt, ret_metrics=test_metrics)
if test_metrics:
preds.iloc[i] = cf.values
else:
preds.iloc[i] = cf
if print_loading and ((i+1) % 10 == 0):
print("Finished {} out of {}\n".format(i + 1, len(names)))
results = pd.concat([pd.Series(names).reset_index(drop=True), pd.DataFrame(preds)], ignore_index=True, axis=1)
if test_metrics:
results.columns = ['Name', 'Actual', 'Predicted', 'Error']
results['Percent Error'] = results['Error']/results['Actual']
# print('\nMedian Absolute Percent Error: {} %'.
#format(100*round(np.nanmedian(abs(results[['Percent Error']])),4)))
# print('Median Absolute Error: {}'.format(np.nanmedian(abs(results[['Error']]))))
# print('Mean Absolute Error: {}'.format(round(np.nanmean(abs(results[['Error']])),3)))
else:
results.columns = ['Name', 'Future Prediction']
return results
if __name__ == "__main__":
prediction_date = firstDay()
testing_date = firstDay(datetime.strptime(prediction_date, "%Y-%m-%d") - timedelta(days=1))
dat = get_data(prediction_date)
names_ignored = np.unique(np.concatenate(get_names(dat, testing_date)[1])) # Names not predicted
test_results = forecast_all_users(dat, prediction_dt=testing_date, test_metrics=True, print_loading=True)
pred_results = forecast_all_users(dat, prediction_dt=prediction_date, test_metrics=False)
final_results = pd.merge(test_results, pred_results, on='Name', how='outer')
filename = 'csm_fixed_predictions_{}.csv'.format(reformatDateString(testing_date, '%Y_%m'))
str_csv = final_results.to_csv(filename, index=False) # CSV file
# Email csv file with metrics
prediction_month_name = reformatDateString(prediction_date, '%b %Y')
testing_month_name = reformatDateString(testing_date, '%b %Y')
MAE = round(np.nanmean(abs(final_results[['Error']])), 2)
MedAE = round(np.nanmedian(abs(final_results[['Error']])),2)
MedAPE = 100 * round(np.nanmedian(abs(final_results[['Percent Error']])), 4)
msg = MIMEMultipart()
msg['From'] = 'thisisatestingemail497@gmail.com'
    msg['To'] = 'TO_EMAIL_ADDRESS'
msg['Subject'] = 'CSM predictions {}'.format(testing_month_name)
message = MIMEText(
"""CSM accuracy testing for {} and future predictions for {}
Metrics
Mean Absolute Error: {}
Median Absolute Error: {}
Median Absolute % Error: {}%
""".format(testing_month_name, prediction_month_name, MAE, MedAE, MedAPE))
msg.attach(message)
part = MIMEApplication(str_csv, Name=basename(filename))
# part['Content-Disposition'] = 'attachment; filename="%s"' % basename(filename)
msg.attach(part)
gmail_sender = 'thisisatestingemail497@gmail.com'
gmail_passwd = 'INSERT PASSCODE HERE'
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login(gmail_sender, gmail_passwd)
server.send_message(msg)
server.quit()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step01_check_valid_country_list
# @Date: 2020/3/6
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m SortData.ConstructVariable.step01_check_valid_country_list
"""
import os
import pandas as pd
from pandas import DataFrame
from Constants import Constants as const
if __name__ == '__main__':
ctat_df: DataFrame = pd.read_csv(os.path.join(const.DATABASE_PATH, 'Compustat',
'198706_202003_global_compustat_all_firms.zip')).sort_values(
by='datadate', ascending=True).rename(columns={'fyear': const.YEAR}).drop_duplicates(
subset=[const.GVKEY, const.YEAR], keep='last')
country_list: DataFrame = ctat_df.loc[:, ['loc', const.YEAR]].drop_duplicates()
    country_year_min: DataFrame = country_list.groupby('loc')[const.YEAR].min().to_frame('start_year')
    country_year_max: DataFrame = country_list.groupby('loc')[const.YEAR].max().to_frame('end_year')
country_year_data: DataFrame = country_year_min.merge(country_year_max, left_index=True, right_index=True)
country_year_data.to_csv(os.path.join(const.RESULT_PATH, '20200306_ctat_global_country_list.csv'))
|
NIinf=['One','Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten']
PAas=int(input())
if 1<=PAas<=10:
print(NIinf[PAas-1])
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm, neural_network, naive_bayes
from sklearn.linear_model import Perceptron
Attributes = pd.read_csv("hm_hospitales_covid_structured_30d_train.csv", na_values=0, na_filter=True)
Outcomes = pd.read_csv("split_train_export_30d.csv")
Output_format = {'PATIENT ID': Attributes['PATIENT ID'], 'hospital_outcome': np.zeros(1834, dtype=int)}
Output = pd.DataFrame(Output_format)
X = Attributes.drop(labels=['PATIENT ID', 'admission_datetime', 'ed_diagnosis'], axis='columns')
X.loc[X['sex'] == 'FEMALE', 'sex'] = 0
X.loc[X['sex'] == 'MALE', 'sex'] = 1
X = X[['age','sex', 'pmhx_diabetes', 'pmhx_hld', 'pmhx_htn', 'pmhx_ihd'
, 'pmhx_copd', 'pmhx_activecancer', 'pmhx_chronicliver', 'pmhx_stroke', 'pmhx_chf', 'pmhx_dementia', 'lab_ddimer'
, 'lab_crp', 'lab_lymphocyte_percentage', 'lab_urea', 'lab_lymphocyte', 'lab_neutrophil_percentage']]
X = X.fillna(X.median())
X = X.to_numpy()
Y = Outcomes.drop(labels='PATIENT ID', axis='columns')
Y = Y.to_numpy()
Y = Y.ravel()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
clf = Perceptron(penalty='l2', tol=0.0001, n_iter_no_change=50)
Y_pred = clf.fit(X_train, Y_train).predict(X_test)
TN, FN, TP, FP = 0, 0, 0, 0
for i in range(len(Y_test)):
if Y_test[i] == 0 and Y_pred[i] == 0:
TN += 1
if Y_test[i] == 1 and Y_pred[i] == 0:
FN += 1
if Y_test[i] == 0 and Y_pred[i] == 1:
FP += 1
if Y_test[i] == 1 and Y_pred[i] == 1:
TP += 1
print("TN:", TN, ", FN:", FN, ", TP:", TP, ", FP:", FP)
precision, recall = (TP/(FP+TP)), (TP/(FN+TP))
print("precision:", precision, ", recall:", recall)
print('F1:', 2 * ((precision*recall)/(precision+recall)))
|
import socket
import time
import threading
from queue import Queue
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
all_connections = []
all_addresses = []
# Create Socket (allow to computer communicate)
def socket_create():
try:
global host
global port
global s
host = ''
port = 9999
s = socket.socket()
except socket.error as msg:
print("Socket creation error : " + str(msg))
# Binding socket with port and wait for connection
def socket_bind():
try:
global host
global port
global s
#print("Binding Socket with port : " + str(port))
s.bind((host, port))
s.listen(5)
except socket.error as msg:
print("Socket Binding error : " + str(msg) + "\n" + "Retrying....")
time.sleep(5)
socket_bind()
# Accept connection from multiple client and save into List
def accept_connections():
for c in all_connections:
c.close()
del all_connections[:]
del all_addresses[:]
while 1:
try:
conn, address = s.accept()
conn.setblocking(1)
all_connections.append(conn)
all_addresses.append(address)
print("\nConnection has been established " + address[0])
except:
print("Errors accepting connections ")
# Interactive Prompt to send commands remotely
def start_thirdeye():
while True:
cmd = input('ThirdEye> ')
if cmd == 'list':
list_connections()
elif 'select' in cmd:
conn = get_target(cmd)
if conn is not None:
send_target_commands(conn)
else:
print("Command not recognized !! Try again ")
# Display current connections
def list_connections():
results = ' '
for i, conn in enumerate(all_connections):
try:
conn.send(str.encode(' '))
conn.recv(20480)
except:
del all_connections[i]
del all_addresses[i]
continue
results += str(i) + ' ' + str(all_addresses[i][0]) + ' ' + str(all_addresses[i][1]) + '\n'
print('--------Clients--------' + '\n' + results)
# select the target
def get_target(cmd):
try:
target = cmd.replace('select ', '')
target = int(target)
conn = all_connections[target]
print("You are now connecting to " + str(all_addresses[target][0]))
print(str(all_addresses[target][0]) + '> ', end="")
return conn
except:
print("Not Valid selection")
return None
# Sending commands to selected Client
def send_target_commands(conn):
while True:
try:
cmd = input()
if len(str.encode(cmd)) > 0:
print('entered in if')
conn.send(str.encode(cmd))
client_response = str(conn.recv(20480), "utf-8")
print(client_response, end="")
if cmd == 'quit':
break
except:
print('Connection was lost ')
break
# Create Worker Thread
def create_worker():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
# Do the next Job in Queue (1 to handle connection 2 to send commands)
def work():
while True:
x = queue.get()
if x == 1:
socket_create()
socket_bind()
            accept_connections()
if x == 2:
start_thirdeye()
queue.task_done()
# Each list item is a new JOB
def create_jobs():
for x in JOB_NUMBER:
queue.put(x)
queue.join()
create_worker()
create_jobs()
|
person = {
"name": "Tung",
"age" : 21
}
print(person)
print('name' in person)
if 'nationality' not in person:
    print("key 'nationality' does not exist in my dictionary")
if 'name' in person:
    print("key 'name' exists in my dictionary")
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='move-my-files',
version='0.2.4',
description='CLI tools to organize files on your computer',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/jan25/move-my-files',
author='Abhilash Gnan',
author_email='abhilashgnan@gmail.com',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
packages=find_packages(where='.', exclude=['tests']),
package_dir={'': '.'},
python_requires='>=3.5, <4',
install_requires=[
'watchdog',
'Click',
'pyyaml',
],
extras_require={
'dev': [
'pytest',
'tox',
]
},
entry_points={
'console_scripts': [
'mmf=app.cli:mmf',
],
},
)
|
import math
r = {1 : "radius R", 2 : "diameter D", 3 : "circumference L", 4 : "area of circle S"}
c = []
i =int(input("i= "))
print(i)
N = float(input(""))
print(r[i],":",N)
if i == 1:
R = N
c.append(R)
c.append(2 * R)
c.append(2 * math.pi * R)
c.append(math.pi * R**2)
elif i == 2:
D = N
R = D / 2
c.append(R)
c.append(D)
c.append(math.pi * D)
c.append(math.pi * R**2)
elif i == 3:
L = N
R = L / 2 / math.pi
c.append(R)
c.append(2 * R)
c.append(L)
c.append(math.pi * R**2)
elif i == 4:
S = N
R = math.sqrt(S / math.pi)
c.append(R)
c.append(2 * R)
c.append(2 * math.pi * R)
c.append(S)
print()
print("Elements of a circle:")
for i in range(0,4):
print(r[i+1],":",c[i])
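# Worked example (illustrative): entering i = 1 and N = 2 gives R = 2, so the
# script prints D = 4, L = 2*pi*2 ~ 12.566 and S = pi*2**2 ~ 12.566.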
|
from model import ENModel, val_transform
from data import UnlabeledImagesDataset, ImageCsvDataset
import torch
import torch.utils.data
import torch.nn.functional as F
import csv
import re
from pathlib import Path
import os.path
from pytorch_lightning.metrics.classification import ConfusionMatrix
from argparse import ArgumentParser
argp = ArgumentParser(
description='Uses a cross-validation ensemble to analyze images.'
)
argp.add_argument(
'-c', '--cv_dir', type=str, required=True,
help='The path to a cross-validation output directory.'
)
argp.add_argument(
'-i', '--images', type=str, required=True,
help='The path to a collection of images.'
)
argp.add_argument(
'-l', '--labels_csv', type=str, required=False, default='',
help='The path of a labels CSV file. If not provided, no accuracy or '
'other metrics will be calculated.'
)
argp.add_argument(
'-x', '--fnames_col', type=str, required=False, default='',
help='The column in the CSV file containing the image file names.'
)
argp.add_argument(
'-y', '--labels_col', type=str, required=False, default='',
help='The column in the CSV file containing the image labels.'
)
argp.add_argument(
'-b', '--batch_size', type=int, required=False, default=8,
help='The batch size.'
)
argp.add_argument(
'-o', '--output', type=str, required=False, default='',
help='The path of an output CSV file.'
)
args = argp.parse_args()
if args.labels_csv != '' and (args.fnames_col == '' or args.labels_col == ''):
exit(
'\nError: If a labels CSV file is provided, file and label column '
'names must also be provided.\n'
)
if args.labels_csv == '':
imgs_ds = UnlabeledImagesDataset(args.images, transform=val_transform)
else:
imgs_ds = ImageCsvDataset(
args.labels_csv, args.images, args.fnames_col, args.labels_col,
transform=val_transform
)
dl = torch.utils.data.DataLoader(
imgs_ds, batch_size=args.batch_size, shuffle=False, num_workers=12
)
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
# Get the cross-validation model checkpoints.
ckpts = []
cv_dir = Path(args.cv_dir)
for fold_dir in cv_dir.glob('fold_*'):
if fold_dir.is_dir():
best_epoch = -1
best_ckpt = ''
for ckpt in fold_dir.glob('*.ckpt'):
m = re.search(r'epoch=([0-9]+)', str(ckpt))
if m is not None:
epoch = int(m.group(1))
if epoch > best_epoch:
best_epoch = epoch
best_ckpt = str(ckpt)
ckpts.append(best_ckpt)
if args.output != '':
writer = csv.DictWriter(
open(args.output, 'w'),
['file', 'prediction', '0', '1']
)
writer.writeheader()
else:
writer = None
rowout = {}
models = []
with torch.no_grad():
for i, ckpt in enumerate(ckpts):
print(f'Loading best model from fold {i}...')
model = ENModel.load_from_checkpoint(ckpt, lr=0.001, n_classes=2)
model.to(device)
model.eval()
models.append(model)
c_mat = ConfusionMatrix(num_classes=2)
img_cnt = 0
correct_cnt = 0
for batch, labels in dl:
# Get the predictions for the batch from each model.
outputs = []
for model in models:
output = model(batch.to(device)).cpu()
output = F.softmax(output, 1)
#print(output)
outputs.append(output)
# For each image in the batch, average the model predictions.
p_labels = torch.zeros_like(labels)
for i in range(len(labels)):
# Gather the predictions for this image.
img_preds = []
for j in range(len(models)):
img_preds.append(outputs[j][i,:])
# Calculate the ensemble prediction for this image.
img_preds = torch.stack(img_preds)
#print(img_preds)
model_avg = torch.mean(img_preds, 0)
#print(model_avg)
p_label = torch.max(model_avg, 0).indices
p_labels[i] = p_label
#print(label, int(p_label), imgfile)
#print(p_labels, all_images.samples[i])
#print(p_labels)
# Make adjustments to the ground truth labels for unclassifiable and
# mixed images.
adj_labels = torch.zeros_like(labels)
for i, label in enumerate(labels):
if imgs_ds.classes[label] == 'U':
# An unclassifiable image; the adjustment should be the
# opposite of the prediction (i.e., it is always wrong).
adj_labels[i] = abs(p_labels[i] - 1)
elif imgs_ds.classes[label] == 'B':
# An image containing both color morphs.
adj_labels[i] = p_labels[i]
else:
adj_labels[i] = labels[i]
if writer is not None:
rowout['file'] = os.path.basename(imgfile)
rowout['prediction'] = int(p_label)
rowout['0'] = float(model_avg[0])
rowout['1'] = float(model_avg[1])
writer.writerow(rowout)
c_mat.update(p_labels, adj_labels)
img_cnt += len(adj_labels)
correct_cnt += sum(p_labels == adj_labels)
print(correct_cnt / img_cnt, img_cnt)
print('\nTotal images:', img_cnt)
print('Accuracy:', float(correct_cnt / img_cnt))
print('Confusion matrix:')
print(c_mat.compute().numpy())
|
from distutils.core import setup
import py2exe
setup(console=['test.py'],
options = {"py2exe": {'includes':'decimal'}})
|
import tornado.web
import config
from views import index
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/', index.IndexHandler),
]
super(Application,self).__init__(handlers, **config.settings)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
l = self.maxDepth(root.left)
r = self.maxDepth(root.right)
return max(l, r)+1
if __name__ == '__main__':
# import doctest
# doctest.testmod()
a = TreeNode(5)
print(a.maxDepth(None))
|
from itertools import izip_longest
def transpose_two_strings(arr):
output = '{} {}'.format
return '\n'.join(output(*a) for a in izip_longest(*arr, fillvalue=' '))
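# Example (illustrative): transpose_two_strings(['hello', 'world']) returns
# 'h w\ne o\nl r\nl l\no d', pairing the two strings column by column and
# padding the shorter one with spaces.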
|
from flask_admin.contrib.sqla import ModelView
from wtforms import TextAreaField
from wtforms.widgets import TextArea
class CKTextAreaWidget(TextArea):
def __call__(self, field, **kwargs):
if kwargs.get('class'):
kwargs['class'] += " ckeditor"
else:
kwargs.setdefault('class', 'ckeditor')
return super(CKTextAreaWidget, self).__call__(field, **kwargs)
class CKTextAreaField(TextAreaField):
widget = CKTextAreaWidget()
class CKEditorModelView(ModelView):
column_list = ['id', 'value']
can_delete = False
can_create = False
form_overrides = dict(value=CKTextAreaField)
create_template = 'text_model_view.html'
edit_template = 'text_model_view.html'
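# Usage sketch (assumes a Flask-Admin `admin` instance, a SQLAlchemy `db` session
# and a `Page` model, none of which are defined in this module):
#   admin.add_view(CKEditorModelView(Page, db.session, name='Pages'))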
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from utils.comparisons import allclose
from utils.generators import mk_0to1_array
import cunumeric as cn
DTYPES = [np.float32, np.complex64]
def _vdot(a_dtype, b_dtype, lib):
return lib.vdot(
mk_0to1_array(lib, (5,), dtype=a_dtype),
mk_0to1_array(lib, (5,), dtype=b_dtype),
)
@pytest.mark.parametrize("a_dtype", DTYPES)
@pytest.mark.parametrize("b_dtype", DTYPES)
def test(a_dtype, b_dtype):
assert allclose(_vdot(a_dtype, b_dtype, np), _vdot(a_dtype, b_dtype, cn))
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
from flask import request, session, make_response, jsonify
from flask_restx import fields, Resource, Api, Namespace
from flask_cors import CORS, cross_origin
from werkzeug.datastructures import FileStorage, ImmutableMultiDict
from pytezos import Contract, Key
from pytezos import pytezos
from pytezos.operation.result import OperationResult
from ast import literal_eval
from controllers.validate import Validate
#import redis
import requests
import urllib
import json
import os
import uuid
pytezos = pytezos
api = Namespace('keys', description='generate keys, activate, reveal')
upload_parser = api.parser()
upload_parser.add_argument('file', location='files',
type=FileStorage, required=True)
upload_parser.add_argument('network', choices=('mainnet', 'carthagenet'))
# POST key configuration from faucet wallet
@api.route('/faucet')
@api.expect(upload_parser)
class faucet(Resource):
@api.expect(type)
def post(self):
try:
args = upload_parser.parse_args()
uploaded_faucet = json.loads(args['file'].read())
session['auth'] = 'faucet'
session['faucet'] = uploaded_faucet
session['network'] = args['network']
v = Validate()
p = v.read_session(session)
return p.key.public_key_hash()
except:
return 500
# POST key configuration from mneumonic
@api.route('/post_mnemonic')
@api.doc(params = {
'mnemonic' : 'wallet mnemonic',
'password' : 'wallet password' ,
'email': 'wallet email',
'network' : 'mainnet / carthagenet'
})
class mnemonics(Resource):
def post(self):
try:
if (request.data.__len__() == 0):
session['auth'] = 'mnemonic'
session['mnemonic'] = request.args.get('mnemonic')
session['password'] = request.args.get('password')
session['email'] = request.args.get('email')
session['network'] = request.args.get('network')
else:
req = json.loads(request.data)
session['auth'] = 'mnemonic'
session['mnemonic'] = req['mnemonic']
session['password'] = req['password']
session['email'] = req['email']
session['network'] = req['network']
return session
except:
return 500
@api.route('/post_secret')
@api.doc(params = {
'secret' : 'wallet secret key',
'password' : 'wallet password',
'network' : 'mainnet / carthagenet'
})
class secret_key(Resource):
def post(self):
uid = str(uuid.uuid4())
#print(uid)
#print(request.get_json())
if (request.data.__len__() == 0):
req = request.args.to_dict(flat=True)
req['auth'] = 'secret'
req['id'] = uid
session['auth'] = 'secret'
session['secret'] = req['secret']
session['password'] = req['password']
session['network'] = req['network']
else:
req = json.loads(request.data)
req['id'] = uid
session['auth'] = 'secret'
session['secret'] = req['secret']
session['password'] = req['password']
session['network'] = req['network']
#print(req)
v = Validate()
p = v.read_session(session)
#session['id'] = uid
#r.set(uid, json.dumps(req))
return p.key.public_key_hash()
# POST password, return a faucet wallet
@api.route('/generate')
@api.doc(params={'password' : 'wallet password'})
class gen_keys(Resource):
def post(self):
if (request.data.__len__() == 0):
if (request.args.get('password', '') == None):
key = Key.generate()
else:
key = Key.generate(request.args.get('password', ''))
else:
password = json.loads(request.data).get("password")
if (password != "") :
key = Key.generate(password)
else:
key = Key.generate()
file_name = './{}.json'.format(key.public_key_hash())
with open(file_name) as json_file:
data = json.load(json_file)
os.remove(file_name)
#activate / reveal
data['secret_key'] = key.secret_key()
return data
@api.route('/test_session')
class test_session(Resource):
def get(self):
return session['secret']
|
import random
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from src.evaluator import ModelBasedEstimator, DoublyRobustEstimator, IPSEvaluator
from src.main import simulation
from src.policy import RandomPolicy, DeterministicPolicy, CBVowpalWabbit
def create_plot(PolicyClass, policy_name):
real_means_ips = []
eval_means_ips = []
real_means_dr = []
eval_means_dr = []
real_means_mb = []
eval_means_mb = []
all_res = []
for i in tqdm(range(100)):
real_means_ips_once = []
eval_means_ips_once = []
real_means_dr_once = []
eval_means_dr_once = []
real_means_mb_once = []
eval_means_mb_once = []
for _ in range(100):
r_mean_ips, e_mean_ips = simulation(IPSEvaluator, PolicyClass)
real_means_ips_once.append(r_mean_ips)
eval_means_ips_once.append(e_mean_ips)
r_mean_dr, e_mean_dr = simulation(DoublyRobustEstimator, PolicyClass)
real_means_dr_once.append(r_mean_dr)
eval_means_dr_once.append(e_mean_dr)
r_mean_mb, e_mean_mb = simulation(ModelBasedEstimator, PolicyClass)
real_means_mb_once.append(r_mean_mb)
eval_means_mb_once.append(e_mean_mb)
all_res += real_means_ips_once + eval_means_ips_once + real_means_dr_once + eval_means_dr_once + real_means_mb_once + eval_means_mb_once
real_means_ips.append(np.array(real_means_ips_once).mean())
eval_means_ips.append(np.array(eval_means_ips_once).mean())
real_means_dr.append(np.array(real_means_dr_once).mean())
eval_means_dr.append(np.array(eval_means_dr_once).mean())
real_means_mb.append(np.array(real_means_mb_once).mean())
eval_means_mb.append(np.array(eval_means_mb_once).mean())
np.save('../results/25.05.2021/simulate_10_2.npy', all_res)
# fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(35, 20))
# fig.suptitle(policy_name, fontsize=35)
#
# ax1.set_title("Inverse Propensity Scoring")
# ax1.plot(real_means_ips, label="real average rewards")
# ax1.plot(eval_means_ips, label="estimated average rewards")
# ax1.legend()
#
# ax2.set_title("Doubly Robust")
# ax2.plot(real_means_dr, label="real average rewards")
# ax2.plot(eval_means_dr, label="estimated average rewards")
# ax2.legend()
#
# ax3.set_title("Model Based")
# ax3.plot(real_means_mb, label="real average rewards")
# ax3.plot(eval_means_mb, label="estimated average rewards")
# ax3.legend()
# plt.savefig("../results/" + '_'.join(policy_name.split()) + '_1000')
# plt.show()
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(15, 20))
fig.suptitle(policy_name, fontsize=25)
ax1.hist([real_means_ips, eval_means_ips], edgecolor="black", bins='rice',
label=["real average rewards", "estimated average rewards"])
ax1.set_title("Inverse Propensity Scoring")
ax1.legend()
ax2.hist([real_means_dr, eval_means_dr], edgecolor="black", bins='rice',
label=["real average rewards", "estimated average rewards"])
ax2.set_title("Doubly Robust")
ax2.legend()
ax3.hist([real_means_mb, eval_means_mb], edgecolor="black", bins='rice',
label=["real average rewards", "estimated average rewards"])
ax3.set_title("Model Based")
ax3.legend()
plt.savefig("../results/" + '_'.join(policy_name.split()) + '_hist_100_10_lot_of_context')
plt.show()
create_plot(CBVowpalWabbit, "CBVowpalWabbit")
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Body(object):
"""Implementation of the 'body' model.
TODO: type model description here.
Attributes:
mfrom (string): A valid Ytel Voice enabled number (E.164 format) that
will be initiating the phone call.
to (string): To number
url (string): URL requested once the call connects
method (string): Specifies the HTTP method used to request the
required URL once call connects.
status_call_back_url (string): URL that can be requested to receive
notification when call has ended. A set of default parameters will
be sent here once the call is finished.
        status_call_back_method (string): Specifies the HTTP method used to
            request StatusCallbackUrl.
fall_back_url (string): URL requested if the initial Url parameter
fails or encounters an error
fall_back_method (string): Specifies the HTTP method used to request
the required FallbackUrl once call connects.
heart_beat_url (string): URL that can be requested every 60 seconds
            during the call to notify of elapsed time
heart_beat_method (string): Specifies the HTTP method used to request
HeartbeatUrl.
timeout (int): Time (in seconds) Ytel should wait while the call is
ringing before canceling the call
play_dtmf (string): DTMF Digits to play to the call once it connects.
0-9, #, or *
hide_caller_id (bool): Specifies if the caller id will be hidden
record (bool): Specifies if the call should be recorded
record_call_back_url (string): Recording parameters will be sent here
upon completion
record_call_back_method (string): Method used to request the
RecordCallback URL.
transcribe (bool): Specifies if the call recording should be
transcribed
transcribe_call_back_url (string): Transcription parameters will be
sent here upon completion
if_machine (IfMachineEnum): How Ytel should handle the receiving
numbers voicemail machine
if_machine_url (string): URL requested when IfMachine=continue
if_machine_method (string): Method used to request the IfMachineUrl.
feedback (bool): Specify if survey should be enable or not
survey_id (string): The unique identifier for the survey.
"""
# Create a mapping from Model property names to API property names
_names = {
"mfrom":'From',
"to":'To',
"url":'Url',
"method":'Method',
"status_call_back_url":'StatusCallBackUrl',
"status_call_back_method":'StatusCallBackMethod',
"fall_back_url":'FallBackUrl',
"fall_back_method":'FallBackMethod',
"heart_beat_url":'HeartBeatUrl',
"heart_beat_method":'HeartBeatMethod',
"timeout":'Timeout',
"play_dtmf":'PlayDtmf',
"hide_caller_id":'HideCallerId',
"record":'Record',
"record_call_back_url":'RecordCallBackUrl',
"record_call_back_method":'RecordCallBackMethod',
"transcribe":'Transcribe',
"transcribe_call_back_url":'TranscribeCallBackUrl',
"if_machine":'IfMachine',
"if_machine_url":'IfMachineUrl',
"if_machine_method":'IfMachineMethod',
"feedback":'Feedback',
"survey_id":'SurveyId'
}
def __init__(self,
mfrom=None,
to=None,
url=None,
method=None,
status_call_back_url=None,
status_call_back_method=None,
fall_back_url=None,
fall_back_method=None,
heart_beat_url=None,
heart_beat_method=None,
timeout=None,
play_dtmf=None,
hide_caller_id=None,
record=None,
record_call_back_url=None,
record_call_back_method=None,
transcribe=None,
transcribe_call_back_url=None,
if_machine=None,
if_machine_url=None,
if_machine_method=None,
feedback=None,
survey_id=None):
"""Constructor for the Body class"""
# Initialize members of the class
self.mfrom = mfrom
self.to = to
self.url = url
self.method = method
self.status_call_back_url = status_call_back_url
self.status_call_back_method = status_call_back_method
self.fall_back_url = fall_back_url
self.fall_back_method = fall_back_method
self.heart_beat_url = heart_beat_url
self.heart_beat_method = heart_beat_method
self.timeout = timeout
self.play_dtmf = play_dtmf
self.hide_caller_id = hide_caller_id
self.record = record
self.record_call_back_url = record_call_back_url
self.record_call_back_method = record_call_back_method
self.transcribe = transcribe
self.transcribe_call_back_url = transcribe_call_back_url
self.if_machine = if_machine
self.if_machine_url = if_machine_url
self.if_machine_method = if_machine_method
self.feedback = feedback
self.survey_id = survey_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
mfrom = dictionary.get('From')
to = dictionary.get('To')
url = dictionary.get('Url')
method = dictionary.get('Method')
status_call_back_url = dictionary.get('StatusCallBackUrl')
status_call_back_method = dictionary.get('StatusCallBackMethod')
fall_back_url = dictionary.get('FallBackUrl')
fall_back_method = dictionary.get('FallBackMethod')
heart_beat_url = dictionary.get('HeartBeatUrl')
heart_beat_method = dictionary.get('HeartBeatMethod')
timeout = dictionary.get('Timeout')
play_dtmf = dictionary.get('PlayDtmf')
hide_caller_id = dictionary.get('HideCallerId')
record = dictionary.get('Record')
record_call_back_url = dictionary.get('RecordCallBackUrl')
record_call_back_method = dictionary.get('RecordCallBackMethod')
transcribe = dictionary.get('Transcribe')
transcribe_call_back_url = dictionary.get('TranscribeCallBackUrl')
if_machine = dictionary.get('IfMachine')
if_machine_url = dictionary.get('IfMachineUrl')
if_machine_method = dictionary.get('IfMachineMethod')
feedback = dictionary.get('Feedback')
survey_id = dictionary.get('SurveyId')
# Return an object of this model
return cls(mfrom,
to,
url,
method,
status_call_back_url,
status_call_back_method,
fall_back_url,
fall_back_method,
heart_beat_url,
heart_beat_method,
timeout,
play_dtmf,
hide_caller_id,
record,
record_call_back_url,
record_call_back_method,
transcribe,
transcribe_call_back_url,
if_machine,
if_machine_url,
if_machine_method,
feedback,
survey_id)
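# Usage sketch (illustrative, not part of the generated model): keys passed to
# from_dictionary must be the API property names listed in _names.
if __name__ == '__main__':
    example = Body.from_dictionary({'From': '+15551230000',
                                    'To': '+15551230001',
                                    'Url': 'https://example.com/answer'})
    print(example.mfrom, example.to, example.url)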
|
from django.shortcuts import render
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.shortcuts import redirect
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from allauth.account.forms import SignupForm
from django.utils.decorators import method_decorator
class IndexView(LoginRequiredMixin, TemplateView):
template_name = 'index.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['is_not_author'] = not self.request.user.groups.filter(name='author').exists()
return context
class BasicSignupForm(SignupForm):
def save(self, request):
user = super(BasicSignupForm, self).save(request)
basic_group = Group.objects.get(name='common')
basic_group.user_set.add(user)
return user
@login_required
def upgrade_me(request):
user = request.user
premium_group = Group.objects.get(name='author')
if not request.user.groups.filter(name='author').exists():
premium_group.user_set.add(user)
return redirect('/news/search')
|
"""
Carbon Core
###########
:Author: Juti Noppornpitak
"""
from contextlib import contextmanager
from imagination.helper.assembler import Assembler
from imagination.helper.data import Transformer
# from imagination.entity import CallbackProxy
from imagination.entity import Entity
from imagination.loader import Loader
from imagination.locator import Locator
class Core(object):
""" The Core of the Framework
This relies on Imagination Framework.
"""
def __init__(self, locator=None):
self.locator = locator or Locator()
self.transformer = Transformer(self.locator)
self.assembler = Assembler(self.transformer)
self._cache_map = None
@contextmanager
def passive_mode(self):
self.assembler.activate_passive_loading()
yield
self.assembler.deactivate_passive_loading()
def get(self, id):
""" Get the service container. """
return self.locator.get(id)
def load(self, *paths):
""" Load service containers from multiple configuration files. """
with self.passive_mode():
[
self.assembler.load(path)
for path in paths
]
self._cache_map = None
def all(self):
if not self._cache_map:
self._cache_map = {
i: self.locator.get(i)
for i in self.locator.entity_identifiers
}
return self._cache_map
def set_entity(self, entity_id, entity_fqcn, *args, **kwargs):
try:
entity = self._create_entity(entity_id, entity_fqcn, args, kwargs)
self.locator.set(entity_id, entity)
except ImportError as exception:
raise ImportError('Failed to register {} ({})'.format(entity_id, entity_fqcn))
def _create_entity(self, id, entity_fqcn, args, kwargs):
loader = Loader(entity_fqcn)
return Entity(id, loader, *args, **kwargs)
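# Usage sketch (illustrative; the file names and service ids below are hypothetical):
#   core = Core()
#   core.load('containers/services.xml', 'containers/db.xml')
#   mailer = core.get('mailer')
#   everything = core.all()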
|
from typing import List
from gensim.models import Doc2Vec as GensimDoc2Vec
from kts_linguistics.string_transforms.abstract_transform import AbstractTransform
from kts_linguistics.string_transforms.transform_pipeline import TransformPipeline
from kts_linguistics.misc import Vector1D
class Doc2VecTransform(AbstractTransform):
def __init__(self, model: GensimDoc2Vec = None, **doc2vec_params):
if model is None:
self.model = GensimDoc2Vec(**doc2vec_params)
else:
self.model = model
def fit(self, groups: List[List[str]], pipeline: TransformPipeline):
sentences = [s for group in groups for s in group]
sentences = [pipeline.custom_transform(s, apply_before_transform=self) for s in sentences]
self.model.build_vocab(sentences)
self.model.train(sentences, total_examples=self.model.corpus_count, epochs=self.model.iter)
def transform(self, s: List[str]) -> Vector1D:
return self.model.infer_vector(s)
|
import numpy as np
from stat_util import get_best_distribution
import scipy.stats as st
class REPD:
def __init__(self,dim_reduction_model,error_func=lambda x: np.linalg.norm(x,ord=2,axis=1)):
self.dim_reduction_model = dim_reduction_model
self.dnd = None #Distribution non defect
self.dnd_pa = None#Distribution non defect parameters
self.dd = None#Distribution defect
self.dd_pa = None#Distribution defect parameters
self.error_func = error_func
'''
X should be a N*M matrix of data instances
y should be a binary vector where 1 indicates a defect instance and 0 a normal instance
'''
def fit(self,X,y):
#Prepare data
X_nd = X[y==0]
X_d = X[y==1]
#Dim reduction model initialization
self.dim_reduction_model.fit(X_nd)
#Caclculate reconstruction errors for train defective and train non-defective
nd_errors = self.calculate_reconstruction_error(X_nd)
d_errors = self.calculate_reconstruction_error(X_d)
#Determine distribution
best_distribution_nd = get_best_distribution(nd_errors)
best_distribution_d = get_best_distribution(d_errors)
#Initialize distributions
self.dnd = getattr(st, best_distribution_nd[0])
self.dnd_pa = best_distribution_nd[1]
self.dd = getattr(st, best_distribution_d[0])
self.dd_pa = best_distribution_d[1]
def predict(self,X):
#Test model performance
test_errors = self.calculate_reconstruction_error(X)
p_nd = self.get_non_defect_probability(test_errors)
p_d = self.get_defect_probability(test_errors)
return np.asarray([0 if p_nd[i] >= p_d[i] else 1 for i in range(len(X))])
def get_non_defect_probability(self,errors):
return self.__get_data_probability__(errors,self.dnd,self.dnd_pa)
def get_defect_probability(self,errors):
return self.__get_data_probability__(errors,self.dd,self.dd_pa)
def calculate_reconstruction_error(self,X):
t = self.dim_reduction_model.transform(X)
r = self.dim_reduction_model.inverse_transform(t)
x_diff = r-X
return self.error_func(x_diff)
def get_probability_data(self):
example_errors = np.linspace(0,3000,100)
nd_p = self.get_non_defect_probability(example_errors)
d_p = self.get_defect_probability(example_errors)
return example_errors,nd_p,d_p
    def __get_data_probability__(self,data,distribution,distribution_parameters):
        # Unpack the fitted distribution parameters straight into the pdf call
        return distribution.pdf(data, *distribution_parameters)
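# Usage sketch (illustrative; assumes stat_util is importable as above). Any model
# exposing fit/transform/inverse_transform can serve as the reduction model, e.g. PCA.
if __name__ == '__main__':
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X = rng.rand(300, 10)
    y = (rng.rand(300) > 0.8).astype(int)  # synthetic labels: roughly 20% "defects"
    repd = REPD(PCA(n_components=3))
    repd.fit(X, y)
    print(repd.predict(X[:5]))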
|
# Find the maximum-value path on a 6x6 board (moving only right or down)
import random
global count
totalprice=0
tmp=0
count=0
a=[[0 for i in range(6)]for i in range(6)]
b=[[0 for i in range(6)]for i in range(6)]
next=[[1,0],[0,1]]
def find(x, y):
global totalprice
global tmp
totalprice+=a[x][y]
if x==5 and y==5:
if totalprice>tmp:
tmp=totalprice
return
for k in range(2):
tx=x+next[k][0]
ty=y+next[k][1]
if tx<0 or tx>5 or ty<0 or ty>5:
continue
if b[tx][ty]==0:
b[tx][ty]=1
find(tx,ty)
totalprice-=a[tx][ty]
b[tx][ty]=0
return
for i in range(6):
for j in range(6):
a[i][j] = random.randint(1, 100)
print('%2d'%a[i][j],end=" ")
count+=1
if count%6==0:
print("\n")
b[0][0]=1
find(0,0)
print(tmp)
|
"""
This program uses a Monte Carlo simulation to randomly generate the
army compositions used in our linear program.
Requires the file sim_units.py and the random package
"""
import random
from sim_units import get_Units, get_Terran, get_Protoss, get_Zerg
import json
def init_army_comps(race, supply_cap=200, num_comps=1000):
"""
Input is the race we wish to build army comps for
race: 'Terran', 'Protoss', 'Zerg'
Supply cap is an integer of the largest army size (default 200)
num_comps is the number of army compositions we will generate
Returns a list of num_comps randomly generated valid army compositions
"""
race_units = {}
if race == 'Terran':
race_units = list(get_Terran().keys())
elif race == 'Protoss':
race_units = list(get_Protoss().keys())
# Intercceptors are a special case
race_units.remove('Interceptor')
elif race == 'Zerg':
race_units = list(get_Zerg().keys())
# Locust and Broodlings are special case
race_units.remove('Locust')
race_units.remove('Broodling')
Units = get_Units()
comps = []
# initialize base format of an army composition
base = {}
for name in race_units:
base[name] = 0
count = 0
while count < num_comps:
comp = base.copy()
# randomly generate an army comp
# shuffle order of names, continuously update the
# max number of units that can be randomly generated
names = list(base.keys())
random.shuffle(names)
current_supply = supply_cap
for name in names:
max_unit = int(current_supply / Units[name]['supply'])
if (get_army_supply(comp) <= current_supply) and (not extra_Motherships(comp)):
comp[name] = random.randint(0, max_unit)
current_supply -= get_army_supply(comp)
temp_comp = comp.copy()
# check that random comp is not already generated
# only include valid comps
if temp_comp not in comps:
if (get_army_supply(temp_comp) <= supply_cap) and (not extra_Motherships(temp_comp)):
comps.append(temp_comp)
count += 1
return comps
def get_army_supply(comp):
"""
Input is an army composition
Returns the total supply used by that army
"""
supply_total = 0
Units = get_Units()
for name in comp:
supply = Units[name]['supply'] * comp[name]
supply_total += supply
return supply_total
def extra_Motherships(comp):
"""
Input is an army composition
Returns True if more than one Mothership is
in that army, else returns False
"""
    return comp.get('Mothership', 0) > 1
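# Example (illustrative): extra_Motherships({'Mothership': 2}) -> True,
# extra_Motherships({'Mothership': 1}) -> False; armies without the key
# (Terran and Zerg comps) are never flagged.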
def get_Terran_comps():
"""
Reads the file 'Terran_comps.json'
Returns a list of dictionaries of all possible
Terran army compositions
"""
terran_comps = {}
with open("Terran_comps.json", 'r') as read_file:
terran_comps = json.load(read_file)
return terran_comps
def get_Protoss_comps():
"""
Reads the file 'Protoss_comps.json'
Returns a list of dictionaries of all possible
Protoss army compositions
"""
protoss_comps = {}
with open("Protoss_comps.json", 'r') as read_file:
protoss_comps = json.load(read_file)
return protoss_comps
def get_Zerg_comps():
"""
Reads the file 'Zerg_comps.json'
Returns a list of dictionaries of all possible
Zerg army compositions
"""
zerg_comps = {}
with open("Zerg_comps.json", 'r') as read_file:
zerg_comps = json.load(read_file)
return zerg_comps
def generate_terran(supply_cap=200, num_comps=200):
"""
Creates a list of terran armies saved as the file
'Terran_comps.json'
"""
with open('Terran_comps.json', 'w') as fout:
json.dump(init_army_comps('Terran'), fout, indent=4)
def main():
if True:
generate_terran()
print("Done with Terran")
if True:
with open('Protoss_comps.json', 'w') as fout:
json.dump(init_army_comps('Protoss'), fout, indent=4)
print("Done with Protoss")
if True:
with open('Zerg_comps.json', 'w') as fout:
json.dump(init_army_comps('Zerg'), fout, indent=4)
print("Done with Zerg")
if __name__ == "__main__":
main()
|
import nltk
import math
import numpy as np
import pandas as pd
import tensorflow as tf
import os,sys
import GPyOpt
from sklearn.base import clone
from sklearn.metrics import confusion_matrix
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder,LabelBinarizer
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential,clone_model
from tensorflow.keras.layers import Dense, Embedding, Activation, Dropout,BatchNormalization,Conv2D,Conv1D,Flatten,LSTM,MaxPool1D,TimeDistributed
DATA_DIR = "data"
class modelClass:
def __init__(self):
#Model parameters
self.nCNN = None
self.nDense = None
self.nEmbedding = None
self.nCNNFilters = None
self.nNNFilters = None
self.nKernel = None
self.nStrides = None
self.poolSize = None
self.vocab_size = None
self.maxLen = None
self.nClasses = None
#Data
self.xTrain = None
self.yTrain = None
self.xValidation = None
self.yValidation = None
self.xTest = None
self.yTest = None
self.model = None
def loadDataOneHot(self):
dataFrame = pd.read_csv(os.path.join(DATA_DIR, "dataset_examples.tsv"),sep="\t")
vectorizer = CountVectorizer()
texts = vectorizer.fit_transform(list(dataFrame["text"]))
#make labels into 0,1
encoder = LabelBinarizer()
labels = encoder.fit_transform(list(dataFrame["sentiment"]))
self.stratifyData(texts,labels)
def loadDataSequence(self):
#dataFrame = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/dataset_examples.tsv",sep="\t")
dataFrame = pd.read_csv(os.path.join(DATA_DIR, "dataset_examples.tsv"),sep="\t")
texts = list(dataFrame["text"])
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
self.vocab_size = len(tokenizer.word_index) + 1
sequenceText = tokenizer.texts_to_sequences(texts)
self.maxLen = max([len(text) for text in sequenceText ])
padSequenceText = pad_sequences(sequenceText,padding = "post",maxlen = self.maxLen )
#make labels into 0,1
encoder = LabelBinarizer()
#encoder = LabelEncoder()
#labels = to_categorical(dataFrame["sentiment"])
labels = encoder.fit_transform(dataFrame["sentiment"])
#labels = labels.flatten()
self.stratifyData(padSequenceText,labels)
def stratifyData(self,texts,labels):
"""
Given data is split stratified wise into 70% training, 15% validation and 15% test sets.
"""
xTrain,xValidation,yTrain,yValidation = train_test_split(texts,labels,test_size = 0.3,random_state=42,stratify=labels)
xValidation,xTest,yValidation,yTest = train_test_split(xValidation,yValidation,test_size = 0.5,random_state=42,stratify=yValidation)
self.xTrain = xTrain
self.xValidation = xValidation
self.xTest = xTest
self.yTrainDecoded = yTrain
self.yTrain = yTrain
self.yValidation = yValidation
self.yTest = yTest
self.nClasses = len(set(yTest.flatten()))
def optimizeLR(self,C):
print("C is right now",C[0][0])
self.model = LogisticRegression(C=C[0][0])
score = self.crossEval(10)
return score
def optimizeCNN(self,variables):# nDense, nEmbedding, nCNNFilters, nNNFilters, nKernel, nStrides,poolSize):
self.nCNN = int(variables[0][0])
self.nDense = int(variables[0][1])
self.nEmbedding = int(variables[0][2])
self.nCNNFilters = int(variables[0][3])
self.nNNFilters = int(variables[0][4])
self.nKernel = int(variables[0][5])
self.nStrides = int(variables[0][6])
self.poolSize = int(variables[0][7])
self.buildCNN()
score = self.crossEval(10)
return score
def addConvLayer(self,model):
model.add(Conv1D(kernel_size = self.nKernel, filters = self.nCNNFilters,strides = self.nStrides, padding="valid" ))
model.add(Activation("elu"))
model.add(BatchNormalization())
model.add(MaxPool1D(pool_size = self.poolSize,padding="valid"))
return model
def buildCNN(self):
model = Sequential()
model.add(Embedding(input_dim = self.vocab_size, output_dim = self.nEmbedding, input_length = self.maxLen ))
#add nCNN conv layers
for _ in range(0,self.nCNN):
model = self.addConvLayer(model)
model.add(Flatten())
#add nDense
for _ in range(0,self.nDense):
model.add(Dense(self.nNNFilters))
        model.add(Dense(1, activation = "sigmoid" ))
model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
self.model = model
def buildLR(self,C):
self.model = LogisticRegression(C=C)
def crossEval(self,folds):
skf = StratifiedKFold(n_splits=folds, shuffle=True,random_state=42)
        count = 1
        scores = []
        for train,test in skf.split(self.xTrain,self.yTrain):
            if isinstance(self.model, LogisticRegression):
self.model.fit(self.xTrain[train],self.yTrain[train])
score = self.model.score(self.xTrain[test],self.yTrain[test])
else:
self.buildCNN()
self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model.fit(self.xTrain[train],self.yTrain[train],epochs=100,batch_size=10,verbose=0)
                score = self.model.evaluate(self.xTrain[test],self.yTrain[test])[1]  # keep accuracy only; evaluate returns [loss, acc]
print("The score of iteration ", count, "was", score)
scores.append(score)
count = count+1
        meanScore = np.mean(scores)
        print("Final score was ", meanScore*100)
        return 1-meanScore  # GPyOpt minimises this value, so return 1 - mean accuracy
def trainModel(self):
self.model.fit(self.xTrain,self.yTrain)
def validateModel(self):
print("Validation score is", self.model.score(self.xValidation,self.yValidation))
def testModel(self):
print("test score is", self.model.score(self.xTest,self.yTest))
def main():
dataModel = modelClass()
dataModel.loadDataSequence()
domain = [{'name': 'nCNN','type':'discrete','domain':tuple(range(1,6))},
{'name': 'nDense','type':'discrete','domain':tuple(range(0,3))},
{'name': 'nEmbedding','type':'discrete','domain':tuple(range(5,200))},
{'name': 'nCNNFilters','type':'discrete','domain':tuple(range(2,1000))},
{'name': 'nNNFilters','type':'discrete','domain':tuple(range(3,1000))},
{'name': 'nKernel','type':'discrete','domain':tuple(range(1,4))},
{'name': 'nStrides','type':'discrete','domain':tuple(range(1,2))},
{'name': 'poolSize','type':'discrete','domain':tuple(range(1,2))}]
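    # Note: GPyOpt hands candidate points to optimizeCNN as a row vector in this same order,
    # so the entries above must stay aligned with variables[0][0]..[0][7]
    # (nCNN, nDense, nEmbedding, nCNNFilters, nNNFilters, nKernel, nStrides, poolSize).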
# bounds = [{"name":'C','type':'continuous','domain':(0.1,100)}]
#tf.enable_eager_execution()
optimizer = GPyOpt.methods.BayesianOptimization(
f = dataModel.optimizeCNN,
domain = domain,
acquisition_type ='LCB', # LCB acquisition
acquisition_weight = 0.1
)
max_iter = 20
optimizer.run_optimization(max_iter)
print(optimizer.x_opt)
if __name__ == "__main__":
main()
|
# File: main.py
# Auth: David Cerny
# Date: 26/07/2021
##################################################
""" Find the nth number in the Fibonacci sequence """
import argparse
def calculate_fib_number(n):
assert n >= 0, "n must be non-negative!"
if n == 0:
return 0
elif n == 1:
return 1
else:
# init seq
second_last = 0
last = 1
current = 0
for i in range(2, n + 1):
# calc ith number
current = second_last + last
# update previous numbers
second_last = last
last = current
return current
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, required=True)
args = parser.parse_args()
fib_num = calculate_fib_number(args.n)
print(f"{args.n}. fibonacci number is {fib_num}")
|
# Exercise 10: Write a recursive function that counts and returns the number of odd digits of a given positive integer n.
# Example: the function returns 4 if n is 19922610 (since n has 4 odd digits: 1, 9, 9, 1)
def dem(n) :
if n == 0 :
return 0
else :
if (n%10)%2 != 0 :
return 1 + dem(int(n/10))
else :
return dem(int(n/10))
n = -1
while(n < 0) :
    n = int(input('Enter a positive integer: '))
print(dem(n))
|
class Config:
FPS = 9
MENU_FPS = 60
WINDOW_WIDTH = 640
WINDOW_HEIGHT = 480
CELLSIZE = 20
assert WINDOW_WIDTH % CELLSIZE == 0,"Window width must be a multiple of cellsize"
assert WINDOW_HEIGHT % CELLSIZE == 0,"Window height must be a multiple of cellsize"
CELLWIDTH = int(WINDOW_WIDTH/CELLSIZE)
CELLHEIGHT = int(WINDOW_HEIGHT/CELLSIZE)
#colors
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
DARKGREEN =(0,155,0)
DARKGREY = (40,40,40)
BG_COLOR = BLACK
|
import time
from enum import Enum
BLOCK_SIZE = 2**14
class State(Enum):
FREE = 0
PENDING = 1
COMPLETE = 2
class Block():
def __init__(self, size=BLOCK_SIZE):
self.state = State.FREE
self.last_seen = 0
self.size = size
self.data = b''
def flush(self):
self.data = b''
self.state = State.FREE
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import requests
import json
import queue
from locust import HttpUser,TaskSet,task,between
class Ncar_Login(TaskSet):
@task(1)
def work_user_top(self):
headers={"content-type":"application/json"}
data={"data":{"channel":1,"algorithm":"BASE64","passWd":"YWJjLjEyMw==","userNm":"eXVlag=="}}
with self.client.post("/api/v3/iw-framework/login/simpleLogin",name="login",headers=headers,data=json.dumps(data),catch_response=True) as response:
if response.status_code==200 and response.json()['status']['status']==0:
response.success()
else:
response.failure("登录 接口异常!")
print(response.text)
class WebsiteUser(HttpUser):
tasks = [Ncar_Login]
host='XXXX'
wait_time = between(1,3)
|
# -*- python -*-
from flask import Flask, render_template, redirect, request, session
app = Flask( __name__ )
app.secret_key = "CounterSecretKey"
@app.route( '/' )
def index():
if 'counter' not in session:
session['counter'] = 1
else:
session['counter'] += 1
return( render_template( "index.html" ) )
@app.route( '/update', methods=['POST'] )
def update():
# Increment by 1 because index() will also increment by 1
session['counter'] += 1
return( redirect( '/' ) )
@app.route( '/reset', methods=['POST'] )
def reset():
# Reset to 0 because index() will also increment by 1
session['counter'] = 0
return( redirect( '/' ) )
# @app.route( '/update', methods=['POST'] )
# def update():
# if request.form['action'] == "incr":
# # Increment by 1 because index() will also increment by 1
# session['counter'] += 1
# else:
# # Reset to 0 because index() will also increment by 1
# session['counter'] = 0
# return( redirect( '/' ) )
app.run( debug=True )
|
import unittest
from katas.kyu_7.product_of_main_diagonal import main_diagonal_product
class MainDiagonalProductTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(main_diagonal_product([[1, 0], [0, 1]]), 1)
def test_equals_2(self):
self.assertEqual(main_diagonal_product(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]), 45
)
|
from os.path import abspath, dirname, join, exists
import yaml
def find_config(config_filename):
"""
    Return the default config file location. Normally this is the package
    installation directory, except when installed in develop mode or when using
    pytest. In develop mode, the config file is in the package source root. When
    using pytest, the config file is in the dodo project root.
"""
this_dir = dirname(abspath(__file__))
source_root_dir = dirname(this_dir)
project_root_dir = dirname(source_root_dir)
if exists(join(this_dir, config_filename)):
cfg_file = join(this_dir, config_filename)
elif exists(join(source_root_dir, config_filename)):
cfg_file = join(source_root_dir, config_filename)
elif exists(join(project_root_dir, config_filename)):
cfg_file = join(project_root_dir, config_filename)
else:
raise FileNotFoundError("The config file is missing.")
return cfg_file
def config_param(param, config="default", cfg_file=find_config("config.yml")):
"""
Get a configuration parameter.
Parameters
----------
param : str
The name of the config parameter.
config : str, optional
The key indicating which configuration parameters to retrieve (e.g., "default").
cfg_file : str, optional
The configuration file to read from.
Returns
-------
The value of the requested configuration parameter. An error is thrown if
the given parameter name is not found in the config file.
"""
with open(cfg_file) as ymlfile:
cfg = yaml.safe_load(ymlfile)
assert param in cfg[config], "Config parameter {} not found".format(param)
return cfg[config][param]
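# Example usage (hypothetical key names; assumes config.yml has a "default" section containing them):
#   db_host = config_param("db_host")
#   log_level = config_param("log_level", config="default", cfg_file=find_config("config.yml"))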
|
#The "assert" keyword can be used to check the input data for validity.
# If the assert condition is not true, the program quits with the "AssertionError" exception (if you did not catch it).
def aun(x,y):
assert x!=2
return x*y
print(aun(3,2)) #ok
print(aun(2,4)) #error x!=2
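# A minimal sketch of catching the AssertionError instead of letting the program quit
# (shown commented out, since the call above already raises and stops the script):
# try:
#     print(aun(2, 4))
# except AssertionError:
#     print("caught: x must not be 2")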
|
# Generated by Django 3.0.7 on 2020-07-21 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0004_auto_20200623_1522'),
]
operations = [
migrations.AddField(
model_name='course',
name='interested',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='course',
name='stage',
field=models.PositiveIntegerField(default=0),
),
]
|
#!/usr/bin/env python
# coding: utf-8
# https://github.com/usnistgov/iprPy
import iprPy
if __name__ == '__main__':
iprPy.command_line()
|
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText, textio
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
TABLE_SCHEMA = (
'idkey:STRING, '
'fecha:STRING, '
'NIT:STRING, '
'NOMBRES:STRING, '
'NOOBLIGACION:STRING, '
'CODIGO_ANTERIOR:STRING, '
'CLASE_GESTION:STRING, '
'RESPONSABLE_COBRO:STRING, '
'GESTOR:STRING, '
'CODIGO_GESTION:STRING, '
'DESCRIPCION_CAUSAL:STRING, '
'DIAS_MORA:STRING, '
'TELEFONO:STRING, '
'FECHA_GESTION:STRING, '
'DURACION:STRING, '
'HORA_DE_INICIO_DE_GRABACION:STRING, '
'HORA_DE_GRABACION:STRING, '
'VALOR_COMPROMISO:STRING, '
'FECHA_COMPROMISO:STRING, '
'CUOTAS_VENCIDAS:STRING, '
'DIRECCION:STRING, '
'FECHA_VENCIMIENTO:STRING, '
'CODIGO_DE_CONTACTO:STRING, '
'CONSDOCDEU:STRING, '
'NOTA:STRING, '
'GESTION_MOVIL:STRING, '
'DESCRIPCION_ZONA:STRING, '
'DESCRIPCION_NEGOCIO:STRING '
)
# ?
class formatearData(beam.DoFn):
def __init__(self, mifecha):
super(formatearData, self).__init__()
self.mifecha = mifecha
def process(self, element):
# print(element)
arrayCSV = element.split('|')
tupla= {'idkey' : str(uuid.uuid4()),
# 'fecha' : datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), #datetime.datetime.today().strftime('%Y-%m-%d'),
'fecha' : self.mifecha,
'NIT' : arrayCSV[0],
'NOMBRES' : arrayCSV[1],
'NOOBLIGACION' : arrayCSV[2],
'CODIGO_ANTERIOR' : arrayCSV[3],
'CLASE_GESTION' : arrayCSV[4],
'RESPONSABLE_COBRO' : arrayCSV[5],
'GESTOR' : arrayCSV[6],
'CODIGO_GESTION' : arrayCSV[7],
'DESCRIPCION_CAUSAL' : arrayCSV[8],
'DIAS_MORA' : arrayCSV[9],
'TELEFONO' : arrayCSV[10],
'FECHA_GESTION' : arrayCSV[11],
'DURACION' : arrayCSV[12],
'HORA_DE_INICIO_DE_GRABACION' : arrayCSV[13],
'HORA_DE_GRABACION' : arrayCSV[14],
'VALOR_COMPROMISO' : arrayCSV[15],
'FECHA_COMPROMISO' : arrayCSV[16],
'CUOTAS_VENCIDAS' : arrayCSV[17],
'DIRECCION' : arrayCSV[18],
'FECHA_VENCIMIENTO' : arrayCSV[19],
'CODIGO_DE_CONTACTO' : arrayCSV[20],
'CONSDOCDEU' : arrayCSV[21],
'NOTA' : arrayCSV[22],
'GESTION_MOVIL' : arrayCSV[23],
'DESCRIPCION_ZONA' : arrayCSV[24],
'DESCRIPCION_NEGOCIO' : arrayCSV[25]
}
return [tupla]
def run(archivo, mifecha):
gcs_path = "gs://ct-adeinco_juridico" #Definicion de la raiz del bucket
gcs_project = "contento-bi"
mi_runner = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
# pipeline = beam.Pipeline(runner="DirectRunner")
pipeline = beam.Pipeline(runner=mi_runner, argv=[
"--project", gcs_project,
"--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
"--temp_location", ("%s/dataflow_files/temp" % gcs_path),
"--output", ("%s/dataflow_files/output" % gcs_path),
"--setup_file", "./setup.py",
"--max_num_workers", "5",
"--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
# "--num_workers", "30",
# "--autoscaling_algorithm", "NONE"
])
# lines = pipeline | 'Lectura de Archivo' >> ReadFromText("gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT")
lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
# lines | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
# transformed | 'Escribir en Archivo' >> WriteToText("archivos/Info_carga_banco_prej_small", file_name_suffix='.csv',shard_name_template='')
# transformed | 'Escribir en Archivo' >> WriteToText("gs://ct-bancolombia/prejuridico/info_carga_banco_prej",file_name_suffix='.csv',shard_name_template='')
transformed | 'Escritura a BigQuery Adeinco' >> beam.io.WriteToBigQuery(
gcs_project + ":adeinco_juridico.gestiones",
schema=TABLE_SCHEMA,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
)
# transformed | 'Borrar Archivo' >> FileSystems.delete('gs://ct-avon/prejuridico/AVON_INF_PREJ_20181111.TXT')
# 'Eliminar' >> FileSystems.delete (["archivos/Info_carga_avon.1.txt"])
jobObject = pipeline.run()
# jobID = jobObject.job_id()
return ("Corrio Full HD")
|
#!/usr/bin/env python
"""
plugin_classifier.py
v0.1 Given a list of VOSource XML strings, or filepaths,
this generates classifications by calling WEKA and other classifier
code. Returns information in classification dictionaries.
"""
import sys, os
import copy
sys.path.append(os.environ.get("TCP_DIR") + '/Software/feature_extract/Code/extractors')
import mlens3 # for microlensing classification
import sn_classifier
sys.path.append(os.environ.get("TCP_DIR") + '/Software/feature_extract/MLData')
import arffify
# KLUDGE: this is ugly (importing ptf_master.py from within here.) the Diff_Obj_Source_Populator
# class should exist in a separate module / file:
import ptf_master # required by tions_for_tcp_marked_variables.get_overall_classification_without_repopulation()
import ingest_tools
try:
import get_classifications_for_caltechid
except:
pass
try:
import get_classifications_for_tcp_marked_variables # required by tions_for_tcp_marked_variables.get_overall_classification_without_repopulation()
except:
pass
try:
import jpype
except:
print "EXCEPT: plugin_classifier.py. Possibly on a development system without Java Weka of JPype installed."
pass # KLUDGE: This would except on a development system without Java Weka of JPype installed.
import weka_classifier
os.environ["JAVA_HOME"] = '/usr/lib/jvm/java-6-sun-1.6.0.03'
os.environ["CLASSPATH"] += os.path.expandvars(':$TCP_DIR/Software/ingest_tools')
class PluginClassifier:
""" Given a list of VOSource XML strings, or filepaths,
this generates classifications by calling WEKA and other classifier
code. Returns information in classification dictionaries.
"""
def __init__(self, class_schema_definition_dicts={}, class_abrv_lookup={}, \
use_weka_jvm=True, training_arff_features_list=[]):
self.class_schema_definition_dicts = class_schema_definition_dicts
self.training_arff_features_list = training_arff_features_list
self.arffmaker = arffify.Maker(search=[], \
skip_class=True, local_xmls=True, dorun=False, \
class_abrv_lookup=class_abrv_lookup)
if use_weka_jvm:
# TODO/NOTE: I think a WekaClassifier() class needs to be
# instantiated for each WEKA classification instance
# which uses a different .model and/or training .arff
# We initialize a Java virtual machine for Weka classifications
#try:
if not jpype.isJVMStarted():
#TODO / DEBUG: disable the next line for speed-ups once stable?
_jvmArgs = ["-ea"] # enable assertions
_jvmArgs.append("-Djava.class.path=" + \
os.environ["CLASSPATH"])
###20091905 dstarr comments out:
#_jvmArgs.append("-Xmx1000m")
_jvmArgs.append("-Xmx12000m") # 4000 & 5000m works, 3500m doesnt for some WEKA .models
jpype.startJVM(jpype.getDefaultJVMPath(), *_jvmArgs)
class_schema_name_list = self.class_schema_definition_dicts.keys()
class_schema_name_list.remove('mlens3 MicroLens')
class_schema_name_list.remove('Dovi SN')
class_schema_name_list.remove('General')
self.wc = {}
for class_schema_name in class_schema_name_list:
class_schema_dict = self.class_schema_definition_dicts[class_schema_name]
weka_training_model_fpath = class_schema_dict['weka_training_model_fpath']
weka_training_arff_fpath = class_schema_dict['weka_training_arff_fpath']
self.wc[class_schema_name] = weka_classifier.WekaClassifier( \
weka_training_model_fpath, weka_training_arff_fpath)
#except:
# print "EXCEPT: most likely (javac JPypeObjectInputStream.java) has not been done. See header of weka_classifier.py"
def get_class_probs_using_jvm_weka_instance(self, vosource_list, \
plugin_name='default plugin name'):
""" Use an already-instantiated Java JVM to run weka classification.
Format the results.
RETURN:
out_plugin_classification_dict[src_id][plugin_name]
class_probs_dict = {} # {src_id:[{'schema_id':'', 'class_id':'', 'prob':'', 'class_rank':'', 'prob_weight':''}]}
"""
#self.arffmaker.populate_features_and_classes_using_local_xmls(\
# srcid_xml_tuple_list=vosource_list)
out_plugin_classification_dict = {}
new_master_features_list = []
master_features_dict = dict(self.arffmaker.master_features) # NOTE: I believe the features in .master_features are the features found in the current source's vosurce-xml-string. # KLUDGE: this makes a dictionary from a list of tuples.
#for feat_name in self.training_arff_features_list:
for feat_name in self.class_schema_definition_dicts[plugin_name]['features_list']:
new_master_features_list.append((feat_name,
master_features_dict[feat_name]))
### This doesn't preserve the feature list order in the training .arff:
#for feat_name,feat_type in self.arffmaker.master_features:
# if feat_name in self.training_arff_features_list:
# new_master_features_list.append((feat_name,feat_type))
#stored_arffmaker_master_features = copy.deepcopy(self.arffmaker.master_features)
#self.arffmaker.master_features = new_master_features_list
# TODO: extract these features from the vosource:
        # (should be similar to methods used when making a temp .arff file)
#arff_record = [0.65815,3.518955,0.334025,0.79653,44.230391,3.163003,0.025275,0.004501,0.295447,-0.133333,3.144411,-0.65161,None,None]
#classified_result = self.wc.get_class_distribution(arff_record)
#print classified_result
prob_weight = 1.0 # This property may be used in Nat/Dovi to represent
# science classes which are known to be non applicable (0.0)
class_probs_dict = {}
for obj in self.arffmaker.master_list:
#if remove_sparse_classes:
# if not obj.get('class','') in self.arffmaker.master_classes:
# continue # skip this object due to being in a sparse class
#tmp = []
src_id = obj['num']
class_probs_dict[src_id] = []
arff_record = []
for fea in new_master_features_list:
val = None
if obj['features'].has_key(fea):
str_fea_val = str(obj['features'][fea])
if ((str_fea_val == "False") or
(str_fea_val == "inf") or
(str_fea_val == "nan") or
(str_fea_val == "None")):
val = None
elif fea[1] == 'float':
val = obj['features'][fea] # str_fea_val
else:
val = "%s" % str_fea_val # """'%s'""" % str_fea_val
arff_record.append(val)
# 20090130 old:
#for fea in self.arffmaker.master_features:
# val = None # "?"
# if obj['features'].has_key(fea):
# if fea[1] == 'float':
# if ((obj['features'][fea] == "False") or
# (str(obj['features'][fea]) == "inf") or
# (str(obj['features'][fea]) == "nan")):
# val = None # "?"
# elif obj['features'][fea] != None:
# val = obj['features'][fea] #val = str(obj['features'][fea])
# else:
# val = """'%s'""" % str(obj['features'][fea])
# arff_record.append(val)
classified_result = self.wc[plugin_name].get_class_distribution(arff_record)
out_plugin_classification_dict[src_id] = {plugin_name:{'probabilities':{}}}
for i, (class_name,class_prob) in enumerate(classified_result[:3]):
class_id = self.class_schema_definition_dicts[plugin_name] \
['class_name_id_dict'][class_name]
class_probs_dict[src_id].append(\
{'schema_id':self.class_schema_definition_dicts\
[plugin_name]['schema_id'],
'class_id':class_id,
'class_name':class_name,
'prob':class_prob,
'class_rank':i,
'prob_weight':prob_weight})
out_plugin_classification_dict[src_id][plugin_name] \
['probabilities'][class_name] = {'prob':class_prob, \
'prob_weight':1.0} # WEKA default KLUDGE
# # # # # # #
# TODO: eventually add WEKA ['value_added_properties'] to the returned dict
return out_plugin_classification_dict # class_probs_dict
def do_classification(self, vosource_list, class_schema_definition_dicts, do_logging=False):
""" Given a list of VOSource XML strings, or filepaths,
this generates classifications by calling WEKA and other classifier
code. Returns information in classification dictionaries.
TODO: maybe only do vosource XML parsing once
TODO: DiffObjSourcePopulator usage is very KLUDGY since
- it opens an rdt connection
- it imports many modules,
- etc
-> so, we shoud find a better way to pass in/reference a static object which has
access to all this stuff.
"""
if len(vosource_list) == 0:
return ({},{})
plugin_classification_dict = {} # This is returned.
for src_id,vosource_xml_str in vosource_list:
plugin_classification_dict[src_id] = {}
##### Weka Classification:
# TODO: run a full WEKA .model classification
# as well as a couple tailored n_epochs .model classification
# TODO: Maybe pass in a .model into this function:
if do_logging:
print "before: self.arffmaker.populate_features_and_classes_using_local_xmls()"
self.arffmaker.populate_features_and_classes_using_local_xmls(\
srcid_xml_tuple_list=vosource_list)
try:
n_epochs_fromfeats = self.arffmaker.master_list[0]['features'][('n_points', 'float')]
except:
print "EXCEPT: self.arffmaker.master_list[0]['features'][('n_points', 'float')] : Empty array?"
n_epochs_fromfeats = 0
if do_logging:
print "before: n_epochs_fromfeats > 1.0 try/except"
if n_epochs_fromfeats > 1.0:
class_schema_name_list = self.class_schema_definition_dicts.keys()
class_schema_name_list.remove('mlens3 MicroLens')
class_schema_name_list.remove('Dovi SN')
class_schema_name_list.remove('General')
for class_schema_name in class_schema_name_list:
#if 1:
try:
plugin_classification_dict__general = \
self.get_class_probs_using_jvm_weka_instance( \
vosource_list, plugin_name=class_schema_name)
for src_id, plugin_dict in plugin_classification_dict__general.\
iteritems():
plugin_classification_dict[src_id].update(plugin_dict)
except:
print "EXCEPT: Calling get_class_probs_using_jvm_weka_instance()"
if do_logging:
print "after: n_epochs_fromfeats > 1.0 try/except"
#DEBUG# return ({},{})
##### Microlensing classification:
#class_probs_dict__mlens = {}
for src_id,vosource_xml_str in vosource_list:
##########s_fp = cStringIO.StringIO(vosource_xml_str)
# TODO: I need to create google-pseudo-fp for this string:
if do_logging:
print "before: mlens3.EventData(vosource_xml_str)"
d = mlens3.EventData(vosource_xml_str)
##########del s_fp #.close()
if do_logging:
print "before: mlens3.Mlens(datamodel=d,doplot=False)"
## run the fitter (turn off doplot for running without pylab)
m = mlens3.Mlens(datamodel=d,doplot=False)#,doplot=True)
### prob_mlens should be between 0 and 1...anything above 0.8 is pretty sure bet
#prob_mlens = m.final_results["probabilities"]["single-lens"]
plugin_classification_dict[src_id]['mlens3 MicroLens'] = m.final_results
##### Nat/Dovi case:
if do_logging:
print "before: sn_classifier.Dovi_SN(datamodel=d,doplot=False)"
sn = sn_classifier.Dovi_SN(datamodel=d,doplot=False)#,doplot=True)
plugin_classification_dict[src_id]['Dovi SN'] = sn.final_results
#import pprint
#pprint.pprint(plugin_classification_dict[src_id]['Dovi SN'].get('probabilities',{}))
#print 'yo'
if do_logging:
print "after: for src_id,vosource_xml_str in vosource_list"
##### Combined / Final Classification:
# # # # # # # #
# # # TODO: if mlens prob >= 0.8 and weka_prob[0] < 0.8 : mlens is primary class (otherwise incorperate it by probability if mlens >= 0.6 as either 2nd or 3rd)
# TODO: combine info from previous classifications to make a final classification
# i.e.: Use plugin_classification_dict{} to make a 3 element class_probs_dict{<srcid>:[1,2,3]}
# TODO: get class_id for mlens3
# NOTE: class_probs_dict is used by generate_insert_classification_using_vosource_list() to INSERT classifications into RDB
#class_probs_dict = class_probs_dict__weka # TODO: extend this when other classification modules are used as well.
class_probs_dict = {}
for src_id,a_dict in plugin_classification_dict.iteritems():
#prob_list = []
class_probs_dict[src_id] = []
for plugin_name,plugin_dict in a_dict.iteritems():
prob_list = []
for class_name,class_dict in plugin_dict.get('probabilities',{}).iteritems():
class_id = self.class_schema_definition_dicts[plugin_name]['class_name_id_dict'][class_name] # TODO: get the MLENS class_id from somewhere!!!
temp_dict = {'schema_id':self.class_schema_definition_dicts[plugin_name]['schema_id'],
'class_id':class_id,
'class_name':class_name,
'plugin_name':plugin_name,
'prob':class_dict['prob'],
'prob_weight':class_dict['prob_weight']}
prob_list.append((class_dict['prob'],temp_dict))
#? OBSOLETE ? : source_class_probs_list.append(temp_dict)
#NOTE: for WEKA case, we generate class_ranks 1,2,3. Otherwise, we just pass on probability as class rank=1
if self.class_schema_definition_dicts[plugin_name]['predicts_multiple_classes']:
prob_list.sort(reverse=True)
#NOTE: for WEKA case, we generate class_ranks 1,2,3. Otherwise, we just pass on probability as class rank=1
for i,(prob_float,prob_dict) in enumerate(prob_list[:3]):
prob_dict['class_rank'] = i
class_probs_dict[src_id].append(prob_dict)
else:
for i,(prob_float,prob_dict) in enumerate(prob_list):
prob_dict['class_rank'] = i
class_probs_dict[src_id].append(prob_dict)
# 2) step in and make sure general_classif_dict{} is created below:
# 3) make sure classification & schema TABLE can take the new general/overview class & schema (update ingest.pars)
# 4) TEST and migrate changes to ipengine nodes. Then run for recent PTF night.
#########
# KLUDGE: this is ugly (importing ptf_master.py from within here.) the Diff_Obj_Source_Populator
        # class should exist in a separate module / file:
if do_logging:
print "before: DiffObjSourcePopulator = ptf_master.Diff_Obj_Source_Populator"
DiffObjSourcePopulator = ptf_master.Diff_Obj_Source_Populator(use_postgre_ptf=False) #True)
if do_logging:
print "after: DiffObjSourcePopulator = ptf_master.Diff_Obj_Source_Populator"
src_id = int(vosource_list[0][0]) # we can assume at this point that len(vosource_list) > 0
select_str = """SELECT id, realbogus, ujd, source_test_db.srcid_lookup.ra, source_test_db.srcid_lookup.decl FROM object_test_db.obj_srcid_lookup JOIN object_test_db.ptf_events ON (object_test_db.ptf_events.id = object_test_db.obj_srcid_lookup.obj_id) JOIN source_test_db.srcid_lookup USING (src_id) where survey_id=3 AND src_id=%d""" % (src_id)
if do_logging:
print select_str
DiffObjSourcePopulator.rdbt.cursor.execute(select_str)
rdb_rows = DiffObjSourcePopulator.rdbt.cursor.fetchall()
if do_logging:
print "after select .execute()"
general_classif_source_dict = {'obj_id':[],
'realbogus':[],
'ujd':[],
'ra': rdb_rows[0][3],
'dec':rdb_rows[0][4],
'src_id':src_id}
for row in rdb_rows:
general_classif_source_dict['obj_id'].append(row[0])
general_classif_source_dict['realbogus'].append(row[1])
general_classif_source_dict['ujd'].append(row[2])
if do_logging:
print "before: Get_Classifications_For_Ptfid = get_classifications_for_caltechid.GetClassifications"
#PTFPostgreServer = ptf_master.PTF_Postgre_Server(pars=ingest_tools.pars, \
# rdbt=DiffObjSourcePopulator.rdbt)
PTFPostgreServer = None
Get_Classifications_For_Ptfid = get_classifications_for_caltechid.GetClassificationsForPtfid(rdbt=DiffObjSourcePopulator.rdbt)
if do_logging:
print "before: general_classif_dict = get_classifications_for_tcp_marked_variables.get_overall"
general_classif_dict = get_classifications_for_tcp_marked_variables.get_overall_classification_without_repopulation(DiffObjSourcePopulator, PTFPostgreServer, Get_Classifications_For_Ptfid, Caltech_DB=None, matching_source_dict=general_classif_source_dict)
if do_logging:
print "after: general_classif_dict = get_classifications_for_tcp_marked_variables.get_overall"
DiffObjSourcePopulator.rdbt.cursor.close()
if general_classif_dict.has_key('science_class'):
class_type = general_classif_dict['science_class']
else:
class_type = general_classif_dict['overall_type']
try:
table_class_id = class_schema_definition_dicts['General']['class_list'].index(class_type)
except:
table_class_id = 0 # This is the "other" class, which may represent new periodic classes which havent been added to ingest_tools.py..pars['class_schema_definition_dicts']['General']['class_list']
class_probs_dict[src_id].append({'class_id': table_class_id,
'class_name': general_classif_dict['overall_type'],
'class_rank': 0,
'plugin_name': 'General',
'prob': general_classif_dict.get('class_prob',1.0),
'prob_weight': 1.0,
'schema_id': class_schema_definition_dicts['General']['schema_id']})
# how do I add
# TODO: then update the (class_probs_dict, plugin_classification_dict) information so that
# these classifications can be INSERTED into MySQL table
# TODO: new schema will need to be defined, which will allow INSERT of new classification schema.
#######################
# - this will eventually be called 1 stack up, using the singly passed:
# plugin_classification_dict{}
##### DEBUG:
#print class_probs_dict
if do_logging:
print "(end of) do_classification()"
return (class_probs_dict, plugin_classification_dict)
if __name__ == '__main__':
pass
|
from typing import List
import json
import math
class BaseFixture(object):
def __init__(self, x:int, z:int):
self.type = "7-pixel-base"
self.x = x
self.z = z
class Protocol(object):
def __init__(self, host:str, universe:int, start:int, num:int):
self.host = host
self.protocol = "artnet"
self.byteOrder = "rgb"
self.universe = universe
self.start = start
self.num = num
class Fixture(object):
def __init__(self, label:str, children:List[BaseFixture], outputs:List[Protocol]):
self.label = label
self.children = children
self.outputs = outputs
bases = []
FEET = 12 #using 1 = 1 inch
NUM_BASES = 200
POINTS_PER_BASE = 7
CENTER_DIAMETER = 30*FEET
BASE_SPACING = 8*FEET
CENTER_RADIUS = CENTER_DIAMETER/2
IP = "192.168.0.60"
i = 0
numbases = NUM_BASES
radius = CENTER_RADIUS
angle = 0
while numbases > 0:
perimeter = math.pi * 2 * radius
ring_bases = int(perimeter / BASE_SPACING)
radius_add = BASE_SPACING/ring_bases
skip = False
for j in range(20):
if i==j*(11+(3*j)) or i == 3 + j*(13+(3*j)) or i == 6 + j*(14+(3*j)) or i == 10 + j*(16+(3*j)):
skip = True
break
if not(skip):
base = BaseFixture(x=radius * math.cos(angle), z=radius*math.sin(angle))
bases.append (base)
numbases = numbases-1
angle = angle + math.pi * 2 / ring_bases
radius = radius + radius_add
i = i +1
numpoints = NUM_BASES * POINTS_PER_BASE
universe = 0
start = 0
BASES_PER_UNIVERSE = int(512 / (POINTS_PER_BASE * 3)) # 3 channels per point = RGB
outputs = []
while numpoints > 0:
points = min(numpoints,BASES_PER_UNIVERSE * POINTS_PER_BASE)
output = Protocol(IP, universe, start, points)
universe = universe + 1
numpoints = numpoints - points
start = start + points
outputs.append (output)
spiral = Fixture(label="Light Spiral", children=bases, outputs=outputs)
json_data = json.dumps(spiral, default=lambda o:o.__dict__, indent=4)
print(json_data)
|
#Saumit Madireddy
#I pledge my Honor that I have abided by the Stevens Honor System
#I understand that I may access the course textbook and course lecture notes but
#I am not to access any other resource. I also pledge that I worked alone on this exam.
def main():
menu()
def menu():
choice = input("""
For Mathematical Functions, Please Enter the Number 1
For String Operations, Please Enter the Number 2
""")
if choice == "1":
math()
elif choice == "2":
english()
else:
print("You must only select either 1 or 2")
print("Please try again")
menu()
def math():
choice = input("""
For Addition, Please Enter the Number 1
For Subtraction, Please Enter the Number 2
For Multiplication, Please Enter the Number 3
For Division, Please Enter the Number 4
""")
if choice == "1":
x = eval(input("Enter Number 1: "))
y = eval(input("Enter Number 2: "))
theSum = x + y
print(theSum)
elif choice == "2":
x = eval(input("Enter Number 1: "))
y = eval(input("Enter Number 2: "))
sub = x - y
print(sub)
elif choice == "3":
x = eval(input("Enter Number 1: "))
y = eval(input("Enter Number 2: "))
mult = x * y
print(mult)
elif choice == "4":
x = eval(input("Enter Number 1: "))
y = eval(input("Enter Number 2: "))
div = x / y
print(div)
else:
print("You must only select either 1,2,3, or 4")
print("Please try again")
math()
def english():
choice = input("""
To Determine the Number of Vowels in a String; Enter the Number 1
To Encrypt a String; Enter the Number 2
""")
if choice == "1":
string1 = input("Enter a Word: ")
count = 0
vowel = set("aeiouAEIOU")
for alphabet in string1:
if alphabet in vowel:
count = count + 1
print("No. of vowels :", count)
elif choice == "2":
string1 = input("Enter a Word: ")
print("\nHere is the encrypted Word:")
for i in string1:
x = ord(i)
print(" ", x + 5, end = "")
else:
print("You must only select either 1 or 2")
print("Please try again")
english()
main()
|
# Generated by Django 3.2 on 2021-05-28 15:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Case',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('First_Name', models.CharField(max_length=45)),
('Last_Name', models.CharField(max_length=45)),
('CaseType', models.CharField(max_length=45)),
('Address', models.CharField(max_length=45)),
('phone_no', models.CharField(max_length=45)),
('Status', models.CharField(max_length=45)),
('Age', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Customer_Name', models.CharField(max_length=255)),
('Writte_Comment', models.TextField()),
],
),
migrations.CreateModel(
name='Employee',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Emp_Id', models.CharField(max_length=45)),
('First_Name', models.CharField(max_length=45)),
('Last_Name', models.CharField(max_length=45)),
('Address', models.CharField(max_length=45)),
('phone_no', models.CharField(max_length=45)),
('Sallary', models.CharField(max_length=45)),
('Status', models.CharField(max_length=45)),
('Age', models.CharField(max_length=45)),
('qr_code', models.ImageField(blank=True, upload_to='qr_codes')),
],
),
migrations.CreateModel(
name='Judge',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Judge_id', models.CharField(max_length=45)),
('Employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.employee')),
],
),
migrations.CreateModel(
name='LowOfficer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('officer_id', models.CharField(max_length=45)),
('Employee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.employee')),
],
),
migrations.CreateModel(
name='Prisoner_Registration',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('age', models.IntegerField()),
('crime', models.CharField(max_length=30)),
('address', models.CharField(max_length=30)),
('room_number', models.IntegerField()),
('Punishment', models.CharField(max_length=30)),
('sex', models.CharField(max_length=30)),
('profile_pic', models.ImageField(blank=True, default='profile1.png', null=True, upload_to='')),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
migrations.CreateModel(
name='UserAccount',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=45)),
('Password', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='Summon',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Summon_body', models.TextField()),
('Judge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.judge')),
('LowOfficer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.lowofficer')),
],
),
migrations.CreateModel(
name='Shedule',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Date', models.DateTimeField(auto_now=True)),
('Court_Room', models.IntegerField()),
('body', models.TextField()),
('Case', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.case')),
('Judge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.judge')),
('LowOfficer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.lowofficer')),
],
),
migrations.CreateModel(
name='Manager',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('First_Name', models.CharField(max_length=45)),
('Last_Name', models.CharField(max_length=45)),
('Address', models.CharField(max_length=45)),
('phone_no', models.CharField(max_length=45)),
('UserAccount', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.useraccount')),
],
),
migrations.AddField(
model_name='employee',
name='UserAccount',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.useraccount'),
),
migrations.CreateModel(
name='Decision',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Writte_Dession', models.TextField()),
('Date', models.DateTimeField(auto_now=True)),
('Case', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.case')),
('Judge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.judge')),
],
),
migrations.AddField(
model_name='case',
name='Judge',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.judge'),
),
migrations.AddField(
model_name='case',
name='LowOfficer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courtmanagement.lowofficer'),
),
]
|
n=input()
listn=[int(x) for x in raw_input().split(" ")]
m=input()
list_label=[int(x) for x in raw_input().split(" ")]
list1=[0]
for i in listn:
list1.append(i+list1[-1])
list1.remove(0)
def send(N,list2,ind):
a=len(list2)
if a==1:
if N<=list2[0]:
print ind
else:
print ind+1
elif N<list2[a/2]:
send(N,list2[0:a/2],ind/2)
else:
send(N,list2[a/2:a+1],ind)
for j in list_label:
send(j,list1,n/2)
|
import io
from io import UnsupportedOperation
class S3StreamObj(io.RawIOBase):
# https://alexwlchan.net/2019/02/working-with-large-s3-objects/
def __init__(self, s3file,prefix=""):
self.s3_object = s3file
self.position = 0
self.size = self.s3_object.content_length
self.totalbytes = 0
self.eventGets = 0
self.prefix = prefix
def tell(self):
# https://docs.python.org/3/library/io.html#io.IOBase.seek
return self.position
def seekable(self):
arg = self
return True
def seek(self, offset, whence=io.SEEK_SET):
# https://python-reference.readthedocs.io/en/latest/docs/file/seek.html
# https://docs.python.org/3/library/io.html#io.IOBase.seek
if whence == io.SEEK_SET:
self.position = offset
elif whence == io.SEEK_CUR:
self.position += offset
elif whence == io.SEEK_END:
self.position = self.size + offset
else:
raise UnsupportedOperation("UnSupported Operation (%r) %s" % (whence, "https://docs.python.org/3.8/library/os.html#os.SEEK_SET"))
return self.position
def setBytesHeader(self,start,end=''):
return "bytes={}-{}".format(start,end)
def read(self, size=-1):
# https://docs.python.org/3/library/io.html#io.IOBase.seek
if size == -1:
bytesHeader = self.setBytesHeader(self.position)
self.totalbytes += (self.size - self.position)
self.seek(offset=0, whence=io.SEEK_END)
else:
new_position = self.position + size
# If we're going to read beyond the end of the object, return
# the entire object.
if new_position >= self.size:
return self.read()
# range_header = "bytes=%d-%d" % (self.position, new_position - 1)
# minus 1 since byte positions are inclusive
bytesHeader = self.setBytesHeader(self.position,new_position - 1)
# self.totalbytes += (new_position - 1 - self.position)
self.seek(offset=size, whence=io.SEEK_CUR)
self.eventGets += 1
event = self.s3_object.get(Range=bytesHeader)
self.totalbytes += event["ContentLength"]
return event["Body"].read()
def readinto(self,b):
# https://github.com/python/cpython/blob/6fdfcec5b11f44f27aae3d53ddeb004150ae1f61/Modules/_io/bytesio.c#L564
# Ignore WRITE as only READ required
return ""
def readall(self):
return self.s3_object.get()["Body"].read()
    def write(self, b):
        # https://docs.python.org/3/library/io.html#io.RawIOBase
        # Ignoring WRITE as only READ required
        return ""
|
from _BNode import _BNode  # import the class (not the module) so _BPlusLeaf can subclass it
import bisect
class _BPlusLeaf(_BNode):
__slots__ = ["tree", "contents", "data", "next"]
def __init__(self, tree, contents=None, data=None, next=None):
self.tree = tree
self.contents = contents or []
self.data = data or []
self.next = next
assert len(self.contents) == len(self.data), "one data per key"
def insert(self, index, key, data, ancestors):
self.contents.insert(index, key)
self.data.insert(index, data)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def lateral(self, parent, parent_index, dest, dest_index):
if parent_index > dest_index:
dest.contents.append(self.contents.pop(0))
dest.data.append(self.data.pop(0))
parent.contents[dest_index] = self.contents[0]
else:
dest.contents.insert(0, self.contents.pop())
dest.data.insert(0, self.data.pop())
parent.contents[parent_index] = dest.contents[0]
def split(self):
center = len(self.contents) // 2
# median = self.contents[center - 1]
sibling = type(self)(
self.tree,
self.contents[center:],
self.data[center:],
self.next)
self.contents = self.contents[:center]
self.data = self.data[:center]
self.next = sibling
return sibling, sibling.contents[0]
def remove(self, index, ancestors):
minimum = self.tree.order // 2
if index >= len(self.contents):
self, index = self.next, 0
key = self.contents[index]
# if any leaf that could accept the key can do so
# without any rebalancing necessary, then go that route
current = self
while current is not None and current.contents[0] == key:
if len(current.contents) > minimum:
if current.contents[0] == key:
index = 0
else:
index = bisect.bisect_left(current.contents, key)
current.contents.pop(index)
current.data.pop(index)
return
current = current.next
self.grow(ancestors)
def grow(self, ancestors):
minimum = self.tree.order // 2
parent, parent_index = ancestors.pop()
left_sib = right_sib = None
# try borrowing from a neighbor - try right first
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# fallback to left
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# join with a neighbor - try left first
if left_sib:
left_sib.contents.extend(self.contents)
left_sib.data.extend(self.data)
parent.remove(parent_index - 1, ancestors)
return
# fallback to right
self.contents.extend(right_sib.contents)
self.data.extend(right_sib.data)
parent.remove(parent_index, ancestors)
|
# -*- coding: utf-8 -*-
import os
from itertools import imap
from operator import itemgetter
from avl_tree import AVLTree
from helpers import (treatment_add_del, find_polygon, l, calc_Y, get_row_dict,
update_dict_vals)
ALL_XS = [[-181, AVLTree()], ]
# runs throughout ALL_XS
# stopped = 0
# def get_coordinates():
# """
# read the lines from the file
# """
# file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'simple_coords2')
#
# with open(file_path) as f:
# for line in f.readlines():
# if not line.startswith('#'):
# pol_id, other_id, line_co = line.split(' ', 2)
# icoords = imap(lambda x: x.split(), line_co.split(','))
# coord_processing(pol_id, icoords)
# def process_tree_nodes(nodes, x_middle, n_x):
# """
# Determine each node's val, determine the nodes to delete,
# sort by val
# """
# # global deletions
# to_delete = []
# # if x2 coincides with n_x, then mark it for deletion
# for n in nodes:
# n['val'] = calc_Y(x_middle, n['a'], n['b'])
#
# if n['x2'] == n_x:
# to_delete.append({
# 'val': n['val'], 'y2': n['y2'], 'pol_id': n['pol_id'], })
#
# return sorted(to_delete, key=itemgetter('val')), sorted(nodes, key=itemgetter('val'))
# lists of nodes to delete; each element consists of val and y2
# we will group them by y2
deletions = []
def process_tree(row, main_file, err_del_nodes_old, del_nodes_old, is_holl):
    # find the nodes for the new tree
row_float = float(row['x1'])
all_x2 = [float(row['x2']), ]
print 'STAAAAAAAAAAAAAAAAAAAAAAAAAAAAAART'
print 'is_holl', is_holl
try:
next_line = main_file.next()
next_row = get_row_dict(next_line)
print 'next_row', next_row
next_float = float(next_row['x1'])
except StopIteration:
return None, 1, 1, 1
add_nodes = [row, ]
while next_float == row_float:
add_nodes.append(next_row)
all_x2.append(float(next_row['x2']))
try:
next_line = main_file.next()
next_row = get_row_dict(next_line)
next_float = float(next_row['x1'])
except StopIteration:
next_float = 181
break
min_x2_float = min(all_x2)
max_x2_float = max(all_x2)
x_middle = (min_x2_float + row_float) / 2
# print 'all_x2', all_x2
# print 'x_middle', x_middle, 'prev_row', row_float, 'next_row', next_float
prev_tree = ALL_XS[-1][1]
next_tree = AVLTree()
# l()
# print 'prev_tree'
# prev_tree.show()
# l()
print 'err_del_nodes_old', err_del_nodes_old
l()
print 'prev_tree'
prev_tree.show()
# l()
print 'next_tree'
next_tree.show()
    # refresh all values of the new nodes to be added
update_dict_vals(add_nodes, x_middle)
    # if the previous tree is empty or there is a gap between the polygons
if is_holl or prev_tree.root.val is None:
for a in add_nodes:
# print 'add', float(a['val'])
next_tree.add(next_tree.root, float(a['val']), a['a'], a['b'], a['pol_id'], a['x2'], a['y2'])
else:
        # delete the broken nodes whose x2 is smaller than the next x1
for err_d in err_del_nodes_old:
print 'delete err_del_node', float(err_d['val'])
next_tree.delete_versionly(prev_tree, float(err_d['val'])) # 45.7427962225
print 'del_nodes_old', del_nodes_old
        # handle the nodes to replace/add/delete
to_replace, proc_del, proc_add = treatment_add_del(del_nodes_old, add_nodes)
print 'proc_del', proc_del
for d in proc_del:
next_tree.delete_versionly(prev_tree, float(d['val']))
print 'to_replace', to_replace
for (d, a) in to_replace:
next_tree.replace_versionly(prev_tree, float(d['val']), a)
        # refresh all node values in the tree
next_tree.update_vals(x_middle)
        # refresh all values of the new nodes to be added
# update_dict_vals(proc_add, x_middle)
print 'proc_add', proc_add
        # the previous tree is non-empty but the new tree is empty,
        # and if we have already deleted something, then add without versioning
if next_tree.root.val is None and (del_nodes_old or err_del_nodes_old):
l()
print 'next_tree.root.val is None and (del_nodes_old or err_del_nodes_old)!!!'
for a in proc_add:
# print 'add', a['val']
next_tree.add(next_tree.root, float(a['val']), a['a'], a['b'], a['pol_id'], a['x2'], a['y2'])
        # the new tree is simply empty (since nothing was deleted) or non-empty, so add with versioning
else:
l()
print 'next_tree.root.val is None or is not empty!!!'
for a in proc_add:
# print 'add_versionly', a['val']
next_tree.add_versionly(prev_tree, a)
    # reset the versioning flags of the next_tree tree
next_tree.remove_update_flags(next_tree.root)
ALL_XS.append([float(row_float), next_tree])
err_del_nodes = [node for node in add_nodes if float(node['x2']) < next_float]
print 'err_del_nodes', err_del_nodes
# if err_del_nodes:
# print 'Err_del_nodes exists', row_float, err_del_nodes
del_nodes = [node for node in add_nodes if float(node['x2']) == next_float]
print 'next_tree'
next_tree.show()
print 'EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEND'
print 'max_x2_float', max_x2_float, 'next_float', next_float
l()
return next_row, err_del_nodes, del_nodes, max_x2_float < next_float
    # # update all values in the nodes
    # next_tree.update_vals(next_tree.root, x_middle)
    # # reset the update flags
# next_tree.remove_update_flags(next_tree.root)
#
    # # tree rebuilding process
# ref_to_tree = next_tree
# # next_tree.show()
#
# ALL_XS[-1][1] = ref_to_tree
    # # next X value
# ALL_XS.append([n_x, None])
#
# return n_x if not is_end else None
if __name__ == "__main__":
file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'outer_sort', 'cut')
with open(file_path) as main_file:
line = main_file.next()
row = get_row_dict(line)
new_row, err_del_nodes, del_nodes, is_holl = process_tree(
row, main_file, [], [], False)
while new_row is not None:
new_row, err_del_nodes, del_nodes, is_holl = process_tree(
new_row, main_file, err_del_nodes, del_nodes, is_holl)
for (x, tree) in ALL_XS:
l()
print x
print '\n'
tree.show()
print len(ALL_XS)
print ALL_XS
# pol_id = find_polygon(second_tree.root, 1.5, 1.4)
# avl = AVLTree([3,2,4,1,5])
# avl.show()
# l()
# avl2 = AVLTree()
# avl2.replace_versionly(avl, 1, {'a': 'a1', 'b': 'b1', 'pid': 'pid'})
# avl2.show()
# print avl2.root.left.left.pids
# avl.add(avl.root, 1)
# avl.show()
# l()
# avl = AVLTree()
# left rotate
# x = [1,2,] # 3
# x = [5,1,7,6,8,] # add 9 (avl2.ADD_versionly(avl, 9))
# x = [2,1,3,4,] # 5
# x = [3,2,5,4,6,] # 7
# x = [4,2,6,1,3,5,8,7,9] # 10
# x = [2,1,3,4,] # 5
# x = [2,1,5,3,6] # 4
# x = [4,2,8,1,3,6,10,5,7,9,11,] # 12
# big left rotate
# x = [1,3] # 2
# x = [2,1,3,5,] # 4
# x = [2,1,6,4,7] # 5
# x = [4,1,5,3] # 2
# x = [3,1,7,2,4,9,3.5,5,8,10,] # 6
# x = [3,1,7,2,4,9,3.5,5,8,10,] # 3.7
# right rotate
# x = [3,2] # 1
# x = [5,3,6,2,4,] # 1
# x = [6,4,7,3,5,8,2,] # 1
# x = [-4,-2,-6,-1,-3,-5,-8,-7,-9] # -10
# x = [3,1,7,2,4,9,3.5,5,] # 3.7
# big right rotate
# x = [3,1] # 2
# x = [2,1,5,3,] # 4
# x = [3,1,7,2,4,9,3.5,5,] # 6
# get_node
# x = [2,1,3]
# x = [1,]
# x = []
# delete versionly
# left rotate
# x = [5,1,7,6,8,] # del 1
# x = [2,1,4,3,5] # del 1
# x = [3,2,5,1,4,7,6,8] # del 4
# big left rotate
# x = [2,1,4,3] # del 1
# right rotate
# x = [3,2,4,1] # 4
# big right rotate
# x = [3,1,4,2] # 4
# x = [6,4,7,2,5,8,3] # 5
# x = [-3,-1,-7,-2,-5,-9,-4,-6] # -2
# state of node to delete
# has l, no r
# x = [2,1,4,3] # 4
# x = [3,2,4,1] # 1
# has r, no l
# x = [3,1,4,2] # 1
# x = [5,3,10,2,4,8,11,1,6,9,12,6,7] # 4
# x = [5,2,10,1,4,8,11,3,6,9,12,6,7] # 1
# has r, has l
# x = [2,1,3] # 2
# x = [4,2,5,1,3,6] # 2
# for i in x:
# avl.add(avl.root, i)
# l()
# avl.show()
#
# avl2 = AVLTree()
#
# avl2.delete_versionly(avl, 2)
# avl2.add_versionly(avl, 12)
# print avl2.get_node_versionly(avl, 1)
# l()
# avl.show()
# l()
# avl2.show()
# import os
# print os.path.dirname(__file__)
# print dir(__file__)
|
inp=input("enter list items:")
etc=inp.split()
for values in etc:
for i in range(int(values)):
print('*',end="")
print("\n")
''' other way
inp=input("enter list items:")
etc=inp.split()
x=len(etc)
y=int(x)
for values in etc:
y=int(values)
print("\n")
while(y>0):
print("*",end="")
y=y-1
'''
|
class NodoPila :
def __init__(self,valor ,anterior=None):
self.valor = valor
self.anterior = anterior
def getValor(self):
return self.valor
def getAnterior(self):
return self.anterior
class Pila :
def __init__(self):
self.size = 0
self.nodo = None
def getSize(self):
return self.size
def push(self,valor):
nuevo = NodoPila(valor, self.nodo)
self.nodo = nuevo
self.size = self.size + 1
def top(self):
        if(self.nodo != None):
            return self.nodo.getValor()
else:
return None
def pop(self):
if(self.nodo != None):
retu = self.nodo.getValor()
self.nodo = self.nodo.getAnterior()
self.size = self.size -1
return retu
else:
return None
objeto = NodoPila(12)
#print(objeto.getValor())
pila = Pila()
print("se ingresan 12 valores")
for a in range(0,12):
pila.push(a)
print(pila.getSize())
print("se sacan 12 valores")
for a in range(0,12):
print(pila.pop())
print(pila.getSize())
|
import utility
from flask_jwt_extended import jwt_required
from flask import request
from flask_restful import Resource
from connection import db_session, commit
from models.sys_rmodul import SysRmodul
class GetRmodul(Resource):
@staticmethod
@jwt_required
def get():
try:
with db_session:
data = SysRmodul.select()
result = [row.to_dict() for row in data]
return utility.give_response("00", "GET ROLE MODUL SUKSES", result)
except Exception as e:
return utility.give_response("01", str(e))
class InsertRModul(Resource):
@staticmethod
@jwt_required
def post():
try:
with db_session:
sysrole_kode = request.form["sysrole_kode"]
sysmodul_kode = request.form["sysmodul_kode"]
c = SysRmodul(sysrole_kode=sysrole_kode, sysmodul_kode=sysmodul_kode)
commit()
return utility.give_response("00", "INSERT ROLE MODUL SUKSES", c.to_dict())
except Exception as e:
return utility.give_response("01", str(e))
class DeleteRModul(Resource):
@staticmethod
@jwt_required
def post():
try:
with db_session:
sysrole_kode = request.form["sysrole_kode"]
sysmodul_kode = request.form["sysmodul_kode"]
SysRmodul[sysrole_kode, sysmodul_kode].delete()
return utility.give_response("00", "DELETE ROLE MODUL SUKSES")
except Exception as e:
return utility.give_response("01", str(e))
|
class Array:
def __init__(self, length):
self.__items = [0] * length
def print(self):
for i in self.__items:
print(i)
array = Array(10)
array.print()
|
import sys, os, logging, __main__, string
import json
import inspect
from ConfigParser import SafeConfigParser
from StringIO import StringIO
import psycopg2
import psycopg2.pool
import psycopg2.extras
class AttrDict(dict):
"""
Extended dict with items accessible as attributes
"""
def __getattr__(self, name):
return self.get(name)
def __setattr__(self, name, value):
self[name] = value
def getint(self, key, default=0):
return int(self.get(key, default))
def getstr(self, key, default=''):
return str(self.get(key, default))
def getfloat(self, key, default=0.0):
return float(self.get(key, default))
def getlist(self, key, default=None, itemtype=None):
value = self.get(key, default or [])
if isinstance(value, tuple):
value = list(value)
elif isinstance(value, str):
value = map(string.strip, value.split(','))
elif not isinstance(value, list):
value = [value]
if itemtype:
value = map(itemtype, value)
return value
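# Example of attribute access and the typed getters (values chosen for illustration):
#   cfg = AttrDict(port='8080', tags='a, b')
#   cfg.port            -> '8080'
#   cfg.getint('port')  -> 8080
#   cfg.getlist('tags') -> ['a', 'b']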
def script_dir():
"""
Current script dir
"""
try:
script = __main__.__file__
return os.path.dirname(os.path.abspath(script))
except AttributeError:
return os.getcwd()
def read_config(section, config_file=None, defaults=None):
"""
Reads config file and returns configuration.
config file name defaults to config.ini when not given as program arg
"""
if not config_file:
if len(sys.argv) > 1:
config_file = sys.argv[1]
else:
path = script_dir()
config_file = os.path.join(path, "config.ini")
if not os.path.isfile(config_file):
raise Exception("File %s not found" % config_file)
cp = SafeConfigParser()
cp.read(config_file)
config = AttrDict(defaults or {})
if cp.has_section(section):
config.update(cp.items(section))
return config
def config_logging(config):
"""
Configures logging based on config data
"""
params = {}
if 'log_file' in config:
params['filename'] = config['log_file']
else:
params['stream'] = sys.stdout
config.setdefault('log_level', 'info')
params['level'] = getattr(logging, config['log_level'].upper())
if 'log_format' in config:
params['format'] = config['log_format']
# remove existing handlers
handlers = logging.root.handlers
while handlers:
handlers.pop()
# do config
logging.basicConfig(**params)
return logging
def db_wrapper(dsn, keyword='db', autocommit=True):
def wrapper(func):
args = inspect.getargspec(func)[0]
if keyword not in args:
return func
def wrapped(*p, **kw):
conn = psycopg2.connect(dsn, cursor_factory=psycopg2.extras.RealDictCursor)
kw[keyword] = conn.cursor()
try:
result = func(*p, **kw)
if autocommit:
conn.commit()
return result
except:
conn.rollback()
raise
finally:
conn.close()
return wrapped
return wrapper
def pool_wrapper(pool, keyword='db', autocommit=True):
def wrapper(func):
args = inspect.getargspec(func)[0]
if keyword not in args:
return func
def wrapped(*p, **kw):
conn = pool.getconn()
kw[keyword] = conn.cursor()
try:
result = func(*p, **kw)
if autocommit:
conn.commit()
return result
except:
conn.rollback()
raise
finally:
pool.putconn(conn)
return wrapped
return wrapper
class DB:
"""
Database connection helper
"""
def __init__(self, dsn, **kwargs):
kwargs.setdefault('cursor_factory', psycopg2.extras.RealDictCursor)
self.dsn = dsn
self.kwargs = kwargs
self._con = None
@property
def con(self):
"""
lazy connect
"""
if not self._con:
self._con = psycopg2.connect(self.dsn, **self.kwargs)
return self._con
@classmethod
def from_config(cls, config):
return cls(config.db)
def __getattr__(self, name):
return getattr(self.con, name)
def load_data(self, data, table, columns=None, clean=False, transactional=False):
try:
if columns:
# get column data from dict
data = ([row[col] for col in columns] if isinstance(row, dict) else row
for row in data)
# join columns
data = (row if isinstance(row, str) else '\t'.join(row) for row in data)
# join lines and make StringIO object
f = StringIO('\n'.join(data))
if clean:
self.truncate(table)
cur = self.cursor()
cur.copy_from(f, table, columns=columns)
if transactional:
self.commit()
except:
if transactional:
self.rollback()
raise
copy_from = load_data
def truncate(self, table):
return self.execute('truncate table %s' % table)
def query(self, sql, params=()):
cur = self.con.cursor()
cur.execute(sql, params)
return cur.fetchall()
select = query
def execute(self, sql, params=()):
cur = self.con.cursor()
cur.execute(sql, params)
return cur
def queryproc(self, proc, params=()):
cur = self.con.cursor()
cur.callproc(proc, params)
return cur.fetchall()
callproc = queryproc
def execproc(self, proc, params=()):
cur = self.con.cursor()
cur.callproc(proc, params)
return cur
class Query:
"""
SQL query builder
Builds sql from given parts
"""
def __init__(self, db, sql=None, params=None):
"""
db - database cursor object
sql - initial sql part
params - initial params value
"""
self.db = db
self.sql = []
# result row map. when col name is in map, value is replaced with result
# of mapping function
self.map = {}
# query parameters
self.params = AttrDict(params or {})
self.add(sql, sql)
def add(self, sql, cond=True, params=None):
"""
Adds SQL part if condition is True
"""
if cond:
self.sql.append(sql)
params and self.params.update(params)
return self
def get_sql(self):
"""
Returns SQL string
"""
return "\n".join(self.sql)
def execute(self, **kw):
"""
Execute SQL with self.params + call params and map result values
"""
params = {}
params.update(self.params)
params.update(kw)
self.db.execute(self.get_sql(), params)
result = self.db.fetchall()
        # apply column mapping functions in place
        # (a plain loop, so it also works on Python 3 where map() is lazy)
        for r in result:
            r.update((k, f(r[k])) for k, f in self.map.items())
return result
__call__ = execute
def first(self, **kw):
result = self.execute(**kw)
if result:
return result[0]
else:
return None
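# --- Hypothetical usage sketch for the Query builder (table and column names
# are illustrative assumptions): parts are appended conditionally, then the
# query is executed with the merged parameters on the wrapped cursor. ---
# q = Query(db.cursor(), 'select * from events where 1=1')
# q.add('and kind = %(kind)s', cond=True, params={'kind': 'click'})
# q.add('and ts >= %(since)s', cond=False)   # skipped, condition is False
# rows = q()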
# geometry object conversions
def geojson_to_arr_coord(geojson):
"""
converts geojson to array of coordinates
"""
geojson = json.loads(geojson)
coords = geojson['coordinates']
if geojson['type'] == 'Polygon':
coords = coords[0]
return coords
def arr_coord_to_area(arr_coord):
"""
converts coordinate array to json containing {sw:..., ne:...}
"""
# convert coordinates to string
    coords = [','.join(str(c) for c in coord) for coord in arr_coord]
return json.dumps({
'sw': coords[0],
'ne': coords[2]
})
def geojson_to_area(geojson):
return arr_coord_to_area(geojson_to_arr_coord(geojson))
def geojson_to_lng_lat_dict(geojson):
    coords = [str(c) for c in geojson_to_arr_coord(geojson)]
return {
"lng": coords[0],
"lat": coords[1],
"point": ','.join(coords),
}
def geojson_to_point(geojson):
return ','.join(map(str, geojson_to_arr_coord(geojson)))
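# --- Hypothetical usage sketch (the GeoJSON literal is illustrative): ---
# point = '{"type": "Point", "coordinates": [10.0, 20.0]}'
# geojson_to_lng_lat_dict(point)
# # -> {'lng': '10.0', 'lat': '20.0', 'point': '10.0,20.0'}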
|
#!/usr/bin/env python3
if __name__ == '__main__':
n = int(input())
arr = map(int, input().split())
maxvalue = runnerup = -100
for value in arr:
if value > maxvalue:
runnerup = maxvalue
maxvalue = value
elif value > runnerup and value < maxvalue:
runnerup = value
print(runnerup)
|
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='spotifyscraper',
version='1.0.0',
description='A sample Python project',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/pypa/sampleproject',
author='The Python Packaging Authority',
packages=["SpotifyScraper"],
author_email='pypa-dev@googlegroups.com',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='spotify spotifydownloader downloader mp3downloader webscraper spotifyscraper music cover setuptools development',
    python_requires='>=3.6, <4',
install_requires=['appdirs',
'beautifulsoup4',
'bs4',
'certifi',
'chardet',
'cssselect',
'deprecation',
'eyeD3',
'fake-useragent',
'filetype',
'idna',
'lxml',
'packaging',
'parse',
'pyee',
'pyparsing',
'pyppeteer',
'pyquery',
'PyYAML',
'requests',
'six',
'soupsieve',
'tqdm',
'urllib3',
'w3lib',
'websockets',
], # Optional
project_urls={ # Optional
'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
'Funding': 'https://donate.pypi.org',
'Say Thanks!': 'http://saythanks.io/to/example',
'Source': 'https://github.com/pypa/sampleproject/',
},
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import zmq
import time
def main():
context = zmq.Context()
xsub_socket = context.socket(zmq.SUB)
xsub_socket.connect('tcp://localhost:6001')
xsub_socket.setsockopt(zmq.SUBSCRIBE, b'')
n = 0
ot = time.time()
ln = 0
while True:
#print('listen..')
try:
data = xsub_socket.recv()
except KeyboardInterrupt:
print('KeyboardInterrupt - quit')
break
n += 1
if n % 1000 == 0:
t = time.time()
if t - ot > 1:
print("%s | %.2f | %d | %.1fm/s" % (data.decode(), t - ot, n - ln, (n - ln) / (t - ot) ))
ot = t
ln = n
if data == b'quit':
break
if __name__ == '__main__':
main()
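# --- Hypothetical counterpart sketch (not part of this script): a minimal PUB
# socket bound on port 6001 that this subscriber could connect to; sending the
# b'quit' payload makes the subscriber leave its loop. ---
# def publisher():
#     ctx = zmq.Context()
#     pub = ctx.socket(zmq.PUB)
#     pub.bind('tcp://*:6001')
#     time.sleep(0.5)              # give subscribers time to connect
#     for i in range(10000):
#         pub.send(b'msg %d' % i)
#     pub.send(b'quit')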
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
import numpy as np
img = plt.imread("test.png")
fig, axs = plt.subplots(4,2, figsize=(15,15))
### a
rgb_weights = [1.0, 1.0, 1.0]
coloredImage = np.dot(img[...,:3], rgb_weights)
axs[0,0].imshow(img, origin="upper")
axs[0,0].set_title('Original')
rgb_weights = [1.0, 0.0, 0.0]
redImage = np.dot(img[...,:3], rgb_weights)
axs[0,1].imshow(redImage, origin="upper", cmap = "Reds")
axs[0,1].set_title('Red')
rgb_weights = [0.0, 1.0, 0.0]
greenImage = np.dot(img[...,:3], rgb_weights)
axs[1,0].imshow(greenImage, origin="upper", cmap = "Greens")
axs[1,0].set_title('Green')
rgb_weights = [0.0, 0.0, 1.0]
blueImage = np.dot(img[...,:3], rgb_weights)
axs[1,1].imshow(blueImage, origin="upper", cmap = "Blues")
axs[1,1].set_title('Blue')
### b
cyanImage = np.subtract(redImage, coloredImage)
axs[2,0].imshow(cyanImage, origin="upper", cmap = "gray")
axs[2,0].set_title('Cyan')
magentaImage = np.subtract(blueImage, coloredImage)
axs[2,1].imshow(magentaImage, origin="upper", cmap = "gray")
axs[2,1].set_title('Magenta')
yellowImage = np.subtract(greenImage, coloredImage)
axs[3,0].imshow(yellowImage, origin="upper", cmap = "gray")
axs[3,0].set_title('Yellow')
plt.show()
#plt.imshow(grayscale_image, cmap = plt.get_cmap("gray"))
|
nome = input('Enter your name: ')
print('It is a pleasure to meet you, {}'.format(nome))
|
import logging
from constants import CONF, Confirmation, FUNCTION, ID, Slot
from dialog_action import ActionType, DialogAction
class DialogPolicy:
"""Policy for the dialog agent.
Attributes:
        config (`configs.DialogConfiguration`): Dialog configuration that
            parametrizes the dialog policy.
    Args:
        config (`configs.DialogConfiguration`): Dialog configuration that
            parametrizes the dialog policy.
"""
def __init__(self, config):
self.config = config
def next_action(self, state, prev_action, parse):
"""Computes the next action for the dialog agent.
The calculation is done according to the policy based on current
state, previous action, and the parse of the most recent user-utterance.
Args:
state (`dialog_state.DialogState`): Current state of dialog agent.
prev_action (`dialog_action.DialogAction`): The previous
system-action.
parse (dict): A dictionary of slot-value pairs representing the
parse of the current user-utterance.
Returns:
`dialog_action.DialogAction`: The next system-action.
"""
if prev_action is None:
return self._greet_action()
elif prev_action.type is ActionType.greet:
return self._next_action_after_greet(state, prev_action)
elif prev_action.type is ActionType.ask_slot:
return self._next_action_after_ask_slot(state, prev_action)
elif prev_action.type is ActionType.confirm:
return self._next_action_after_confirm(state, prev_action, parse)
elif prev_action.type is ActionType.inform:
return self._next_action_after_inform(state, prev_action, parse)
else:
logging.error("%s: Illegal action type %s.",
self.next_action.__name__, prev_action)
raise TypeError
def _next_action_after_greet(self, state, prev_action):
# If all four slots have very low confidences, ask the user to reword.
if self._is_rewording_required(state):
return self._reword_action(prev_action=prev_action)
# Determine which of the four slots have confidences above the threshold
        # `self.config.alpha` to not require confirmation. Such slots will not
        # be explicitly confirmed due to the parser's high confidence in their
# values. Their values are fixed.
# The slots whose confidences lie between `self.config.alpha` and
# `self.config.beta` are the ones whose value the parser is not very
# sure of. Their values are explicitly confirmed before being fixed, or
# before asking for values of other slots with even lower confidences.
fixed_predicates, fixed_values = [], []
# Trigger has preference over Action
trigger_channel = state.trigger
if trigger_channel[CONF] < self.config.beta:
# Confidence is too low. The system should ask the user explicitly
# for the slot value.
return self._ask_slot_action(predicate=Slot.trigger_channel)
elif trigger_channel[CONF] < self.config.alpha:
# Confidence is low enough to merit a confirmation, but not too low
# to completely ignore the value and ask for slot again.
return self._confirm_action(predicate=Slot.trigger_channel,
value=trigger_channel[ID])
else:
# Confidence is high enough to not even require a confirmation.
fixed_predicates.append(Slot.trigger_channel)
fixed_values.append(trigger_channel[ID])
# If control reaches here, the Trigger Channel is filled confidently.
trigger_fn = trigger_channel[FUNCTION]
if trigger_fn[CONF] < self.config.beta:
# Confidence is too low. The system should ask the user explicitly
# for the slot value.
return self._ask_slot_action(predicate=Slot.trigger_fn)
elif trigger_fn[CONF] < self.config.alpha:
# Confidence is low enough to merit a confirmation, but not too low
# to completely ignore the value and ask for slot again.
return self._confirm_action(predicate=Slot.trigger_fn,
value=trigger_fn[ID])
else:
# Confidence is high enough to not even require a confirmation.
fixed_predicates.append(Slot.trigger_fn)
fixed_values.append(trigger_fn[ID])
# Both Trigger Channel and Trigger Function have sufficient confidence.
# Proceed to Action.
action_channel = state.action
if action_channel[CONF] < self.config.beta:
# Confidence is too low. The system should ask the user explicitly
# for the slot value.
return self._ask_slot_action(predicate=Slot.action_channel)
elif action_channel[CONF] < self.config.alpha:
# Confidence is low enough to merit a confirmation, but not too low
# to completely ignore the value and ask for slot again.
return self._confirm_action(predicate=Slot.action_channel,
value=action_channel[ID])
else:
# Confidence is high enough to not even require a confirmation.
fixed_predicates.append(Slot.action_channel)
fixed_values.append(action_channel[ID])
# If control reaches here, the Action Channel is filled confidently.
action_fn = action_channel[FUNCTION]
if action_fn[CONF] < self.config.beta:
# Confidence is too low. The system should ask the user explicitly
# for the slot value.
return self._ask_slot_action(predicate=Slot.action_fn)
elif action_fn[CONF] < self.config.alpha:
# Confidence is low enough to merit a confirmation, but not too low
# to completely ignore the value and ask for slot again.
return self._confirm_action(predicate=Slot.action_fn,
value=action_fn[ID])
else:
# Confidence is high enough to not even require a confirmation.
fixed_predicates.append(Slot.action_fn)
fixed_values.append(action_fn[ID])
# If control reaches here, all four slots have high confidences.
# The system should inform the user of its interpretation before moving
# ahead with other slots like Fields. This is not a confirmation action.
# If the user disagrees, the system will terminate the dialog.
return self._inform_action(predicate=fixed_predicates,
value=fixed_values)
def _next_action_after_ask_slot(self, state, prev_action):
        # If the parsed value of the slot requested by the system in the last turn
# has confidence above `self.config.alpha`, it does not need
# confirmation. We can proceed to request another slot. If the
# confidence is below `self.config.alpha` but above `self.config.beta`,
# then it requires an explicit confirmation. Otherwise, the parser is
# not at all confident about the parse, and the system should ask the
# user to reword.
def ask_slot_helper(state_component, slot):
if state_component[CONF] >= self.config.alpha:
return self._pick_next_system_action(state)
elif state_component[CONF] >= self.config.beta:
return self._confirm_action(predicate=slot,
value=state_component[ID])
else:
return self._reword_action(prev_action)
method_name = self._next_action_after_ask_slot.__name__
requested_slot = prev_action.predicate
if requested_slot is Slot.trigger_channel:
return ask_slot_helper(state.trigger, Slot.trigger_channel)
elif requested_slot is Slot.trigger_fn:
return ask_slot_helper(state.trigger[FUNCTION], Slot.trigger_fn)
elif requested_slot is Slot.action_channel:
return ask_slot_helper(state.action, Slot.action_channel)
elif requested_slot is Slot.action_fn:
return ask_slot_helper(state.action[FUNCTION], Slot.action_fn)
else:
logging.error("%s: Illegal slot type %s.", method_name,
requested_slot)
raise TypeError
def _next_action_after_confirm(self, state, prev_action, parse):
if parse[Slot.confirmation] is Confirmation.unknown:
return self._reword_action(prev_action=prev_action)
elif parse[Slot.confirmation] is Confirmation.no:
return self._ask_slot_action(predicate=prev_action.predicate)
elif parse[Slot.confirmation] is Confirmation.yes:
return self._pick_next_system_action(state)
else:
logging.error("%s: Illegal response type %s",
self._next_action_after_confirm.__name__,
parse[Slot.confirmation])
raise TypeError
def _next_action_after_inform(self, state, prev_action, parse):
if parse[Slot.confirmation] is Confirmation.unknown:
return self._reword_action(prev_action=prev_action)
elif parse[Slot.confirmation] is Confirmation.no:
# Dialog failed.
return self._close_action()
elif parse[Slot.confirmation] is Confirmation.yes:
return self._pick_next_system_action(state, prev_action)
else:
logging.error("%s: Illegal response type %s",
                          self._next_action_after_inform.__name__,
parse[Slot.confirmation])
raise TypeError
def _pick_next_system_action(self, state, prev_action=None):
def action_for_slot(state_component, slot):
if self.config.beta <= state_component[CONF] < self.config.alpha:
return self._confirm_action(predicate=slot,
value=state_component[ID])
elif state_component[CONF] < self.config.beta:
return self._ask_slot_action(predicate=slot)
else:
return None
t_channel_action = action_for_slot(state.trigger, Slot.trigger_channel)
if t_channel_action is not None:
return t_channel_action
t_fn_action = action_for_slot(state.trigger[FUNCTION], Slot.trigger_fn)
if t_fn_action is not None:
return t_fn_action
a_channel_action = action_for_slot(state.action, Slot.action_channel)
if a_channel_action is not None:
return a_channel_action
a_fn_action = action_for_slot(state.action[FUNCTION], Slot.action_fn)
if a_fn_action is not None:
return a_fn_action
# If control reaches here, no slot needs an action. If the previous
# system-action was `inform`, close the dialog. Otherwise, inform the
# user of the systems' understanding of the recipe.
if prev_action is not None and prev_action.type is ActionType.inform:
return self._close_action()
else:
predicates = [Slot.trigger_channel, Slot.trigger_fn,
Slot.action_channel, Slot.action_fn]
values = [state.trigger[ID], state.trigger[FUNCTION][ID],
state.action[ID], state.action[FUNCTION][ID]]
return self._inform_action(predicate=predicates, value=values)
def _is_rewording_required(self, state):
# If all four slots have confidences below `self.config.beta`, ask for
# a rewording.
return (state.trigger[CONF] < self.config.beta and
state.trigger[FUNCTION][CONF] < self.config.beta and
state.action[CONF] < self.config.beta and
state.action[FUNCTION][CONF] < self.config.beta)
def _greet_action(self):
return DialogAction(action_type=ActionType.greet)
def _reword_action(self, prev_action):
return DialogAction(
action_type=ActionType.reword, prev_action=prev_action)
def _ask_slot_action(self, predicate):
return DialogAction(
action_type=ActionType.ask_slot, predicate=predicate)
def _confirm_action(self, predicate, value):
return DialogAction(
action_type=ActionType.confirm, predicate=predicate, value=value)
def _inform_action(self, predicate, value):
return DialogAction(
action_type=ActionType.inform, predicate=predicate, value=value)
def _close_action(self):
return DialogAction(action_type=ActionType.close)
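# A minimal smoke-test sketch (illustrative only, not part of the original
# module): on the very first turn prev_action is None, so next_action always
# returns a greet action regardless of state or parse. The SimpleNamespace
# below is a stand-in assumption for a configs.DialogConfiguration exposing
# alpha/beta confidence thresholds.
if __name__ == '__main__':
    from types import SimpleNamespace
    policy = DialogPolicy(SimpleNamespace(alpha=0.9, beta=0.4))
    first = policy.next_action(state=None, prev_action=None, parse={})
    assert first.type is ActionType.greet  # first turn always greets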
|
from search_tags_service.services.v1.tags import get_tags_from_text
def test_same_tags_result(mg_tag_2, same_tag_words, one_word_tag):
assert get_tags_from_text(same_tag_words, one_word_tag, mg_tag_2) == {'tags': ['toyota']}
def test_combinations_indents(mg_tag_2, tags_combinations_indents, three_words_tag, three_words_tag_str):
for text in tags_combinations_indents:
assert get_tags_from_text(text, three_words_tag, mg_tag_2) == {'tags': three_words_tag_str}
def test_combinations_words(mg_tag_2, tags_combination_words):
text, tree_tags, result_tags = tags_combination_words
generated_tags = get_tags_from_text(text, tree_tags, mg_tag_2)['tags']
for tag in result_tags:
assert tag in generated_tags
|
def dot_product(vec1, vec2):
    """Return the dot product of two equal-length vectors."""
    total = 0
    for i in range(len(vec1)):
        total += vec1[i] * vec2[i]
    return total
print(dot_product([1, 1], [1, 1]))
# == 2
print(dot_product([1, 2], [1, 4]))
# == 9
print(dot_product([1, 2, 1], [1, 4, 3]))
# == 12
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# File: osc4py3/oscpeeradapt.py
# <pep8 compliant>
"""Tools to manage high level communications between systems using OSC.
The high level packet management encoding/decoding (with support for
addrpattern compression/decompression, for checksum, authentication and
encryption) is already in oscbuildparse.
Low-level raw packets transport (read/write with devices or network) is
realized with objects of subclasses of :class:`OSCChannel`. We call these
objects readers and writers.
Readers identify OSC raw packets (datagram or stream based packet control)
and their source (who sent the data), and store them in a received_rawpackets
global queue (defined in the oscreadwrite module).
This allows readers to have different read policies (poll, thread, etc.)
disconnected from our upper layer management of received data.
This module organizes communications on the upper level, receiving and sending
packets from/to different OSC systems. It makes it possible to identify
systems (for the supported communication options listed above) and to apply
specific processing to some messages (e.g. authenticate packets, negotiate
addrpattern compression by requesting a string/int mapping, etc.).
Finally, it calls the registered functions matching the message address pattern.
"""
import time
import collections
import threading
from . import oscchannel
from . import oscbuildparse
from . import oscscheduling
from . import oscpacketoptions
# How many +- seconds around the current time is considered "current".
# Must be positive or zero.
NOW_RESOLUTION = 0.001 # 1 ms
# Bundles waiting their time to be dispatched, *sorted by ascending time tag*.
# A condition variable is used to insert a new bundle and reschedule
# the processing thread.
PendingBundle = collections.namedtuple("PendingBundle",
"when disp bundle packopt")
pending_bundles = []
pending_mutex = threading.Lock()
pending_condvar = threading.Condition(pending_mutex)
SendingPacket = collections.namedtuple("SendingPacket",
"packet packopt targets")
sending_packets = collections.deque()
sending_semaphore = threading.Semaphore(0)
# Parameters to use in delay_bundle(*LAST_PENDINGBUNDLE) to signal a special
# post (generally to have a thread wake up and check its termination flag).
# Important: when is 0, not None, so the "last" pending bundle is inserted at
# the beginning and processed immediately (else we would wait until the last
# pending bundle time, who knows how long...).
# Tested on object identity.
LAST_PENDINGBUNDLE = PendingBundle(0, None, None, None)
# Same for sending packet thread.
# Tested on identity too.
LAST_SENDINGPACKET = SendingPacket(None, None, None)
# Reserved names unusable for channel names.
# Note that we reserve all names starting by '_'.
RESERVED_PEER_NAMES = {
"_all", # To address all peer and channels.
"_allpeers", # To address all peers (see inpeersgroup).
"_alladmin", # To address all admin peers (see inadmingroup).
"_allchannels", # To address all channels.
"_local", # To bypass peers/channels and dispatch locally.
}
# Thread to manage pending bundles if not managed by polling loop.
delayed_thread = None
delayed_terminate = False
# Thread to process raw packets if not managed by polling loop.
rawpackets_thread = None
rawpackets_terminate = False
# Thread to process sent packets if not managed by polling loop.
sendpackets_thread = None
sendpackets_terminate = False
# Reference to all created dispatchers by their name.
all_dispatchers = {}
# General generic dispatcher for all normal messages.
global_dispatcher = None
# Reference to all created peers by their name.
all_peers = {}
# Mapping from (readername, sourceidentifier) to PeerManager objects.
# Allow to have special processing of some incoming packets.
peer_packet_receivers = {}
# ========================= OSC PEERS COMMUNICATIONS =======================
class PeerManager(object):
"""Indentify another OSC system to support specific communications.
Subclasses can add extra processing when receiving or sending OSC packets,
to support things like address pattern compression, peer authentication,
data encryption, etc.
This can be also used as a way to group multiple channels with a
specific name usable in send_packet(), just add wanted writer names
to the peer with add_writer() and use the peer name as target.
:ivar peername: readable identifier for the peer
:type peername: str
    :ivar admindispatch: dispatcher for management methods triggered by
        messages. Or None if not used.
:type admindispatch: Dispatcher or None
:ivar adminmanager: peer manager to use for sending admin communications.
If no adminmanager is specified, then messages use normal writers
and procedures of the peer manager.
:type adminmanager: PeerManager
:ivar inadmingroup: flag to select the peer manager for _alladmin target.
Default to False.
:type inadmingroup: bool
    :ivar inpeersgroup: flag to select the peer manager for _allpeers target.
Default to True.
:type inpeersgroup: bool
:ivar writersnames: set of names of transport channels to communicate
with the peer(s)
:type writersnames: { str }
    :ivar special_transmit: flag to indicate that the manager uses a special
        encoding or transmission procedure when sending OSC data
        (i.e. pattern compression, encryption, etc. that cannot use
        a standard process shared with other managers).
:type special_transmit: bool
:ivar addrpattern_compression: mapping of addrpattern to int for
addrpattern compression when sending messages. Default to {}.
:type addrpattern_compression: dict
:ivar addrpattern_decompression: mapping of int to addrpattern for
addrpattern decompression when reading messages. Default to {}.
:type addrpattern_decompression: dict
:ivar inpackopt: options for processing incoming packets,
:type inpackopt: dict
"""
def __init__(self, peername, options):
if peername.startswith('_'):
raise ValueError("OSC peer name {!r} beginning with _ "\
"reserved.".format(peername))
if peername in oscchannel.all_channels_peers:
raise ValueError("OSC channel/peer name {!r} already "\
"used.".format(peername))
self.peername = peername
self.admindispatch = None
self.adminmanager = None
self.inadmingroup = options.get("inadmingroup", False)
self.inpeersgroup = options.get("innormalgroup", True)
self.writersnames = set()
self.special_transmit = False
self.addrpattern_compression = {}
self.addrpattern_decompression = {}
self.logger = options.get("logger", None)
# Default processing options for packets coming to this manager.
self.inpackopt = oscpacketoptions.PacketOptions()
self.inpackopt.setup_processing(options)
all_peers[peername] = self
oscchannel.all_channels_peers.add(peername)
def terminate(self):
"""Properly terminate the peer.
"""
oscchannel.all_channels_peers.remove(self.peername)
del all_peers[self.peername]
def register_manager(self, readername, srcident):
"""Install a PeerManager in the incoming OSC data processing path.
.. note:: Only one manager per reader/source
You can install only one manager for a (readername, srcident) key,
especially, there is only one global manager for ("*", "*").
        :param readername: specific reader of interest to this manager, or
            "*" for a peer manager interested in all readers.
        :type readername: str
        :param srcident: identification of the packets source, or "*" for a
            peer manager interested in all packet sources.
:type srcident: hashable
"""
if (readername, srcident) in peer_packet_receivers:
raise ValueError("OSC PeerManager already "\
"registered for {}".format((readername, srcident)))
peer_packet_receivers[(readername, srcident)] = self
def unregister_manager(self, readername, srcident):
"""Remove a PeerManager from the incoming OSC data processing path.
        :param readername: specific reader of interest to this manager, or
            "*" for a peer manager interested in all readers.
        :type readername: str
        :param srcident: identification of the packets source, or "*" for a
            peer manager interested in all packet sources.
:type srcident: hashable
"""
if (readername, srcident) not in peer_packet_receivers:
raise ValueError("OSC no PeerManager "\
"registered for {}".format((readername, srcident)))
if peer_packet_receivers[(readername, srcident)] \
is not self:
raise ValueError("OSC different registered packet dispatcher "\
"for {}".format((readername, srcident)))
del peer_packet_receivers[(readername, srcident)]
@staticmethod
def get_manager(readername, srcident):
"""Get peer manager to process reader/source incoming packets.
The peer manager is searched for most specialized to generic,
matching for (readername, srcident), then for ('*',srcident'),
then for (readername,'*') and finally for ('*','*').
:param readername: reader name.
:type readername: str
        :param srcident: identification of the packets source.
:type srcident: hashable
:return: the most-specialized manager for the reader/source, or None
if not found.
:rtype: PeerManager
"""
m = peer_packet_receivers.get((readername, srcident),
None)
if m is None:
m = peer_packet_receivers.get(('*', srcident),
None)
if m is None:
m = peer_packet_receivers.get((readername, '*'),
None)
if m is None:
m = peer_packet_receivers.get(('*', '*'), None)
return m
def received_rawpacket(self, rawoscdata, packopt):
"""Called when a raw packet come from peer channels.
:param rawoscdata: raw OSC packet received from peer reader channel
:type rawoscdata: memoryview
:param packopt: the packet options for processing
:type packopt: dict
:return: the decoded packet and its adjusted processing options, if
the packet must not be processed further the function return a
None packet.
:rtype: OSCMessage or OSCBundle, dict
"""
try:
packet = self.decode_rawpacket(rawoscdata, packopt)
if self.logger is not None:
self.logger.debug("OSC PeerManager %s, decoded packet "\
"id %d: %r", self.peername, id(packet), packet)
except:
if self.logger is not None:
self.logger.exception("OSC PeerManager %s, error when "\
"decoding packet %r", self.peername, rawoscdata)
return None, {}
packopt.update_processing(self.inpackopt)
if self.process_adminpacket(packet, packopt):
return None, {}
else:
return packet, packopt
def decode_rawpacket(self, rawoscdata, packopt):
"""Implement a basic raw OSC data parsing.
Subclasses can override this method if they have extra process
to decode the packet.
:param rawoscdata: binary representation of an OSC packet.
:type rawoscdata: bytes or bytearray or memoryview
:param packopt: options for processing the packet
:type packopt: PacketOptions
"""
return oscbuildparse.decode_packet(rawoscdata)
def process_adminpacket(self, packet, packopt):
"""Process the packet as administrative message if it is.
Subclasses can override this method to test if the message is one
they have to detect for administrative tasks (ex. setup addrpattern
compression/decompression scheme), process it and return True, else
they must return False.
        The base inherited method tries to dispatch the packet through the
        private admindispatch Dispatcher, and considers the packet to have
        been processed if at least one method has been immediately called.
.. note: Mixing admin / normal messages in bundles.
With that solution, you should *not* mix admin messages with
normal messages in the same bundle.
        :param packet: the message or bundle to process as administrative
            information.
:type packet: OSCMessage or OSCBundle
:param packopt: options for processing the packet
:type packopt: PacketOptions
        :return: boolean indicating that the packet has been processed as an
            administrative task (and should not be processed elsewhere).
:rtype: bool
"""
if self.admindispatch is None:
return False
else:
admincount = self.admindispatch.dispatch_packet(packet, packopt)
return admincount != 0
def send_packet_to_peer(self, packet, packopt):
"""Send a packet using the manager writer(s).
:param packet: osc message or bundle to send to peer systems.
:type packet: OSCMessage or OSCBundle
:param packopt: options for packet transmissions
:type packopt: PacketOptions
"""
# Delegate administration packets to special manager if any.
if packopt.adminpacket and self.adminmanager is not None:
# We can have a manager for high level communications.
self.adminmanager.send_packet_to_peer(packet, packopt)
return
if self.logger is not None:
self.logger.debug("OSC PeerManager %s, sending packet id %d: %r",
self.peername, id(packet), packet)
# Do we have a way to send the packet?
if not self.writersnames:
if self.logger is not None:
self.logger.info("OSC PeerManager %s, no writer to send "\
"packet id %d: %r", self.peername, id(packet), packet)
return
packopt = packopt.duplicate()
packopt.peertarget = self.peername
# Use overridable encoding method.
rawdata = self.encode_packet(packet, packopt)
# Use overridable transmission method.
self.transmit_rawpacket(rawdata, packopt)
def encode_packet(self, packet, packopt):
"""Implement a basic OSC data encoding into raw format.
Subclasses can override this method if they have extra process
to encode the packet.
In such case, they must set special_transmit member to True.
:param packet: osc message or bundle to encode for peer systems.
:type packet: OSCMessage or OSCBundle
:param packopt: options for packet transmissions
:type packopt: PacketOptions
"""
return oscbuildparse.encode_packet(packet)
def transmit_rawpacket(self, rawpacket, packopt):
"""Send the raw OSC packet to writer channels for transmission.
Subclasses can override this method if they have extra process
to transmit the packet.
In such case, they must set special_transmit member to True.
:param rawpacket: binary representation of osc message or bundle.
:type rawpacket: bytes or bytearray
:param packopt: options for packet transmissions
:type packopt: PacketOptions
"""
# Simply use function to call writers transmission methods.
transmit_rawpacket(rawpacket, packopt, self.writersnames, self.logger)
def add_writer(self, writername):
"""Add a writer(s) for peer communications.
.. note: Transport channel existence
This is not tested a this method call time, so the channel itself
can be installed later.
:param writername: name of the transport channel object acting as a
writer.
:type writername: str
"""
self.writersnames.add(writername)
def remove_writer(self, writername):
"""Remove a writer from peer communications.
:param writername: name of the transport channel object acting as a
writer.
:type writername: str
"""
self.writersnames.remove(writername)
    # Q? define "adminpeer" or "parentpeer" objects that group together
    # authentication, compression/decompression keys, etc...
    # Communication options could also be defined by
    # Q? in case of multicast sending, shouldn't we have a "peers", or a shared
class PeerGroup(object):
"""Identify a group of other OSC systems.
Define same write methods as Peer to send OSC packets to different
peer systems.
"""
def __init__(self):
self.peers = []
def add(self, peer):
self.peers.append(peer)
def remove(self, peer):
self.peers.remove(peer)
# ============================ INTERNAL FUNCTIONS ==========================
def delay_bundle(when, disp, bundle, packopt):
"""Insert a bundle in the pending queue for late execution.
:param when: absolute time in seconds to execute the bundle.
:type when: float
:param disp:
:type disp: Dispatcher
"""
if disp is not None and disp.logger is not None:
disp.logger.debug("OSC dispatcher %s, delayed bundle id %d",
disp.dispname, id(bundle))
delayed = PendingBundle(when, disp, bundle, packopt)
pending_condvar.acquire()
pending_bundles.append(delayed)
pending_bundles.sort()
pending_condvar.notify() # Delayed thread may have to reschedule
                            # earlier than previously planned.
pending_condvar.release()
def next_bundle(timeout):
"""Return next available pending bundle.
    Return None if there is no *in-time* pending bundle and the timeout elapses.
Return LAST_PENDINGBUNDLE PendingBundle if it has been posted to indicate
a thread termination request.
:param timeout: maximum time to wait for a new pending bundle, 0 for
immediate return, None for infinite wait.
:type timeout: float or None
:return: the next in-time bundle or None or LAST_PENDINGBUNDLE.
:rtype: PendingBundle or None
"""
    pending_condvar.acquire() # === Can work on the list.
# Search for a pending bundle in time for execution.
pb = None
if pending_bundles:
# As list is sorted - and first item in tuples are 'when', the
# next scheduled bundle is first.
nextwhen = pending_bundles[0].when
# Note: delay is used for "now" test but also as wait() timeout
# parameter.
delay = nextwhen - time.time()
if delay < 0 or abs(delay) < NOW_RESOLUTION:
pb = pending_bundles.pop(0)
elif timeout is not None and delay > timeout:
delay = timeout
else:
delay = timeout
# Wait for a new bundle to be signaled (and then be called again
# to recalculate timings), or for delay to next bundle to elapse.
if pb is None:
        # Note that the condition variable wait() releases the lock,
        # so another thread can work on pending_bundles during that
# time.
pending_condvar.wait(delay)
pending_condvar.release() # === Must no longer work on the list.
return pb
def next_sendpacket(timeout):
"""Return next packet and options to send.
:param timeout: maximum time to wait for another packet to send, 0 for
immediate return, None for infinite wait.
:type timeout: float or None
:return: name of reader, identification of source, raw OSC packet data
:rtype: tuple or None if no data available
"""
if not sending_semaphore.acquire(True, timeout):
return None
if sending_packets:
return sending_packets.popleft()
else:
return None
def post_sendpacket(sendingpacket):
"""Send a new packet in the reception queue.
:param sendingpacket: packet and packet options to send.
:type sendingpacket: SendingPacket
"""
sending_packets.append(sendingpacket)
# Awake waiting thread if any.
sending_semaphore.release()
# ========================= SENDING GLOBAL FUNCTIONS =======================
def send_packet(packet, peername, packopt=None):
"""Send an OSC message or bundle to some peers.
The peername can use some specific names:
* _all To address all standard writer channels.
    And later maybe:
* _filter To select standard writer channels with filtering
on addrpattern matching.
:param packet: the bundle or message to send.
:type packet: OSCMessage or OSCBundle
:param peername: names to select what PeerManagers to use.
:type peername: str or [str] or {str}
"""
# Make peername a set of target.
if isinstance(peername, (set, list, tuple)):
targets = set(peername)
else:
targets = set([peername])
if packopt is None:
packopt = oscpacketoptions.PacketOptions()
if packopt.nodelay:
# Call the send function with no queue.
packet_send_function(packet, packopt, targets)
else:
# Queue the packet to not consume time here.
post_sendpacket(SendingPacket(packet, packopt, targets))
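# --- Hypothetical usage sketch (assumes a writer channel named "udpout" has
# already been created via the oscchannel module and that a global dispatcher
# is registered elsewhere; those names are illustrative only). ---
# pm = PeerManager("synth", {})
# pm.add_writer("udpout")
# msg = oscbuildparse.OSCMessage("/synth/volume", ",f", [0.5])
# send_packet(msg, "synth")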
def packet_send_function(packet, packopt, targets, logger=None):
"""Identify peers / channels, and manage transmission.
Proceed packet sending operations with managing multiple destinations
with as common operations as possible.
:param packet: the bundle or message to send.
:type packet: OSCMessage or OSCBundle
:param packopt: the options for packet transmission.
:type packopt: PacketOptions
:param targets: targetted peers names or channel names.
:type targets: set
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
if logger is not None:
logger.debug("OSC packet_send_function to targets %r with%s",
targets, packet)
if "_local" in targets:
# Directly call dispatching function, not going through OSC packet
# sending via network & co.
if global_dispatcher is not None:
global_dispatcher.dispatch_packet(packet, packopt)
else:
if logger is not None:
logger.warning("OSC no global dispatcher registered")
# packet is not processed.
targets.remove("_local")
# We continue as we may have _local as only one of the peer targets.
# Selection of peer manager / channel names with special names.
if "_all" in targets:
targets.remove("_all")
for name in oscchannel.all_channels_peers:
targets.add(name)
if "_allpeers" in targets:
targets.remove("_allpeers")
for peername in all_peers:
if all_peers[peername].inpeersgroup:
targets.add(peername)
if "_alladmin" in targets:
targets.remove("_alladmin")
for peername in all_peers:
if all_peers[peername].inadmingroup:
targets.add(peername)
if "_allchannels" in targets:
targets.remove("_allchannels")
for cname in oscchannel.all_channels:
targets.add(cname)
# Names of channels who will directly receive the packet, without peer
# management.
directchannels = set()
for tname in targets:
manager = all_peers.get(tname, None)
if manager is None: # Not a manager target.
if tname in oscchannel.all_channels:
directchannels.add(tname)
else: # Neither a channel target.
raise ValueError("OSC peer/channel {!r} "\
"unknown".format(tname))
else:
if packopt.adminpacket and manager.adminmanager is not None:
continue # Manager peer administrator will do the job.
if manager.special_transmit:
# Direct send with channel own processing.
if len(targets):
newoptions = packopt.duplicate()
else:
newoptions = packopt
newoptions.peertarget = tname
manager.send_packet_to_peer(packet, newoptions)
else:
# Collect all channels in a set (ie this will count each
# channel one time only).
directchannels.update(manager.writersnames)
# Now, transmit to all channels needing direct transmission.
if directchannels:
rawoscdata = oscbuildparse.encode_packet(packet)
transmit_rawpacket(rawoscdata, packopt, directchannels, logger)
def transmit_rawpacket(rawpacket, packopt, writersnames, logger):
"""Call writer channels functions to transmit raw data.
    This function transmits the *same* raw OSC packet to a set of transport
channels.
:param rawpacket: the binary packet to write.
:type rawpacket: bytes or bytearray
:param packopt: the options for packet transmission.
:type packopt: PacketOptions
:param writersnames: set of names of writers to send data.
:type writersnames: [ str ]
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
if logger is not None:
logger.debug("OSC transmit_rawpacket to channels %r", writersnames)
for cname in writersnames:
chanel = oscchannel.all_channels.get(cname, None)
if chanel is None:
if logger is not None:
logger.error("OSC transmit_rawpacket, channel name "\
"%r is not referenced", cname)
# Dismiss the packet.
continue
if not chanel.is_writer:
if logger is not None:
logger.error("OSC transmit_rawpacket, writer channel "\
"%r has not writing flag",
packopt.peertarget, cname)
# Dismiss the packet.
continue
newopt = packopt.duplicate()
newopt.chantarget = cname
chanel.transmit_data(rawpacket, newopt)
# ====================== EVENT LOOP PUBLIC FUNCTIONS ======================
def sendingpackets_process_loop(deadlinetime=0, logger=None):
"""Manage encoding and sending of packets.
    Called as a thread entry point, or as a simple function in an event
    loop.
    :param deadlinetime: exit from the loop after this time, even if there are
        remaining packets to send. It is an *absolute* time in seconds,
        in the same time base as time.time().
        A 0 value processes all packets until the queue is empty, then returns.
        A None value is used when running in its own thread.
    :type deadlinetime: float or None
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
try:
if deadlinetime is None and logger is not None:
logger.info("OSC Starting sendingpackets_process_loop()")
while True:
timeout = oscscheduling.deadline2timeout(deadlinetime)
nextsend = next_sendpacket(timeout)
# If no packet while in event loop, return.
if nextsend is None and deadlinetime == 0:
return
# If no packet while in own thread, continue with waiting.
if nextsend is None and deadlinetime is None:
continue
# Proper termination via a special tuple.
if nextsend is LAST_SENDINGPACKET:
                if sendpackets_terminate:
break # Properly exit from function (from thread...)
else:
nextsend = None
if nextsend is not None:
if logger is not None:
logger.debug("OSC send packets processing %r", nextsend)
packet, packopt, targets = nextsend
packet_send_function(packet, packopt, targets, logger)
            # If the deadline has elapsed, exit the loop.
if deadlinetime and time.time() > deadlinetime:
break
if deadlinetime is None and logger is not None:
logger.info("OSC Finishing sendingpackets_process_loop()")
except:
if logger is not None:
logger.exception("OSC Failure in sendingpackets_process_loop()")
def delayed_process_loop(deadlinetime=0, logger=None):
"""Manage bundles delayed up to their time tag.
    Called as a thread entry point, or as a simple function in an event
    loop.
    :param deadlinetime: exit from the loop after this time, even if there are
        remaining bundles to process. It is an *absolute* time in seconds,
        in the same time base as time.time().
        A 0 value processes all bundles until the queue is empty, then returns.
        A None value is used when running in its own thread.
    :type deadlinetime: float or None
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
try:
if deadlinetime is None and logger is not None:
logger.info("OSC Starting delayed_process_loop()")
while True:
timeout = oscscheduling.deadline2timeout(deadlinetime)
pb = next_bundle(timeout)
# If no bundle while in event loop, return.
if pb is None and deadlinetime == 0:
return
# If no bundle while in own thread, continue with waiting.
if pb is None and deadlinetime is None:
continue
if pb is LAST_PENDINGBUNDLE:
if delayed_terminate:
break # Properly exit from function (from thread...)
else:
pb = None
if pb is not None:
if pb.disp.logger is not None:
pb.disp.logger.debug("OSC dispatcher %s, delay elapsed "\
"for bundle id %d",
pb.disp.dispname, id(pb.bundle))
pb.disp.execute_bundle(pb.bundle, pb.packopt)
            # If the deadline has elapsed, exit the loop.
if deadlinetime and time.time() > deadlinetime:
break
if deadlinetime is None and logger is not None:
logger.info("OSC Finishing delayed_process_loop()")
except:
if logger is not None:
logger.exception("OSC Failure in delayed_process_loop()")
def rawpackets_process_loop(deadlinetime=0, logger=None):
"""Called by readers when some raw data come from the system.
Process all queued packets until the queue is empty or a the deadlinetime
is reach.
:param deadlinetime: exit from loop after this time, even if there are
remaining packets to process. Its an *absolute* time in seconds,
in same time base as time.time().
A 0 value do process all messages until the queue is empty then return.
A None value is used when in own thread.
:param deadlinetime: float or None
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
try:
if deadlinetime is None and logger is not None:
logger.info("OSC Starting rawpackets_process_loop()")
while True:
timeout = oscscheduling.deadline2timeout(deadlinetime)
nextpacket = oscchannel.next_rawpacket(timeout)
# If no packet while in event loop, return.
if nextpacket is None and deadlinetime == 0:
return
# If no packet while in own thread, continue with waiting.
if nextpacket is None and deadlinetime is None:
continue
# Proper termination via a special tuple.
if nextpacket == oscchannel.LAST_RAWPACKET:
if rawpackets_terminate:
break # Properly exit from function (from thread...)
else:
nextpacket = None
if nextpacket is not None:
if logger is not None:
logger.debug("OSC raw packets processing %r", nextpacket)
# Identify peer manager for decoding.
rawoscdata, packopt = nextpacket
readername = packopt.readername
srcident = packopt.srcident
peermanager = PeerManager.get_manager(readername, srcident)
if peermanager is None:
# No peer manager, just call standard OSC decoding
                    # and use default packet processing options.
packet = oscbuildparse.decode_packet(rawoscdata)
packopt = oscpacketoptions.PacketOptions()
else:
# There is a peer manager for the raw packet, call
# it to get the packet content and its processing options.
packet, packopt = peermanager.received_rawpacket(
rawoscdata, packopt)
# If peer manager has already processed the message
                # (or doesn't want it to be dispatched), it simply
                # returns a None packet.
if packet is not None:
if global_dispatcher is None:
if logger is not None:
logger.warning("OSC no global dispatcher "\
"registered")
# packet is not processed.
else:
global_dispatcher.dispatch_packet(packet, packopt)
            # If the deadline has elapsed, exit the loop.
if deadlinetime and time.time() > deadlinetime:
break
if deadlinetime is None and logger is not None:
logger.info("OSC Finishing rawpackets_process_loop()")
except:
if logger is not None:
logger.exception("OSC Failure in rawpackets_process_loop()")
# ================== BACKGROUND THREADS PUBLIC FUNCTIONS ===================
def create_delayed_thread(logger=None):
"""Build a thread to manage dispatching messages by calling functions.
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
global delayed_thread, delayed_terminate
if delayed_thread is not None:
return
delayed_terminate = False
delayed_thread = threading.Thread(target=delayed_process_loop,
args=(None, logger), # Its not polling.
name="DelayThread")
delayed_thread.daemon = False
delayed_thread.start()
def terminate_delayed_thread():
"""Set a flag and signal condition variable to finish delayed thread.
Wait for the effective thread termination.
Note: remaining bundles in the pending list are ignored.
"""
global delayed_terminate, delayed_thread
pending_condvar.acquire()
pending_bundles.append(LAST_PENDINGBUNDLE)
delayed_terminate = True
pending_condvar.notify()
pending_condvar.release()
delayed_thread.join()
delayed_thread = None
def create_rawpackets_thread(logger=None):
"""Build a thread to manage dispatching messages by calling functions.
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
global rawpackets_terminate, rawpackets_thread
if rawpackets_thread is not None:
return
rawpackets_terminate = False
rawpackets_thread = threading.Thread(target=rawpackets_process_loop,
args=(None, logger), # Its not polling.
name="RawPackThread")
rawpackets_thread.daemon = False
rawpackets_thread.start()
def terminate_rawpackets_thread():
"""Set a flag and signal condition variable to finish delayed thread.
Wait for the effective thread termination.
Note: remaining packets in the received queue are ignored.
"""
global rawpackets_terminate, rawpackets_thread
rawpackets_terminate = True
oscchannel.post_rawpacket(oscchannel.LAST_RAWPACKET)
rawpackets_thread.join()
rawpackets_thread = None
def create_sendingpackets_thread(logger=None):
"""Build a thread to manage dispatching messages by calling functions.
:param logger: Python logger to trace activity.
Default to None
:type logger: logging.Logger
"""
global sendpackets_thread, sendpackets_terminate
if sendpackets_thread is not None:
return
sendpackets_terminate = False
sendpackets_thread = threading.Thread(target=sendingpackets_process_loop,
args=(None, logger), # Its not polling.
name="SendPackThread")
sendpackets_thread.daemon = False
sendpackets_thread.start()
def terminate_sendingpackets_thread():
"""Set a flag and signal condition variable to finish delayed thread.
Wait for the effective thread termination.
Note: remaining packets in the received queue are ignored.
"""
global sendpackets_thread, sendpackets_terminate
sendpackets_terminate = True
post_sendpacket(LAST_SENDINGPACKET)
sendpackets_thread.join()
sendpackets_thread = None
# ======================= DISPATCHING PUBLIC FUNCTIONS =====================
# To setup dispatcher and install OSC methods.
def register_method(method):
"""Install an OSC method to be called when receiving some messages.
:param method: object to filter and call the method.
:type method: MethodFilter
"""
if global_dispatcher is None:
raise RuntimeError("OSC no global dispatcher registered")
global_dispatcher.add_method(method)
def unregister_method(method):
"""Remove ann installed OSC method.
:param method: object to filter and call the method.
:type method: MethodFilter
"""
if global_dispatcher is None:
raise RuntimeError("OSC no global dispatcher registered")
global_dispatcher.remove_method(method)
def register_global_dispatcher(disp):
"""Install the dispatcher for global dispatching of OSC messages.
:param disp: the dispatcher to use or None to have a new standard
dispatcher created with no special option.
:type disp: Dispatcher
"""
global global_dispatcher
if global_dispatcher is not None:
raise RuntimeError("OSC global dispatcher already registered")
if disp is None:
disp = Dispatcher("global", {})
global_dispatcher = disp
def unregister_global_dispatcher():
"""
"""
global global_dispatcher
if global_dispatcher is None:
raise RuntimeError("OSC no global dispatcher registered")
global_dispatcher = None
|
from django.utils.translation import gettext_lazy as _
# Activation types
ACTIVATION_TYPE_ACTIVE = 'active'
ACTIVATION_TYPE_PASSIVE = 'passive'
ACTIVATION_TYPES = (
(ACTIVATION_TYPE_ACTIVE, _("actif")),
(ACTIVATION_TYPE_PASSIVE, _("passif"))
)
# Character types
CHARACTER_TYPE_PC = 'pc'
CHARACTER_TYPE_NPC = 'npc'
CHARACTER_TYPE_MINION = 'minion'
CHARACTER_TYPE_RIVAL = 'rival'
CHARACTER_TYPE_NEMESIS = 'nemesis'
CHARACTER_TYPES = (
(CHARACTER_TYPE_PC, _("personnage joueur")),
(CHARACTER_TYPE_NPC, _("personnage non joueur")),
(CHARACTER_TYPE_MINION, _("sbire")),
(CHARACTER_TYPE_RIVAL, _("rival")),
(CHARACTER_TYPE_NEMESIS, _("nemesis"))
)
# Dice
# Dice types
DICE_TYPE_FORTUNE = 'fortune'
DICE_TYPE_MISFORTUNE = 'misfortune'
DICE_TYPE_APTITUDE = 'aptitude'
DICE_TYPE_DIFFICULTY = 'difficulty'
DICE_TYPE_MASTERY = 'mastery'
DICE_TYPE_CHALLENGE = 'challenge'
DICE_TYPE_FORCE = 'force'
DICE_TYPES = (
(DICE_TYPE_FORTUNE, _("fortune")),
(DICE_TYPE_MISFORTUNE, _("infortune")),
(DICE_TYPE_APTITUDE, _("aptitude")),
(DICE_TYPE_DIFFICULTY, _("difficulté")),
(DICE_TYPE_MASTERY, _("maitrise")),
(DICE_TYPE_CHALLENGE, _("défi")),
(DICE_TYPE_FORCE, _("force"))
)
# Dice values
DICE_SUCCESS = 'success'
DICE_FAILURE = 'failure'
DICE_ADVANTAGE = 'advantage'
DICE_THREAT = 'threat'
DICE_TRIUMPH = 'triumph'
DICE_DISASTER = 'disaster'
DICE_DARK_FORCE = 'dark_force'
DICE_LIGHT_FORCE = 'light_force'
DICE = {
DICE_TYPE_FORTUNE: {
0: None, 1: None,
2: {DICE_SUCCESS: 1},
3: {DICE_SUCCESS: 1, DICE_ADVANTAGE: 1},
4: {DICE_ADVANTAGE: 2},
5: {DICE_ADVANTAGE: 1}
},
DICE_TYPE_MISFORTUNE: {
0: None, 1: None,
2: {DICE_FAILURE: 1},
3: {DICE_FAILURE: 1},
4: {DICE_THREAT: 1},
5: {DICE_THREAT: 1}
},
DICE_TYPE_APTITUDE: {
0: None,
1: {DICE_SUCCESS: 1},
2: {DICE_SUCCESS: 1},
3: {DICE_SUCCESS: 2},
4: {DICE_ADVANTAGE: 1},
5: {DICE_ADVANTAGE: 1},
6: {DICE_SUCCESS: 1, DICE_ADVANTAGE: 1},
7: {DICE_ADVANTAGE: 2}
},
DICE_TYPE_DIFFICULTY: {
0: None,
1: {DICE_FAILURE: 1},
2: {DICE_FAILURE: 2},
3: {DICE_THREAT: 1},
4: {DICE_THREAT: 1},
5: {DICE_THREAT: 1},
6: {DICE_THREAT: 2},
7: {DICE_FAILURE: 1, DICE_THREAT: 1}
},
DICE_TYPE_MASTERY: {
0: None,
1: {DICE_SUCCESS: 1},
2: {DICE_SUCCESS: 1},
3: {DICE_SUCCESS: 2},
4: {DICE_SUCCESS: 2},
5: {DICE_ADVANTAGE: 1},
6: {DICE_SUCCESS: 1, DICE_ADVANTAGE: 1},
7: {DICE_SUCCESS: 1, DICE_ADVANTAGE: 1},
8: {DICE_SUCCESS: 1, DICE_ADVANTAGE: 1},
9: {DICE_ADVANTAGE: 2},
10: {DICE_ADVANTAGE: 2},
11: {DICE_TRIUMPH: 1}
},
DICE_TYPE_CHALLENGE: {
0: None,
1: {DICE_FAILURE: 1},
2: {DICE_FAILURE: 1},
3: {DICE_FAILURE: 2},
4: {DICE_FAILURE: 2},
5: {DICE_THREAT: 1},
6: {DICE_THREAT: 1},
7: {DICE_FAILURE: 1, DICE_THREAT: 1},
8: {DICE_FAILURE: 1, DICE_THREAT: 1},
9: {DICE_THREAT: 2},
10: {DICE_THREAT: 2},
11: {DICE_DISASTER: 1}
},
DICE_TYPE_FORCE: {
0: {DICE_DARK_FORCE: 1},
1: {DICE_DARK_FORCE: 1},
2: {DICE_DARK_FORCE: 1},
3: {DICE_DARK_FORCE: 1},
4: {DICE_DARK_FORCE: 1},
5: {DICE_DARK_FORCE: 1},
6: {DICE_DARK_FORCE: 2},
7: {DICE_LIGHT_FORCE: 1},
8: {DICE_LIGHT_FORCE: 1},
9: {DICE_LIGHT_FORCE: 2},
10: {DICE_LIGHT_FORCE: 2},
11: {DICE_LIGHT_FORCE: 2}
}
}
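# --- Hypothetical helper sketch (illustrative, not part of the original
# module): roll one die of a given type by picking a random face from the
# DICE table above; the result is a dict of symbols (or None for a blank
# face). ---
# import random
# def roll(dice_type):
#     faces = DICE[dice_type]
#     return faces[random.choice(list(faces))]
# roll(DICE_TYPE_APTITUDE)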
# Test difficulty
DIFFICULTY_SIMPLE = 0
DIFFICULTY_EASY = 1
DIFFICULTY_AVERAGE = 2
DIFFICULTY_HARD = 3
DIFFICULTY_DAUNTING = 4
DIFFICULTY_FORMIDABLE = 5
# EFFECTS
# EFFECT DURATIONS
EFFECT_DURATION_DIRECT = 'direct'
EFFECT_DURATION_PERMANENT = 'permanent'
EFFECT_DURATION_SOURCE_TURN = 'source_turn'
EFFECT_DURATION_TARGET_TURN = 'target_turn'
EFFECT_DURATION_FIGHT = 'fight'
EFFECT_DURATIONS = (
(EFFECT_DURATION_DIRECT, _("direct (one shot)")),
(EFFECT_DURATION_PERMANENT, _("permanent")),
(EFFECT_DURATION_SOURCE_TURN, _("nombre de tours (source)")),
(EFFECT_DURATION_TARGET_TURN, _("nombre de tours (cible)")),
(EFFECT_DURATION_FIGHT, _("durée du combat")),
)
# EFFECT TYPES
EFFECT_ATTRIBUTE_MODIFIER = 'attribute_modifier'
EFFECT_DICE_POOL_MODIFIER = 'dice_pool_modifier'
EFFECT_HEALTH_MODIFIER = 'health_modifier'
EFFECT_STRAIN_MODIFIER = 'strain_modifier'
EFFECT_TYPES = (
(EFFECT_ATTRIBUTE_MODIFIER, _("modificateur d'attribut")),
(EFFECT_HEALTH_MODIFIER, _("modificateur de santé")),
(EFFECT_DICE_POOL_MODIFIER, _("modificateur de dés")),
(EFFECT_STRAIN_MODIFIER, _("modificateur de stress"))
)
# ITEM TYPES
ITEM_WEAPON = 'weapon'
ITEM_ARMOR = 'armor'
ITEM_CONSUMABLE = 'consumable'
ITEM_MISC = 'misc'
ITEM_TYPES = (
(ITEM_WEAPON, _("arme")),
(ITEM_ARMOR, _("armure")),
(ITEM_CONSUMABLE, _("consommable")),
(ITEM_MISC, _("autre"))
)
# RANGE BANDS
RANGE_ENGAGED = 'engaged'
RANGE_SHORT = 'short'
RANGE_MEDIUM = 'medium'
RANGE_LONG = 'long'
RANGE_EXTREME = 'extreme'
RANGE_BANDS = (
(RANGE_ENGAGED, _("corps à corps")),
(RANGE_SHORT, _("portée courte")),
(RANGE_MEDIUM, _("portée moyenne")),
(RANGE_LONG, _("portée longue")),
(RANGE_EXTREME, _("portée extrème")),
)
# Stats
STAT_AGILITY = 'agility'
STAT_CUNNING = 'cunning'
STAT_BRAWN = 'brawn'
STAT_INTELLECT = 'intellect'
STAT_PRESENCE = 'presence'
STAT_WILLPOWER = 'willpower'
STAT_FORCE = 'force'
STATS = (
(STAT_AGILITY, _("agilité")),
(STAT_CUNNING, _("ruse")),
(STAT_BRAWN, _("vigueur")),
(STAT_INTELLECT, _("intelligence")),
(STAT_PRESENCE, _("présence")),
(STAT_WILLPOWER, _("volonté")),
(STAT_FORCE, _("force"))
)
DICT_STATS = dict(STATS)
# Skills
ATHLETICS = 'athletics'
ASTROGATION = 'astrogation'
BRAWL = 'brawl'
CHARM = 'charm'
COERCION = 'coercion'
COMPUTERS = 'computers'
COOL = 'cool'
COORDINATION = 'coordination'
CORE_WORLD = 'core_world'
DECEPTION = 'deception'
DISCIPLINE = 'discipline'
EDUCATION = 'education'
GUNNERY = 'gunnery'
LEADERSHIP = 'leadership'
LIGHTSABER = 'lightsaber'
LORE = 'lore'
MECHANICS = 'mechanics'
MEDECINE = 'medecine'
MELEE = 'melee'
NEGOCIATION = 'negociation'
OUTER_RIM = 'outer_rim'
PERCEPTION = 'perception'
PILOTING = 'piloting'
RANGED_HEAVY = 'ranged_heavy'
RANGED_LIGHT = 'ranged_light'
RESILIENCE = 'resilience'
SKULDUGGERY = 'skulduggery'
STEALTH = 'stealth'
STREETWISE = 'streetwise'
SURVIVAL = 'survival'
UNDERWORLD = 'underworld'
VIGILANCE = 'vigilance'
XENOLOGY = 'xenology'
ITEM_SKILLS = (
(BRAWL, _("pugilat")),
(GUNNERY, _("artillerie")),
(LIGHTSABER, _("sabre laser")),
(MECHANICS, _("mécanique")),
(MEDECINE, _("médecine")),
(MELEE, _("corps à corps")),
(RANGED_HEAVY, _("distance (armes lourdes)")),
(RANGED_LIGHT, _("distance (armes légères)")),
)
ALL_SKILLS = ITEM_SKILLS + (
(ATHLETICS, _("athlétisme")),
(ASTROGATION, _("astrogation")),
(CHARM, _("charme")),
(COERCION, _("coercition")),
(COMPUTERS, _("informatique")),
(COOL, _("calme")),
(COORDINATION, _("coordination")),
(CORE_WORLD, _("mondes du noyau")),
(DECEPTION, _("tromperie")),
(DISCIPLINE, _("sang froid")),
(EDUCATION, _("education")),
(LEADERSHIP, _("commandement")),
(LORE, _("culture")),
(NEGOCIATION, _("negociation")),
(OUTER_RIM, _("bordure exterieure")),
(PERCEPTION, _("perception")),
(PILOTING, _("pilotage")),
(RESILIENCE, _("résistance")),
(SKULDUGGERY, _("magouilles")),
(STEALTH, _("discretion")),
(STREETWISE, _("système D")),
(SURVIVAL, _("survie")),
(UNDERWORLD, _("pègre")),
(VIGILANCE, _("vigilance")),
(XENOLOGY, _("xénologie"))
)
DICT_SKILLS = dict(ALL_SKILLS)
# Skill dependancies
SKILL_DEPENDANCIES = {
STAT_BRAWN: (ATHLETICS, BRAWL, LIGHTSABER, MELEE, RESILIENCE, ),
STAT_AGILITY: (COORDINATION, GUNNERY, PILOTING, RANGED_HEAVY, RANGED_LIGHT, STEALTH, ),
STAT_INTELLECT: (ASTROGATION, COMPUTERS, CORE_WORLD, EDUCATION, LORE, MECHANICS,
MEDECINE, OUTER_RIM, UNDERWORLD, XENOLOGY),
STAT_CUNNING: (DECEPTION, PERCEPTION, SKULDUGGERY, STREETWISE, SURVIVAL, ),
STAT_WILLPOWER: (COERCION, DISCIPLINE, VIGILANCE, ),
STAT_PRESENCE: (CHARM, COOL, LEADERSHIP, NEGOCIATION, )
}
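    # Illustrative reverse lookup (an assumption, not in the original module): the
    # stat governing a given skill can be recovered from SKILL_DEPENDANCIES, e.g.
    #   [stat for stat, skills in SKILL_DEPENDANCIES.items() if STEALTH in skills]
    # evaluates to [STAT_AGILITY].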
# EFFECT ATTRIBUTES (STATS + SKILLS + PROPERTIES)
ATTRIBUTE_DEFENSE = 'defense'
ATTRIBUTE_MAX_HEALTH = 'max_health'
ATTRIBUTE_MAX_STRAIN = 'max_strain'
ATTRIBUTE_SOAK_VALUE = 'soak_value'
ATTRIBUTES = STATS + ALL_SKILLS + (
(ATTRIBUTE_DEFENSE, _("défense")),
(ATTRIBUTE_MAX_HEALTH, _("santé max")),
(ATTRIBUTE_MAX_STRAIN, _("stress max")),
(ATTRIBUTE_SOAK_VALUE, _("valeur d'encaissement"))
)
# SPECIES
SPECIES_HUMAN = 'human'
SPECIES_TWILEK = 'twilek'
SPECIES_BOTHAN = 'bothan'
SPECIES_DROID = 'droid'
SPECIES_GAND = 'gand'
SPECIES_RODIAN = 'rodian'
SPECIES_TRANDOSHAN = 'trandoshan'
SPECIES_WOOKIE = 'wookie'
SPECIES_CEREAN = 'cerean'
SPECIES_KELDOR = 'keldor'
SPECIES_MIRIALAN = 'mirialan'
SPECIES_NAUTOLAN = 'nautolan'
SPECIES_TOGRUTA = 'togruta'
SPECIES_ZABRAK = 'zabrak'
SPECIES_CREATURE = 'creature'
SPECIES = (
# Common
(SPECIES_HUMAN, _("humain")),
(SPECIES_TWILEK, _("twi'lek")),
# Edge of the Empire
(SPECIES_BOTHAN, _("bothan")),
(SPECIES_DROID, _("droïde")),
(SPECIES_GAND, _("gand")),
(SPECIES_RODIAN, _("rodien")),
(SPECIES_TRANDOSHAN, _("trandoshan")),
(SPECIES_WOOKIE, _("wookie")),
# Force and Destiny
(SPECIES_CEREAN, _("céréen")),
(SPECIES_KELDOR, _("kel'dor")),
(SPECIES_MIRIALAN, _("mirialan")),
(SPECIES_NAUTOLAN, _("nautolan")),
(SPECIES_TOGRUTA, _("togruta")),
(SPECIES_ZABRAK, _("zabrak")),
# Other
(SPECIES_CREATURE, _("créature")),
)
# SPECIES_ABILITIES - default=10
SPECIES_ABILITIES = {
SPECIES_BOTHAN: {'max_strain': 11},
SPECIES_CEREAN: {'max_strain': 13},
SPECIES_MIRIALAN: {'max_health': 11},
SPECIES_NAUTOLAN: {'max_health': 11, 'max_strain': 9},
SPECIES_TRANDOSHAN: {'max_health': 12, 'max_strain': 9},
SPECIES_TWILEK: {'max_strain': 11},
SPECIES_WOOKIE: {'max_health': 14, 'max_strain': 8},
}
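    # Illustrative lookups (an assumption about intended use, per the "default=10"
    # note above): abilities missing from SPECIES_ABILITIES fall back to 10, e.g.
    #   SPECIES_ABILITIES.get(SPECIES_WOOKIE, {}).get('max_health', 10)  -> 14
    #   SPECIES_ABILITIES.get(SPECIES_HUMAN, {}).get('max_strain', 10)   -> 10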
|
# encoding.py
# Copyright (C) 2011-2014 Andrew Svetlov
# andrew.svetlov@gmail.com
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from docutils import core
from docutils.writers import html4css1
class Meta(object):
pass
class Engine(object):
def do(self, source):
writer = Writer()
core.publish_string(source, writer=writer)
meta = Meta()
meta.title = ''.join(writer.title)
meta.slug = None
meta.labels = []
meta.subtitle = ''.join(writer.subtitle)
meta.footer = ''.join(writer.footer)
return ''.join(writer.body), meta
class Writer(html4css1.Writer):
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = Translator
    def apply_template(self):
        # Bypass the full HTML template; the Engine only needs the body fragment.
        subs = self.interpolation_dict()
        #from pprint import pprint
        #pprint(subs)
        return ''.join(self.body)
class Translator(html4css1.HTMLTranslator):
def __init__(self, document):
html4css1.HTMLTranslator.__init__(self, document)
|
#! /usr/bin/env python3
from numpy.lib.function_base import append
import open3d
import numpy as np
from ctypes import * # convert float to uint32
import tf
import rospy
from std_msgs.msg import Header, String
import geometry_msgs.msg
from sensor_msgs.msg import PointCloud2, PointField
from visualization_msgs.msg import Marker
import sensor_msgs.point_cloud2 as pc2
import matplotlib.pyplot as plt
import math
from sklearn.cluster import MeanShift, estimate_bandwidth
from itertools import cycle
global o3dpc
global header
FIELDS_XYZ = [
PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
]
FIELDS_XYZRGB = FIELDS_XYZ + \
[PointField(name='rgb', offset=12, datatype=PointField.UINT32, count=1)]
# Bit operations
BIT_MOVE_16 = 2**16
BIT_MOVE_8 = 2**8
convert_rgbUint32_to_tuple = lambda rgb_uint32: (
(rgb_uint32 & 0x00ff0000)>>16, (rgb_uint32 & 0x0000ff00)>>8, (rgb_uint32 & 0x000000ff)
)
convert_rgbFloat_to_tuple = lambda rgb_float: convert_rgbUint32_to_tuple(
int(cast(pointer(c_float(rgb_float)), POINTER(c_uint32)).contents.value)
)
def convertImage(msg):
global received_ros_cloud
received_ros_cloud = msg
# rospy.loginfo("-- Received ROS PointCloud2 message.")
def convertCloudFromRosToOpen3d(ros_cloud):
global header
# Get cloud data from ros_cloud
field_names=[field.name for field in ros_cloud.fields]
cloud_data = list(pc2.read_points(ros_cloud, skip_nans=True, field_names = field_names))
header = ros_cloud.header.frame_id
# Check empty
open3d_cloud = open3d.geometry.PointCloud()
if len(cloud_data)==0:
print("Converting an empty cloud")
return None
# Set open3d_cloud
if "rgb" in field_names:
IDX_RGB_IN_FIELD=3 # x, y, z, rgb
# Get xyz
xyz = [(x,y,z) for x,y,z,rgb in cloud_data ] # (why cannot put this line below rgb?)
# Get rgb
# Check whether int or float
if type(cloud_data[0][IDX_RGB_IN_FIELD])==float: # if float (from pcl::toROSMsg)
rgb = [convert_rgbFloat_to_tuple(rgb) for x,y,z,rgb in cloud_data ]
else:
rgb = [convert_rgbUint32_to_tuple(rgb) for x,y,z,rgb in cloud_data ]
# combine
open3d_cloud.points = open3d.utility.Vector3dVector(np.array(xyz))
open3d_cloud.colors = open3d.utility.Vector3dVector(np.array(rgb)/255.0)
else:
xyz = [(x,y,z) for x,y,z in cloud_data ] # get xyz
open3d_cloud.points = open3d.utility.Vector3dVector(np.array(xyz))
# return
return open3d_cloud
def convertCloudFromOpen3dToRos(open3d_cloud, frame_id="camera_depth_frame"):
# Set "header"
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = frame_id
# Set "fields" and "cloud_data"
points=np.asarray(open3d_cloud.points)
fields=FIELDS_XYZ
cloud_data=points
# create ros_cloud
return pc2.create_cloud(header, fields, cloud_data)
def sink_func(point_cloud,z_min, table_height, debug, open3d_debug):
planes = []
count = 0
check = False
while(len(np.asarray(point_cloud.points)) > 1000):
plane_model, inliers = point_cloud.segment_plane(distance_threshold=0.0005, ransac_n=3, num_iterations=1000)
inlier_cloud = point_cloud.select_by_index(inliers)
if debug or open3d_debug:
print("cloud: ",count," Normals")
[a, b, c, d] = plane_model
print("a:",a)
print("b:",b)
print("c:",c)
print("d:",d)
print(plane_model)
        # Assign a distinct colour to each successive plane
        # (planes beyond the table reuse the last colour, matching the original if/elif chain)
        colour_table = [
            (0, 0, 1.0),
            (0, 1.0, 1.0),
            (0, 1.0, 0),
            (1.0, 1.0, 1.0),
            (1.0, 1.0, 0),
            (1.0, 0, 0),
            (1.0, 1.0, 0),
        ]
        colours_red, colours_green, colours_blue = colour_table[min(count, len(colour_table) - 1)]
# setting the point clouds colour
inlier_cloud.paint_uniform_color([colours_red, colours_green, colours_blue])
        # Check that the plane is (near-)horizontal: its normal must be roughly aligned with +z
if (plane_model[0] < 0.05) and (plane_model[1] < 0.05) and (plane_model[2] > 0.95):
# Finding the Center
center = inlier_cloud.get_center()
if (center[2] > z_min) and (center[2]< table_height -0.1):
# Found the table
planes.append(inlier_cloud)
check = True
return planes, check
# Subtracting the previous plane
point_cloud = point_cloud.select_by_index(inliers, invert=True)
count+=1
planes.append(point_cloud)
return planes, check
def transform_frames(target_frame,source_frame, debug, open3d_debug):
    # Find the transform between the two frames
    listener = tf.TransformListener()
    while not rospy.is_shutdown():
        try:
            trans1, quat1 = listener.lookupTransform(target_frame, source_frame, rospy.Time(0))
break
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
homogenous = []
homogenous = tf.transformations.quaternion_matrix(quat1)
for i in range(0,3):
homogenous[i][3] = trans1[i]
if debug or open3d_debug:
print("matrix")
print(homogenous)
return homogenous
def main():
global received_ros_cloud
global downsampled_cloud
downsampled_cloud =[]
rospy.init_node('sink_detection')
status_pub = rospy.Publisher('sink_detected', String, queue_size=10)
# Subscribe to point cloud
sub2 = rospy.Subscriber('/camera/depth/points', PointCloud2, callback=convertImage, queue_size=10)
# Topics publishing
topic_debug = "apbot/points_debug"
topic_sink = "apbot/sink"
# pub_debug = rospy.Publisher(topic_debug, PointCloud2, queue_size=1)
pub_sink = rospy.Publisher(topic_sink, PointCloud2, queue_size=1)
    # The two flags below can be used to visualize the point clouds being processed
    # ROS visualization
    debug = 0
    # open3d visualization
    open3d_debug = 0
    # Transformation matrix from the camera frame to base_footprint, giving a common frame in which to process the point cloud
    transform = transform_frames('/base_footprint', '/camera_depth_frame', debug, open3d_debug)
    # Wait until we receive the point cloud; required for slow processors
while True:
try:
received_ros_cloud
break
except:
continue
# Set the range in which the table will be
z_min = 0.5
table_height = 1
check = False
    # Run the code until stopped; it could also be turned into a service
while not check:
# Convert to open3d data type
received_open3d_cloud = convertCloudFromRosToOpen3d(received_ros_cloud)
# Set the origin of the point cloud to be vizualized
FOR = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0,0,0])
# After transformation
received_open3d_cloud.transform(transform)
if(debug):
ros_plane = convertCloudFromOpen3dToRos(received_open3d_cloud, "base_footprint")
# pub_debug.publish(ros_plane)
if(open3d_debug):
open3d.visualization.draw_geometries([received_open3d_cloud, FOR])
# The function to find the different planes
print("Started Segmentation")
found_plane, check = sink_func(received_open3d_cloud, z_min, table_height, debug, open3d_debug)
if(check):
copy_of_plane=[found_plane[0]]
center = found_plane[0].get_center()
print("Found sink plane at height: ",center[2])
# Publishing the Table Top
ros_plane_sink = convertCloudFromOpen3dToRos(found_plane[0], "base_footprint")
pub_sink.publish(ros_plane_sink)
# Transform back to publish the table tf
transform_back = transform_frames('/camera_depth_frame','/base_footprint', debug, open3d_debug)
copy_of_plane[0].transform(transform_back)
# Publishing the table tf
center_sink = copy_of_plane[0].get_center()
br = tf.TransformBroadcaster()
br.sendTransform((center_sink[0], center_sink[1], center_sink[2]), (0,0,0,1), rospy.Time.now(), "sink", "camera_depth_frame")
# downsampled_cloud = [copy_of_plane[0].voxel_down_sample(voxel_size=0.05)]
# print(downsampled_cloud[0])
else:
print("Could not Find Table Top")
rate = rospy.Rate(10)
while not rospy.is_shutdown():
pub_sink.publish(ros_plane_sink)
br = tf.TransformBroadcaster()
br.sendTransform((center_sink[0], center_sink[1], center_sink[2]), (0,0,0,1), rospy.Time.now(), "sink", "camera_depth_frame")
print("Published TF...")
status_pub.publish("Sink Detected")
rate.sleep()
if __name__ == '__main__':
main()
|
import random
import time
from flask import current_app, render_template
from flask_mail import Message, Mail
from app import mail, create_app
from app.celery import celery
@celery.task
def send_async_email(to, subject, body, html):
app = create_app('development')
with app.app_context():
msg = Message(subject, recipients=[to])
msg.body = body
msg.html = html
Mail(app).send(msg)
# recipient, subject, mail template, token
def send_email(to, subject, template, **kwargs):
body = render_template(template + '.txt', **kwargs)
html = render_template(template + '.html', **kwargs)
send_async_email.delay(to, subject, body, html)
@celery.task(bind=True)
def long_task(self):
total=random.randint(10,50)
for i in range(total):
self.update_state(state=u'处理中', meta={'current': i, 'total': total})
time.sleep(1)
return {'current': 100, 'total': 100, 'result': u'完成'}
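# Hedged usage sketch (not part of the original module): long_task reports its
# progress through update_state, so a caller is assumed to poll the returned
# AsyncResult, e.g.
#   result = long_task.delay()
#   if result.state == 'SUCCESS':
#       final = result.result      # {'current': 100, 'total': 100, 'result': ...}
#   elif result.state not in ('PENDING', 'FAILURE'):
#       meta = result.info         # {'current': ..., 'total': ...} from update_state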
|
import pandas as pd
from sklearn import preprocessing
from preprocessing import read, split, non_numerical_features, one_hot_encoding
data = read('data.csv')
label = data['label']
output = read('output.csv')
prediction = output['Prediction']
print(prediction)
correct = 0
for i in range(0,len(output)):
if prediction[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
|
import os, sys, argparse
import pandas as pd
import backtrader as bt
from strategies.GoldenCross import GoldenCross
from strategies.BuyHold import BuyHold
strategies = {
'golden_cross':GoldenCross,
'buy_hold':BuyHold
}
parser = argparse.ArgumentParser()
parser.add_argument('strategy', help='which strategy to run', type=str)
args = parser.parse_args()
if args.strategy not in strategies:
    print('Invalid strategy, must be one of {}'.format(list(strategies.keys())))
    sys.exit()
cerebro = bt.Cerebro()
cerebro.broker.setcash(800)
fname='data/spy_2000-2020.csv'
data = pd.read_csv(fname, index_col='Date', parse_dates=True)
feed = bt.feeds.PandasData(dataname=data)
cerebro.adddata(feed)
cerebro.addstrategy(strategies[args.strategy])
cerebro.run()
cerebro.plot()
|
from typing import List
from repository.user_repository import User
from repository.user_repository import UserRepository
class UserService:
def __init__(self, user_repo: UserRepository) -> None:
self._user_repo = user_repo
def get_all(self) -> List[User]:
return self._user_repo.get_all()
def create_or_update(self, user: User) -> User:
if (not user.id):
return self._user_repo.create(user)
else:
self._user_repo.update(user)
return user
def delete(self, user_id: str):
return self._user_repo.delete(user_id)
|
from ..FeatureExtractor import InterExtractor
from numpy import *
class jansky_flux_extractor(InterExtractor):
""" Convert the flux from magnitudes to janskies """
active = True
extname = 'jansky_flux' #extractor's name
# def extract(self):
# table1 = { \
# "u": {"central":3650 , "width":680 , "f_lambda(0)": 4.27e-9 , "f_nu(0)": 1.90e-23, "constant": 13.84}, \
# "b": {"central":4400 , "width":980 , "f_lambda(0)": 6.61e-9 , "f_nu(0)": 4.27e-23, "constant": 12.97}, \
# "v": {"central":5500 , "width":890 , "f_lambda(0)": 3.64e-9 , "f_nu(0)": 3.67e-23, "constant": 13.72}, \
# "r": {"central":7000 , "width":2200 , "f_lambda(0)": 1.74e-9 , "f_nu(0)": 2.84e-23, "constant": 13.54}, \
# "i": {"central":9000 , "width":2400 , "f_lambda(0)": 8.32e-10, "f_nu(0)": 2.25e-23, "constant": 14.25}, \
# "j": {"central":12500, "width":3000 , "f_lambda(0)": 3.18e-10, "f_nu(0)": 1.65e-23, "constant": 15.05}, \
# "h": {"central":16500, "width":4000 , "f_lambda(0)": 1.18e-10, "f_nu(0)": 1.07e-23, "constant": 15.82}, \
# "k": {"central":22000, "width":6000 , "f_lambda(0)": 4.17e-11, "f_nu(0)": 6.73e-24, "constant": 16.50}, \
# "l": {"central":36000, "width":12000, "f_lambda(0)": 6.23e-12, "f_nu(0)": 2.69e-24, "constant": 17.82}, \
# } # table 1 from Misconceptions About Astronomical Magnitudes," E. Schulman and C. V. Cox, American Journal of Physics, Vol. 65, pg. 1003 (1997).
# watts_m2 = self.fetch_extr('watt_per_m2_flux')
# centerA = table1[self.band.lower()]['central'] # Angstrom
# width = table1[self.band.lower()]['width'] # Angstrom
# minA = centerA - width / 2
# maxA = centerA + width / 2
# minm = minA * 1e-10 # Angstrom to m
# maxm = maxA * 1e-10
# c = 299792458 # m / s
# maxHz = c / minm
# minHz = c / maxm
# width_Hz = maxHz - minHz
# jansky = watts_m2 / width_Hz # Jansky = W / m^2 / Hz
# return jansky
def extract(self):
try:
unit = self.flux_data_unit
        except AttributeError:  # the extractor may not define flux_data_unit
unit = self.assumed_unit
if unit in ['mag','mags','magnitude']:
""" table from http://ssc.spitzer.caltech.edu/tools/magtojy/ref.html """
zero_magnitude_fluxes = { \
"u": 1823, \
"b": 4130, \
"v": 3781, \
"r": 2941, \
"i": 2635, \
"j": 1603, \
"h": 1075, \
"k": 667 , \
"l": 288 , \
"m": 170 , \
"n": 36 , \
"o": 9.4 }
try:
""" This is inside a try/except to catch the possibility of the band not being in the zero_magnitude_fluxes dictionary """
zero_magnitude = zero_magnitude_fluxes[self.band.lower()]
except KeyError:
self.ex_error("Band %s not found" % (self.band))
""" follow the conversion method from http://ircamera.as.arizona.edu/astr_250/Lectures/Lec13_sml.htm """
janskies = zero_magnitude * power(10, self.flux_data / (-2.5) )
self.uncertainty = self.uncer_calc(janskies)
return janskies
else:
print "units not recognized", self.flux_data_unit, self.extname
self.uncertainty = self.rms_data
return self.flux_data # else assume it's already in those units, no unit conversion implemented for the moment
def uncer_calc(self, flux_wm2):
""" calculate the uncertainty in the SI flux
Latex for the approximation (valid if the flux uncertainty is less than 10%):
\sigma_m &=& \sqrt{ \sigma_{m,higher} \times \sigma_{m, lower}} \\
&=& \sqrt{ \left( 2.5 \log_{10}(f - \sigma_f) - 2.5\log_{10}f \right) \times \left( -2.5 \log_{10}(f + \sigma_f) + 2.5\log_{10}f \right) } \\
&=& \sqrt{ -2.5^2 \log_{10}\left(\frac{f}{f-\sigma_f}\right) \left(\log_{10}\frac{f}{f+\sigma_f}\right)} \\
&=& \sqrt{ -2.5^2 \log_{10}\left(\frac{f-\sigma_f}{f}\right) \log_{10}\left(\frac{f+\sigma_f}{f}\right)} \\
&=& 2.5\sqrt{ -\log_{10}\left( 1 - \frac{\sigma_f}{f} \right) \log_{10}\left( 1 + \frac{\sigma_f}{f} \right)} \\
&=& \frac{2.5 }{\ln 10} \sqrt{ -\ln\left( 1 - \frac{\sigma_f}{f} \right) \ln\left( 1 + \frac{\sigma_f}{f} \right)} \\
&\approx & \frac{2.5 }{\ln 10} \sqrt{ \left(\frac{\sigma_f}{f} \right) ^2 } \\
&\approx & \frac{\sigma_f}{f}
"""
f = flux_wm2
sigmam = self.rms_data
sigmaf = f * sigmam
return sigmaf
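    # Worked example of the magnitude-to-Jansky conversion used in extract()
    # (illustrative arithmetic only, not part of the extractor): for a V-band
    # source of magnitude m = 15, the zero-magnitude flux is 3781 Jy, so
    #   flux = 3781 * power(10, 15 / -2.5) ~= 3.8e-3 Jy,
    # i.e. janskies = f_nu(0) * 10**(-m / 2.5).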
|
import imageio
import logging
import os
import shutil
import six
from smqtk.utils import file_utils, video_utils
from smqtk.utils.mimetype import get_mimetypes
MIMETYPES = get_mimetypes()
class PreviewCache (object):
"""
    Create and cache the saved locations of preview images for data elements.
"""
# Preview generation methods based on content type
# - types can either be specific types or just type classes (i.e. "image" or
# "video"), which are fall-backs when a data element's specific content
# type is not found in the mapping.
# - This should be populated with methods that take two arguments, the first
# being a DataElement instance, and the second being the directory to
# place generated files under. These methods should return the full path
# to the generated preview image.
#: :type: dict[collections.Hashable, collections.Callable]
PREVIEW_GEN_METHOD = {}
@property
def _log(self):
return logging.getLogger('.'.join([self.__module__,
self.__class__.__name__]))
def __init__(self, cache_dir):
"""
:param cache_dir: Directory to cache preview image elements into.
:type cache_dir: str
"""
self._cache_dir = os.path.abspath(os.path.expanduser(cache_dir))
# Cache of preview images for data elements encountered.
#: :type: dict[collections.Hashable, str]
self._preview_cache = {}
self._video_work_dir = os.path.join(cache_dir, 'tmp_video_work')
def __del__(self):
"""
Cleanup after ourselves.
"""
for fp in six.itervalues(self._preview_cache):
os.remove(fp)
def get_preview_image(self, elem):
"""
Get the filepath to the preview image for the given data element.
:raises ValueError: Do not know how to generate a preview image for the
given element's content type.
:param elem: Data element to generate a preview image for.
:type elem: smqtk.representation.DataElement
:return: Path to the preview image for the given data element.
:rtype: str
"""
if elem.uuid() in self._preview_cache:
return self._preview_cache[elem.uuid()]
# else, generate preview image based on content type / content class
if elem.content_type() in self.PREVIEW_GEN_METHOD:
self._log.debug("Generating preview image based on content type: "
"%s", elem.content_type)
file_utils.safe_create_dir(self._cache_dir)
fp = self.PREVIEW_GEN_METHOD[elem.content_type()](self, elem,
self._cache_dir)
else:
content_class = elem.content_type().split('/', 1)[0]
if content_class in self.PREVIEW_GEN_METHOD:
self._log.debug("Generating preview image based on content "
"class: %s", content_class)
file_utils.safe_create_dir(self._cache_dir)
fp = self.PREVIEW_GEN_METHOD[content_class](self, elem,
self._cache_dir)
else:
raise ValueError("No preview generation method for the data "
"element provided, of content type '%s'."
% elem.content_type())
self._preview_cache[elem.uuid()] = fp
return fp
# noinspection PyMethodMayBeStatic
def gen_image_preview(self, elem, output_dir):
"""
Copy temporary image to specified output filepath.
:param elem: Data element to get the preview image for.
:type elem: smqtk.representation.DataElement
:param output_dir: Directory to save generated image to.
:type output_dir: str
"""
output_fp = os.path.join(
output_dir,
"%s%s" % (str(elem.uuid()),
MIMETYPES.guess_extension(elem.content_type()))
)
if not os.path.isfile(output_fp):
tmp_img_fp = elem.write_temp()
shutil.copy(tmp_img_fp, output_fp)
elem.clean_temp()
return output_fp
def gen_video_preview(self, elem, output_dir):
"""
        Generate an animated GIF preview of the given video data element.
:param elem: Data element to get the preview image for.
:type elem: smqtk.representation.DataElement
:param output_dir: Directory to save generated image to.
:type output_dir: str
"""
output_fp = os.path.join(output_dir,
"%s.gif" % elem.uuid())
if not os.path.isfile(output_fp):
tmp_vid_fp = elem.write_temp()
interval = 0.5 # ~2fps gif
fm = video_utils.ffmpeg_extract_frame_map(
self._video_work_dir, tmp_vid_fp,
second_interval=interval
)
img_arrays = []
for frm_num in sorted(fm.keys()):
img_arrays.append(imageio.imread(fm[frm_num]))
imageio.mimwrite(output_fp, img_arrays, duration=interval)
elem.clean_temp()
return output_fp
PreviewCache.PREVIEW_GEN_METHOD = {
"image": PreviewCache.gen_image_preview,
"video": PreviewCache.gen_video_preview,
}
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, SubmitField, SelectField, BooleanField, FileField
from wtforms.validators import Optional, DataRequired, NumberRange, ValidationError
from wtforms.widgets import TextArea
class InfiniteRechargeForm(FlaskForm):
team_number = IntegerField("Team Number", validators=[DataRequired(), NumberRange(1,20000)])
match = IntegerField("Match Number", validators=[DataRequired()])
disabled = BooleanField("Disabled/AFK")
auto = SelectField("Autonomous", choices=[
(5, "4+ Balls in High Port"),
(4, "1-3 Balls in High Port"),
(3, "4+ Balls in Low Port"),
(2, "1-3 Balls in Low Port"),
(1, "Only Crossed Auto Line"),
(0, "None of the Above/Stayed Motionless")
])
lower = IntegerField("Lower Port", validators=[DataRequired()])
outer = IntegerField("Outer Port", validators=[DataRequired()])
inner = IntegerField("Inner Port", validators=[DataRequired()])
climb = SelectField("Climb", choices=[
(3, "Is Climbed and Balanced"),
(2, "Climbs but doesn't balance"),
(1, "Drives to inside Rendezvous Point"),
(0, "Did not go to any platforms")
])
type = SelectField("Robot's Reach", choices=[
(2, "Uses shooter to shoot balls high"),
(1, "Can dump balls low only"),
(0, "Can't hold any game pieces")
])
driver = IntegerField("Rate Driver Skill", validators=[DataRequired(), NumberRange(0,100)])
notes = StringField("Notes", validators=[DataRequired()], widget=TextArea())
submit = SubmitField("Submit")
class RapidReactForm(FlaskForm):
team_number = IntegerField("Team Number", validators=[DataRequired(), NumberRange(1, 20000)])
match = IntegerField("Match Number", validators=[DataRequired()])
disabled = BooleanField("Disabled/AFK")
disconnected = BooleanField("Disconnected during the match")
disconnected_total_seconds = IntegerField("Total seconds disconnected", validators=[DataRequired()], default=0)
# disconnected_total_seconds = IntegerField("Total seconds disconnected", validators=[Optional(strip_whitespace=True)])
crossed_tarmac = BooleanField("Crossed Tarmac during Auto")
auto_upper = IntegerField("Auto - Upper", validators=[DataRequired()])
auto_lower = IntegerField("Auto - Lower", validators=[DataRequired()])
teleop_upper = IntegerField("Teleop - Upper", validators=[DataRequired()])
teleop_lower = IntegerField("Teleop - Lower", validators=[DataRequired()])
defense = SelectField("Defense", choices=[
("no defense", "No defense"),
("bad defense", "Bad defense"),
("good defense", "Good defense")
])
type = SelectField("Type of Shooter", choices=[
("cannot shoot", "Cannot hold game pieces"),
("shoots low", "Can shoot balls into lower hub only"),
("shoots high", "Can shoot balls into upper hub only"),
("shoots high and low", "Can shoot balls into either the upper or lower hub")
])
position = SelectField("Shooting Position", choices=[
("hub", "Touching the Lower Hub"),
("tarmac", "Inside of the Tarmac"),
("outside tarmac", "Outside of the Tarmac"),
("anywhere","Anywhere on the Field")
])
speed = SelectField("Robot's Speed", choices=[
("slow", "Drives Slow"),
("fast", "Drives Fast")
])
stability = SelectField("Robot Stability", choices=[
("very stable", "Very Stable"),
("penguin walk", "Penguin Walk"),
("tilting", "Tilting during matches"),
("flipped over", "Flipped Over")
])
driver = SelectField("Rate Driver Skill", choices=[
(3, "Good"),
(2, "Average"),
(1, "Bad")
])
accuracy = SelectField("Rate Robot Accuracy", choices=[
(1, "Always Missed Shots"),
(2, "Frequently Missed Shots"),
(3, "Sometimes Missed Shots"),
(4, "Rarely Missed Shots"),
(5, "Never Missed Shots")
])
climb = SelectField("Climb", choices=[
("cannot", "Cannot Climb"),
("did not" , "Can climb, but didn't"),
("attempted", "Attempted climb, but failed"),
("low", "Low Rung"),
("mid", "Mid Rung"),
("high", "High Rung"),
("traverse", "Traverse Rung")
])
climb_seconds = SelectField("Climb Time", choices=[
("less than 10s", "Less than 10 seconds"),
("around 20s", "Around 20 seconds"),
("around 30s", "Around 30 seconds"),
("more than 40s", "More than 40 seconds")
])
notes = StringField("Notes", validators=[DataRequired()], widget=TextArea())
name = StringField("Name", validators=[DataRequired()])
submit = SubmitField("Submit")
# 2023
class ChargedUpForm(FlaskForm):
name = StringField("Name", validators=[DataRequired()])
team_number = IntegerField("Team Number", validators=[DataRequired(), NumberRange(1, 9999)])
match = IntegerField("Match Number", validators=[DataRequired(), NumberRange(1, 300)])
starting_pos = SelectField("Starting Position", choices=[
("Left", "Left"),
("Middle", "Middle"),
("Right", "Right")
])
mobility = SelectField("Mobility", choices=[
("Yes", "Yes, left community during auto"),
("No", "No, didn't leave community during auto")
])
cone_auto_top = IntegerField ("Cone Auto - Top", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 6)])
cone_auto_middle = IntegerField("Cone Auto - Middle", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 6)])
cone_auto_hybrid= IntegerField("Cone Auto - Hybrid", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 9)])
cube_auto_top = IntegerField ("Cube Auto - Top", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 3)])
cube_auto_middle = IntegerField("Cube Auto - Middle", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 3)])
cube_auto_hybrid= IntegerField("Cube Auto - Hybrid", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 9)])
cone_teleop_top = IntegerField ("Cone Teleop - Top", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 6)])
cone_teleop_middle = IntegerField("Cone Teleop - Middle", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 6)])
cone_teleop_hybrid= IntegerField("Cone Teleop - Hybrid", render_kw = {"class":"pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 9)])
cube_teleop_top = IntegerField ("Cube Teleop - Top", render_kw = {"class": "pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 3)])
cube_teleop_middle = IntegerField("Cube Teleop - Middle", render_kw = {"class": "pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 3)])
cube_teleop_hybrid= IntegerField("Cube Teleop - Hybrid", render_kw = {"class": "pointField"}, default = 0, validators=[DataRequired(), NumberRange(0, 9)])
defense = IntegerField("Defense Percentage", validators=[DataRequired(), NumberRange(0,100)])
    # 0% means that the team did not play any defense. Please don't forget to state that in the question.
auto_charge = SelectField("Charging Station State Auto", choices=[
("No Dock nor Engage", "No Dock nor Engage"),
("Docked Points Earned", "Docked Points Earned"),
("Engaged Points Earned", "Engaged Points Earned")
])
teleop_charge = SelectField("Charging Station State TeleOp", choices=[
("No Dock nor Engage", "No Dock nor Engage"),
("1 Docked Points Earned", "1 Docked Points Earned"),
("2 Docked Points Earned", "2 Docked Points Earned"),
("3 Docked Points Earned", "3 Docked Points Earned"),
("1 Engaged Points Earned", "1 Engaged Points Earned"),
("2 Engaged Points Earned", "2 Engaged Points Earned"),
("3 Engaged Points Earned", "3 Engaged Points Earned"),
])
speed = IntegerField ("speed", default = 0, validators=[DataRequired(), NumberRange (1, 10)])
disabled = BooleanField("Disabled/AFK")
disconnected = BooleanField("Disconnected")
disconnected_total_seconds = StringField("Total Seconds Disconnected", default = 0)
notes = StringField("Notes", validators=[DataRequired()], widget=TextArea())
submit = SubmitField("Submit")
class FindTeamForm(FlaskForm):
team_number = IntegerField("Team Number", validators=[DataRequired(), NumberRange(1, 20000)])
submit = SubmitField("Submit")
class PitScoutingForm(FlaskForm):
name = StringField("Name", validators=[DataRequired()])
team_number = IntegerField("Team Number", validators=[DataRequired(), NumberRange(1, 9999)])
drive_train = StringField("Type of Drivetrain", validators=[DataRequired()], widget=TextArea())
auto_start = SelectField("Auto Start Preference", choices = [
("Left", "Left"),
("Right", "Right"),
("Center", "Center"),
("None", "None"),
])
auto_piece = StringField("Auto Field Preference", validators= [DataRequired()], widget =TextArea())
auto_max_points = IntegerField ("Auto Max Points", validators= [DataRequired(), NumberRange(0, 60)])
    game_pieces_type_scored = SelectField("Game Pieces Type Scored", choices = [
        ("Cube", "Cube"),
        ("Cone", "Cone"),
    ])
where_pieces_scored = SelectField("Where Game Pieces are Scored", choices = [
("Top", "Top"),
("Middle", "Middle"),
("Hybrid", "Hybrid"),
("Cannot", "Cannot Score"),
])
dock_engage = SelectField("Can they Dock and Engage?", choices = [
("Neither", "Neither"),
("Park", "Park"),
("Docked", "Docked"),
("Engaged", "Engaged"),
])
weight = StringField ("Weight", validators = [DataRequired()], widget = TextArea())
height = StringField ("Non-extended height", [DataRequired()], widget = TextArea())
image = FileField ("Image of Robot")
notes = StringField("Notes", validators=[DataRequired()], widget=TextArea())
submit = SubmitField("Submit")
|
n = input("Enter a number : ")
for i in range(1, n + 1):
    for b in range(n - i):      # leading spaces to centre the row
        print "",
    for j in range(1, i + 1):
        print j,
    for k in range(i - 1, 0, -1):
        print k,
    print "\n",
print range(1,55666666,1000)
|
import numpy as np
import gym
from collections import defaultdict
from tensorboardX import SummaryWriter
class offMCO_Agent(object):
def __init__(self, env, maxEpi=100000, gamma = 1, epsilon = 0.1):
self.env = env
self.maxEpi = maxEpi
self.gamma = gamma
self.Q = defaultdict(lambda: 0)
self.P = defaultdict(lambda: 0)
self.epsilon = epsilon
self.action_no = env.action_space.n
def e_soft(self,observation):
r = np.random.uniform()
if r < self.epsilon:
return np.random.randint(0,self.action_no)
else:
return self.P[observation]
def Generate_Episode(self):
observation = self.env.reset()
qs = []
rewards = []
isFirst = defaultdict(lambda: 0)
updates = []
for t in range(100):
action = self.e_soft(observation)
qs.append((observation,action))
if isFirst[(observation,action)] == 0:
isFirst[(observation,action)] = 1
updates.append(1)
else:
updates.append(0)
            observation, reward, done, _ = self.env.step(action)
rewards.append(reward)
if done:
break
return (qs,rewards,updates)
def learn(self, eval = 1000, maxStep = 100000):
self.Q = defaultdict(lambda: 0) # Q value.
C = defaultdict(lambda: 0) # Number of time a Q is visited.
self.P = defaultdict(lambda: 0) # Greedy Policy with respect to Q.
writer = SummaryWriter(comment="MCoffW")
self.eval(writer,0, episodes=1000,maxStep = maxStep)
for epi in range(1,self.maxEpi+1):
if(epi%eval==0):
self.eval(writer,epi, episodes=1000,maxStep = maxStep)
qs, rewards, updates = self.Generate_Episode()
G = 0
W = 1
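            # Backward pass with weighted importance sampling: G accumulates the
            # discounted return and W is the importance ratio of the greedy target
            # policy over the e-soft behaviour policy. If the action actually taken
            # differs from the greedy action, the ratio would become zero, so the
            # inner loop breaks; otherwise W is divided by the behaviour probability
            # of the greedy action, 1 - epsilon + epsilon/|A|.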
for i in range(len(updates)-1,-1,-1):
G = self.gamma*G+rewards[i]
if updates[i]==1:
Cn = C[qs[i]]+1
Qn = self.Q[qs[i]]
C[qs[i]] = Cn
self.Q[qs[i]] = Qn + 1.0/Cn * (W*G-Qn)
best_action = np.argmax(np.asarray([self.Q[(qs[i][0],j)] for j in range(self.action_no)]))
self.P[qs[i][0]] = best_action
if qs[i][1] != best_action:
break
W = W*1/(1-self.epsilon+self.epsilon/self.action_no)
self.eval(writer,self.maxEpi, episodes=1000,maxStep = maxStep)
def eval(self, writer, itr, episodes=1000, maxStep = 100000):
score = 0
steps_list = []
for episode in range(episodes):
observation = self.env.reset()
steps=0
while True:
action = self.P[observation]
observation, reward, done, _ = self.env.step(action)
steps+=1
score+=reward
if done:
steps_list.append(steps)
break
if steps>maxStep:
steps_list.append(steps)
break
print('----------------------------------------------')
print("{}/{}".format(itr,self.maxEpi))
print('You took an average of {:.0f} steps'.format(np.mean(steps_list)))
print('Average reward {:.2f}'.format((score/episodes)))
print('----------------------------------------------')
if writer is not None:
writer.add_scalar("Episode Length",np.mean(steps_list),itr)
writer.add_scalar("Reward",score/episodes,itr)
if __name__ == "__main__":
env = gym.make("FrozenLake-v0").env
agent = offMCO_Agent(env)
agent.learn()
|
#!/usr/bin/python3
"""
Copyright (c) 2015, Joshua Saxe
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name 'Joshua Saxe' nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JOSHUA SAXE BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def jaccard(arg1, arg2, feature='strings'):
"""
Compute the jaccard index between two arguments (lists | sets) by taking
their intersection, union and then dividing the number of elements in the
intersection by the number of elements in their union.
Args:
arg1: attributes of malware 1
arg2: attributes of malware 2
Raises:
None
Returns:
jaccard index value
"""
if feature == 'api':
# Input args will be lists
list1 = arg1
list2 = arg2
# Counters for keeping track of common hashes and unique hashes
unionlen = float(0)
intersectionlen = float(0)
# For two exe samples to be similar, the order of dynamic API calls
# is relevant and important
for hash1, hash2 in zip(list1, list2):
if hash1 == hash2:
# Hash match, increment `intersectionlen`
intersectionlen = intersectionlen + 1
# One unique hash between the two samples, increment `unionlen`
unionlen = unionlen + 1
else:
# Two unique hashes between the two samples, increment
# `unionlen` by 2
unionlen = unionlen + 2
# Extra hashes are unique hashes, so increment `unionlen` by equivalent
# number
if len(list1) != len(list2):
unionlen = unionlen + abs(len(list1) - len(list2))
return intersectionlen / unionlen
elif feature == 'strings':
# Input args will be sets
set1 = arg1
set2 = arg2
# Calculate intersection and union between two sets. Order is not
# relevant
intersection = set1 & set2
intersectionlength = float(len(intersection))
union = set1 | set2
unionlength = float(len(union))
return intersectionlength / unionlength
else:
raise Exception("Feature not supported. Jaccard index could not be calculated!")
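
# Minimal usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    # Order-insensitive string sets: |{'b','c'}| / |{'a','b','c','d'}| = 0.5
    print(jaccard({'a', 'b', 'c'}, {'b', 'c', 'd'}, feature='strings'))
    # Order-sensitive API-hash lists: 1 matching position, union count 3 -> ~0.33
    print(jaccard(['h1', 'h2'], ['h1', 'h3'], feature='api'))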
|
import unittest
from main import insere_dados
teste = insere_dados()
class MyTestCase(unittest.TestCase):
def testa_nome_vazio(self):
self.assertFalse(teste.Nome == "", "Nome em branco")
def testa_nome_inicia_com_maiuscula(self):
self.assertTrue(teste.Nome[0].isupper(), "Nome não inicia com maiúscula")
def testa_idade_maior_ou_igual_a_zero(self):
self.assertGreaterEqual(teste.Idade, 0, "Idade menor que zero")
def testa_sexo_eh_M_ou_F(self):
self.assertTrue(teste.Sexo.__eq__("M") or teste.Sexo.__eq__("F"), "Sexo não é M ou F")
def testa_cidade_vazio(self):
self.assertFalse(teste.Cidade == "", "Nome em branco")
def testa_cidade_inicia_com_maiuscula(self):
self.assertTrue(teste.Cidade[0].isupper(), "Cidade com inicial minúscula")
def testa_estado_vazio(self):
self.assertFalse(teste.Estado == "", "Estado em branco")
def testa_estado_maiusculo(self):
self.assertTrue(teste.Estado.isupper(), "Estado em minúsculo")
def testa_estado_com_duas_letras(self):
self.assertEqual(len(teste.Estado), 2, "Estado não tem duas letras")
if __name__ == '__main__':
unittest.main()
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import *
from mysite.views import *
from mysite.books import views
from mysite import inner , inner2
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
urlpatterns = patterns('mysite.views',
# 第一行这里放的是公共视图前缀,如果没有的话就是‘’ 就行了
(r'^admin/', include(admin.site.urls)),
##('^time7/',include('mysite.inner'),
(r'^hello/$', hello),
# 注意有两个views 这里面的函数都是这两个views 里面的
(r'^contact/$', contact),
(r'^search/$', views.search),
(r'^display/$', display_meta),
('^time/$',current_datetime),
# 这是另一种表示视图的方式,但是要注意 这种后面的要加引号
('^time2/$','current_datetime2'),
('^time3/$',current_datetime3),
('^time4/$',current_datetime4),
('^time5/$',current_datetime5),)
##(r'^time/plus/(\d{1,2})/$',hours_ahead),
## #可以增加多个 urlpattenerns 对象 像线面这样
urlpatterns = patterns('',
('^time6/$',current_datetime5 ),
('^time7/',include('mysite.inner')),
('^time8/',include('mysite.inner2')),
)
|
from deeppavlov import build_model
model = build_model("topic_ag_news", download=True)
# get predictions for 'input_text1', 'input_text2'
predictions = model([
'Britain Prince Andrew has no recollection of meeting sex accuser Report',
'Maher panics says Nikki Haley has gone full on Team Deplorable This is so scary to me',
'Government must act after Glasgow hospital water infection scandal',
'More excited to represent Wales than Real Madrid says Gareth Bale',
'Joker about to cross $1 billion global box office milestone',
'New Pixel reacts to gestures, but charge still short',
'LETTER On vets, vettes and other vets',
'Apple TV See Is a Great Show that Transcends Its Captivating Premise',
'Google is scaling back its weekly all hands meetings after leaks Sundar Pichai tells staff',
'Czechs use anniversary of Velvet Revolution to pressure PM',
'Everything You Need To Watch On Netflix Tonight',
'Learn how to make your own video games with this online course',
'Hunters head out for start of deer firearms season in northeast Missouri',
'The HUAWEI FreeBuds 3 with Intelligent Noise Cancellation',
'Some of the protesters targetted cars and shop windows during the clashes',
'Thomas leads Appalachian St. to 56-27 win over Georgia St.'
])
print("predictions: ", predictions)
|
# The necessary imports for this application
import json
import requests
from time import sleep
# The URL of the Extractions Endpoint
url = 'https://api.dowjones.com/alpha/extractions/documents'
#Our prompts to be inserted into our query.
prompt = "> "
print("What are you searching for?")
search_term = raw_input(prompt)
print("What is your article limit?")
limit = raw_input(prompt)
request_body = {
"query": {
"where": "body contains '" + search_term + "' AND language_code='en' AND publication_date >= '2015-01-01 00:00'",
"limit": str(limit)
}
}
# Call the endpoint with the given query
print("Creating the extraction: " + json.dumps(request_body))
response = requests.post(url, data=json.dumps(request_body), headers={'content-type': 'application/json', 'user-key': 'USER_KEY'})
print(response.text)
# Verify the response from creating an extraction is OK
if response.status_code == 201:
extraction = response.json()
print(extraction)
print("Extraction Created. Job ID: " + extraction['data']['id'])
self_link = extraction["links"]["self"]
print "Checking state of the job."
while True:
# We now call the second endpoint, which will tell us if the extraction is ready.
status_response = requests.get(self_link, headers={'content-type': 'application/json', 'user-key': 'USER_KEY'})
# Verify the response from the self_link is OK
if status_response.status_code == 200:
# There is an edge case where the job does not have a current_state yet. If current_state
# does not yet exist in the response, we will sleep for 10 seconds
status = status_response.json()
if 'currentState' in status['data']['attributes']:
currentState = status['data']['attributes']['currentState']
print("Current state is: " + currentState)
# Job is still running, Sleep for 10 seconds
if currentState == "JOB_STATE_RUNNING":
print("Sleeping for 10 seconds...Job state running")
sleep(10)
elif currentState == "JOB_QUEUED":
print("Sleeping for 10 seconds...Job queued")
sleep(10)
elif currentState == "JOB_CREATED":
print("Sleeping for 10 seconds...Job created")
sleep(10)
else:
# If currentState is JOB_STATE_DONE then everything completed successfully
if currentState == "JOB_STATE_DONE":
print("Job completed successfully")
print("Data is available here: " + status["data"]["attributes"]["destination"])
print("To list the contents of the extraction use: gsutil ls " + status["data"]["attributes"]["destination"])
# job has another state that means it was not successful.
else:
print("An error occurred with the job. Final state is: " + currentState)
break
else:
print("Sleeping for 10 seconds...")
sleep(10)
else:
print("ERROR: an error occurred getting the details for the extraction: " + status_response.text)
else:
print("ERROR: An error occurred creating an extraction: " + response.text)
|
def square_it(digits):
s = str(digits)
s_len = len(s)
len_sqrt = s_len ** 0.5
int_sqrt = int(len_sqrt)
if len_sqrt == int_sqrt:
return '\n'.join(s[a:a + int_sqrt] for a in xrange(0, s_len, int_sqrt))
return 'Not a perfect square!'
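# Illustrative calls (not part of the original snippet):
#   square_it(1212) -> '12\n12'            (4 digits form a 2x2 square)
#   square_it(123)  -> 'Not a perfect square!'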
|
s = "If Comrade Napoleon says it, it must be right."
a = [100, 200, 300]
def foo(arg):
    print(f'arg = {arg}')
class Foo:
pass
|
# Generated from /Users/labtop/PyCharm/BioScript/grammar/grammar/BSParser.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .BSParser import BSParser
else:
from BSParser import BSParser
# /* parser/listener/visitor header section */
# This class defines a complete listener for a parse tree produced by BSParser.
class BSParserListener(ParseTreeListener):
# Enter a parse tree produced by BSParser#program.
def enterProgram(self, ctx:BSParser.ProgramContext):
pass
# Exit a parse tree produced by BSParser#program.
def exitProgram(self, ctx:BSParser.ProgramContext):
pass
# Enter a parse tree produced by BSParser#globalDeclarations.
def enterGlobalDeclarations(self, ctx:BSParser.GlobalDeclarationsContext):
pass
# Exit a parse tree produced by BSParser#globalDeclarations.
def exitGlobalDeclarations(self, ctx:BSParser.GlobalDeclarationsContext):
pass
# Enter a parse tree produced by BSParser#moduleDeclaration.
def enterModuleDeclaration(self, ctx:BSParser.ModuleDeclarationContext):
pass
# Exit a parse tree produced by BSParser#moduleDeclaration.
def exitModuleDeclaration(self, ctx:BSParser.ModuleDeclarationContext):
pass
# Enter a parse tree produced by BSParser#manifestDeclaration.
def enterManifestDeclaration(self, ctx:BSParser.ManifestDeclarationContext):
pass
# Exit a parse tree produced by BSParser#manifestDeclaration.
def exitManifestDeclaration(self, ctx:BSParser.ManifestDeclarationContext):
pass
# Enter a parse tree produced by BSParser#stationaryDeclaration.
def enterStationaryDeclaration(self, ctx:BSParser.StationaryDeclarationContext):
pass
# Exit a parse tree produced by BSParser#stationaryDeclaration.
def exitStationaryDeclaration(self, ctx:BSParser.StationaryDeclarationContext):
pass
# Enter a parse tree produced by BSParser#functions.
def enterFunctions(self, ctx:BSParser.FunctionsContext):
pass
# Exit a parse tree produced by BSParser#functions.
def exitFunctions(self, ctx:BSParser.FunctionsContext):
pass
# Enter a parse tree produced by BSParser#functionDeclaration.
def enterFunctionDeclaration(self, ctx:BSParser.FunctionDeclarationContext):
pass
# Exit a parse tree produced by BSParser#functionDeclaration.
def exitFunctionDeclaration(self, ctx:BSParser.FunctionDeclarationContext):
pass
# Enter a parse tree produced by BSParser#formalParameters.
def enterFormalParameters(self, ctx:BSParser.FormalParametersContext):
pass
# Exit a parse tree produced by BSParser#formalParameters.
def exitFormalParameters(self, ctx:BSParser.FormalParametersContext):
pass
# Enter a parse tree produced by BSParser#formalParameterList.
def enterFormalParameterList(self, ctx:BSParser.FormalParameterListContext):
pass
# Exit a parse tree produced by BSParser#formalParameterList.
def exitFormalParameterList(self, ctx:BSParser.FormalParameterListContext):
pass
# Enter a parse tree produced by BSParser#formalParameter.
def enterFormalParameter(self, ctx:BSParser.FormalParameterContext):
pass
# Exit a parse tree produced by BSParser#formalParameter.
def exitFormalParameter(self, ctx:BSParser.FormalParameterContext):
pass
# Enter a parse tree produced by BSParser#functionTyping.
def enterFunctionTyping(self, ctx:BSParser.FunctionTypingContext):
pass
# Exit a parse tree produced by BSParser#functionTyping.
def exitFunctionTyping(self, ctx:BSParser.FunctionTypingContext):
pass
# Enter a parse tree produced by BSParser#returnStatement.
def enterReturnStatement(self, ctx:BSParser.ReturnStatementContext):
pass
# Exit a parse tree produced by BSParser#returnStatement.
def exitReturnStatement(self, ctx:BSParser.ReturnStatementContext):
pass
# Enter a parse tree produced by BSParser#blockStatement.
def enterBlockStatement(self, ctx:BSParser.BlockStatementContext):
pass
# Exit a parse tree produced by BSParser#blockStatement.
def exitBlockStatement(self, ctx:BSParser.BlockStatementContext):
pass
# Enter a parse tree produced by BSParser#statements.
def enterStatements(self, ctx:BSParser.StatementsContext):
pass
# Exit a parse tree produced by BSParser#statements.
def exitStatements(self, ctx:BSParser.StatementsContext):
pass
# Enter a parse tree produced by BSParser#ifStatement.
def enterIfStatement(self, ctx:BSParser.IfStatementContext):
pass
# Exit a parse tree produced by BSParser#ifStatement.
def exitIfStatement(self, ctx:BSParser.IfStatementContext):
pass
# Enter a parse tree produced by BSParser#whileStatement.
def enterWhileStatement(self, ctx:BSParser.WhileStatementContext):
pass
# Exit a parse tree produced by BSParser#whileStatement.
def exitWhileStatement(self, ctx:BSParser.WhileStatementContext):
pass
# Enter a parse tree produced by BSParser#repeat.
def enterRepeat(self, ctx:BSParser.RepeatContext):
pass
# Exit a parse tree produced by BSParser#repeat.
def exitRepeat(self, ctx:BSParser.RepeatContext):
pass
# Enter a parse tree produced by BSParser#heat.
def enterHeat(self, ctx:BSParser.HeatContext):
pass
# Exit a parse tree produced by BSParser#heat.
def exitHeat(self, ctx:BSParser.HeatContext):
pass
# Enter a parse tree produced by BSParser#dispose.
def enterDispose(self, ctx:BSParser.DisposeContext):
pass
# Exit a parse tree produced by BSParser#dispose.
def exitDispose(self, ctx:BSParser.DisposeContext):
pass
# Enter a parse tree produced by BSParser#mix.
def enterMix(self, ctx:BSParser.MixContext):
pass
# Exit a parse tree produced by BSParser#mix.
def exitMix(self, ctx:BSParser.MixContext):
pass
# Enter a parse tree produced by BSParser#usein.
def enterUsein(self, ctx:BSParser.UseinContext):
pass
# Exit a parse tree produced by BSParser#usein.
def exitUsein(self, ctx:BSParser.UseinContext):
pass
# Enter a parse tree produced by BSParser#useinType.
def enterUseinType(self, ctx:BSParser.UseinTypeContext):
pass
# Exit a parse tree produced by BSParser#useinType.
def exitUseinType(self, ctx:BSParser.UseinTypeContext):
pass
# Enter a parse tree produced by BSParser#detect.
def enterDetect(self, ctx:BSParser.DetectContext):
pass
# Exit a parse tree produced by BSParser#detect.
def exitDetect(self, ctx:BSParser.DetectContext):
pass
# Enter a parse tree produced by BSParser#split.
def enterSplit(self, ctx:BSParser.SplitContext):
pass
# Exit a parse tree produced by BSParser#split.
def exitSplit(self, ctx:BSParser.SplitContext):
pass
# Enter a parse tree produced by BSParser#dispense.
def enterDispense(self, ctx:BSParser.DispenseContext):
pass
# Exit a parse tree produced by BSParser#dispense.
def exitDispense(self, ctx:BSParser.DispenseContext):
pass
# Enter a parse tree produced by BSParser#gradient.
def enterGradient(self, ctx:BSParser.GradientContext):
pass
# Exit a parse tree produced by BSParser#gradient.
def exitGradient(self, ctx:BSParser.GradientContext):
pass
# Enter a parse tree produced by BSParser#store.
def enterStore(self, ctx:BSParser.StoreContext):
pass
# Exit a parse tree produced by BSParser#store.
def exitStore(self, ctx:BSParser.StoreContext):
pass
# Enter a parse tree produced by BSParser#numberAssignment.
def enterNumberAssignment(self, ctx:BSParser.NumberAssignmentContext):
pass
# Exit a parse tree produced by BSParser#numberAssignment.
def exitNumberAssignment(self, ctx:BSParser.NumberAssignmentContext):
pass
# Enter a parse tree produced by BSParser#math.
def enterMath(self, ctx:BSParser.MathContext):
pass
# Exit a parse tree produced by BSParser#math.
def exitMath(self, ctx:BSParser.MathContext):
pass
# Enter a parse tree produced by BSParser#binops.
def enterBinops(self, ctx:BSParser.BinopsContext):
pass
# Exit a parse tree produced by BSParser#binops.
def exitBinops(self, ctx:BSParser.BinopsContext):
pass
# Enter a parse tree produced by BSParser#parExpression.
def enterParExpression(self, ctx:BSParser.ParExpressionContext):
pass
# Exit a parse tree produced by BSParser#parExpression.
def exitParExpression(self, ctx:BSParser.ParExpressionContext):
pass
# Enter a parse tree produced by BSParser#methodInvocation.
def enterMethodInvocation(self, ctx:BSParser.MethodInvocationContext):
pass
# Exit a parse tree produced by BSParser#methodInvocation.
def exitMethodInvocation(self, ctx:BSParser.MethodInvocationContext):
pass
# Enter a parse tree produced by BSParser#methodCall.
def enterMethodCall(self, ctx:BSParser.MethodCallContext):
pass
# Exit a parse tree produced by BSParser#methodCall.
def exitMethodCall(self, ctx:BSParser.MethodCallContext):
pass
# Enter a parse tree produced by BSParser#expressionList.
def enterExpressionList(self, ctx:BSParser.ExpressionListContext):
pass
# Exit a parse tree produced by BSParser#expressionList.
def exitExpressionList(self, ctx:BSParser.ExpressionListContext):
pass
# Enter a parse tree produced by BSParser#typeType.
def enterTypeType(self, ctx:BSParser.TypeTypeContext):
pass
# Exit a parse tree produced by BSParser#typeType.
def exitTypeType(self, ctx:BSParser.TypeTypeContext):
pass
# Enter a parse tree produced by BSParser#unionType.
def enterUnionType(self, ctx:BSParser.UnionTypeContext):
pass
# Exit a parse tree produced by BSParser#unionType.
def exitUnionType(self, ctx:BSParser.UnionTypeContext):
pass
# Enter a parse tree produced by BSParser#typesList.
def enterTypesList(self, ctx:BSParser.TypesListContext):
pass
# Exit a parse tree produced by BSParser#typesList.
def exitTypesList(self, ctx:BSParser.TypesListContext):
pass
# Enter a parse tree produced by BSParser#variableDefinition.
def enterVariableDefinition(self, ctx:BSParser.VariableDefinitionContext):
pass
# Exit a parse tree produced by BSParser#variableDefinition.
def exitVariableDefinition(self, ctx:BSParser.VariableDefinitionContext):
pass
# Enter a parse tree produced by BSParser#variable.
def enterVariable(self, ctx:BSParser.VariableContext):
pass
# Exit a parse tree produced by BSParser#variable.
def exitVariable(self, ctx:BSParser.VariableContext):
pass
# Enter a parse tree produced by BSParser#primary.
def enterPrimary(self, ctx:BSParser.PrimaryContext):
pass
# Exit a parse tree produced by BSParser#primary.
def exitPrimary(self, ctx:BSParser.PrimaryContext):
pass
# Enter a parse tree produced by BSParser#literal.
def enterLiteral(self, ctx:BSParser.LiteralContext):
pass
# Exit a parse tree produced by BSParser#literal.
def exitLiteral(self, ctx:BSParser.LiteralContext):
pass
# Enter a parse tree produced by BSParser#primitiveType.
def enterPrimitiveType(self, ctx:BSParser.PrimitiveTypeContext):
pass
# Exit a parse tree produced by BSParser#primitiveType.
def exitPrimitiveType(self, ctx:BSParser.PrimitiveTypeContext):
pass
# Enter a parse tree produced by BSParser#chemicalType.
def enterChemicalType(self, ctx:BSParser.ChemicalTypeContext):
pass
# Exit a parse tree produced by BSParser#chemicalType.
def exitChemicalType(self, ctx:BSParser.ChemicalTypeContext):
pass
# Enter a parse tree produced by BSParser#timeIdentifier.
def enterTimeIdentifier(self, ctx:BSParser.TimeIdentifierContext):
pass
# Exit a parse tree produced by BSParser#timeIdentifier.
def exitTimeIdentifier(self, ctx:BSParser.TimeIdentifierContext):
pass
# Enter a parse tree produced by BSParser#temperatureIdentifier.
def enterTemperatureIdentifier(self, ctx:BSParser.TemperatureIdentifierContext):
pass
# Exit a parse tree produced by BSParser#temperatureIdentifier.
def exitTemperatureIdentifier(self, ctx:BSParser.TemperatureIdentifierContext):
pass
# Enter a parse tree produced by BSParser#unitTracker.
def enterUnitTracker(self, ctx:BSParser.UnitTrackerContext):
pass
# Exit a parse tree produced by BSParser#unitTracker.
def exitUnitTracker(self, ctx:BSParser.UnitTrackerContext):
pass
del BSParser
|
import urllib2
import re
def downURL(url,filename):
try:
fp=urllib2.urlopen(url)
except:
print "download exception"
return 0
op=open(filename,"wb")
while 1:
s=fp.read()
if not s:
break
op.write(s)
fp.close()
op.close()
return 1
def getURL(url):
try:
fp=urllib2.urlopen(url)
except:
print "get url exception"
return []
pattern=re.compile("http://sports.sina.com.cn/[^\>]+.shtml")
    urls=[]
    while 1:
        s=fp.read()
        if not s:
            break
        # accumulate matches across successive reads
        urls.extend(pattern.findall(s))
fp.close()
return urls
def sspider(startURL,times):
urls=[]
urls.append(startURL)
i=0
while 1:
if i>times:
break
if len(urls)>0:
url=urls.pop(0)
print url,len(urls),i
downURL(url,str(i)+".htm")
i=i+1
if len(urls)<times:
urllist=getURL(url)
for url in urllist:
if urls.count(url)==0:
urls.append(url)
else:
break
return 1
sspider("http://www.sina.com.cn",20)
|
from typing import List

class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
res = []
def go(sofar, sumi, i):
if sumi > target:
return
if sumi == target:
res.append(sofar)
return
if i >= len(candidates):
return
go(sofar+[candidates[i]], sumi+candidates[i], i)
go(sofar, sumi, i+1)
return
go([], 0, 0)
return res
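# Hedged usage note (not in the original snippet): candidates may be reused, so
#   Solution().combinationSum([2, 3, 6, 7], 7)
# yields [2, 2, 3] and [7] (the order of the sub-lists may differ).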
|
from selenium import webdriver
import pytest
menu = [('Appearence', 'Template'), ('Logotype', 'Logotype'), ('Catalog', 'Catalog'),
('Product Groups', 'Product Groups'), ('Option Groups', 'Option Groups'), ('Manufacturers', 'Manufacturers'),
('Suppliers', 'Suppliers'), ('Delivery Statuses', 'Delivery Statuses'),
('Sold Out Statuses', 'Sold Out Statuses'), ('Quantity Units', 'Quantity Units'),
('CSV Import/Export', 'CSV Import/Export'), ('Countries', 'Countries'), ('Currencies', 'Currencies'),
('Customers', 'Customers'), ('CSV Import/Export', 'CSV Import/Export'), ('Newsletter', 'Newsletter'),
('Geo Zones', 'Geo Zones'), ('Languages', 'Languages'), ('Storage Encoding', 'Storage Encoding'),
('Modules', 'Job Modules'), ('Customer', 'Customer Modules'), ('Shipping', 'Shipping Modules'),
('Payment', 'Payment Modules'), ('Order Total', 'Order Total Modules'),
('Order Success', 'Order Success Modules'), ('Order Action', 'Order Action Modules'), ('Orders', 'Orders'),
('Order Statuses', 'Order Statuses'), ('Pages', 'Pages'), ('Reports', 'Monthly Sales'),
('Most Sold Products', 'Most Sold Products'), ('Most Shopping Customers', 'Most Shopping Customers'),
('Settings', 'Settings'), ('Defaults', 'Settings'), ('General', 'Settings'), ('Listings', 'Settings'),
('Images', 'Settings'), ('Checkout', 'Settings'), ('Advanced', 'Settings'), ('Security', 'Settings'),
('Slides', 'Slides'), ('Tax', 'Tax Classes'), ('Tax Rates', 'Tax Rates'),
('Translations', 'Search Translations'), ('Scan Files', 'Scan Files For Translations'),
('CSV Import/Export', 'CSV Import/Export'), ('Users', 'Users'), ('vQmods', 'vQmods')]
@pytest.fixture()
def driver():
_driver = webdriver.Chrome()
yield _driver
_driver.quit()
def login(driver, username, password):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys(username)
driver.find_element_by_name("password").send_keys(password)
driver.find_element_by_name("login").click()
def navigate_admin_panel(driver, point, title):
    # Each menu entry is located by its visible text and clicked; the page
    # title is then checked against the expectation from the `menu` table.
    locator = "//li[@id='app-']//span[text()='%s']"
    driver.find_element_by_xpath(locator % point).click()
    assert title + " | My Store" in driver.title
def test_home_task_7(driver):
login(driver, username="admin", password="admin")
for (point, title) in menu:
navigate_admin_panel(driver, point, title)
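# How this module is typically run (a sketch; the file name is hypothetical,
# and chromedriver plus a local litecart install are assumed):
#   pytest -q test_admin_menu.py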
|
import subprocess
from apscheduler.schedulers.background import BackgroundScheduler
def runner(path_of_exe):
    # Launch the executable and wait for it to exit.
    subprocess.call(path_of_exe)
def schedule(path_of_exe, dt):
    # Schedule `path_of_exe` to be launched once at datetime `dt`.
    scheduler = BackgroundScheduler()
    print(scheduler.get_jobs())
    try:
        scheduler.add_job(runner, args=[path_of_exe], next_run_time=dt, id='software')
        scheduler.start()
        print(scheduler.get_jobs())
    except Exception as e:
        print("scheduling failed:", e)
|
from collections import defaultdict
import numpy as np
from pyNastran.bdf.bdf import read_bdf, BDF, CTRIA3, GRID
from ..logger import msg
from .classes import Edge
def read_mesh(filepath, silent=True):
"""Read a Nastran triangular mesh
Parameters
----------
filepath : str
Path to the Nastran input deck file
Returns
-------
mesh : :class:`BDF`
A pyNastran's :class:`BDF` object modified to have the proper edge
references used in the ES-PIM
"""
msg('Reading mesh...', silent=silent)
mesh = read_bdf(filepath, debug=False)
for node in mesh.nodes.values():
node.trias = set()
node.edges = set()
node.index = set()
node.prop = None
trias = []
for elem in mesh.elements.values():
if isinstance(elem, CTRIA3):
elem.edges = []
elem.prop = None
trias.append(elem)
else:
raise NotImplementedError('Element type %s not supported' %
type(elem))
edges = {}
edges_ids = defaultdict(list)
for tria in trias:
for edge_id in tria.get_edge_ids():
edges_ids[edge_id].append(tria)
tria.nodes = tria.nodes_ref
for (n1, n2), e_trias in edges_ids.items():
edge = Edge(mesh.nodes[n1], mesh.nodes[n2])
edge.trias = e_trias
edges[(n1, n2)] = edge
mesh.nodes[n1].edges.add(edge)
mesh.nodes[n2].edges.add(edge)
for edge in edges.values():
for tria in edge.trias:
tria.edges.append(edge)
for tria in trias:
for nid in tria.node_ids:
mesh.nodes[nid].trias.add(tria.eid)
mesh.edges = edges
msg('finished!', silent=silent)
return mesh
def read_delaunay(points, tri, silent=True):
"""Read a Delaunay mesh
Creates a mesh with the proper references for the ES-PIM using data from
:class:`scipy.spatial.Delaunay`.
Parameters
----------
points : (N, ndim) array-like
Cloud of N points with ndim coordinates (usually 2D or 3D), also passed
as input to the Delaunay algorithm
tri : :class:`scipy.spatial.qhull.Delaunay` object
A triangular mesh generated by the Delaunay class
Returns
-------
mesh : :class:`BDF`
A pyNastran's :class:`BDF` object with the proper edge references used
in the ES-PIM
"""
    msg('Reading Delaunay output...', silent=silent)
mesh = BDF()
nodes = []
nid = 0
for pt in points:
if len(pt) == 2:
pt = np.array([pt[0], pt[1], 0.])
nid += 1
node = GRID(nid, cp=0, cd=0, xyz=pt)
node.trias = set()
node.edges = set()
node.index = set()
node.prop = None
nodes.append(node)
mesh.nodes[nid] = node
eid = 0
trias = []
for i1, i2, i3 in tri.simplices:
n1 = nodes[i1]
n2 = nodes[i2]
n3 = nodes[i3]
eid += 1
tria = CTRIA3(eid, 0, (n1.nid, n2.nid, n3.nid))
tria.nodes = [n1, n2, n3]
tria.nodes_ref = tria.nodes
tria._node_ids(nodes=tria.nodes_ref)
tria.edges = []
tria.prop = None
trias.append(tria)
mesh.elements[eid] = tria
edges = {}
edges_ids = defaultdict(list)
for tria in trias:
for edge_id in tria.get_edge_ids():
edges_ids[edge_id].append(tria)
for (n1, n2), e_trias in edges_ids.items():
edge = Edge(mesh.nodes[n1], mesh.nodes[n2])
edge.trias = e_trias
edges[(n1, n2)] = edge
mesh.nodes[n1].edges.add(edge)
mesh.nodes[n2].edges.add(edge)
for edge in edges.values():
for tria in edge.trias:
tria.edges.append(edge)
for tria in trias:
for nid in tria.node_ids:
mesh.nodes[nid].trias.add(tria.eid)
mesh.edges = edges
msg('finished!', silent=silent)
return mesh
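# A minimal usage sketch for the readers above (kept as comments because this
# module uses relative imports; scipy availability, the file name, and the
# import path are assumptions):
#
#   import numpy as np
#   from scipy.spatial import Delaunay
#   points = np.random.rand(50, 2)
#   mesh = read_delaunay(points, Delaunay(points))
#   print(len(mesh.elements), 'triangles,', len(mesh.edges), 'unique edges')
#   # or, for an existing Nastran deck (path is hypothetical):
#   # mesh = read_mesh('model.bdf')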
|
# -*- coding: utf-8 -*-
import itertools
class Solution:
    # Every value except one appears exactly three times, so summing the
    # base-3 digits position by position and reducing each sum modulo 3
    # leaves exactly the digits of the single value. Shifting everything by
    # abs(min(nums)) first keeps the base-3 representations non-negative.
    def fromBase3(self, digits):
        # `digits` is least-significant first; "or '0'" guards the empty case.
        return int("".join(str(digit) for digit in reversed(digits)) or "0", 3)
    def sumBase3(self, digits):
        # `digits` comes from zip_longest, so the None padding is filtered out.
        return sum(digit for digit in digits if digit) % 3
    def toBase3(self, num):
        digits = []
        while num:
            digits.append(num % 3)
            num //= 3
        return digits
    def singleNumber(self, nums):
        min_num = min(nums)
        nums = [num + abs(min_num) for num in nums]
        nums_in_base3 = [self.toBase3(num) for num in nums]
        sums_in_base3 = [
            self.sumBase3(el) for el in itertools.zip_longest(*nums_in_base3)
        ]
        return self.fromBase3(sums_in_base3) - abs(min_num)
if __name__ == "__main__":
solution = Solution()
assert 3 == solution.singleNumber([2, 2, 3, 2])
assert 99 == solution.singleNumber([0, 1, 0, 1, 0, 1, 99])
assert -4 == solution.singleNumber([-2, -2, 1, 1, -3, 1, -3, -3, -4, -2])
|
"""
Flingo TV Queue/Player for XBMC
Announce, then display the Flingo queue
"""
import sys
import os
import xbmc
import xbmcplugin
import xbmcaddon
import xbmcgui
import socket
import uuid
import httplib, urllib
import re
settings = xbmcaddon.Addon(id='plugin.video.flingo')
# dbg = settings.getSetting("debug") == "true"
ROOT_FOLDER = settings.getAddonInfo('path')
RESOURCE_FOLDER = os.path.join(str(ROOT_FOLDER), 'resources')
LIB_FOLDER = os.path.join(str(RESOURCE_FOLDER), 'lib')
WORKING_FOLDER = os.path.normpath( xbmc.translatePath(settings.getAddonInfo("profile")) )
LINKS_FOLDER = os.path.join(str(WORKING_FOLDER), 'links')
REAL_LINK_PATH = os.path.join(str(WORKING_FOLDER), 'links')
USERINFO_FOLDER = WORKING_FOLDER
XBMCPROFILE = xbmc.translatePath('special://profile')
# The userinfo.txt file (with persistent GUID) might be
# in the working folder for the script.service.flingo addon
# if that was started first. That addon will also consider
# this addon's file. script.service.flingo (the longpoll
# handler) must operate as a standalone thing.
#
SERVICE_USERINFO = os.path.join( os.path.dirname( str(USERINFO_FOLDER) ), 'script.service.flingo/userinfo.txt' )
print "[FlingoTV] root folder: " + ROOT_FOLDER
print "[FlingoTV] working folder: " + WORKING_FOLDER
print "[FlingoTV] links folder: " + LINKS_FOLDER
print "[FlingoTV] real link path: " + REAL_LINK_PATH
print "[FlingoTV] resource folder: " + RESOURCE_FOLDER
print "[FlingoTV] lib folder: " + LIB_FOLDER
print "[FlingoTV] userinfo folder: " + USERINFO_FOLDER
try:
import json as simplejson
if not hasattr( simplejson, "loads" ):
raise Exception( "Hmmm! Error with json %r" % dir( simplejson ) )
except Exception, e:
print "[FlingoTV] %s" % str( e )
import simplejson
thisPlugin = int(sys.argv[1])
service = None
"""
The GUID for this device is stored in USERINFO_FOLDER/userinfo.txt
"""
UUID = '9f3ab0ca-01ca-11e2-a1f4-b8ac6f8bf825'
class Service:
    # Thin wrapper around the flingo.tv HTTP API (announce, discover,
    # longpoll, queue, remove_queue); most calls POST a form-encoded body
    # and return parsed JSON, or a dict with an 'error' key on failure.
    def __init__(self, guid):
        self.guid = guid
        self.post_headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
def announce(self):
hostname = socket.gethostname()
params = urllib.urlencode({'guid': self.guid,
'name': 'XBMC Queue',
                                   'description': 'XBMC Flingo Queue on ' + hostname,
'make': 'XBMC',
'model': 'XBMC',
'dev_name': 'XBMC Queue',
'dev_description': 'XBMC Flingo Queue on ' + hostname,
'version': '0.1'})
try:
conn = httplib.HTTPConnection("flingo.tv")
conn.request("POST", "/fling/announce", params, self.post_headers)
res = conn.getresponse()
except:
return({'error': 'Failed to send announcement'})
data = {}
if res.status == 200:
try:
data = simplejson.loads( res.read() )
except:
data = {'error': 'Failed to parse JSON'}
else:
data = {'error': 'Bad Status', 'status': res.status, 'reason': res.reason}
return data
def discover(self):
try:
conn = httplib.HTTPConnection("flingo.tv")
conn.request("POST", "/fling/discover")
res = conn.getresponse()
except:
return({'error': 'Failed to discover'})
data = {}
if res.status == 200:
try:
data = simplejson.loads( res.read() )
except:
data = {'error': 'Failed to parse JSON'}
else:
data = {'error': 'Bad Status', 'status': res.status, 'reason': res.reason}
return data
def longpoll(self):
params = urllib.urlencode({'guid': self.guid})
try:
conn = httplib.HTTPConnection("flingo.tv")
conn.request("POST", "/fling/longpoll", params, self.post_headers)
res = conn.getresponse()
except:
return({'error': 'Failed to longpoll'})
data = {}
if res.status == 200:
try:
data = simplejson.loads( res.read() )
except:
data = {} # This is OK
else:
data = {'error': 'Bad Status', 'status': res.status, 'reason': res.reason}
return data
def queue(self, start, num):
p = {'guid': self.guid}
if start != None:
p['index'] = start
if num != None:
p['howmany'] = num
params = urllib.urlencode(p)
try:
conn = httplib.HTTPConnection("flingo.tv")
conn.request("POST", "/fling/queue", params, self.post_headers)
res = conn.getresponse()
except:
return({'error': 'Failed to get queue'})
data = {}
if res.status == 200:
try:
string = res.read()
data = simplejson.loads( string )
except:
data = {} # This is OK
else:
data = {'error': 'Bad Status', 'status': res.status, 'reason': res.reason}
return data
def rm(self,id):
params = urllib.urlencode({'guid': self.guid, 'link_id': id})
try:
conn = httplib.HTTPConnection("flingo.tv")
conn.request("POST", "/fling/remove_queue", params, self.post_headers)
res = conn.getresponse()
except:
return({'error': 'Failed to remove from queue'})
data = {}
if res.status == 200:
try:
data = simplejson.loads( res.read() )
except:
data = {} # This is OK
else:
data = {'error': 'Bad Status', 'status': res.status, 'reason': res.reason}
return data
def get_vimeo_url(self,data):
context = data['deobfuscator_context']
params = urllib.urlencode({'video_id': context})
try:
conn = httplib.HTTPConnection("flingo.tv")
conn.request("POST", "/api/vimeo", params, self.post_headers)
res = conn.getresponse()
except:
return None
if res.status == 200:
return res.read()
else:
return None
# Extract URL-style parameters from the string passed
# into this plugin when performing sub-actions. This
# is a lot like CGI ...
#
def getParameters(parameterString):
commands = {}
splitCommands = parameterString[parameterString.find('?') + 1:].split('&')
for command in splitCommands:
if (len(command) > 0):
splitCommand = command.split('=')
key = splitCommand[0]
value = splitCommand[1]
commands[key] = value
return commands
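# For example, a call like
#   getParameters('plugin://plugin.video.flingo/?action=remove&link_id=123')
# returns {'action': 'remove', 'link_id': '123'} (the plugin URL here is only
# illustrative).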
# Execute a plugin action
#
def executeAction(args):
global service
if args['action'] == 'remove':
service.rm( args['link_id'] )
xbmc.executebuiltin("Container.Refresh")
# Return the persistent GUID, or generate and save a new one.
#
def getUUID():
path = os.path.join( str(USERINFO_FOLDER), 'userinfo.txt' )
# If the script.service.flingo version exists, use that
if os.path.isfile( SERVICE_USERINFO ):
path = SERVICE_USERINFO
UUID = None
if os.path.isfile( path ):
f = open( path )
s = f.read()
f.close()
reobj = re.compile( r"guid=(.*)\n")
match = reobj.search( s )
if match:
UUID = match.group(1).strip()
if UUID == None:
UUID = str( uuid.uuid4() )
if not os.path.isdir( str(USERINFO_FOLDER) ):
os.makedirs( str(USERINFO_FOLDER) )
f = open( os.path.join( str(USERINFO_FOLDER), 'userinfo.txt' ), 'w' )
f.write( 'guid=' + UUID + '\n' )
f.close()
return UUID
# If I wasn't lazy, I'd return a default 256x256 png to display
# when/if a queue item does not have an image.
#
def defaultImage():
return None
# Extract reliable metadata from a queue item for display
#
def metadata(item):
md = {}
md['title'] = item.has_key('title') and item['title'] or 'No Title'
md['description'] = item.has_key('description') and item['description'] or 'No Description'
md['publisher'] = item.has_key('publisher') and item['publisher'] or 'No Publisher'
image = None
if item.has_key('image'):
image = item['image']
if not image and item.has_key('coverimage'):
image = item['coverimage']
if not image and item.has_key('thumbnail'):
image = item['thumbnail']
icon = None
if item.has_key('thumbnail'):
icon = item['thumbnail']
if not image:
image = defaultImage()
if not icon:
icon = image
md['image'] = image
md['icon'] = icon
url = None
dur = '0'
if item.has_key( 'page_url' ) and re.match('.*vimeo.*', item['page_url']):
# Vimeo can be reverse engineered
url = service.get_vimeo_url( item )
elif item.has_key( 'encodings' ):
        # Pick the first encoding choice out of laziness
if item['encodings'][0].has_key('url'):
url = item['encodings'][0]['url']
if item['encodings'][0].has_key('duration'):
dur = str(item['encodings'][0]['duration'])
md['url'] = url
md['dur'] = dur
return md
if (__name__ == '__main__'):
UUID = getUUID()
service = Service( UUID )
if sys.argv[2]:
# Perform a sub-action
args = getParameters(sys.argv[2])
executeAction( args )
else:
res = service.announce()
if res.has_key('error'):
print "[FlingoTV] Error: %s" % res['error']
res = service.queue(None, None)
if res.has_key('error'):
print "[FlingoTV] Error: %s" % res['error']
if res.has_key( 'items' ):
for item in res['items']:
print "[FlingoTV] item: %s" % repr(item['title'])
md = metadata( item )
if md['url'] != None:
listitem = xbmcgui.ListItem(md['title'],
'[' + md['publisher'] + ']',
iconImage=md['icon'],
thumbnailImage=md['image'])
# Add metadata for vids; as much as Flingo supplies
listitem.setInfo( type='video',
infoLabels={'title': md['title'],
'duration': md['dur'],
'plot': md['description']})
# Attach our link id for context menu operations
listitem.setProperty('link_id', item['link_id'])
# Add the context menu operations
cm = []
cm.append(( 'Remove from Queue',
"XBMC.RunPlugin(%s?action=remove&link_id=%s&)" % (sys.argv[0], item['link_id'])))
listitem.addContextMenuItems(cm, False)
# Add it along with the playable url
xbmcplugin.addDirectoryItem(thisPlugin,md['url'],listitem)
xbmcplugin.endOfDirectory(thisPlugin)
|
from flask import Flask, session
from flask_socketio import SocketIO
from . import chatlog, mem, mongo
try:
    # Prefer the MongoDB-backed chat log; fall back to the in-memory store
    # if the database is unavailable.
    mongo.Init()
    chatlog.Init(mongo.AddChatLog, mongo.Getlast20)
    print('mongo')
except Exception:
    chatlog.Init(mem.AddChatLog, mem.Getlast20)
    print('mem')
chatlog.AddChatLog('test', 'this is a test')
app = Flask(__name__)
app.secret_key = 'SECRET_KEY!!'
socketio = SocketIO(app, async_mode=None)
from .routes import *
from .socketio import *
socketio.run(app, host='0.0.0.0')
|
import os
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
def train_history(acc_dir, set_sizes=(1, 2, 4, 8), save_as=None):
"""plots training history; specifically, accuracy on entire
training set for each epoch, with separate lines on plot for each
"set size" of visual search stimulus. Requires that the
SAVE_ACC_BY_SET_SIZE_BY_EPOCH option in the config.ini file was
set to True during training.
Parameters
----------
acc_dir : str
path to directory where accuracy on training set, computed
for each visual search stimulus set size, was saved during
training
set_sizes : tuple, list
list of visual search stimulus set sizes.
Default is (1, 2, 4, 8).
save_as : str
filename to save figure as. Default is None, in which case
figure is not saved.
Returns
-------
None
"""
acc_txt_files = glob(os.path.join(acc_dir, '*.txt'))
num_rows = len(acc_txt_files) / 3
num_rows = int(np.ceil(num_rows))
fig, ax = plt.subplots(num_rows, 3)
fig.set_size_inches(15, 10)
ax = ax.ravel()
for ax_ind, acc_txt_file in enumerate(acc_txt_files):
acc = np.loadtxt(acc_txt_file, delimiter=',')
rows = acc.shape[0]
for set_size, col in zip(set_sizes, acc.T):
ax[ax_ind].plot(np.arange(rows), col, label=f"set_size: {set_size}")
ax[ax_ind].set_title(f"replicate {ax_ind + 1}")
ax[ax_ind].set_ylabel("acc")
ax[ax_ind].set_xlabel("epoch")
ax[ax_ind].legend(loc='lower right')
if ax.shape[0] > len(acc_txt_files):
extra = ax.shape[0] - len(acc_txt_files)
for ind in range(1, extra + 1):
ax[-ind].set_visible(False)
fig.tight_layout()
if save_as:
plt.savefig(save_as)
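# A minimal usage sketch; the accuracy directory and output file name below
# are hypothetical.
if __name__ == '__main__':
    train_history('results/acc_by_set_size', set_sizes=(1, 2, 4, 8),
                  save_as='train_history.png')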
|