text stringlengths 8 6.05M |
|---|
import yfinance as yf
import streamlit as st
import pandas as pd
from datetime import date
# Streamlit page title.
st.write("""
# Financial Dashboard
""")
## define the ticker symbol (e.g. AAPL - Apple, AMZN - Amazon, MSFT - Microsoft)
tickerSymbol = st.selectbox("Select Stock", ("Facebook (FB)", "Apple (AAPL)", "Amazon (AMZN)", "Netflix (NFLX)", "Google (GOOGL)", "Other"))
if tickerSymbol == "Other":
    # Free-form entry: the text box value is used as the ticker symbol directly.
    tickerSymbol = st.text_input("Ticker Symbol (e.g. MSFT)")
    tickerData = yf.Ticker(tickerSymbol)
else:
    # Extract the symbol between the parentheses, e.g. "Apple (AAPL)" -> "AAPL".
    tickerData = yf.Ticker(tickerSymbol[tickerSymbol.find("(") + 1:tickerSymbol.find(")")]) # get data on this sticker
# tickerData.info ### uncomment to see more info about the yf ticker data
## get the historical prices for this ticker
start_date = st.date_input("Start Date", date(2010,6,30))
end_date = st.date_input("End Date", date(2020,6,30))
# Daily OHLCV history for the selected date range.
tickerDf = tickerData.history(period="1d", start=start_date, end=end_date)
## Open, High, Low, Close, Volume, Dividends, Stock, Splits
## search button
search_button = st.button("Search")
## show statistics based on user input when search button is pressed
if search_button:
    st.write(f"""
***
# Stock overview: {tickerSymbol}
""")
    ## Ticker Info
    info = tickerData.info
    st.subheader("Business Summary")
    # Keep only the first five sentences of the (often long) summary.
    summary = info["longBusinessSummary"].split(".")[:5]
    st.write(".".join(summary))
    st.subheader("Location")
    st.write(f"""
{info["city"]}, {info["state"]}, {info["country"]}
""")
    st.subheader("Statistics")
    # Single-row frame of headline metrics pulled from the yfinance info dict.
    stat_df = pd.DataFrame({"Profit Margins": info["profitMargins"],
                            "Revenue Growth": info["revenueGrowth"],
                            "EBITDA": info["ebitda"],
                            "Market Cap": info["marketCap"],
                            "Buy(1) / Sell(0)": 1 if info["recommendationKey"]=="buy" else 0,
                            "Current Price": info["currentPrice"],
                            "Dividend Rate": info["dividendRate"],
                            "Payout Ratio": info["payoutRatio"],
                            "Earnings Growth": info["earningsGrowth"],
                            "Gross Profit": info["grossProfits"],
                            "Free Cash Flow": info["freeCashflow"],
                            "Current Ratio": info["currentRatio"],
                            "Quick Ratio": info["quickRatio"],
                            "Debt To Equity": info["debtToEquity"],
                            "Return on Equity": info["returnOnEquity"]},
                           index=["values"])
    # Melt to a two-column (variable, value) table for vertical display.
    stat_df = pd.melt(stat_df)
    st.write(stat_df)
    # NOTE: the bare expressions below rely on Streamlit "magic" to render
    # each DataFrame/attribute; do not remove them as "dead code".
    st.subheader("Financials")
    tickerData.financials
    st.subheader("Actions (Dividends, Splits)")
    tickerData.actions
    st.subheader("Major Holders")
    tickerData.major_holders
    st.subheader("Institutional Holders")
    tickerData.institutional_holders
    # st.subheader("Corporate Sustainability")
    # tickerData.sustainability
    st.subheader("Balance Sheet")
    tickerData.balancesheet
    st.subheader("Cash Flow")
    tickerData.cashflow
    st.subheader("Earnings")
    tickerData.earnings
    st.subheader("Analysts Recommendations")
    tickerData.recommendations
    st.write("***")
    ## Line Chart
    st.write("# Price")
    st.subheader("Closing Price")
    st.line_chart(tickerDf.Close)
    st.subheader("Volume Price")
    st.line_chart(tickerDf.Volume)
    ## tickerDf.Open, tickerDf.High, tickerDf.Low
|
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
from marshmallow import fields, post_load, pre_load, Schema
# Shared SQLAlchemy handle; init_app() is presumably called elsewhere — confirm.
client_db = SQLAlchemy()
# Models
class User(UserMixin, client_db.Model):
    """Application user; UserMixin supplies the flask-login session interface."""
    id = client_db.Column(client_db.Integer, primary_key=True)
    username = client_db.Column(client_db.String(100), nullable=False)
    # NOTE(review): 100-char string — presumably a password hash; confirm the
    # value is hashed before insert.
    password = client_db.Column(client_db.String(100), nullable=False)
    # One-to-many: user.reviews, with a "user" backref added to Review.
    reviews = client_db.relationship(
        "Review",
        backref=client_db.backref("user"),
    )
class Product(client_db.Model):
    """A purchasable product with a price and stock quantity."""
    id = client_db.Column(client_db.Integer, primary_key=True)
    name = client_db.Column(client_db.String(100), nullable=False)
    description = client_db.Column(client_db.String(100), nullable=False)
    # Numeric(10, 2): money with two decimal places.
    price = client_db.Column(client_db.Numeric(10, 2), nullable=False)
    quantity = client_db.Column(client_db.Integer, nullable=False)
class Review(client_db.Model):
    """A user's rating of a product.

    Composite primary key (user_id, product_id): at most one review per
    user per product.
    """
    user_id = client_db.Column(
        client_db.Integer,
        client_db.ForeignKey("user.id"),
        primary_key=True,
    )
    product_id = client_db.Column(
        client_db.Integer,
        client_db.ForeignKey("product.id"),
        primary_key=True,
    )
    rating = client_db.Column(client_db.Integer, nullable=False)
    # Convenience accessor for the reviewed product.
    product = client_db.relationship("Product")
# Schemas
class BaseSchema(Schema):
    """Shared marshmallow schema behaviour.

    Subclasses configure:
        __fields_to_skip_none__: field names dropped from the payload when
            they arrive as explicit nulls (e.g. autoincrement ids).
        __model__: model class instantiated from the validated data, or
            None to return no object.
    """

    __fields_to_skip_none__ = ()
    __model__ = None

    @pre_load
    def remove_null_fields(self, data, **kwargs):
        # Drop explicit nulls so optional columns don't fail validation.
        # FIX: isinstance() instead of type() comparison, `is None` instead
        # of `== None` (equality can be overridden by custom types).
        if isinstance(data, dict):
            for field_name in self.__fields_to_skip_none__:
                if field_name in data and data[field_name] is None:
                    del data[field_name]
        return data

    @post_load
    def make_model(self, data, **kwargs):
        # Build the configured model instance; abstract schemas return None.
        return self.__model__(**data) if self.__model__ is not None else None

    class Meta:
        ordered = True
class ProductSchema(BaseSchema):
    """Schema mirroring the Product model."""

    __fields_to_skip_none__ = ("id",)
    __model__ = Product

    id = fields.Integer()
    name = fields.Str(required=True)
    description = fields.Str(required=True)
    price = fields.Float(required=True)
    # FIX: the Product.quantity column is an Integer, but the field was
    # declared fields.Str, rejecting integer payloads and storing strings.
    quantity = fields.Integer(required=True)
class ReviewSchema(BaseSchema):
    """Schema mirroring the Review model (loads into Review instances)."""

    __model__ = Review

    # Composite key fields are both required; no null-skipping needed.
    user_id = fields.Integer(required=True)
    product_id = fields.Integer(required=True)
    rating = fields.Integer(required=True)
    product = fields.Nested(ProductSchema())
class UserSchema(BaseSchema):
    """Schema mirroring the User model.

    NOTE(review): `password` is serialized on dump as well — confirm this
    schema is never used to emit user records to clients.
    """

    __fields_to_skip_none__ = ("id",)
    __model__ = User

    id = fields.Integer()
    username = fields.Str(required=True)
    password = fields.Str(required=True)
    reviews = fields.List(fields.Nested(ReviewSchema()))
|
import numpy as np
def zip_em_all(biglist, nfiles):
    """Concatenate the first `nfiles` entries of `biglist` along axis 0.

    Args:
        biglist: Sequence of array-likes with compatible shapes past axis 0.
        nfiles: Number of leading entries to concatenate.

    Returns:
        A numpy array; when nfiles < 2, just the 0th entry as an array.
    """
    if nfiles < 2:
        # FIX: Python 2 print statement -> Python 3 print() call.
        print('returning 0th element')
        return np.array(biglist[0])
    # FIX: one C-level concatenation instead of a loop of np.append calls,
    # each of which reallocated and copied the whole accumulated array.
    return np.concatenate([np.asarray(part) for part in biglist[:nfiles]], axis=0)
|
import time
import pandas as pd
import numpy as np
# Map of supported city names (lowercase) to their local CSV data files.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.
    Returns:
        (str) city - lowercase name of the city to analyze
        (str) month - three-letter lowercase month abbreviation ('jan'..'jun'),
            or "all" to apply no month filter
        (int) day - index into ['mon','tue',...,'sun','all']; 0-6 selects a
            weekday (Monday=0), 7 means "all" (no day filter)
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    # City: loop until one of the three supported cities is entered.
    while True:
        city=input('Please User! enter the name of the city that you want to explore its data. Choose between: Chicago, New York City, or Washington')
        if city.lower() not in ['chicago','new york city','washington']:
            print("Please User! choose one of the three given cities")
        else:
            city=city.lower()
            break
    print('You chose city of {}'.format(city.title()))
    # Month: only the first three characters are compared, so both "March"
    # and "mar" are accepted.
    while True:
        months=['all','jan','feb','mar','apr','may','jun']
        month=input("Please User! Choose one month that you want to view its data by typing only the first three characters. choose between: January, February, March, April, May, June or \"All\"")
        if month[:3].lower() not in months:
            print("Please User! Choose one of the six months, or choose \"All\"")
        else:
            month=month[:3].lower()
            break
    # Day: the three-letter abbreviation is converted to its list index;
    # load_data() relies on 7 being the index of 'all'.
    while True:
        day=input("Please User! Enter the first three characters of the day of the week. choose between:Mon, Tue, Wed ,Thu, Fri, Sat, Sun; or \"All\"")
        days=['mon','tue','wed','thu','fri','sat','sun','all']
        day=day[:3].lower()
        if day not in days:
            print("Please User!choose one of the days of the week, or \"All")
        else:
            day=days.index(day)
            break
    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.
    Args:
        (str) city - name of the city to analyze (key into CITY_DATA)
        (str) month - three-letter month abbreviation ('jan'..'jun'), or "all"
            to apply no month filter
        (int) day - weekday index (Monday=0 .. Sunday=6); 7 applies no day
            filter (matches get_filters' encoding of "all")
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    df=pd.read_csv(CITY_DATA[city])
    # Derive the filter columns from the parsed trip start timestamp.
    df['Start Time']=pd.to_datetime(df['Start Time'])
    df['month']=df['Start Time'].dt.month
    df['day_of_week']=df['Start Time'].dt.weekday
    df['hour']=df['Start Time'].dt.hour
    months=['all','jan','feb','mar','apr','may','jun']
    if month!='all':
        # The abbreviation's list position doubles as the 1-based month number.
        month=months.index(month)
        df=df[df['month']==month]
    if day !=7:
        # 7 is the index of 'all' in get_filters' day list.
        df=df[df['day_of_week']==day]
    return df
def time_stats(df):
    """Displays statistics on the most frequent times of travel."""
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    # Most common month (mode may contain several values; all are shown).
    busiest_month = df['month'].mode()
    print ("The Most Common Month!")
    print(busiest_month)

    # Most common day of the week.
    busiest_day = df['day_of_week'].mode()
    print ('The Most Common day of the week!')
    print(busiest_day)

    # Most common start hour.
    busiest_hour = df['hour'].mode()
    print('The Most Common Hour!')
    print(busiest_hour)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    # Most commonly used start station.
    common_start_station=df['Start Station'].mode()[0]
    print('Most Popular Start Station: {}'.format(common_start_station))

    # Most commonly used end station.
    common_end_station=df['End Station'].mode()[0]
    print('Most Popular End Station: {}'.format(common_end_station))

    # Most frequent (start, end) combination.
    # FIX: groupby(...).size() returns a Series indexed by the (start, end)
    # MultiIndex; the old code indexed it with column labels, which raises
    # KeyError. idxmax() yields the most frequent pair directly.
    a, b = df.groupby(['Start Station','End Station']).size().idxmax()
    print('Most Popular Combination of Start and End Stations: Start: {} End {}'.format(a,b))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Displays statistics on the total and average trip duration."""
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()

    # Total travel time across all filtered trips.
    print("Total Travel Time is: {}".format(df['Trip Duration'].sum()))

    # Average trip length.
    print('Mean Travel Time is: {}'.format(df['Trip Duration'].mean()))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def user_stats(df):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    start_time = time.time()

    # Counts of user types.
    # FIX: the old code summed every numeric column per user type; the
    # printed label (and the intent) is a count of each user type.
    user_types=df['User Type'].value_counts()
    print('User Types\n',user_types)

    # Counts of gender (column missing for some cities, e.g. Washington).
    if 'Gender' in df.columns:
        gender_counts=df['Gender'].value_counts()
        print("Gender Counts")
        print(gender_counts)

    # Earliest, most recent, and most common birth year (when available).
    if 'Birth Year' in df.columns:
        # FIX: earliest is the minimum year and most recent the maximum;
        # the original had min/max swapped.
        early_year=df['Birth Year'].min()
        late_year=df['Birth Year'].max()
        common_year=df['Birth Year'].mode()
        print('The earliest birth year is: {}'.format(early_year))
        print('The most recent birth year is: {}'.format(late_year))
        print('The most common birth year is: {}'.format(common_year))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def main():
    """Drive one interactive session: filter, load, report, then offer a restart."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        time_stats(df)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        # Offer the raw rows behind the statistics.
        display_data(df)
        # FIX: the original loop had no exit path at all; ask before looping.
        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break
def display_data(df):
    """Interactively page through the raw DataFrame five rows at a time.

    Args:
        df: The filtered city DataFrame to preview.
    """
    prompt = input('\nWould you like to see 5 rows of raw data? Enter yes or no.\n')
    start, end = 0, 5
    if prompt.lower() == 'yes':
        print(df.iloc[start:end])
        more = input('Do you want to see more five lines? yes or no')
        while more.lower() == 'yes':
            # Grow the window by five rows each round (start stays at 0 so
            # earlier rows remain visible, matching the original behaviour).
            end += 5
            print(df.iloc[start:end])
            more = input('Do you want to see more five lines? yes or no')
    # FIX: the original ended with a restart prompt and a bare `break`
    # outside any loop (a SyntaxError); restart handling belongs to the
    # caller's loop, so it is removed here.
# Run the interactive session only when executed as a script.
if __name__ == "__main__":
    main()
|
#--------------------------------------------------------------------------------
# G e n e r a l I n f o r m a t i o n
#--------------------------------------------------------------------------------
# Name: Exercise 2.1 - Another ball dropped from a tower
#
# Usage: Calculates time a dropped object takes to hit the ground.
#
# Description: Calculates the time it takes for a ball to hit the ground
# when dropped from a user-entered height.
#
# Inputs: Height (number) which the ball is dropped from.
#
# Outputs: Time taken to hit the ground.
#
# Auxiliary Files:
#
# Special Instructions:
#
#--------------------------------------------------------------------------------
# C o d e H i s t o r y
#--------------------------------------------------------------------------------
# Version: 2.0
#
# Author(s): Kole Frazier
#
#--------------------------------------------------------------------------------
import math as m
# Get user input for height of tower.
# In kinematic equations, this will be treated as the total distance, or 'y'.
Height = float(input("Enter the height (in meters) of the tower: "))

# Known values: y0 = 0, v0 = 0, y = Height.
InitialVelocity = 0.0  # Enforce floating point math instead of truncating-int math.
InitialHeight = 0.0
GravityAcceleration = 9.8  # m/s^2

# Final speed from the kinematic relation v^2 = v0^2 + 2*a*(y - y0).
# FIX: the acceleration factor was missing from the original expression,
# which computed sqrt(v0^2 + 2*(y - y0)) and understated both speed and time.
FinalVelocity = m.sqrt(InitialVelocity**2 + 2 * GravityAcceleration * (Height - InitialHeight))

# Fall time from v = v0 + a*t  =>  t = (v - v0)/a.
Time = (FinalVelocity - InitialVelocity) / GravityAcceleration

# Print out the result.
print('Falling from {0} meters, the ball hits the ground moving at {1} m/s after {2} seconds have passed'.format(Height, FinalVelocity, Time))
|
from PyQt5 import QtCore, QtGui, QtWidgets
class CursorInterface(QtWidgets.QGraphicsView):
    """
    A 2D cursor control interface implemented using a QGraphicsView.
    This view essentially just holds a QGraphicsScene that grows to fit the
    size of the view, keeping the aspect ratio square. The scene is displayed
    with a gray border.
    """

    # Scene extents: a square centred on the origin, 400 units per side.
    initleft = -200
    initbottom = initleft
    initwidth = -initleft*2
    initheight = -initleft*2
    border_color = '#444444'

    def __init__(self, parent=None):
        super(CursorInterface, self).__init__(parent)
        self._init_scene()
        self._init_border()

    def _init_scene(self):
        # Build the scene with the fixed square rect and a white background.
        scene = QtWidgets.QGraphicsScene()
        scene.setSceneRect(self.initleft, self.initbottom,
                           self.initwidth, self.initheight)
        # NoIndex is typically chosen when items move frequently — TODO confirm.
        scene.setItemIndexMethod(QtWidgets.QGraphicsScene.NoIndex)
        self.setScene(scene)
        self.setRenderHint(QtGui.QPainter.Antialiasing)
        self.setBackgroundBrush(QtCore.Qt.white)

    def _init_border(self):
        # Draw the four edges of the scene rect as gray lines.
        rect = self.scene().sceneRect()
        pen = QtGui.QPen(QtGui.QColor(self.border_color))
        lines = [
            QtCore.QLineF(rect.topLeft(), rect.topRight()),
            QtCore.QLineF(rect.topLeft(), rect.bottomLeft()),
            QtCore.QLineF(rect.topRight(), rect.bottomRight()),
            QtCore.QLineF(rect.bottomLeft(), rect.bottomRight())
        ]
        for line in lines:
            self.scene().addLine(line, pen)

    def resizeEvent(self, event):
        # Keep the whole scene visible and square whenever the view resizes.
        super().resizeEvent(event)
        self.fitInView(self.sceneRect(), QtCore.Qt.KeepAspectRatio)

    def map_coords(self, nx, ny):
        # Map a coordinate pair into scene coordinates; y is negated because
        # scene y grows downward while the input presumably grows upward —
        # TODO confirm input convention.
        return self.map_size(nx), -self.map_size(ny)

    def map_size(self, size):
        # Scale a normalized length by half the scene width.
        return size * (self.sceneRect().width()/2)
|
from us_names import SURNAMES, FEMALE_NAMES, MALE_NAMES
# Option 1: for-loop — first two female name/surname pairs.
for idx in range(2):
    print(FEMALE_NAMES[idx] + ' ' + SURNAMES[idx])

# Option 2: while-loop — male name/surname pairs at indices 2 and 1.
remaining = 2
while remaining > 0:
    print(MALE_NAMES[remaining] + ' ' + SURNAMES[remaining])
    remaining -= 1
|
# -*- encoding: utf-8 -*-
import inspect
import re
from collections import defaultdict, OrderedDict
from typing import List, Tuple, Optional, Dict, Union
from .register import MobaseRegister
from .mtypes import (
Type,
CType,
Class,
PyClass,
Enum,
Arg,
Ret,
Method,
Constant,
Property,
Function,
)
from . import logger
def magic_split(value: str, sep=",", open="(<", close=")>"):
    """Split the value according to the given separator, but keeps together elements
    within the given separator. Useful to split C++ signature function since type names
    can contain special characters...

    Examples:
        - magic_split("a,b,c", sep=",") -> ["a", "b", "c"]
        - magic_split("a<b,c>,d(e,<k,c>),p") -> ["a<b,c>", "d(e,<k,c>)", "p"]

    Args:
        value: String to split.
        sep: Separator to use.
        open: List of opening characters.
        close: List of closing characters. Order must match open.

    Returns: The list of split parts from value.
    """
    # i: start of the current part; j: scan cursor; s: stack of indices into
    # `open` for the brackets currently unclosed; r: collected parts.
    i, j = 0, 0
    s: List[int] = []
    r = []
    while i < len(value):
        j = i + 1
        while j < len(value):
            c = value[j]
            # Separator found and the stack is empty:
            if c == sep and not s:
                break
            # Check close/open:
            if c in open:
                s.append(open.index(c))
            elif c in close:
                # The stack might be empty if the separator is also an opening element:
                if not s and sep in open and j + 1 == len(value):
                    pass
                else:
                    # Mismatched bracket kinds are a parse error:
                    t = s.pop()
                    if t != close.index(c):
                        raise ValueError(
                            "Found closing element {} for opening element {}.".format(
                                c, open[t]
                            )
                        )
            j += 1
        r.append(value[i:j])
        # Skip past the separator itself:
        i = j + 1
    return r
def parse_ctype(s: str) -> CType:
    """Parse a C++ type from the given string.

    Args:
        s: String to parse.

    Returns: A C++ type parsed from the given string.
    """
    # Tokens that carry no information for the generated stubs:
    noise_tokens = (
        "__64",
        "__cdecl",
        "__ptr64",
        "{lvalue}",
        "class",
        "struct",
        "enum",
        "unsigned",
    )
    for token in noise_tokens:
        s = s.replace(token, "")
    # Strip the namespaces we never want to expose:
    for namespace in ("MOBase", "boost::python"):
        s = s.replace(namespace + "::", "")
    # Normalize a few specific spellings:
    s = s.replace("__int64", "int").replace(" const &", "").replace("&", "")
    return CType(s.strip())
def parse_carg(s: str, has_default: bool) -> Arg:
    """Parse the given C++ argument.

    Args:
        s: The string to parse.
        has_default: Indicates if this argument as a default.

    Returns: An argument parsed from the given string.
    """
    type_part, default = s, None
    # A "type = value" argument carries its default inline:
    if "=" in s:
        type_part, default = (piece.strip() for piece in s.split("="))
    if default is None and has_default:
        # Boost omits the value itself; use the shared sentinel.
        default = Arg.DEFAULT_NONE
    return Arg("", parse_ctype(type_part), default)
def parse_csig(s: str, name: str) -> Tuple[CType, List[Arg]]:
    """Parse a boost::python C++ signature.

    Args:
        s: The signature to parse.
        name: Name of the function, or "" if the signature correspond to a type.

    Returns: (RType, Args) where RType is a CType object, and Args is a list of Arg
    objects containing CType.
    """
    # Remove the [ and ] which specifies default arguments but are useless since
    # we already have = to tell us - The replacement is weird because the way boost
    # present these is weird, and to avoid breaking default argument such as = []:
    # c also counts how many trailing arguments carry defaults.
    c = s.count("[,")
    s = s.replace("[,", ",")
    s = s.replace("]" * c, "")
    # Split return type/arguments:
    if name:
        rtype_s, args_s = s.split(name)
        # Remove the ( and ).
        args_s = args_s.strip()[1:-1]
    else:
        rtype_s, args_s = magic_split(s, "(", "(<", ")>")
        # Only remove the last ) because the first one is removed by magic_split:
        args_s = args_s.strip()[:-1]
    # Parse return type:
    rtype = parse_ctype(rtype_s.strip())
    # Parse arguments:
    # Strip spaces and remove the first and last ():
    args_s = args_s.strip()
    args_ss = magic_split(args_s, ",", "(<", ")>")
    # The last c arguments are the ones with defaults:
    args = [parse_carg(v, i > len(args_ss) - c - 1) for i, v in enumerate(args_ss)]
    return rtype, args
def parse_psig(s: str, name: str) -> Tuple[Type, List[Arg]]:
    """Parse a boost::python python signature.

    Args:
        s: The signature to parse.
        name: Name of the function.

    Returns: (RType, Args) where RType is a Type object, and Args is a list of Arg
    objects containing Type.
    """
    # Strip boost's "[, ...]" optional-argument brackets (same trick as
    # parse_csig); c counts how many trailing arguments have defaults.
    c = s.count("[,")
    s = s.replace("[,", ",")
    s = s.replace("]" * c, "")
    # This is pretty brutal way of extracting stuff... But most things can be
    # retrieve from the C++ signature, here we are mainly interested in extracting
    # the python type if possible:
    m: re.Match[str] = re.search(
        r"{}\((.*)\)\s*->\s*([^\s]+)\s*:".format(name), s
    )  # type: ignore
    pargs = []
    # Each argument looks like "(type)name" or "(type)name=default":
    args = list(filter(bool, m.group(1).strip().split(",")))
    for i, pa in enumerate(args):
        pa = pa.strip()
        # Index of the right bracket:
        irbrack = pa.find(")")
        # The type is within the brackets:
        t = pa[1:irbrack]
        pa = pa[irbrack + 1 :]
        n: str = pa.strip()
        d: Optional[str] = None
        if pa.find("=") != -1:
            n, d = pa.split("=")
            n = n.strip()
            d = d.strip()
        elif i > len(args) - c - 1:
            # Trailing defaulted argument without an explicit value:
            d = Arg.DEFAULT_NONE
        pargs.append(Arg(n, Type(t), d))
    return Type(m.group(2)), pargs
def find_best_argname(iarg: int, pname: str, cname: str):
    """Find the best name for the ith argument of a function.

    Args:
        iarg: Index of the argument, used for the fallback name.
        pname: Name of the argument in the python signature.
        cname: Name of the argument in the C++ signature.

    Returns:
        The best name for the corresponding argument.
    """
    # Prefer the python name whenever either signature names the argument;
    # fall back to a positional "argN" name only when both are empty.
    if pname or cname:
        return pname
    return "arg{}".format(iarg + 1)
def find_best_type(ptype: Type, ctype: CType) -> Type:
    """Find the best type from the given python and C++ type.

    Side effect: records the (python, C++) pairing in the global register.

    Args:
        ptype: The python type.
        ctype: The C++ type.

    Returns: The best of the two types.
    """
    # Imported here to avoid a circular import at module load time.
    from .register import MOBASE_REGISTER
    if ptype.name == ctype.name:
        return ptype
    elif ptype.is_none() and ctype.is_none():
        return ptype
    # Both signatures must agree on "none-ness" before pairing them:
    assert ptype.is_none() == ctype.is_none()
    MOBASE_REGISTER.register_type(ptype, ctype)
    if ptype.is_object():
        # Prefer the C++ type when python only reports a generic "object":
        if ctype.is_object():
            return ptype
        return ctype
    # Returned pointer are treated differently because they can often be null:
    if ctype.is_pointer():
        return ctype
    return ptype
def find_best_value(pvalue: str, cvalue: str) -> str:
    """Find the best value (default value) from the given python and C++ one.

    WARNING: This currently always return pvalue and only warns the user if
    the two values are not identical.

    Args:
        pvalue: Python default value.
        cvalue: C++ default value.

    Returns: The best of the two values.
    """
    if pvalue == cvalue:
        return pvalue
    # Disagreement between the two signatures is suspicious but not fatal.
    logger.warning("Mismatch default value: {} {}.".format(pvalue, cvalue))
    return pvalue
def is_enum(e: type) -> bool:
    """Check if the given class is an enumeration.

    Args:
        e: The class object to check.

    Returns: True if the object is an enumeration (boost::python enumeration, not
        python) False otherwize.
    """
    # Non-classes can never be boost enums.
    if not isinstance(e, type):
        return False
    # Yet to find a better way: walk the MRO looking for the boost enum base.
    for ancestor in inspect.getmro(e):
        if "{}.{}".format(ancestor.__module__, ancestor.__name__) == "Boost.Python.enum":
            return True
    return False
def make_enum(fullname: str, e: type) -> Enum:
    """Construct a Enum object from the given class.

    Args:
        fullname: Fully qualified name of the enumeration.
        e: The class representing a boost::python enumeration.

    Returns: An Enum object representing the given enumeration.
    """
    # All boost enums have a .values attributes:
    values = e.values  # type: ignore
    # Order members by their numeric value for a deterministic layout.
    members = OrderedDict((values[key].name, key) for key in sorted(values))
    return Enum(e.__name__, members)
class Overload:
    """ Small class to avoid mypy issues... """

    # One overload of a function: its return type and its argument list.
    rtype: Type
    args: List[Arg]

    def __init__(self, rtype, args):
        self.rtype = rtype
        self.args = args
def parse_bpy_function_docstring(e) -> List[Overload]:
    """Parse the docstring of the given element.

    Args:
        e: The function to "parse".

    Returns: A list of overloads for the given function, where each overload is
    a dictionary with a "rtype" entry containing the return type and a "args"
    entry containing the list of arguments.
    """
    lines = e.__doc__.split("\n")
    # Find the various overloads: each one starts with a line beginning with
    # the function name (its python signature).
    so = [i for i, line in enumerate(lines) if line.strip().startswith(e.__name__)]
    so.append(len(lines))
    # We are going to parse the python and C++ signature, and try to merge
    # them...
    overloads: List[Overload] = []
    for i, j in zip(so[:-1], so[1:]):
        psig = lines[i].strip()
        # The C++ signature is on the line after a "C++ signature :" marker.
        # NOTE(review): csig would be unbound if the marker were missing —
        # presumably boost always emits it; confirm.
        for k in range(i, j):
            if lines[k].strip().startswith("C++ signature"):
                csig = lines[k + 1].strip()
        prtype, pargs = parse_psig(psig, e.__name__)
        crtype, cargs = parse_csig(csig, e.__name__)
        # Currently there is no way to automatically check so we add [optional]
        # in the doc:
        if e.__doc__.find("[optional]") != -1:
            crtype._optional = True
        assert len(pargs) == len(cargs)
        # Now we need to find the "best" type from both signatures:
        rtype = find_best_type(prtype, crtype)
        args = []
        for iarg, (parg, carg) in enumerate(zip(pargs, cargs)):
            args.append(
                Arg(
                    find_best_argname(iarg, parg.name, carg.name),
                    find_best_type(parg.type, carg.type),  # type: ignore
                    find_best_value(parg.value, carg.value),  # type: ignore
                )
            )  # type: ignore
        overloads.append(Overload(rtype=rtype, args=args))
    return overloads
def make_functions(name: str, e) -> List[Function]:
    """Build one Function object per overload found in the boost docstring.

    Args:
        name: Name of the function (currently unused; e.__name__ is used).
        e: The boost-exposed function to convert.

    Returns: The list of Function objects, flagged when overloaded.
    """
    overloads = parse_bpy_function_docstring(e)
    return [
        Function(
            e.__name__, Ret(ovld.rtype), ovld.args, has_overloads=len(overloads) > 1,
        )
        for ovld in overloads
    ]
def make_class(fullname: str, e: type, register: MobaseRegister) -> Class:
    """Constructs a Class object from the given python class.

    Args:
        fullname: Name of the class (might be different from __name__ for inner
            classes).
        e: The python class (created from boost) to construct an object for.
        register: Register used to resolve/build base and inner classes.

    Returns: A Class object corresponding to the given class.
    """
    # Collect the names of all mobase ancestors, stopping at the boost base.
    base_classes_s: List[str] = []
    # Kind of ugly, but...:
    for c in inspect.getmro(e):
        if c != e and c.__module__ == "mobase":
            base_classes_s.append(c.__name__)
        if c.__module__ == "Boost.Python":
            break
    # Keep as a comment but this is/should be fixed in the actual C++ code:
    # Lots of class exposed do not inherit IPlugin while they should:
    # if "IPlugin" not in base_classes_s and e.__name__.startswith("IPlugin") \
    #         and e.__name__ != "IPlugin":
    #     base_classes_s.append("IPlugin")
    # This contains ALL the parent classes, not the direct ones:
    base_classes: List[Class] = [
        register.make_object(name) for name in base_classes_s  # type: ignore
    ]
    # Retrieve all the attributes... The hasattr is required but I don't know why:
    all_attrs = [(n, getattr(e, n)) for n in dir(e) if hasattr(e, n)]
    # Some exclusions:
    EXCLUDED_MEMBERS = [
        "__weakref__",
        "__dict__",
        "__doc__",
        "__instance_size__",
        "__module__",
        "__getattr__",
    ]
    all_attrs = [
        a
        for a in all_attrs
        # Using getattr() here since some attribute do not have name (e.g. constants):
        if a[0] not in EXCLUDED_MEMBERS
    ]
    # Fetch all attributes from the base classes, keyed by name, so that
    # inherited members are not re-emitted on this class:
    base_attrs: Dict[str, List[Union[Constant, Property, Method, Class]]] = defaultdict(
        list
    )
    for bc in base_classes:
        # Thanks mypy for the naming...
        for a1 in bc.constants:
            base_attrs[a1.name].append(a1)
        for a2 in bc.methods:
            base_attrs[a2.name].append(a2)
        for a3 in bc.properties:
            base_attrs[a3.name].append(a3)
        for a4 in bc.inner_classes:
            base_attrs[a4.name].append(a4)
    # Retrieve the enumerations and classes:
    inner_classes = [
        ic[1]
        for ic in all_attrs
        if isinstance(ic[1], type)
        and ic[1].__name__ != "class"
        and ic[0] not in base_attrs
    ]
    pinner_classes: List[Class] = [
        register.make_object("{}.{}".format(fullname, ic.__name__), ic)  # type: ignore
        for ic in inner_classes
    ]
    # Find the methods:
    methods = [m[1] for m in all_attrs if callable(m[1])]
    methods = sorted(methods, key=lambda m: m.__name__)
    # Filter out methods not provided or implemented (boost writes a
    # "C++ signature" section only for real bindings):
    methods = [
        m
        for m in methods
        if m.__doc__ is not None and m.__doc__.find("C++ signature") != -1
    ]
    # List of methods that must return bool:
    BOOL_METHODS = ["__eq__", "__lt__", "__le__", "__ne__", "__gt__", "__ge__"]
    pmethods = []
    for method in methods:
        if method.__doc__ is None:
            continue
        overloads = parse_bpy_function_docstring(method)
        # __eq__ must accept an object in python, so we need to add an overload:
        if method.__name__ in ["__eq__", "__ne__"]:
            overloads.append(
                Overload(
                    rtype=Type("bool"),
                    args=[Arg("", Type(e.__name__)), Arg("other", Type("object"))],
                )
            )
        cmethods = []
        for ovld in overloads:
            args = ovld.args
            # This is a very heuristic way of checking if the method is static but I did
            # not find anything better yet... A method is considered static when
            # its first argument is not the class (or a wrapper/ancestor of it):
            static = False
            if len(args) == 0:
                static = True
            elif method.__name__.startswith("__"):  # Special method cannot be static
                static = False
            else:
                arg0_name = args[0].type.name
                if arg0_name in register.cpp2py:
                    arg0_name = register.cpp2py[arg0_name].name
                arg0_name = (
                    arg0_name.replace("*", "")
                    .replace("&", "")
                    .replace("const", "")
                    .strip()
                )
                static = (
                    arg0_name
                    not in [e.__name__, e.__name__ + "Wrapper"] + base_classes_s
                )
            # We need to fix some default values (basically default values that
            # comes from inner enum): qualify them with the enum's owning class.
            for arg in ovld.args:
                if arg.has_default_value():
                    value: str = arg.value  # type: ignore
                    bname = value.split(".")[0]
                    for bclass in base_classes:
                        for biclass in bclass.inner_classes:
                            if isinstance(biclass, Enum) and biclass.name == bname:
                                arg._value = bclass.name + "." + value
            pmethod = Method(
                method.__name__,
                Ret(ovld.rtype),
                ovld.args,
                static=static,
                has_overloads=len(overloads) > 1,
            )
            # Comparison operators always return bool regardless of signature:
            if method.__name__ in BOOL_METHODS:
                pmethod.ret = Ret(Type("bool"))
            cmethods.append(pmethod)
        pmethods.extend(cmethods)
    # Retrieve the attributes (anything that is neither callable nor a class):
    constants = []
    properties = []
    for name, attr in all_attrs:
        if callable(attr) or isinstance(attr, type):
            continue
        # Maybe we should check an override here (e.g., different value for a constant):
        if name in base_attrs:
            continue
        if isinstance(attr, property):
            # Read-only when no setter is exposed:
            properties.append(Property(name, Type("Any"), attr.fset is None))
        elif not hasattr(attr, "__name__"):
            constants.append(Constant(name, Type(type(attr).__name__), attr))
    # Direct (non-boost) bases only, for the emitted class declaration:
    direct_bases: List[Class] = []
    for c in e.__bases__:
        if c.__module__ != "Boost.Python":
            direct_bases.append(register.get_object(c.__name__))
    # Forcing QWidget base for XWidget classes since these do not show up
    # and we use a trick:
    if e.__name__.endswith("Widget"):
        logger.info(
            "Forcing base {} for class {}.".format(
                "PyQt5.QtWidgets.QWidget", e.__name__
            )
        )
        direct_bases.append(PyClass("PyQt5.QtWidgets.QWidget"))
    return Class(
        e.__name__,
        direct_bases,
        pmethods,
        inner_classes=pinner_classes,
        properties=properties,
        constants=constants,
    )
|
class Animal:
    """Base class holding an animal's name and age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age
class Dog(Animal):
    """Dog that records a breed and echoes its details when the breed is set."""

    def breedname(self, breed):
        # Store the breed, then print the inherited and new attributes in order.
        self.breed = breed
        for detail in (self.name, self.age, self.breed):
            print(detail)
# Demo: create a dog and attach a breed (prints name, age, breed).
d=Dog('chakki',2)
d.breedname('Beagle')
import os
import json
import sys
import commons.functions as func
import commons.errors as errors
cwd = os.getcwd()
# Absolute paths to the bot's JSON data files under ./data (relative to the
# process working directory — the bot is presumably launched from its root).
guildsdata_file_path = os.path.expandvars(f"{cwd}/data/guilds_data.json")
help_file_path = os.path.expandvars(f"{cwd}/data/help_messages.json")
notice_file_path = os.path.expandvars(f"{cwd}/data/notice_messages.json")
class Json:
    """Helpers for reading and writing the bot's JSON data files."""

    def __init__(self) -> None:
        pass

    def language(self, lang: str) -> str:
        # Normalise a user-supplied language name; unknown values fall back
        # to English.
        lang = str(lang).lower()
        if lang in ["en", "english", "英語"]:
            return "english"
        elif lang in ["ja", "japanese", "日本語"]:
            return "japanese"
        else:
            return "english"

    def check_guildsdata_file(self) -> None:
        # Create an empty guild-data file ("{}") on first run.
        if not os.path.exists(guildsdata_file_path):
            with open(guildsdata_file_path, mode="w") as f:
                f.write("{}")

    """
    Get data.
    --- about the `file` parameter ---
    integer selector:
        1: guilds_data.json
        2: notice_messages.json
        3: help_messages.json
    """
    def get_data(self, file: int) -> dict:
        # NOTE(review): func.str_to_int suggests `file` may arrive as a string
        # despite the int annotation; a falsy result yields an empty dict.
        file = func.str_to_int(file)
        file_path = None
        if not file:
            return {}
        if file == 1:
            # Ensure the guild-data file exists before reading it.
            self.check_guildsdata_file()
            file_path = guildsdata_file_path
        elif file == 2:
            file_path = notice_file_path
        elif file == 3:
            file_path = help_file_path
        else:
            return {}
        try:
            with open(file_path, mode="rt", encoding="utf-8") as f:
                data = json.load(f)
            return data
        except Exception as e:
            # A corrupt or unreadable data file is treated as fatal.
            errors.critical_print(e)
            sys.exit(1)

    """
    Update the stored data for one guild (keyed by its id).
    """
    def update_guild_data(self, guild_id: str, data: dict) -> bool:
        # Read-modify-write of the whole guild-data file.
        guilds_data = self.get_data(1)
        guilds_data[str(guild_id)] = data
        try:
            with open(guildsdata_file_path, mode="w", encoding="utf-8") as f:
                json.dump(guilds_data, f, indent=4, ensure_ascii=False)
            return True
        except Exception as e:
            errors.error_print(e)
            return False
#!/usr/bin/env python
# Tiny demo: append a value to a list and show the result.
x = ['Fruit']
y = 'Apple'
x.append(y)
# FIX: Python 2 print statement -> Python 3 print() call.
print(x)
|
def authenticate(uname, password):
    """Return True only for the single hard-coded credential pair.

    NOTE(review): credentials live in source code — acceptable for a demo,
    never for production.
    """
    return uname == "jashin" and password == "awesome"
|
import numpy as np
from scipy.optimize import minimize
from scipy.io import loadmat
from numpy.linalg import det, inv
from math import sqrt, pi
import scipy.io
import matplotlib.pyplot as plt
import pickle
import sys
def ldaLearn(X,y):
    # Inputs
    # X - a N x d matrix with each row corresponding to a training example
    # y - a N x 1 column vector indicating the labels for each training example
    #
    # Outputs
    # means - A d x k matrix containing learnt means for each of the k classes
    # covmat - A single d x d learnt covariance matrix

    # LDA: one mean column per class, a single covariance shared by all classes.
    classes = np.unique(y)
    means = np.zeros((X.shape[1], len(classes)))
    for col, label in enumerate(classes):
        members = X[(y == label).ravel()]
        means[:, col] = members.sum(axis=0) / len(members)
    covmat = np.cov(X.transpose())
    return means, covmat
def qdaLearn(X,y):
    # Inputs
    # X - a N x d matrix with each row corresponding to a training example
    # y - a N x 1 column vector indicating the labels for each training example
    #
    # Outputs
    # means - A d x k matrix containing learnt means for each of the k classes
    # covmats - A list of k d x d learnt covariance matrices for each of the k classes

    # QDA: per-class mean AND per-class covariance.
    classes = np.unique(y)
    means = np.zeros((X.shape[1], len(classes)))
    covmats = []
    for col, label in enumerate(classes):
        members = X[(y == label).ravel()]
        means[:, col] = members.sum(axis=0) / len(members)
        covmats.append(np.cov(members.transpose()))
    return means, covmats
def ldaTest(means,covmat,Xtest,ytest):
    # Inputs
    # means, covmat - parameters of the LDA model
    # Xtest - a N x d matrix with each row corresponding to a test example
    # ytest - a N x 1 column vector indicating the labels for each test example
    # Outputs
    # acc - count of correctly classified test examples
    # ypred - N x 1 column vector indicating the predicted labels

    num_classes = len(means[0])
    inv_cov = np.linalg.inv(covmat)
    det_cov = np.linalg.det(covmat)
    ypred = np.zeros((Xtest.shape[0], 1))
    acc = 0
    for row in range(len(Xtest)):
        # Mahalanobis distance to each class mean, scaled by det(covmat)
        # (a class-independent constant, so the argmin is unchanged).
        scores = np.zeros(num_classes)
        for cls in range(num_classes):
            diff = np.subtract(Xtest[row], means[:, cls])
            scores[cls] = np.matmul(np.matmul(diff.transpose(), inv_cov), diff) / det_cov
        best = int(np.argmin(scores))
        ypred[row][0] = best + 1
        if ypred[row][0] == ytest[row][0]:
            acc += 1
    return acc, ypred
def qdaTest(means,covmats,Xtest,ytest):
    # Inputs
    # means, covmats - parameters of the QDA model
    # Xtest - a N x d matrix with each row corresponding to a test example
    # ytest - a N x 1 column vector indicating the labels for each test example
    # Outputs
    # acc - count of correctly classified test examples
    # ypred - N x 1 column vector indicating the predicted labels

    num_classes = len(means[0])
    ypred = np.zeros((Xtest.shape[0], 1))
    acc = 0
    for row in range(len(Xtest)):
        # Unnormalized Gaussian density per class:
        # exp(-0.5 * mahalanobis) / sqrt(det(cov_k)); pick the largest.
        scores = np.zeros(num_classes)
        for cls in range(num_classes):
            diff = np.subtract(Xtest[row], means[:, cls])
            maha = np.matmul(np.matmul(diff.transpose(), np.linalg.inv(covmats[cls])), diff)
            scores[cls] = np.exp(-0.5 * maha) / sqrt(np.linalg.det(covmats[cls]))
        best = int(np.argmax(scores))
        ypred[row][0] = best + 1
        if ypred[row][0] == ytest[row][0]:
            acc += 1
    return acc, ypred
def learnOLERegression(X,y):
    # Inputs:
    # X = N x d
    # y = N x 1
    # Output:
    # w = d x 1

    # Closed-form least squares via the normal equations:
    # w = (X^T X)^{-1} X^T y.
    gram = np.matmul(X.transpose(), X)
    w = np.matmul(np.matmul(inv(gram), X.transpose()), y)
    return w
def learnRidgeRegression(X,y,lambd):
    # Inputs:
    # X = N x d
    # y = N x 1
    # lambd = ridge parameter (scalar)
    # Output:
    # w = d x 1

    # Ridge normal equations: w = (lambd*I + X^T X)^{-1} X^T y.
    num_features = X.shape[1]
    regularized_gram = lambd * np.identity(num_features) + np.dot(X.transpose(), X)
    w = np.dot(np.dot(np.linalg.inv(regularized_gram), X.transpose()), y)
    return w
def testOLERegression(w,Xtest,ytest):
# Inputs:
# w = d x 1
# Xtest = N x d
# ytest = X x 1
# Output:
# mse
# IMPLEMENT THIS METHOD
term1=np.matmul(Xtest,w);
term2=np.subtract(ytest,term1);
mse=np.matmul(term2.transpose(),term2);
#mse = np.sum(np.square((ytest-np.dot(Xtest,w.reshape((w.shape[0],1))))))
mse/=Xtest.shape[0];
return mse
def regressionObjVal(w, X, y, lambd):
    # compute squared error (scalar) and gradient of squared error with respect
    # to w (vector) for the given data X and y and the regularization parameter
    # lambda

    # Ridge objective: 0.5*||y - Xw||^2 + 0.5*lambd*w^T w.
    # NOTE(review): w may arrive flattened (as scipy.optimize.minimize passes
    # it) or as a d x 1 column; the reshape below forces a column for the
    # residual while the gradient math relies on transpose() being a no-op
    # for 1-D w.  Changing the shape handling would alter results — confirm
    # before touching.
    rows=X.shape[0]
    # rows is computed but never used below; kept as-is.
    sumSquare = np.sum(np.square((y-np.dot(X,w.reshape((w.shape[0],1))))))
    error = (0.5 * sumSquare) + (0.5 * lambd * np.dot(w.T, w))
    # Gradient: (w^T X^T X - y^T X) + lambd*w^T, then transposed and
    # flattened so the optimizer receives a 1-D vector.
    error_grad=(np.dot(w.transpose(),np.dot(X.transpose(),X))-np.dot(y.transpose(),X))+lambd*w.transpose()
    error_grad=error_grad.transpose()
    error_grad=error_grad.flatten()
    return error, error_grad
def mapNonLinear(x,p):
    # Inputs:
    # x - a single column vector (N x 1)
    # p - integer (>= 0)
    # Outputs:
    # Xd - (N x (p+1))

    # Fix: the original body returned the undefined name Xd (NameError).
    # Implement the documented mapping: each scalar x_i becomes the
    # polynomial feature row [1, x_i, x_i^2, ..., x_i^p].
    x = np.asarray(x).ravel()
    Xd = np.zeros((x.shape[0], p + 1))
    for power in range(p + 1):
        Xd[:, power] = x ** power
    return Xd
# Main script

# Problem 1: LDA/QDA classification on the 2-D sample dataset.
# load the sample data
if sys.version_info.major == 2:
    X,y,Xtest,ytest = pickle.load(open('sample.pickle','rb'))
else:
    X,y,Xtest,ytest = pickle.load(open('sample.pickle','rb'),encoding = 'latin1')
# LDA
means,covmat = ldaLearn(X,y)
ldaacc,ldares = ldaTest(means,covmat,Xtest,ytest)
print('LDA Accuracy = '+str(ldaacc))
# QDA
means,covmats = qdaLearn(X,y)
qdaacc,qdares = qdaTest(means,covmats,Xtest,ytest)
print('QDA Accuracy = '+str(qdaacc))
# plotting boundaries
# Classify every point of a 100x100 grid to visualize the decision regions.
x1 = np.linspace(-5,20,100)
x2 = np.linspace(-5,20,100)
xx1,xx2 = np.meshgrid(x1,x2)
xx = np.zeros((x1.shape[0]*x2.shape[0],2))
xx[:,0] = xx1.ravel()
xx[:,1] = xx2.ravel()
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
zacc,zldares = ldaTest(means,covmat,xx,np.zeros((xx.shape[0],1)))
plt.contourf(x1,x2,zldares.reshape((x1.shape[0],x2.shape[0])),alpha=0.3)
plt.scatter(Xtest[:,0],Xtest[:,1],c=ytest)
plt.title('LDA')
plt.subplot(1, 2, 2)
zacc,zqdares = qdaTest(means,covmats,xx,np.zeros((xx.shape[0],1)))
plt.contourf(x1,x2,zqdares.reshape((x1.shape[0],x2.shape[0])),alpha=0.3)
plt.scatter(Xtest[:,0],Xtest[:,1],c=ytest)
plt.title('QDA')
plt.show()
# Problem 2: ordinary least squares with and without an intercept column.
if sys.version_info.major == 2:
    X,y,Xtest,ytest = pickle.load(open('diabetes.pickle','rb'))
else:
    X,y,Xtest,ytest = pickle.load(open('diabetes.pickle','rb'),encoding = 'latin1')
# add intercept
X_i = np.concatenate((np.ones((X.shape[0],1)), X), axis=1)
Xtest_i = np.concatenate((np.ones((Xtest.shape[0],1)), Xtest), axis=1)
w = learnOLERegression(X,y)
mle = testOLERegression(w,Xtest,ytest)
w_i = learnOLERegression(X_i,y)
mle_i = testOLERegression(w_i,Xtest_i,ytest)
print('MSE without intercept '+str(mle))
print('MSE with intercept '+str(mle_i))
# Problem 3: ridge regression, sweeping lambda over [0, 1].
k = 101
lambdas = np.linspace(0, 1, num=k)
i = 0
mses3_train = np.zeros((k,1))
mses3 = np.zeros((k,1))
for lambd in lambdas:
    w_l = learnRidgeRegression(X_i,y,lambd)
    mses3_train[i] = testOLERegression(w_l,X_i,y)
    mses3[i] = testOLERegression(w_l,Xtest_i,ytest)
    i = i + 1
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(lambdas,mses3_train)
plt.title('MSE for Train Data')
plt.subplot(1, 2, 2)
plt.plot(lambdas,mses3)
plt.title('MSE for Test Data')
plt.show()
# Problem 4: same ridge sweep, but minimizing the objective numerically
# with conjugate gradients instead of the closed-form solution.
k = 101
lambdas = np.linspace(0, 1, num=k)
i = 0
mses4_train = np.zeros((k,1))
mses4 = np.zeros((k,1))
opts = {'maxiter' : 20}    # Preferred value.
w_init = np.ones((X_i.shape[1],1))
for lambd in lambdas:
    args = (X_i, y, lambd)
    w_l = minimize(regressionObjVal, w_init, jac=True, args=args,method='CG', options=opts)
    w_l = np.transpose(np.array(w_l.x))
    w_l = np.reshape(w_l,[len(w_l),1])
    mses4_train[i] = testOLERegression(w_l,X_i,y)
    mses4[i] = testOLERegression(w_l,Xtest_i,ytest)
    i = i + 1
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(lambdas,mses4_train)
plt.plot(lambdas,mses3_train)
plt.title('MSE for Train Data')
plt.legend(['Using scipy.minimize','Direct minimization'])
plt.subplot(1, 2, 2)
plt.plot(lambdas,mses4)
plt.plot(lambdas,mses3)
plt.title('MSE for Test Data')
plt.legend(['Using scipy.minimize','Direct minimization'])
plt.show()
# Problem 5: polynomial feature mapping of attribute 3, with and without
# regularization.
pmax = 7
lambda_opt = 0 # REPLACE THIS WITH lambda_opt estimated from Problem 3
mses5_train = np.zeros((pmax,2))
mses5 = np.zeros((pmax,2))
for p in range(pmax):
    Xd = mapNonLinear(X[:,2],p)
    Xdtest = mapNonLinear(Xtest[:,2],p)
    w_d1 = learnRidgeRegression(Xd,y,0)
    mses5_train[p,0] = testOLERegression(w_d1,Xd,y)
    mses5[p,0] = testOLERegression(w_d1,Xdtest,ytest)
    w_d2 = learnRidgeRegression(Xd,y,lambda_opt)
    mses5_train[p,1] = testOLERegression(w_d2,Xd,y)
    mses5[p,1] = testOLERegression(w_d2,Xdtest,ytest)
fig = plt.figure(figsize=[12,6])
plt.subplot(1, 2, 1)
plt.plot(range(pmax),mses5_train)
plt.title('MSE for Train Data')
plt.legend(('No Regularization','Regularization'))
plt.subplot(1, 2, 2)
plt.plot(range(pmax),mses5)
plt.title('MSE for Test Data')
plt.legend(('No Regularization','Regularization'))
plt.show()
|
from django.urls import path
from backend.api_gateway.group_endpoint.views import group, update_driver, update_cost
# URL routes for the group endpoint: the root lists/handles groups; the two
# id-keyed routes update a group's driver and its cost respectively.
urlpatterns = [
path('', group, name='group'),
path('<int:group_id>', update_driver, name='update_driver'),
path('<int:group_id>/cost', update_cost, name='update_cost'),
]
|
import random
import string

# Interactive password generator: asks for a total length, how many letters
# and how many digits it should contain; any remaining characters are filled
# with special characters, and the result is shuffled before printing.
live = True
specadd = str("")
while live:
    user_input1 = int(input("How Long Do You Want Your Password?"))
    user_input2 = int(input("How Many Letters?"))
    # Re-prompt until the letter count fits in the total length.
    while user_input2 > user_input1:
        error_input1 = int(input("Number Higher Than Total Password Length, Please Try Again."))
        user_input2 = error_input1
        continue
    if user_input1 != user_input2:
        user_input3 = int(input("How Many Numbers?"))
        while user_input1 < user_input2 + user_input3:
            error_input3 = int(input("Combined Number Higher Than Total Password Length, Please Try Again."))
            # Fix: the original assigned the undefined name error_input2 here,
            # raising NameError on any re-prompt.
            user_input3 = error_input3
            continue
        if user_input1 != user_input2 + user_input3:
            # Fill the remaining slots with special characters.
            specadd = ""
            spec = user_input1 - (user_input2 + user_input3)
            charlist = "!/\"#$%&\'()*+,-./:;?@[\\]^_{|}~"
            speclist = list(charlist)
            while spec > 0:
                specadd = specadd + random.choice(speclist)
                spec = spec - 1
                continue
        if user_input1 == user_input2 + user_input3 + len(specadd):
            finalpw = str("")
            numrange = str("")
            while user_input2 > 0:
                user_input2 = user_input2 - 1
                finalpw = finalpw + random.choice(string.ascii_lowercase)
            while user_input3 > 0:
                user_input3 = user_input3 - 1
                # Fix: randrange(0, 9) excluded the digit 9; use 0..9.
                numrange = str(numrange) + str(random.randrange(0, 10))
            # Shuffle letters + digits + specials together.
            finalpw = str(finalpw) + str(numrange) + str(specadd)
            finalpw = list(finalpw)
            finalpw = random.sample(finalpw, len(finalpw))
            finalpw = ''.join(finalpw)
            print(finalpw)
            live = False
    elif user_input1 == user_input2:
        # All-letters password: no digit/special handling needed.
        finalpw = ""
        while user_input2 > 0:
            user_input2 = user_input2 - 1
            finalpw = finalpw + random.choice(string.ascii_lowercase)
        print(finalpw)
        live = False
|
from datetime import date

# Brazilian military enlistment checker: compares the user's birth year to the
# current year and reports whether enlistment is overdue, due now, or ahead.
atual = date.today().year
sexo = str(input('Qual o seu sexo? ')).strip().capitalize()
# Male path: enlistment is mandatory at 18.
if 'Homem' in sexo or 'Masculino' in sexo:
    nasc = int(input('Digite o ano de seu nascimento: '))
    idade = atual - nasc
    print(f'Você nasceu em {nasc}, tem {idade} ano(s) em {atual}')
    if idade > 18:
        saldo = idade - 18
        print(f'\033[31mVocê deveria ter se alistado há {saldo} ano(s)')
        print(f'\033[34mSeu alistamento foi em {atual -saldo}')
    elif idade == 18:
        print(f'\033[32mÉ AGORA! Neste ano de {atual}, você terá que se alistar!')
    elif idade < 18:
        saldo = 18 - idade
        print(f'\033[34mAinda não é tempo de seu alistamento')
        print(f'Seu alistamento será daqui a {saldo} ano(s) em {atual + saldo}')
# Female path: enlistment is optional; only proceed if the user wants to.
elif 'Feminino' in sexo or 'Mulher' in sexo:
    desejo = str(input('Mulheres não são obrigadas a alistarem-se, mas deseja se alistar? '))
    if 'Não' in desejo or 'Nao' in desejo:
        print('Tenha um bom dia!')
    # Fix: the original tested "elif 'Sim' in desejo or 'Quero':" — the bare
    # string 'Quero' is always truthy, so this branch ran for ANY answer
    # other than "Não".  Test membership for both words instead.
    elif 'Sim' in desejo or 'Quero' in desejo:
        nasc = int(input('Digite o ano de seu nascimento: '))
        idade = atual - nasc
        print(f'Você nasceu em {nasc}, tem {idade} ano(s) em {atual}')
        if idade > 18:
            saldo = idade -18
            print(f'\033[31mVocê deveria ter se alistado a {saldo} ano(s)')
            print(f'\033[34mO seu alistamento foi em {atual-saldo}')
        elif idade == 18:
            print(f'\033[32mÉ AGORA! Neste ano de {atual}, você terá que se alistar!')
        elif idade < 18:
            saldo = 18 - idade
            print('Ainda não é tempo de seu alistamento')
            print(f'Seu alistamento será daqui a {saldo} ano(s) em {atual + saldo}')
print('\033[34mTenha um bom dia!')
|
import network, time, urequests, utime, framebuf
from time import sleep
from machine import Pin, SoftSPI, I2C
from mfrc522 import MFRC522
from ssd1306 import SSD1306_I2C
# Status LEDs: one for an accepted registration, one for a rejected one.
registro_correcto = Pin(4, Pin.OUT)
registro_Incorrecto = Pin(2, Pin.OUT)
# MFRC522 RFID reader wired over software SPI; sda is its chip-select pin.
sck = Pin(18, Pin.OUT)
mosi = Pin(23, Pin.OUT)
miso = Pin(19, Pin.OUT)
spi = SoftSPI(baudrate=100000, polarity=0, phase=0, sck=sck, mosi=mosi, miso=miso)
sda = Pin(5, Pin.OUT)
# 128x64 SSD1306 OLED display on I2C bus 0.
ancho = 128
alto = 64
i2c = I2C(0, scl=Pin(22), sda=Pin(21))
oled = SSD1306_I2C(ancho, alto, i2c)
print(i2c.scan())  # debug: list detected I2C device addresses
#conexion a red wifi
def conectaWifi(red, password):
    """Connect the station WiFi interface to the given network.

    Returns True once connected (or if already connected), False after a
    ~10 second timeout.  Sets the module-level ``miRed`` interface as a
    side effect.
    """
    global miRed
    miRed = network.WLAN(network.STA_IF)
    if not miRed.isconnected():              # skip if already connected
        miRed.active(True)                   # bring the interface up
        # Fix: the original ignored both parameters and always connected to
        # the hard-coded 'Telenet_Silva' network with embedded credentials.
        miRed.connect(red, password)
        print('Conectando a la red', red + "…")
        timeout = time.time()
        while not miRed.isconnected():       # poll until connected or timed out
            if (time.ticks_diff(time.time(), timeout) > 10):
                return False
    return True
def buscar_icono(ruta):
    """Load a PBM (P4) image file and return it as a MONO_HLSB FrameBuffer.

    ruta: path to the .pbm file.  The first line (magic number) is skipped,
    the second line holds "<width> <height>", and the remainder is the raw
    bitmap data.
    """
    # Fix: use a context manager so the file is closed even if parsing fails
    # (the original leaked the handle on any exception before close()).
    with open(ruta, "rb") as dibujo:
        dibujo.readline()                 # skip the PBM magic-number line
        dimensiones = dibujo.readline().split()
        ancho_img = int(dimensiones[0])
        alto_img = int(dimensiones[1])
        icono = bytearray(dibujo.read())  # raw bitmap bytes
    return framebuf.FrameBuffer(icono, ancho_img, alto_img, framebuf.MONO_HLSB)
def do_read():
    """Main attendance loop: connect to WiFi, poll the RFID reader, drive the
    OLED display, and log recognized badge UIDs to IFTTT webhooks.

    Runs until interrupted with Ctrl-C (KeyboardInterrupt).
    """
    # Registered users: badge UID -> holder name, plus project/event labels.
    valor1 = "0x11d063a3"
    nomchip = "Andres_Ricardo_Silva"
    valor2 = "0xd6dc781a"
    nombre_tar = "Ricardo_Rodriguez"
    salinas = "Concesión_Salinas"
    alcalis = "Alcalis_cierre"
    alcalis_re = "Alcalis_Reconocimiento "
    res_pro = "Reservas_probables"
    fin = "Finanzas"
    try:
        while True:
            if conectaWifi("Telenet_Silva", "Andres1996*"):
                oled.text("Conexion exitosa!", 0, 20)
                oled.show()
                time.sleep(0.05)
                oled.fill(0)
                # IFTTT Maker Webhooks endpoints: one logs the registration...
                url = "https://maker.ifttt.com/trigger/Registro/with/key/bBwDkFTwnLS3YTX6waOYIi?"
                # ...and this one triggers the notification e-mail.
                correo = "https://maker.ifttt.com/trigger/Control_acceso/with/key/bBwDkFTwnLS3YTX6waOYIi?"
                # Show the company logo on the OLED.
                oled.blit(buscar_icono("Imagenes/Fiducoldex.pbm"), 3, 8)  # image path and screen position
                oled.show()
                time.sleep(4)
                oled.fill(0)
                oled.show()
                # Prompt the user to present a badge.
                oled.text("colocar carnet!", 0, 20)
                oled.show()
                time.sleep(3)
                oled.fill(0)
                oled.show()
                rdr = MFRC522(spi, sda)
                uid = ""
                (stat, tag_type) = rdr.request(rdr.REQIDL)
                if stat == rdr.OK:
                    (stat, raw_uid) = rdr.anticoll()
                    if stat == rdr.OK:
                        uid = ("0x%02x%02x%02x%02x" % (raw_uid[0], raw_uid[1], raw_uid[2], raw_uid[3]))
                        # Compare the scanned UID against the registered badges.
                        if valor1 == uid:
                            print ("Registro correcto de ",nomchip, alcalis.format(uid))
                            # Blink the success LED.
                            registro_correcto(1)
                            utime.sleep(0.6)
                            registro_correcto(0)
                            utime.sleep(0.1)
                            # Show confirmation on the display.
                            oled.text('Registro correcto', 0, 15)
                            oled.text(nomchip, 0, 30)
                            oled.show()
                            time.sleep(4)
                            oled.fill(0)
                            oled.show()
                            # Send the record to the IFTTT Maker Webhooks event...
                            respuesta_registro = urequests.get(url+"&value1="+str(nomchip)+"&value2="+(alcalis))
                            respuesta_registro.close ()
                            # ...and trigger the arrival notification e-mail.
                            respuesta_correo = urequests.get(correo+"&value1="+str(nomchip)+"&value2="+(alcalis))
                            respuesta_correo.close ()
                            time.sleep(1)
                        elif valor2 == uid:
                            print ("Registro correcto de ", nombre_tar, alcalis_re.format(uid))
                            # Blink the success LED.
                            registro_correcto(1)
                            utime.sleep(0.6)
                            registro_correcto(0)
                            utime.sleep(0.1)
                            # Show confirmation on the display.
                            oled.text('Registro correcto', 0, 15)
                            oled.text(nombre_tar, 0, 30)
                            oled.show()
                            time.sleep(4)
                            oled.fill(0)
                            oled.show()
                            # Send the record to the IFTTT Maker Webhooks event...
                            respuesta_registro = urequests.get(url+"&value1="+str(nombre_tar)+"&value2="+(alcalis_re))
                            respuesta_registro.close ()
                            # ...and trigger the arrival notification e-mail.
                            respuesta_correo = urequests.get(correo+"&value1="+str(nombre_tar)+"&value2="+(alcalis_re))
                            respuesta_correo.close ()
                            time.sleep(1)
                        else:
                            # Unknown badge: blink the error LED and report.
                            print("Error de autentificación")
                            registro_Incorrecto(1)
                            utime.sleep(0.6)
                            registro_Incorrecto(0)
                            utime.sleep(0.1)
                            oled.text("Error de ", 0, 15)
                            oled.text("autentificacion", 0, 25)
                            oled.show()
                            time.sleep(4)
                            oled.fill(0)
                            oled.show()
            else:
                print("Error de conexion con WIFI")
    except KeyboardInterrupt:
        print("Bye")
# Fix: the original called do_read() unconditionally at import time and then
# called the undefined name main() under the guard, raising NameError once the
# loop exited.  Run the reader loop only when executed as a script.
if __name__ == "__main__":
    do_read()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from smorest_sfs.modules.auth import ROLES
from tests._utils.injection import GeneralGet
class TestListView(GeneralGet):
    """Exercise the Role list/item endpoints as a RoleManager user."""

    fixture_names = ("flask_app_client", "flask_app", "regular_user")
    item_view = "Role.RoleItemView"
    listview = "Role.RoleListView"
    view = "Role.RoleView"
    login_roles = [ROLES.RoleManager]

    def test_get_options(self) -> None:
        """The OPTIONS probe inherited from GeneralGet succeeds."""
        self._get_options()

    def test_get_list(self) -> None:
        """Listing roles returns the expected role and permission fields."""
        data = self._get_list(name="e")
        first = data[0]
        required = {"id", "name", "permissions", "user_default", "group_default"}
        assert first.keys() >= required and first["permissions"][0].keys() == {"id", "name"}

    def test_get_item(self) -> None:
        """Fetching a single role returns audit fields plus permission details."""
        item = self._get_item(role_id=1)
        audit_fields = {"id", "name", "created", "modified", "deleted"}
        assert item.keys() > audit_fields and item["permissions"][0].keys() >= {"id", "name"}
|
import unittest
from twitter_search import new_tweet_request
# A few things to test for:
# - If we ask to get x number of tweets, it returns x number of tweets
# - It throws an error with a tag that has no tweets with it.
class search_test_case(unittest.TestCase):
    """Tests for new_tweet_request, the tweet-search Lambda-style handler."""

    def test_standard_request(self):
        """A valid search term yields the default batch of 20 tweets."""
        expected_JSON = {
            "queryStringParameters": {
                "term": "Wednesday"
            }
        }
        # Fix: the original used assertTrue(len(...), 20), which treats 20 as
        # the failure *message* and only checks that the length is non-zero.
        # Per the comment at the top of this module, the intent is to check
        # that asking for x tweets returns x tweets.
        self.assertEqual(len(new_tweet_request(expected_JSON, None)), 20)

    def test_no_tweets(self):
        """A term with no matching tweets produces the error response body."""
        expected_no_results_parameters_JSON = {
            "queryStringParameters": {
                "term": "N0Twe3tsWithThisLab3l"
            }
        }
        expected_no_results_parameters_return = {
            "statusCode": 200,
            "headers": {
                "Content-Type": "application/json",
                "Access-Control-Allow-Origin": "*"
            },
            "body": "{\"Error\": \"Not Enough Tweets Found\"}"
        }
        self.assertEqual(new_tweet_request(expected_no_results_parameters_JSON, None), expected_no_results_parameters_return)

    def test_unexpected_term(self):
        """A request missing the 'term' key produces the parameter error body."""
        unexpected_term_parameters = {
            "queryStringParameters": {
                "tem": "N0Twe3tsWithThisLab3l"
            }
        }
        unexpected_term_parameters_return = {
            "statusCode": 200,
            "headers": {
                "Content-Type": "application/json",
                "Access-Control-Allow-Origin": "*"
            },
            "body": "{\"Error\": \"\\\"term\\\" not included\"}"
        }
        self.assertEqual(new_tweet_request(unexpected_term_parameters, None), unexpected_term_parameters_return)
# Allow running this test module directly with python.
if __name__ == '__main__':
    unittest.main()
#coding: utf-8
import requests
import json
def post_data():
    """POST a sample service record to the local service_info endpoint.

    Fix: the body is serialized JSON (json.dumps) but the Content-Type header
    claimed application/x-www-form-urlencoded; the header now matches the
    payload so the server parses it as JSON.
    """
    headers = {"Content-Type": "application/json"}
    data = {"name": 4, "sn_num": '123sds4', "remark": '数据1'}
    url = 'http://127.0.0.1:8080/service_info/'
    cent = requests.post(url, data=json.dumps(data), headers=headers)
    print(cent.content)


post_data()
"""
Useful tools for manipulating pieces of the URL according to the various RFCs.
Original implementation is from the following repo:
https://github.com/rbaier/python-urltools
and used here with the MIT License, as posted here:
https://github.com/rbaier/python-urltools/blob/master/LICENSE
Copyright is Roderick Baier, 2014.
The implementations are from git SHA-1 76bf599aeb4cb463df8e38367aa40a7d8ec7d9a1
"""
import posixpath
# Lookup table mapping a two-character lowercase hex escape to its character.
_HEXTOCHR = {'%02x' % code: chr(code) for code in range(256)}


def unquote(text, exceptions=''):
    """Percent-decode *text*, leaving any escape listed in *exceptions* encoded.

    >>> unquote('foo%23bar')
    'foo#bar'
    >>> unquote('foo%23bar', ['#'])
    'foo%23bar'
    """
    if text is None:
        raise TypeError('None object cannot be unquoted')
    if not text or '%' not in text:
        return text
    pieces = text.split('%')
    decoded_parts = [pieces[0]]
    for piece in pieces[1:]:
        char = _HEXTOCHR.get(piece[:2])
        # A falsy decoded char ('%00' -> NUL) is deliberately left encoded,
        # matching the original truthiness test.
        if char and char not in exceptions:
            decoded_parts.append(char + piece[2:])
        else:
            decoded_parts.append('%' + piece)
    return ''.join(decoded_parts)


def normalize_path(path):
    """Collapse duplicate slashes and dot segments in *path*.

    >>> normalize_path('/a/b///c')
    '/a/b/c'

    Leading double slashes are reduced to a single slash, i.e.
    "//a/b/c" -> "/a/b/c".
    """
    if path in ('', '/', '//'):
        return '/'
    normalized = posixpath.normpath(unquote(path, exceptions='/?+#'))
    # normpath drops a trailing slash; restore it when the input had one.
    if path.endswith('/') and normalized != '/':
        normalized += '/'
    while normalized.startswith('//'):
        normalized = normalized[1:]
    return normalized
|
def bubble_sort(array: list):
    """Sort *array* in place by repeatedly swapping adjacent out-of-order pairs.

    Passes continue until a full sweep makes no swap.
    """
    swapped = True
    while swapped:
        swapped = False
        for idx in range(len(array) - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
def selection_sort(array: list):
    """Sort *array* in place: each pass swaps the minimum of the unsorted
    tail into the next position."""
    for pos in range(len(array)):
        smallest = min(range(pos, len(array)), key=array.__getitem__)
        array[pos], array[smallest] = array[smallest], array[pos]
def insertion_sort(array: list):
    """Sort *array* in place by sliding each element left into the sorted prefix.

    Fix: the original wrote the displaced value back to ``array[i]`` instead
    of ``array[j]``, which corrupted the list whenever an element had to move
    more than one position left (e.g. [3, 1, 2] stayed unsorted).
    """
    for i in range(1, len(array)):
        # Swap the first unsorted element leftwards until it finds its place.
        for j in reversed(range(i)):
            if array[j + 1] < array[j]:
                array[j], array[j + 1] = array[j + 1], array[j]
            else:
                break
from math import floor
def merge_sort(array: list, start: int = 0, end: int = None):
    """Sort array[start:end] in place via recursive merge sort.

    Fix: the original base case ``start == end - 1`` never matched for an
    empty list (0 == -1), causing infinite recursion on ``merge_sort([])``;
    any range of length <= 1 is now treated as already sorted.
    """
    if end is None:
        end = len(array)
    if end - start <= 1:
        return  # zero or one element: already sorted
    middle = (start + end) // 2
    # Sort each half, then merge them through a temporary buffer.
    merge_sort(array, start, middle)
    merge_sort(array, middle, end)
    merged, left, right = [], start, middle
    while left < middle and right < end:
        if array[right] < array[left]:
            merged.append(array[right])
            right += 1
        else:
            merged.append(array[left])
            left += 1
    merged.extend(array[left:middle])
    merged.extend(array[right:end])
    array[start:start + len(merged)] = merged
# Quick smoke test: sort a sample list in place and print the result.
array = [4, 3, 1, 6, 2, 0, 5]
merge_sort(array)
print(array)
|
'''a bootstrap server, returns a short list of nodes that are alive'''
import socket
import time
import random
def BootstrapServer():
    """Run a bootstrap node: accept peers, perform the plaintext handshake,
    record each peer, and reply with a padded list of known peer addresses.

    Runs forever; peer state lives in the local ``nodes`` dict keyed by the
    client's public key.
    """
    nodes = {}
    host = "127.0.0.1"  # loopback address
    port = 5555
    publicKey = hex(random.randint(0, 256**64 - 1))[2:]
    socket1 = socket.socket()
    socket1.bind((host, port))
    socket1.settimeout(1)
    socket1.listen(512)
    print("server Listening")
    while (True):
        try:
            connection, address = socket1.accept()
        # Fix: was a bare ``except:`` that swallowed every exception,
        # including KeyboardInterrupt/SystemExit; only retry on the
        # socket/OS errors accept() can raise (socket.timeout is an OSError).
        except OSError:
            time.sleep(1)
            continue
        peerHandshake = connection.recv(256).decode("utf-8")  # blocks until data arrives
        if not peerHandshake:
            continue  # peer closed without sending a handshake
        print("Debug: Recived: " + str(peerHandshake))
        print("Debug: address: " + str(address))
        processed = peerHandshake.split(":")
        clientPublicKey = processed[0]
        version = processed[1]
        clientName = processed[2]
        handshake = publicKey + ":" + "0000" + ":" + "bootstrap" + ":" + "Bootstrap.py:v0.0" + ":"
        handshake = handshake.ljust(192, ' ').ljust(256, '=')
        connection.send(handshake.encode("utf-8"))
        # Placeholder for a future encrypted/signed handshake round-trip:
        # the peer would echo our handshake signed, and we would respond with
        # theirs signed, proving both directions.
        connection.recv(256).decode("utf-8")
        connection.send(handshake.encode("utf-8"))
        # assume connection and peer are verified at this point
        # all communication should be encrypted beyond this point
        nodes[clientPublicKey] = (clientPublicKey, version, clientName, str(address), int(time.time()))
        # Reply with 64 addresses sampled (with replacement) from known peers.
        # Fix: removed a stray random.randint() call whose result was
        # discarded, and hoisted the loop-invariant sorted() out of the loop.
        bootstrapIP = ""
        keys = sorted(nodes.keys())
        for i in range(64):
            bootstrapIP += nodes[keys[random.randint(0, len(keys) - 1)]][3] + ":"
        bootstrapIP = "BootstrapIP:" + bootstrapIP
        bootstrapIP = bootstrapIP.ljust(2048, "_")
        connection.send(bootstrapIP.encode("utf-8"))
        connection.shutdown(socket.SHUT_RDWR)  # close() alone does not end the connection immediately
        connection.close()
def DummyClient():
    """Starts a TCP client.

    Connects to the bootstrap server on loopback, performs the plaintext
    handshake with a random throwaway identity, and prints the padded
    peer-address list the server sends back.
    """
    host = "127.0.0.1"  # loopback address
    port = 5555
    socket1 = socket.socket()
    socket1.settimeout(1)
    socket1.connect((host, port))
    # Random throwaway public key, upper-cased and zero-padded to 128 chars.
    publicKey = hex(random.randint(0, 256**64 - 1))[2:]
    publicKey = publicKey.upper().rjust(128, "0")
    version = 'v0.0'
    clientName = "DummyClient"
    # Handshake format: "<pubkey>:<version>:<name>:" padded to 256 bytes.
    handshake = publicKey + ":" + version + ":" + clientName + ":"
    handshake = handshake.ljust(256, '_')
    print("Sending data: " + str(handshake))
    socket1.send(handshake.encode('utf-8'))
    data = socket1.recv(256).decode('utf-8')
    print("\tRecived data: " + str(data))
    '''the next step would be to wait to receve a handshake encrypted and signed by the peer, to verify connection,
    then this would respond with their original handshake encrypted and signed by server, to verify connection to them
    '''
    socket1.send(handshake.encode("utf-8"))
    socket1.recv(256).decode("utf-8")
    # assume connection and peer are verified at this point
    # all communication should be encrypted beyond this point
    data = socket1.recv(2048).decode('utf-8')
    print("\tRecived data: " + str(data))
    socket1.close()
# Manual harness: answer 'f' to fire a burst of 64 dummy clients at the
# server; anything else starts the bootstrap server itself.
ClientServer = input("Is it a server? (T/F):").lower()
if ClientServer == 'f':
    print("Running TCP dummy Client")
    for i in range(64):
        DummyClient()
        time.sleep(0)
else:
    print("Running TCP Server")
    BootstrapServer()
|
import numpy

# Demonstrate numpy.min over the whole array and along each axis.
a = numpy.array([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9],
                 [10, 11, 12]])
print(numpy.min(a))
print(numpy.min(a, axis = 0))
print(numpy.min(a, axis = 1))

# Parse a "rows cols" header plus a list of space-separated rows, then print
# the maximum of the per-row minimums.
s1 = "4 2"
s2 = ["2 5", "3 7", "1 3", "4 0"]
row, col = (int(token) for token in s1.split())
a = [[int(token) for token in line.split()] for line in s2]
print(max(numpy.min(a, axis = 1)))
|
'''Write a Python function to check whether a number is in a given range. '''
def test_range(num):
if num in range(1,5):
print(f" {num} is in the range")
else :
print(f"{num} is Not in given range.")
test_range(2)
test_range(22)
|
# Generated by Django 2.2.6 on 2020-03-24 09:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the JobInfo table with a foreign key
    # to the recruiting user (cascade-deleted with the user).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('candidate', '0008_recruiterprofileinfo'),
    ]

    operations = [
        migrations.CreateModel(
            name='JobInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('job_name', models.CharField(max_length=50)),
                ('job_description', models.TextField()),
                ('experience', models.CharField(max_length=1)),
                ('deadline', models.DateField()),
                ('skill', models.CharField(max_length=30)),
                ('salary', models.IntegerField()),
                ('posting_date', models.DateField()),
                ('recruiter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# PUBLIC DOMAIN NOTICE
# National Center for Biotechnology Information
#
# This software is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the authors' official duties as United States Government employees and
# thus cannot be copyrighted. This software is freely available
# to the public for use. The National Library of Medicine and the U.S.
# Government have not placed any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NLM and the U.S.
# Government do not and cannot warrant the performance or results that
# may be obtained by using this software or data. The NLM and the U.S.
# Government disclaim all warranties, express or implied, including
# warranties of performance, merchantability or fitness for any particular
# purpose.
#
# Please cite NCBI in any work or product based on this material.
"""
Tests for elb/elb_config.py
Author: Greg Boratyn (boratyn@ncbi.nlm.nih.gov)
Created: Fri 26 Feb 2021 05:28:21 AM EDT
"""
import re
import configparser
from dataclasses import dataclass, fields
from unittest.mock import MagicMock, patch
from elb.constants import CSP
from elb.constants import CFG_CLOUD_PROVIDER
from elb.constants import CFG_CP_GCP_PROJECT, CFG_CP_GCP_REGION, CFG_CP_GCP_ZONE
from elb.constants import CFG_CP_GCP_NETWORK, CFG_CP_GCP_SUBNETWORK
from elb.constants import CFG_CP_AWS_REGION, CFG_CP_AWS_VPC, CFG_CP_AWS_SUBNET
from elb.constants import CFG_CP_AWS_JOB_ROLE, CFG_CP_AWS_BATCH_SERVICE_ROLE
from elb.constants import CFG_CP_AWS_INSTANCE_ROLE, CFG_CP_AWS_SPOT_FLEET_ROLE
from elb.constants import CFG_CP_AWS_SECURITY_GROUP, CFG_CP_AWS_KEY_PAIR
from elb.constants import CFG_BLAST, CFG_BLAST_PROGRAM, CFG_BLAST_DB
from elb.constants import CFG_BLAST_DB_SRC, CFG_BLAST_RESULTS, CFG_BLAST_QUERY
from elb.constants import CFG_BLAST_OPTIONS, CFG_BLAST_BATCH_LEN
from elb.constants import CFG_BLAST_MEM_REQUEST, CFG_BLAST_MEM_LIMIT
from elb.constants import CFG_BLAST_TAXIDLIST, CFG_BLAST_DB_MEM_MARGIN
from elb.constants import ELB_BLASTDB_MEMORY_MARGIN
from elb.constants import CFG_CLUSTER, CFG_CLUSTER_NAME, CFG_CLUSTER_MACHINE_TYPE
from elb.constants import CFG_CLUSTER_NUM_NODES, CFG_CLUSTER_NUM_CPUS
from elb.constants import CFG_CLUSTER_PD_SIZE, CFG_CLUSTER_USE_PREEMPTIBLE
from elb.constants import CFG_CLUSTER_DRY_RUN, CFG_CLUSTER_DISK_TYPE
from elb.constants import CFG_CLUSTER_PROVISIONED_IOPS, CFG_CLUSTER_BID_PERCENTAGE
from elb.constants import CFG_CLUSTER_LABELS, CFG_CLUSTER_EXP_USE_LOCAL_SSD
from elb.constants import CFG_CLUSTER_MIN_NODES, CFG_CLUSTER_MAX_NODES
from elb.constants import CFG_CLUSTER_ENABLE_STACKDRIVER
from elb.constants import ELB_DFLT_NUM_NODES
from elb.constants import ELB_DFLT_USE_PREEMPTIBLE
from elb.constants import ELB_DFLT_GCP_PD_SIZE, ELB_DFLT_AWS_PD_SIZE
from elb.constants import ELB_DFLT_GCP_MACHINE_TYPE, ELB_DFLT_AWS_MACHINE_TYPE
from elb.constants import ELB_DFLT_INIT_PV_TIMEOUT, ELB_DFLT_BLAST_K8S_TIMEOUT
from elb.constants import ELB_DFLT_AWS_SPOT_BID_PERCENTAGE
from elb.constants import ELB_DFLT_AWS_DISK_TYPE, ELB_DFLT_OUTFMT
from elb.base import ConfigParserToDataclassMapper, ParamInfo, DBSource
from elb.base import InstanceProperties
from elb.elb_config import CloudURI, GCPString, AWSRegion
from elb.elb_config import GCPConfig, AWSConfig, BlastConfig, ClusterConfig
from elb.elb_config import ElasticBlastConfig, get_instance_props
from elb.constants import ElbCommand
from elb.util import UserReportError, get_query_batch_size
import pytest
def test_default_labels():
    """Verify the cluster labels generated by a default ElasticBlastConfig."""
    cfg = ElasticBlastConfig(gcp_project = 'test-gcp-project',
                             gcp_region = 'test-gcp-region',
                             gcp_zone = 'test-gcp-zone',
                             program = 'blastn',
                             db = 'My:Fancy*DB65',
                             queries = 'test-queries.fa',
                             results = 'gs://some-bucket-with-interesting-name',
                             cluster_name = 'some-cluster-name',
                             task = ElbCommand.SUBMIT)
    labels = cfg.cluster.labels
    # "Label keys must start with a lowercase letter."
    # From https://cloud.google.com/compute/docs/labeling-resources#label_format
    assert(not re.search(r'[A-Z]', labels))
    # Parse generated labels ("key=value" pairs joined by commas) and verify
    # some parts.
    parts = labels.split(',')
    label_dict = {key: value for key, value in map(lambda x: x.split('='), parts)}
    assert(label_dict['project'] == 'elastic-blast')
    assert(label_dict['cluster-name'] == 'some-cluster-name')
    assert('client-hostname' in label_dict)
    assert('created' in label_dict)
    # Creation timestamp must look like YYYY-MM-DD-HH-MM-SS.
    created_date = label_dict['created']
    assert(re.match(r'[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{2}', created_date))
    assert(label_dict['owner'] == label_dict['creator'])
    # Special characters in db and results are sanitized for GCP label rules.
    assert(label_dict['db'] == 'my-fancy-db65')
    assert(label_dict['program'] == 'blastn')
    assert(label_dict['billingcode'] == 'elastic-blast')
    assert(label_dict['results'] == 'gs---some-bucket-with-interesting-name')
    print('labels', labels)
def test_clouduri():
    """CloudURI accepts gs:// and s3:// bucket URIs and rejects anything else."""
    assert issubclass(CloudURI, str)
    accepted = ('gs://bucket-123', 's3://bucket-123')
    for good in accepted:
        assert CloudURI(good) == good
    rejected = (123, 'bucket', 'gs:bucket', 's3//bucket', 's3://bucket!@#$')
    for bad in rejected:
        with pytest.raises(ValueError):
            CloudURI(bad)
    # A valid URI exposes a non-empty md5 digest.
    assert len(CloudURI('s3://bucket').md5)
def test_gcpstring():
    """GCPString accepts lowercase dashed names and rejects invalid ones."""
    assert issubclass(GCPString, str)
    accepted = ('us-east-4b', 'some-string', 'name-1234')
    for good in accepted:
        assert GCPString(good) == good
    rejected = ('UPPERCASE', 'some@name', '')
    for bad in rejected:
        with pytest.raises(ValueError):
            GCPString(bad)
def test_awsregion():
    """Test AWSRegion type"""
    assert issubclass(AWSRegion, str)
    # mixed-case alphanumeric names with dashes are accepted verbatim
    for good in ('us-east-1', 'some-Region', 'REGION-123'):
        assert AWSRegion(good) == good
    # special characters and empty strings are rejected
    for bad in ('re@ion', 'region-!@#', ''):
        with pytest.raises(ValueError):
            AWSRegion(bad)
def test_gcpconfig():
    """Test GCPConfig defaults"""
    project, region, zone = 'test-project', 'test-region', 'test-zone'
    cfg = GCPConfig(project=project, region=region, zone=zone)
    # cloud provider identity and constructor values are preserved
    assert cfg.cloud == CSP.GCP
    assert (cfg.project, cfg.region, cfg.zone) == (project, region, zone)
    # optional networking settings default to empty
    assert not cfg.network
    assert not cfg.subnet
    # a default config must validate cleanly for submit
    messages = []
    cfg.validate(messages, ElbCommand.SUBMIT)
    assert not messages
def test_gcpconfig_validation():
    """Test GCPConfig validation"""
    cfg = GCPConfig(project='test-project',
                    region='test-region',
                    zone='test-zone')
    # setting only one of network/subnet must be reported as an error
    for network, subnet in (('some-network', None), (None, 'subnet')):
        cfg.network = network
        cfg.subnet = subnet
        messages = []
        cfg.validate(messages, ElbCommand.SUBMIT)
        assert messages
        assert any('gcp-network and gcp-subnetwork' in m for m in messages)
def test_gcpconfig_from_configparser():
    """Test GCPConfig initialized from a ConfigParser object"""
    PROJECT = 'test-project'
    REGION = 'test-region'
    ZONE = 'test-zone'
    NETWORK = 'network'
    SUBNET = 'subnet'
    confpars = configparser.ConfigParser()
    confpars[CFG_CLOUD_PROVIDER] = {CFG_CP_GCP_PROJECT: PROJECT,
                                    CFG_CP_GCP_REGION: REGION,
                                    CFG_CP_GCP_ZONE: ZONE,
                                    CFG_CP_GCP_NETWORK: NETWORK,
                                    CFG_CP_GCP_SUBNETWORK: SUBNET}
    cfg = GCPConfig.create_from_cfg(confpars)
    # every value from the config file must be carried over verbatim
    assert cfg.cloud == CSP.GCP
    assert cfg.project == PROJECT
    assert cfg.region == REGION
    assert cfg.zone == ZONE
    assert cfg.network == NETWORK
    assert cfg.subnet == SUBNET
    # network and subnet are both set, so validation must pass
    errors = []
    cfg.validate(errors, ElbCommand.SUBMIT)
    assert not errors
def test_gcpconfig_from_configparser_missing():
    """Test missing required parameters are reported when initializing
    GCPConfig from a ConfigParser object"""
    with pytest.raises(ValueError) as err:
        GCPConfig.create_from_cfg(configparser.ConfigParser())
    message = str(err.value)
    # each required parameter must be named in the error message
    for param in (CFG_CP_GCP_PROJECT, CFG_CP_GCP_REGION, CFG_CP_GCP_ZONE):
        assert 'Missing ' + param in message
def test_gcpconfig_from_configparser_errors():
    """Test that incorrect parameter values in ConfigParser are properly
    reported"""
    confpars = configparser.ConfigParser()
    # each value below violates GCP naming rules (uppercase/special chars)
    confpars[CFG_CLOUD_PROVIDER] = {CFG_CP_GCP_PROJECT: 'inval!d-PROJECT',
                                    CFG_CP_GCP_REGION: 'invalie-rEg!on',
                                    CFG_CP_GCP_ZONE: 'inavlid-zone-@#$'}
    with pytest.raises(ValueError) as err:
        cfg = GCPConfig.create_from_cfg(confpars)
    # test that each invalid parameter value is reported
    errors = str(err.value).split('\n')
    for key in confpars[CFG_CLOUD_PROVIDER]:
        assert [message for message in errors if key in message and 'invalid value' in message and confpars[CFG_CLOUD_PROVIDER][key] in message]
def test_awsconfig():
    """Test AWSConfig defaults"""
    cfg = AWSConfig(region='test-region')
    assert cfg.region == 'test-region'
    # every optional AWS resource defaults to an empty value
    for attr in ('vpc', 'subnet', 'security_group', 'key_pair', 'job_role',
                 'instance_role', 'batch_service_role', 'spot_fleet_role'):
        assert not getattr(cfg, attr)
    # a default config must validate cleanly for submit
    messages = []
    cfg.validate(messages, ElbCommand.SUBMIT)
    assert not messages
def test_awsconfig_from_configparser():
    """Test AWSConfig initialized from a ConfigParser object"""
    REGION = 'test-region'
    VPC = 'test-vpc'
    SUBNET = 'test-subnet'
    SECURITY_GROUP = 'test-security-group'
    KEY_PAIR = 'test-key-pair'
    JOB_ROLE = 'test-job-role'
    INSTANCE_ROLE = 'test-instance-role'
    BATCH_SERV_ROLE = 'test-batch-service-role'
    SPOT_FLEET_ROLE = 'test-spot-fleet-role'
    confpars = configparser.ConfigParser()
    confpars[CFG_CLOUD_PROVIDER] = {CFG_CP_AWS_REGION: REGION,
                                    CFG_CP_AWS_VPC: VPC,
                                    CFG_CP_AWS_SUBNET: SUBNET,
                                    CFG_CP_AWS_SECURITY_GROUP: SECURITY_GROUP,
                                    CFG_CP_AWS_KEY_PAIR: KEY_PAIR,
                                    CFG_CP_AWS_JOB_ROLE: JOB_ROLE,
                                    CFG_CP_AWS_INSTANCE_ROLE: INSTANCE_ROLE,
                                    CFG_CP_AWS_BATCH_SERVICE_ROLE: BATCH_SERV_ROLE,
                                    CFG_CP_AWS_SPOT_FLEET_ROLE: SPOT_FLEET_ROLE}
    cfg = AWSConfig.create_from_cfg(confpars)
    # every value from the config file must be carried over verbatim
    assert cfg.region == REGION
    assert cfg.vpc == VPC
    assert cfg.subnet == SUBNET
    assert cfg.security_group == SECURITY_GROUP
    assert cfg.key_pair == KEY_PAIR
    assert cfg.job_role == JOB_ROLE
    assert cfg.instance_role == INSTANCE_ROLE
    assert cfg.batch_service_role == BATCH_SERV_ROLE
    assert cfg.spot_fleet_role == SPOT_FLEET_ROLE
    # a fully-specified config must validate cleanly for submit
    errors = []
    cfg.validate(errors, ElbCommand.SUBMIT)
    assert not errors
def test_awsconfig_from_configparser_missing():
    """Test missing required parameters are reported when initializing
    AWSConfig from a ConfigParser object"""
    # aws region is the only required cloud-provider parameter for AWS
    REQUIRED_PARAMS = [CFG_CP_AWS_REGION]
    with pytest.raises(ValueError) as err:
        cfg = AWSConfig.create_from_cfg(configparser.ConfigParser())
    for param in REQUIRED_PARAMS:
        assert 'Missing ' + param in str(err.value)
def test_blastconfig():
    """Test BlastConfig defaults"""
    PROGRAM = 'blastn'
    DB = 'test-db'
    QUERIES = 'test-queries'
    cloud_provider = GCPConfig(project = 'test-project',
                               region = 'test-region',
                               zone = 'test-zone')
    machine_type = 'n1-standard-32'
    cfg = BlastConfig(program = PROGRAM,
                      db = DB,
                      queries_arg = QUERIES,
                      cloud_provider = cloud_provider,
                      machine_type = machine_type)
    assert cfg.program == PROGRAM
    assert cfg.db == DB
    assert cfg.queries_arg == QUERIES
    # database source defaults to the cloud provider in use
    assert cfg.db_source.name == cloud_provider.cloud.name
    # batch length defaults to a per-program value
    assert cfg.batch_len == get_query_batch_size(cfg.program)
    assert not cfg.queries
    # default BLAST options only set the output format
    assert cfg.options == f'-outfmt {ELB_DFLT_OUTFMT}'
    # memory request/limit get non-empty defaults
    assert cfg.mem_request
    assert cfg.mem_limit
    assert not cfg.taxidlist
    assert cfg.db_mem_margin == ELB_BLASTDB_MEMORY_MARGIN
def test_blastconfig_validation():
    """Test BlastConfig validation"""
    bad_uri = 'gs://@BadURI!'
    provider = GCPConfig(project='test-project',
                         region='test-region',
                         zone='test-zone')
    cfg = BlastConfig(program='blastn',
                      db='test-db',
                      queries_arg=bad_uri,
                      cloud_provider=provider,
                      machine_type='n1-standard-32')
    # the malformed query URI must be reported by validation
    messages = []
    cfg.validate(messages, ElbCommand.SUBMIT)
    assert messages
    assert any(bad_uri in m for m in messages)
# mock out the AWS call that looks up instance properties (32 CPUs, 120GB)
@patch(target='elb.elb_config.aws_get_machine_properties', new=MagicMock(return_value=InstanceProperties(32, 120)))
def test_blastconfig_from_configparser():
    """Test BlastConfig initialized from a ConfigParser object"""
    PROGRAM = 'blastn'
    DB = 'test-db'
    QUERIES = 'test-queries'
    DB_SOURCE = 'GCP'
    BATCH_LEN = 5000
    OPTIONS = f'test options -outfmt {ELB_DFLT_OUTFMT}'
    MEM_REQUEST = '1.3G'
    MEM_LIMIT = '21.9G'
    DB_MEM_MARGIN = 91.6
    confpars = configparser.ConfigParser()
    confpars[CFG_BLAST] = {CFG_BLAST_PROGRAM: PROGRAM,
                           CFG_BLAST_DB: DB,
                           CFG_BLAST_QUERY: QUERIES,
                           CFG_BLAST_DB_SRC: DB_SOURCE,
                           CFG_BLAST_BATCH_LEN: str(BATCH_LEN),
                           CFG_BLAST_OPTIONS: OPTIONS,
                           CFG_BLAST_MEM_REQUEST: MEM_REQUEST,
                           CFG_BLAST_MEM_LIMIT: MEM_LIMIT,
                           CFG_BLAST_DB_MEM_MARGIN: str(DB_MEM_MARGIN)}
    cfg = BlastConfig.create_from_cfg(confpars,
                                      cloud_provider = AWSConfig(region = 'test-region'),
                                      machine_type = 'test-machine-type')
    # every value from the config file must be carried over, with numeric
    # strings parsed into their proper types
    assert cfg.program == PROGRAM
    assert cfg.db == DB
    assert cfg.queries_arg == QUERIES
    assert cfg.db_source == DBSource[DB_SOURCE]
    assert cfg.batch_len == BATCH_LEN
    assert not cfg.queries
    assert cfg.options == OPTIONS
    assert cfg.mem_limit == MEM_LIMIT
    assert cfg.mem_request == MEM_REQUEST
    # taxid list is later parsed from BLAST options
    assert not cfg.taxidlist
    assert cfg.db_mem_margin == DB_MEM_MARGIN
    errors = []
    cfg.validate(errors, ElbCommand.SUBMIT)
    assert not errors
def test_blastconfig_from_configparser_missing():
    """Test BlastConfig initialization from a ConfigParser object with missing
    required parameters"""
    with pytest.raises(ValueError) as err:
        BlastConfig.create_from_cfg(configparser.ConfigParser(),
                                    cloud_provider=AWSConfig(region='test-region'),
                                    machine_type='test-machine-type')
    message = str(err.value)
    # each required BLAST parameter must be named in the error message
    for param in (CFG_BLAST_PROGRAM, CFG_BLAST_DB, CFG_BLAST_QUERY):
        assert 'Missing ' + param in message
# mock out the AWS call that looks up instance properties (32 CPUs, 120GB)
@patch(target='elb.elb_config.aws_get_machine_properties', new=MagicMock(return_value=InstanceProperties(32, 120)))
def test_blastconfig_from_configparser_errors():
    """Test that incorrect parameter values in ConfigParser are properly
    reported"""
    # each value below is invalid for its parameter
    PROGRAM = 'some-program'
    DB_SOURCE = 'some-db-source'
    BATCH_LEN = -5
    MEM_LIMIT = '5'
    MEM_REQUEST = -5
    DB_MEM_MARGIN = 'margin'
    confpars = configparser.ConfigParser()
    confpars[CFG_BLAST] = {CFG_BLAST_PROGRAM: PROGRAM,
                           CFG_BLAST_DB: 'some-db',
                           CFG_BLAST_QUERY: 'some-query',
                           CFG_BLAST_DB_SRC: DB_SOURCE,
                           CFG_BLAST_BATCH_LEN: str(BATCH_LEN),
                           CFG_BLAST_MEM_LIMIT: str(MEM_LIMIT),
                           CFG_BLAST_MEM_REQUEST: str(MEM_REQUEST),
                           CFG_BLAST_DB_MEM_MARGIN: str(DB_MEM_MARGIN)}
    with pytest.raises(ValueError) as err:
        cfg = BlastConfig.create_from_cfg(confpars,
                                          cloud_provider = AWSConfig(region = 'test-region'),
                                          machine_type = 'some-machine-type')
    # test that each invalid parameter value is reported
    errors = str(err.value).split('\n')
    for key in [CFG_BLAST_PROGRAM,
                CFG_BLAST_DB_SRC,
                CFG_BLAST_BATCH_LEN,
                CFG_BLAST_MEM_LIMIT,
                CFG_BLAST_MEM_REQUEST,
                CFG_BLAST_DB_MEM_MARGIN]:
        assert [message for message in errors if key in message and 'invalid value' in message and confpars[CFG_BLAST][key] in message]
def test_clusterconfig_gcp():
    """Test ClusterConfig defaults for GCP"""
    RESULTS = CloudURI('gs://test-results')
    gcp_cfg = GCPConfig(project = 'test-project',
                        region = 'test-region',
                        zone = 'test-zone')
    cfg = ClusterConfig(cloud_provider = gcp_cfg, results = RESULTS)
    assert cfg.name.startswith('elasticblast')
    assert cfg.machine_type == ELB_DFLT_GCP_MACHINE_TYPE
    assert cfg.pd_size == ELB_DFLT_GCP_PD_SIZE
    # one CPU fewer than the machine type offers -- presumably reserved
    # for system processes; TODO confirm (the AWS default uses all CPUs)
    assert cfg.num_cpus == get_instance_props(gcp_cfg, cfg.machine_type).ncpus - 1
    assert cfg.num_nodes == ELB_DFLT_NUM_NODES
    assert cfg.results == RESULTS
    # optional features are all off/empty by default
    assert not cfg.min_nodes
    assert not cfg.max_nodes
    assert not cfg.use_preemptible
    assert not cfg.iops
    assert not cfg.labels
    assert not cfg.use_local_ssd
    assert not cfg.enable_stackdriver
    errors = []
    cfg.validate(errors, ElbCommand.SUBMIT)
    assert not errors
# mock out the AWS call that looks up instance properties (32 CPUs, 120GB)
@patch(target='elb.elb_config.aws_get_machine_properties', new=MagicMock(return_value=InstanceProperties(32, 120)))
def test_clusterconfig_aws():
    """Test ClusterConfig defaults for AWS"""
    RESULTS = CloudURI('s3://test-results')
    aws_cfg = AWSConfig(region = 'test-region')
    cfg = ClusterConfig(cloud_provider = aws_cfg, results = RESULTS)
    assert cfg.name.startswith('elasticblast')
    assert cfg.results == RESULTS
    assert cfg.machine_type == ELB_DFLT_AWS_MACHINE_TYPE
    assert cfg.pd_size == ELB_DFLT_AWS_PD_SIZE
    # on AWS all CPUs of the instance type are used
    assert cfg.num_cpus == get_instance_props(aws_cfg, cfg.machine_type).ncpus
    assert cfg.num_nodes == ELB_DFLT_NUM_NODES
    # optional features are all off/empty by default
    assert not cfg.min_nodes
    assert not cfg.max_nodes
    assert not cfg.use_preemptible
    assert cfg.disk_type == ELB_DFLT_AWS_DISK_TYPE
    assert not cfg.iops
    assert cfg.bid_percentage == int(ELB_DFLT_AWS_SPOT_BID_PERCENTAGE)
    assert not cfg.labels
    assert not cfg.use_local_ssd
    assert not cfg.enable_stackdriver
    errors = []
    cfg.validate(errors, ElbCommand.SUBMIT)
    assert not errors
def test_clusterconfig_validation():
    """Test ClusterConfig validation"""
    provider = GCPConfig(project='test-project',
                         region='test-region',
                         zone='test-zone')
    # min_nodes without a matching max_nodes must be flagged
    cfg = ClusterConfig(cloud_provider=provider,
                        results=CloudURI('gs://test-results'),
                        min_nodes=5)
    messages = []
    cfg.validate(messages, ElbCommand.SUBMIT)
    assert messages
    assert any('min-nodes and max-nodes' in m for m in messages)
# mock out the AWS call that looks up instance properties (32 CPUs, 120GB)
@patch(target='elb.elb_config.aws_get_machine_properties', new=MagicMock(return_value=InstanceProperties(32, 120)))
def test_clusterconfig_from_configparser():
    """Test ClusterConfig initialized from a ConfigParser object"""
    RESULTS = 's3://test-bucket'
    NAME = 'test-name'
    MACHINE_TYPE = 'test-machine-type'
    PD_SIZE = 'test-pd-size'
    NUM_CPUS = 123
    NUM_NODES = 5000
    MIN_NODES = 12
    MAX_NODES = 999
    USE_PREEMPTIBLE = 'Yes'
    DISK_TYPE = 'test-disk-type'
    IOPS = 987
    BID_PERC = 45
    LABELS = 'test-labels'
    USE_LOCAL_SSD = 'yes'
    ENABLE_STACKDRIVER = 'true'
    confpars = configparser.ConfigParser()
    # all values are passed as strings, as they would appear in a config
    # file (IOPS and BID_PERC were previously passed as bare ints)
    confpars[CFG_CLUSTER] = {CFG_CLUSTER_NAME: NAME,
                             CFG_CLUSTER_MACHINE_TYPE: MACHINE_TYPE,
                             CFG_CLUSTER_PD_SIZE: PD_SIZE,
                             CFG_CLUSTER_NUM_CPUS: str(NUM_CPUS),
                             CFG_CLUSTER_NUM_NODES: str(NUM_NODES),
                             CFG_CLUSTER_MIN_NODES: str(MIN_NODES),
                             CFG_CLUSTER_MAX_NODES: str(MAX_NODES),
                             CFG_CLUSTER_USE_PREEMPTIBLE: USE_PREEMPTIBLE,
                             CFG_CLUSTER_DISK_TYPE: DISK_TYPE,
                             CFG_CLUSTER_PROVISIONED_IOPS: str(IOPS),
                             CFG_CLUSTER_BID_PERCENTAGE: str(BID_PERC),
                             CFG_CLUSTER_LABELS: LABELS,
                             CFG_CLUSTER_EXP_USE_LOCAL_SSD: USE_LOCAL_SSD,
                             CFG_CLUSTER_ENABLE_STACKDRIVER: ENABLE_STACKDRIVER}
    confpars[CFG_BLAST] = {CFG_BLAST_RESULTS: RESULTS}
    cfg = ClusterConfig.create_from_cfg(confpars,
                                        cloud_provider = AWSConfig(region = 'test-region'))
    assert cfg.name == NAME
    assert cfg.machine_type == MACHINE_TYPE
    assert cfg.pd_size == PD_SIZE
    assert cfg.num_cpus == NUM_CPUS
    assert cfg.num_nodes == NUM_NODES
    assert cfg.min_nodes == MIN_NODES
    assert cfg.max_nodes == MAX_NODES
    # boolean flags: assert truthiness instead of identity with True (E712)
    assert cfg.use_preemptible
    assert cfg.disk_type == DISK_TYPE
    assert cfg.iops == IOPS
    assert cfg.bid_percentage == BID_PERC
    assert cfg.labels == LABELS
    assert cfg.use_local_ssd
    assert cfg.enable_stackdriver
    errors = []
    # use_local_ssd == True is experimental and raises NotImplementedError
    with pytest.raises(NotImplementedError):
        cfg.validate(errors, ElbCommand.SUBMIT)
    assert not errors
# mock out the AWS call that looks up instance properties (32 CPUs, 120GB)
@patch(target='elb.elb_config.aws_get_machine_properties', new=MagicMock(return_value=InstanceProperties(32, 120)))
def test_clusterconfig_from_configparser_missing():
    """Test ClusterConfig initialization from a ConfigParser object with
    missing required parameters"""
    with pytest.raises(ValueError) as err:
        ClusterConfig.create_from_cfg(configparser.ConfigParser(),
                                      cloud_provider=AWSConfig(region='test-region'))
    # the results bucket is the only required parameter here
    assert 'Missing ' + CFG_BLAST_RESULTS in str(err.value)
def test_clusterconfig_from_configparser_errors():
    """Test that incorrect parameter values in ConfigParser are properly
    reported"""
    confpars = configparser.ConfigParser()
    # negative, non-numeric, fractional and out-of-range values
    confpars[CFG_CLUSTER] = {CFG_CLUSTER_NUM_CPUS: '-25',
                             CFG_CLUSTER_NUM_NODES: 'abc',
                             CFG_CLUSTER_MIN_NODES: '0.1',
                             CFG_CLUSTER_MAX_NODES: 'aaa',
                             CFG_CLUSTER_BID_PERCENTAGE: '101'}
    with pytest.raises(ValueError) as err:
        # NOTE(review): sibling tests pass an AWSConfig instance as
        # cloud_provider; the bare CSP.AWS enum here looks inconsistent --
        # verify intent
        cfg = ClusterConfig.create_from_cfg(confpars,
                                            cloud_provider = CSP.AWS)
    # test that each invalid parameter value is reported
    errors = str(err.value).split('\n')
    for key in confpars[CFG_CLUSTER].keys():
        assert [message for message in errors if key in message and 'invalid value' in message and confpars[CFG_CLUSTER][key] in message]
def test_ElasticBlastConfig_init_errors():
    """Test that __init__ method arguments are checked"""
    # no arguments at all: the task must always be given
    with pytest.raises(AttributeError) as err:
        cfg = ElasticBlastConfig()
    assert 'task parameter must be specified' in str(err.value)
    # a single positional argument must be a ConfigParser object
    with pytest.raises(AttributeError) as err:
        cfg = ElasticBlastConfig(5)
    assert 'one positional parameter' in str(err.value)
    assert 'ConfigParser object' in str(err.value)
    # at most one positional argument is accepted
    with pytest.raises(AttributeError) as err:
        cfg = ElasticBlastConfig(configparser.ConfigParser(), 5)
    assert 'one positional parameter' in str(err.value)
    assert 'ConfigParser object' in str(err.value)
    # the task is required with a ConfigParser argument ...
    with pytest.raises(AttributeError) as err:
        cfg = ElasticBlastConfig(configparser.ConfigParser(), results = 's3://results')
    assert 'task parameter must be specified' in str(err.value)
    # ... and with keyword-only initialization
    with pytest.raises(AttributeError) as err:
        cfg = ElasticBlastConfig(aws_region = 'some-region', results = 's3://results')
    assert 'task parameter must be specified' in str(err.value)
# the mocked instance type offers only 2 CPUs and 8GB of memory
@patch(target='elb.elb_config.aws_get_machine_properties', new=MagicMock(return_value=InstanceProperties(2, 8)))
def test_validate_too_many_cpus():
    """Test that requesting too many CPUs is reported"""
    cfg = ElasticBlastConfig(aws_region = 'test-region',
                             program = 'blastn',
                             db = 'test-db',
                             queries = 'test-query.fa',
                             results = 's3://results',
                             task = ElbCommand.SUBMIT)
    cfg.cluster.machine_type = 'm5.large'
    # request far more CPUs than the (mocked) instance type provides
    cfg.cluster.num_cpus = 16
    with pytest.raises(UserReportError) as err:
        cfg.validate(ElbCommand.SUBMIT)
    assert re.search(r'number of CPUs [\w "]* exceeds', str(err.value))
|
from enum import Enum,auto
#Enum that contains types of networks
#SA_TO_Q = a network with state-action input that ouputs a single q value
#S_TO_QA = a network with state input that ouputs a q value for each action
#SM_TO_QA = a network with multiple state input (frame stacking) that outputs a q value for each action
#SR_TO_QA = a recurrent network built with time distributed layers which accepts multiple states similiar to frame stacking, ouptuting a q value for eeach action
#a SM network expects an input with size (batch_size, )
class Network(Enum):
    """Types of Q-network input/output architectures.

    SA_TO_Q:  state-action input, outputs a single Q value.
    S_TO_QA:  state input, outputs a Q value for each action.
    SM_TO_QA: multiple-state input (frame stacking), outputs a Q value
              for each action.
    SR_TO_QA: recurrent network built with time-distributed layers over
              stacked states, outputs a Q value for each action.
    """
    SA_TO_Q = auto()
    S_TO_QA = auto()
    SM_TO_QA = auto()
    SR_TO_QA = auto()
class Networks(Enum):
    """Identifiers for concrete network configurations.

    Presumably consumed by a network-building factory elsewhere in the
    project -- not visible here.
    """
    DOOM_CNN_SM = auto()
    DUELING_SM = auto()
    DUELING_S = auto()
    DUELING_LSTM= auto()
# Generated by Django 3.0.5 on 2020-09-11 04:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the ServerKey and Server tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ServerKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('ssh_port', models.IntegerField()),
                ('ssh_user', models.CharField(max_length=100)),
                # NOTE(review): ssh_pass/sudo_pass are plain CharFields --
                # confirm credentials are encrypted/hashed before storage
                ('ssh_pass', models.CharField(max_length=200)),
                ('sudo_pass', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 't_server_key',
            },
        ),
        migrations.CreateModel(
            name='Server',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ipaddr', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # nullable link to the ServerKey used for this server
                ('inventory', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='server', to='server.ServerKey')),
            ],
            options={
                'db_table': 't_server',
            },
        ),
    ]
|
Esse é o conteúdo do Feature2
Esse é o conteúdo do Feature2
|
"""unittest for plugin"""
import json
import os
import tempfile
import unittest
from wechaty import Wechaty, WechatyOptions
from wechaty.plugin import WechatyPlugin
from wechaty.utils.data_util import WechatySetting
from wechaty.fake_puppet import FakePuppet
def test_setting():
    """WechatyPlugin persists its setting dict to a JSON file under CACHE_DIR."""
    with tempfile.TemporaryDirectory() as cache_dir:
        # point the plugin cache at a throw-away directory and restore the
        # previous value afterwards so other tests are not polluted
        old_cache_dir = os.environ.get('CACHE_DIR')
        os.environ['CACHE_DIR'] = cache_dir
        try:
            plugin = WechatyPlugin()
            plugin.setting['unk'] = 11
            assert 'count' not in plugin.setting
            plugin.setting['count'] = 20
            # the setting file must exist and contain both values
            assert os.path.exists(plugin.setting_file)
            with open(plugin.setting_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
            assert data['unk'] == 11
            assert data['count'] == 20
        finally:
            if old_cache_dir is None:
                os.environ.pop('CACHE_DIR', None)
            else:
                os.environ['CACHE_DIR'] = old_cache_dir
class TestWechatySetting(unittest.TestCase):
    """Tests for WechatySetting backed by a temporary directory."""

    def setUp(self) -> None:
        # fresh scratch directory for every test
        self.tempdir = tempfile.TemporaryDirectory()

    def tearDown(self) -> None:
        self.tempdir.cleanup()

    def _check_setting_roundtrip(self, setting_file: str) -> None:
        """Shared assertions: file creation, item write/read, default get,
        and whole-mapping save (previously duplicated in two tests)."""
        wechaty_setting: WechatySetting = WechatySetting(setting_file)
        # constructing the setting creates the backing file
        assert os.path.exists(setting_file)
        wechaty_setting['a'] = 'a'
        assert 'a' in wechaty_setting.read_setting()
        assert wechaty_setting.read_setting()['a'] == 'a'
        assert 'b' not in wechaty_setting
        assert wechaty_setting.get("b", "b") == "b"
        # save_setting replaces the entire mapping
        wechaty_setting.save_setting({"c": "c"})
        assert 'a' not in wechaty_setting
        assert 'c' in wechaty_setting

    def test_simple_init(self):
        self._check_setting_roundtrip(
            os.path.join(self.tempdir.name, 'simple_setting.json'))

    def test_sub_setting(self):
        # the setting file lives in a not-yet-existing sub-directory
        self._check_setting_roundtrip(
            os.path.join(self.tempdir.name, "sub", 'simple_setting.json'))

    # NOTE(review): plain unittest.TestCase does not await async test
    # methods, so this coroutine is created but never executed; consider
    # unittest.IsolatedAsyncioTestCase or an async-aware runner. TODO confirm.
    async def test_finder(self):
        fake_puppet = FakePuppet()
        bot = Wechaty(options=WechatyOptions(puppet=fake_puppet))
        contact_id = fake_puppet.add_random_fake_contact()
        contact_payload = await fake_puppet.contact_payload(contact_id)
        from wechaty_plugin_contrib.finders.contact_finder import ContactFinder
        finder = ContactFinder(
            contact_id
        )
        contacts = await finder.match(bot)
        assert len(contacts) == 1
        contact = contacts[0]
        await contact.ready()
        assert contact.payload.name == contact_payload.name
|
# Generated by Django 3.0.3 on 2020-03-04 08:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Renames the sitecategory plural label and re-creates sitepage.city
    as a nullable foreign key to city.CityPage."""

    dependencies = [
        ('city', '0002_auto_20200304_1458'),
        ('sites', '0002_auto_20200304_1420'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='sitecategory',
            options={'verbose_name_plural': 'Business Unit'},
        ),
        # drop the old city field, then re-add it as a nullable FK
        migrations.RemoveField(
            model_name='sitepage',
            name='city',
        ),
        migrations.AddField(
            model_name='sitepage',
            name='city',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='city.CityPage'),
        ),
    ]
|
#!/usr/bin/env python
#
# aws_snapshot_manager.py
#
# Takes a daily snapshot and manages your archival snapshot inventory.
#
# Requirements:
#
# * A working boto configuration for establishing connection to your AWS environment
# * This script is meant to be run once a day with a cron job
#
# Snapshot management logic is as follows:
#
# * Always keep the past 7 days of snapshots
# * Always keep the snapshot taken on the last day of the month.
#
# To do:
#
# Add localtime vs ntp server check just in case localtime is way off
# Add argparse and include target volume option, debug, etc.
# Add support for snapshotting multiple volumes
#
# v0.1 Initial script, catdevurandom
import boto # Note: AWS API credentials must be supplied in your user's homedir in a file called .boto for this to work. See boto documentation: http://code.google.com/p/boto/w/list
import re
import datetime
import calendar
import syslog
import sys
def main():
    """Take today's snapshot and prune the archival snapshot inventory."""
    # module-level flag read by the helper functions to gate DEBUG syslog lines
    global debug
    debug = True
    syslog.openlog("aws_snapshot_manager.py", syslog.LOG_PID, syslog.LOG_LOCAL0)
    syslog.syslog('INFO: Starting snapshot management process')
    conn = start_aws_connection()
    selected_volume = select_volume(conn, "vol-140bd69d") # change this to the volume you'd like to use
    snapshot_result = take_snapshot(selected_volume)
    archival_snapshot_list = get_snapshot_list(conn)
    result = manage_snapshot_inventory(archival_snapshot_list)
def start_aws_connection():
    '''Initiate AWS API connection and return the EC2 connection object.

    Exits the process when the connection cannot be established; without
    this, `conn` would be unbound and the return raised a NameError.
    '''
    try:
        conn = boto.connect_ec2()
    except Exception:
        syslog.syslog('ERROR: Failed to connect to AWS')
        # nothing else can run without a connection
        sys.exit(1)
    if debug is True:
        syslog.syslog('DEBUG: Initiated %s successfully' % conn)
    return conn
def select_volume(conn, volume_id):
    '''Select specified volume ID and return the volume object.

    Exits the process when the volume cannot be found.  The previous
    `is None` check was unreachable: indexing an empty result list
    raised IndexError before the check ran.
    '''
    volume_list = conn.get_all_volumes([volume_id])
    if not volume_list:
        syslog.syslog('ERROR: Failed to locate %s' % volume_id)
        sys.exit(1)
    selected_volume = volume_list[0]
    if debug is True:
        syslog.syslog('DEBUG: Selected %s' % selected_volume)
    return selected_volume
def take_snapshot(selected_volume):
    '''Take a snapshot of the selected volume and tag it with the marker
    description that get_snapshot_list() later filters on.'''
    snapshot_description = 'Created by aws_snapshot_manager.py at ' + datetime.datetime.today().isoformat(' ')
    try:
        snapshot_result = selected_volume.create_snapshot(snapshot_description)
    except Exception:
        # narrowed from a bare except: still aborts the job, but lets
        # KeyboardInterrupt/SystemExit propagate
        syslog.syslog('ERROR: Failed to create snapshot! Cancelling job.')
        sys.exit(1)
    syslog.syslog('INFO: Created %s successfully!' % snapshot_result)
    return snapshot_result
def get_snapshot_list(conn):
    '''Get a list of all snapshots with the matching Description prefix.'''
    description = 'Created by aws_snapshot_manager.py'
    all_snapshots = conn.get_all_snapshots()
    # literal prefix match; re.match treated the '.' in the marker as a
    # regex wildcard, so startswith is both clearer and stricter
    archival_snapshot_list = [i for i in all_snapshots if i.description.startswith(description)]
    if debug is True:
        syslog.syslog('DEBUG: %s snapshots currently in inventory' % len(archival_snapshot_list))
    if len(archival_snapshot_list) < 7:
        syslog.syslog('ERROR: Only %s snapshots currently in AWS inventory, we should always have at least 7!' % len(archival_snapshot_list))
    return archival_snapshot_list
def manage_snapshot_inventory(archival_snapshot_list):
    '''Check current date and process documented archival/deletion logic.

    Deletes snapshots more than 7 days old, unless a snapshot was taken
    on the last day of its month.  Returns 0 when nothing was deleted,
    otherwise the result of delete_snapshots().

    Fixes over the previous version: dates are compared as date objects
    instead of mixed-format ISO strings; the month length comes from
    calendar.monthrange (calendar.mdays ignores leap years, so Feb 29
    snapshots were wrongly deletable); the unused today-based
    last_monthday and the unreachable negative-length branch are gone.
    '''
    today = datetime.date.today()
    week_delta = today - datetime.timedelta(days=7)
    expired_snapshots = []
    for snapshot in archival_snapshot_list:
        snapshot_datetime = datetime.datetime.strptime(snapshot.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
        snapshot_date = snapshot_datetime.date()
        # last day of the snapshot's own month, leap-year aware
        last_monthday = calendar.monthrange(snapshot_date.year, snapshot_date.month)[1]
        # keep month-end snapshots forever; everything else expires at 7 days
        if snapshot_date <= week_delta and snapshot_date.day != last_monthday:
            expired_snapshots.append(snapshot)
    if expired_snapshots:
        result = delete_snapshots(expired_snapshots)
    else:
        if debug is True:
            syslog.syslog('DEBUG: Not deleting any snapshots today')
        result = 0
    return result
def delete_snapshots(expired_snapshots):
    '''Delete the given snapshots; return the number of failed deletions.

    Previously returned None, even though manage_snapshot_inventory()
    propagates this value as its result.
    '''
    failures = 0
    for snapshot in expired_snapshots:
        syslog.syslog('INFO: Deleting %s (created on: %s)' % (snapshot, snapshot.start_time))
        try:
            snapshot.delete()
        except Exception:
            # narrowed from a bare except; keep going so one bad snapshot
            # does not block the rest of the cleanup
            syslog.syslog('ERROR: Failed to delete %s' % snapshot)
            failures += 1
    return failures
if __name__ == '__main__':
main()
|
import os
from glob import glob
from random import shuffle
from torch.utils.data import Dataset
from torchvision.transforms import ToTensor
import cv2
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, Flip, OneOf, Compose
)
class GlassesDataset(Dataset):
    """Binary image dataset: label 1 for images under ``glasses/``,
    label 0 for images under ``no_glasses/``, with optional
    albumentations augmentation applied before resizing."""

    def __init__(self, datapath, use_aug=True):
        # list of (file_path, label) pairs, shuffled once at construction
        self.files_path = []
        self.totensor = ToTensor()
        self.use_aug = use_aug
        # augmentation pipeline, applied with overall probability 0.8
        self.aug = Compose([RandomRotate90(),
                            Flip(),
                            Transpose(),
                            OneOf([
                                IAAAdditiveGaussianNoise(),
                                GaussNoise(),
                            ], p=0.2),
                            OneOf([
                                MotionBlur(p=.2),
                                MedianBlur(blur_limit=3, p=0.1),
                                Blur(blur_limit=3, p=0.1),
                            ], p=0.2),
                            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
                            OneOf([
                                OpticalDistortion(p=0.3),
                                GridDistortion(p=.1),
                                IAAPiecewiseAffine(p=0.3),
                            ], p=0.2),
                            OneOf([
                                CLAHE(clip_limit=2),
                                IAASharpen(),
                                IAAEmboss(),
                                RandomBrightnessContrast(),
                            ], p=0.3),
                            HueSaturationValue(p=0.3),
                            ], p=0.8)
        for file_path in glob(os.path.join(datapath, "glasses/*.jpg")):
            self.files_path.append((file_path, 1))
        for file_path in glob(os.path.join(datapath, "no_glasses/*.jpg")):
            self.files_path.append((file_path, 0))
        shuffle(self.files_path)

    def __len__(self):
        return len(self.files_path)

    def __getitem__(self, indx):
        file_path, label = self.files_path[indx]
        # channel reorder (2,1,0) converts cv2's BGR to RGB
        # NOTE(review): cv2.imread returns None for unreadable files, which
        # would raise here -- confirm inputs are pre-validated
        image = cv2.imread(file_path)[:,:,(2,1,0)]
        if self.use_aug:
            image = self.aug(image=image)["image"]
        image = cv2.resize(image, (256, 256))
        image = self.totensor(image)
        return {"image": image, "label": label}
|
from django.contrib import admin
from .models import TeachingTask,Semester
# Register your models here.
@admin.register(TeachingTask)
class TeachingTaskAdmin(admin.ModelAdmin):
    """Admin for teaching task assignments."""
    list_display = ['semester','teacher','course','classes','is_changed','create_time']
    # search across class name, teacher's username and the class major grade
    search_fields = ['classes__name','teacher__teacher__username','classes__major__grade']
    # newest semester first, then by major grade
    ordering = ['-semester','classes__major__grade']
@admin.register(Semester)
class SemesterAdmin(admin.ModelAdmin):
    """Admin for semesters."""
    list_display = ['semester_year','semester_period','is_execute',]
|
from adapters.contact_adapter import ContactAdapter
from devices.sensor.contact import ContactSensor
class AV201021(ContactAdapter):
    """Adapter AV201021: a ContactAdapter extended with a 'tamper' contact sensor."""

    def __init__(self, devices):
        super().__init__(devices)
        # expose the tamper channel as an additional contact sensor
        self.devices.append(ContactSensor(devices, 'tamper', 'tamper'))
|
from flask.ext.admin import BaseView, AdminIndexView, expose, form
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.admin.contrib.fileadmin import FileAdmin
from flask.ext.login import current_user
from flask import url_for, current_app, flash
from wtforms import HiddenField, TextAreaField, PasswordField
from jinja2 import Markup
from hashlib import md5
import time
import os.path as op
# Globally restrict every Flask-Admin view to logged-in, active managers by
# monkey-patching is_accessible on all base view classes.  The lambda's
# single argument is the view instance ('self').
BaseView.is_accessible = lambda x: current_user.is_active() and current_user.is_manager()
AdminIndexView.is_accessible = lambda x: current_user.is_active() and current_user.is_manager()
ModelView.is_accessible = lambda x: current_user.is_active() and current_user.is_manager()
FileAdmin.is_accessible = lambda x: current_user.is_active() and current_user.is_manager()
class AdminAccess(ModelView):
    """ModelView restricted to admins (stricter than the module-wide manager check)."""
    def is_accessible(self):
        return current_user.is_active() and current_user.is_admin()
class PersonModelView(ModelView):
    """Admin view for people, with Russian UI labels."""
    column_searchable_list = ('first_name', 'middle_name', 'last_name')
    column_list = ('last_name', 'adult_name', 'birthday', 'baptized', 'married', 'status', 'age')
    column_sortable_list = ('last_name', 'birthday', 'baptized', 'married')
    # Russian display labels for model fields
    column_labels = dict(
        name='Имя', birthday='День рождения', baptized='Крещен', married='В браке',
        status='Статус', age='Возраст', emails='E-mail', social='Интернет',
        relations='Семья', actions='Деятельность', first_name='Имя',
        middle_name='Отчество', last_name='Фамилия', maiden_name='Девичья фамилия',
        occupation='Род занятий', specialty='Специальность', description='Примечания',
        phones='Телефоны', address='Адрес', events='События', skills='Дары и таланты',
        gender='Пол', adult_name='Полное имя'
    )
    # imported inside the class body so the attribute below can reference
    # the model at class-creation time; presumably avoids a circular
    # import -- TODO confirm
    from models import Person
    column_default_sort = (Person.last_name, False)
    form_columns = (
        'first_name', 'middle_name', 'last_name', 'maiden_name', 'gender',
        'birthday', 'phones', 'address',
        'emails', 'social',
        'relations', 'occupation', 'specialty', 'skills',
        'baptized', 'married',
        'status', 'events',
        'description'
    )
    form_choices = {'gender': [('m', 'Мужской'), ('f', 'Женский')]}
    # render free-text notes as a textarea instead of a single-line input
    form_extra_fields = dict(
        description=TextAreaField(label='Примечания')
    )
    def __init__(self, model, session, **kwargs):
        super(PersonModelView, self).__init__(model, session, **kwargs)
class PhoneModelView(ModelView):
    """Admin view for phone numbers and their owners."""
    column_searchable_list = ('number',)
    column_labels = dict(number='Номер', type='Тип', people='Владельцы')
    # the former __init__ only forwarded its arguments unchanged to
    # ModelView.__init__, so it was removed as redundant
class SocialModelView(ModelView):
    """Admin view for social-network links, searchable by owner name."""
    column_searchable_list = ('people.first_name', 'people.last_name', 'people.middle_name', 'name')
    column_labels = dict(name='Название сети', link='Ссылка', people='Имя')
    column_list = ('people', 'name', 'link')
    column_sortable_list = ('name',)
    # redundant pass-through __init__ removed
class EmailModelView(ModelView):
    """Admin view for e-mail addresses, searchable by owner name."""
    column_searchable_list = ('people.first_name', 'people.last_name', 'people.middle_name', 'address')
    column_labels = dict(address='Email', people='Имя')
    column_list = ('people', 'address')
    # sorting deliberately disabled for this view
    column_sortable_list = ()
    # redundant pass-through __init__ removed
class RelationModelView(ModelView):
    """Admin view for family relations between people."""
    column_searchable_list = (
        'rel.first_name', 'rel.last_name', 'rel.middle_name', 'type.title',
        'person.first_name'
    )
    column_labels = dict(rel='Кто', type='Кем приходится', person='Кому')
    # redundant pass-through __init__ removed
class AddressModelView(ModelView):
    """Admin view for addresses with a custom edit template."""
    column_searchable_list = ('people.first_name', 'people.last_name', 'people.middle_name', 'text')
    column_labels = dict(
        name='Место', description='Пояснение', text='Полный адрес',
        people='Проживают', events='События'
    )
    column_list = ('name', 'description', 'text')
    edit_template = 'admin/edit.html'
    create_template = 'admin/edit.html'
    # hidden coordinate fields -- presumably filled by a widget in the
    # custom edit template; TODO confirm
    form_extra_fields = dict(
        lat=HiddenField(),
        lng=HiddenField()
    )
    # redundant pass-through __init__ removed
class StatusModelView(ModelView):
    """Admin view for person statuses."""
    column_searchable_list = ('name',)
    column_labels = {'name': 'Статус', 'people': 'Люди'}
    form_columns = ('name', 'people')

    def __init__(self, model, session, **kwargs):
        super(StatusModelView, self).__init__(model, session, **kwargs)
class ActionModelView(ModelView):
    """Admin view for activity kinds."""
    column_searchable_list = ('name',)
    column_labels = {'name': 'Активность'}

    def __init__(self, model, session, **kwargs):
        super(ActionModelView, self).__init__(model, session, **kwargs)
class EventRoleModelView(ModelView):
    """Admin view for events (registered against the Event model in charge_admin)."""
    column_searchable_list = ('name',)
    column_labels = {
        'name': 'Событие', 'started_at': 'Начало', 'finished_at': 'Завершение',
        'type': 'Тип', 'place': 'Место', 'people': 'Участники',
    }
    form_columns = ('name', 'started_at', 'finished_at', 'type', 'place', 'people')

    def __init__(self, model, session, **kwargs):
        super(EventRoleModelView, self).__init__(model, session, **kwargs)
class ActionRoleModelView(ModelView):
    """Admin view linking a person to an activity with a role."""
    column_searchable_list = (
        'person.first_name', 'person.last_name', 'person.middle_name', 'action.name'
    )
    column_labels = {'action': 'Активность', 'person': 'Участник', 'role': 'Роль'}

    def __init__(self, model, session, **kwargs):
        super(ActionRoleModelView, self).__init__(model, session, **kwargs)
class SkillModelView(ModelView):
    """Admin view for skills."""
    column_searchable_list = ('name',)
    column_labels = {'name': 'Навык', 'people': 'Люди'}
    form_columns = ('name', 'people')

    def __init__(self, model, session, **kwargs):
        super(SkillModelView, self).__init__(model, session, **kwargs)
class UserModelView(AdminAccess):
    """Admin view for application user accounts.

    The plaintext password from the form is replaced by its MD5 digest
    before flask-admin persists the model.
    NOTE(review): MD5 is not a safe password hash (prefer bcrypt/argon2);
    kept as-is because the login check elsewhere must hash the same way.
    """
    form_columns = (
        'login', 'email', 'name', 'password', 'role',
        'is_authenticated', 'is_active'
    )
    column_labels = dict(
        login='Имя входа', email='E-mail',
        name='Имя', password='Пароль', role='Доступ',
        created_at='Зарегистрирован', is_active='Разрешен',
        is_authenticated='Подтвержден'
    )
    column_list = (
        'name', 'login', 'email', 'role', 'is_authenticated', 'is_active',
        'created_at'
    )
    form_extra_fields = dict(
        password=PasswordField(label='Пароль')
    )

    def update_model(self, form, model):
        # Hash the submitted password before the form is applied to the model.
        form.password.data = md5(form.password.data.encode('utf-8')).hexdigest()
        # BUG FIX: propagate flask-admin's success flag.  The original dropped
        # the return value, so the framework saw None (falsy) and treated
        # every edit as a failure.
        return super(UserModelView, self).update_model(form, model)

    def create_model(self, form):
        # Same hashing + return-value fix as update_model.
        form.password.data = md5(form.password.data.encode('utf-8')).hexdigest()
        return super(UserModelView, self).create_model(form)

    def __init__(self, model, session, **kwargs):
        super(UserModelView, self).__init__(model, session, **kwargs)
class PeriodModelView(ModelView):
    """Admin view for weekly availability periods (day of week + time range)."""
    column_labels = {
        'dow_name': 'День недели', 'time_from': 'Начало',
        'time_to': 'Завершение', 'people': 'Люди',
    }
    column_list = ('dow_name', 'time_from', 'time_to')
    # Day-of-week choices; stored values are the string digits '0'-'6'.
    form_choices = {
        'dow': [
            ('0', 'Воскресенье'), ('1', 'Понедельник'), ('2', 'Вторник'),
            ('3', 'Среда'), ('4', 'Четверг'), ('5', 'Пятница'), ('6', 'Суббота')
        ]
    }

    def __init__(self, model, session, **kwargs):
        super(PeriodModelView, self).__init__(model, session, **kwargs)
class PhotoModelView(ModelView):
    # Admin view for person photos: list shows owner's last name, full name
    # and an inline thumbnail; upload goes to static/upload/photos.
    column_list = ('person.last_name', 'person.adult_name', 'small')
    form_columns = ('person', 'path')
    column_labels = {'small': 'Фото', 'person.last_name': 'Фамилия', 'person.adult_name': 'Полное имя'}
    from models import Person
    column_default_sort = (Person.last_name, False)
    column_sortable_list = ('person.last_name',)

    def _list_thumbnail(view, context, model, name):
        # Render the thumbnail image in the list view; empty string when no file.
        if not model.path:
            return ''
        return Markup('<img width=100 src="%s">' % model.small)

    column_formatters = {
        'small': _list_thumbnail
    }
    file_path = op.join(op.dirname(__file__), 'static', 'upload', 'photos')
    form_extra_fields = dict(
        path=form.ImageUploadField(
            base_path=file_path,
            thumbnail_size=(200, 200, True),
            # Stored name = md5(original name + timestamp) + original extension;
            # the timestamp makes repeated uploads of the same file unique.
            # NOTE(review): [-3:] assumes a 3-letter extension (jpg/png) —
            # 4-letter ones like .jpeg get truncated; confirm acceptable.
            namegen=lambda obj, fn: '{0}.{1}'.format(md5( (fn.filename+str(time.time())).encode('utf-8') ).hexdigest(), fn.filename.lower()[-3:]),
            thumbgen=lambda fn: '{0}thumb.{1}'.format(fn[:-3], fn[-3:]),
            url_relative_path='upload/photos/',
            label=u'Фотография'
        )
    )

    def __init__(self, model, session, **kwargs):
        super(PhotoModelView, self).__init__(model, session, **kwargs)
def charge_admin(db, models, admin):
    """Register every model view with the flask-admin instance.

    Docs: https://flask-admin.readthedocs.org/en/latest/
    Example: https://github.com/mrjoes/flask-admin/blob/master/examples/sqla/simple.py
    """
    registrations = (
        (PersonModelView, models.Person, 'Люди', 'people'),
        (RelationModelView, models.Relation, 'Связи', 'relations'),
        (PhoneModelView, models.Phone, 'Телефоны', 'phones'),
        (SocialModelView, models.Social, 'Сети', 'socials'),
        (EmailModelView, models.Email, 'Email', 'emails'),
        (AddressModelView, models.Address, 'Адреса', 'address'),
        (StatusModelView, models.Status, 'Статусы', 'statuses'),
        (ActionModelView, models.Action, 'Активности', 'actions'),
        (ActionRoleModelView, models.ActionRole, 'Роли', 'actionroles'),
        (EventRoleModelView, models.Event, 'События', 'events'),
        (SkillModelView, models.Skill, 'Навыки', 'skills'),
        (UserModelView, models.User, 'Users', 'users'),
        (PeriodModelView, models.Period, 'Периоды', 'periods'),
        (PhotoModelView, models.Photo, 'Фото', 'photo'),
    )
    for view_cls, model, name, endpoint in registrations:
        admin.add_view(view_cls(model, db.session, name=name, endpoint=endpoint))
|
class Person:
    """A person with a display name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __repr__(self):
        # "name:age", e.g. "Hal:20"
        return self.name + ':' + str(self.age)
if __name__ == '__main__':
    # Sort a list of floats in ascending order.
    float_list = [645.32, 37.40, 76.30, 5.40, -34.23, 1.11, -34.94, 23.37,
                  635.46, -876.22, 467.73, 62.26]
    float_list.sort()
    print(float_list)
    print()

    person_list = [
        Person("Hal", 20), Person("Susann", 31), Person("Dwight", 19),
        Person("Kassandra", 21), Person("Lawrence", 25), Person("Cindy", 22),
        Person("Cory", 27), Person("Mac", 19), Person("Romana", 27),
        Person("Doretha", 32), Person("Danna", 20), Person("Zara", 23),
        Person("Rosalyn", 26), Person("Risa", 24), Person("Benny", 28),
        Person("Juan", 33), Person("Natalie", 25),
    ]
    # Sort people alphabetically by name (lambda arg renamed so it no longer
    # shadows the Person class).
    person_list.sort(key=lambda p: p.name)
    print(person_list)
    print()
    # Sort people by age, oldest first.
    person_list.sort(key=lambda p: p.age, reverse=True)
    print(person_list)
|
# -*- coding: utf-8 -*-
import sys

# Package-relative import only exists on the Python 3 layout; under
# Python 2 `config` is expected to be importable as a top-level module.
if sys.version_info.major == 3:
    from .config import Config
else:
    from config import Config
# Read 12 monthly temperatures, then report which months had the maximum
# and minimum values, plus the extremes themselves.
N = 12  # number of months
temp = [0] * N

# January is read separately so it can seed both running extremes.
temp[0] = float(input("Digite a temperatura do mes de janeiro: "))
maior = temp[0]  # highest temperature seen so far
menor = temp[0]  # lowest temperature seen so far

# Read the remaining months, updating the extremes.
cont = 1
while cont < N:
    temp[cont] = float(input("Digite a temperatura do proximo mes: "))
    if temp[cont] >= maior:
        maior = temp[cont]
    elif temp[cont] <= menor:
        menor = temp[cont]
    cont += 1

# Months (1-based) that reached the highest temperature.
cont = 0
while cont < N:
    if temp[cont] == maior:
        print("meses das maiores temperaturas: ", cont + 1)
    cont += 1

# Months (1-based) that reached the lowest temperature.
cont = 0
while cont < N:
    if temp[cont] == menor:
        print("meses das menores temperaturas: ", cont + 1)
    cont += 1  # BUG FIX: the missing increment made this loop run forever

print(maior, menor)
|
import socket
import threading

PORT = 5000
# Fixed addresses of the nodes in this distributed setup.
ADDRESS_CLIENT = "10.90.37.15"
ADDRESS_MID = "10.90.37.16"
ADDRESS_SEVERNAME1 = "10.90.37.17"  # [sic] "SEVERNAME" — kept, other code references this name
ADDRESS_SEVERNAME2 = "10.90.37.19"
ADRESS_SERVER = "10.90.37.18"       # [sic] single-D "ADRESS" — kept for the same reason
class ServerName1():
    """UDP name-service node.

    Listens on ADDRESS_SEVERNAME1:PORT and answers known operation names
    ('Soma', 'Subtracao', 'Multiplicacao') with the address of the compute
    server.  Unknown messages get no reply.
    """

    def __init__(self):
        # Bind immediately and serve forever.
        self.retornaServer(ADDRESS_SEVERNAME1, PORT)

    def retornaServer(self, client, port):
        # Trailing space kept: the receiver apparently expects it as part of
        # the payload format — TODO confirm against the client code.
        endereco = '10.90.37.18' + ' '
        udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        udp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        address = (client, port)
        udp_socket.bind(address)
        while True:
            msg, cli = udp_socket.recvfrom(1024)
            # BUG FIX (Python 3): recvfrom returns bytes, so comparing the
            # raw message to str literals was always False and no reply was
            # ever sent.  Decode first, and send bytes back.
            msg = msg.decode()
            if msg in ('Soma', 'Subtracao', 'Multiplicacao'):
                udp_socket.sendto(endereco.encode(), cli)
        # NOTE: unreachable — the serve loop above never exits.
        udp_socket.close()
if __name__ == "__main__":
    # Run the name server directly; blocks forever serving requests.
    servername1 = ServerName1()
|
import configparser
import json
import numpy as np
import sys
from tqdm import tqdm
def load_glove_from_npy(glove_vec_path, glove_vocab_path):
    """Load GloVe vectors and their vocab file into a word -> vector dict.

    The .npy matrix and the vocab file must have the same number of rows;
    row i of the matrix is the embedding of line i of the vocab file.
    """
    vectors = np.load(glove_vec_path)
    with open(glove_vocab_path, "r", encoding="utf8") as f:
        vocab = [line.strip() for line in f]
    assert(len(vectors) == len(vocab))
    glove_embeddings = dict(zip(vocab, vectors))
    print("Read " + str(len(glove_embeddings)) + " glove vectors.")
    return glove_embeddings
def weighted_average(avg, new, n):
    """Fold *new* into a running mean *avg* that already covers n-1 items.

    Equivalent to recomputing the mean of all n items incrementally.
    """
    weight_old = (n - 1) / n
    return weight_old * avg + (new / n)
def max_pooling(old, new):
    """Element-wise maximum of two vectors (NumPy broadcasting rules apply)."""
    # TODO: maybe a better name for this function?
    return np.maximum(old, new)
def write_embeddings_npy(embeddings, embeddings_cnt, npy_path, vocab_path):
    """Persist embeddings as a float32 .npy matrix plus a vocab file.

    The vocab file has one "word<TAB>count" line per row of the matrix,
    in the same order as the matrix rows.
    """
    words = list(embeddings.keys())
    vectors = [embeddings[w] for w in words]

    matrix = np.array(vectors, dtype="float32")
    print(matrix.shape)

    print("Writing embeddings matrix to " + npy_path, flush=True)
    np.save(npy_path, matrix)
    print("Finished writing embeddings matrix to " + npy_path, flush=True)

    print("Writing vocab file to " + vocab_path, flush=True)
    lines = ["\t".join([w, str(embeddings_cnt[w])]) for w in words]
    with open(vocab_path, "w", encoding="utf8") as f:
        f.write("\n".join(lines))
    print("Finished writing vocab file to " + vocab_path, flush=True)
def create_embeddings_glove(pooling="max", dim=100):
    """Build concept and relation embeddings from ConceptNet triple strings.

    For every triple string, the subject/object token spans are pooled into
    concept embeddings and the remaining tokens into relation embeddings,
    using either "avg" (incremental mean) or "max" (element-wise max)
    pooling.  Inputs/outputs are resolved through paths.cfg.
    dim must match the dimensionality of the GloVe vectors on disk.
    """
    print("Pooling: " + pooling)
    config = configparser.ConfigParser()
    config.read("paths.cfg")

    with open(config["paths"]["triple_string_cpnet_json"], "r", encoding="utf8") as f:
        triple_str_json = json.load(f)
    print("Loaded " + str(len(triple_str_json)) + " triple strings.")

    glove_embeddings = load_glove_from_npy(config["paths"]["glove_vec_npy"], config["paths"]["glove_vocab"])
    print("Loaded glove.", flush=True)

    concept_embeddings = {}
    concept_embeddings_cnt = {}
    rel_embeddings = {}
    rel_embeddings_cnt = {}

    for i in tqdm(range(len(triple_str_json))):
        data = triple_str_json[i]

        # words: whitespace-tokenized triple string; subj/obj are given as
        # half-open token index ranges into it.
        words = data["string"].strip().split(" ")

        rel = data["rel"]
        subj_start = data["subj_start"]
        subj_end = data["subj_end"]
        obj_start = data["obj_start"]
        obj_end = data["obj_end"]

        subj_words = words[subj_start:subj_end]
        obj_words = words[obj_start:obj_end]

        subj = " ".join(subj_words)
        obj = " ".join(obj_words)

        # counting the frequency (only used for the avg pooling)
        if subj not in concept_embeddings:
            concept_embeddings[subj] = np.zeros((dim,))
            concept_embeddings_cnt[subj] = 0
        concept_embeddings_cnt[subj] += 1

        if obj not in concept_embeddings:
            concept_embeddings[obj] = np.zeros((dim,))
            concept_embeddings_cnt[obj] = 0
        concept_embeddings_cnt[obj] += 1

        if rel not in rel_embeddings:
            rel_embeddings[rel] = np.zeros((dim,))
            rel_embeddings_cnt[rel] = 0
        rel_embeddings_cnt[rel] += 1

        if pooling == "avg":
            # Out-of-vocabulary words contribute a zero vector.
            subj_encoding_sum = sum([glove_embeddings.get(word, np.zeros((dim,))) for word in subj_words])
            obj_encoding_sum = sum([glove_embeddings.get(word, np.zeros((dim,))) for word in obj_words])

            if rel in ["relatedto", "antonym"]:
                # Symmetric relation.
                rel_encoding_sum = sum([glove_embeddings.get(word, np.zeros((dim,))) for word in words]) - subj_encoding_sum - obj_encoding_sum
            else:
                # Asymmetrical relation.
                rel_encoding_sum = obj_encoding_sum - subj_encoding_sum

            subj_len = subj_end - subj_start
            obj_len = obj_end - obj_start

            subj_encoding = subj_encoding_sum / subj_len
            obj_encoding = obj_encoding_sum / obj_len
            # NOTE(review): divides by the count of non-subj/obj tokens —
            # raises ZeroDivisionError if a triple has no such tokens; confirm
            # the input guarantees at least one relation token.
            rel_encoding = rel_encoding_sum / (len(words) - subj_len - obj_len)

            # NOTE(review): concept embeddings are overwritten each time the
            # concept appears (last triple wins), while relation embeddings
            # use the running mean — confirm this asymmetry is intended.
            concept_embeddings[subj] = subj_encoding
            concept_embeddings[obj] = obj_encoding
            rel_embeddings[rel] = weighted_average(rel_embeddings[rel], rel_encoding, rel_embeddings_cnt[rel])

        elif pooling == "max":
            subj_encoding = np.amax([glove_embeddings.get(word, np.zeros((dim,))) for word in subj_words], axis=0)
            obj_encoding = np.amax([glove_embeddings.get(word, np.zeros((dim,))) for word in obj_words], axis=0)

            # mask_rel: indices of tokens outside both the subj and obj spans.
            mask_rel = []
            for j in range(len(words)):
                if subj_start <= j < subj_end or obj_start <= j < obj_end:
                    continue
                mask_rel.append(j)
            # The comprehension variable `i` shadows the outer loop index on
            # purpose here; it iterates mask_rel only.
            rel_vecs = [glove_embeddings.get(words[i], np.zeros((dim,))) for i in mask_rel]
            rel_encoding = np.amax(rel_vecs, axis=0)

            # here it is actually avg over max for relation
            concept_embeddings[subj] = max_pooling(concept_embeddings[subj], subj_encoding)
            concept_embeddings[obj] = max_pooling(concept_embeddings[obj], obj_encoding)
            rel_embeddings[rel] = weighted_average(rel_embeddings[rel], rel_encoding, rel_embeddings_cnt[rel])

    print(str(len(concept_embeddings)) + " concept embeddings")
    print(str(len(rel_embeddings)) + " relation embeddings")

    write_embeddings_npy(concept_embeddings, concept_embeddings_cnt,config["paths"]["concept_vec_npy_glove"] + "." + pooling,
                         config["paths"]["concept_vocab_glove"] + "." + pooling + ".txt")
    write_embeddings_npy(rel_embeddings, rel_embeddings_cnt, config["paths"]["relation_vec_npy_glove"] + "." + pooling,
                         config["paths"]["relation_vocab_glove"] + "." + pooling + ".txt")
if __name__ == "__main__":
    # Defaults: max pooling, 100-dim GloVe vectors; inputs come from paths.cfg.
    create_embeddings_glove()
|
import sys
# Rebind input() to sys.stdin.readline for faster bulk reads; int() and
# split() tolerate the trailing newline this leaves attached.
input = sys.stdin.readline

def main():
    # Reads N intervals (A_i, B_i): each of N values lies in [A_i, B_i].
    # Prints how many distinct values the median of the N values can take.
    N = int( input())
    t = 1
    if N%2 == 0:
        t = 2
    # For even N the median is the mean of two values and moves in steps of
    # 1/2; scaling all bounds by t=2 keeps the arithmetic in integers.
    AB = [ tuple( map( lambda x: int(x)*t, input().split())) for _ in range(N)]
    A = [ab[0] for ab in AB]
    B = [ab[1] for ab in AB]
    A.sort()
    B.sort()
    if t == 1:
        # Odd N: smallest reachable median is the median of all lower
        # bounds; largest is the median of all upper bounds.
        m = A[N//2]
        M = B[N//2]
    else:
        # Even N (scaled): median of lower/upper bounds, as doubled values.
        m = (A[N//2-1]+A[N//2])//2
        M = (B[N//2-1]+B[N//2])//2
    # Every value between the two extremes is attainable.
    print(M-m+1)

if __name__ == '__main__':
    main()
|
import json
# import 3rd party data analysis package 'Pandas'
import pandas
# we use pyplot in particular here from matplotlib for creating
# our charts, by convention pyplot is named plt when imported.
# matplotlib is a 3rd party graphics package.
import matplotlib.pyplot as plt
tweets_data_path = 'tweet_mining.json'

# Read the captured tweets line by line; each line is one JSON status.
# Malformed lines (e.g. truncated writes) are skipped.
results = []
with open(tweets_data_path, "r") as tweets_file:
    for tweet_line in tweets_file:
        try:
            results.append(json.loads(tweet_line))
        except ValueError:
            # json.loads raises ValueError (JSONDecodeError) on bad input;
            # the original bare `except:` also hid unrelated errors.
            continue

# Sanity check: how many tweets were loaded.
print(len(results))

# Build a DataFrame with one column per field of interest.  list(...) keeps
# this working on Python 3, where map() returns a lazy iterator.
statuses = pandas.DataFrame()
statuses['text'] = list(map(lambda status: status['text'], results))
statuses['lang'] = list(map(lambda status: status['lang'], results))
# 'place' may be absent/null on a tweet, so default the country to 'N/A'.
statuses['country'] = list(map(lambda status: status['place']['country'] if status['place'] else "N/A", results))

# Frequency of each language / country among the collected tweets.
tweets_by_lang = statuses['lang'].value_counts()
tweets_by_country = statuses['country'].value_counts()

def _style_chart(ax, xlabel, title):
    """Apply the shared tick/label/title/spine styling to one subplot."""
    ax.tick_params(axis='x', labelsize=15, colors='#666666')
    ax.tick_params(axis='y', labelsize=10, colors='#666666')
    ax.set_xlabel(xlabel, fontsize=15)
    # BUG FIX: the original called set_xlabel twice, so the y-axis never got
    # its "Number of tweets" label.
    ax.set_ylabel('Number of tweets', fontsize=15)
    ax.xaxis.label.set_color('#666666')
    ax.yaxis.label.set_color('#666666')
    ax.set_title(title, fontsize=15, color='#666666')
    for spine in ax.spines.values():
        spine.set_edgecolor('#666666')

# One figure with the two bar charts stacked vertically.
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)

_style_chart(ax1, 'Tweet Languages', 'Top 10 languages')
tweets_by_lang[:10].plot(ax=ax1, kind='bar', color='#FF7A00')

# BUG FIX: the countries chart was labelled 'Tweet Languages' (copy-paste).
_style_chart(ax2, 'Countries', 'Top 10 Countries')
tweets_by_country[:10].plot(ax=ax2, kind='bar', color='#FF7A00')

# Render both charts at once.
plt.show()
import turtle
def draw(curList, distance, run, t):
    """Recursively draw one generation of a triangle fractal with turtle *t*.

    curList holds the starting corners of every triangle drawn so far; each
    generation draws a triangle at every corner, records the edge midpoints
    as next-generation corners, then recurses with half the edge length.
    Stops after MAX_RUN generations.
    """
    MAX_RUN = 8.
    # Explicit base case: without it the recursion never terminates.
    if run > MAX_RUN:
        return

    # Fade colour from blue (early runs) toward yellow, and thin the pen.
    blue_shade = 1 - float(run) / MAX_RUN
    red_shade = float(run) / MAX_RUN
    green_shade = float(run) / MAX_RUN
    t.color((red_shade, green_shade, blue_shade))
    t.pensize(1 + 4 * (MAX_RUN - run))
    print("run: ", run)

    next_corners = []
    for coord in curList:
        t.penup()
        t.goto(coord)
        t.pendown()
        # Trace an equilateral triangle, recording the two edge midpoints
        # reached halfway along the first and last sides.
        t.setheading(60)
        t.forward(distance / 2)
        next_corners.append(t.pos())
        t.forward(distance / 2)
        t.setheading(-60)
        t.forward(distance)
        t.setheading(180)
        t.forward(distance / 2)
        next_corners.append(t.pos())
        t.forward(distance / 2)

    # Accumulate the new corners and recurse one generation deeper.
    curList.extend(next_corners)
    draw(curList, distance / 2, run + 1, t)
def in_a_land_far_away(SCREEN_SIZE):
    """Create and return the turtle drawing window with a white background.

    NOTE(review): SCREEN_SIZE is currently unused — the window keeps turtle's
    default size; confirm whether it was meant to call window.setup().
    """
    window = turtle.Screen()
    window.bgcolor("white")
    return window
def a_turtle_is_born(distance):
    """Create the drawing turtle, positioned (pen up) at (-distance, -distance)."""
    t = turtle.Turtle()
    t.speed(1000)  # out-of-range values are clamped; draws as fast as possible
    t.color("green")
    t.penup()
    t.goto(-distance, -distance)
    return t
def main():
    """Set up the window and turtle, then start the recursive fractal drawing.

    (Renamed from `__init__`: that name suggests a class initializer, but
    this is a plain module-level entry point; its only call site is below.)
    """
    SCREEN_SIZE = 700
    init_run = 1
    SHAPE_CORNER = 400
    init_distance = SHAPE_CORNER * 2

    w = in_a_land_far_away(SCREEN_SIZE)
    print(w.screensize())
    t = a_turtle_is_born(SHAPE_CORNER)

    curList = []
    print("initial run currList: ", curList)
    curList.append(t.pos())
    draw(curList, init_distance, init_run, t)

    # Keep the window open until clicked, then terminate the process.
    w.exitonclick()
    exit()

main()
#!/usr/bin/env python
# encoding: utf-8
from simulator.modules.sim import *
class TestEventHandler(EventHandler):
    """EventHandler test double that records every callback it receives."""
    def __init__(self, simulation_engine):
        super().__init__(simulation_engine)
        self.start_callback_received = False  # set once handle_start fires
        self.stop_callback_received = False   # set once handle_stop fires
        self.events = []                      # every delivered event, in order

    def handle_start(self):
        self.start_callback_received = True

    def handle_stop(self):
        self.stop_callback_received = True

    def handle_event(self, event):
        self.events.append(event)
class SimulationEngineTests(unittest.TestCase):
    """Behavioural tests for SimulationEngine start/stop/event notification."""

    def setUp(self):
        # Fresh engine with the recording handler attached for every test.
        self.se = SimulationEngine()
        self.test_eh = TestEventHandler(self.se)
        self.se.event_handler = self.test_eh

    def test_raises_no_event_handler_exception(self):
        # FIX: assertRaisesRegexp is a deprecated alias removed in
        # Python 3.12; assertRaisesRegex is the supported spelling.
        with self.assertRaisesRegex(Exception, 'No EventHandler attached!'):
            SimulationEngine().start()

    def test_notify_start(self):
        self.se.stop(1)
        self.se.start()
        self.assertTrue(self.se.event_handler.start_callback_received)

    def test_notify_stop(self):
        self.se.stop(1)
        self.se.start()
        self.assertTrue(self.se.event_handler.stop_callback_received)

    def test_notify_event(self):
        # Scheduled event is delivered first, then the engine's "End" event.
        self.se.stop(2)
        self.se.schedule(Event("Dummy", 1))
        self.se.start()
        self.assertEqual(self.se.event_handler.events[0].identifier, "Dummy")
        self.assertEqual(self.se.event_handler.events[0].time, 1)
        self.assertEqual(self.se.event_handler.events[1].identifier, "End")
        self.assertEqual(self.se.event_handler.events[1].time, 2)
class EventTests(unittest.TestCase):
    """Tests for Event's identifier/time properties and extra kwargs storage."""

    def setUp(self):
        self.e1 = Event("Arrival", 10)
        self.e2 = Event("Arrival", 10, special="Special")

    def test_properties(self):
        self.assertEqual(self.e1.identifier, "Arrival")
        self.assertEqual(self.e1.time, 10)
        self.assertEqual(self.e2.identifier, "Arrival")
        self.assertEqual(self.e2.time, 10)
        # Extra keyword arguments are kept on the event's kwargs dict.
        self.assertEqual(self.e2.kwargs.get('special', None), "Special")
if __name__ == '__main__':
    # `unittest` is expected to arrive via the star import from sim above.
    unittest.main()
|
import unittest
import json
import sys
sys.path.append('../Chapter02')
_404 = "The requested URL was not found on the server. " \
" If you entered the URL manually please check your" \
"spelling and try again."
class TestApp(unittest.TestCase):
    """Black-box tests for flask_error's JSON error handlers (Py2/Py3 compatible)."""

    def setUp(self):
        from flask_error import app as _app
        # To talk to the app, make an instance of FlaskClient.
        self.app = _app.test_client()

    def test_raise(self):
        # /api deliberately raises, which flask_error turns into a JSON 500.
        hello = self.app.get('/api')
        # response.data is bytes on Python 3 and str on Python 2.
        if(sys.version_info > (3, 0)):
            body = json.loads(str(hello.data, 'utf8'))
        else:
            body = json.loads(str(hello.data).encode("utf8"))
        self.assertEqual(body['code'], 500)

    def test_proper_404(self):
        # Call an endpoint which does not exist on purpose.
        hello = self.app.get('/asdfjhadkjf')
        # The status code should be 404 since the endpoint doesn't exist.
        self.assertEqual(hello.status_code, 404)
        # The error description should also come back as JSON.
        if(sys.version_info > (3,0)):
            body = json.loads(str(hello.data, 'utf8'))
        else:
            body = json.loads(str(hello.data).encode("utf8"))
        self.assertEqual(body['code'], 404)
        self.assertEqual(body['message'], '404 Not Found')
        self.assertEqual(body['description'], _404)
if __name__ == '__main__':
    # Run this module's tests directly.
    unittest.main()
|
import pytest
import time
import zmq
import cv2
import random
from threading import Thread
from back_machine.collector_node import collector
from back_machine.input_node import producer
from back_machine.ostu_node import consumer as otsu_consumer
from front_machine.contours_node import consumer as contours_consumer
from front_machine.output_node import result_collector
# fixtures with module scope to handle global variables and destroyed with the last test in the module
@pytest.fixture(scope = 'module')
def context():
    # One shared ZeroMQ context for the whole test module.
    context = zmq.Context()
    return context

@pytest.fixture(scope = 'module')
def in_port():
    # Endpoint used as the "input" side of each node under test.
    return "tcp://127.0.0.1:5500"

@pytest.fixture(scope = 'module')
def out_port():
    # Endpoint used as the "output" side of each node under test.
    return "tcp://127.0.0.1:5501"

@pytest.fixture(scope = 'module')
def num_nodes():
    # Randomized fan-out so termination logic is exercised at varying sizes.
    return random.randint(2, 20)
# fixtures with function scope that get destroyed with the teardown of the function
# fixtures that request multi-fixtures
# Modularity: using fixtures from a fixture function
class BindSockets:
    """PUSH/PULL socket pair, both *bound* to the given endpoints."""

    def __init__(self, context, in_port, out_port):
        self.context = context
        self.in_port = in_port
        self.out_port = out_port
        self.init_in_socket = self.init_in_socket_bind()
        self.init_out_socket = self.init_out_socket_bind()

    def init_in_socket_bind(self):
        # PUSH socket bound to the input endpoint.
        sock = self.context.socket(zmq.PUSH)
        sock.bind(self.in_port)
        return sock

    def init_out_socket_bind(self):
        # PULL socket bound to the output endpoint.
        sock = self.context.socket(zmq.PULL)
        sock.bind(self.out_port)
        return sock
class ConnectSockets:
    """PUSH/PULL socket pair, both *connected* to the given endpoints."""

    def __init__(self, context, in_port, out_port):
        self.context = context
        self.in_port = in_port
        self.out_port = out_port
        self.init_in_socket = self.init_in_socket_connect()
        self.init_out_socket = self.init_out_socket_connect()

    def init_in_socket_connect(self):
        # PUSH socket connected to the input endpoint.
        sock = self.context.socket(zmq.PUSH)
        sock.connect(self.in_port)
        return sock

    def init_out_socket_connect(self):
        # PULL socket connected to the output endpoint.
        sock = self.context.socket(zmq.PULL)
        sock.connect(self.out_port)
        return sock
@pytest.fixture(scope = 'function')
def init_sockets(request, context, in_port, out_port):
    # Build the socket pair according to the test's @pytest.mark.type marker
    # ('connect' or 'bind').  NOTE(review): a test without that marker hits
    # an AttributeError/NameError here — every test in this module must be
    # marked.
    socket_type = request.node.get_closest_marker("type").args[0]
    if socket_type == 'connect':
        sockets = ConnectSockets(context, in_port, out_port)
    elif socket_type == 'bind':
        sockets = BindSockets(context, in_port, out_port)
    yield sockets
    # release the sockets before running other tests
    sockets.init_in_socket.close()
    sockets.init_out_socket.close()
class TestReqMultiFixtures:
    """Termination tests for every pipeline node.

    Each test runs one node in a thread, drives it through its termination
    protocol over the PUSH/PULL pair prepared by `init_sockets` (chosen via
    the @pytest.mark.type marker), and asserts the thread exits cleanly.
    """
    # autouse fixture to be called with each test
    @pytest.fixture(autouse=True)
    def _init_sockets(self, init_sockets):
        self.in_socket = init_sockets.init_in_socket
        self.out_socket = init_sockets.init_out_socket

    @pytest.mark.type('connect')
    # test that requests multi-fixtures
    def test_input_terminate(self, out_port, num_nodes):
        """
        Test the termination of input node
        TEST TYPE : connect
        """
        # input node that should send termination after finishing sending the video frames to otsu nodes
        input_thread = Thread(target = producer, args = (out_port, './back_machine/inputs/1.mp4', num_nodes))
        input_thread.start()
        # count number of frames in the video
        cap = cv2.VideoCapture('./back_machine/inputs/1.mp4')
        frames_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # receiving frames from input node
        for i in range(frames_count):
            # make sure that input node is alive
            assert input_thread.is_alive(), "input node thread died before sending all frames, test failed!"
            frame_msg = self.out_socket.recv_pyobj()
        # receiving num_nodes empty termination msgs from input node
        for i in range(num_nodes):
            termination_msg = self.out_socket.recv_pyobj()
            # make sure that termination msgs are all empty
            assert len(termination_msg['frame']) == 0, "termination msg is not empty!"
        # wait till the input node dies
        time.sleep(0.01)
        # make sure that input node dies after sending termination msgs
        assert not input_thread.is_alive(), "input node thread is still alive after s terminations, test failed!"
        input_thread.join()

    @pytest.mark.type('connect')
    # test that requests multi-fixtures
    def test_collector_terminate(self, in_port, out_port, num_nodes):
        """
        Test the termination of collector node
        TEST TYPE : connect
        """
        # collector that needs num_nodes empty received msgs to terminate
        collector_thread = Thread(target = collector, args = (in_port, out_port, num_nodes))
        collector_thread.start()
        empty_message = { 'binary' : [] }
        # sending num_nodes empty msgs to collector
        for i in range(num_nodes):
            # make sure that collector is alive
            assert collector_thread.is_alive(), "collector node thread died before num_nodes receives, test failed!"
            self.in_socket.send_pyobj(empty_message)
        # receiving num_nodes empty termination msgs from the collector
        for i in range(num_nodes):
            termination_msg = self.out_socket.recv_pyobj()
            # make sure that termination msgs are all empty
            assert len(termination_msg['binary']) == 0, "termination msg is not empty!"
        # wait till the collector dies
        time.sleep(0.01)
        # make sure that collector dies after sending termination msgs
        assert not collector_thread.is_alive(), "collector node thread is still alive after num_nodes terminations, test failed!"
        collector_thread.join()

    @pytest.mark.type('bind')
    # test that requests multi-fixtures
    def test_otsu_terminate(self, in_port, out_port, num_nodes):
        """
        Test the termination of otsu node
        TEST TYPE : bind
        """
        # otsu_consumer that needs num_nodes empty received msgs to terminate
        consumer_thread = Thread(target = otsu_consumer, args = (in_port, out_port, num_nodes))
        consumer_thread.start()
        empty_message = { 'frame' : [] }
        # sending num_nodes empty msgs to otsu_consumer
        for i in range(num_nodes):
            # make sure that otsu_consumer is alive
            assert consumer_thread.is_alive(), "otsu_consumer node thread died before num_nodes receives, test failed!"
            self.in_socket.send_pyobj(empty_message)
        # receiving 1 empty termination msg from the otsu_consumer
        termination_msg = self.out_socket.recv_pyobj()
        # make sure that termination msg is empty
        assert len(termination_msg['binary']) == 0, "termination msg is not empty!"
        # wait till the otsu_consumer dies
        time.sleep(0.01)
        # make sure that otsu_consumer dies after sending termination msg
        assert not consumer_thread.is_alive(), "otsu_consumer node thread is still alive after the termination msg, test failed!"
        consumer_thread.join()

    @pytest.mark.type('bind')
    # test that requests multi-fixtures
    def test_contours_terminate(self, in_port, out_port, num_nodes):
        """
        Test the termination of contours node
        TEST TYPE : bind
        """
        # contours_consumer that needs num_nodes empty received msgs to terminate
        consumer_thread = Thread(target = contours_consumer, args = (in_port, out_port, num_nodes))
        consumer_thread.start()
        empty_message = { 'binary' : [] }
        # sending num_nodes empty msgs to contours_consumer
        for i in range(num_nodes):
            # make sure that contours_consumer is alive
            assert consumer_thread.is_alive(), "contours_consumer node thread died before num_nodes receives, test failed!"
            self.in_socket.send_pyobj(empty_message)
        # receiving 1 empty termination msg from the contours_consumer
        termination_msg = self.out_socket.recv_pyobj()
        # make sure that termination msg is empty
        assert len(termination_msg['contours']) == 0, "termination msg is not empty!"
        # wait till the contours_consumer dies
        time.sleep(0.01)
        # make sure that contours_consumer dies after sending termination msg
        assert not consumer_thread.is_alive(), "contours_consumer node thread is still alive after the termination msg, test failed!"
        consumer_thread.join()

    @pytest.mark.type('connect')
    # test that requests multi-fixtures
    def test_output_terminate(self, in_port, num_nodes):
        """
        Test the termination of output node
        TEST TYPE : connect
        """
        # output node that should send termination after finishing sending the video frames to otsu nodes
        output_thread = Thread(target = result_collector, args = (in_port, './', num_nodes))
        output_thread.start()
        empty_message = { 'contours' : [] }
        # sending num_nodes empty termination msgs to output node
        for i in range(num_nodes):
            # make sure that output node is alive
            assert output_thread.is_alive(), "output node thread died before receiving all terminations, test failed!"
            self.in_socket.send_pyobj(empty_message)
        # wait till the output node dies
        time.sleep(1)
        # make sure that output node dies after receiving termination msgs
        assert not output_thread.is_alive(), "output node thread is still alive after receiving all terminations, test failed!"
        output_thread.join()
|
import logging
import click
import torch
from sonosco.common.constants import SONOSCO
from sonosco.common.utils import setup_logging
from sonosco.common.path_utils import parse_yaml
from sonosco.models import TDSSeq2Seq
from sonosco.decoders import GreedyDecoder
from sonosco.datasets.processor import AudioDataProcessor
from sonosco.common.global_settings import CUDA_ENABLED
from sonosco.serialization import Deserializer
LOGGER = logging.getLogger(SONOSCO)
@click.command()
@click.option("-c", "--config_path", default="../sonosco/config/infer.yaml", type=click.STRING,
              help="Path to infer configuration file.")
@click.option("-a", "--audio_path", default="audio.wav", type=click.STRING, help="Path to an audio file.")
@click.option("-p", "--plot", default=False, is_flag=True, help="Show plots.")
def main(config_path, audio_path, plot):
    """Run speech-to-text inference on one audio file with a serialized TDSSeq2Seq model.

    Loads the "infer" section of the YAML config, deserializes the model
    checkpoint, runs the audio through the model, and logs the greedy-decoded
    transcription. With --plot, the attention matrix of the first sample is
    shown with matplotlib.
    """
    config = parse_yaml(config_path)["infer"]
    device = torch.device("cuda" if CUDA_ENABLED else "cpu")
    loader = Deserializer()
    model = loader.deserialize(TDSSeq2Seq, config["model_checkpoint_path"])
    model.to(device)
    model.eval()  # disable dropout/batch-norm updates for inference
    decoder = GreedyDecoder(model.decoder.labels)
    processor = AudioDataProcessor(**config)
    spect, lens = processor.parse_audio_for_inference(audio_path)
    spect = spect.to(device)
    # Watch out lens is modified after this call!
    # It is now equal to the number of encoded states
    with torch.no_grad():
        out, output_lens, attention = model(spect, lens)
        decoded_output, decoded_offsets = decoder.decode(out, output_lens)
        LOGGER.info(decoded_output)
    if plot:
        # Imported lazily so headless runs do not require matplotlib.
        import matplotlib.pyplot as plt
        plt.matshow(attention[0].numpy())
        plt.show()
if __name__ == "__main__":
    # Configure the shared SONOSCO logger before running the CLI entry point.
    setup_logging(LOGGER)
    main()
|
import socket
import os
import sys
def recibir(nombre):
    """Download file `nombre` from the server over the global socket `s`.

    Protocol: send the file name; the server answers with the file size as
    text. A size of 0 means the file does not exist. Otherwise the payload is
    read in 1024-byte chunks until `tam` bytes have been received.
    """
    s.send(nombre.encode())
    t = s.recv(1024).decode()
    tam = int(t)
    if tam != 0:
        with open(nombre, 'wb') as f:
            while tam > 0:
                l = s.recv(1024)
                if not l:
                    # Connection closed early: avoid looping forever.
                    break
                f.write(l)
                # BUG FIX: the original subtracted sys.getsizeof(l), which is
                # the Python object size (payload + ~33 bytes of overhead),
                # so the countdown hit zero early and files were truncated.
                # Count actual payload bytes instead.
                tam -= len(l)
        print("Archivo '"+nombre+"' recibido")
    else:
        print("El archivo "+nombre+" no existe")
def enviar(nombre):
    """Upload local file `nombre` to the server over the global socket `s`.

    Protocol: send the file name, then the file size as text, then the raw
    bytes in 1024-byte chunks. On IOError a size of 0 is sent so the server
    knows nothing follows.
    """
    try:
        #print(nombre)
        s.send(nombre.encode())
        f = open(nombre,'rb')
        stats = os.stat(nombre)
        tam = stats.st_size  # file size in bytes
        #print(tam)
        # NOTE(review): size and payload go over the same TCP stream with no
        # delimiter; they can coalesce into one recv() on the server — verify.
        s.send(str(tam).encode())
        if(tam == 0):
            print("Archivo Vacio")
        else:
            l = f.read(1024)
            while (l):
                s.send(l)
                l = f.read(1024)
            print("Enviado")
        f.close()
    except IOError:
        # File missing/unreadable: report a zero size so the server skips it.
        print("El archivo "+nombre+" no existe")
        s.send(str(0).encode())
# File-transfer client: interactive menu loop over a single TCP connection.
HOST = 'localhost'
PORT = 1025
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((HOST,PORT))
while True:
    print("1. Lista de Archivos de Servidor")
    print("2. Descargar Archivo")
    print("3. Subir Archivo")
    print("4. Salir")
    op = int(input("Opcion: "))
    # The chosen option is sent so the server switches to the same sub-protocol.
    s.send(str(op).encode())
    if(op == 1):
        # Server replies with its file listing as plain text.
        lista = s.recv(1024).decode()
        print("\n|LISTA DE ARCHIVOS|")
        print(lista+"\n")
    if(op == 2):
        nombre = input("Nombre: ")
        recibir(nombre)
    if(op == 3):
        nombre = input("Nombre: ")
        enviar(nombre)
    if(op == 4):
        break;
s.close()
# Use pip install MySQL-python to install mysql dependencies
# -*- coding: utf-8 -*-
import MySQLdb as MySql
import sys
class MySQLInterface():
    """Small helper around MySQLdb: connect, create schema, insert rows.

    NOTE(review): this class uses Python 2 syntax (print statements,
    ``except Error, e``) and will not run under Python 3.
    """
    def __init__(self, host, port, user, password):
        # Connection parameters; the actual connection is opened in connect().
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = ""
        # Maps table name -> {column name: logical type}; filled by
        # generateSQLTableCreation and consulted when quoting insert values.
        self.table = dict()
    def connect(self):
        """Open the connection and sanity-check it with SELECT VERSION(); exit on failure."""
        try:
            self.connection = MySql.connect(host = self.host, user = self.user, passwd = self.password, port = self.port)
            self.connection.query("SELECT VERSION()")
            result = self.connection.use_result()
            return True
        except MySql.Error, e:
            print "Error %d: %s" % (e.args[0], e.args[1])
            sys.exit(1)
    def disconnect(self):
        """Close the connection if one was opened."""
        if self.connection:
            self.connection.close()
    def createDatabase(self, databaseName):
        """Create `databaseName` if needed and switch the session to it."""
        sql = "CREATE DATABASE IF NOT EXISTS "+databaseName+";"
        cursor = self.connection.cursor()
        cursor.execute(sql)
        self.connection.commit()
        self.database = databaseName
        sql = "USE "+self.database+"; "
        cursor = self.connection.cursor()
        cursor.execute(sql)
        self.connection.commit()
    '''
    @table a dictionary with table names and columns information
    '''
    def createTable(self, table):
        """Execute the CREATE TABLE statement built by generateSQLTableCreation."""
        if self.connection:
            sql = self.generateSQLTableCreation(table)
            cursor = self.connection.cursor()
            cursor.execute(sql)
            self.connection.commit()
    def insertData(self, data):
        """Insert rows from `data`; the SQL generator executes/commits each statement."""
        if self.connection:
            sql = self.generateSQLDataInsertion(data)
            return True
        return False
    def generateSQLTableCreation(self, table):
        """Build a CREATE TABLE statement with an auto-increment id and optional FKs."""
        # save table information for validation of types later on when insert data
        self.table[table["name"]] = dict()
        # +++++++++++++++++++++++
        foreignkeys = list()
        tableName = table["name"]
        sql = "CREATE TABLE "+tableName+" (id INT AUTO_INCREMENT PRIMARY KEY, "
        # Columns value is a list of dictionaries
        for column in table["columns"]:
            columnName = column["name"]
            columnType = self.convertType(column["type"])
            self.table[tableName][columnName] = column["type"]
            sql = sql + " " + columnName+ " "+ columnType+ ","
            if column["type"]=="foreignkey":
                foreignkeys.append({ "name": column["name"], "reference": column["reference"] })
        # Drop the trailing comma left by the column loop.
        sql = sql[:len(sql)-1]
        if len(foreignkeys)==0:
            sql = sql + ");"
        else:
            sql = sql + ","
            for key in foreignkeys:
                sql = sql + " FOREIGN KEY ("+key["name"]+") REFERENCES "+key["reference"]+" (id),"
        if len(foreignkeys)>0:
            sql = sql[:len(sql)-1] + ");"
        return sql
    def generateSQLDataInsertion(self, data):
        """Build and execute one INSERT per row in `data`; returns the last SQL string.

        NOTE(review): values are concatenated into the SQL text (see
        getTypeConverted) — vulnerable to SQL injection for untrusted input;
        parameterized queries would be safer.
        """
        # print "Inserindo Dados"
        if self.connection:
            # print data
            sql = ""
            for table in data:
                for row in data[table]:
                    sql = "INSERT INTO "+table+" ("
                    # copy columns name
                    for name in data[table][row]:
                        sql = sql + name["column"] + " ,"
                    sql = sql[:len(sql)-1] + ") values (" # remove the last comma
                    # copy columns values
                    for value in data[table][row]:
                        sql = sql + self.getTypeConverted(table, value["column"], value["data"]) + " ,"
                    sql = sql[:len(sql)-1] + ") ;"
                    cursor = self.connection.cursor()
                    cursor.execute(sql)
                    self.connection.commit()
            return sql
    def getTypeConverted(self, table, column, value):
        """Quote string-typed values for SQL text; pass other types through unchanged."""
        if self.table:
            real_type = self.table[table][column]
            if real_type=="string":
                return "'"+value+"'"
            else:
                return value
    def convertType(self, tpe):
        """Map a logical column type name to its MySQL column type."""
        types = {
            "string" : "VARCHAR(255)",
            "integer": "INT",
            "float": "FLOAT",
            "foreignkey": "INT"
        }
        return types[tpe]
|
#!/usr/bin/env python
"""
pip install urllib3
pip install bs4
"""
from coordinator import CoOrdinator
# Instantiate the project's scraper coordinator and run one crawl ("farm") pass.
coordinator = CoOrdinator()
coordinator.farm()
|
import board
import digitalio
import time
import microcontroller
from DFPlayer import DFPlayer
# Motion-triggered audio player (CircuitPython): a PIR sensor on D0 starts a
# random DFPlayer track; the board resets itself after ~100 idle seconds or
# when the player stops responding.
dfplayer = DFPlayer()
builtin_led = digitalio.DigitalInOut(board.D13)   # heartbeat LED
builtin_led.direction = digitalio.Direction.OUTPUT
led = digitalio.DigitalInOut(board.D2)            # "playing" indicator LED
led.direction = digitalio.Direction.OUTPUT
pir = digitalio.DigitalInOut(board.D0)            # PIR motion sensor input
pir.direction = digitalio.Direction.INPUT
dfplayer.stop()
dfplayer.set_volume(20)
reset_counter = 0
print('\n******\nStart!\n******\n')
while True:
    # Heartbeat blink; one loop iteration takes roughly one second.
    builtin_led.value = True
    time.sleep(0.5)
    builtin_led.value = False
    time.sleep(0.5)
    reset_counter = reset_counter + 1
    if reset_counter > 100:
        # No motion for ~100 iterations: reboot as a soft watchdog.
        print('reset: count due')
        microcontroller.reset()
    pir_value = pir.value
    # 513 - busy, 512 - idle
    if dfplayer.get_status() == 512:
        led.value = False
    if dfplayer.get_status() == None:
        # Player no longer answering on serial: reboot.
        print('reset: dfplayer no response')
        microcontroller.reset()
    if pir_value:
        if dfplayer.get_status() == 512:
            # Idle and motion detected: queue and start a random track.
            dfplayer.random()
            dfplayer.stop()
            dfplayer.play()
        led.value = True
        reset_counter = 0
|
__version__ = "0.1.25"
|
from ovpn import OpenVpn
# OpenVPN management endpoint.
# NOTE(review): IP/port are hard-coded — consider moving to configuration.
IP = "178.128.112.7"
PORT = 5555
s = OpenVpn(IP,PORT)
# Connect and print the greeting, then list currently connected clients.
connect = s.connect()
print(connect)
clients = s.get_clients()
print(clients)
|
# This code is based off code included in pywcsgrid2
import numpy as np
from matplotlib.transforms import Transform
from .base import CurvedTransform
class WcsWorld2PixelTransform(CurvedTransform):
    """Matplotlib transform mapping world (sky) coordinates to pixel coordinates via a WCS."""
    input_dims = 2
    output_dims = 2
    is_separable = False

    def __init__(self, wcs):
        CurvedTransform.__init__(self)
        self.wcs = wcs

    def transform(self, world):
        # Split the (N, 2) input into its two coordinate columns.
        lon = world[:, 0]
        lat = world[:, 1]
        # WCS routines use the FITS 1-based pixel convention; shift to 0-based.
        px, py = self.wcs.wcs_world2pix(lon, lat, 1)
        return np.column_stack((px - 1, py - 1))
    transform.__doc__ = Transform.transform.__doc__

    transform_non_affine = transform
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

    def inverted(self):
        # The inverse mapping goes pixel -> world with the same WCS.
        return WcsPixel2WorldTransform(self.wcs)
    inverted.__doc__ = Transform.inverted.__doc__
class WcsPixel2WorldTransform(CurvedTransform):
    """Matplotlib transform mapping pixel coordinates to world (sky) coordinates via a WCS."""
    input_dims = 2
    output_dims = 2
    is_separable = False

    def __init__(self, wcs):
        CurvedTransform.__init__(self)
        self.wcs = wcs

    def transform(self, pixel):
        # Shift from matplotlib's 0-based pixels to the FITS 1-based convention.
        px = pixel[:, 0] + 1
        py = pixel[:, 1] + 1
        lon, lat = self.wcs.wcs_pix2world(px, py, 1)
        return np.column_stack((lon, lat))
    transform.__doc__ = Transform.transform.__doc__

    transform_non_affine = transform
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

    def inverted(self):
        # The inverse mapping goes world -> pixel with the same WCS.
        return WcsWorld2PixelTransform(self.wcs)
    inverted.__doc__ = Transform.inverted.__doc__
|
class Solution:
    def minTimeToVisitAllPoints(self, points: List[List[int]]) -> int:
        """Return the minimum time (in seconds) to visit all points in order.

        Each second allows one horizontal, vertical, or diagonal unit step,
        so the time between two consecutive points is their Chebyshev
        distance max(|dx|, |dy|). The answer is the sum over consecutive
        pairs; an empty or single-point list takes 0 seconds.
        """
        # Pairing each point with its successor replaces the original manual
        # index bookkeeping (and its unused `cp = np = []` initializer).
        return sum(
            max(abs(nxt[0] - cur[0]), abs(nxt[1] - cur[1]))
            for cur, nxt in zip(points, points[1:])
        )
|
import json
# Python 2 script: parses an ARM 'decode.asl' decode tree into 32-bit
# instruction encoding patterns (uses the py2-only file() builtin).
lines = file('decode.asl').read().split('\n')
insns = []  # collected (instruction name, known bit assignments, fields)
def parseCase(level, oknowns, fields):
    """Consume one `case (...)` block from the global `lines`, recursing into nested whens.

    level:   tab-indentation depth of the case line.
    oknowns: accumulated (bit-place, bit-string) constraints so far.
    fields:  mapping of field name -> bit-place expression.
    Matching encodings are appended to the global `insns` list.
    """
    case = lines.pop(0)
    assert case.startswith('\t' * level + 'case ')
    # Extract the comma-separated selector expressions between the parentheses.
    case = tuple(elem.strip() for elem in case.split('(', 1)[1].split(')', 1)[0].split(','))
    case = () if len(case) == 1 and case[0] == '' else case
    while lines and lines[0].startswith('\t' * (level + 1)):
        line = lines.pop(0).strip()
        assert line.startswith('when (')
        cond = tuple(elem.strip() for elem in line.split('(', 1)[1].split(')', 1)[0].split(','))
        cond = () if len(cond) == 1 and cond[0] == '' else cond
        caused = line.split('=>', 1)[1].strip()
        knowns = oknowns[::]
        # Pair each selector with its pattern for this when-clause.
        for where, what in zip(case, cond):
            if what != '_':
                isNot = what[0] == '!'
                if isNot: what = what[1:]
                assert what[0] == "'" and what[-1] == "'"
                what = what[1:-1]
                if isNot:
                    # Negated patterns are recorded as all-don't-care bits.
                    what = 'x' * len(what)
                knowns.append((fields[where] if where in fields else where, what))
        if caused:
            if caused == '__UNALLOCATED' or caused == '__UNPREDICTABLE':
                continue
            assert caused.startswith('__encoding')
            insn = caused.split(' ', 1)[1].strip()
            assert ' ' not in insn
            insns.append((insn, knowns, fields))
        else:
            # No action on this line: the when-body is a nested block.
            parseInWhen(level + 2, knowns, fields)
def parseInWhen(level, knowns, fields):
    """Parse the body of a `when` clause: __field declarations and nested cases.

    Copies `knowns`/`fields` so additions stay local to this branch.
    """
    knowns = knowns[::]
    fields = fields.copy()
    while lines and lines[0].startswith('\t' * level):
        line = lines[0].strip()
        if line.startswith('__field'):
            _, name, value = lines.pop(0).split(' ', 2)
            fields[name] = value
        elif line.startswith('case'):
            parseCase(level, knowns, fields)
        else:
            # Python 2 print statement and backtick repr.
            print 'Unknown line in when body:', `line`
            assert False
def parsePlace(place):
    """Parse an ASL bit-slice spec of the form 'bottom +: size' into (int, int)."""
    assert ' +: ' in place
    before, _, after = place.partition('+:')
    low = int(before)
    width = int(after)
    # The slice must fit inside a 32-bit instruction word.
    assert 0 < (low + width) <= 32
    return low, width
# Parse the whole decode tree starting at the top-level case.
parseCase(0, [], {})
encodings = {}
for insn, knowns, fields in insns:
    # Start from a fully-unknown 32-bit pattern and burn in the known bits.
    encoding = 'x' * 32
    for place, value in knowns:
        bottom, size = parsePlace(place)
        start = 32 - (bottom + size)
        assert size == len(value)
        encoding = encoding[:start] + value + encoding[start + size:]
    assert len(encoding) == 32
    fieldMap = {}
    for i, (name, place) in enumerate(fields.items()):
        bottom, size = parsePlace(place)
        start = 32 - (bottom + size)
        # Mark this field's still-unknown bits with a unique letter.
        c = chr(ord('a') + i)
        encoding = encoding[:start] + ''.join(c if before == 'x' else before for before in encoding[start:start + size]) + encoding[start + size:]
        if c in encoding:
            fieldMap[c] = name
    assert len(encoding) == 32
    if insn not in encodings:
        encodings[insn] = {}
    encodings[insn][encoding] = fieldMap
# Python 2 file() builtin; emits the result as pretty-printed JSON.
json.dump(encodings, file('encodings.json', 'w'), indent=2)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import signal
import time
from subprocess import Popen
# Smoke test: start chinadns on port 15353, run a dig command against it, and
# report pass/fail via the dig exit status.
chinadns = ['src/chinadns', '-l', 'iplist.txt', '-c', 'chnroute.txt',
            '-p', '15353', '-v']
p1 = Popen(chinadns, shell=False, bufsize=0, close_fds=True)
# The dig command line comes from the file named by the last CLI argument.
with open(sys.argv[-1]) as f:
    dig_cmd = f.read()
time.sleep(1)  # give chinadns a moment to start listening
p2 = Popen(dig_cmd.split() + ['-p', '15353'], shell=False,
           bufsize=0, close_fds=True)
if p2 is not None:
    r = p2.wait()
    if r == 0:
        print 'test passed'
# Always tear the resolver process down, even if the test failed.
for p in [p1]:
    try:
        os.kill(p.pid, signal.SIGTERM)
        os.waitpid(p.pid, 0)
    except OSError:
        pass
sys.exit(r)
|
import gzip
import httpx
from app.core import settings
class SecurityTrails:
def __init__(self):
self.base_url = "https://api.securitytrails.com/v1"
async def download_new_domain_feed(self, *, date: str | None = None):
url = self._url_for("/feeds/domains/registered")
headers = {
"accept-encoding": "application/gzip",
"apikey": str(settings.SECURITY_TRAILS_API_KEY),
}
params = {}
if date is not None:
params["date"] = date
async with httpx.AsyncClient() as client:
res = await client.get(url, headers=headers, params=params)
res.raise_for_status()
text = gzip.decompress(res.content).decode()
lines = text.splitlines()
return [line.strip() for line in lines]
def _url_for(self, path: str):
return self.base_url + path
|
class Test:
    """Tiny demo container holding five greeting strings."""

    def __init__(self):
        self.cards = []  # accumulated items
        self.index = 0   # kept for backward compatibility with the original API

    def createList(self):
        """Fill `self.cards` with five "Hello" entries."""
        # A bounded for-loop replaces the original shared-counter while-loop.
        for _ in range(5):
            self.cards.append("Hello")
        self.index = 0

    def printList(self):
        """Print the first five cards.

        Bug fix: the original advanced the shared `self.index` counter and
        left it at 5, so a second printList() call printed nothing. Iterating
        directly (and resetting the counter) makes the method repeatable.
        """
        for card in self.cards[:5]:
            print(card)
        self.index = 0
if __name__ == '__main__':
    # Demo: build the five-item list and print it once.
    var = Test()
    var.createList()
    var.printList()
from flask import (Flask,
render_template, url_for, request,
flash, redirect, session, abort)
app = Flask(__name__)
# Arbitrary characters; the harder to guess, the better.
# NOTE(review): secret key is hard-coded — load it from an env var in production.
app.config['SECRET_KEY'] = 'sghowh2hg82gh09g20gh2hgohg90whewkjdhoiwjf092'
"""
! для активации виртуальной среды - source ./venv/bin/source
"""
"""
* Изучение Flask-RESTful API приложение
"""
"""
! В templates - хранятся шаблоны.
! В static - все статические зависимости, вроде JS&CSS
! render_template - возвращает шаблон
* url_for - возвращает контекст запроса. ЮРЛ текущего подключения
! url_for('/profile', [параметры для юрл])
! Искусственное создание контекста
*with app.test_request_context():
* print(url_for('about'))
* @app.route('/<name>') - <name> - динамический маршрут
? <path:name> - path: означает, что весь путь, что будет
? записан в строке, будет отображаться
!path: - любые символы
!int: - только числа
!float - только числа с плавающей точкой
! Статические файлы
? url_for('static', filename = "css/styles.css")
? templates
? static
? |
? |____css
? |
? |__styles.css
"""
# Site navigation: each entry maps a display title to a route name used with url_for().
menu = [
    {"title": "Авторизация", "url" : "login"},
    {"title" : "Установка", "url" : "install-flask"},
    {"title" : "Первое приложение", "url" : "first-app"},
    {"title" : "Обратная связь", "url" : "contact"}
]
@app.route('/')
def index():
    """Render the site home page with the shared navigation menu."""
    return render_template("index.html", menu=menu)
@app.route('/about/')
@app.route('/about/<name>')
def about(name="Про Flask"):
    """Render the about page; `name` comes from the URL or falls back to the default."""
    context = {"name": name, "title": "Flask", "menu": menu}
    return render_template('about.html', **context)
"""
* @app.route('/path....', methods=['GET', 'POST'..])
* def name_function():
* if request.method == 'POST':
* print(request.form) # выведет на экран
* словарь формы с ее аргументами
! Данный шаблон илюстрирует, как нужно создавать маршруты для
! Функций, где может быть get или post запросы
"""
"""
! Функция abort - возвращает код с ошибкой и прерывает сессию
! Пример: abort(401) - странице будет возвращена ошибка с кодом 401
! redirect - перенаправление на страницу
"""
@app.route('/contact', methods=["POST", "GET"])
def contact():
    """Feedback form: flash a status message on POST, then render the form.

    A username longer than two characters counts as a valid submission.
    """
    if request.method == 'POST':
        if len(request.form['username']) > 2:
            # Fixed typo in the user-facing message ("оптравлено" -> "отправлено").
            flash("Сообщение отправлено", category='success')
        else:
            flash("Ошибка отправки", category='error')
    return render_template("contact.html", title="Обратная связь", menu=menu)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Authenticate the hard-coded demo user and redirect to their profile."""
    if 'userLogged' in session:
        # Already authenticated: go straight to the profile page.
        return redirect(url_for('profile', username=session['userLogged']))
    is_valid_post = (
        request.method == 'POST'
        and request.form['username'] == 'selfedu'
        and request.form['psw'] == '123'
    )
    if is_valid_post:
        session['userLogged'] = request.form['username']
        return redirect(url_for('profile', username=session['userLogged']))
    return render_template('login.html', title='Авторизация', menu=menu)
@app.route('/profile/<username>')
def profile(username):
    """Show a user's profile page; only the logged-in owner may view it."""
    # A missing session key yields None, which never equals a URL username.
    if session.get('userLogged') != username:
        abort(401)
    return f'Профиль пользователя: {username}'
@app.errorhandler(401)
def page401(error):
    """Render the 401 (unauthorized) error body.

    Fix: return the 401 status code explicitly — returning only a string
    made Flask reply 200 OK for an unauthorized page.
    """
    return f'Ошибка с кодом {error}', 401
@app.errorhandler(404)
def pageNotFound(error):
    """Render the custom 404 page.

    Fix: return the 404 status code explicitly — render_template alone made
    Flask send the "not found" page with a 200 OK status.
    """
    return render_template('page404.html', title='Страница не найдена', menu=menu), 404
if __name__ == "__main__":
    # debug=True enables the reloader and in-browser tracebacks; dev only.
    app.run(debug=True)
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 21:50:16 2017
@author: Gavrilov
"""
def mult(base, exp):
    """Return base**exp computed by repeated multiplication.

    base: int or float; exp: non-negative int. For exp <= 0 the result is 1.
    """
    product = 1
    remaining = exp
    while remaining > 0:
        product *= base
        remaining -= 1
    return product
# Interactive demo: read base and exponent from stdin and print base**exp.
print(mult(float(input("Enter int or float: ")), int(input("Enter int: "))))
def recurPower(base, exp):
    '''
    Return base**exp computed recursively.

    base: int or float.
    exp: int >= 0
    returns: int or float, base^exp (1.0 when exp == 0, matching the original).
    '''
    if exp == 0:
        return 1.0000
    if exp == 1:
        return base
    # Peel one factor off per recursive call.
    return base * recurPower(base, exp - 1)
# Interactive demo: same as above but using the recursive implementation.
print(recurPower(float(input("Enter int or float: ")), int(input("Enter int: "))))
|
import pygame
from model.Board import Board
from model.Item import Item
from model.ScreenType import ScreenType
from model.SquareType import SquareType
from model.AIPlayer import AIPlayer
class BoardView:
    """Pygame view/controller for an 8x8 Othello-style board.

    Draws the grid, discs, scores and end screen, animates the "valid move"
    hints with a pulsing alpha counter, and drives two AIPlayer opponents in
    the main loop.
    """
    def __init__(self, boardGame):
        # 600x600 window: a 60px border around an 8x8 grid of 60px cells.
        self.SCREEN_WIDTH = 600
        self.SCREEN_HEIGHT = 600
        self.boardScreen = pygame.display.set_mode([self.SCREEN_WIDTH, self.SCREEN_HEIGHT])
        self.LINE_COLOR = (0, 0, 0)
        self.boardColor = (0, 250, 0)
        self.endScreenColor = (0, 0, 0, 200)  # translucent overlay (RGBA)
        # 0..100 counter driving the valid-move hint pulse animation.
        self.counter = 0
        self.running = False
        self.boardGame = boardGame
    def drawGrid(self):
        """Draw the green playing field and the 8x8 cell outlines."""
        blockSize = 60 # Set the size of the grid block
        pygame.draw.rect(self.boardScreen, self.boardColor, pygame.Rect(60, 60, 480, 480), 0)
        for x in range(1, 9):
            for y in range(1, 9):
                rect = pygame.Rect(x * blockSize, y * blockSize,
                                   blockSize, blockSize)
                pygame.draw.rect(self.boardScreen, self.LINE_COLOR, rect, 1)
    def drawBoard(self, board):
        """Draw the disc for every square of the 2-D board array."""
        for row in board:
            for item in row:
                self.drawCircle(item, None)
    def drawCircle(self, item, turn):
        """Draw one square's disc, or a translucent hint disc for a VALID square.

        `turn` selects the hint colour and is only consulted for VALID squares.
        """
        # Board (row, col) -> pixel centre, accounting for the 60px border.
        pos = ((item.row + 1) * 60 + 30, (item.col + 1) * 60 + 30)
        planetRadius = 20
        if item.val == SquareType.WHITE:
            planetColor = (255, 255, 255)
        elif item.val == SquareType.BLACK:
            planetColor = (0, 0, 0)
        elif item.val == SquareType.VALID:
            # Hint discs pulse: alpha tracks the view's counter.
            transparency = self.counter
            if turn == SquareType.WHITE:
                planetColor = (255, 255, 255, transparency + 30)
            else:
                planetColor = (0, 0, 0, transparency + 20)
            # Per-pixel alpha requires drawing on a separate SRCALPHA surface.
            surface = pygame.Surface((self.SCREEN_WIDTH, self.SCREEN_HEIGHT), pygame.SRCALPHA)
            pygame.draw.circle(surface, planetColor, pos, planetRadius)
            self.boardScreen.blit(surface, (0, 0))
            return
        else:
            return
        pygame.draw.circle(self.boardScreen, planetColor, pos, planetRadius)
    def move(self, board, event):
        """Translate a mouse click into a board move; ignore clicks on grid lines."""
        if event.pos[1] % 60 == 0 or event.pos[0] % 60 == 0:
            return
        row = event.pos[0] // 60
        col = event.pos[1] // 60
        board.move(row - 1, col - 1)
    def showValidMoves(self, board):
        """Overlay a translucent hint disc on every currently legal square."""
        for square in board.squares:
            item = Item(square.row, square.col, SquareType.VALID)
            self.drawCircle(item, board.turn)
    def showScores(self, color):
        """Render one player's score: 'b' at the top-right, anything else top-left."""
        if color == 'b':
            score = self.boardGame.blackCount
            text = 'Black score : '
            pos = (440, 10)
        else:
            score = self.boardGame.whiteCount
            text = 'White score : '
            pos = (5, 10)
        largeFont = pygame.font.SysFont('comicsans', 30)
        scoreText = largeFont.render(text + str(score), 1, (0, 0, 0))
        self.boardScreen.blit(scoreText, pos)
    def showEndScreen(self):
        """Dim the board and show the winner, final scores, and a Restart button."""
        largeFont = pygame.font.SysFont('comicsans', 50)
        # main rect for end screen
        surface = pygame.Surface((self.SCREEN_WIDTH, self.SCREEN_HEIGHT), pygame.SRCALPHA)
        rect = (40, 40, 520, 520)
        pygame.draw.rect(surface, self.endScreenColor, rect, 0)
        self.boardScreen.blit(surface, (0, 0))
        # button to restart
        # TODO
        surface2 = pygame.Surface((self.SCREEN_WIDTH, self.SCREEN_HEIGHT), pygame.SRCALPHA)
        rect2 = (390, 420, 145, 55)
        pygame.draw.rect(surface2, (250, 0, 0, 150), rect2, 0)
        self.boardScreen.blit(surface2, (0, 0))
        buttonPos = (400, 430)
        endText = largeFont.render('Restart', 1, (255, 255, 255))
        self.boardScreen.blit(endText, buttonPos)
        # main text for end screen
        pos = (200, 100)
        endText = largeFont.render('Game Ended', 1, (250, 0, 0))
        self.boardScreen.blit(endText, pos)
        # score related texts
        winnerPos = (140, 250)
        if self.boardGame.whiteCount > self.boardGame.blackCount:
            winnerText = largeFont.render(f"White Player Won!!", 1, (255, 255, 255))
        elif self.boardGame.whiteCount < self.boardGame.blackCount:
            winnerText = largeFont.render(f"Black Player Won!!", 1, (255, 255, 255))
        else:
            # Tie message is shorter, so recentre it.
            winnerPos = (270, 250)
            winnerText = largeFont.render(f"Tie!!", 1, (255, 255, 255))
        self.boardScreen.blit(winnerText, winnerPos)
        pos2 = (100, 390)
        endScoreText = largeFont.render(f"White Score is {self.boardGame.whiteCount}", 1, (255, 255, 255))
        self.boardScreen.blit(endScoreText, pos2)
        pos3 = (100, 450)
        endScoreText2 = largeFont.render(f"Black Score is {self.boardGame.blackCount}", 1, (255, 255, 255))
        self.boardScreen.blit(endScoreText2, pos3)
    def restart(self, event):
        """Start a fresh game when the click lands inside the Restart button bounds."""
        if 390 <= event.pos[0] <= 535 and 420 <= event.pos[1] <= 475:
            board = Board()
            self.boardGame = board
    def run(self):
        """Main loop: render, animate hints, let the two AIs move, handle events."""
        pygame.init()
        self.running = True
        speed = 1  # hint-pulse direction: +1 brightening, -1 dimming
        while self.running:
            self.boardScreen.fill((255, 255, 255))
            self.showScores('b')
            self.showScores('w')
            self.drawGrid()
            self.drawBoard(self.boardGame.board)
            self.showValidMoves(self.boardGame)
            if self.boardGame.isEnded:
                self.showEndScreen()
            # Bounce the pulse counter between 0 and 100.
            if self.counter == 100:
                speed = -1
            elif self.counter == 0:
                speed = 1
            self.counter += speed
            if self.boardGame.turn == SquareType.BLACK:
                player = AIPlayer(self.boardGame, SquareType.BLACK)
                item = player.getNextMove()
                if not self.boardGame.isEnded:
                    self.boardGame.move(item[0], item[1])
            if self.boardGame.turn == SquareType.WHITE:
                player = AIPlayer(self.boardGame, SquareType.WHITE)
                item = player.getNextMove()
                if not self.boardGame.isEnded:
                    self.boardGame.move(item[0], item[1])
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.running = False
                # elif event.type == pygame.MOUSEBUTTONDOWN and not self.boardGame.isEnded:
                #     self.move(self.boardGame, event)
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    self.restart(event)
            pygame.display.flip()
        pygame.quit()
|
from OgameBot import OgameBot
from GalaxySearcher import GalaxySearcher
from Requirements import getRequirementsTech
from planetState import PlanetState
from fleet import Fleet
from decisionMaking import find_steps
from decisionMaking import convert_steps_to_orders
# NOTE(review): real-looking credentials are hard-coded below — move them to
# configuration/secrets management before sharing or committing this script.
testing = OgameBot()
testing.launchBrowser()
testing.login('michael93509@gmail.com','oOunv72Pg744nd2d45zo','uriel')
# Target planet state the decision engine should work towards.
taki_jak_chce_osiagnac = PlanetState()
testing.getInfoAll()
taki_jak_chce_osiagnac.set('MetalMine',12)
taki_jak_chce_osiagnac.set('CrystalMine',10)
taki_jak_chce_osiagnac.set('DeuterExtractor',7)
taki_jak_chce_osiagnac.set('SolarPowerPlant',12)
taki_jak_chce_osiagnac.set('RobotFactory',4)
testing.changePlanet(2)
"""
for i in testing.mainPlanetState._attributes:
print("planetState1.set(" + "'"+i+"'" + ", " + str(testing.mainPlanetState.get(i)) + ")")
zmienna = find_steps(testing.mainPlanetState,taki_jak_chce_osiagnac)
orders = convert_steps_to_orders(testing.mainPlanetState, zmienna)
"""
"""login: mich password: oOunv72Pg744nd2d45zo universe: Wezn"""
|
import numpy as np
import random
class Player:
    """Monty Hall contestant: 'donkey' keeps the first pick, 'switcher' switches."""

    def __init__(self, strategy):
        self.strategy = strategy

    def choose(self, init_choice: int, host_choice: int, doors: np.array) -> int:
        """Return the player's final door index after the host opens a door."""
        if self.strategy == 'donkey':
            # Stick with the original pick.
            return init_choice
        elif self.strategy == 'switcher':
            # Switch to a uniformly random door other than the pick and the opened one.
            candidates = [idx for idx in range(len(doors)) if idx != host_choice and idx != init_choice]
            return candidates[random.randint(0, len(candidates) - 1)]
class Host:
    """Monty Hall host: 'knows-it-all' always opens a losing door, 'random' opens any other."""

    def __init__(self, strategy):
        self.strategy = strategy

    def choose(self, player_choice: int, doors: np.array) -> int:
        """Return the index of the door the host opens (never the player's pick)."""
        candidates = []
        if self.strategy == 'random':
            candidates = [idx for idx in range(len(doors)) if idx != player_choice]
        elif self.strategy == 'knows-it-all':
            # Only doors that hide no reward are eligible.
            candidates = [idx for idx in range(len(doors)) if idx != player_choice and doors[idx] == 0]
        return candidates[random.randint(0, len(candidates) - 1)]
def play_game(player: Player, host: Host, n_doors: int, k_rewards: int) -> int:
    """Simulate one round; return the reward value (1.0 or 0.0) behind the final pick."""
    doors = np.zeros(n_doors)
    # Hide k rewards behind distinct random doors.
    lucky_doors = random.sample(range(n_doors), k_rewards)
    for winner in lucky_doors:
        doors[winner] = 1
    first_pick = random.randint(0, n_doors - 1)
    opened = host.choose(first_pick, doors)
    final_pick = player.choose(first_pick, opened, doors)
    return doors[final_pick]
if __name__ == '__main__':
    # Monte-Carlo comparison of every player/host strategy combination.
    i_GAMES = 100000
    n_DOORS = 3
    k_REWARDS = 1
    players = [Player(strategy='donkey'), Player(strategy='switcher')]
    hosts = [Host(strategy='knows-it-all'), Host(strategy='random')]
    for player in players:
        for host in hosts:
            successes = 0
            for _ in range(i_GAMES):
                successes += play_game(player, host, n_DOORS, k_REWARDS)
            # With a knowing host, the switcher should win roughly 2/3 of games.
            print(f'player={player.strategy:<10} host={host.strategy:<13} wins={100 * successes/i_GAMES:.2f}%')
|
from ED6ScenarioHelper import *
def main():
# 空贼要塞
CreateScenaFile(
FileName = 'C1303 ._SN',
MapName = 'Bose',
Location = 'C1303.x',
MapIndex = 52,
MapDefaultBGM = "ed60031",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'乔丝特', # 9
'吉尔', # 10
'多伦', # 11
'罐子', # 12
'\u3000T', # 13
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 52,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH00310 ._CH', # 00
'ED6_DT07/CH00300 ._CH', # 01
'ED6_DT07/CH00290 ._CH', # 02
'ED6_DT07/CH02130 ._CH', # 03
'ED6_DT07/CH02120 ._CH', # 04
'ED6_DT07/CH02110 ._CH', # 05
'ED6_DT07/CH00292 ._CH', # 06
'ED6_DT07/CH00100 ._CH', # 07
'ED6_DT07/CH00101 ._CH', # 08
'ED6_DT07/CH00110 ._CH', # 09
'ED6_DT07/CH00111 ._CH', # 0A
'ED6_DT07/CH00130 ._CH', # 0B
'ED6_DT07/CH00131 ._CH', # 0C
'ED6_DT07/CH00120 ._CH', # 0D
'ED6_DT07/CH00121 ._CH', # 0E
'ED6_DT07/CH00314 ._CH', # 0F
'ED6_DT07/CH00304 ._CH', # 10
'ED6_DT07/CH00294 ._CH', # 11
'ED6_DT07/CH00311 ._CH', # 12
'ED6_DT07/CH00301 ._CH', # 13
'ED6_DT07/CH00291 ._CH', # 14
'ED6_DT07/CH00305 ._CH', # 15
'ED6_DT06/CH20065 ._CH', # 16
'ED6_DT06/CH20066 ._CH', # 17
'ED6_DT06/CH20067 ._CH', # 18
)
AddCharChipPat(
'ED6_DT07/CH00310P._CP', # 00
'ED6_DT07/CH00300P._CP', # 01
'ED6_DT07/CH00290P._CP', # 02
'ED6_DT07/CH02130P._CP', # 03
'ED6_DT07/CH02120P._CP', # 04
'ED6_DT07/CH02110P._CP', # 05
'ED6_DT07/CH00292P._CP', # 06
'ED6_DT07/CH00100P._CP', # 07
'ED6_DT07/CH00101P._CP', # 08
'ED6_DT07/CH00110P._CP', # 09
'ED6_DT07/CH00111P._CP', # 0A
'ED6_DT07/CH00130P._CP', # 0B
'ED6_DT07/CH00131P._CP', # 0C
'ED6_DT07/CH00120P._CP', # 0D
'ED6_DT07/CH00121P._CP', # 0E
'ED6_DT07/CH00314P._CP', # 0F
'ED6_DT07/CH00304P._CP', # 10
'ED6_DT07/CH00294P._CP', # 11
'ED6_DT07/CH00311P._CP', # 12
'ED6_DT07/CH00301P._CP', # 13
'ED6_DT07/CH00291P._CP', # 14
'ED6_DT07/CH00305P._CP', # 15
'ED6_DT06/CH20065P._CP', # 16
'ED6_DT06/CH20066P._CP', # 17
'ED6_DT06/CH20067P._CP', # 18
)
DeclNpc(
X = -36460,
Z = 0,
Y = -82960,
Direction = 90,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -35810,
Z = 0,
Y = -83940,
Direction = 45,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -34100,
Z = 0,
Y = -82100,
Direction = 180,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = -34310,
Z = 1000,
Y = -83180,
Direction = 135,
Unknown2 = 0,
Unknown3 = 24,
ChipIndex = 0x18,
NpcIndex = 0x1C0,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 1000,
Direction = 0,
Unknown2 = 0,
Unknown3 = 24,
ChipIndex = 0x18,
NpcIndex = 0x1C1,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclEvent(
X = -22008,
Y = -3000,
Z = -168710,
Range = -26065,
Unknown_10 = 0x7D0,
Unknown_14 = 0xFFFD625F,
Unknown_18 = 0x0,
Unknown_1C = 8,
)
DeclActor(
TriggerX = -36040,
TriggerZ = 0,
TriggerY = -121030,
TriggerRange = 800,
ActorX = -36040,
ActorZ = 1000,
ActorY = -121030,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 3,
Unknown_22 = 0,
)
DeclActor(
TriggerX = -75460,
TriggerZ = 0,
TriggerY = -119560,
TriggerRange = 1000,
ActorX = -75450,
ActorZ = 1500,
ActorY = -118890,
Flags = 0x7C,
TalkScenaIndex = 0,
TalkFunctionIndex = 9,
Unknown_22 = 0,
)
ScpFunction(
"Function_0_27A", # 00, 0
"Function_1_27B", # 01, 1
"Function_2_2C5", # 02, 2
"Function_3_2DB", # 03, 3
"Function_4_280D", # 04, 4
"Function_5_2866", # 05, 5
"Function_6_28AC", # 06, 6
"Function_7_28D7", # 07, 7
"Function_8_28FD", # 08, 8
"Function_9_2A79", # 09, 9
)
# Decompiled scenario bytecode: each Function_* def is an empty stub; the
# label()/op calls that follow re-emit the original script instructions via
# the ED6ScenarioHelper DSL. Op semantics are opaque at this level.
def Function_0_27A(): pass
label("Function_0_27A")
Return()
# Function_0_27A end
def Function_1_27B(): pass
label("Function_1_27B")
# Conditional op sequences keyed on scenario flags 0x70/0x6B, then a final
# check against value index 0x29 == 0x389 before op OP_4F.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x70, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_28D")
OP_6F(0x2, 0)
Jump("loc_294")
label("loc_28D")
OP_6F(0x2, 60)
label("loc_294")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6B, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2AB")
OP_6F(0x0, 0)
OP_72(0x0, 0x10)
Jump("loc_2AF")
label("loc_2AB")
OP_64(0x0, 0x1)
label("loc_2AF")
Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0x29), scpexpr(EXPR_PUSH_LONG, 0x389), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2C4")
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x57), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
label("loc_2C4")
Return()
# Function_1_27B end
def Function_2_2C5(): pass
label("Function_2_2C5")
# Loop: repeats OP_99 while the pushed condition holds, then returns.
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_2DA")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_2C5")
label("loc_2DA")
Return()
# Function_2_2C5 end
def Function_3_2DB(): pass
label("Function_3_2DB")
EventBegin(0x0)
FadeToDark(300, 0, 100)
SetMessageWindowPos(72, 320, 56, 3)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"有熟悉的声音传出来。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
ChrTalk(
0x101,
"#002F这里是……\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#012F嗯……\x01",
"这里应该就是首领的房间了。\x02",
)
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
0,
10,
10,
0,
(
"【看准机会冲进去】\x01", # 0
"【还是算了】\x01", # 1
)
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
FadeToBright(300, 0)
Switch(
(scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)),
(1, "loc_3EE"),
(0, "loc_3F3"),
(SWITCH_DEFAULT, "loc_280C"),
)
label("loc_3EE")
EventEnd(0x1)
Jump("loc_280C")
label("loc_3F3")
OP_20(0x5DC)
Fade(1000)
OP_6D(-34780, 0, -82570, 0)
OP_67(0, 8000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
SetChrPos(0xB, -34290, 500, -83750, 45)
SetMapFlags(0x400000)
SetChrFlags(0xB, 0x4)
SetChrFlags(0xA, 0x4)
SetChrFlags(0xB, 0x2)
SetChrSubChip(0xB, 0)
ClearChrFlags(0xB, 0x80)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0xA, 0x80)
TurnDirection(0x8, 0xA, 0)
TurnDirection(0x9, 0xA, 0)
OP_8C(0xA, 225, 0)
OP_51(0xB, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xA, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x10), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_OR), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_0D()
OP_21()
OP_1D(0x57)
Sleep(500)
ChrTalk(
0xA,
(
"#193F哼哼哼……\x01",
"女王打算出赎金了吗?\x02\x03",
"这下总算和贫穷的生活说再见了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#200F大哥,现在还不能大意。\x01",
"拿到赎金之后才能完全放心啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#211F嗯嗯,要不我们先\x01",
"计划一下怎么释放人质吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F释放人质?\x02\x03",
"喂喂,\x01",
"为啥我们非要干\x01",
"那么拖泥带水的事不可呀?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#213F哎……\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F米拉到手后\x01",
"把他们杀光不就了事了嘛。\x02\x03",
"没必要留活口。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#216F多、多伦大哥……?\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
"#201F你、你在开玩笑吧……\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F那些人质肯定\x01",
"记得我们的样子。\x02\x03",
"就算我们逃出了利贝尔,\x01",
"也难保日后没有后顾之忧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#214F但、但人质里面\x01",
"还有老人和小孩子啊。\x02\x03",
"你真的打算杀了他们吗!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F哼!混了这么久,\x01",
"你们的思维还是这么单纯。\x02\x03",
"我们可不是在玩过家家!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#215F怎、怎么会……我……\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#203F大哥……\x01",
"不好意思,我反对这样做。\x02\x03",
"要是真的这样做的话,\x01",
"空之女神也不会原谅我们的。\x02\x03",
"#200F而且……\x01",
"我也不想把染血的米拉带回故乡啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F……………………………………\x02\x03",
"吉尔,你小子……\x01",
"啥时候变得这么伟大了呀?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"#200F哎……\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
"#195F少给我说废话!\x02",
)
CloseMessageWindow()
OP_8C(0xA, 180, 400)
SetChrChipByIndex(0xA, 22)
SetChrSubChip(0xA, 0)
Sleep(150)
TurnDirection(0xA, 0x9, 0)
SetChrChipByIndex(0xB, 23)
def lambda_9DD():
label("loc_9DD")
OP_99(0xFE, 0x0, 0x7, 0x6A4)
OP_48()
Jump("loc_9DD")
QueueWorkItem2(0xB, 1, lambda_9DD)
SetChrFlags(0xB, 0x4)
SetChrFlags(0xB, 0x80)
OP_22(0x84, 0x0, 0x64)
SetChrSubChip(0xA, 1)
Sleep(150)
SetChrSubChip(0xA, 2)
ClearChrFlags(0xB, 0x80)
SetChrPos(0xB, -34270, 980, -81780, 225)
OP_96(0xB, 0xFFFF73C4, 0x190, 0xFFFEB902, 0x3E8, 0x1770)
def lambda_A3B():
OP_95(0xB, 0x0, 0xFFFFFC18, 0x0, 0x12C, 0xFA0)
ExitThread()
QueueWorkItem(0xB, 0, lambda_A3B)
def lambda_A59():
OP_9F(0xFE, 0xFF, 0xFF, 0xFF, 0x0, 0x1F4)
ExitThread()
QueueWorkItem(0xB, 1, lambda_A59)
OP_22(0x22B, 0x0, 0x64)
OP_22(0xF8, 0x0, 0x64)
OP_7C(0x0, 0x64, 0xBB8, 0x32)
OP_44(0x9, 0xFF)
OP_96(0x9, 0xFFFF704A, 0x0, 0xFFFEB3F8, 0x1F4, 0x1388)
SetChrChipByIndex(0x9, 16)
SetChrSubChip(0x9, 3)
SetChrChipByIndex(0xA, 5)
ChrTalk(
0x9,
"#205F啊啊!\x02",
)
CloseMessageWindow()
SetChrFlags(0xB, 0x80)
OP_62(0x8, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
TurnDirection(0x8, 0x9, 400)
ChrTalk(
0x8,
"#216F吉尔哥!\x02",
)
CloseMessageWindow()
SetChrFlags(0x8, 0x4)
OP_92(0x8, 0x9, 0x2BC, 0xBB8, 0x0)
ChrTalk(
0xA,
(
"#194F嘎哈哈,什么故乡呀!\x02\x03",
"好不容易才得到那么一大笔钱,\x01",
"难道你还打算浪费掉,\x01",
"去买回那种不值钱的土地吗!?\x02\x03",
"哈,我可是决定要去\x01",
"南方的度假别墅享受一番。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"#201F什么……可是……\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F要是米拉花完了,\x01",
"再干一票抢夺定期船的买卖不就搞定了。\x02\x03",
"这就是今后\x01",
"『卡普亚空贼团』要干的大事呀。\x02",
)
)
CloseMessageWindow()
Sleep(400)
ChrTalk(
0xA,
"#194F#5S嘎哇~哈哈哈!!\x02",
)
OP_7C(0x0, 0xC8, 0xBB8, 0x64)
CloseMessageWindow()
TurnDirection(0x8, 0xA, 400)
ChrTalk(
0x8,
(
"#215F多伦大哥……\x01",
"你真的要那样……?\x02\x03",
"#214F难道你真的要那样做吗!\x02",
)
)
CloseMessageWindow()
SetChrChipByIndex(0x101, 7)
SetChrFlags(0x101, 0x80)
SetChrPos(0x101, -35150, 0, -91730, 0)
OP_22(0x6, 0x0, 0x64)
Sleep(500)
ChrTalk(
0x101,
(
"#1P在这种时候突然插话\x01",
"真不好意思呢……\x02\x03",
"兄妹吵架可以放到以后吗?\x02",
)
)
CloseMessageWindow()
Sleep(100)
OP_62(0xA, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x8, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_62(0x9, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
def lambda_E03():
OP_6D(-34550, 0, -85900, 1500)
ExitThread()
QueueWorkItem(0xA, 2, lambda_E03)
SetChrChipByIndex(0x102, 9)
SetChrChipByIndex(0x104, 11)
SetChrChipByIndex(0x103, 13)
def lambda_E2A():
OP_8C(0xA, 180, 400)
ExitThread()
QueueWorkItem(0xA, 1, lambda_E2A)
def lambda_E38():
OP_8C(0x8, 180, 400)
ExitThread()
QueueWorkItem(0x8, 1, lambda_E38)
ClearChrFlags(0x101, 0x80)
def lambda_E4B():
OP_8E(0xFE, 0xFFFF768A, 0x0, 0xFFFEA9B2, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_E4B)
Sleep(200)
SetChrPos(0x103, -35150, 0, -91730, 0)
def lambda_E7C():
OP_8E(0xFE, 0xFFFF7B4E, 0x0, 0xFFFEA886, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x103, 1, lambda_E7C)
Sleep(200)
SetChrFlags(0x102, 0x4)
SetChrPos(0x102, -35150, 0, -91730, 0)
def lambda_EB2():
OP_8E(0xFE, 0xFFFF7112, 0x0, 0xFFFEA750, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x102, 1, lambda_EB2)
Sleep(200)
SetChrPos(0x104, -35150, 0, -91730, 0)
def lambda_EE3():
OP_8E(0xFE, 0xFFFF7644, 0x0, 0xFFFEA412, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x104, 1, lambda_EE3)
WaitChrThread(0x101, 0x1)
WaitChrThread(0x102, 0x1)
WaitChrThread(0x104, 0x1)
WaitChrThread(0x103, 0x1)
ClearChrFlags(0x102, 0x4)
ChrTalk(
0x8,
"#213F你、你们!?\x02",
)
CloseMessageWindow()
OP_96(0x9, 0xFFFF6D02, 0x0, 0xFFFEB434, 0x12C, 0xBB8)
SetChrChipByIndex(0x9, 4)
SetChrSubChip(0x9, 0)
def lambda_F50():
OP_8C(0x9, 180, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_F50)
ChrTalk(
0x9,
(
"#201F游击士!\x02\x03",
"怎、怎么会在这里……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x104,
(
"#035F呵呵……\x01",
"别说如此薄情的话嘛。\x02\x03",
"不正是你们用那艘飞艇\x01",
"将我们送过来的嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#201F混、混帐……\x01",
"你在开什么玩笑……\x02\x03",
"…………难道。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#006F你们不记得\x01",
"在琥珀之塔前面停过飞艇吗?\x02\x03",
"我们趁着空隙\x01",
"巧妙地藏到了船舱里。\x02\x03",
"#001F也就是偷渡啦㈱\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#214F很、很厉害嘛!\x01",
"你这个没大脑的女人。\x02",
)
)
CloseMessageWindow()
OP_62(0x101, 0x0, 2000, 0xC, 0xD, 0xFA, 0x2)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0x101,
(
"#509F谁、谁没大脑!\x02\x03",
"你这个傲慢的男人婆!\x02",
)
)
CloseMessageWindow()
OP_62(0x8, 0x0, 2000, 0xC, 0xD, 0xFA, 0x2)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0x8,
(
"#214F你、你说什么~!?\x02\x03",
"单纯女!暴力女!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#005F你、你竟敢这么说~!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#017F好了好了。\x01",
"求你们别再吵了。\x02\x03",
"#012F……人质已经被释放了,\x01",
"其他的空贼成员也都被打倒了。\x02\x03",
"现在只剩下你们了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#027F基于游击士协会的规定,\x01",
"现以协会的名义将你们逮捕归案。\x02\x03",
"劝你们放弃无谓的抵抗。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"#201F唔唔……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"#212F混蛋……\x02",
)
CloseMessageWindow()
def lambda_13B9():
OP_6D(-34330, 0, -83800, 1000)
ExitThread()
QueueWorkItem(0xA, 1, lambda_13B9)
Sleep(1000)
ChrTalk(
0xA,
(
"#193F#5P吉尔,乔丝特……\x02\x03",
"这到底是怎么回事?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"#203F对、对不起,大哥……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"#215F对不起……\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#193F#5P哼,算了。\x01",
"这次就先饶了你们。\x02\x03",
"只要把这些家伙\x01",
"通通杀光就没问题了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#005F你、你说什么!\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#194F#5P嘎哈哈,愚蠢的家伙!\x02\x03",
"就凭你们几个人\x01",
"也想逮捕我多伦·卡普亚吗?\x01",
"你们也想得太美了吧!!\x02",
)
)
CloseMessageWindow()
SetChrFlags(0xA, 0x4)
OP_96(0xA, 0xFFFF79C8, 0x3E8, 0xFFFEB9AC, 0x5DC, 0x1388)
OP_22(0x8E, 0x0, 0x64)
SetChrChipByIndex(0xA, 6)
OP_51(0xA, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_7C(0x0, 0xC8, 0xBB8, 0x64)
Sleep(500)
def lambda_1598():
OP_6D(-34220, 0, -85300, 1000)
ExitThread()
QueueWorkItem(0xA, 2, lambda_1598)
Sleep(1000)
def lambda_15B5():
OP_99(0xFE, 0x0, 0x7, 0x7D0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_15B5)
Sleep(200)
OP_22(0x1FA, 0x0, 0x64)
LoadEffect(0x2, "map\\\\mp019_00.eff")
SetChrPos(0xC, -35030, 0, -87040, 0)
PlayEffect(0x2, 0xFF, 0xA, 250, 1000, 250, 0, 0, 0, 1000, 1000, 1000, 0xC, 0, 0, 0, 0)
Sleep(1000)
def lambda_162E():
OP_96(0xFE, 0xFFFF72AC, 0x0, 0xFFFEA818, 0x1F4, 0x1770)
ExitThread()
QueueWorkItem(0x101, 1, lambda_162E)
def lambda_164C():
OP_96(0xFE, 0xFFFF7A5E, 0x0, 0xFFFEA7FA, 0x1F4, 0x1770)
ExitThread()
QueueWorkItem(0x103, 1, lambda_164C)
def lambda_166A():
OP_96(0xFE, 0xFFFF6E56, 0x0, 0xFFFEA386, 0x1F4, 0x1770)
ExitThread()
QueueWorkItem(0x102, 1, lambda_166A)
def lambda_1688():
OP_96(0xFE, 0xFFFF7C5C, 0x0, 0xFFFEA2DC, 0x1F4, 0x1770)
ExitThread()
QueueWorkItem(0x104, 1, lambda_1688)
Sleep(500)
LoadEffect(0x1, "map\\\\mp019_01.eff")
PlayEffect(0x1, 0xFF, 0xFF, -35030, 0, -87040, 0, 0, 0, 1000, 1000, 1000, 0xFF, 0, 0, 0, 0)
OP_6B(3100, 0)
OP_6B(3000, 80)
TurnDirection(0x103, 0xA, 0)
TurnDirection(0x101, 0xA, 0)
TurnDirection(0x102, 0x9, 0)
TurnDirection(0x104, 0xA, 0)
Sleep(600)
ChrTalk(
0x101,
"#504F呀啊啊!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
"#016F轻型导力炮……!\x02",
)
CloseMessageWindow()
OP_8C(0xA, 225, 0)
ChrTalk(
0xA,
(
"#195F#6P吉尔,乔丝特!\x01",
"快点给我上!\x02\x03",
"把这些家伙炸成炮灰!\x02",
)
)
CloseMessageWindow()
OP_8C(0xA, 180, 0)
Sleep(100)
SetChrChipByIndex(0x8, 18)
Sleep(100)
SetChrChipByIndex(0x9, 19)
Sleep(500)
SetChrChipByIndex(0xA, 20)
SetChrSubChip(0xA, 0)
def lambda_1804():
OP_96(0xFE, 0xFFFF768A, 0x0, 0xFFFEAC5A, 0x5DC, 0x1388)
ExitThread()
QueueWorkItem(0xA, 1, lambda_1804)
Sleep(100)
def lambda_1827():
OP_92(0xFE, 0x101, 0x3E8, 0x1770, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_1827)
Sleep(100)
def lambda_1841():
OP_92(0xFE, 0x101, 0x3E8, 0x1770, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_1841)
Sleep(200)
Battle(0x389, 0x0, 0x0, 0x0, 0xFF)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(1, "loc_186E"),
(SWITCH_DEFAULT, "loc_1871"),
)
label("loc_186E")
OP_B4(0x0)
Return()
label("loc_1871")
EventBegin(0x0)
OP_44(0x8, 0xFF)
OP_44(0x9, 0xFF)
OP_44(0xA, 0xFF)
SetChrChipByIndex(0x0, 65535)
SetChrChipByIndex(0x1, 65535)
SetChrChipByIndex(0x2, 65535)
SetChrChipByIndex(0x3, 65535)
SetChrChipByIndex(0x8, 3)
SetChrChipByIndex(0x9, 4)
SetChrChipByIndex(0xA, 17)
OP_51(0x8, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0x9, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xA, 0x8, (scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
SetChrPos(0x8, -36430, 0, -82700, 225)
SetChrPos(0x9, -38890, 0, -82380, 135)
SetChrPos(0xA, -37360, 0, -81500, 180)
SetChrPos(0x101, -38990, 0, -84930, 0)
SetChrPos(0x102, -38900, 0, -86070, 0)
SetChrPos(0x104, -37640, 0, -86250, 0)
SetChrPos(0x103, -37780, 0, -84870, 0)
OP_6D(-37600, 0, -82870, 0)
OP_67(0, 8000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
FadeToBright(1000, 0)
Sleep(1000)
ChrTalk(
0x9,
(
"#203F太、太强了……\x02\x03",
"这就是游击士的实力吗……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#215F可、可恶~……\x01",
"竟然输给了这个女人。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#502F哼哼,这是当然的啦㈱\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#027F好了,胜负已分。\x01",
"你们老老实实地投降吧。\x02\x03",
"#021F要是再敢抵抗的话,\x01",
"……后果你应该很明白的吧?\x02",
)
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
SetMessageWindowPos(72, 320, 56, 3)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"雪拉扎德一边抽着鞭子一边向乔丝特微笑。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_62(0x8, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
Sleep(1000)
ChrTalk(
0x8,
(
"#216F呀……\x01",
"不要,饶了我吧!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#203F呜呜……\x01",
"大势已去了吗……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
"………唔……………\x02",
)
CloseMessageWindow()
OP_6D(-37530, 0, -82040, 1000)
OP_99(0xA, 0x3, 0x0, 0x258)
ChrTalk(
0xA,
(
"#197F痛痛痛……\x01",
"到底怎么回事。\x02\x03",
"身体到处都在疼……\x02",
)
)
CloseMessageWindow()
Sleep(200)
Fade(250)
SetChrChipByIndex(0xA, 5)
OP_0D()
ChrTalk(
0xA,
(
"#192F我怎么……\x01",
"拿着这个导力炮啊?\x02\x03",
"…………咦?\x02",
)
)
CloseMessageWindow()
TurnDirection(0x9, 0xA, 400)
ChrTalk(
0x9,
"#201F大哥?\x02",
)
CloseMessageWindow()
TurnDirection(0x8, 0xA, 400)
ChrTalk(
0x8,
"#212F多伦大哥?\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#191F哦哦,乔丝特!\x01",
"从洛连特回来了吗?\x02\x03",
"这么快就回来了,\x01",
"果然还是失手了吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#213F哎……?\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#191F嘎哈哈,别想骗我了。\x02\x03",
"算了,只要你能接受教训就行了。\x01",
"以后这些要蛮力的差事还是交给我们吧。\x02\x03",
"虽然这样赚得少点,\x01",
"不过只要慢慢积累就行了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#216F多、多伦大哥,你在说什么呀?\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#201F大哥,振作点!\x02\x03",
"乔丝特她老早\x01",
"就从洛连特回来了。\x02\x03",
"袭击了定期船后,\x01",
"我不是还去迎接过你吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#192F啥?\x01",
"袭击定期船?\x02\x03",
"老弟你在说什么梦话呀?\x02\x03",
"那么危险的事,\x01",
"只有白痴才会去做。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"#201F……………………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"#213F……………………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
"#002F(这家伙在说什么呢?)\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#012F(嗯……\x01",
" 不像是故意开脱罪行的样子……)\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#190F刚才就注意到了,\x01",
"这些奇怪的家伙是谁啊?\x02\x03",
"难道是新来的?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#020F很遗憾不是。\x02\x03",
"我们是游击士协会的。\x02",
)
)
CloseMessageWindow()
OP_62(0xA, 0x0, 2300, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0xA,
"#192F#5S啥!?\x02",
)
OP_7C(0x0, 0xC8, 0xBB8, 0x64)
CloseMessageWindow()
ChrTalk(
0xA,
(
"#192F为、为啥\x01",
"游击士会在这里的啊!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#007F这么说……\x01",
"他好像突然失忆了呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x104,
(
"#031F哈·哈·哈。\x01",
"看来剧情变得越来越有趣了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#022F就算你突然失忆了,\x01",
"我们也还是要将你逮捕归案。\x02\x03",
"抢夺定期船、监禁人质、\x01",
"要求赎金等案件都是既定的嫌疑。\x02",
)
)
CloseMessageWindow()
OP_62(0xA, 0x0, 2300, 0x14, 0x17, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0xA,
(
"#192F抢夺定期船……\x01",
"监禁人质……要求赎金!?\x02\x03",
"吉尔!乔丝特!\x01",
"这、这开的是啥玩笑呀!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#215F多伦大哥……\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#204F我们也想知道是怎么回事啊……\x02\x03",
"#201F不过,多亏了大哥,\x01",
"……这下有机会了!\x02",
)
)
CloseMessageWindow()
SetChrChipByIndex(0x9, 21)
OP_8C(0x9, 180, 400)
Sleep(200)
OP_99(0x9, 0x4, 0x5, 0x320)
LoadEffect(0x0, "map\\\\mp004_00.eff")
SetChrPos(0xC, -38180, -3000, -85370, 0)
PlayEffect(0x0, 0xFF, 0x9, 250, 900, 330, 0, 0, 0, 1000, 1000, 1000, 0xC, 0, 0, 0, 0)
SetChrChipByIndex(0x9, 1)
Sleep(1300)
OP_22(0x7F, 0x0, 0x64)
FadeToDark(500, 16777215, -1)
OP_0D()
SetChrPos(0x9, -35240, 0, -89190, 0)
SetChrPos(0x8, -35240, 0, -89190, 0)
SetChrPos(0xA, -35240, 0, -89190, 0)
SetChrFlags(0x9, 0x80)
SetChrFlags(0x8, 0x80)
SetChrFlags(0xA, 0x80)
ChrTalk(
0x101,
"#004F#1P啊啊!\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#024F#1P糟了!\x01",
"又是这招……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
"#192F喂、喂……!\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"#213F吉尔哥!\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"#201F有话以后再说吧!\x01",
"现在还是先逃为妙!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x104,
(
"#034F#5P咳咳……\x02\x03",
"呛、呛到烟了……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#016F#5P赶快离开这个房间!\x02",
)
CloseMessageWindow()
Sleep(100)
OP_6D(-35986, 0, -121600, 0)
OP_6F(0x0, 20)
Sleep(500)
FadeToBright(1000, 16777215)
OP_0D()
OP_43(0x101, 0x1, 0x0, 0x4)
OP_43(0x102, 0x1, 0x0, 0x5)
OP_43(0x103, 0x1, 0x0, 0x6)
WaitChrThread(0x101, 0x1)
ChrTalk(
0x101,
(
"#005F那些家伙~\x01",
"跑到哪里去了!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#016F在上面……\x01",
"他们打算坐空贼飞艇逃走。\x02",
)
)
CloseMessageWindow()
OP_8C(0x101, 90, 400)
ChrTalk(
0x101,
"#580F啊……!\x02",
)
CloseMessageWindow()
ChrTalk(
0x103,
(
"#024F#5P现在追还来得及,\x01",
"绝对不能再让他们逃掉!\x02\x03",
"全力追击!\x02",
)
)
CloseMessageWindow()
TurnDirection(0x101, 0x103, 400)
ChrTalk(
0x101,
"#002F嗯!\x02",
)
CloseMessageWindow()
TurnDirection(0x102, 0x103, 400)
ChrTalk(
0x102,
"#012F明白了!\x02",
)
CloseMessageWindow()
OP_43(0x104, 0x1, 0x0, 0x7)
WaitChrThread(0x104, 0x1)
ChrTalk(
0x104,
(
"#034F咳咳……救、救命……\x02\x03",
"啊,真是悲剧!\x01",
"我精致完美的鼻腔啊……\x02",
)
)
CloseMessageWindow()
def lambda_2713():
TurnDirection(0xFE, 0x104, 400)
ExitThread()
QueueWorkItem(0x102, 1, lambda_2713)
def lambda_2721():
TurnDirection(0xFE, 0x104, 400)
ExitThread()
QueueWorkItem(0x103, 1, lambda_2721)
TurnDirection(0x101, 0x104, 400)
WaitChrThread(0x103, 0x1)
ChrTalk(
0x101,
(
"#005F喂,奥利维尔!\x01",
"再不快点就丢下你了!\x02",
)
)
CloseMessageWindow()
OP_62(0x104, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
ChrTalk(
0x104,
(
"#036F哇哇……\x01",
"等、等一下嘛!\x02",
)
)
CloseMessageWindow()
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x1F), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_20(0x5DC)
Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0xA), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_27E1")
OP_8E(0x104, 0xFFFF7360, 0x0, 0xFFFE22BC, 0xBB8, 0x0)
label("loc_27E1")
OP_A2(0x358)
OP_28(0x39, 0x1, 0x40)
OP_28(0x39, 0x1, 0x80)
ClearMapFlags(0x400000)
EventEnd(0x0)
OP_70(0x0, 0x0)
OP_71(0x0, 0x10)
OP_64(0x0, 0x1)
OP_21()
OP_1E()
Jump("loc_280C")
label("loc_280C")
Return()
# Function_3_2DB end
def Function_4_280D(): pass

label("Function_4_280D")

# Cutscene movement helper (decompiled ED6 scenario bytecode).
# Places the invoking character (0xFE) at a fixed spot, walks it along two
# waypoints, then cycles through three headings (90 -> 265 -> 180).
# NOTE(review): values like 0xFFFF7266 appear to be negative map coordinates
# encoded as unsigned 32-bit ints -- confirm against the decompiler docs.
SetChrPos(0xFE, -36050, 0, -119700, 0)
OP_8E(0xFE, 0xFFFF7266, 0x0, 0xFFFE21F4, 0x1388, 0x0)
OP_8E(0xFE, 0xFFFF6EB0, 0x0, 0xFFFE1C72, 0x1388, 0x0)
# Quick look left/right before settling on heading 180.
OP_8C(0xFE, 90, 400)
Sleep(200)
OP_8C(0xFE, 265, 400)
Sleep(200)
OP_8C(0xFE, 180, 400)
Return()

# Function_4_280D end
def Function_5_2866(): pass

label("Function_5_2866")

# Variant of Function_4 with a staggered start: waits 800 ms, then walks the
# same entry waypoint plus one more, ending facing heading 90.
Sleep(800)
SetChrPos(0xFE, -36050, 0, -119700, 0)
OP_8E(0xFE, 0xFFFF7266, 0x0, 0xFFFE21F4, 0x1388, 0x0)
OP_8E(0xFE, 0xFFFF7586, 0x0, 0xFFFE1DBC, 0x1388, 0x0)
OP_8C(0xFE, 90, 400)
Return()

# Function_5_2866 end
def Function_6_28AC(): pass

label("Function_6_28AC")

# Third staggered entry: waits 1600 ms, then moves to a single waypoint.
# Together with Functions 4/5/7 this staggers party members entering the room.
Sleep(1600)
SetChrPos(0xFE, -36050, 0, -119700, 0)
OP_8E(0xFE, 0xFFFF7266, 0x0, 0xFFFE21F4, 0x1388, 0x0)
Return()

# Function_6_28AC end
def Function_7_28D7(): pass

label("Function_7_28D7")

# Slow straggler entry (no delay, lower speed parameter 0x7D0 vs 0x1388):
# place at the shared start position and walk to one waypoint.
SetChrPos(0xFE, -36050, 0, -119700, 0)
OP_8E(0xFE, 0xFFFF7388, 0x0, 0xFFFE27A8, 0x7D0, 0x0)
Return()

# Function_7_28D7 end
def Function_8_28FD(): pass

label("Function_8_28FD")

# "Push the rock wall" interaction (decompiled ED6 scenario bytecode).
# The Jc guard jumps straight to loc_2A78 (the exit label) when its
# expression over scenario flags (0x6A, 7) and (0x6B, 5) holds, so the
# event body only plays in the complementary flag state.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6A, 7)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x6B, 5)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_2A78")
EventBegin(0x0)

# Narration box: the road ends at a rock face.
FadeToDark(300, 0, 100)
SetMessageWindowPos(72, 320, 56, 3)
SetChrName("")
AnonymousTalk(
    (
        scpstr(SCPSTR_CODE_COLOR, 0x5),
        "道路的尽头是一面岩壁。\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)

# Party dialogue: characters 0x101 / 0x102 suggest pushing the wall.
ChrTalk(
    0x101,
    "#505F这里不能走了?\x02",
)
CloseMessageWindow()
ChrTalk(
    0x102,
    (
        "#012F不……\x01",
        "前面好像有什么。\x02\x03",
        "试着推推看吧?\x02",
    )
)
CloseMessageWindow()

# Two-option menu; the chosen index is read back via EXPR_GET_RESULT below.
FadeToDark(300, 0, 100)
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0x18), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Menu(
    0,
    10,
    10,
    0,
    (
        "【推岩壁】\x01",  # 0: push the wall
        "【不推】\x01",  # 1: leave it alone
    )
)
MenuEnd(0x0)
OP_4F(0x28, (scpexpr(EXPR_PUSH_LONG, 0xFFFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_5F(0x0)
OP_56(0x0)
FadeToBright(300, 0)

# Branch on the menu choice.
Switch(
    (scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_END)),
    (0, "loc_2A4B"),
    (1, "loc_2A5A"),
    (SWITCH_DEFAULT, "loc_2A78"),
)

label("loc_2A4B")

# Choice 0: set scenario flag 0x3FB and transition to map ED6_DT01/C1401.
OP_A2(0x3FB)
NewScene("ED6_DT01/C1401 ._SN", 100, 0, 0)
IdleLoop()
Jump("loc_2A78")

label("loc_2A5A")

# Choice 1: small OP_90 movement (presumably stepping the player back),
# then close the event.
OP_90(0x0, 0x0, 0x0, 0x5DC, 0xBB8, 0x0)
Sleep(50)
EventEnd(0x4)
Jump("loc_2A78")

label("loc_2A78")

Return()

# Function_8_28FD end
def Function_9_2A79(): pass

label("Function_9_2A79")

# Treasure-chest interaction.  The outer Jc jumps to loc_2B74 (the
# "nothing inside" branch) when its test on scenario flag (0x70, 4) holds;
# otherwise the item-handout branch below runs.
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x70, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2B74")

# Chest-open sound effect and animation.
OP_22(0x2B, 0x0, 0x64)
OP_70(0x2, 0x3C)
Sleep(500)

# Inner Jc: OP_3E(0x1FC, 1) presumably attempts to add item 0x1FC to the
# inventory; one outcome falls through to the "obtained" message, the other
# jumps to loc_2AF3 ("can't carry more").  Confirm the branch polarity
# against the opcode documentation.
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x1FC, 1)"), scpexpr(EXPR_END)), "loc_2AF3")
FadeToDark(300, 0, 100)
OP_22(0x11, 0x0, 0x64)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
AnonymousTalk(
    (
        scpstr(SCPSTR_CODE_COLOR, 0x0),
        "得到了\x07\x02",
        "复苏药\x07\x00",
        "。\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
# Set scenario flag 0x384 (presumably marks this chest as looted).
OP_A2(0x384)
Jump("loc_2B71")

label("loc_2AF3")

# Inventory-full path: show the refusal message and close the chest again.
FadeToDark(300, 0, 100)
SetChrName("")
AnonymousTalk(
    (
        "宝箱里装有\x07\x02",
        "复苏药\x07\x00",
        "。\x01",
        "不过现有的数量太多,\x07\x02",
        "复苏药\x07\x00",
        "不能再拿更多了。\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_22(0x2C, 0x0, 0x64)
OP_6F(0x2, 60)
OP_70(0x2, 0x0)

label("loc_2B71")

Jump("loc_2BF6")

label("loc_2B74")

# Already-empty chest: plain narration only.
FadeToDark(300, 0, 100)
AnonymousTalk(
    (
        scpstr(SCPSTR_CODE_COLOR, 0x5),
        "宝箱里什么东西都没有。\x07\x00\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
OP_83(0xF, 0xF)

label("loc_2BF6")

# Common epilogue: release the interaction lock set at entry.
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()

# Function_9_2A79 end
# Script epilogue emitted by the decompiler: persist the rebuilt scenario
# file, then run `main` under the tool's Try wrapper.
SaveToFile()

Try(main)
|
"""empty message
Revision ID: bd2b63bd04bc
Revises:
Create Date: 2020-03-27 20:31:38.665362
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bd2b63bd04bc'   # this migration's id
down_revision = None        # None -> first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema.

    Tables: ``company``, ``user``, ``job`` (FK to company, CASCADE delete)
    and ``delivery`` (FKs to company/job/user, SET NULL delete), plus
    secondary indexes on the columns the application filters by.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('company',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('email', sa.String(length=64), nullable=False),
        sa.Column('password', sa.String(length=128), nullable=False),
        sa.Column('is_enable', sa.Boolean(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=32), nullable=False),
        sa.Column('website', sa.String(length=256), nullable=True),
        sa.Column('address', sa.String(length=64), nullable=True),
        sa.Column('logo', sa.String(length=128), nullable=True),
        sa.Column('role', sa.SmallInteger(), nullable=True),
        sa.Column('finance_stage', sa.String(length=16), nullable=True),
        sa.Column('field', sa.String(length=16), nullable=True),
        sa.Column('description', sa.String(length=256), nullable=True),
        sa.Column('details', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email'),
        sa.UniqueConstraint('name')
    )
    op.create_index(op.f('ix_company_is_enable'), 'company', ['is_enable'], unique=False)
    op.create_table('user',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('email', sa.String(length=64), nullable=False),
        sa.Column('password', sa.String(length=128), nullable=False),
        sa.Column('is_enable', sa.Boolean(), nullable=True),
        sa.Column('id', sa.BigInteger(), nullable=False),
        sa.Column('name', sa.String(length=8), nullable=False),
        sa.Column('resume', sa.String(length=128), nullable=True),
        sa.Column('role', sa.SmallInteger(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email')
    )
    op.create_index(op.f('ix_user_is_enable'), 'user', ['is_enable'], unique=False)
    # job rows are deleted together with their company (ondelete CASCADE).
    op.create_table('job',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=False),
        sa.Column('salary_min', sa.SmallInteger(), nullable=False),
        sa.Column('salary_max', sa.SmallInteger(), nullable=False),
        sa.Column('company_id', sa.Integer(), nullable=True),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('treatment', sa.Text(), nullable=True),
        sa.Column('exp', sa.String(length=16), nullable=True),
        sa.Column('education', sa.String(length=16), nullable=True),
        sa.Column('city', sa.String(length=8), nullable=True),
        sa.Column('tags', sa.String(length=64), nullable=True),
        sa.Column('is_enable', sa.Boolean(), nullable=True),
        sa.ForeignKeyConstraint(['company_id'], ['company.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_job_city'), 'job', ['city'], unique=False)
    op.create_index(op.f('ix_job_education'), 'job', ['education'], unique=False)
    op.create_index(op.f('ix_job_exp'), 'job', ['exp'], unique=False)
    op.create_index(op.f('ix_job_is_enable'), 'job', ['is_enable'], unique=False)
    op.create_index(op.f('ix_job_salary_max'), 'job', ['salary_max'], unique=False)
    op.create_index(op.f('ix_job_salary_min'), 'job', ['salary_min'], unique=False)
    # delivery keeps its row when a referenced entity is deleted
    # (ondelete SET NULL), preserving the application history.
    op.create_table('delivery',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('job_id', sa.Integer(), nullable=True),
        sa.Column('user_id', sa.BigInteger(), nullable=True),
        sa.Column('company_id', sa.Integer(), nullable=True),
        sa.Column('resume', sa.String(length=128), nullable=True),
        sa.Column('status', sa.SmallInteger(), nullable=True),
        sa.Column('company_response', sa.String(length=256), nullable=True),
        sa.ForeignKeyConstraint(['company_id'], ['company.id'], ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['job_id'], ['job.id'], ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='SET NULL'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_delivery_status'), 'delivery', ['status'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop everything created by :func:`upgrade`.

    Order matters: indexes before their table, and child tables
    (delivery, job) before the tables they reference (user, company).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_delivery_status'), table_name='delivery')
    op.drop_table('delivery')
    op.drop_index(op.f('ix_job_salary_min'), table_name='job')
    op.drop_index(op.f('ix_job_salary_max'), table_name='job')
    op.drop_index(op.f('ix_job_is_enable'), table_name='job')
    op.drop_index(op.f('ix_job_exp'), table_name='job')
    op.drop_index(op.f('ix_job_education'), table_name='job')
    op.drop_index(op.f('ix_job_city'), table_name='job')
    op.drop_table('job')
    op.drop_index(op.f('ix_user_is_enable'), table_name='user')
    op.drop_table('user')
    op.drop_index(op.f('ix_company_is_enable'), table_name='company')
    op.drop_table('company')
    # ### end Alembic commands ###
|
from __future__ import division
import os
import numpy as np
from scipy import misc as ms
import sys
import re
import cv2
###################################################
def write_kitti_png(path, flow, valid=None):
    """Encode a 2-channel flow field in the KITTI uint16 PNG layout and write it.

    Channels 0/1 carry u/v scaled by 64 with a 2**15 offset; channel 2 is the
    validity mask (all-ones when ``valid`` is None).
    """
    rows, cols = flow.shape[0], flow.shape[1]
    encoded = np.ones((rows, cols, 3), dtype=np.float64)
    encoded[..., :2] = flow.astype(np.float64) * 64.0 + 2 ** 15
    if valid is not None:
        encoded[..., 2] = valid
    write_PNG_u16(path, encoded.astype('uint16'))
def write_PNG_u16(path, flow):
    """ Does not check if input flow is multichannel. """
    print(flow.shape)
    # Reverse the channel axis (RGB -> BGR order expected by cv2.imwrite).
    if not cv2.imwrite(path, flow[..., ::-1]):
        print('Flow not written')
def read_flow_flo(filename):
    """Read a Middlebury .flo optical-flow file.

    Returns an (h, w, 2) float32 array, or None when the magic-number check
    fails (an error message is printed in that case, matching the original
    best-effort behaviour).
    """
    data2d = None
    # `with` guarantees the handle is closed on every path; the previous
    # version leaked the file object when the magic number was wrong.
    with open(filename, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if magic != 202021.25:
            print('Magic number incorrect. Invalid .flo file.')
        else:
            w = np.fromfile(f, np.int32, count=1)
            h = np.fromfile(f, np.int32, count=1)
            # w and h are 1-element arrays; np.fromfile needs a plain int count.
            data2d = np.fromfile(f, np.float32, count=int(2 * w * h))
            # reshape data into 3D array (rows, columns, channels)
            data2d = np.resize(data2d, (int(h), int(w), 2))
    return data2d
def crop_center(img, cropx, cropy):
    """Return the central cropy-by-cropx window of a 3-axis (H, W, C) image."""
    height, width, _channels = img.shape
    x0 = width // 2 - cropx // 2
    y0 = height // 2 - cropy // 2
    return img[y0:y0 + cropy, x0:x0 + cropx]
###################################################
# Batch driver: center-crop ground-truth flow images from flow_noc_test/
# into GT_test/, and convert predicted .flo files from predicted/ into
# KITTI-style uint16 PNGs in converted/.
# NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2;
# this script presumably targets an older SciPy -- confirm before running.
filenames = os.listdir('flow_noc_test')
print('############ ground truth renaming.... ############')
# Sort so the zero-padded output indices follow filename order.
filenames = np.sort(filenames)
print(filenames)
for i, name in enumerate(filenames):
    print(i, name)
    print('%06d' % i)
    tempImage = ms.imread('flow_noc_test/' + name)
    # Crop every frame to a common 1216x320 window.
    tempImage = crop_center(tempImage, 1216, 320)
    print(tempImage.shape)
    ms.imsave('GT_test/' + ('%06d' % i) + '.png', tempImage)

filenames = os.listdir('predicted')
print('############ predicted conversion.... ############')
filenames = np.sort(filenames)
print(filenames)
for i, name in enumerate(filenames):
    print(i, name)
    data2d = read_flow_flo('predicted/' + name)
    print(data2d.shape)
    # Swap the .flo extension for .png, keeping the basename.
    name = name.split('.')[0]
    name = name + '.png'
    write_kitti_png('converted/' + name, data2d)
|
import sys
sys.path.append('../streamlit-recommendation')
import pytest
from helper import data_processing
from helper import lookup
from helper.recommendation import get_recomendation
import streamlit as st
from random import randint
def test_data():
    """Smoke-test data_processing.load_data: both frames are non-empty and
    every movie row carries a youtube_url."""
    # clear streamlit cache because load_data uses cache decorator
    # NOTE(review): streamlit.caching was removed in newer Streamlit
    # releases -- confirm the pinned version still exposes it.
    from streamlit import caching
    caching.clear_cache()
    df1, df2 = data_processing.load_data()
    assert df1.shape[0] > 0
    assert df1.shape[1] > 0
    # no movie may be missing its trailer link
    assert df1[df1['youtube_url'].isna()].shape[0] == 0
    assert df2.shape[0] > 0
    assert df2.shape[1] > 0
final_movie_df, final_rating_df = data_processing.load_data()
@pytest.mark.parametrize("df", [final_movie_df])
def test_random_selection(df):
    """Ten rounds of sampling 10 movies; the first sampled trailer link of
    each round must be renderable by st.video."""
    for i in range(10):
        data = df.sample(10)
        # only the first row's link is checked each round
        link = data['youtube_url'].values[0]
        assert data.shape[0] == 10
        assert st.video(link)
@pytest.mark.parametrize("df, selected_genres", [(final_movie_df['genres'], 'Documentary'),
                                                 (final_movie_df['genres'], 'Drama')])
def test_genre_filtering(df, selected_genres):
    """At least one movie should match each selected genre."""
    # NOTE(review): set('Documentary') is a set of single characters, not
    # {'Documentary'} -- confirm lookup.isin_genres really expects that.
    results = lookup.isin_genres(df, set(selected_genres))
    assert results.sum() > 0
@pytest.mark.parametrize("df, selected_years", [(final_movie_df, [randint(1902, 1999), randint(2000, 2018)]),
                                                (final_movie_df, [randint(1902, 1999), randint(2000, 2018)])])
def test_year_filtering(df, selected_years):
    """Filtering by an inclusive [start, end] year range keeps some rows.

    Fix: the upper-bound condition previously read the module-level
    final_movie_df instead of the parametrized df; both bounds now use df
    so the test actually exercises its parameter.
    """
    df = df.loc[(df['year'] >= selected_years[0]) & (df['year'] <= selected_years[1])]
    assert df.shape[0] > 0
@pytest.mark.parametrize("df, exploration", [(final_movie_df, 0),
                                             (final_movie_df, 2),
                                             (final_movie_df, 5),
                                             (final_movie_df, 8),
                                             (final_movie_df, 10)])
def test_exploration(df, exploration):
    """Recommendations across the exploration range 0..10 must yield a
    playable trailer link for the top result."""
    data = df.sample(20)
    data = get_recomendation(data, final_movie_df, final_rating_df, exploration)
    link = data['youtube_url'].values[0]
    assert st.video(link)
@pytest.mark.parametrize("df, year_filter, exploration",
                         [(final_movie_df, [randint(1902, 1999), randint(2000, 2018)], randint(0, 10)),
                          (final_movie_df, [randint(1902, 1999), randint(2000, 2018)], randint(0, 10)),
                          (final_movie_df, [randint(1902, 1999), randint(2000, 2018)], randint(0, 10)),
                          (final_movie_df, [randint(1902, 1999), randint(2000, 2018)], randint(0, 10)),
                          (final_movie_df, [randint(1902, 1999), randint(2000, 2018)], randint(0, 10))])
def test_filtering_exploration(df, year_filter, exploration):
    """Year filtering composed with recommendation still yields a playable
    trailer link.

    Fix: the upper-bound year condition previously referenced the
    module-level final_movie_df instead of the parametrized df.
    """
    df = df.loc[(df['year'] >= year_filter[0]) &
                (df['year'] <= year_filter[1])]
    data = df.sample(20)
    data = get_recomendation(data, final_movie_df, final_rating_df, exploration)
    link = data['youtube_url'].values[0]
    assert st.video(link)
# TF1-style MNIST classifier: a 784 -> 256 -> 256 -> 10 fully-connected net
# with ReLU activations, trained with Adam on a manual softmax cross-entropy.
import tensorflow as tf
import random
import matplotlib
# Select the TkAgg backend before pyplot is imported.
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
tf.set_random_seed(777)  # for reproducibility
from tensorflow.examples.tutorials.mnist import input_data
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

nb_classes = 10  # digits 0-9

# MNIST data image of shape 28 * 28 = 784
# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))

#hypothesis = (tf.matmul(L2, W3) + b3)
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
# Manual softmax + cross-entropy; the 1e-22 epsilon guards log(0).
# NOTE(review): the commented-out softmax_cross_entropy_with_logits above is
# the numerically stable form -- confirm why the manual version was kept.
last_layer = tf.matmul(L2, W3) + b3
hypothesis = tf.nn.softmax(last_layer)
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis + 1e-22), axis=1))

# logging for tensorboard
cost_hist = tf.summary.scalar("Cost", cost)

optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

# Test model
# NOTE(review): tf.arg_max is the deprecated alias of tf.argmax.
is_correct = tf.equal(tf.arg_max(hypothesis, 1), tf.arg_max(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))

# parameters
training_epochs = 15
batch_size = 100
global_steps = 0  # monotonically increasing step counter for the summaries

with tf.Session() as sess:
    summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter('./logs/relu')
    writer.add_graph(sess.graph)
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # One optimizer step; also fetch cost, merged summaries and the
            # raw logits (h) for the per-epoch printout below.
            c, _, _s, h = sess.run([cost, optimizer, summary, last_layer], feed_dict={
                X: batch_xs, Y: batch_ys})
            avg_cost += c / total_batch
            writer.add_summary(_s, global_step=global_steps)
            global_steps = global_steps + 1
        print('Epoch:', '%04d' % (epoch + 1),
              'cost =', '{:.9f}'.format(avg_cost), 'hypo =', h)

    print("Learning finished")

    # Test the model using test sets
    print("Accuracy: ", accuracy.eval(session=sess, feed_dict={
        X: mnist.test.images, Y: mnist.test.labels}))

    # Get one and predict
    r = random.randint(0, mnist.test.num_examples - 1)
    print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
    print("Prediction: ", sess.run(
        tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
|
# MSU CSE 231 Fall 2009 Project 4
# author: Joseph Malandruccolo
# date: January 13, 2013
#
# Program Specs:
# Prompt the user for a year
# Report the U.S. population
# - actual if in the past
# - projected if in the future
#constants
US_POPULATION_2013 = 315163513
SECONDS_BETWEEN_BIRTHS = 8
SECONDS_BETWEEN_DEATHS = 12
SECONDS_BETWEEN_NET_IMMIGRANT = 40
SECONDS_PER_MINUTE = 60
MINUTES_PER_HOUR = 60
HOURS_PER_DAY = 24
DAYS_PER_YEAR = 365.242
#compute population change per year
birthsPerMin = float(SECONDS_PER_MINUTE) / SECONDS_BETWEEN_BIRTHS
deathsPerMin = float(SECONDS_PER_MINUTE) / SECONDS_BETWEEN_DEATHS
immigrationPerMin = float(SECONDS_PER_MINUTE) / SECONDS_BETWEEN_NET_IMMIGRANT
netPopulationChangePerMin = birthsPerMin + immigrationPerMin - deathsPerMin
netPopulationChangePerYear = netPopulationChangePerMin * MINUTES_PER_HOUR * HOURS_PER_DAY * DAYS_PER_YEAR
#prompt the user for a year
print "Welcome to the American Console Population Estimator!"
bValidInput = False
while bValidInput == False:
try:
years = int(raw_input("How many years in the future would you like to see data for?\n"))
if years > 0:
bValidInput = True
else:
print "Year must be a positive integer (e.g. 4)"
except:
print "Year must be a positive integer (e.g. 4)"
#print results
print "2013 US Population:\t" + str(US_POPULATION_2013)
print "Estimated growth over " + str(years) + "years:\t" + str(netPopulationChangePerYear * years)
print str(2013 + years) + " US Population (Est)\t" + str(US_POPULATION_2013 + netPopulationChangePerYear * years) |
#!/usr/bin/env python
"""
Usage: ./dual_plot.py <ctab1> <ctab2> <output_prefix>

Scatter-plot transcript FPKM values from two ctab tables against each
other on symlog axes, overlay a least-squares fit line and save the
figure to <output_prefix>.png.  (Fixed: the old usage string omitted
the required third argument.)
"""
import sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

coi = ['t_name', 'FPKM']
df893 = pd.read_csv(sys.argv[1], sep="\t")[coi]
df915 = pd.read_csv(sys.argv[2], sep="\t")[coi]
# The duplicate 'FPKM' column name yields FPKM_x / FPKM_y after the merge
df_m = pd.merge(df893, df915, on='t_name')

# Least-squares line y = fit[0] * x + fit[1], drawn over the plot range
fit = np.polyfit(df_m['FPKM_x'], df_m['FPKM_y'], 1)
line_x = np.arange(10000)
line_y = fit[0] * line_x + fit[1]

plt.figure()
plt.scatter(df_m['FPKM_x'], df_m['FPKM_y'], alpha=0.4)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlabel('SRR072893 FPKM')
plt.ylabel('SRR072915 FPKM')
plt.title('FPKM for transcripts in SRR072893 vs SRR072915')
plt.xlim(0, 10000)
plt.ylim(0, 10000)
plt.plot(line_x, line_y, color='g')
plt.savefig(sys.argv[3] + '.png')
plt.close()
|
import decimal
import math
from inspect import getdoc
from pytest import raises
from fuzzyfields import (FuzzyField, MissingFieldError, DuplicateError,
MalformedFieldError, FieldTypeError)
from . import requires_pandas
class FooBar(FuzzyField):
    """Stub field that tests that the input is 'foo' and returns 'bar'
    """
    def validate(self, value):
        # Note: we'll never receive None as that is blocked upstream
        if not isinstance(value, str):
            raise FieldTypeError(self.name, value, 'foo')
        if value == 'foo':
            return 'bar'
        raise MalformedFieldError(self.name, value, 'foo')

    @property
    def sphinxdoc(self):
        # Rendered into the descriptor's generated __doc__ (compared
        # verbatim by test_doc) -- do not reword
        return "Must be 'foo'"
class Anything(FuzzyField):
    """Anything goes
    """
    def validate(self, value):
        # Accept every (non-null) value unchanged
        return value

    @property
    def sphinxdoc(self):
        return "Anything goes"
class C:
    """Some class
    """
    # Required field with an extra human-readable description
    x = FooBar(description='my first foo')
    # Optional field whose validated values must be unique across instances
    y = FooBar(required=False, unique=True)
    # Optional unique field that falls back to 'baz' when unset
    z = FooBar(required=False, default='baz', unique=True)
class D(C):
    # Subclass adds an unrestricted unique field on top of C's x/y/z
    w = Anything(unique=True)
X_HELP = """
Name
x
Type
FooBar
required
True
Unique
False
Description
Must be 'foo'
my first foo
""".strip()
Y_HELP = """
Name
y
Type
FooBar
required
False
Default
None
Unique
True
Description
Must be 'foo'
""".strip()
Z_HELP = """
Name
z
Type
FooBar
required
False
Default
baz
Unique
True
Description
Must be 'foo'
""".strip()
def test_doc():
    """getdoc() on each descriptor must render the expected help text."""
    for descriptor, expected in ((C.x, X_HELP), (C.y, Y_HELP), (C.z, Z_HELP)):
        assert getdoc(descriptor) == expected
def test_property():
    """Descriptor protocol: __get__, __set__, __delete__ and error text."""
    # Reset class-level uniqueness state possibly leaked by other tests
    C.y.seen_values.clear()
    C.z.seen_values.clear()
    c = C()
    # Test uninitialised __get__
    with raises(AttributeError) as e:
        c.x
    assert str(e.value) == "Uninitialised property: C.x"
    # Test __get__ -> __set__ round-trip
    # Also test string cleanup in preprocess()
    c.x = ' foo '
    assert c.x == 'bar'
    assert c.__dict__['x'] == 'bar'
    # Test __del__
    del c.x
    with raises(AttributeError) as e:
        c.x
    assert str(e.value) == "Uninitialised property: C.x"
    c.x = 'foo'
    assert c.x == 'bar'
    # Test Exceptions
    with raises(FieldTypeError) as e:
        c.x = []
    assert str(e.value) == (
        "Field x: Invalid field type: expected foo, got '[]'")
    with raises(MalformedFieldError) as e:
        c.x = 'other'
    assert str(e.value) == (
        "Field x: Malformed field: expected foo, got 'other'")
    with raises(MissingFieldError) as e:
        c.x = None
    assert str(e.value) == 'Field x: Missing or blank field'
def test_parse():
    """parse() used standalone: error messages carry no field name."""
    ff = FooBar()
    assert ff.parse(' foo ') == 'bar'
    with raises(FieldTypeError) as e:
        ff.parse([])
    assert str(e.value) == (
        "Invalid field type: expected foo, got '[]'")
    with raises(MalformedFieldError) as e:
        ff.parse('other')
    assert str(e.value) == (
        "Malformed field: expected foo, got 'other'")
    with raises(MissingFieldError) as e:
        ff.parse('N/A')
    assert str(e.value) == 'Missing or blank field'
    # Optional + unique field: null markers yield the default
    ff = Anything(required=False, unique=True, default=123)
    assert ff.parse(1) == 1
    assert ff.parse(' N/A ') == 123
    # Default value doesn't trigger the uniqueness check
    assert ff.parse(' N/A ') == 123
    with raises(DuplicateError) as e:
        ff.parse(1)
    assert str(e.value) == "Duplicate value: '1'"
def test_not_required():
    """Optional fields: None reads/writes fall back to the default."""
    C.y.seen_values.clear()
    C.z.seen_values.clear()
    c = C()
    # Read uninitialised non-required fields
    assert c.y is None
    assert c.z == 'baz'
    c.y = 'foo'
    c.z = 'foo'
    assert c.y == 'bar'
    assert c.z == 'bar'
    # Assigning None resets the attribute back to its default
    c.y = None
    c.z = None
    assert c.y is None
    assert c.z == 'baz'
def test_null_values():
    """Blank strings and stdlib NaN values are treated as missing."""
    ff = Anything(required=False)
    assert ff.parse(' N/A ') is None
    assert ff.parse(' ') is None
    assert ff.parse(math.nan) is None
    assert ff.parse(decimal.Decimal('nan')) is None
@requires_pandas
def test_null_values_pandas():
    """numpy/pandas NaN and NaT markers are treated as missing too."""
    import numpy
    import pandas
    ff = Anything(required=False)
    assert ff.parse(numpy.nan) is None
    assert ff.parse(numpy.datetime64('NaT')) is None
    assert ff.parse(pandas.NaT) is None
def test_unique():
    """Uniqueness is tracked per field name and shared across instances."""
    C.y.seen_values.clear()
    C.z.seen_values.clear()
    D.w.seen_values.clear()
    c = C()
    d = D()
    # FuzzyFields of the same class but different name do not share the same
    # domain
    c.y = 'foo'
    c.z = 'foo'
    with raises(DuplicateError) as e:
        c.y = 'foo'
    assert str(e.value) == "Field y: Duplicate value: 'bar'"
    # Multiple instances of the same class or subclasses share the same domain
    with raises(DuplicateError):
        d.y = 'foo'
    # Default is not tracked on seen_values
    # The seen values are saved _after_ validate()
    c.z = None
    assert C.z.seen_values == {'bar'}
    # Float and int should hit the same hash
    d.w = 1
    with raises(DuplicateError):
        d.w = 1.0
    d.w = 2
    # Track unhashable values
    d.w = [{1: 2}]
    d.w = [{1: 3}]
    with raises(DuplicateError) as e:
        d.w = [{1: 2}]
    assert str(e.value) == "Field w: Duplicate value: '[{1: 2}]'"
def test_instance_override():
    """Assigning a new FuzzyField replaces the validator on that instance
    only; the class-level descriptor is untouched."""
    c = C()
    c.x = 'foo'
    with raises(MalformedFieldError):
        c.x = 'other'
    c.x = Anything(required=False)
    c.x = 'other'
    c.x = None
    # Altering the field on the instance did not taint the class
    c = C()
    with raises(MalformedFieldError):
        c.x = 'other'
def test_copy():
    """copy() must yield a distinct object with identical state."""
    original = Anything(required=False, default='foo')
    clone = original.copy()
    assert clone is not original
    assert type(clone) == Anything
    assert clone.__dict__ == original.__dict__
def test_copy_unique():
    """copy() keeps the unique flag but starts a fresh seen_values set."""
    ff1 = Anything(unique=True)
    ff1.parse(1)
    assert ff1.seen_values == {1}
    ff2 = ff1.copy()
    assert ff2.unique is True
    # The copy starts with an empty domain; the original is unchanged
    assert ff2.seen_values == set()
    assert ff1.seen_values == {1}
|
# Use this to authenticate a login attempt.
def authenticate(uname, pword, credentials=None):
    """Return True when *uname*/*pword* match an entry in *credentials*.

    The original body was truncated and syntactically invalid
    (``if uname="``); this implementation validates against a supplied
    credentials mapping instead of a hard-coded pair.

    :param uname: username supplied by the caller
    :param pword: password supplied by the caller
    :param credentials: optional mapping of username -> password; when
        omitted or empty, no login can succeed
    :return: bool
    """
    import hmac  # local import keeps the module's import surface unchanged

    if not credentials:
        return False
    expected = credentials.get(uname)
    if expected is None:
        return False
    # compare_digest avoids leaking the match position through timing
    return hmac.compare_digest(str(expected), str(pword))
|
from preprocess_cnn import get_data
from import_data import import_data
from keras.models import Sequential
from keras.layers import Bidirectional, Masking, MaxPool2D, Conv2D, Conv1D, Input, Flatten,Reshape, ConvLSTM2D
import h5py
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.initializers import Constant
import numpy as np
from keras import backend as K
from keras.models import Model
import tensorflow as tf
feature_size = 39
width = 1
'''
def get_data():
import_data('train')
return fetch('train')
'''
def create_model():
    """Build the ConvLSTM2D + stacked-LSTM frame classifier.

    Input shape: (777, width*2+1, feature_size, 1) per sample;
    output: a 48-way softmax per timestep.
    # assumes 777 is the padded sequence length and 48 the number of
    # target classes -- TODO confirm against the dataset
    """
    #mask = Input(shape = (777,1))
    model = Sequential()
    # Convolutional LSTM over each timestep's (width*2+1, feature_size) patch
    model.add(ConvLSTM2D(filters=20, kernel_size=(3, 3),padding='same', return_sequences=True, input_shape=(777, width*2+1, feature_size,1)))
    model.add(Flatten())
    # Restore the time axis so the recurrent layers see one vector per step
    model.add(Reshape((777,-1)))
    # Skip zero-padded timesteps downstream
    model.add(Masking(mask_value=0))
    model.add(LSTM(100, return_sequences = True, dropout = 0.1, kernel_initializer='normal'))
    model.add(BatchNormalization())
    model.add(LSTM(100, return_sequences = True, dropout = 0.1, kernel_initializer='normal'))
    model.add(BatchNormalization())
    model.add(Dense(units = 128, activation = 'relu', kernel_initializer = 'normal'))
    model.add(Dropout(0.3))
    model.add(Dense(units = 128, activation = 'relu', kernel_initializer = 'normal'))
    model.add(Dropout(0.3))
    model.add(Dense(units = 48, activation = 'softmax', kernel_initializer = 'normal'))
    print('model created')
    return model
'''
inputs = tf.unstack(inputs)
#cnn
cnn=Conv2D(10, (3,3),padding='same', data_format='channels_last' ,activation = 'relu',bias_initializer=Constant(0.01), kernel_initializer='random_uniform')
#cnn_outputs = cnn(inputs)
cnn_outputs_flatten = [ Flatten()(cnn(input_cnn)) for input_cnn in inputs]
#cnn_outputs=Conv2D(32, (2, 2),padding='same', activation = 'relu',bias_initializer=Constant(0.01), kernel_initializer='random_uniform')(inputs)
#cnn_outputs_flatten = Flatten()(cnn_outputs)
masked_cnn = tf.multiply(cnn_outputs_flatten, mask)
masked = Masking(mask_value = 0)(masked_cnn)
bidirectional_outputs= Bidirectional(LSTM(128, return_sequences = True, dropout = 0.1, kernel_initializer='normal')(masked))
normalized = BatchNormalization()(bidirectional_outputs)
lstm_outputs = LSTM(96, return_sequences = True, dropout = 0.1, kernel_initializer='normal')(normalized)
normalized = BatchNormalization()(lstm_outputs)
outputs = Dense(units = 256, activation = 'relu', kernel_initializer = 'normal')(normalized)
outputs= Dropout(0.3)(outputs)
outputs = Dense(units = 128, activation = 'relu', kernel_initializer = 'normal')(outputs)
outputs= Dropout(0.3)(outputs)
outputs = Dense(units = 64, activation = 'relu', kernel_initializer = 'normal')(outputs)
outputs= Dropout(0.3)(outputs)
outputs = Dense(units = 48, activation = 'softmax', kernel_initializer = 'normal')(outputs)
'''
def train():
    """Load features, fit the model for 10 epochs and save it to disk."""
    X,Y,ids,x_len, y_len, mask_input = get_data()
    # Add a trailing channel axis, as ConvLSTM2D expects 5-D input
    X = np.array(X)[:,:,:,:,np.newaxis]
    print(X.shape)
    model = create_model()
    model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    model.fit(X, Y, batch_size = 50, epochs=10, validation_split = 0.1)
    # NOTE(review): evaluates on the training data -- no held-out test set
    score = model.evaluate(X, Y, batch_size=100)
    print(score)
    model.save("model_cRnn.h5")
if __name__ == "__main__":
    # Script entry point: train the model and save it to model_cRnn.h5
    print('here we go!')
    train()
|
#!/usr/bin/env python
from troposphere import Ref, Join
from troposphere.dynamodb2 import (KeySchema, AttributeDefinition,
ProvisionedThroughput, Table)
from troposphere.cloudwatch import Alarm, MetricDimension
def init(t, r):
    """Add the DynamoDB session table and its CloudWatch alarms to
    template *t*, registering every created resource in dict *r*.

    :param t: troposphere Template to extend
    :param r: shared resource registry; must already contain
        'dynamodb_capacity' (capacity parameter) and 'notify_topic'
        (SNS topic ARN used for alarm actions)
    """
    stackname = Ref('AWS::StackName')
    dynamodb_capacity = Ref(r['dynamodb_capacity'])
    # Create the DynamoDB Session Table
    r['sessiontable'] = t.add_resource(Table(
        "SessionTable",
        AttributeDefinitions=[
            AttributeDefinition(
                AttributeName="id",
                AttributeType="S"
            )
        ],
        KeySchema=[
            KeySchema(
                AttributeName="id",
                KeyType="HASH"
            )
        ],
        ProvisionedThroughput=ProvisionedThroughput(
            ReadCapacityUnits=dynamodb_capacity,
            WriteCapacityUnits=dynamodb_capacity
        )
    ))
    # Read and Write Capacity alarms (one alarm per capacity direction)
    for x in "Read", 'Write':
        r['sessiontable_{}alarm'.format(x.lower())] = t.add_resource(Alarm(
            "SessionTable{}CapacityAlarm".format(x),
            AlarmDescription=Join("", [stackname, "{} capacity limit on the session table".format(x)]),
            Namespace="AWS/DynamoDB",
            MetricName="Consumed{}CapacityUnits".format(x),
            Dimensions=[
                MetricDimension(
                    Name="TableName",
                    Value=Ref(r['sessiontable'])
                )
            ],
            Statistic="Sum",
            Period="300",
            EvaluationPeriods="1",
            Threshold="{}".format(240), # 80% of capacity. TODO: this needs to scale with dynamodb_capacity
            ComparisonOperator="GreaterThanThreshold",
            AlarmActions=[r['notify_topic']],
            InsufficientDataActions=[r['notify_topic']]
        ))
    # throttled requests alarm
    r['sessiontable_throttlealarm'] = t.add_resource(Alarm(
        "SessionTableThrottledRequestAlarm",
        AlarmDescription=Join("", [stackname, "requests are being throttled on the session table"]),
        Namespace="AWS/DynamoDB",
        MetricName="ThrottledRequests",
        Dimensions=[
            MetricDimension(
                Name="TableName",
                Value=Ref(r['sessiontable'])
            )
        ],
        Statistic="Sum",
        Period="300",
        EvaluationPeriods="1",
        Threshold="1", # warn about any errors
        ComparisonOperator="GreaterThanOrEqualToThreshold",
        AlarmActions=[r['notify_topic']],
        InsufficientDataActions=[r['notify_topic']]
    ))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Slicing: taking a sub-sequence of a list or tuple is a very common operation
L = ['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']
print(L[0:3])
# L[0:3] takes elements from index 0 up to, but not including, index 3 --
# i.e. indices 0, 1 and 2: exactly three elements
print(L[-3:-1])
# Remember that the index of the last element is -1
print(L[-1:])
# Writing nothing at all -- a bare [:] -- copies the whole list
print(L[:])
L = list(range(100))
# First 10 numbers, taking every second one
print(L[:10:2])
# All numbers, taking every fifth one
print(L[::5])
print((0, 1, 2, 3, 4, 5)[:3])
print('ABCDEFG'[:3])
|
# Prompt the user for a greeting and echo it back.
greeting = input("Write a greeting: ")
print("%s" % greeting)
|
from voluptuous import Schema, All, ALLOW_EXTRA, Range

# Validation schema for the application's configuration mapping.
config_schema = Schema(
    {
        'DEBUG': bool,
        'HOST': str,
        'SECRET_KEY': str,
        # NOTE(review): upper bound 10000 excludes common high ports
        # (e.g. 8080 passes but 32768 fails) -- confirm this is intended
        'PORT': All(int, Range(min=1, max=10000))
    },
    # Unknown keys are allowed and passed through unvalidated
    extra=ALLOW_EXTRA
)
|
from math import ceil
def merge_the_tools(string, k):
    """Split *string* into consecutive chunks of length *k* (the last
    chunk may be shorter) and print each chunk with duplicate characters
    removed, keeping first-occurrence order.

    :param string: input text
    :param k: chunk length (positive int)

    Replaces the original ceil()-based chunk arithmetic with a stepped
    range, and the O(k^2) membership-list dedupe with dict.fromkeys
    (dicts preserve insertion order, so order is unchanged).
    """
    for start in range(0, len(string), k):
        chunk = string[start:start + k]
        print(''.join(dict.fromkeys(chunk)))
|
class Solution(object):
def maxRotateFunction(self, nums):
if not nums: return 0
res = sum([i * nums[i] for i in xrange(len(nums))])
total, n, tmp = sum(nums), len(nums), res
for idx in xrange(-1, - n - 1, -1):
res = max(res, tmp)
tmp += total - (n * nums[idx])
return res |
import math
import string
import pyclipper
import time
from GerberReader import GerberLayer, GerberData
__author__ = 'Thompson'
"""
Employs recursive descent parsing to parse a Gerber data file.
This methodology is more flexible to bad formatting of data file
This is a fully compliant parser.
"""
""" Maximum arc length when drawing arc segments """
MAX_ARC_LENGTH = 750
""" Minimum number of segs per 360 degrees """
MIN_SEG = 17
""" Set to True to print warnings to console """
PrintWarnings = True
""" Set to True to print errors to console """
PrintErrors = True
""" Set to True to print deprecated uses to console """
PrintDeprecatedUses = False
class ParseData:
    """Mutable parser state for one Gerber data string.

    :type str: str
    :type pos: int
    :type buffer: str
    """
    def __init__(self, str):
        # Parsing is position-based, so newlines are stripped up front
        self.str = str.replace('\n','').replace('\r','')
        self.strlen = len(self.str)
        self.gd = GerberData()
        # Cursor into self.str; -2 marks end-of-input
        self.pos = -1
        self.gd = GerberData()  # NOTE(review): duplicate assignment, harmless
        self.trackData = []
        # Current layer polarity (LPD dark / LPC clear)
        self.isDark = True
        # Current point and arc offsets (i, j)
        self.x = 0
        self.y = 0
        self.i = 0
        self.j = 0
        self.xold = 0
        self.yold = 0
        # Geometry accumulated for the layer currently being parsed
        self.tracks = []
        self.regions = []
        self.pads = []
        self.trackseg = -1
        self.padseg = -1
        self.regionseg = -1
        # Currently selected aperture index
        self.aperture = -1
        # Aperture index used for each track segment
        self.tracksize = []
        self.macros = []
        self.macrosName = []
        self.macroParams = []
        self.N_macros = 0
        self.apertures = [[] for i in range(50)]
        # 1 = linear, 2 = clockwise arc, 3 = counter-clockwise arc
        self.interpMode = 1
        # 0 = single quadrant, 1 = multi quadrant
        self.arcMode = 0
        self.regionMode = False
        # Diagnostics counters
        self.errors = 0
        self.warnings = 0
        self.deps = 0

    def parseUntil(self, marker):
        """Advance the cursor up to (not past) *marker* and return the
        characters skipped."""
        st = self.pos
        while self.pos < self.strlen and self.str[self.pos] != marker:
            self.pos += 1
        return self.str[st:self.pos]

    def findNextChar(self):
        """
        :return index of next non-whitespace character or -1 if end of expression
        :rtype str

        NOTE(review): actually returns the *character* at the advanced
        position, or -2 (not -1) at end of input.
        """
        if self.pos == -2: return -2
        self.pos += 1
        if self.pos >= self.strlen:
            self.pos = -2
            return -2
        return self.str[self.pos]
        # if self.pos == -2: return -2
        # while True:
        #     self.pos += 1
        #     if self.pos >= self.strlen:
        #         self.pos = -2
        #         return -2
        #     if not self.str[self.pos].isspace():
        #         return self.str[self.pos]

    def getChar(self):
        """
        :rtype str
        """
        # Returns the sentinel (-1/-2) itself when the cursor is invalid
        return self.str[self.pos] if self.pos >= 0 else self.pos

    def parseSign(self):
        """Parse an optionally signed integer at the cursor."""
        tmp = self.getChar()
        if tmp == '-':
            self.pos += 1
            return -self.parseInt()
        else:
            if tmp == '+': self.pos += 1
            return self.parseInt()

    def parseInt(self):
        """Parse an unsigned integer, stopping at the first non-digit."""
        oldp = self.pos
        while True:
            tmp = self.getChar()
            if not tmp.isdigit():
                return int(self.str[oldp:self.pos])
            if self.findNextChar() == -2:
                return int(self.str[oldp:self.pos])

    def parseFloat(self):
        """Parse a signed decimal number (no exponent support)."""
        val = self.parseSign()
        tmp = self.getChar()
        if tmp == '.':
            if self.findNextChar() == -2: return val
            op = self.pos
            frac = self.parseInt()
            # Scale the fractional digits by their parsed width
            val += frac*(10**(op - self.pos))
        # if tmp == 'E' or tmp == 'e':
        #     self.findNextChar()
        #     val *= 10 ** self.parseInt()
        return val

    def store_to_gd(self):
        # Stores the existing set of layer data as polygonal data points
        # This is the equivalent of rasterisation of the draw commands
        li = len(self.gd.layers) / 3
        # Expand tracks from centrelines based on aperture
        track_outlines = []
        for seg in range(len(self.tracks)):
            for vert in range(len(self.tracks[seg]) - 1):
                xstart = self.tracks[seg][vert][0]
                xend = self.tracks[seg][vert + 1][0]
                ystart = self.tracks[seg][vert][1]
                yend = self.tracks[seg][vert + 1][1]
                # Minkowski sum sweeps the aperture polygon along the segment
                singletrack = pyclipper.MinkowskiSum(self.apertures[self.tracksize[seg]], \
                    [[xstart, ystart], [xend, yend]], -1)
                if len(singletrack) > 1:
                    # Keep only the largest resulting outline
                    biggest = []
                    myarea = 0
                    for mypath in singletrack:
                        newarea = pyclipper.Area(mypath)
                        if newarea > myarea:
                            biggest = mypath
                            myarea = newarea
                    singletrack = [[]]
                    singletrack[0] = (biggest)
                track_outlines.extend(singletrack)
        mergedBounds = self.union_boundary(track_outlines + self.pads, self.regions)
        self.trackData.extend(self.tracks)
        # Store data into layers.
        self.gd.layers.append(GerberLayer(self.isDark, str(li) + "_Tracks", track_outlines, type=GerberLayer.TYPE_TRACK))
        self.gd.layers.append(GerberLayer(self.isDark, str(li) + "_Boundaries", mergedBounds, False, False, "blue", GerberLayer.TYPE_BOUNDARY))
        self.gd.layers.append(GerberLayer(self.isDark, str(li) + "_Regions", self.regions, type=GerberLayer.TYPE_REGION))
        self.gd.layers.append(GerberLayer(self.isDark, str(li) + "_Pads", self.pads, type=GerberLayer.TYPE_PAD, color="#009000"))
        # clear cache
        self.regions = []
        self.pads = []
        self.tracks = []
        self.tracksize = []
        self.trackseg = -1
        self.padseg = -1
        self.regionseg = -1

    def union(self, paths, union_type=pyclipper.PFT_NONZERO):
        # performs union on list, or list of lists
        c = pyclipper.Pyclipper()
        polyclip = paths
        # for path in range(len(polyclip)):
        #     c.AddPaths(polyclip[path], pyclipper.PT_SUBJECT, True)
        c.AddPaths(polyclip, pyclipper.PT_SUBJECT, True)
        polyclip = c.Execute(pyclipper.CT_UNION, union_type, union_type)
        c.Clear()
        return polyclip

    def union_boundary(self, boundarys, regions):
        # union intersecting polygons on boundary
        if boundarys:
            boundary = self.union(boundarys, pyclipper.PFT_NONZERO)
            paths = boundary
        else:
            boundary = []
            paths = []
        if regions:
            region = self.union(regions, pyclipper.PFT_NONZERO)
            paths.extend(region)
            boundary = self.union(paths, pyclipper.PFT_NONZERO)
        if boundary:
            boundary = pyclipper.CleanPolygons(boundary)
            # boundary = pyclipper.SimplifyPolygons(boundary)
            # CleanPolygons can leave empty entries; drop the first one
            for segs in boundary:
                if len(segs) == 0:
                    boundary.remove([])
                    break
        return boundary

    def warn(self, msg):
        """Count a warning and optionally print it with the cursor position."""
        self.warnings += 1
        if PrintWarnings: print(" WARNING: " + msg + " (pos = " + str(self.pos) + ")")

    def error(self, msg):
        """Count an error and optionally print it with local context."""
        self.errors += 1
        if PrintErrors: print(" ERROR: " + msg + " (pos = " + str(self.pos) + ") " + self.str[max(0,self.pos - 20):self.pos])

    def error_line(self, msg, marker='*'):
        """Count an error and skip the rest of the data block up to *marker*."""
        self.errors += 1
        if PrintErrors: print(" WARNING: " + msg + "Ignoring data: " + self.parseUntil(marker))

    def check_char(self, chr='*'):
        """Consume the expected character *chr* or report an error."""
        if self.getChar() == chr:
            self.pos += 1
        else:
            self.error("Command missing '" + chr + "' symbol")

    def dep(self, msg):
        """Count (and optionally print) a deprecated-command usage."""
        self.deps += 1
        if PrintDeprecatedUses: print(" DEPRECATED COMMAND: " + msg + " (pos = " + str(self.pos) + ")")
def add_circle(ptlist, diameter, isCW=True):
    """Append a regular polygon approximating a circle of *diameter*
    (centred on the origin) to *ptlist*.

    Callers pass isCW=True for hole outlines and False for solid ones;
    the winding direction encodes polarity for the clipper library.
    """
    # Segment count grows with diameter but never drops below MIN_SEG
    steps = max(int(diameter/(2.0*MAX_ARC_LENGTH)), MIN_SEG)
    angleStep = 2.0 * math.pi / (steps - 1) * (-1 if isCW else 1)
    print " Circle ", steps, ", ", angleStep
    for i in range(steps):
        angle = i*angleStep
        x = (diameter / 2.0) * math.cos(angle)
        y = (diameter / 2.0) * math.sin(angle)
        ptlist.append([x, y])
def add_arc(ptlist, r, centreX, centreY, startAngle, endAngle, isCW):
    """Append a polyline approximation of a circular arc to *ptlist*.

    Angles are in radians; *isCW* selects the sweep direction. Points
    are rounded to integer coordinates before being appended.
    """
    endAngle -= startAngle # convert to delta
    if endAngle < 0: endAngle += 2*math.pi # make it positive
    if isCW == (endAngle > 0): # requires outer arc
        endAngle -= 2*math.pi
    # calc steps such that arc length < MAX_ARC_LENGTH and min seg per 360 deg is maintained
    steps = int(abs(endAngle)*max(r/MAX_ARC_LENGTH, MIN_SEG/(2*math.pi)))
    angleStep = endAngle / (steps - 1.0)
    print " Arc at (X,Y) = ", centreX, ", ", centreY
    print " start angle = ", (180*startAngle/math.pi)
    print " delta angle = ", (180*endAngle/math.pi)
    print " angle step = ", (180*angleStep/math.pi), " steps=", steps
    for i in range(steps):
        xarcseg = centreX + r * math.cos(startAngle + angleStep * i)
        yarcseg = centreY + r * math.sin(startAngle + angleStep * i)
        xarcseg = round(xarcseg)
        yarcseg = round(yarcseg)
        ptlist.append([xarcseg, yarcseg])
def load_file(filename):
    """
    Parse gerber file from file path

    :rtype filename: str
    :rtype: [GerberData, list]
    """
    # 'with' guarantees the handle is closed even if read() raises;
    # the original left the file open on error and shadowed the
    # builtins 'file' and 'str'.
    with open(filename, 'r') as fh:
        data = fh.read()
    return parse(data)
def parse(str):
    """
    Parses Gerber data file
    :return GerberData object and tracks list
    :type lines: str
    :rtype [GerberData,list]
    """
    # ParseData strips newlines; parsing proper happens in _parse0
    pd = ParseData(str)
    return _parse0(pd)
def _parse0(pd):
    """
    :type pd: ParseData

    Main recursive-descent loop: consumes one command per iteration
    (D/G/M codes, coordinates, or %-delimited extended commands) until
    M02 or end of input, then rasterises the final layer.
    """
    stt = time.time()
    pd.pos = 0
    while True:
        if pd.pos == -2 or pd.pos >= pd.strlen:
            pd.warn("File was not terminated with 'M02*' command")
            break
        tmp = pd.getChar()
        if tmp == 'D': # Draw, move, flash or set aperture command
            pd.pos += 1
            cd = pd.parseInt()
            if cd == 0 or (3 < cd < 10) or cd >= len(pd.apertures):
                pd.error_line("Invalid aperture code D" + str(cd) + ". ")
            if cd == 1: # interpolate
                if pd.interpMode == 1: # linear interpolation
                    if pd.regionMode:
                        pd.regions[pd.regionseg].append([pd.x, pd.y])
                    else:
                        pd.tracks[pd.trackseg].append([pd.x, pd.y])
                elif pd.arcMode == 0: # Single Quadrant Mode
                    if pd.i < 0:
                        pd.warn("Negative sign for offset i was ignored for SINGLE QUADRANT mode")
                        pd.i *= -1
                    if pd.j < 0:
                        pd.warn("Negative sign for offset j was ignored for SINGLE QUADRANT mode")
                        pd.j *= -1
                    pd.warn("Single quadrant mode not implemented yet")
                else: # Multi Quadrant Mode
                    # Implementation note:
                    # Draws a circle that passes through the start and end point with radius of distance
                    # from offset centre point to start point (i.e. the offset centre point will
                    # be moved such that the distance to the and end point become equal to the distance
                    # to the start point)
                    r = pow(pd.i, 2) + pow(pd.j, 2)
                    # The line that perpendicular bisects the line joining A (start) and B (end) is where
                    # the centre point will lie. The distance from centre point to A and B is r (circle radius)
                    # The distance from the line connecting AB to centre point is thus sqrt(r^2-L^2)
                    # Where L is the distance between midpoint and A or B.
                    # To optimise, square root is not taken until the end and 2L is distance between A and B
                    # and is the value that the normal must be divided by to normalise to length 1
                    # Mid Point
                    mx = (pd.xold + pd.x)/2
                    my = (pd.yold + pd.y)/2
                    # Normal
                    ny = pd.x - pd.xold
                    nx = pd.yold - pd.y
                    tmp = nx*nx + ny*ny # size of vector squared
                    tmp = math.sqrt(float(r)/tmp - 0.25) # normalise normal and scale by radius
                    if abs(pd.i - nx*tmp) + abs(pd.j - ny*tmp) > abs(pd.i + nx*tmp) + abs(pd.j + ny*tmp):
                        # pick the side the offset point lies on
                        tmp *= -1
                    centreX = mx + nx*tmp
                    centreY = my + ny*tmp
                    add_arc(pd.regions[pd.regionseg] if pd.regionMode else pd.tracks[pd.trackseg],
                            math.sqrt(r), centreX, centreY,
                            math.atan2(pd.yold - centreY, pd.xold - centreX),
                            math.atan2(pd.y - centreY, pd.x - centreX), pd.interpMode == 2)
                    # Consume offset
                    pd.i = 0
                    pd.j = 0
            elif cd == 2: # move
                if pd.regionMode: # Finish current region and creates a new one
                    pd.regions.append([])
                    pd.regionseg += 1
                    # pd.regions.append([])
                    pd.regions[pd.regionseg].append([pd.x, pd.y])
                else: # Finish current track and creates a new one
                    pd.tracksize.append(pd.aperture)
                    pd.trackseg += 1
                    pd.tracks.append([[pd.x, pd.y]])
            else:
                if pd.regionMode:
                    pd.error("Command D" + str(cd) + " is not allowed in region mode")
                if cd == 3: # flash aperture
                    pd.padseg += 1
                    pd.pads.append([])
                    # Stamp the aperture polygon at the current point
                    for verts in pd.apertures[pd.aperture]:
                        pd.pads[pd.padseg].append([pd.x + verts[0], pd.y + verts[1]])
                else: # Set aperture
                    pd.aperture = cd
            pd.check_char('*')
        elif tmp == 'G':
            pd.pos += 1
            cd = pd.parseInt()
            if 0 < cd < 4: # Linear interpolation
                pd.interpMode = cd
                if pd.getChar() != '*':
                    # Inline G0n inside a data block: keep parsing the
                    # remainder of the block from the current position
                    pd.dep("Use of G" + str(cd) + " in a data block is deprecated")
                    continue
            elif cd == 4: # Comment
                print " Comment G04: ", pd.parseUntil('*')
            elif cd == 36: # Region mode ON
                pd.regionMode = True
            elif cd == 37: # Region mode OFF
                pd.regionMode = False
            elif cd == 54:
                if pd.getChar() != 'D':
                    pd.error("Command G54 wasn't followed by valid aperture Dnn. This command is also deprecated and should be removed")
                else:
                    pd.pos += 1
                    cd = pd.parseInt()
                    if cd < 10 or cd >= len(pd.apertures):
                        pd.error("Invalid aperture index D" + str(cd) + " in deprecated commands G70.")
                    else:
                        pd.aperture = cd
                pd.dep("Deprecated command found: G54 (command was parsed but should be removed)")
            elif cd == 70: # Set mode to INCH
                pd.gd.units = 1
                pd.dep("Deprecated command found: G70 (command was parsed but should be removed)")
            elif cd == 71: # Set mode to MM
                pd.gd.units = 0
                pd.dep("Deprecated command found: G71 (command was parsed but should be removed)")
            elif cd == 74: # Set arc mode to SINGLE QUADRANT
                pd.arcMode = 0
            elif cd == 75: # Set arc mode to MULTI QUADRANT
                pd.arcMode = 1
            elif cd == 90: # set absolute coordinate format - deprecated command
                pd.dep("Deprecated command found: G90 (command was IGNORED)")
            elif cd == 91: # set incremental coordinate format - deprecated command
                pd.dep("Deprecated command found: G91 (command was IGNORED)")
            else:
                pd.error_line("Unknown code: G" + str(cd) + ". ")
            pd.check_char('*')
        elif tmp == 'M':
            pd.pos += 1
            cd = pd.parseInt()
            if cd == 2:
                # M02: normal end of file
                pd.check_char('*')
                if pd.regionMode:
                    pd.warn("End of file reached while in region mode")
                if pd.findNextChar() != -2:
                    pd.warn("Unparsed data: " + pd.str[pd.pos:-1])
                break
            else:
                pd.error_line("Invalid code: M" + str(cd) + ". ")
                pd.check_char('*')
                # continue
        elif tmp == 'X':
            pd.pos += 1
            pd.xold = pd.x
            pd.x = pd.parseSign()
        elif tmp == 'Y':
            pd.pos += 1
            pd.yold = pd.y
            pd.y = pd.parseSign()
        elif tmp == 'I':
            pd.pos += 1
            pd.i = pd.parseSign()
        elif tmp == 'J':
            pd.pos += 1
            pd.j = pd.parseSign()
        elif tmp == '%': # Extended command codes are surrounded by %
            cmd = pd.str[pd.pos+1:pd.pos+3]
            pd.pos += 2
            if cmd == "FS": # File format
                op = string.find(pd.str,'X',pd.pos)
                # if pd.findNextChar() != 'L' or pd.findNextChar() != 'A' or pd.findNextChar() != 'X':
                #     pd.error("FS command was not followed by LA")
                #     pd.pos = op
                # else:
                #     if not pd.findNextChar().isdigit() or not pd.findNextChar().isdigit():
                #         pd.error("FSLAX command must be followed by two digits. Command was ignored")
                #     else:
                #         pd.gd.fraction = int(pd.getChar())
                #         if pd.findNextChar() != 'Y':
                #             pd.error_line("FSLAXnn command was not followed by Ynn. ")
                #         else:
                #             print(" Ignored remainder of FSLA: " + pd.parseUntil('*'))
                pd.gd.fraction = int(pd.str[op + 2]) # we only care about the fractional digit
                pd.parseUntil('*')
            elif cmd == "MO": # Set unit
                tmp = pd.findNextChar()
                if tmp == 'M':
                    pd.pos += 2
                    # if pd.findNextChar() != 'M':
                    #     pd.error("Unit sepcified invalid. It was set to MM")
                    # else:
                    #     pd.findNextChar()
                    pd.gd.units = 0
                elif tmp == 'I':
                    pd.pos += 2
                    # if pd.findNextChar() != 'N':
                    #     pd.error("Unit sepcified invalid. It was set to INCH")
                    # else:
                    #     pd.findNextChar()
                    pd.gd.units = 1
                else:
                    pd.error_line("Invalid unit specifier. ")
            elif cmd == "AD": # Add aperture
                if pd.str[pd.pos + 1] != 'D':
                    pd.error("Aperture index must be specified after AD command")
                    continue
                pd.pos += 2
                pd.aperture = pd.parseInt()
                # Grow the aperture table on demand
                while len(pd.apertures) <= pd.aperture: pd.apertures.append([])
                holesize = -1
                if pd.str[pd.pos + 1] == ',':
                    tmp = pd.str[pd.pos]
                    pd.pos += 2
                    if tmp == 'C': # Circle
                        size = pd.parseFloat() * 10 ** pd.gd.fraction
                        add_circle(pd.apertures[pd.aperture], size, False)
                        if pd.getChar() == 'X':
                            pd.pos += 1
                            holesize = pd.parseFloat() * 10 ** pd.gd.fraction
                            add_circle(pd.apertures[pd.aperture], holesize, True)
                        print " read aperture", pd.aperture, ": circle diameter", size, ", hole size", holesize
                    elif tmp == 'R': # Rectangle
                        w = pd.parseFloat() * 10 ** pd.gd.fraction / 2.0
                        pd.check_char('X')
                        h = pd.parseFloat() * 10 ** pd.gd.fraction / 2.0
                        pd.apertures[pd.aperture].extend([[-w, -h], [w, -h], [w, h], [-w, h], [-w, -h]])
                        if pd.getChar() == 'X':
                            pd.pos += 1
                            holesize = pd.parseFloat() * 10 ** pd.gd.fraction
                            add_circle(pd.apertures[pd.aperture], holesize, True)
                        print " read aperture", pd.aperture, ": rectangle W", w, ", H", h, ", hole size", holesize
                    elif tmp == 'O': # Rectangle capped with semicircles
                        w = pd.parseFloat() * 10 ** pd.gd.fraction
                        pd.check_char('X')
                        h = pd.parseFloat() * 10 ** pd.gd.fraction
                        NVERTS = 16
                        if w > h:
                            # Horizontal obround: two semicircles left/right
                            for i in range(NVERTS / 2):
                                angle = i * math.pi / (NVERTS / 2 - 1.0) + math.pi / 2.0
                                x = -(w - h) / 2.0 + (h / 2.0) * math.cos(angle)
                                y = (h / 2.0) * math.sin(angle)
                                pd.apertures[pd.aperture].append([x, y])
                            for i in range(NVERTS / 2):
                                angle = i * math.pi / (NVERTS / 2 - 1.0) - math.pi / 2.0
                                x = (w - h) / 2.0 + (h / 2.0) * math.cos(angle)
                                y = (h / 2.0) * math.sin(angle)
                                pd.apertures[pd.aperture].append([x, y])
                        else:
                            # Vertical obround: two semicircles bottom/top
                            for i in range(NVERTS / 2):
                                angle = i * math.pi / (NVERTS / 2 - 1.0) + math.pi
                                x = (w / 2.0) * math.cos(angle)
                                y = -(h - w) / 2.0 + (w / 2.0) * math.sin(angle)
                                pd.apertures[pd.aperture].append([x, y])
                            for i in range(NVERTS / 2):
                                angle = i * math.pi / (NVERTS / 2 - 1.0)
                                x = (w / 2.0) * math.cos(angle)
                                y = (h - w) / 2.0 + (w / 2.0) * math.sin(angle)
                                pd.apertures[pd.aperture].append([x, y])
                        if pd.getChar() == 'X':
                            # NOTE(review): unlike C/R/P there is no pos += 1
                            # before parsing the hole size -- confirm intended
                            holesize = pd.parseFloat() * 10 ** pd.gd.fraction
                            add_circle(pd.apertures[pd.aperture], holesize, True)
                        print " read aperture", pd.aperture, ": o-rectangle ", w, " x ", h, ", hole size", holesize
                    elif tmp == 'P': # Regular polygon
                        size = pd.parseFloat() * 10 ** pd.gd.fraction
                        pd.check_char('X')
                        n = pd.parseFloat()
                        rot = 0
                        if pd.getChar() == 'X':
                            pd.pos += 1
                            rot = pd.parseFloat()
                        for i in range(n):
                            angle = -(i * 2.0 * math.pi / (n - 1.0))
                            x = (size / 2.0) * math.cos(angle + rot)
                            y = (size / 2.0) * math.sin(angle + rot)
                            pd.apertures[pd.aperture].append([x, y])
                        if pd.getChar() == 'X':
                            pd.pos += 1
                            holesize = pd.parseFloat() * 10 ** pd.gd.fraction
                            add_circle(pd.apertures[pd.aperture], holesize, True)
                        print " read aperture", pd.aperture, ": polygon n", n, " diameter ", size, ", rot ", rot, " hole size", holesize
                    else:
                        pd.error("Unknown aperture shape " + tmp)
                else: # Macro
                    # parseMacro(pd, tmp)
                    pd.error("Use of macros not supported yet")
            elif cmd == "AM": # Create aperture macro
                pd.macrosName.append(pd.parseUntil('*')) # Parse name of macro
                tmp = pd.findNextChar()
                tmplines = []
                while tmp != '%': # Store macro commands (only parsed when added)
                    tmplines.append(pd.parseUntil("*"))
                    tmp = pd.findNextChar()
                    if tmp == -2:
                        # This is required to prevent infinite loop
                        pd.check_char('%')
                pd.macros.append(tmplines)
                pd.N_macros += 1
                pd.pos += 1
                # Skip the trailing '*%' checks below
                continue
            elif cmd == "SR": # Set and repeat
                pass
            elif cmd == "LP": # Create new layer
                tmp = pd.findNextChar()
                if tmp == 'D':
                    pd.store_to_gd()
                    pd.pos += 1
                    pd.isDark = True
                elif tmp == 'C':
                    pd.store_to_gd()
                    pd.pos += 1
                    pd.isDark = False
                else:
                    pd.error("Command LP must be followed by C or D but found " + tmp)
            else:
                pd.error_line("Unknown command code " + tmp + ". ", '*')
            pd.check_char('*')
            pd.check_char('%')
    print "Execution time: ",(time.time() - stt)
    # Finalise parsing
    pd.store_to_gd()
    print " Parsing completed with ", pd.warnings, " warnings, ", pd.deps, " deprecated commands and ", pd.errors, " errors"
    return [pd.gd, pd.trackData]
def parseMacro(pd):
    """
    Parses macro - complicated by variables and expressions so we're writing a
    parser within a parser

    :type pd: ParseData

    Incomplete (currently unreachable; the caller reports macros as
    unsupported). Fixed: string.find returns -1 -- not -2 -- when ','
    is absent, so the original `!= -2` test ran the params branch for
    every macro name.
    """
    pd.findNextChar()
    mn = pd.parseUntil('*')
    tmp = string.find(mn, ',')
    if tmp != -1: # macro has params
        pd.pos -= len(mn) + tmp
        mn = mn[0:tmp]
        pd.findNextChar()
    for nm in pd.macrosName:
        if mn == nm:
            # TODO: add macro data to aperture
            break
|
class Solution:
    def maximalSquare(self, matrix: 'List[List[str]]') -> int:
        """Return the area of the largest all-'1' square in a binary matrix.

        dp[i][j] is the side length of the biggest square whose
        bottom-right corner is cell (i-1, j-1); a '1' cell extends the
        three neighbouring squares by one. Time O(m*n), space O(m*n).

        Fixed: the ``List`` annotation was used without importing it,
        which raised NameError when the class body was evaluated; the
        annotation is now a string so it is never evaluated.
        """
        if not matrix or not matrix[0]:
            return 0
        m, n = len(matrix), len(matrix[0])
        # Extra zero row/column avoids explicit boundary checks
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        best = 0
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if matrix[i - 1][j - 1] == '1':
                    dp[i][j] = min(dp[i - 1][j - 1],
                                   dp[i][j - 1],
                                   dp[i - 1][j]) + 1
                    best = max(best, dp[i][j])
        return best * best
|
from nltk.tokenize import WordPunctTokenizer
from gensim.models import Word2Vec
def train_w2v():
    """Train a toy Word2Vec model on five hard-coded sentences, then
    print the vector for 'sentence' and the words most similar to
    'first'."""
    sentences = [['this', 'is', 'the', 'first', 'sentence', 'for', 'word2vec'],
                 ['this', 'is', 'the', 'second', 'sentence'],
                 ['yet', 'another', 'sentence'],
                 ['one', 'more', 'sentence'],
                 ['and', 'the', 'final', 'sentence']]
    # min_count=1 keeps every token despite the tiny corpus
    model = Word2Vec(sentences, min_count=1)
    print(model.wv.get_vector('sentence'))
    print(model.wv.most_similar('first'))
if __name__ == '__main__':
train_w2v()
|
# coding: utf-8
import sys
import os
import jump.commands
# returns the value of an environment variable, looked up by its uppercased name
# Look up an environment variable by name, case-insensitively (the name is
# uppercased before the lookup).  Raises KeyError when the variable is unset.
def get_env(field):
    key = field.upper()
    return os.environ[key]
# return the name of the function called as first argument of command
# Return the name of the first attribute on the parsed-args namespace whose
# value is not None — i.e. the sub-command the user invoked — or None when
# no command was given.
def get_command_name(args):
    return next(
        (name for name in vars(args) if getattr(args, name) is not None),
        None,
    )
# return a list of the arguments passed after command.
# e.g. $ jump --upgrade arg1 arg2 would return ['arg1', 'arg2']
# Return the list of arguments the user passed after *command*,
# e.g. ``jump --upgrade arg1 arg2`` yields ['arg1', 'arg2'].
def get_arguments(args, command):
    return getattr(args, command)
# Execute the correct command according to user input
# Dispatch to the sub-command module selected on the command line.
# When no command was supplied, print the usage text and exit cleanly.
def execute(parser):
    parsed = parser.parse_args()
    name = get_command_name(parsed)
    if name is None:
        parser.print_help()
        sys.exit(0)
    # Each sub-command lives in jump.commands.<name> and exposes main().
    module = getattr(jump.commands, name)
    main_fn = getattr(module, 'main')
    main_fn(get_arguments(parsed, name))
from threading import local
class NavigationRootInfo(local):
    # Thread-local storage for the current navigation root; each thread
    # sees its own ``root`` value (class-level default is None until set).
    root = None
# Single module-level thread-local instance used by the getter/setter.
_current_root = NavigationRootInfo()
def getNavigationRoot():
    """Get the current navigation root.

    Returns the thread-local value previously stored via
    setNavigationRoot(), or None if none has been set on this thread.
    """
    return _current_root.root
def setNavigationRoot(root):
    """Set the current navigation root. This is normally done by an event
    subscriber during traversal.

    The value is stored thread-locally, so concurrent requests do not
    interfere with each other.
    """
    _current_root.root = root
|
from onegov.org.models.atoz import AtoZ
from onegov.org.models.clipboard import Clipboard
from onegov.org.models.dashboard import Boardlet
from onegov.org.models.dashboard import BoardletFact
from onegov.org.models.dashboard import Dashboard
from onegov.org.models.directory import DirectorySubmissionAction
from onegov.org.models.directory import ExtendedDirectory
from onegov.org.models.directory import ExtendedDirectoryEntry
from onegov.org.models.editor import Editor
from onegov.org.models.export import Export, ExportCollection
from onegov.org.models.extensions import AccessExtension
from onegov.org.models.extensions import ContactExtension
from onegov.org.models.extensions import ContentExtension
from onegov.org.models.extensions import CoordinatesExtension
from onegov.org.models.extensions import HoneyPotExtension
from onegov.org.models.extensions import PersonLinkExtension
from onegov.org.models.extensions import VisibleOnHomepageExtension
from onegov.org.models.file import GeneralFile
from onegov.org.models.file import GeneralFileCollection
from onegov.org.models.file import ImageFile
from onegov.org.models.file import ImageFileCollection
from onegov.org.models.file import ImageSet
from onegov.org.models.file import ImageSetCollection
from onegov.org.models.form import BuiltinFormDefinition, CustomFormDefinition
from onegov.org.models.legacy_file import LegacyFile
from onegov.org.models.legacy_file import LegacyFileCollection
from onegov.org.models.legacy_file import LegacyImage
from onegov.org.models.legacy_file import LegacyImageCollection
from onegov.org.models.message import DirectoryMessage
from onegov.org.models.message import EventMessage
from onegov.org.models.message import PaymentMessage
from onegov.org.models.message import ReservationMessage
from onegov.org.models.message import SubmissionMessage
from onegov.org.models.message import TicketChatMessage
from onegov.org.models.message import TicketMessage
from onegov.org.models.message import TicketNote
from onegov.org.models.organisation import Organisation
from onegov.org.models.page import AtoZPages, News, Topic
from onegov.org.models.page_move import PageMove
from onegov.org.models.person_move import FormPersonMove
from onegov.org.models.person_move import PagePersonMove
from onegov.org.models.person_move import PersonMove
from onegov.org.models.person_move import ResourcePersonMove
from onegov.org.models.publication import PublicationCollection
from onegov.org.models.recipient import ResourceRecipient
from onegov.org.models.recipient import ResourceRecipientCollection
from onegov.org.models.resource import DaypassResource
from onegov.org.models.search import Search
from onegov.org.models.sitecollection import SiteCollection
from onegov.org.models.swiss_holidays import SwissHolidays
from onegov.org.models.traitinfo import TraitInfo
__all__ = [
'AtoZ',
'AtoZPages',
'Boardlet',
'BoardletFact',
'BuiltinFormDefinition',
'Clipboard',
'ContactExtension',
'ContentExtension',
'CoordinatesExtension',
'CustomFormDefinition',
'Dashboard',
'DaypassResource',
'DirectoryMessage',
'DirectorySubmissionAction',
'Editor',
'EventMessage',
'Export',
'ExportCollection',
'ExtendedDirectory',
'ExtendedDirectoryEntry',
'FormPersonMove',
'GeneralFile',
'GeneralFileCollection',
'HoneyPotExtension',
'AccessExtension',
'ImageFile',
'ImageFileCollection',
'ImageSet',
'ImageSetCollection',
'LegacyFile',
'LegacyFileCollection',
'LegacyImage',
'LegacyImageCollection',
'News',
'Organisation',
'PageMove',
'PagePersonMove',
'PaymentMessage',
'PersonLinkExtension',
'PersonMove',
'PublicationCollection',
'ReservationMessage',
'ResourcePersonMove',
'ResourceRecipient',
'ResourceRecipientCollection',
'Search',
'SiteCollection',
'SubmissionMessage',
'SwissHolidays',
'TicketChatMessage',
'TicketMessage',
'TicketNote',
'Topic',
'TraitInfo',
'VisibleOnHomepageExtension',
]
|
import torch
import numpy as np
from PIL import Image
import os
import random
from IPython import display
from IPython.core.interactiveshell import InteractiveShell
import subprocess
# Render every expression result in notebook cells, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
import glob
import clip
# Load the CLIP ViT-B/32 model once at import time: ``perceptor`` encodes
# text/images, ``preprocess`` converts PIL images into model input tensors.
perceptor, preprocess = clip.load('ViT-B/32')
import sys
# Globals populated by load(): CLIP text embeddings and their labels.
c_encs=[]
categories = []
def load(categorylist):
    """Populate the module-level ``categories`` and ``c_encs`` globals.

    ``categorylist`` is either one of the preset names below or an
    explicit list of label strings.  Every label is CLIP-text-encoded
    (on the GPU — .cuda()) and cached in ``c_encs`` for classify().
    """
    global c_encs
    global categories
    presets = ["emojis", "imagenet", "dog vs cat", "pokemon",
               "words in the communist manifesto",
               "other (open this cell and write them into a list of strings)"]
    if categorylist not in presets:
        # Caller passed an explicit list of label strings.
        categories = categorylist
    elif categorylist == "imagenet":
        import pandas as pd
        categories = pd.read_csv("categories/map_clsloc.txt", sep=" ", header=None)[2]
        for idx in range(len(categories)):
            categories[idx] = categories[idx].replace("_", " ")
    elif categorylist == "dog vs cat":
        categories = ["dog", "cat"]
    elif categorylist == "pokemon":
        import pandas as pd
        categories = pd.read_csv("categories/pokemon.txt", sep=".", header=None)[1]
    elif categorylist == "words in the communist manifesto":
        words = open("categories/communism.txt", "r").read().split()
        # Order-preserving de-duplication.
        categories = list(dict.fromkeys(words))
    elif categorylist == "emojis":
        categories = open("categories/emojis.txt", "r").readlines()
    # Pre-compute a CLIP text embedding for every label.
    c_encs = [perceptor.encode_text(clip.tokenize(label).cuda()).detach().clone()
              for label in categories]
import PIL
def classify(filename, return_raw=False):
    """Classify an image file against the loaded categories.

    Returns the best-matching label, or the full list of cosine
    similarities when ``return_raw`` is set.
    NOTE(review): the image embedding is computed on the CPU while
    ``c_encs`` were encoded with .cuda() in load(); confirm both tensors
    end up on the same device before cosine_similarity is called.
    """
    image = preprocess(Image.open(filename)).unsqueeze(0).to("cpu")
    im_enc = perceptor.encode_image(image)
    distances = [torch.cosine_similarity(enc, im_enc).item() for enc in c_encs]
    if return_raw != False:
        return distances
    return categories[int(distances.index(max(distances)))]
def encode(object):
    """Encode either an image file (chosen by filename extension) or a
    plain text string with CLIP, returning the embedding tensor."""
    suffix = object.lower()[-5:]
    if any(ext in suffix for ext in ("jpg", "png", "jpeg")):
        return perceptor.encode_image(preprocess(Image.open(object)).unsqueeze(0).to("cpu"))
    return perceptor.encode_text(clip.tokenize(object).cuda()).detach().clone()
|
class Readfile:
    """Parse an input file made of '#'-delimited sections.

    Layout: optional leading '#' comment lines, then comma-separated data
    rows (stored in ``mem``), then another '#' comment block, then answer
    lines (stored in ``answer``).
    """
    def __init__(self, file):
        self.file = open(file, "r", encoding = "utf-8")
        self.mem = []      # parsed comma-separated data rows
        self.answer = []   # answer lines
        try:
            path = self.file.readline()
            # Skip leading '#' comment lines.
            while path[0:1] == "#":
                path = self.file.readline()
            # Read data rows until the next '#' line.  The extra ``path``
            # check stops at EOF instead of looping forever on malformed
            # files that lack a second '#' section (readline() returns ""
            # at EOF, and ""[0:1] != "#" was always true).
            while path and path[0:1] != "#":
                path = path.strip("\n")
                line_elements = path.split(",")
                self.mem.append(line_elements)
                path = self.file.readline()
            # Skip the second '#' comment block.
            while path[0:1] == "#":
                path = self.file.readline()
            # Remaining lines up to EOF are answers.
            while path:
                path = path.strip("\n")
                self.answer.append(path)
                path = self.file.readline()
        finally:
            # Everything is consumed in __init__; don't leak the handle.
            self.file.close()
    def get_input(self):
        """Return the parsed data rows (list of lists of strings)."""
        return self.mem
    def get_answer(self):
        """Return the answer lines (list of strings)."""
        return self.answer
def test():
    # Ad-hoc smoke test.  Note the absolute, machine-specific path: this
    # only runs on the original author's machine.
    file = "/Users/han/Desktop/George Washington/6511/project4/program/input/hmm_customer_1586733275373.txt"
    test = Readfile(file)
    print(test.get_input())
# test()
|
import math
class Solution:
    def judgeSquareSum(self, c):
        """
        Return True if c can be written as a**2 + b**2 with integers a, b >= 0.

        :type c: int
        :rtype: bool

        Two-pointer search over integer squares: O(sqrt(c)) time and O(1)
        space (the original materialised an O(sqrt(c)) set of squares),
        and the integer correction loop avoids float inaccuracy from
        math.sqrt on very large c.
        """
        lo = 0
        hi = int(math.sqrt(c))
        # Correct for any downward float rounding on huge inputs.
        while (hi + 1) * (hi + 1) <= c:
            hi += 1
        while lo <= hi:
            total = lo * lo + hi * hi
            if total == c:
                return True
            if total < c:
                lo += 1
            else:
                hi -= 1
        return False
print(Solution().judgeSquareSum(4))
# -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
import re
class IndonesiaGoodsNameSpider(RedisSpider):
    """Redis-fed scrapy spider that collects product names from
    tokopedia.com pages and appends them to a local text file."""
    name = 'indonesia_goods_name'
    allowed_domains = ['www.tokopedia.com']
    start_urls = ['https://www.tokopedia.com/p/buku/buku-remaja-dan-anak/dunia-pengetahuan']
    # Redis list key the spider pops start requests from.
    redis_key = 'indonesia_goods_name'
    custom_settings = {
        # Connection parameters for the Redis server.
        'REDIS_HOST': '123.56.11.156',
        'REDIS_PORT': 6379,
        # Redis password and database number.
        # NOTE(review): empty password combined with what looks like a
        # public IP — verify this instance is not exposed to the internet.
        'REDIS_PARAMS': {
            'password': '',
            'db': 0
        },
    }
    def parse(self, response):
        # Extract all product names on the result page and persist them.
        names = response.xpath('//*[@id="search-result"]/div[2]/div[4]/div/div/div/a/div[2]/h3/text()').extract()
        self.save(names)
        print(names)
        # next_link = response.xpath('//*[@id="search-result"]/div[2]/div[5]/span[8]/span/a/@href').extract()
        # if next_link:
        #     url = 'http://www.tokopedia.com' + next_link[0]
        #     page = re.findall("page=(.*?)&", url)[0]
        #     if int(page) < 6:
        #         yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
    def save(self, result):
        # Append the scraped names, one per line (hard-coded Windows path).
        with open(r'C:\Users\Administrator\Desktop\indonesia_temp\indonesia_goods_name.txt', 'a', encoding='utf-8')as f:
            f.write('\n'.join(result))
|
from Pages.MediaPages.DocumentMedia import DocumentMedia
import pytest
@pytest.allure.feature('Nodes')
@pytest.allure.story('Press Release CT')
@pytest.mark.usefixtures('init_press_release_page')
class TestPressReleaseCT:
    """UI tests for the Press Release content type.

    Fix: ``test_press_release_content_push`` was defined twice; the second
    definition shadowed the first, so pytest silently never collected the
    first test.  The second one is renamed to
    ``test_press_release_check_content_push`` so both now run.
    """
    @pytest.allure.title('VDM-188 Press Release CT - creating')
    def test_press_release_creating(self):
        self.node.fill_press_release_mandatory()
        url = self.driver.current_url
        self.node.save_node()
        # Saving a valid node navigates away from the edit form.
        assert self.driver.current_url != url
        self.node.delete_node()
    @pytest.allure.title('VDM-387 Press Release CT - check empty fields validation')
    def test_press_release_empty_fields(self):
        url = self.driver.current_url
        self.node.save_node()
        # Validation failure keeps us on the same page.
        assert self.driver.current_url == url
    @pytest.allure.title('VDM-??? Press Release CT - check fields existing')
    def test_press_release_fields_existing(self):
        assert self.node.get_title().is_displayed()
        assert self.node.get_hero_banner_tab().is_displayed()
        assert self.node.get_content_push_tab().is_displayed()
        assert self.node.get_documents_tab().is_displayed()
        assert self.node.get_add_paragraph_button().is_displayed()
        assert self.node.get_save_button().is_displayed()
    @pytest.allure.title('VDM-297 Press Release CT - content push')
    def test_press_release_content_push(self):
        self.node.fill_press_release_mandatory()
        url = self.driver.current_url
        self.node.save_node()
        assert self.driver.current_url != url
        assert (self.node.PR_test_data['title'] in self.driver.page_source)
        self.node.delete_node()
    @pytest.allure.title('VDM-297,296,294,292,291,298 Press Release CT - check content push')
    def test_press_release_check_content_push(self):
        self.node.fill_press_release_mandatory()
        self.node.fill_content_push()
        url = self.driver.current_url
        self.node.save_node()
        assert self.driver.current_url != url
        self.node.delete_node()
    @pytest.allure.title('VDM-295,290 Press Release CT - Fields limit validation')
    def test_press_release_content_push_CTA_title_limit(self):
        self.node.fill_press_release_mandatory()
        assert self.node.get_content_push_cta_text().get_attribute('maxlength') == '25'
        assert self.node.get_content_push_title().get_attribute('maxlength') == '255'
        assert self.node.get_hero_banner_title().get_attribute('maxlength') == '255'
        assert self.node.get_title().get_attribute('maxlength') == '255'
    @pytest.allure.title('VDM-288,286,284,283,281,280 Press Release CT - check hero banner')
    def test_press_release_hero_banner(self):
        self.node.fill_press_release_mandatory()
        self.node.fill_hero_banner()
        url = self.driver.current_url
        self.node.save_node()
        assert self.driver.current_url != url
        assert (self.node.test_data['hero_banner_title'] in self.driver.page_source)
        assert (self.node.test_data['hero_banner_subtitle'] in self.driver.page_source)
        self.node.delete_node()
    @pytest.allure.title('VDM-205,204 Press Release CT - documents tab')
    def test_press_release_document_tab(self):
        self.node.fill_press_release_mandatory()
        self.node.fill_document_tab()
        url = self.driver.current_url
        self.node.save_node()
        document = DocumentMedia(self.driver)
        assert self.driver.current_url != url
        assert document.get_document()
        self.node.delete_node()
    @pytest.allure.title('VDM-? Press Release CT - check hero banner background color')
    def test_press_release_ct_hero_banner_color(self):
        self.node.set_hero_banner_color()
        self.node.fill_press_release_mandatory()
        url = self.driver.current_url
        self.node.save_node()
        assert self.driver.current_url != url
        assert self.node.get_rose_hero_banner().is_displayed()
        self.node.delete_node()
|
import sys
import pymongo
try:
    mongo_host = sys.argv[1]
except IndexError:
    # Exit immediately: without this the script kept running and crashed
    # later with a NameError because mongo_host was never assigned.
    print("请输入初始化的mongo地址!")
    sys.exit(1)
# Counter documents used as per-table auto-increment indexes.
db = pymongo.MongoClient(mongo_host, 27017)['test_case']['tables']
# ordered=False lets the remaining inserts proceed even if some _id
# already exists.
db.insert_many(
    [
        {"_id": "users", "index": 1},
        {"_id": "products", "index": 1},
        {"_id": "product_category_index", "index": 1},
        {"_id": "product_version_index", "index": 1},
        {"_id": "cases", "index": 1},
        {"_id": "case_priority_index", "index": 1},
        {"_id": "run_index", "index": 1},
        {"_id": "product_module_index", "index": 1},
        {"_id": "plans", "index": 1},
        {"_id": "run_case_index", "index": 1}
    ], ordered=False
)
|
import numpy as np
# Value-iteration parameters.
iteration=0
Y=0.99            # discount factor (gamma)
delta=1e-3        # convergence threshold on the max utility change
stepcost=-20      # reward incurred on every step
finalreward=10    # bonus when shoot() hits from i == 1 (terminal shot)
# State space is indexed (i, j, k) over a 5 x 4 x 3 grid.
# NOTE(review): presumably (enemy health, ammo, stamina) — shoot() consumes
# j and k, recharge() raises k, dodge() spends k and may gain j. Confirm.
utility=np.zeros([5, 4, 3], dtype=float)
new=np.zeros([5, 4, 3], dtype=float)
actionarr=np.zeros([5, 4, 3], dtype=object)
action="-1"       # scratch global set by updateutility() to the argmax action
def recharge(i, j, k, utility):
    """Expected utility of RECHARGE in state (i, j, k): 80% chance the k
    resource increases (capped at the axis bound), 20% nothing changes."""
    gained = stepcost + Y * utility[i][j][min(utility.shape[2] - 1, k + 1)]
    stayed = stepcost + Y * utility[i][j][k]
    return round(0.8 * gained + 0.2 * stayed, 11)
def shoot(i, j, k, utility):
    """Expected utility of SHOOT: needs j > 0 and k > 0; 50% chance to hit.
    A hit from i == 1 ends the episode and earns finalreward.
    Returns a large negative constant when the action is unavailable."""
    if not (j and k):
        return -100000
    miss = stepcost + Y * utility[i][j - 1][k - 1]
    if i == 1:
        hit = stepcost + finalreward + Y * utility[i - 1][j - 1][k - 1]
    else:
        hit = stepcost + Y * utility[i - 1][j - 1][k - 1]
    return round(0.5 * hit + 0.5 * miss, 11)
def dodge(i, j, k, utility):
    """Expected utility of DODGE: spends one or two units of k and has an
    80% chance of raising j (capped).  Unavailable when k == 0."""
    if k == 2:
        # With k == 2 there is an 80/20 split between spending 1 or 2 units.
        spend_one = 0.8 * (stepcost + Y * utility[i][min(j + 1, utility.shape[1] - 1)][k - 1]) + 0.2 * (stepcost + Y * utility[i][j][k - 1])
        spend_two = 0.8 * (stepcost + Y * utility[i][min(j + 1, utility.shape[1] - 1)][k - 2]) + 0.2 * (stepcost + Y * utility[i][j][k - 2])
        return round(0.8 * spend_one + 0.2 * spend_two, 11)
    if k == 1:
        return round(0.8 * (stepcost + Y * utility[i][min(j + 1, utility.shape[1] - 1)][k - 1]) + 0.2 * (stepcost + Y * utility[i][j][k - 1]), 11)
    return -100000
def updateutility(i, j, k, utility, previousutility):
    """Bellman update for state (i, j, k): write the best action value into
    ``utility`` and record the argmax action name in the global ``action``.

    States with i == 0 are terminal and pinned to utility 0 (``action`` is
    left untouched, matching the original behaviour).
    The original evaluated dodge() and recharge() twice each per state;
    the values are hoisted into locals since those functions are pure.
    """
    if i==0:
        utility[i][j][k]=0
        return
    global action
    best = shoot(i, j, k, previousutility)
    action="SHOOT"
    dodge_val = dodge(i, j, k, previousutility)
    if best < dodge_val:
        best = dodge_val
        action="DODGE"
    recharge_val = recharge(i, j, k, previousutility)
    if best < recharge_val:
        best = recharge_val
        action="RECHARGE"
    utility[i][j][k] = best
# Value iteration: sweep every state until the largest utility change in a
# sweep falls below delta, then print the converged policy and utilities.
while 1:
    previousutility=np.copy(utility)
    print("iteration=", iteration, sep="")
    for i in range(0,utility.shape[0]):
        for j in range(0, utility.shape[1]):
            for k in range(0, utility.shape[2]):
                action="-1"
                updateutility(i, j, k, utility, previousutility)
                actionarr[i][j][k]=action
                # if delta<abs(previousutility[i][j][k]-utility[i][j][k]) or i==0:
                # print("(", i, ",", j, ",", k, "):",action,"=[", round(utility[i][j][k], 3), "]", sep="")
    if delta>np.max(abs(previousutility-utility)):
        # Converged: do one more sweep purely to recover the argmax action
        # per state and print it alongside the converged utilities.
        previousutility=np.copy(utility)
        for i in range(0,utility.shape[0]):
            for j in range(0, utility.shape[1]):
                for k in range(0, utility.shape[2]):
                    action="-1"
                    # NOTE(review): this writes into ``new`` rather than
                    # ``utility`` while printing ``utility`` values —
                    # confirm whether ``new`` here is intentional (it keeps
                    # the converged utilities untouched) or a leftover.
                    updateutility(i, j, k, new, previousutility)
                    print("(", i, ",", j, ",", k, "):",action,"=[", round(utility[i][j][k], 3), "]", sep="")
        print()
        print()
        break
    for i in range(0,utility.shape[0]):
        for j in range(0, utility.shape[1]):
            for k in range(0, utility.shape[2]):
                print("(", i, ",", j, ",", k, "):",actionarr[i][j][k],"=[", round(utility[i][j][k], 3), "]", sep="")
    print()
    print()
    iteration+=1
print(iteration)
import time
from sys import *
INF = 10**10
# Input: with one CLI argument, read that many characters from X10M.txt;
# otherwise read lines from stdin until EOF.  All whitespace is stripped.
if len(argv) == 2:
    f = open("X10M.txt", "r", encoding="utf-8")
    string = f.read().replace(" ","").replace("\n","")
    sl = int(argv[1])
    string = string[:sl]
    #print("input end")
else:
    string = ""
    f = input()
    while (f):
        try:
            st = f.replace(" ","").replace("\n","")
            string += st
            f = input()
        except EOFError:
            #print("input end")
            break
# Sentinel / 番兵 — '$' sorts before the expected input characters and
# terminates the string for the suffix-array construction.
string = string + "$"
#It takes O(nlogn) for the sort method and O(n) for string comparison. eventually O(n^2logn).
#Lets make it O(n) using suffix array and induced sort. (sa-is)
#sort()メソッドを使うとO(nlogn)で、文字列比較にO(n)なのでO(n^2logn)となってしまう。
#なのでこれからなるべくO(n)オーダーでsuffix array をソートする。(SA-IS法)
def max_l_n(l):
    """Return the largest element of ``l`` greater than -INF
    (or -INF itself when the list is empty or all-smaller)."""
    best = -INF
    for value in l:
        best = value if value > best else best
    return best
def is_lms(t, i):
    """True when position i is Left-Most-S: an S-type position whose
    predecessor is L-type.  Position 0 can never be LMS."""
    if i <= 0:
        return False
    return t[i - 1] == "L" and t[i] == "S"
#induced_sort() will be used recursively.
#induced_sort部分は再起で使うので関数として定義しておく
def induced_sort(string, k, t, lms_index):
    """One induced-sort pass of the SA-IS suffix-array algorithm.

    string    : str on the first call, list[int] of ranks on recursion
    k         : alphabet size
    t         : per-position "L"/"S" type array
    lms_index : seed ordering of the LMS positions
    Returns the induced suffix array (fully correct once the seed is the
    true LMS order).
    """
    # Collect the distinct symbols present, for str and int-list inputs.
    if string == str(string):
        s_str = [None]*256
        for s in string:
            s_str[ord(s)] = s
        s_str = [s for s in s_str if s != None]
    else:
        s_str = [None]*k
        for s in string:
            s_str[s] = s
        s_str = [s for s in s_str if s != None]
    index_dic = {st:s for s,st in enumerate(s_str) }
    n = len(string)
    sa = [None]*n
    # bins[i] ends up as the cumulative count = end boundary of bucket i.
    bins = [0]*k
    for s in string:
        bins[index_dic[s]] += 1
    for i in range(k-1):
        bins[i+1] += bins[i]
    # NOTE(review): this recomputes the LMS positions in index order,
    # overwriting the ``lms_index`` argument — so the corrected seed that
    # sa_is() passes on its second call appears to be ignored.  Confirm.
    lms_index = [i for i in range(n) if is_lms(t,i)]
    #step 1 # insert the LMS indices at the tail ends of their buckets
    count = [0]*k
    for lms in lms_index:
        ch = string[lms]
        index = index_dic[ch]
        sa[bins[index]-1-count[index]] = lms
        count[index] += 1
    #step2 # forward scan: place L-type suffixes at their bucket heads
    count = [0]*k
    for s in sa:
        if s == None or s == 0 or t[s-1] == "S":
            continue
        ch = string[s-1]
        index = index_dic[ch]
        sa[bins[index-1]+count[index]] = s-1
        count[index] += 1
    #step3 # backward scan: place S-type suffixes at their bucket tails
    count = [0]*k
    for s in reversed(sa):
        if s == None or s == 0 or t[s-1] == "L":
            continue
        ch = string[s-1]
        index = index_dic[ch]
        sa[bins[index]-1-count[index]] = s-1
        count[index] +=1
    return sa
def sa_is(string,k):
    """Build the suffix array of ``string`` with the SA-IS algorithm (O(n)).

    string : str on the first call, list[int] of LMS-substring ranks on
             recursive calls
    k      : alphabet size
    """
    n = len(string)
    # Symbol->bucket mapping, handling both str and int-list inputs.
    if string == str(string):
        s_str = [None]*256
        for s in string:
            s_str[ord(s)] = s
        s_str = [s for s in s_str if s != None]
    else:
        s_str = [None]*k
        for s in string:
            s_str[s] = s
        s_str = [s for s in s_str if s != None]
    index_dic = {st:s for s,st in enumerate(s_str) }
    if (n<2):
        return string
    # L or S type, L:s[i..]>s[i+1..] (Larger), S:s[i..]<s[i+1..] (Smaller),
    # computed right-to-left; equal characters inherit the type on the right.
    t = [None]*n
    t[-1] = "S"
    for i in range(n-2,-1,-1):
        if string[i] < string[i+1]:
            t[i] = "S"
        elif string[i] > string[i+1]:
            t[i] = "L"
        else:
            t[i] = t[i+1]
    #LMS:Left-Most-S, is_lms(t,i) judges whether position i is LMS
    lms_index = [i for i in range(n) if is_lms(t,i)]
    # First pass runs with a deliberately temporary (index-order) seed;
    # it is enough to rank the LMS substrings.
    seed = lms_index[:]
    sa = induced_sort(string, k, t, seed)
    sa = [s for s in sa if is_lms(t,s)]
    # Rank consecutive LMS substrings; equal substrings share a rank.
    nums = [None]*n
    nums[sa[0]] = 0
    num = 0
    for s in range(len(sa)-1):
        i = sa[s]
        j = sa[s+1]
        dif = None
        for d in range(n):
            if i+d>=n or j+d>=n:
                break
            if string[i+d] != string[j+d] or is_lms(t,i+d) != is_lms(t,j+d):
                dif = True
                break
            elif d>0 and (is_lms(t,i+d) or is_lms(t,j+d)):
                break
        if dif:
            num+=1
        nums[j] = num
    nums = [s for s in nums if s != None]
    # Recurse on the rank string when ranks are not yet all distinct.
    if num +1 < len(nums):
        # the recursive argument is at most half the original size, so the
        # total work stays linear (n + n/2 + n/4 + ... = O(n))
        sa = sa_is(nums, num+1)
    else:
        # ranks are all distinct: the reduced suffix array is read directly
        for i,ch in enumerate(nums):
            sa[index_dic[ch]] = i
    # Second induced sort, now seeded with the correctly ordered LMS list.
    seed = [lms_index[i] for i in sa]
    k = len(s_str)
    sa = induced_sort(string,k,t,seed)
    return sa
def lcp(string, sa):
    """Kasai's algorithm: compute the LCP array from the suffix array in O(n).

    Mutates ``sa`` by removing its first entry (the sentinel suffix).
    LCP[k] is the length of the longest common prefix of the suffixes at
    sa[k] and sa[k-1].
    """
    # remove the sentinel suffix from the suffix array
    del sa[0]
    n = len(sa)
    # r = rank array: r[i] is the position of suffix i within sa.
    r = [None]*n
    for i in range(n):
        r[sa[i]] = i
    l = 0
    LCP = [None]*n
    string = string[:-1]
    # Kasai's key invariant: moving from suffix i to suffix i+1 can shrink
    # the LCP by at most one, so ``l`` carries over between iterations and
    # the total comparison work stays O(n).
    for i in range(n):
        k = r[i]
        j = sa[k-1]
        #print(i,j)
        while (l+j<n and l+i<n and string[j+l] == string[i+l]):
            l+=1
        LCP[k] = l
        if l>0:
            l-=1
    return LCP
# Driver: build the suffix array, compute the LCP array, and report the
# longest repeated substring (length m, at 1-based positions a and b).
start = time.time()
sorted_str = sorted(list(set(string)))
k = len(sorted_str)   # alphabet size, including the sentinel
sa = sa_is(string,k)
LCP = lcp(string, sa)
LCP[0] = -1000        # guard so index 0 never wins the max
m = max_l_n(LCP)
ind = LCP.index(m)
a,b = sa[ind],sa[ind-1]
a,b = min(a,b)+1,max(a,b)+1
n = len(string)
print("time {}".format(time.time() - start))
print(a,b,m)
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
import pytest
from twitter.common.contextutil import temporary_file
from twitter.common.metrics import Label
from twitter.common.metrics.metrics import Metrics
from twitter.common.metrics.sampler import (
MetricSampler,
SamplerBase,
DiskMetricWriter,
DiskMetricReader)
from twitter.common.quantity import Amount, Time
from twitter.common.testing.clock import ThreadedClock
def test_sampler_base():
    """SamplerBase calls iterate() once per period and stops sampling after
    stop(); a fake ThreadedClock drives time so no real sleeping occurs."""
    class TestSampler(SamplerBase):
        def __init__(self, period, clock):
            self.count = 0  # number of iterate() calls observed
            SamplerBase.__init__(self, period, clock)
        def iterate(self):
            self.count += 1
    test_clock = ThreadedClock()
    sampler = TestSampler(Amount(1, Time.SECONDS), clock=test_clock)
    sampler.start()
    assert test_clock.converge(threads=[sampler])
    test_clock.assert_waiting(sampler, 1)
    # Half a period elapsed: no sample yet.
    test_clock.tick(0.5)
    assert test_clock.converge(threads=[sampler])
    assert sampler.count == 0
    # Full period boundary: exactly one sample.
    test_clock.tick(0.5)
    assert test_clock.converge(threads=[sampler])
    assert sampler.count == 1
    # Five more seconds: five more samples.
    test_clock.tick(5)
    assert test_clock.converge(threads=[sampler])
    assert sampler.count == 6
    assert not sampler.is_stopped()
    sampler.stop()
    # make sure that stopping the sampler short circuits any sampling
    test_clock.tick(5)
    assert test_clock.converge(threads=[sampler])
    assert sampler.count == 6
def test_metric_read_write():
    """A metric registered in Metrics becomes visible to DiskMetricReader
    only after both the writer and the reader have iterated."""
    metrics = Metrics()
    with temporary_file() as fp:
        # Delete the temp file first: both ends must cope with it missing.
        os.unlink(fp.name)
        writer = DiskMetricWriter(metrics, fp.name)
        reader = DiskMetricReader(fp.name)
        assert reader.sample() == {}
        reader.iterate()
        assert reader.sample() == {}
        # Empty metrics registry: nothing propagates yet.
        writer.iterate()
        assert reader.sample() == {}
        reader.iterate()
        assert reader.sample() == {}
        # Registered label flows through on the next writer+reader cycle.
        metrics.register(Label('herp', 'derp'))
        writer.iterate()
        assert reader.sample() == {}
        reader.iterate()
        assert reader.sample() == {'herp': 'derp'}
def test_metric_sample():
    """MetricSampler only picks up registered metrics on the next iterate()
    after registration."""
    metrics = Metrics()
    sampler = MetricSampler(metrics)
    assert sampler.sample() == {}
    sampler.iterate()
    assert sampler.sample() == {}
    metrics.register(Label('herp', 'derp'))
    # Still empty until the sampler iterates again.
    assert sampler.sample() == {}
    sampler.iterate()
    assert sampler.sample() == {'herp': 'derp'}
|
import SocketServer
import RPi.GPIO as GPIO
import numpy as np
import cv2
import os
import thread
import json
import pickle
class Servo():
    """PWM-driven hobby servo attached to a Raspberry Pi GPIO pin."""
    pin = None
    NEUTRAL = 7.5   # duty cycle at the centre position
    ZERO = 2.5      # duty cycle at 0 degrees
    FULL = 12.5     # duty cycle at 180 degrees
    servo = None
    def __init__(self, pin):
        self.pin = pin
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.pin, GPIO.OUT)
        # Second PWM argument is the frequency — presumably 50 Hz, the
        # standard servo carrier; confirm for this servo model.
        self.servo = GPIO.PWM(self.pin, 50)
        self.servo.start(self.NEUTRAL)
    def toDutyCycle(self, deg):
        """Map an angle in [0, 180] onto the duty-cycle range [ZERO, FULL]."""
        return self.ZERO + 10.0 * deg / 180.0
    def turnTo(self, deg):
        """Rotate the servo to the given angle in degrees."""
        duty = self.toDutyCycle(deg)
        self.servo.ChangeDutyCycle(duty)
    def cleanup(self):
        """Release all GPIO resources."""
        GPIO.cleanup()
class Camera():
    # Webcam wrapper: a background thread calls take_picture() in a loop
    # and get_image() returns the most recent frame from disk.
    camera_port = None
    pic_width = None
    pic_height = None
    FOV = 75              # field of view in degrees — presumably horizontal; confirm
    camera = None
    img_url = "./img.jpg"     # published frame, read by get_image()
    img1_url = "./img1.jpg"   # scratch file written first
    def take_picture(self):
        # Grab a frame; write to the scratch file then rename over the
        # published file so readers never see a half-written image.
        retval, im = self.camera.read()
        if im is None or im.shape is None:
            return
        cv2.imwrite(self.img1_url, im)
        if os.path.isfile(self.img_url):
            os.remove(self.img_url)
        os.rename(self.img1_url, self.img_url)
    def get_image(self):
        # Return the latest published frame as grayscale (imread flag 0).
        return cv2.imread(self.img_url, 0)
    def __init__(self, camera_port):
        self.camera_port = camera_port
        self.camera = cv2.VideoCapture(self.camera_port)
        # Property ids 3/4 are frame width/height.
        self.pic_width = self.camera.get(3)
        self.pic_height = self.camera.get(4)
        if not self.camera.isOpened():
            print "Error starting camera"
    def get_camera_params(self):
        # Report [width, height, FOV] for the client's angle calculations.
        print "Camera params: " + str(self.pic_width) + " " + str(self.pic_height) + " " + str(self.FOV)
        return [self.pic_width, self.pic_height, self.FOV]
# Continuously capture frames so get_image() always has a recent snapshot;
# intended to run on a background thread (see __main__).
def shoot(camera):
    while True:
        camera.take_picture()
# Module-level singletons, initialised in the __main__ block below.
camera = None
servo = None
server = None
class MyTCPHandler(SocketServer.BaseRequestHandler):
    """
    The request handler class for our server.

    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.

    Protocol: every reply is length-prefixed (a 32-byte left-justified
    ASCII length header, then the payload).  Commands: "getImg" streams
    the latest camera frame as JPEG, "getParms" returns the camera
    parameters as JSON, and any other input is parsed as a servo angle
    in [0, 180).
    """
    def send_data(self, data):
        # Send the 32-byte length header followed by the payload.
        lengeth = len(data)
        #print "sending: " + str(lengeth).ljust(32)
        self.request.send(str(lengeth).ljust(32))
        #print "sending: " + data
        self.request.sendall(data)
    def handle(self):
        while True:
            # self.request is the TCP socket connected to the client
            self.data = self.request.recv(1024)
            if not self.data or self.data == '':
                break
            self.data = self.data.strip()
            print "{} wrote:".format(self.client_address[0])
            print self.data
            if self.data == "getImg" :
                # Spin until a frame exists on disk, then send it as JPEG
                # at quality 90.
                img = None
                while img is None:
                    img = camera.get_image()
                encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90 ]
                result, imgencode = cv2.imencode('.jpg', img, encode_param)
                data = np.array(imgencode)
                stringData = data.tostring()
                self.send_data(stringData)
            elif self.data == "getParms":
                self.send_data(json.dumps(camera.get_camera_params()))
            else:
                # Anything else is interpreted as a servo angle.
                try:
                    deg = float(self.data)
                    if deg < 0 or deg >= 180:
                        self.send_data("Error")
                        return
                    servo.turnTo(deg)
                    self.send_data("Success")
                except:
                    # NOTE(review): bare except also swallows unrelated
                    # failures from turnTo/send_data, not just the float()
                    # parse error; consider narrowing to ValueError.
                    self.send_data("Error")
def start_server(HOST, PORT):
    # Create the server, binding to HOST:PORT.
    # NOTE(review): this assigns a *local* ``server``; the module-level
    # ``server`` global is never updated (add ``global server`` if other
    # code needs the instance).
    server = SocketServer.ThreadingTCPServer((HOST, PORT), MyTCPHandler)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    server.serve_forever()
if __name__ == "__main__":
HOST, PORT = "localhost", 9999
#start camera on channel 1
camera = Camera(-1)
thread.start_new_thread(shoot, (camera, ))
#start servo on pin 12
servo = Servo(12)
start_server(HOST, PORT)
servo.cleanup()
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from authentication.models import User
from .forms import EditUserForm
import json
def update_user(request, user_id):
    """Edit a user's profile.

    GET renders the edit form; POST validates and saves it, returning a
    JSON payload with 'result' and either 'email' or 'error_message'.
    """
    user = get_object_or_404(User, pk=user_id)
    context = {
        'user': user
    }
    # Check the HTTP verb rather than the POST dict's truthiness: a POST
    # with no fields previously fell through to the GET branch instead of
    # failing validation.
    if request.method == 'POST':
        form = EditUserForm(request.POST, instance=user)
        response_data = {}
        if form.is_valid():
            user = form.save()
            response_data['result'] = 'SUCCESS'
            response_data['email'] = user.email
        else:
            # Mirror the success branch so clients can always check 'result'.
            response_data['result'] = 'ERROR'
            response_data['error_message'] = 'Cette addresse e-mail est déjà associée à un compte ou est invalide'
        return HttpResponse(
            json.dumps(response_data),
            content_type="application/json"
        )
    form = EditUserForm(instance=user)
    context['form'] = form
    return HttpResponse(render(request, 'user/index.html', context))
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class CellApphook(CMSApp):
    """django-cms apphook that attaches the cells application's URLs to a
    CMS page."""
    app_name = 'cells'
    name = _("Celulas")
    def get_urls(self, page=None, language=None, **kwargs):
        # django-cms expects a list of urlconf module paths.
        return ["cells.urls"]
apphook_pool.register(CellApphook)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.