text stringlengths 38 1.54M |
|---|
from .location import LocationsApi, LocationApi
from .plan import PlansApi, PlanApi
from .user import UsersApi, UserApi, UserPlansApi
from .signIn import SignInApi
from .signUp import SignUpApi
# API endpoints to access Database
def initialize_routes(api):
    """Register every REST resource on *api*, mapping each to its URL."""
    routes = [
        (LocationsApi, '/api/locations/'),
        (LocationApi, '/api/locations/<id>'),
        (PlansApi, '/api/plans/'),
        (PlanApi, '/api/plans/<id>'),
        (UsersApi, '/api/users/'),
        (UserApi, '/api/users/<id>'),
        (UserPlansApi, '/api/userplans/<id>'),
        (SignInApi, '/api/signIn'),
        (SignUpApi, '/api/signUp'),
    ]
    for resource, url in routes:
        api.add_resource(resource, url)
|
#!/usr/bin/python
"""Render Ray assembler benchmark rows ('&'-separated LaTeX lines) as an HTML table."""
import sys

path = sys.argv[1]
# Expected input row format:
# & numberOfContigs &scaffolds & bases & meanSize & n50 & max & coverage & misassembledContigs & misassembledScaffolds & mismatches & indels
# 300-strept.sh.Ray & 86 & 68 & 1969888 & 22905 & 44534 & 194158 & 0.9627 & 1 & 1 & 1 & 0 \\
print("<table border=\"1\"><tr><th>Dataset</th><th>Number of contigs</th><th>Number of scaffolds</th><th>Number of bases</th><th>Average contig length</th><th>N50 contig length</th><th>Maximum contig length</th><th>Genome coverage breadth</th><th>Number of misassembled contigs</th><th>Number of incorrect scaffolds</th><th>Nucleotide mismatches</th><th>Nucleotide indels</th></tr>")
# Each benchmark line (marked by ".Ray") becomes one table row; the file is
# closed deterministically via the context manager (was left open before).
with open(path) as handle:
    for line in handle:
        if line.find(".Ray") != -1:
            print("<tr>")
            for token in line.split("&"):
                print("<td>")
                print(token.replace('\\', ''))  # strip the LaTeX row terminator
                print("</td>")
            print("</tr>")
print("</table>")
|
import tkinter as tk
# noinspection PyUnusedLocal
class ToolTip(object):
    """
    Show a tooltip next to a widget.
    Adapted from https://stackoverflow.com/a/56749167/5539184
    """

    def __init__(self, widget, text):
        self.widget = widget
        self.tip_window = None
        self.id = None
        self.x = self.y = 0
        self.text = text

    def show_tip(self, *args):
        """Create the tooltip window, unless one is open or there is no text."""
        if self.tip_window is not None or not self.text:
            return
        bbox_x, bbox_y, _cx, cursor_h = self.widget.bbox("insert")
        pos_x = bbox_x + self.widget.winfo_rootx() + 57
        pos_y = bbox_y + cursor_h + self.widget.winfo_rooty() + 27
        window = tk.Toplevel(self.widget)
        self.tip_window = window
        window.wm_overrideredirect(1)
        window.wm_geometry("+%d+%d" % (pos_x, pos_y))
        tip_label = tk.Label(
            window, text=self.text, wraplength=400, justify=tk.LEFT,
            background="#ffffe0", relief=tk.SOLID, borderwidth=1,
            font=("tahoma", "10", "normal"),
        )
        tip_label.pack(ipadx=1)

    def hide_tip(self, *args):
        """Destroy the tooltip window if it is currently shown."""
        if self.tip_window is None:
            return
        self.tip_window.destroy()
        self.tip_window = None
|
from kiwoom import *
import pickle
f = open("data.db", "rb") ##얘는 읽고 주문 넣으면 되니까 r : 리드모드로 가져와
codes = pickle.load(f) ## 아까 list로 저장했으니 pickle은 list로 읽어온다
f.close() ## open을 했으면 항상 close하는 습관
print(codes) |
#!/usr/bin/env python
"""Install a2sv and Sublist3r requirements plus a set of recon tools via apt-get."""
import os

# Tools installed system-wide, in the original order.
APT_PACKAGES = [
    "nmap", "whois", "host", "traceroute", "nslookup", "theharvester",
    "metagoofil", "dnsrecon", "whatweb", "sslscan", "sslyze", "wafw00f",
]

try:
    currdir = os.getcwd()
    # a2sv: run its own installer, then its Python requirements.
    os.chdir(os.path.join(currdir, "a2sv-master"))
    os.system("chmod 777 install.sh")
    os.system("./install.sh")
    os.system("pip install -r requirements.txt")
    os.chdir(currdir)
    # Sublist3r: Python requirements only.
    os.chdir(os.path.join(currdir, "Sublist3r-master"))
    os.system("pip install -r requirements.txt")
    for package in APT_PACKAGES:
        os.system("apt-get install " + package)
    print("Done!")
except OSError as err:
    # Previously a bare `except: pass` hid every failure; at least report
    # the likely case (an expected directory is missing).
    print("Setup failed:", err)
|
def fib(nums):
    """Return the Fibonacci number at 0-based index *nums*.

    :param nums: non-negative integer index into the sequence
    :return: the Fibonacci value at that index (fib(0) == 0, fib(1) == 1)
    """
    # Iterative form: the original double recursion was O(2**n); this is O(n).
    previous, current = 0, 1
    for _ in range(nums):
        previous, current = current, previous + current
    return previous
def createFib(n):
    """Build, print and return the first *n* Fibonacci numbers.

    :param n: how many leading Fibonacci numbers to produce
    :return: list of the first n Fibonacci numbers (the original printed the
        list but returned None, contradicting its own docstring)
    """
    sequence = [fib(i) for i in range(n)]
    print(sequence)
    return sequence

# Demo: show the first 20 Fibonacci numbers.
createFib(20)
|
import os
def rename_files(path):
    """Strip all decimal digits from the names of the files in *path*.

    :param path: directory whose entries are renamed in place
    """
    files = os.listdir(path)
    # One translation table deleting '0'-'9', built once for every file.
    table = str.maketrans(dict.fromkeys('0123456789'))
    saved_path = os.getcwd()
    os.chdir(path)
    try:
        for file in files:
            new_file = file.translate(table)
            print("Old name file: {}".format(file))
            print("New name file: {}".format(new_file))
            # Skip all-digit names (renaming to "" would raise) and no-ops.
            if new_file and new_file != file:
                os.rename(file, new_file)
    finally:
        # Always restore the caller's working directory, even on error.
        os.chdir(saved_path)
if __name__ == '__main__':
    # Demo run: strip digits from the filenames in the local "helloworld" folder.
    rename_files("helloworld")
|
"""
@brief PSF characterization from distribution of Gaussian fit
parameters to Fe55 data.
@author J. Chiang <jchiang@slac.stanford.edu>
"""
from __future__ import print_function
import os
from MaskedCCD import MaskedCCD
from pipeline.TaskParser import TaskParser
import pylab_plotter as plot
from fe55_psf import PsfGaussFit, afwMath
if __name__ == '__main__':
    # Command-line driver: Gaussian-fit Fe55 events on every amp of every
    # input file, persist the fit parameters, then plot the distribution
    # of fitted PSF sigma values for the sensor.
    parser = TaskParser('PSF characterization')
    parser.add_argument('-f', '--file_pattern', type=str,
                        help='file pattern for Fe55 input files')
    parser.add_argument('-F', '--Fe55_file_list', type=str,
                        help='file name of list of Fe55 files')
    args = parser.parse_args()
    sensor_id = args.sensor_id
    # args.files resolves either the glob pattern or the explicit list file
    # into the set of input FITS files — whichever was supplied.
    files = args.files(args.file_pattern, args.Fe55_file_list)

    fitter = PsfGaussFit()
    for infile in files:
        print(os.path.basename(infile))
        ccd = MaskedCCD(infile, mask_files=args.mask_files())
        # Iterating a MaskedCCD yields its amplifier segments.
        for amp in ccd:
            print(" amp", amp)
            fitter.process_image(ccd, amp)
    outfile = os.path.join(args.output_dir, '%s_psf_params.fits' % sensor_id)
    fitter.write_results(outfile=outfile)

    sigma, dn, chiprob, amp = fitter.results()

    # Histogram range clipped to median +/- 3 * (outlier-clipped stdev)
    # so a few bad fits do not stretch the plot axis.
    flags = afwMath.MEDIAN | afwMath.STDEVCLIP
    stats = afwMath.makeStatistics(sigma, flags)
    median = stats.getValue(afwMath.MEDIAN)
    stdev = stats.getValue(afwMath.STDEVCLIP)
    plot.histogram(sigma, xname='Fitted sigma values',
                   xrange=(median-3*stdev, median+3*stdev))
    plot.pylab.savefig(os.path.join(args.output_dir,
                                    '%s_sigma_distribution.png' % sensor_id))
|
"""
Contains a mapping of integers to the chart type they represent.
"""
# Integer chart-type codes mapped to the chart kind they represent,
# kept in the original registration order.
symbol_dict = dict([
    (1, "line"),
    (2, "scatter"),
    (4, "bar"),
    (19, "geographic_map"),
    (35, "graph"),
    (14, "chord"),
    (10, "bubble"),
    (37, "parallel_coordinates"),
    (13, "sankey"),
    (9, "box"),
    (16, "area"),
    (31, "stream_graph"),
    (7, "heat_map"),
    (15, "radial"),
    (33, "hexabin"),
    (38, "sunburst"),
    (22, "treemap"),
    (40, "voronoi"),
    (18, "donut"),
    (39, "waffle"),
    (41, "word_cloud"),
    (29, "pie"),
])
|
with open("input.txt","r+") as f:
case = int(f.readline())
w = open("output.txt","w")
for j in range(1,case+1):
m = int(f.readline())
i = 1
flag = [0]*10
fl = 0
while(i<500):
res = i*m
for r in range(0,10):
if str(r) in str(res) and flag[r] == 0:
#print "hello"
flag[r] = 1
if all(j == 1 for j in flag):
fl = 1
break
i+=1
if fl == 1:
s = "Case #%d: %d\n"%(j,res)
w.write(s)
else:
s = "Case #%d: "%(j)+"INSOMNIA\n"
w.write(s)
print m,"-----",res
f.close()
w.close()
|
"""
评分卡
"""
import numpy as np
import statsmodels.api as sm
#import re
import pandas as pd
def tool_group_rank(tmp_frame, group):
    """Add a 'group_num' column binning the first column into quantile groups.

    :param tmp_frame: DataFrame whose first column holds the values to bin;
        modified in place by adding the 'group_num' column.
    :param group: number of quantile groups.
    """
    # qcut over the unique values; retbins=1 also returns the bin edges s
    # (an array of group + 1 edge values).
    c, s = pd.qcut(tmp_frame.iloc[:, 0].unique(), group, retbins=1)

    def get_group_num(x):
        # BUG FIX: the original wrote range(len(s-1)), which subtracts 1
        # elementwise from the edge *array* instead of from its length,
        # allowing an out-of-range s[i + 1] access for values beyond the
        # last edge. There are len(s) - 1 intervals.
        for i in range(len(s) - 1):
            if x <= s[i + 1]:
                return i

    tmp_frame['group_num'] = tmp_frame.iloc[:, 0].apply(get_group_num)
def make_scorecard(formular,woe,basescore=600.0,base_odds=50.0/1.0,pdo=50.0):
    """Build a score card from regression coefficients and WOE bins.

    :param formular: DataFrame of fitted parameters; column u"参数" holds the
        term name (first row is "Intercept") and column u"估计值" the
        coefficient estimate. NOTE(review): the intercept is read from the
        second column via .iloc[0, 1] — confirm the column layout matches.
    :param woe: DataFrame of WOE bins with columns
        ['var_name', 'min', 'max', 'woe', 'rank', 'total_rate'].
    :param basescore: score assigned at *base_odds*.
    :param base_odds: good/bad odds corresponding to *basescore*.
    :param pdo: points needed to double the odds.
    :return: score card DataFrame with a per-bin rounded 'score' column.
    """
    # Step 6: generate the score card. (Historical hard-coded defaults:)
    # basescore = float(600)
    # base_odds = 50.0/1.0
    # pdo = float(50)
    # Scaling parameters derived from (basescore, base_odds, pdo).
    a = formular[formular[u"参数"] == "Intercept"].iloc[0,1]  # intercept term
    formular = formular.iloc[1:,:]  # drop the intercept row; variables remain
    n = float(len(formular))  # number of model variables
    factor = pdo/np.log(2)
    offset = basescore - factor*np.log(base_odds)
    # Build the score card one variable at a time.
    scorecard = pd.DataFrame()
    for i in formular["参数"]:
        woe_frame = woe[woe['var_name'] == i][['var_name','min','max','woe',"rank","total_rate"]]
        beta_i = formular[formular[u"参数"] == i][u"估计值"].iloc[0]
        # Bin score: spread offset and intercept evenly over the n variables,
        # then add this variable's own WOE-weighted contribution (rounded).
        woe_frame['score'] = woe_frame['woe'].apply(lambda woe : round(offset/n - factor*(a/n+beta_i*woe)))
        scorecard = pd.concat((scorecard,woe_frame),axis=0)
    return scorecard
def score_ks(data, types=1, group=10, ycol=-1):
    '''Compute per-bin WOE / IV / KS statistics for each score-card variable.

    :param data: DataFrame of feature columns plus the target column
        (1 encodes "bad", 0 "good"; by default the target is the last column).
    :param types: 1 -> bin with tool_sas_rank, otherwise tool_group_rank.
    :param group: number of bins (int), or a DataFrame whose 'var_name'
        column lists the variables to evaluate.
    :param ycol: target column, given as positional index or column name.
    :return: DataFrame with one row per bin per variable, sorted by IV
        descending; (0, 0) when *group* has an unsupported type.
    '''
    all_iv_detail = pd.DataFrame([])
    if type(ycol) == int:
        ycol = data.columns[ycol]
    if type(group) == int:
        column_names = data.columns[data.columns != ycol]
    elif isinstance(group, pd.DataFrame):
        column_names = group['var_name'].unique()
    else:
        print("argument 'group' type is wrong")
        return 0, 0
    for i in column_names:  # by default y is assumed to be the last column
        print(i)
        # Temporary frame: the feature in column 0, the target in column 1.
        tmp = pd.concat([pd.DataFrame(data[i]), data[[ycol]]], axis=1)
        tmp = tmp.astype('float')
        tmp.sort_values(by=tmp.columns[0], inplace=True)
        # BUG FIX: the original tested `type(types) == 1`, which compares a
        # type object with the int 1 and is always False — tool_sas_rank
        # could never be selected.
        if types == 1:
            tool_sas_rank(tmp, group)
        else:
            tool_group_rank(tmp, group)
        grouped = tmp.groupby(tmp['group_num'])
        # `.agg({'name': func})` renaming dicts were removed from pandas;
        # list / named aggregations below keep the same output column names.
        cols = grouped[tmp.columns[0]].agg(['min', 'max'])
        cols['group'] = range(len(cols))

        def len_minus_sum(x):
            '''Count of goods in the bin (1 encodes "bad").'''
            return len(x) - sum(x)

        col2 = grouped[tmp.columns[1]].agg(
            y1_num='sum', y0_num=len_minus_sum, N='size')
        cols = pd.concat([cols, col2], axis=1)
        bad_totl_num = float(tmp[tmp.columns[1]].sum())
        good_totl_num = float(len(tmp) - bad_totl_num)
        cols['bad_cum'] = cols['y1_num'].cumsum() / bad_totl_num
        cols['good_cum'] = cols['y0_num'].cumsum() / good_totl_num
        cols['y1_percent'] = cols['y1_num'] / bad_totl_num
        cols['y0_percent'] = cols['y0_num'] / good_totl_num
        cols["y0/y1"] = cols["y0_percent"] / cols["y1_percent"]
        cols["od"] = cols["y1_num"] / cols["N"]
        cols['total_percent'] = cols['N'] / (bad_totl_num + good_totl_num)
        cols['woe'] = np.log(cols['y0_percent'] / cols['y1_percent'])
        # `.ix` was removed from pandas; use .loc. Bins where one class is
        # empty yield +/-inf WOE and are zeroed out for now.
        cols.loc[cols['woe'] == np.inf, 'woe'] = 0
        cols.loc[cols['woe'] == -np.inf, 'woe'] = 0
        cols['MIV'] = (cols['y0_percent'] - cols['y1_percent']) * cols['woe']
        cols['ori_IV'] = cols['MIV'].sum()
        cols['KS'] = cols['bad_cum'] - cols['good_cum']
        cols['score_KS'] = cols['KS'].max()
        cols['var_name'] = i
        all_iv_detail = pd.concat([all_iv_detail, cols], axis=0)
    all_iv_detail = all_iv_detail.sort_values(
        by=['ori_IV', 'var_name', 'max'], ascending=[False, True, True])
    return all_iv_detail
|
# For each of t test cases read n; output three numbers where 2**((n-1)//26)
# is placed in the column selected by (n-1) % 26 and the other two are 0.
t = int(input())
# BUG FIX: [[0, 0, 0]] * t aliased one shared row list; build fresh rows.
ans = [[0, 0, 0] for _ in range(t)]
for i in range(t):
    n = int(input()) - 1
    # BUG FIX: n / 26 was float division, giving fractional exponents
    # (e.g. int(2 ** (51 / 26)) == 3 instead of the intended 2 ** 1 == 2).
    bits = 2 ** (n // 26)
    rem = n % 26
    ans[i][0] = bits if rem < 2 else 0
    ans[i][1] = bits if 2 <= rem < 10 else 0
    ans[i][2] = bits if 10 <= rem < 26 else 0
    print(ans[i][0], ans[i][1], ans[i][2])
|
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Helpers to calculate channel capacities. These are based on following publications:
* Davide B. Bartolini, Philipp Miedl, and Lothar Thiele. 2016. On the capacity of thermal covert channels in multicores. In Proceedings of the Eleventh European Conference on Computer Systems (EuroSys ’16). Association for Computing Machinery, New York, NY, USA, Article 24, 1–16. DOI:https://doi.org/10.1145/2901318.2901322
* Philipp Miedl and Lothar Thiele. 2018. The security risks of power measurements in multicores. In Proceedings of the 33rd Annual ACM Symposium on Applied Computing (SAC ’18). Association for Computing Machinery, New York, NY, USA, 1585–1592. DOI:https://doi.org/10.1145/3167132.3167301
* Miedl, Philipp, Bruno Klopott, and Lothar Thiele. "Increased reproducibility and comparability of data leak evaluations using ExOT." Proceedings of the 2020 Design, Automation & Test in Europe Conference & Exhibition (DATE). IEEE, 2020.
"""
import numpy as np
__all__ = ("get_fspectrum", "waterfilling", "capacity_from_connection_matrix")
"""
Signatures
----------
classic_waterfilling :: (p0, Sqq, Shh) -> Capacity float
constrained_waterfilling :: (p0, Sqq, Shh) -> Capacity float
capacity_from_connection_matrix :: (A, T_min) -> Capacity float
"""
def classic_waterfilling(p0, Sqq, Shh):
    """Returns the capacity bound of a given channel determined using classic waterfilling

    Args:
        p0: Power cap for the waterfilling algorithm as float
        Sqq: Noise power spectrum as np.darray shape(N,2) where N is the number of frequency
            bins, column 0 holds the frequencies and column 1 the power spectral density.
        Shh: Channel power spectrum as np.darray shape(N,2) where N is the number of frequency
            bins, column 0 holds the frequencies and column 1 the power spectral density.

    Returns:
        Channel Capacity: in bits per seconds
    """
    _lambda = 1  # Waterfilling parameter (the water level is 1/_lambda)
    _alpha = 1  # Lagrangian parameter; growing it damps the _lambda updates
    error = np.inf  # Error for input power allocation
    Sxx = np.full(Sqq[:, 0].shape, np.nan)  # Ideal input power allocation
    # Frequency-bin widths; the trailing zero keeps len(f_diff) == N.
    f_diff = np.concatenate([np.diff(Shh[:, 0]).reshape((-1,)), np.zeros((1,))])
    # Calculate the waterfilling parameter _lambda and consequently the ideal input power allocation Sxx
    while (abs(error) > 10 ** -6) and (_alpha < 10 ** 3):
        # Allocate power wherever the water level exceeds the noise/channel ratio.
        p = (1 / _lambda) - (Sqq[:, 1] / Shh[:, 1])
        Sxx[p > 0] = p[p > 0]
        Sxx[p < 0] = 0
        # Deviation of the total allocated power from the power budget p0.
        error = (f_diff * Sxx).sum() - p0
        if error > 0:
            # Over budget -> raise _lambda (i.e. lower the water level).
            _lambda = _lambda * (1 + abs(error) / _alpha)
        else:
            _lambda = _lambda / (1 + abs(error) / _alpha)
        _alpha += 0.01  # shrink subsequent steps so the iteration settles
    # Shannon capacity integrated over the spectrum (bin width = mean spacing).
    return np.log2(1 + ((Sxx * Shh[:, 1]) / Sqq[:, 1])).sum() * np.diff(Sqq[:, 0]).mean()
def constrained_waterfilling(p0, Sqq, Shh):
    """Returns the capacity bound of a given channel determined using constrained waterfilling

    Parameters:
        p0: Power cap for the waterfilling algorithm as float
        Sqq: Noise power spectrum as np.darray shape(N,2) where N is the number of frequency
            bins, column 0 holds the frequencies and column 1 the power spectral density.
        Shh: Channel power spectrum as np.darray shape(N,2) where N is the number of frequency
            bins, column 0 holds the frequencies and column 1 the power spectral density.

    Returns:
        Channel Capacity: in bits per seconds
    """
    max_error = 10 ** (-4)  # 9)  convergence threshold on the power-budget error
    max_alpha = 10 ** (10)  # iteration cap via the growing damping term
    # Apply whitening filter so the noise spectrum becomes flat at level N0.
    N0 = Sqq[:, 1].mean()
    whitening_filter = Sqq[:, 1] / N0
    Sqq_white = Sqq[:, 1] / whitening_filter
    Shh_white = Shh[:, 1] / whitening_filter
    N0_white = Sqq_white.mean()
    # (Removed the unused per-subband array C the original allocated here.)
    error = np.inf
    # Bin widths; repeating the last frequency keeps len(df) == N.
    df = np.diff(np.concatenate([Shh[:, 0].reshape((-1,)), (Shh[-1, 0]).reshape((-1,))]))
    _lambda = 1
    _alpha = 1
    A_lambda = np.full(Shh_white.shape, True, dtype=bool)  # active sub-band set
    # Calculate the waterfilling parameter _lambda and consequently the ideal
    # input power allocation over the active sub-bands.
    while (abs(error) > max_error) and (_alpha < max_alpha):
        _roh = (1 / 2) * (df[A_lambda] * (_lambda - (1 / Shh_white[A_lambda]))).sum()
        error = _roh - (p0 / N0_white)
        if error > 0:
            _lambda = _lambda / (1 + abs(error) / _alpha)
        else:
            _lambda = _lambda * (1 + abs(error) / _alpha)
        _alpha = _alpha + 0.01  # damp later updates so the iteration settles
        A_lambda = (Shh_white * _lambda) >= 1  # sub-bands above the water level
    if abs(error) > max_error:
        print(
            "WARNING: The capacity could only be calculated with an error of " + str(abs(error))
        )
    return (1 / 2) * (df[A_lambda] * np.log2(_lambda * Shh_white[A_lambda])).sum()
def capacity_from_connection_matrix(A, T_min):
    """Capacity bound of a noise-free channel from its connection matrix.

    Parameters:
        A: Transition matrix of the channel model
        T_min: Minimal channel access time in seconds
    Returns:
        Channel Capacity in bits per seconds
        Channel Capacity in bits per channel use
    """
    # The largest eigenvalue of A gives the asymptotic growth rate of the
    # number of admissible channel sequences (Shannon's noiseless capacity).
    eigenvalues = np.linalg.eigvals(A)
    largest = eigenvalues.max()
    bits_per_use = np.log2(largest)
    return bits_per_use / T_min, bits_per_use
|
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, double, integer
from .validators.appsync import resolver_kind_validator
class ApiCache(AWSObject):
    """
    `ApiCache <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-apicache.html>`__
    """

    resource_type = "AWS::AppSync::ApiCache"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ApiCachingBehavior": (str, True),
        "ApiId": (str, True),
        "AtRestEncryptionEnabled": (boolean, False),
        "TransitEncryptionEnabled": (boolean, False),
        "Ttl": (double, True),
        "Type": (str, True),
    }
class ApiKey(AWSObject):
    """
    `ApiKey <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-apikey.html>`__
    """

    resource_type = "AWS::AppSync::ApiKey"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ApiId": (str, True),
        "ApiKeyId": (str, False),
        "Description": (str, False),
        "Expires": (double, False),
    }
class DeltaSyncConfig(AWSProperty):
    """
    `DeltaSyncConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-deltasyncconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "BaseTableTTL": (str, True),
        "DeltaSyncTableName": (str, True),
        "DeltaSyncTableTTL": (str, True),
    }
class DynamoDBConfig(AWSProperty):
    """
    `DynamoDBConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-dynamodbconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AwsRegion": (str, True),
        "DeltaSyncConfig": (DeltaSyncConfig, False),
        "TableName": (str, True),
        "UseCallerCredentials": (boolean, False),
        "Versioned": (boolean, False),
    }
class ElasticsearchConfig(AWSProperty):
    """
    `ElasticsearchConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-elasticsearchconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AwsRegion": (str, True),
        "Endpoint": (str, True),
    }
class EventBridgeConfig(AWSProperty):
    """
    `EventBridgeConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-eventbridgeconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "EventBusArn": (str, True),
    }
class AwsIamConfig(AWSProperty):
    """
    `AwsIamConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-awsiamconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "SigningRegion": (str, False),
        "SigningServiceName": (str, False),
    }
class AuthorizationConfig(AWSProperty):
    """
    `AuthorizationConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-authorizationconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AuthorizationType": (str, True),
        "AwsIamConfig": (AwsIamConfig, False),
    }
class HttpConfig(AWSProperty):
    """
    `HttpConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-httpconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AuthorizationConfig": (AuthorizationConfig, False),
        "Endpoint": (str, True),
    }
class LambdaConfig(AWSProperty):
    """
    `LambdaConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-lambdaconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "LambdaFunctionArn": (str, True),
    }
class OpenSearchServiceConfig(AWSProperty):
    """
    `OpenSearchServiceConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-opensearchserviceconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AwsRegion": (str, True),
        "Endpoint": (str, True),
    }
class RdsHttpEndpointConfig(AWSProperty):
    """
    `RdsHttpEndpointConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-rdshttpendpointconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AwsRegion": (str, True),
        "AwsSecretStoreArn": (str, True),
        "DatabaseName": (str, False),
        "DbClusterIdentifier": (str, True),
        "Schema": (str, False),
    }
class RelationalDatabaseConfig(AWSProperty):
    """
    `RelationalDatabaseConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-datasource-relationaldatabaseconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "RdsHttpEndpointConfig": (RdsHttpEndpointConfig, False),
        "RelationalDatabaseSourceType": (str, True),
    }
class DataSource(AWSObject):
    """
    `DataSource <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-datasource.html>`__
    """

    resource_type = "AWS::AppSync::DataSource"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ApiId": (str, True),
        "Description": (str, False),
        "DynamoDBConfig": (DynamoDBConfig, False),
        "ElasticsearchConfig": (ElasticsearchConfig, False),
        "EventBridgeConfig": (EventBridgeConfig, False),
        "HttpConfig": (HttpConfig, False),
        "LambdaConfig": (LambdaConfig, False),
        "Name": (str, True),
        "OpenSearchServiceConfig": (OpenSearchServiceConfig, False),
        "RelationalDatabaseConfig": (RelationalDatabaseConfig, False),
        "ServiceRoleArn": (str, False),
        "Type": (str, True),
    }
class DomainName(AWSObject):
    """
    `DomainName <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-domainname.html>`__
    """

    resource_type = "AWS::AppSync::DomainName"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "CertificateArn": (str, True),
        "Description": (str, False),
        "DomainName": (str, True),
    }
class DomainNameApiAssociation(AWSObject):
    """
    `DomainNameApiAssociation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-domainnameapiassociation.html>`__
    """

    resource_type = "AWS::AppSync::DomainNameApiAssociation"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ApiId": (str, True),
        "DomainName": (str, True),
    }
class AppSyncRuntime(AWSProperty):
    """
    `AppSyncRuntime <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-resolver-appsyncruntime.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "Name": (str, True),
        "RuntimeVersion": (str, True),
    }
class LambdaConflictHandlerConfig(AWSProperty):
    """
    `LambdaConflictHandlerConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-resolver-lambdaconflicthandlerconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "LambdaConflictHandlerArn": (str, False),
    }
class SyncConfig(AWSProperty):
    """
    `SyncConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-resolver-syncconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ConflictDetection": (str, True),
        "ConflictHandler": (str, False),
        "LambdaConflictHandlerConfig": (LambdaConflictHandlerConfig, False),
    }
class FunctionConfiguration(AWSObject):
    """
    `FunctionConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-functionconfiguration.html>`__
    """

    resource_type = "AWS::AppSync::FunctionConfiguration"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ApiId": (str, True),
        "Code": (str, False),
        "CodeS3Location": (str, False),
        "DataSourceName": (str, True),
        "Description": (str, False),
        "FunctionVersion": (str, False),
        "MaxBatchSize": (integer, False),
        "Name": (str, True),
        "RequestMappingTemplate": (str, False),
        "RequestMappingTemplateS3Location": (str, False),
        "ResponseMappingTemplate": (str, False),
        "ResponseMappingTemplateS3Location": (str, False),
        "Runtime": (AppSyncRuntime, False),
        "SyncConfig": (SyncConfig, False),
    }
class CognitoUserPoolConfig(AWSProperty):
    """
    `CognitoUserPoolConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-graphqlapi-cognitouserpoolconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AppIdClientRegex": (str, False),
        "AwsRegion": (str, False),
        "UserPoolId": (str, False),
    }
class LambdaAuthorizerConfig(AWSProperty):
    """
    `LambdaAuthorizerConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-graphqlapi-lambdaauthorizerconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AuthorizerResultTtlInSeconds": (double, False),
        "AuthorizerUri": (str, False),
        "IdentityValidationExpression": (str, False),
    }
class OpenIDConnectConfig(AWSProperty):
    """
    `OpenIDConnectConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-graphqlapi-openidconnectconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AuthTTL": (double, False),
        "ClientId": (str, False),
        "IatTTL": (double, False),
        "Issuer": (str, False),
    }
class AdditionalAuthenticationProvider(AWSProperty):
    """
    `AdditionalAuthenticationProvider <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-graphqlapi-additionalauthenticationprovider.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AuthenticationType": (str, True),
        "LambdaAuthorizerConfig": (LambdaAuthorizerConfig, False),
        "OpenIDConnectConfig": (OpenIDConnectConfig, False),
        "UserPoolConfig": (CognitoUserPoolConfig, False),
    }
class LogConfig(AWSProperty):
    """
    `LogConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-graphqlapi-logconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "CloudWatchLogsRoleArn": (str, False),
        "ExcludeVerboseContent": (boolean, False),
        "FieldLogLevel": (str, False),
    }
class UserPoolConfig(AWSProperty):
    """
    `UserPoolConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-graphqlapi-userpoolconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "AppIdClientRegex": (str, False),
        "AwsRegion": (str, False),
        "DefaultAction": (str, False),
        "UserPoolId": (str, False),
    }
class GraphQLApi(AWSObject):
    """
    `GraphQLApi <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-graphqlapi.html>`__
    """

    resource_type = "AWS::AppSync::GraphQLApi"

    # Property name -> (expected value type, required flag); a list type
    # means a list of that property type.
    props: PropsDictType = {
        "AdditionalAuthenticationProviders": (
            [AdditionalAuthenticationProvider],
            False,
        ),
        "ApiType": (str, False),
        "AuthenticationType": (str, True),
        "LambdaAuthorizerConfig": (LambdaAuthorizerConfig, False),
        "LogConfig": (LogConfig, False),
        "MergedApiExecutionRoleArn": (str, False),
        "Name": (str, True),
        "OpenIDConnectConfig": (OpenIDConnectConfig, False),
        "OwnerContact": (str, False),
        "Tags": (Tags, False),
        "UserPoolConfig": (UserPoolConfig, False),
        "Visibility": (str, False),
        "XrayEnabled": (boolean, False),
    }
class GraphQLSchema(AWSObject):
    """
    `GraphQLSchema <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-graphqlschema.html>`__
    """

    resource_type = "AWS::AppSync::GraphQLSchema"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "ApiId": (str, True),
        "Definition": (str, False),
        "DefinitionS3Location": (str, False),
    }
class CachingConfig(AWSProperty):
    """
    `CachingConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-resolver-cachingconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "CachingKeys": ([str], False),
        "Ttl": (double, True),
    }
class PipelineConfig(AWSProperty):
    """
    `PipelineConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-resolver-pipelineconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "Functions": ([str], False),
    }
class Resolver(AWSObject):
    """
    `Resolver <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-resolver.html>`__
    """

    resource_type = "AWS::AppSync::Resolver"

    # Property name -> (expected value type, required flag); "Kind" is
    # checked by a dedicated validator.
    props: PropsDictType = {
        "ApiId": (str, True),
        "CachingConfig": (CachingConfig, False),
        "Code": (str, False),
        "CodeS3Location": (str, False),
        "DataSourceName": (str, False),
        "FieldName": (str, True),
        "Kind": (resolver_kind_validator, False),
        "MaxBatchSize": (integer, False),
        "PipelineConfig": (PipelineConfig, False),
        "RequestMappingTemplate": (str, False),
        "RequestMappingTemplateS3Location": (str, False),
        "ResponseMappingTemplate": (str, False),
        "ResponseMappingTemplateS3Location": (str, False),
        "Runtime": (AppSyncRuntime, False),
        "SyncConfig": (SyncConfig, False),
        "TypeName": (str, True),
    }
class SourceApiAssociationConfig(AWSProperty):
    """
    `SourceApiAssociationConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-appsync-sourceapiassociation-sourceapiassociationconfig.html>`__
    """

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "MergeType": (str, False),
    }
class SourceApiAssociation(AWSObject):
    """
    `SourceApiAssociation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-appsync-sourceapiassociation.html>`__
    """

    resource_type = "AWS::AppSync::SourceApiAssociation"

    # Property name -> (expected value type, required flag).
    props: PropsDictType = {
        "Description": (str, False),
        "MergedApiIdentifier": (str, False),
        "SourceApiAssociationConfig": (SourceApiAssociationConfig, False),
        "SourceApiIdentifier": (str, False),
    }
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 22:38:03 2020

Restaurant-review sentiment classifier: clean and stem the reviews, build a
bag-of-words matrix, then train/evaluate a Gaussian Naive Bayes model.

@author: harsh
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('Restaurant_Reviews.tsv', delimiter='\t', quoting=3)

# Cleaning the texts: stemming converts each word to its root word,
# e.g. "loved" (past tense) -> "love" (root word).
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# Hoisted out of the loop: one stemmer and one stop-word set for all 1000
# reviews (the set was rebuilt per review before). BUG FIX: the NLTK corpus
# id is lowercase 'english' — 'English' fails on case-sensitive filesystems.
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(0, 1000):
    review = re.sub('[^a-zA-Z]', ' ', df['Review'][i])  # keep letters only
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))

# Bag of Words model: one column per unique word across all reviews, one row
# per review — a sparse matrix built through tokenization, capped at the
# 1500 most frequent words.
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=1500)
X = cv.fit_transform(corpus).toarray()
Y = df.iloc[:, 1].values

from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(X_train, Y_train)
y_pred = nb.predict(X_test)
print(nb.score(X_test, Y_test))

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, y_pred)
print(cm)
|
from fastapi import FastAPI
from utils.io_utils import load_config
from utils.model_utils import load_model, load_bow, get_text_sentiment
import uvicorn

# Load the model artifacts once at startup.
config = load_config()
model = load_model(config["paths"]["model"])
bow = load_bow(config["paths"]["matrix"])

app = FastAPI()


@app.get("/")
def read_root():
    """Health-check endpoint."""
    return "Up running"


@app.get("/predict/")
def predict(text):
    """Return the sentiment predicted for *text*."""
    sentiment = get_text_sentiment(text)
    # TODO: the response key "sentimet" is misspelled, but it is part of the
    # public API, so it is kept until clients can migrate.
    return {"sentimet": sentiment}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
import cv2
import os
from ..modelo.Imagen import Imagen
from .Configuracion import Configuracion
class DaoDBMuestral:
    """Data-access object that loads sample images from the configured folder."""

    def __init__(self):
        """Constructor — no state to initialise.

        The original body was a bare ``None`` expression, a no-op; ``pass``
        states the intent explicitly.
        """
        pass

    def leer_carpetas(self):
        """
        leer_carpetas
        @details reads the folder names inside the configured directory
        @rtype: []
        @return: list with the names of the folders
        """
        # os.listdir already returns a list; no need to copy it element by element.
        return os.listdir(Configuracion.RUTA)

    def leer_imagenes(self, sujeto):
        """
        leer_imagenes
        @details reads every image inside the given subject's directory
        @type 1: String
        @param 1: sujeto subject folder name
        @rtype: []
        @return: list of vectorised Imagen objects
        """
        lista_img = []
        ruta_sujeto = Configuracion.RUTA + "/" + sujeto
        for nombre_archivo in os.listdir(ruta_sujeto):
            img = cv2.imread(ruta_sujeto + "/" + nombre_archivo)
            i = Imagen(img)
            i.vectorizar()  # precompute the image's feature vector
            lista_img.append(i)
        return lista_img
|
import numpy as np
import streamlit as st
import math
import csv
from PIL import Image
import pandas as pd
def app():
    """Streamlit page that converts lengths between meters and kilometers."""
    st.title("Meters to kilometers")
    meters = st.number_input("Enter length in meters:")
    kilometers = meters / 1000
    st.text("Length in kilometer is:")
    st.write(kilometers)
    kilometers_in = st.number_input("Enter length in kilometers:")
    meters_out = kilometers_in * 1000
    st.text("Length in meters is:")
    st.write(meters_out)
|
import pygame
from character import character
class player(character.character):
    """Playable character; extends the base character with player state."""
    # Last facing direction as an [x, y] pair.
    facingDir = [0, 0]
    def __init__(self, levelRect, startPos, playerSize, scale, physEnabled, inGame, floorGroup):
        # Trailing 3 is an extra argument consumed by the character base
        # class — its meaning is defined there (confirm in character.py).
        super().__init__(levelRect, startPos, playerSize, scale, physEnabled, inGame, floorGroup, 3)
        self.actionBuffer = []  # queued actions, consumed elsewhere
    def actionMove(self, direction, uvect):
        # Movement is delegated entirely to the base class.
        super(player, self).actionMove(direction, uvect)
    def update(self, frameTime, colliders):
        super().update(frameTime, colliders)
        # Player offset from the level center; intended for camera/viewport
        # scrolling — the clamping logic below is still work in progress.
        viewPortPositionX = self.rect.center[0] - self.levelRect.center[0]
        viewPortPositionY = self.rect.center[1] - self.levelRect.center[1]
        #if viewPortPositionY > maxY:
        #    pass
        #elif viewPortPositionY < minY:
        #    pass
        #
        #if viewPortPositionX > maxX:
        #    pass
        #elif viewPortPosition < minX:
        #    pass
    def moveMap(self):
        # Placeholder for map scrolling; not implemented yet.
        pass
|
#!/usr/bin/env python
from std_msgs.msg import Int16
from std_msgs.msg import Int32
from std_msgs.msg import String
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
import rospy
import tellopy
import time
import datetime
import os
import csvio
import getpass
import nofly
class pilot():
    """Waypoint-following delivery drone controller (ROS node 'drone_pilot').

    Publishes the current target pose on /wp_cords and a takeoff/land flag
    on 'activation'; reacts to whycon localisation, QR authentication and
    GUI start/land commands.
    """
    def create_pose_array(self):
        # Rebuild the single-pose PoseArray from the current target in
        # self.coordinatespub; it is re-published from get_pose() on every
        # localisation update.
        self.posepos.position = Point(*self.coordinatespub)
        self.msgpub.poses=[]
        self.msgpub.poses.append(self.posepos)
    '''
    Function Name: __init__
    Input: None
    Output: Initiates all the variables in the class ImgProc and creates subscribers
    Logic: initializes the value of the variables to predefined values
    Example Call: It is called automatically when an object is created
    '''
    def __init__(self):
        self.homelocation=1
        rospy.init_node('drone_pilot')
        self.msgpub = PoseArray()
        self.posepos = Pose()
        self.wppub = rospy.Publisher('/wp_cords', PoseArray, queue_size=60)
        self.gui_status = rospy.Publisher('status_msg', String, queue_size=1, latch=True)
        self.takeoff = rospy.Publisher('activation', Int32, queue_size=1, latch=True)
        self.progress = rospy.Publisher('/progbar', Int16, queue_size=1)
        rospy.Subscriber('whycon/poses', PoseArray, self.get_pose)
        rospy.Subscriber('/qr', String, self.setqr)
        rospy.Subscriber('drone_init', Int32, self.set_guicommand)
        # Flight altitudes (whycon units): cruise between stops, lower for delivery.
        self.cruize = 15.0
        self.delivery_z = 18.0
        self.counter = 0
        self.takeoffland = -100
        self.drone_x = 0.0
        self.drone_y = 0.0
        self.drone_z = 24.0
        self.home_x = 0.0
        self.home_y = 0.0
        self.startend = 2
        #self.authenticationflag = 0 # Do not remove to be uncommented when feature of invalid QR is to be used
        self.coordinatespub=[0.0,0.0,30.0]
        self.create_pose_array()
        # Delivery waypoints from CSV, then re-routed around no-fly zones.
        self.coordinates=csvio.csvread('/home/'+getpass.getuser()+'/catkin_ws/src/shravas/src/coordinates.csv')
        self.coordinates1=nofly.main(self.coordinates)
        print(self.coordinates)
        print(self.coordinates1)
        # CSV fields arrive as strings; coerce to numeric types once here.
        for index in range(len(self.coordinates)):
            self.coordinates[index]['x'] = float(self.coordinates[index]['x'])
            self.coordinates[index]['y'] = float(self.coordinates[index]['y'])
            self.coordinates[index]['z'] = float(self.coordinates[index]['z'])
            self.coordinates[index]['delivery'] = int(self.coordinates[index]['delivery'])
        self.qr_pub="no code"
    '''
    Function Name: check_delta
    Input: Error allowed
    Output: none
    Logic: checks if drone is at specified position
    Example Call: check_delta(0.2,0.5)
    '''
    def check_delta(self,wp_x,wp_y,wp_z,err_xy,err_z):
        # Busy-wait until the drone stays inside the error box for 100
        # consecutive localisation samples; any excursion resets the count.
        self.counter=0
        while(self.counter < 100):
            if((self.drone_x<(wp_x+err_xy)) & (self.drone_x>(wp_x-err_xy)) & (self.drone_y<(wp_y+err_xy)) & (self.drone_y>(wp_y-err_xy)) & (self.drone_z>(wp_z-err_z)) & (self.drone_z<(wp_z+err_z))):
                self.counter+=1
            else:
                self.counter=0
            # A GUI stop/land command aborts the wait immediately.
            if(self.startend!=1):
                self.counter=200
        self.progress.publish(0)
    '''
    Function Name: gotoloc
    Input: travelling coordinates(x,y,z) and delta value to check at destination coordinates
    Output: None
    Logic: Assign the values to coordinates and publish them
    Example Call: gotoloc(0.0,0.0,18.0,0.5,0.5)
    '''
    def gotoloc(self,wp_x,wp_y,wp_z,deltaxy,deltaz):
        self.gui_status.publish("Travelling to new location")
        self.coordinatespub=[wp_x,wp_y,wp_z]
        self.create_pose_array()
        self.gui_status.publish(str(wp_x)+","+str(wp_y)+","+str(wp_z))
        # Block until the drone settles at the new waypoint.
        self.check_delta(wp_x,wp_y,wp_z,deltaxy,deltaz)
    '''
    Function name : check_qr
    Logic : To check and match the qr code shown by customer
    '''
    def check_qr(self,index):
        # Wait up to ~12 s for the expected QR code; give up (and report)
        # if no matching customer shows up in time.
        moveahead=0
        start_time = time.time()
        while(moveahead!=1):
            end_time = time.time()
            if((end_time - start_time) < 12):
                if(self.qr_pub == self.coordinates[index]['qr']):
                    #self.authenticationflag = 1 # Do not remove to be uncommented when feature of invalid QR is to be used
                    moveahead=1
                    self.gui_status.publish("Customer Authenticated")
                #else: # Do not remove to be uncommented when feature of invalid QR is to be used
                    #self.authenticationflag = 0
            else:
                moveahead=1
                self.gui_status.publish("No Customer found, taking package back to home")
            # GUI stop/land also aborts the wait.
            if(self.startend!=1):
                moveahead=1
    '''
    Function Name: land
    Input: None
    Output: None
    Logic: Check for delta at cruize height over home location and land when delta satisfied
    Example call: land(0,index)
    '''
    def land(self,endrun,index):
        # takeoffland: -1 = final land, 0 = delivery stop, 1 = take off.
        if(endrun == 1):
            self.takeoffland = -1
        else:
            self.takeoffland = 0
        # Descend to delivery altitude at the current x/y target.
        self.coordinatespub.pop()
        self.coordinatespub.append(self.delivery_z)
        self.create_pose_array()
        self.check_delta(self.coordinatespub[0],self.coordinatespub[1],self.coordinatespub[2],0.5,1.5)
        self.gui_status.publish("Waiting for authentication")
        self.check_qr(index)
        rospy.sleep(3)
        self.takeoffland=1
        self.gui_status.publish("Taking off for next destination ")
    '''
    Function Name: fly
    Input: Nil
    Output: Next Delivery location
    Logic: For delivery=1 Go for delivery and set the coordinates and for delivery=0 travel for no fly zone avoidance
    Example call: fly()
    '''
    def fly(self):
        # Wait for the GUI to send the start command (startend == 1).
        while(self.startend != 1):
            rospy.sleep(0.0001)
        # Remember where we started so we can return home at the end.
        self.home_x = self.drone_x
        self.home_y = self.drone_y
        for index in range(len(self.coordinates)):
            # delivery codes: 0 takeoff, >0 delivery stop, -2 detour
            # waypoint (no-fly avoidance), -1 end of mission.
            if(self.startend!=1):
                break
            elif(self.coordinates[index]['delivery'] == 0):
                self.gui_status.publish("Takeoff")
                self.takeoffland=1
                rospy.sleep(3)
                self.gotoloc(self.home_x,self.home_y,self.cruize,1.5,3.0)
            elif(self.coordinates[index]['delivery'] > 0):
                self.gotoloc(self.coordinates[index]['x'],self.coordinates[index]['y'],self.cruize,0.3,3.0)
                self.land(0,index)
                self.gotoloc(self.coordinates[index]['x'],self.coordinates[index]['y'],self.cruize,0.3,3.0)
            elif(self.coordinates[index]['delivery']== -2):
                self.gotoloc(self.coordinates[index]['x'],self.coordinates[index]['y'],self.cruize,0.3,3.0)
            elif(self.coordinates[index]['delivery'] == -1):
                self.gui_status.publish("ALL DELIVERIES COMPLETED GOING BACK HOME")
                self.gotoloc(self.home_x,self.home_y,self.cruize,0.3,3.0)
                self.land(1,index)
            else:
                self.gui_status.publish("BAD COORDINATES GOING BACK HOME")
                self.gotoloc(self.home_x,self.home_y,self.cruize,0.3,3.0)
                self.land(1,index)
    ######################## SUBSCRIBER FUNCTIONS ########################
    '''
    Function Name: get_pose
    Input: takes poseArray message from whycon/poses
    Output: sets the value of drone_x, drone_y, drone_z
    Logic: subscribes to whycon/poses to get the coordinates of the whycon marker placed on the drone
    Example Call: get_pose(data)
    '''
    def get_pose(self, pose):
        self.drone_x = pose.poses[0].position.x
        self.drone_y = pose.poses[0].position.y
        self.drone_z = pose.poses[0].position.z
        # Re-publish the activation flag and current waypoint on every
        # localisation sample so downstream nodes stay in sync.
        self.takeoff.publish(self.takeoffland)
        self.wppub.publish(self.msgpub)
    '''
    Function Name: setqr
    Input: takes qr data for authentication
    Output: sets the value of qr matcher variable
    Logic: subscribes to qr to get the qr code date of the user
    Example Call: setqr(msg)
    '''
    def setqr(self, msg):
        self.qr_pub=msg.data
    '''
    Function Name: set_guicommand
    Input: takes status flag from GUI for start and emergency land
    Output: make publisher ready to send command to drone
    Logic: sets the flag value
    Example Call: set_guicommand(msg)
    '''
    def set_guicommand(self,msg):
        self.startend=msg.data  # 1 for start , 0 for land ,-1 for call back
        if(self.startend == 0):
            # Emergency land in place.
            rospy.sleep(1)
            self.takeoffland=-1
            self.takeoff.publish(self.takeoffland)
        elif(self.startend == -1):
            # Recall: fly back to home at cruise height, then land.
            rospy.sleep(1)
            self.takeoffland=1
            self.coordinatespub = [self.home_x,self.home_y,self.cruize]
            self.create_pose_array()
            self.check_delta(self.home_x,self.home_y,self.cruize,0.5,1.5)
            self.takeoffland=-1
######################## MAIN #########################
'''
Function Name: main
Input: none
Output: none
Logic: initializes send_data and calls fly
Example Call: called automatically
'''
if __name__ == '__main__':
    # Start the pilot node, run the full delivery mission, then give the
    # drone time to land before the process exits.
    test = pilot()
    test.fly()
    time.sleep(20)
# flake8: noqa
from tune_notebook.monitors import (
NotebookSimpleChart,
NotebookSimpleHist,
NotebookSimpleRungs,
NotebookSimpleTimeSeries,
PrintBest,
)
|
from constants import Constants
from pre_processor import Pre_processor
from database import Database
from hpelm import ELM
# CONTAINS ALL STATIC MEMBERS
class Elm:
    """Static helpers that train hpelm ELM classifiers and save predictions."""

    @staticmethod
    def epoch(train_x, train_y, test_x, test_x_raw, filename):
        """Train one 550-neuron sigmoid ELM and persist its test predictions.

        Args:
            train_x, train_y: training features and labels.
            test_x: test features; test_x_raw: untransformed rows for output.
            filename: destination passed to Database.save_results.
        """
        features = train_x.shape[1]
        train_y = Pre_processor.one_hot_encoding(train_y)
        clf = ELM(features, Constants.tot_labels)
        clf.add_neurons(550, "sigm")
        # 10-fold cross-validation with optimal pruning, classification mode.
        clf.train(train_x, train_y, 'CV', 'OP', 'c', k=10)
        pred_y = clf.predict(test_x)
        pred_y = Pre_processor.one_hot_decoding_full(pred_y)
        Database.save_results(test_x_raw, pred_y, filename)

    @staticmethod
    def feature_engineering_pca(train_x, train_y, test_x, test_x_raw):
        """Run epoch() on the top-k PCA features for every k in [1, tot_features)."""
        print("ELM Feature Engineering with PCA...")
        # Idiomatic for-loop replaces the original manual while-counter.
        for count in range(1, Constants.tot_features):
            print("Top " + str(count) + " features...")
            train_x_mod = Pre_processor.get_top_k_features(train_x, count)
            test_x_mod = Pre_processor.get_top_k_features(test_x, count)
            filename = "elm_top_" + str(count) + "_features.csv"
            Elm.epoch(train_x_mod, train_y, test_x_mod, test_x_raw, filename)

    @staticmethod
    def tune_elm(train_x, train_y, test_x_raw, test_x, act_funcs, neuron_counts):
        '''
        Assumptions:
        1. NN has only 1 hidden layer
        2. act_funcs: list of distinct activation functions
        3. neuron_counts: list of distinct '# of neurons in the hidden layer'
        '''
        print("Tuning ELM...")
        features = train_x.shape[1]
        train_y = Pre_processor.one_hot_encoding(train_y)
        # Grid search over activation function x hidden-layer size
        # (for-loops replace the original nested while-counters).
        for cur_act_func in act_funcs:
            for cur_neuron_count in neuron_counts:
                print(cur_act_func + " | " + str(cur_neuron_count) + "...")
                clf = ELM(features, Constants.tot_labels)
                clf.add_neurons(cur_neuron_count, cur_act_func)
                clf.train(train_x, train_y, 'CV', 'OP', 'c', k=10)
                pred_y = clf.predict(test_x)
                pred_y = Pre_processor.one_hot_decoding_full(pred_y)
                file_name = "submission_" + str(cur_neuron_count) + "_" + cur_act_func + ".csv"
                Database.save_results(test_x_raw, pred_y, file_name)
|
class Solution:
    def exist(self, board: [[str]], word: str) -> bool:
        """Return True if `word` can be traced through orthogonally adjacent
        cells of `board`, using each cell at most once.

        Fixes vs. the original: the visited marker is always restored, so
        `board` is left unmodified even when the word is found, and the
        unused `soFar` accumulator parameter was removed.
        """
        if not board:
            return False
        max_h = len(board)
        max_w = len(board[0])

        def in_bounds(x, y):
            # True when (x, y) lies on the board.
            return 0 <= x < max_w and 0 <= y < max_h

        def search(x, y, word_ind):
            # Depth-first search for word[word_ind:] starting at (x, y).
            if word_ind == len(word):
                return True
            if not in_bounds(x, y):
                return False
            letter = board[y][x]
            if letter != word[word_ind]:
                return False
            board[y][x] = ''  # mark as visited on this path
            found = any(
                search(x + dx, y + dy, word_ind + 1)
                for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1))
            )
            board[y][x] = letter  # restore regardless of outcome
            return found

        return any(search(x, y, 0)
                   for y in range(max_h)
                   for x in range(max_w))
# Quick self-test of Solution.exist on the classic sample grid.
s = Solution()
board = [list("ABCE"), list("SFCS"), list("ADEE")]
word = "ABFCEEE"
print(s.exist(board, word))
from json import load
from pprint import pprint
from collections import namedtuple
from itertools import chain, zip_longest
from statistics import mean, pstdev
from math import ceil
from operator import itemgetter
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last with fillvalue.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # The same iterator repeated n times makes zip_longest consume
    # n consecutive items per output tuple.
    shared_iter = iter(iterable)
    return zip_longest(*([shared_iter] * n), fillvalue=fillvalue)
# Lightweight records for board squares and 2-D points.
Square = namedtuple('Square', 'x y owner strength production')
Point = namedtuple('Point', 'x y')
# Load a Halite replay and pull out map dimensions plus the first frame.
replay = load(open('replay.hlt'))
width = replay['width']
height = replay['height']
productions = replay['productions']
first_frame = replay['frames'][0]
# Build the board: zip each frame row of (owner, strength) pairs with the
# static production map to produce one Square per cell.
grid = [[Square(x, y, owner, strength, production)
         for x, ((owner, strength), production)
         in enumerate(zip(owner_strength_row, productions_row))]
        for y, (owner_strength_row, productions_row)
        in enumerate(zip(first_frame, productions))]
def resource(square):
    """Capture-cost heuristic for a square: strength per unit of production."""
    # Zero-production squares fall back to raw strength (avoids divide-by-zero).
    return square.strength if square.production == 0 else square.strength / square.production
def distance(sq1, sq2):
    """Return the toroidal Manhattan distance between two squares."""
    def axis_dist(a, b, size):
        # Shorter of the direct span and the two wrap-around spans.
        return min(abs(a - b), a + size - b, b + size - a)
    return axis_dist(sq1.x, sq2.x, width) + axis_dist(sq1.y, sq2.y, height)
window_size = 8
windows = []
# Duplicate the rows (and, below, each row's cells) so windows can wrap
# around the toroidal map without index arithmetic.
double_grid = grid + grid
print(width, height)
for y in range(0, height):
    for x in range(0, width):
        # Each entry: the window's four corner points plus the mean resource
        # cost of the window_size x window_size region anchored at (x, y).
        windows.append(([Point(x, y), Point(x + window_size, y), Point(x, y + window_size), Point(x + window_size, y + window_size)], mean(map(resource, chain(*map(lambda l: (l+l)[x:x+window_size], double_grid[y:y+window_size]))))))
# First square owned by player 1 (the bot itself).
mycell = list(filter(lambda s: s.owner == 1, chain(*grid)))[0]
pprint(mycell)
# pprint(windows)
# Summary statistics over all window scores.
data_mean = mean(map(itemgetter(1), windows))
data_stddev = pstdev(map(itemgetter(1), windows), data_mean)
print('window_size', window_size)
print('mean', data_mean)
print('stddev', data_stddev)
print('2 stddev', data_mean - data_stddev * 1.9)
def below_two_stddev(entry, threshold=3):
    """Return True when a window's mean resource cost is below *threshold*.

    Note: despite the name, the cutoff is a fixed constant (3 by default),
    not a value derived from the data's standard deviation; the threshold
    is now a parameter so callers can tune it.

    Args:
        entry: (corner_points, mean_resource) tuple as stored in `windows`.
        threshold: cutoff applied to the mean-resource value.
    """
    return entry[1] < threshold
# For every window under the threshold, pair each corner point with the
# window score, keyed by Manhattan distance from our starting cell, sorted
# so the nearest cheap expansion targets come first.
pprint(sorted(list(zip(map(lambda s: distance(mycell, s), chain(*map(itemgetter(0), filter(below_two_stddev, windows)))), chain(*map(lambda points: zip(points[0], [points[1]] * len(points[0])), filter(below_two_stddev, windows)))))))
|
import datetime
import uuid
from enum import IntEnum
from tortoise import Model, fields
class ProductType(IntEnum):
    """Kind of product content."""
    article = 1
    page = 2
class PermissionAction(IntEnum):
    """CRUD actions used for permissioning."""
    create = 1
    delete = 2
    update = 3
    read = 4
class Status(IntEnum):
    """Simple on/off toggle."""
    on = 1
    off = 0
class User(Model):
    """Application user account."""
    username = fields.CharField(max_length=20, unique=True)
    password = fields.CharField(max_length=100)
    last_login = fields.DatetimeField(description="Last Login", default=datetime.datetime.now)
    is_active = fields.BooleanField(default=True, description="Is Active")
    is_superuser = fields.BooleanField(default=False, description="Is SuperUser")
    intro = fields.TextField(default="")
    # NOTE(review): max_digits=10 with decimal_places=8 leaves only 2 integer
    # digits; longitudes up to +/-180 need 3 — confirm the intended range.
    longitude = fields.DecimalField(max_digits=10, decimal_places=8)
class Email(Model):
    """Email address; linked many-to-many with users."""
    email_id = fields.IntField(pk=True)
    email = fields.CharField(max_length=200, index=True)
    is_primary = fields.BooleanField(default=False)
    address = fields.CharField(max_length=200)
    users = fields.ManyToManyField("models.User")
def default_name():
    """Default for Category.name: a fresh random UUID (uuid4)."""
    return uuid.uuid4()
class Category(Model):
    """Product category owned by a user."""
    slug = fields.CharField(max_length=100)
    name = fields.CharField(max_length=200, null=True, default=default_name)
    user = fields.ForeignKeyField("models.User", description="User")
    created_at = fields.DatetimeField(auto_now_add=True)
class Product(Model):
    """Content item (article or page) belonging to one or more categories."""
    categories = fields.ManyToManyField("models.Category")
    name = fields.CharField(max_length=50)
    view_num = fields.IntField(description="View Num", default=0)
    sort = fields.IntField()
    is_reviewed = fields.BooleanField(description="Is Reviewed")
    # Stored in the DB under the column name "type_db_alias".
    type = fields.IntEnumField(
        ProductType, description="Product Type", source_field="type_db_alias"
    )
    pic = fields.CharField(max_length=200)
    body = fields.TextField()
    created_at = fields.DatetimeField(auto_now_add=True)
    class Meta:
        # A product name must be unique per product type; the same column
        # pair is also indexed for lookups.
        unique_together = (("name", "type"),)
        indexes = (("name", "type"),)
class Config(Model):
    """Key/value configuration entry with an on/off status, owned by a user."""
    label = fields.CharField(max_length=200)
    key = fields.CharField(max_length=20)
    value = fields.JSONField()
    status: Status = fields.IntEnumField(Status)
    user = fields.ForeignKeyField("models.User", description="User")
class NewModel(Model):
    """Placeholder model with just a name field."""
    name = fields.CharField(max_length=50)
|
# class Student:
# '''This is a class of student and his marks'''
# def __init__(self, name, marks):
# self.name = name
# self.marks = marks
# def display(self):
# print("Student name:", self.name)
# print("Marks of the student is", self.marks)
# def grade(self):
# if self.marks >= 60:
# print("student is grade 1")
# elif self.marks >= 50:
# print("student is grade 2")
# elif self.marks >= 35:
# print("Student is grade 3")
# else:
# print("Student has failed")
# n = input("Enter Number of student in class: ")
# for i in range(int(n)):
# name = input("Enter the name of the Student: ")
# marks = int(input("Enter the student marks: "))
# s = Student(name, marks)
# s.display()
# s.grade()
# print("*"*20)
#
# class Student:
# '''This is a class of student and his marks using getters and setters'''
# def setName(self, name):
# self.name = name
# def getName(self):
# return self.name
# def setMarks(self, marks):
# self.marks = marks
# def getMarks(self):
# return self.marks
#
# n = input("Enter Number of student in class: ")
# for i in range(int(n)):
# name = input("Enter the name of the Student: ")
# marks = int(input("Enter the student marks: "))
# s = Student()
# s.setName(name)
# s.setMarks(marks)
# print("Your name is {} and marks is {}".format(s.getName(),s.getMarks()))
# class Animal:
# '''THis is the class for representation of class method'''
# legs = 4
# @classmethod
# def walk(cls, name):
# print('{} walks with {} legs'.format(name, cls.legs))
# # print('{} walks with {} legs'.format(name, Animal.legs)) # THis will also work
# Animal.walk('Dog')
# Animal.walk('Cat')
# Write a program to track the number of objects created for a class
class Test:
    """Tracks how many instances of this class have been created."""

    count = 0  # shared instance counter, incremented on every construction

    def __init__(self):
        Test.count += 1

    @classmethod
    def getNoOfObjects(cls):
        print("Number of objects created:", cls.count)
# Create two instances, then report the running instance count.
t1=Test()
t2=Test()
Test.getNoOfObjects()
# t2.getNoOfObjects() # This will also work
|
#!/usr/bin/env python
#
from ansible.module_utils.basic import *
# 创建一个AnsibleModule的实例,argument_spec初始化参数为空字典,因为我们这个模块不需要传递参数,所以传递空字典进去就好了.
import subprocess
# Create an AnsibleModule instance. argument_spec is an empty dict because
# this module takes no parameters.
module = AnsibleModule(
    argument_spec = dict(),
)
# Shell out to read the system date/time.
# Fix: the original called commands.getstatusoutput, but `commands` was never
# imported here (and the module was removed in Python 3); subprocess provides
# the same (status, output) interface.
status,output = subprocess.getstatusoutput('''date''')
if status == 0:
    # Follow Ansible's return conventions: stdout carries the command output,
    # changed=False because nothing was modified, rc=0 means success.
    result = dict(module='timezone',stdout=output,changed=False,rc=0)
    # exit_json reports a successful run back to Ansible.
    module.exit_json(**result)
else:
    result = dict(msg='execute failed',rc=status)
    # On failure only msg (plus rc) is required; fail_json reports the error.
    module.fail_json(**result)
def get_goal_string(object_dict, obj_list, obj_loc_list, goal_list,
                    goal_loc_list,env):
    """
    Returns
    ========
    str:
        A generic goal condition that will place every object based on
        its type and size at the correct goal.
    """
    # One quantified PDDL goal per environment: every object must end up at
    # a goal location whose receptacle matches the object's type and size.
    # (High-level plans use symbolic locations, never raw coordinates.)
    env_goals = {
        "bookWorld": "(forall (?b - book) (exists (?l - location ?t - bin ?s - subject ?sz - size) (and (Book_Subject ?b ?s) (Bin_Subject ?t ?s)(Book_At ?b ?l)(Bin_At ?t ?l)(Book_Size ?b ?sz)(Bin_Size ?t ?sz)))))",
        "cafeWorld": "(forall (?b - food) (exists (?l - location ?t - table ?s - food_type ?sz - size) (and (Food_Type ?b ?s) (Ordered ?t ?s)(Food_At ?b ?l)(Table_At ?t ?l)(Portion_Size ?b ?sz)(Ordered_Portion ?t ?sz)))))",
    }
    # Unknown environments yield just the goal prefix, matching the
    # original if/elif behaviour.
    return "(:goal " + env_goals.get(env, "")
def get_extra_credit_goal_string(object_dict, obj_list, obj_loc_list,
                                 goal_list, goal_loc_list,env):
    """
    Returns
    ========
    str:
        A generic goal condition using universal and existential
        quantifiers that will place every object based on its type and
        size at the correct location
    """
    # The quantified goals are identical to get_goal_string(); they are
    # duplicated here so this extra-credit entry point stands on its own.
    quantified = {
        "bookWorld": "(forall (?b - book) (exists (?l - location ?t - bin ?s - subject ?sz - size) (and (Book_Subject ?b ?s) (Bin_Subject ?t ?s)(Book_At ?b ?l)(Bin_At ?t ?l)(Book_Size ?b ?sz)(Bin_Size ?t ?sz)))))",
        "cafeWorld": "(forall (?b - food) (exists (?l - location ?t - table ?s - food_type ?sz - size) (and (Food_Type ?b ?s) (Ordered ?t ?s)(Food_At ?b ?l)(Table_At ?t ?l)(Portion_Size ?b ?sz)(Ordered_Portion ?t ?sz)))))",
    }
    return "(:goal " + quantified.get(env, "")
def sample_goal_condition(object_dict, obj_list, obj_loc_list, goal_list,
                          goal_loc_list):
    """
    Returns
    ========
    str:
        A generic goal condition that moves the robot to any one of
        the object locations.
    """
    # Demo/stock goal only (not considered for grading): it sends the robot
    # to one randomly chosen object location, independent of the total
    # number of locations and objects.  To try it in Gazebo, substitute it
    # for get_goal_string() and run hw2's refinement.py + gazebo.py.
    import random

    assert len(obj_loc_list) > 0
    chosen = obj_loc_list[random.randint(0, len(obj_loc_list) - 1)]
    return "(:goal (and " + "(Robot_At tbot3 %s)" % (chosen) + "))\n"
from python_resources.functions import permutations
# print(permutations([8,11,15]))
# print(permutations([10,12,14]))
# Demonstrate bitwise OR / AND / XOR on 8 (0b1000) and 10 (0b1010);
# each operator is shown twice — decimal and binary-literal operands —
# so consecutive lines print identical values.
for value in (8 | 10, 0b1000 | 0b1010,
              8 & 10, 0b1000 & 0b1010,
              8 ^ 10, 0b1000 ^ 0b1010):
    print(value)
from django.db import models
# Create your models here.
class CBTIapp2_model(models.Model):
    """User account model for the CBTIapp2 app."""
    username = models.CharField(max_length = 32, verbose_name = '사용자명')
    useremail = models.EmailField(max_length = 32, verbose_name = '사용자이메일')
    password = models.CharField(max_length = 32, verbose_name = '비밀번호')
    register_datetime = models.DateField(auto_now_add = True, verbose_name = "가입날짜")
    # __str__ defines how an instance renders when converted to a string.
    # Python calls __str__ whenever the object is displayed as text; without
    # it, rows added in the admin show up as "<ClassName> object".
    # To show username and password together on one admin screen, see the
    # comments in admin.py instead.
    def __str__(self):
        return self.username
    class Meta:
        # To use a custom table name, set db_table:
        #db_table = 'user_define_table'  # just put the name here
        verbose_name = '사용자 모임'  # Django normally appends an 's' automatically
        verbose_name_plural = '사용자 모임'
'''
위 작성후 콘솔에서 프로젝트 폴더 위치에서 python .\manage.py makemigrations 명령어 치면
Migrations for 'CBTIapp2':
CBTIapp2\migrations\0001_initial.py
- Create model CBTIapp2_model
라고 출력된다.
이러면 migrations 폴더 안에 db를 어떻게 만들어야할지 models.py(현재 이 파일)을 참조해서 만들어진
CBTIapp2/migrations/0001_initial.py 에 적혀있다.
이후 python manage.py migrate
위 내용 수정할때도 수정 후 python .\manage.py makemigrations 하면 Alter 명령구가 나온다.
# Sqlite3
콘솔 프로젝트 폴더 에서 .\sqlite3.exe .\db.sqlite3
현재 테이블들 확인 => .tables
특정 테이븡 스키마 확인 => .schema 위에서찾은테이블이름
sqlite를 나가려면 => .q
'''
|
from point import Point
from typing import List, Optional, cast
from scl import Scl
from decimal_math import Decimal, sin, pi, cos, sqrt
from line import Line, CircleLine
Polygon = List[Point]
def construct_center_polygon(n: int, k: int, quasiregular: bool) -> Polygon:
    # Initialize P as the center polygon in an n-k regular or quasiregular tiling.
    # Let ABC be a triangle in a regular (n,k)-tiling, where
    # A is the center of an n-gon (also center of the disk),
    # B is a vertex of the n-gon, and
    # C is the midpoint of a side of the n-gon adjacent to B.
    angle_a = pi / n
    angle_b = pi / k
    angle_c = pi / 2
    # For a regular tiling, we need to compute the distance s from A to B.
    sin_a = sin(angle_a)
    sin_b = sin(angle_b)
    s = sin(angle_c - angle_b - angle_a) / sqrt(Decimal(1) - sin_b * sin_b - sin_a * sin_a)
    # But for a quasiregular tiling, we need the distance s from A to C.
    if quasiregular:
        s = (s * s + Decimal(1)) / (Decimal(2) * s * cos(angle_a))
        s = s - sqrt(s * s - Decimal(1))
    # Now determine the coordinates of the n vertices of the n-gon.
    # They're all at distance s from the center of the Poincare disk.
    # Each vertex starts at (s, s), then its x/y are scaled by the cos/sin
    # of its angle; (3 + 2*i) spaces vertices by odd multiples of pi/n.
    poly = [Point(s, s) for _ in range(n)]
    for i, pt in enumerate(poly):
        pt.x *= cos(Decimal(3 + 2 * i) * angle_a)
        pt.y *= sin(Decimal(3 + 2 * i) * angle_a)
    return poly
def get_lines(poly: Polygon) -> List[CircleLine]:
    """Build the line/arc for every edge of the polygon (last wraps to first)."""
    count = len(poly)
    edges = []
    for index in range(count):
        edges.append(Line.new(poly[index], poly[(index + 1) % count]))
    return edges
def get_scl(poly: Polygon, width: int, height: int) -> Scl:
    """Accumulate the screen coordinate list across every edge of the polygon."""
    accumulated: Optional[Scl] = None
    for edge in get_lines(poly):
        accumulated = edge.append_scl(accumulated, width, height)
    # At least one edge was appended, so the accumulator is no longer None.
    return cast(Scl, accumulated)
def poly(n: int) -> Polygon:
    """Return a polygon of n points, all initialized at the origin."""
    return [Point(Decimal(0), Decimal(0)) for _ in range(n)]
def is_not_nan(poly: Polygon) -> bool:
    """True when no point of the polygon is NaN."""
    return all(not point.is_nan() for point in poly)
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
# Long-description sources rendered on PyPI.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime dependencies (pinned where the service is version-sensitive).
requirements = [
    "Click>=7.0",
    "Flask==1.1.2",
    "authlib==0.15.3",
    "email_validator",
    "flask_migrate",
    "flask_cors",
    "flask_login",
    "flask_marshmallow",
    "flask_sqlalchemy",
    "flask_user",
    "flask_swagger",
    "google-api-core==1.25.1",
    "requests",
    "psycopg2-binary",
    "flask-social-login==0.2.2",
]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
    author="Pyunghyuk Yoo",
    author_email='yoophi@gmail.com',
    python_requires='>=3.5',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
    entry_points={
        'console_scripts': [
            'auth_service=auth_service.cli:main',
        ],
    },
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='auth_service',
    name='auth_service',
    packages=find_packages(include=['auth_service', 'auth_service.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/yoophi/auth_service',
    version='0.1.0',
    zip_safe=False,
)
|
import pygame, random
#variables
# Window and object dimensions in pixels.
screen_width = 920
screen_height = 560
obj_size = 50
#Colors
white_color = (200, 200, 200)
light_gray = pygame.Color('grey12')
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((screen_width, screen_height))
def mover_rectangulo():
    """Move the paddle vertically by `speed`, stopping at the bottom edge."""
    global speed
    if rectangulo.top + obj_size >= screen_height:
        return
    rectangulo.top += speed
def mover_bola():
    """Advance the ball one step, bouncing off edges and the paddle."""
    global speed_bola_x,speed_bola_y
    # NOTE(review): the _x/_y suffixes look swapped — speed_bola_x is added
    # to bola.top (vertical) and speed_bola_y to bola.left (horizontal).
    # The logic is self-consistent, but confirm before renaming since these
    # globals are shared with start_bola() and the main loop.
    # Invert vertical direction when hitting the top/bottom edges.
    if bola.top + obj_size > screen_height or bola.top <= 0:
        speed_bola_x = -speed_bola_x
    # Ball left the field on the left/right: re-serve from the center.
    if bola.left + obj_size > screen_width or bola.left <= 0:
        start_bola()
    # Paddle collision on the left side reverses horizontal travel.
    if bola.left < obj_size and rectangulo.top < bola.top < rectangulo.top + obj_size:
        speed_bola_y = -speed_bola_y
    bola.top += speed_bola_x
    bola.left += speed_bola_y
def start_bola():
    """Re-center the ball and relaunch it along a random diagonal."""
    global speed_bola_x,speed_bola_y
    bola.top = screen_height // 2
    bola.left = screen_width // 2
    # Random sign per axis so serves go in unpredictable directions.
    speed_bola_x = random.choice((1, -1)) * 3
    speed_bola_y = random.choice((1, -1)) * 3
# Player paddle, ball, and motion state.
rectangulo = pygame.Rect(10, 10, obj_size, obj_size)
bola = pygame.Rect(50, 10, obj_size, obj_size)
speed = 0
speed_bola_x = 3
speed_bola_y = 3
# Main game loop: move objects, handle input, redraw at 60 FPS.
# NOTE(review): pygame.QUIT is not handled, so closing the window does not
# exit the loop.
while True:
    screen.fill(light_gray)
    mover_rectangulo()
    mover_bola()
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            # Arrow keys set the paddle's vertical speed...
            if event.key == pygame.K_UP:
                speed = -3
            elif event.key == pygame.K_DOWN:
                speed = 3
        elif event.type == pygame.KEYUP:
            # ...and releasing any key stops it.
            speed = 0
    pygame.draw.rect(screen,white_color, rectangulo )
    pygame.draw.ellipse(screen, white_color, bola)
    pygame.display.flip()
    clock.tick(60)
|
#checks for observaciones and predicciones keywords and data
import sys
def headersFound(fileString):
    """Locate the 'observaciones' and 'predicciones' sections in the input text.

    Args:
        fileString: full text of the input file.

    Returns:
        (observPos, predictPos): character offsets of the two keywords.

    Exits the process (after printing which section is missing) when either
    keyword is absent.
    """
    observPos=fileString.find('observaciones')
    predictPos=fileString.find('predicciones')
    missing=''
    try:
        # Funnel both "missing section" cases through one error path.
        # Fix: the original exited early for missing observations with a
        # separate hard-coded message, bypassing the unified report below
        # (and mis-reporting when both sections were absent).
        if observPos==-1:
            missing='observPos'
        elif predictPos==-1:
            missing='predictPos'
        if (len(missing)>0):
            raise ValueError
        return(observPos,predictPos)
    except ValueError:
        print("Input File missing {} data. Exiting ...".format(missing))
        sys.exit()
|
import pandas as pd
import geopandas as gpd
from scipy import stats
import numpy as np
import math
# class Manipulation():
# def __init__(self):
# print("Initializing class 'Manipulation'")
def drop_unit_columns(df):
    """Drop every column whose name contains '.unit', in place.

    Args:
        df: input DataFrame.

    Returns:
        The same DataFrame with the unit columns removed.
    """
    unit_columns = df.filter(like='.unit').columns
    # Fix: removed a no-op `units.tolist()` call whose result was discarded.
    df.drop(unit_columns, axis=1, inplace=True)
    print('Dropped unit columns: ', unit_columns)
    return df
def add_column_datetime(df):
    """Add a 'datetime' column parsed from the 'time' column; returns the same df."""
    df['datetime'] = pd.to_datetime(df['time'])
    return df
def add_coordinate_columns(df):
    """Split the 'geometry' points into separate 'lat' (y) and 'lng' (x) columns."""
    df['lat'] = df['geometry'].map(lambda point: point.y)
    df['lng'] = df['geometry'].map(lambda point: point.x)
    return df
def normalize(df):
    """Min-max scale every float64 column to [0, 1] within each 'track.id' group."""
    for column in df.select_dtypes(['float64']).columns:
        grouped = df.groupby('track.id')[column]
        df[column] = grouped.transform(lambda s: (s - s.min()) / (s.max() - s.min()))
    return df
def standardize(df):
    """Z-score every float64 column (subtract mean, divide by std) per 'track.id' group."""
    for column in df.select_dtypes(['float64']).columns:
        grouped = df.groupby('track.id')[column]
        df[column] = grouped.transform(lambda s: (s - s.mean()) / s.std())
    return df
def get_dummies_sensor(df):
    """One-hot encode every 'sensor.*' column into indicator columns."""
    sensor_columns = list(df.filter(like='sensor.', axis=1).columns)
    return pd.get_dummies(df, columns=sensor_columns)
def interpolate_nearest(df):
    """Fill NaNs in every float64 column via nearest-neighbour interpolation.

    NOTE(review): interpolation runs over the whole frame, not per
    'track.id' group (GeoPandas offers no grouped interpolate here).
    Remaining edge NaNs are closed with a forward then backward fill.
    """
    for column in df.select_dtypes(['float64']).columns:
        filled = df[column].interpolate(method='nearest', limit_direction="both", axis=0)
        df[column] = filled.ffill().bfill()
    return df
def get_numerical(df):
    """Return a copy of the DataFrame containing only its float64 columns."""
    return df.select_dtypes(['float64']).copy()
def squareRoot_transformation(df, column):
    """Return the square root of *column* (and print its summary stats)."""
    transformed = df[column] ** 0.5
    print(transformed.describe())
    return transformed
def reciprocal_transformation(df, column):
    """Return 1/(x+1) for *column* (and print its summary stats).

    The +1 shift keeps zero values finite.
    """
    transformed = 1 / (df[column] + 1)
    print(transformed.describe())
    return transformed
def log_transformation(df, column):
    """Return log(x+1) for *column* (and print its summary stats).

    The +1 shift keeps zero values finite.
    """
    transformed = np.log(df[column] + 1)
    print(transformed.describe())
    return transformed
import sys
import numpy as np
sys.path.append("..")
import grading
# code_size = 71
# img_shape = (38, 38, 3)
def submit_char_rnn(submission, email, token):
    """Grade the char-RNN assignment and submit the answers to the grader.

    Args:
        submission: (history, samples) — loss history and 25 generated samples.
        email: grader account email.
        token: grader submission token.
    """
    grader = grading.Grader("cULEpp2NEeemQBKZKgu93A")
    history, samples = submission
    assert len(samples) == 25
    # Loss should have decreased: early-window mean above late-window mean.
    grader.set_answer("pttMO", int(np.mean(history[:10]) > np.mean(history[-10:])))
    # Sample diversity: number of distinct generated strings.
    grader.set_answer("uly0D", len(set(samples)))
    grader.submit(email, token)
|
import os.path
from time import sleep
import socket
import traceback
import struct
import copy
import logging
import paramiko
from .ssh import SSH
from .ssh_bruteforce import *
from utils.output import Output
from utils.dispatch import dispatch
from utils.db import DB
logging.getLogger("paramiko").setLevel(logging.CRITICAL)
def sshscan_worker(target, actions, creds, timeout):
    """Probe one SSH endpoint and run the requested actions.

    Grabs the server banner and records the open port, optionally tries the
    supplied credentials (running a command on success), and optionally
    launches a bruteforce sweep.

    Args:
        target:  dict with 'hostname' and 'port' keys
        actions: dict of requested actions ('command', 'bruteforce', ...)
        creds:   dict that may contain 'username' and 'password'
        timeout: connection timeout in seconds
    """
    ssh = None  # bound before the try so the finally clause is always safe
    try:
        ssh = SSH(target['hostname'], target['port'], timeout)

        version = ssh.get_version()
        if not version:
            # No banner: not an SSH service worth recording
            return
        Output.write({'target': ssh.url(), 'message': '%s' % version})
        DB.insert_port({
            'hostname': target['hostname'],
            'port': target['port'],
            'protocol': 'tcp',
            'service': 'ssh',
            'version': version,
        })

        if 'username' in creds and 'password' in creds:
            success = ssh.auth(creds['username'], creds['password'])
            if success:
                Output.success({'target': ssh.url(), 'message': 'Successful authentication with username %s and password %s' % (creds['username'], creds['password'])})
                cred_info = {
                    'hostname': target['hostname'],
                    'port': target['port'],
                    'service': 'ssh',
                    'url': ssh.url(),
                    'type': 'password',
                    'username': creds['username'],
                    'password': creds['password'],
                }
                DB.insert_credential(cred_info)

                if 'command' in actions:
                    output = "Command '%s':\n" % actions['command']['command']
                    output += ssh.execute(actions['command']['command'])
                    Output.write({'target': target['hostname'], 'message': output})
            else:
                Output.minor({'target': ssh.url(), 'message': 'Authentication failure with username %s and password %s' % (creds['username'], creds['password'])})

        if 'bruteforce' in actions:
            # BUG FIX: the original condition was
            #     'username_file' in actions['bruteforce'] != None
            # which Python chains as (in ...) and (... != None); the plain
            # membership test is what was intended.
            if 'username_file' in actions['bruteforce']:
                Output.highlight({'target': ssh.url(), 'message': 'Starting bruteforce:'})

                username_file = actions['bruteforce']['username_file']
                password_file = actions['bruteforce']['password_file'] if 'password_file' in actions['bruteforce'] else None
                bruteforce_workers = actions['bruteforce']['workers']

                # The generator will provide a username:password_list couple
                gen = bruteforce_generator(target, username_file, password_file)
                gen_size = bruteforce_generator_count(target, username_file, password_file)

                args = (timeout, actions['bruteforce']['bruteforce_delay'])
                dispatch(gen, gen_size, bruteforce_worker, args, workers=bruteforce_workers, process=False, pg_name=target['hostname'])

    except paramiko.AuthenticationException as e:
        Output.minor({'target': ssh.url(), 'message': 'Authentication failure with username %s and password %s' % (creds['username'], creds['password'])})
    except ValueError as e:
        Output.minor({'target': ssh.url(), 'message': "Authentication failure because of crypto failure: %s" % str(e)})
    except paramiko.SSHException as e:
        # Transport-level noise (e.g. banner/negotiation errors): ignore
        pass
    except socket.error:
        pass
    except Exception as e:
        # BUG FIX: SSH() itself may have raised, in which case ssh is still
        # None and ssh.url() would mask the real error with an AttributeError.
        url = ssh.url() if ssh is not None else target['hostname']
        Output.write({'target': url, 'message': '%s: %s\n%s' % (type(e), e, traceback.format_exc())})
    finally:
        # BUG FIX: ssh is None when the connection itself failed; the
        # original unconditionally called ssh.disconnect() -> NameError.
        if ssh is not None:
            ssh.disconnect()
|
# Generated by Django 2.2.17 on 2020-11-30 23:23
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: refreshes hard-coded datetime defaults.

    NOTE(review): the defaults below are frozen at generation time
    (datetime.datetime(2020, 12, 1, ...)) rather than computed at insert time
    — typical of `default=datetime.now()` (called) instead of
    `default=datetime.now` in the model; confirm against the model definition.
    Do not edit generated migrations by hand; create a follow-up migration.
    """

    dependencies = [
        ('app', '0009_auto_20201201_0217'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blog',
            name='posted',
            field=models.DateTimeField(db_index=True, default=datetime.datetime(2020, 12, 1, 2, 23, 25, 713904), verbose_name='Опубликована'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            # NOTE(review): a TextField holding a datetime default looks like a
            # modelling mistake (DateTimeField expected) — verify before reuse.
            field=models.TextField(db_index=True, default=datetime.datetime(2020, 12, 1, 2, 23, 25, 714929), verbose_name='Дата'),
        ),
    ]
|
# -*- coding: utf-8 -*-
import time
import socket
import json
import threading
from queue import Queue
import logging
UDP_PORT = 8988
TCP_PORT = 8987
class Sock:
    """
    LAN discovery and messaging socket for the game.

    Host discovery currently just broadcasts UDP messages to every address in
    the /24, instead of narrowing the host range with multithreaded TCP port
    probing or ARP.  Ports in use: TCP 8987 for host discovery, UDP 8988 for
    game setup.  Note: switching from UDP to TCP would still require the same
    multithreading and per-player delivery bookkeeping as the hand-rolled UDP
    reply scheme — TCP would only add retransmission.
    """

    def __init__(self, tcp_bool=True, thread_udp_bool=True):
        # tcp_bool: start the TCP discovery listener thread
        # thread_udp_bool: send UDP through a background queue-draining thread
        self.port_udp = UDP_PORT
        self.port_tcp = TCP_PORT
        self.tcp_bool = tcp_bool
        self.udp_bool = thread_udp_bool
        # UDP connect
        address = (self.localip(), self.port_udp)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(address)
        logging.info('Bind UDP socket %s ok.' % str(address))
        self.done = False  # set True by close() to stop all worker threads
        # UDP sending
        if self.udp_bool:
            self.q_send = Queue()  # SEND QUEUE [((info,msg), ip), (), ...]
            thread_send = threading.Thread(target=self.thread_msg_send)
            thread_send.setDaemon(True)
            thread_send.start()
        # UDP listening
        self.q = Queue()  # GET QUEUE [((info,msg), ip), (), ...]
        thread1 = threading.Thread(target=self.thread_msg_recv)
        thread1.setDaemon(True)  # daemon: whole process exits when the main thread ends
        thread1.start()
        # TCP listening
        if self.tcp_bool:
            thread_tcp = threading.Thread(target=self.thread_tcp_server)
            thread_tcp.setDaemon(True)  # daemon: whole process exits when the main thread ends
            thread_tcp.start()

    def close(self):
        """Drain the UDP send queue (bounded wait), then stop threads and close sockets."""
        # let the UDP sender finish first
        if self.udp_bool:
            start_wait = time.time()
            outtime_wait = 5
            while not self.q_send.empty():  # let thread_msg_send flush all queued messages
                time.sleep(0.1)
                if time.time()-start_wait > outtime_wait:  # give up after a timeout
                    logging.warning('Sock.thread_msg_send: Waiting OUT_TIME!')
                    break
        # stop everything
        self.done = True
        if self.tcp_bool:  # force-close TCP
            self.close_tcp()
        self.sock.close()  # close the UDP socket
        logging.info('done Sock.sock.close().')

    def close_tcp(self):
        """Close the TCP discovery listener socket."""
        self.sock_tcp.close()
        logging.info('done Sock.sock_tcp.close().')

    def thread_tcp_server(self):
        """Every instance runs this TCP listener; peers probe it for host discovery."""
        self.sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock_tcp.bind((self.localip(), self.port_tcp))
        self.sock_tcp.listen(20)
        while not self.done:
            try:
                client_connect, client_address = self.sock_tcp.accept()  # blocking
                # data, address = self.sock_tcp.recv(1024)
                # connection itself is the signal; no payload is exchanged
                client_connect.close()
            except Exception as msg:
                logging.warning('SOCK_TCP ERROR-->%s' % msg)

    def thread_msg_send(self):
        """
        Background sender: drains self.q_send, JSON-encoding each message.
        Note: queue entries carry no port — the UDP port is appended here.
        """
        while not self.done:
            if not self.q_send.empty():
                msg, ip = self.q_send.get()
                tmp = json.dumps(msg, )
                logging.info('SEND [%s]:%s' % (ip + ':' + str(self.port_udp), json.dumps(msg)))
                self.sock.sendto(tmp.encode('utf-8'), (ip, self.port_udp))
            else:
                time.sleep(0.001)  # tiny sleep avoids a busy-wait that starves other threads

    def msg_direct_send(self, data_tuple):
        """without threading, UDP directly send"""
        msg, ip = data_tuple
        tmp = json.dumps(msg, )
        logging.info('SEND_DIRECT [%s]:%s' % (ip + ':' + str(self.port_udp), json.dumps(msg)))
        self.sock.sendto(tmp.encode('utf-8'), (ip, self.port_udp))

    def thread_msg_recv(self):
        """
        Background receiver: queues decoded messages, dropping the sender port.
        A known quirk: error 10054 may appear here, possibly a winsock bug:
        If sending a datagram using the sendto function results in an "ICMP port unreachable" response and the select
        function is set for readfds, the program returns 1 and the subsequent call to the recvfrom function does not
        work with a WSAECONNRESET (10054) error response. In Microsoft Windows NT 4.0, this situation causes the select
        function to block or time out.
        """
        while not self.done:
            try:
                data, address = self.sock.recvfrom(1024)  # data=JSON, address=(ip, port); blocking
                logging.info('RECV [%s]:%s' % (address[0] + ':' + str(self.port_udp), data))
                self.q.put((json.loads(data), address[0]))
            except Exception as msg:
                logging.warning('SOCK RECV ERROR-->%s' % msg)
        # logging.info('RECV [%s]:%s' % (address[0] + ':' + str(self.port_udp), data))
        # self.q.put((json.loads(data), address[0]))

    def localip(self):
        # NOTE(review): may return 127.0.0.1 on hosts whose hostname resolves
        # to loopback — verify on the target platform.
        return socket.gethostbyname(socket.gethostname())

    def scan_ip_tcp(self):
        """Sequentially TCP-probe every host in the local /24 on the discovery port.

        Returns the list of IPs that accepted the connection.  Slow (~0.1 s
        timeout per address, single-threaded — see inline TODO).
        """
        time_start = time.time()
        ip_up = []
        ip_head = '.'.join(self.localip().split('.')[0:3])
        ip_list = [ip_head + '.' + str(i) for i in range(256)]
        port_list = [self.port_tcp]
        for ip in ip_list:
            # logging.info('Scan %s' % ip)
            up = False
            for port in port_list:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(0.1)  # 100ms; TODO: needs multithreading, otherwise a full sweep takes ~25s
                # print 'scan..'
                result = s.connect_ex((ip, port))
                # print (ip,port),
                if result == 0:
                    # print(ip)
                    logging.info('Port %d: open' % (port))
                    up = True
                s.close()
            if up:
                ip_up.append(ip)
        time_end = time.time()
        # logging.info('IP Scan:%s'%ip_list)
        # logging.info('IP Scan:%s'%port_list)
        logging.info('PortScan done! %d IP addresses (%d hosts up) scanned in %f seconds.' % (
            len(ip_list), len(ip_up), time_end - time_start))
        logging.info('Up hosts:')
        for i in ip_up:
            logging.info(i)
        # return [self.localip()]
        return ip_up

    def scan_ip_arp(self):
        # placeholder: ARP-based discovery not implemented
        pass

    def scan_hostip(self):
        """Drain the receive queue for 'host created' announcements.

        Returns the announcing IPs; every other message is put back on the queue.
        """
        hostip_list = []
        other_msg = []
        while not self.q.empty():
            (info, msg), ip = self.q.get()
            if info == 'host created':
                hostip_list.append(ip)  # record only the IP; the port is implicit
            else:
                other_msg.append(((info, msg), ip))
        for i in other_msg:  # requeue the non-host-announcement messages
            self.q.put(i)
        return hostip_list

    def broadcast(self, messages, ip_list):
        """Queue the same message for every IP in ip_list."""
        for i in ip_list:
            self.q_send.put((messages, i))

    def host_broadcast(self):
        """Announce this host to the whole /24 (blasting all 256 addresses; see TODO)."""
        ip_head = '.'.join(self.localip().split('.')[0:3])
        # ip_list = [ip_head + '.' + str(i) for i in range(100, 111, 1)] # test, to be con...
        ip_list = [ip_head + '.' + str(i) for i in range(256)]
        # ip_list = self.scan_ip_tcp()
        logging.info('broadcast host ip list:%s' % str(ip_list))
        self.broadcast(messages=('host created', ''), ip_list=ip_list)
|
## @file
## Script for the quest room of the Brynknot Sewers Maze.
from Atrinik import *
from QuestManager import QuestManager
import os
# The player that triggered this event and the event object itself.
activator = WhoIsActivator()
me = WhoAmI()

## Talthor's quest.
quest = {
    "quest_name": "Enemies beneath Brynknot",
    "type": QUEST_TYPE_KILL,
    "kills": 1,
    "message": "Go through the Brynknot Sewers Maze, which you can access by taking Llwyfen's portal in Underground City, and kill whoever is responsible for the planned attack on Brynknot, then return to Guard Captain Talthor in Brynknot.",
}

qm = QuestManager(activator, quest)

# Until the quest's kill is done, shunt the player into the quest map
# (sewers_bu_0404, sibling of this map) instead of letting them proceed.
if not qm.finished():
    activator.TeleportTo(os.path.dirname(me.map.path) + "/sewers_bu_0404", me.hp, me.sp, 1)
    # Non-zero return tells the engine the default action was handled here.
    SetReturnValue(1)
|
import turtle

# Draw a smiley face: yellow head, two blue eyes, a nose stroke, red mouth arc.


def _filled_circle(radius, outline, fill):
    """Draw a filled circle starting from the pen's current position."""
    turtle.color(outline, fill)
    turtle.begin_fill()
    turtle.circle(radius)
    turtle.end_fill()


def _jump_to(x, y):
    """Move the pen to (x, y) without drawing."""
    turtle.up()
    turtle.setpos(x, y)
    turtle.down()


turtle.shape("turtle")

# Head
_filled_circle(100, "black", "yellow")

# Eyes
for eye_x in (-30, 30):
    _jump_to(eye_x, 100)
    _filled_circle(20, "black", "blue")

# Nose: short downward stroke from the face centre
_jump_to(0, 90)
turtle.right(90)
turtle.forward(20)

# Mouth: thick red half-circle arc
_jump_to(-60, 80)
turtle.pensize(10)
turtle.color("red", "red")
turtle.circle(60, 180)
def cuboid(arg):
    """Build triangle vertex rows for an axis-aligned cuboid.

    arg is a list: [_, cx, cy, cz, length, width, height, r, g, b].
    Returns 36 rows (12 triangles x 3 vertices); each row is
    [x, y, z, r, g, b, "1", "0.0", "0.0"], with the last row of each triangle
    carrying a trailing newline in its final field (raw-file row terminator).
    """
    cx, cy, cz = float(arg[1]), float(arg[2]), float(arg[3])
    half_l = 0.5 * float(arg[4])
    half_w = 0.5 * float(arg[5])
    half_h = 0.5 * float(arg[6])

    # 8 corners, enumerated with x varying slowest (matches the triangle table).
    corners = [
        [cx + sx * half_l, cy + sy * half_w, cz + sz * half_h]
        for sx in (-1, 1)
        for sy in (-1, 1)
        for sz in (-1, 1)
    ]

    # 1-based corner indices of the 12 triangles (2 per face).
    triangles = [
        [1, 2, 3],
        [2, 3, 4],
        [3, 8, 4],
        [3, 7, 8],
        [1, 5, 3],
        [5, 3, 7],
        [1, 2, 6],
        [1, 5, 6],
        [4, 8, 6],
        [2, 4, 6],
        [7, 8, 5],
        [5, 6, 8],
    ]

    colour = [float(arg[7]), float(arg[8]), float(arg[9])]
    rows = []
    for a, b, c in triangles:
        rows.append([*corners[a - 1], *colour, "1", "0.0", "0.0"])
        rows.append([*corners[b - 1], *colour, "1", "0.0", "0.0"])
        # final vertex of the triangle terminates the output line
        rows.append([*corners[c - 1], *colour, "1", "0.0", "0.0\n"])
    return rows
def toFile(v, name):
    """Append each row of v to file `name`, space-separated, one row per line.

    Uses a context manager so the file handle is closed even if a write fails
    (the original open/close pair leaked the handle on error).
    """
    with open(name, "a") as f:
        for vi in v:
            f.write(" ".join(str(i) for i in vi) + "\n")
# Assemble a blocky humanoid figure from coloured cuboids, appending triangle
# rows to .raw mesh files (one file per animatable body part).
# Each cuboid(...) call is [_, cx, cy, cz, length, width, height, r, g, b].

##front part
toFile(cuboid(["x",0,-50,0,30,60,10,0,0,1]),"front.raw")
toFile(cuboid(["x",0,-85,0,30,10,10,1,0,0]),"front.raw")
toFile(cuboid(["x",0,-100,0,30,20,10,0,1,0]),"front.raw")
toFile(cuboid(["x",7.5,-112.5,0,10,5,10,0,1,1]),"front.raw")
toFile(cuboid(["x",-7.5,-112.5,0,10,5,10,0,1,1]),"front.raw")
##right hand
toFile(cuboid(["x",20,-50,0,10,10,2.5,1,0,1]),"right_hand_low.raw")
toFile(cuboid(["x",35,-50,0,20,5,2.5,0.5,0.5,1]),"right_hand_low.raw")
toFile(cuboid(["x",55,-50,0,20,5,2.5,0.3,0,1]),"right_hand_high.raw")
toFile(cuboid(["x",55,-50,0,20,5,2.5,0.3,0,1]),"right_hand_high.raw")
toFile(cuboid(["x",67.5,-47.5,0,5,15,2.5,0.3,0,1]),"right_hand_high.raw")
toFile(cuboid(["x",70,-50,0,10,10,2.5,0.3,0,1]),"right_hand_high.raw")
##left hand
toFile(cuboid(["x",-20,-50,0,10,10,2.5,1,0,1]),"left_hand_low.raw")
toFile(cuboid(["x",-35,-50,0,20,5,2.5,0.5,0.5,1]),"left_hand_low.raw")
toFile(cuboid(["x",-55,-50,0,20,5,2.5,0.3,0,1]),"left_hand_high.raw")
toFile(cuboid(["x",-55,-50,0,20,5,2.5,0.3,0,1]),"left_hand_high.raw")
toFile(cuboid(["x",-67.5,-47.5,0,5,15,2.5,0.3,0,1]),"left_hand_high.raw")
toFile(cuboid(["x",-70,-50,0,10,10,2.5,0.3,0,1]),"left_hand_high.raw")
##right leg
toFile(cuboid(["x",7.5,-120,0,5,10,2.5,0.3,0,1]),"right_leg_high.raw")
toFile(cuboid(["x",7.5,-130,0,5,10,2.5,0.3,0,1]),"right_leg_low.raw")
toFile(cuboid(["x",7.5,-140,0,10,10,10,0.3,0,1]),"right_leg_low.raw")
##left leg
toFile(cuboid(["x",-7.5,-120,0,5,10,2.5,0.3,0,1]),"left_leg_high.raw")
toFile(cuboid(["x",-7.5,-130,0,5,10,2.5,0.3,0,1]),"left_leg_low.raw")
toFile(cuboid(["x",-7.5,-140,0,10,10,10,0.3,0,1]),"left_leg_low.raw")
|
'''
Created on Mar 11, 2021
@author: ssmup
'''
import discord
from discord.ext import commands
class LeagueOfLegends(commands.Cog):
    '''
    Cog providing League of Legends related commands (currently op.gg lookups).
    '''

    def __init__(self, bot):
        '''
        Store the bot instance this Cog is attached to.
        '''
        self.bot = bot

    @commands.command(name='opgg', help='!eba opgg <SummonerName (no spaces)> [NA/EU/KR/..] - Get the op.gg link to a certain summoner.')
    async def get_op_gg(self, ctx, summoner_name, region=None):
        '''
        Create an op.gg summoner search link from the given summoner name and region (defaults to NA) and sends it to the provided context.
        '''
        regions = ['kr', 'jp', 'na', 'euw', 'eune', 'oce', 'br', 'las', 'lan', 'ru', 'tr', 'sg', 'id', 'ph', 'tw', 'vn', 'th']
        #First, check if region is in the given list.
        #But also check if region is defined.
        if(region is None):
            #Region defaults to North America
            url_prefix = 'na'
            await ctx.send('Defaulting to North America.')
        elif(region in regions):
            #Check if the region is within the given list.
            url_prefix = region
            if(region == 'kr'):
                #The korean op.gg is actually www.op.gg, not kr.op.gg
                url_prefix = 'www'
        else:
            #Bad region given.
            await ctx.send('You entered a bad region, or maybe you entered the summoner name with spaces.')
            await ctx.send('Here are your region options:' + ('[%s]' % ', '.join(map(str, regions))).upper())
            await ctx.send('Try again.')
            return
        #Make sure summoner name is given
        if(summoner_name is None):
            await ctx.send('Error: Next time, enter a summoner name.')
            # BUG FIX: previously fell through and built a link containing
            # the name "None"; bail out instead.
            return
        #Construct link
        link = 'https://' + url_prefix + '.op.gg/summoner/userName=' + summoner_name
        await ctx.send(link)
def setup(bot):
    '''
    Adds the LoL command Cog to the bot.

    Entry point discord.py calls when this extension is loaded.
    '''
    bot.add_cog(LeagueOfLegends(bot))
|
#
# Copyright (C) 2013 - 2015 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring
import os
import tempfile
import unittest
import anyconfig.backend.configobj as TT
from anyconfig.tests.common import dicts_equal
# Sample ConfigObj document exercising quoting, nested sections, inline
# comments and triple-quoted multiline values.
CNF_0_S = """\
# This is the 'initial_comment'
# Which may be several lines
keyword1 = value1
'keyword 2' = 'value 2'
[ "section 1" ]
# This comment goes with keyword 3
keyword 3 = value 3
'keyword 4' = value4, value 5, 'value 6'
[[ sub-section ]] # an inline comment
# sub-section is inside "section 1"
'keyword 5' = 'value 7'
'keyword 6' = '''A multiline value,
that spans more than one line :-)
The line breaks are included in the value.'''
[[[ sub-sub-section ]]]
# sub-sub-section is *in* 'sub-section'
# which is in 'section 1'
'keyword 7' = 'value 8'
[section 2] # an inline comment
keyword8 = "value 9"
keyword9 = value10 # an inline comment
# The 'final_comment'
# Which also may be several lines
"""

# Expected parse of the triple-quoted 'keyword 6' value above.
_ML_0 = """A multiline value,
that spans more than one line :-)
The line breaks are included in the value."""

# Expected dict form of CNF_0_S after parsing.
CNF_0 = {'keyword 2': 'value 2',
         'keyword1': 'value1',
         'section 1': {'keyword 3': 'value 3',
                       'keyword 4': ['value4', 'value 5', 'value 6'],
                       'sub-section': {'keyword 5': 'value 7',
                                       'keyword 6': _ML_0,
                                       'sub-sub-section': {
                                           'keyword 7': 'value 8'}}},
         'section 2': {'keyword8': 'value 9', 'keyword9': 'value10'}}
class Test10(unittest.TestCase):
    """Round-trip the ConfigObj parser against in-memory strings."""

    psr = TT.Parser()
    cnf = CNF_0
    cnf_s = CNF_0_S

    def test_10_loads(self):
        """Parsing the sample string yields the expected dict."""
        parsed = self.psr.loads(self.cnf_s)
        self.assertTrue(dicts_equal(parsed, self.cnf), str(parsed))

    def test_30_dumps(self):
        """Dumping then re-parsing preserves the data."""
        dumped = self.psr.dumps(self.cnf)
        reparsed = self.psr.loads(dumped)
        self.assertTrue(dicts_equal(reparsed, self.cnf), str(reparsed))
class Test20(unittest.TestCase):
    """Exercise the ConfigObj parser against real files and streams."""

    psr = TT.Parser()
    cnf = CNF_0
    cnf_s = CNF_0_S

    def setUp(self):
        # BUG FIX: the original discarded the mkstemp file descriptor and
        # wrote via an unclosed `open(...)`, leaking both the fd and the
        # handle; fdopen + `with` closes everything deterministically.
        (fd, self.cpath) = tempfile.mkstemp(prefix="ac-bc-test-")
        with os.fdopen(fd, 'w') as out:
            out.write(self.cnf_s)

    def tearDown(self):
        os.remove(self.cpath)

    def test_20_load(self):
        cnf = self.psr.load(self.cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

    def test_30_load__from_stream(self):
        with open(self.cpath, 'rb') as stream:
            cnf = self.psr.load(stream)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

    def test_40_dump(self):
        cpath = self.cpath + ".new"
        self.psr.dump(self.cnf, cpath)
        cnf = self.psr.load(cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))

    def test_50_dump__to_stream(self):
        cpath = self.cpath + ".new.2"
        with open(cpath, 'wb') as stream:
            self.psr.dump(self.cnf, stream)
        cnf = self.psr.load(cpath)
        self.assertTrue(dicts_equal(cnf, self.cnf), str(cnf))
# vim:sw=4:ts=4:et:
|
import asyncio
import json
import os
from pathlib import Path
from typing import List, Optional, Generator, Dict
from server.data import Metrics, Segment, Value
from server.server import post_after, post_after_async, Component, LogAccessMixin, JSONType
from abr.video import get_video_bit_rate, get_vmaf, get_chunk_size
# Port of the RL training server that receives per-segment rewards.
# NOTE(review): name has a typo (TRAING -> TRAINING); kept for compatibility.
TRAING_SERVER_PORT = 5000

# Raw-QoE coefficients (bitrate-based): rebuffer and quality-switch penalties,
# quality reward boost, and the kbps->mbps divisor.
PENALTY_REBUF = 4.3
PENALTY_QSWITCH = 1.0
BOOST_QUALITY = 1.0
K_in_M = 1000.0

# VMAF-QoE coefficients. NOTE(review): PENALITY/SWITCING are typos; kept.
REBUF_PENALITY_QOE = 100.
SWITCING_PENALITY_QOE = 2.5

# Nominal video segment duration in seconds.
SEGMENT_LENGTH = 4.0
class MetricsProcessor(LogAccessMixin):
    """
    MetricsProcessor that prelucrates a stream of incoming front-end metrics into
    derived metrics.
    """
    metrics: List[Metrics]      # all raw metric batches received so far
    timestamps: List[int]       # wall-clock timestamp of each advanced segment
    segments: List[Segment]     # segments accepted so far, in order
    index: int                  # index of the segment currently being tracked

    def __init__(self, video: str, logging: bool = False) -> None:
        super().__init__()
        self.video = video
        self.metrics = []
        self.timestamps = [0]
        self.segments = []
        self.index = 1
        self.logging = logging
        self.vmaf_previous = 0  # VMAF of the previous segment, for switch penalty

    def check_quality(self, segment: Segment) -> Optional[Dict[str, float]]:
        # Adjusts quality in case the rebuffering mechanism steps in
        #
        # Detects a late quality "correction": the same segment index reloaded
        # at a different quality after the original load.  Returns an update
        # dict for plotting, or None when nothing changed.
        last_segment = self.segments[-1]
        if segment.quality == last_segment.quality:
            return None
        if segment.index == last_segment.index and segment.timestamp > last_segment.timestamp:
            self.log(f'Correction detected @{segment.index}: '
                     f'{last_segment.quality} -> {segment.quality}')

            # update segment
            last_segment.quality = segment.quality
            self.segments[-1] = last_segment

            # update vmaf previous
            quality = get_video_bit_rate(self.video, segment.quality)
            vmaf = get_vmaf(self.video, self.index, quality)
            self.vmaf_previous = vmaf

            # Sending the metric updates
            # (timestamps[1] is the stream start, so this is a relative time)
            return {
                'index' : self.index,
                'quality' : quality,
                'vmaf' : self.vmaf_previous,
                'timestamp' : segment.timestamp - self.timestamps[1],
            }
        return None

    def advance(self, segment: Segment) -> Dict[str, float]:
        """Accept the next segment and compute its derived metrics.

        Returns a dict with quality, rebuffer estimate, raw (bitrate) QoE,
        VMAF QoE and a bandwidth estimate for the segment.
        """
        self.index += 1

        # We synchronize on the timestamp of segments getting loaded
        last_timestamp = self.timestamps[-1]
        timestamp = segment.timestamp
        self.timestamps.append(timestamp)

        # Compute player times and buffer levels
        # (only samples falling inside (last_timestamp, timestamp])
        in_interval = lambda l: [v for v in l if
                                 v.timestamp > last_timestamp and v.timestamp <= timestamp]
        get_values = lambda l: sorted(in_interval(sum(l, [])), key=lambda x: x.timestamp)
        player_times = get_values(m.playerTime for m in self.metrics)
        buffer_levels = get_values(m.bufferLevel for m in self.metrics)

        # Compute quality, rebuffering time and difference in quality switches
        # rebuffer = sum over consecutive samples of (wall time - player time)
        rebuffer = 0
        delay_snapshot = 100  # NOTE(review): presumably the sampling period in ms — confirm
        for time1, time2 in zip(player_times[:-1], player_times[1:]):
            real_time2 = time2.timestamp
            real_time1 = time1.timestamp
            player_time2 = time2.value
            player_time1 = time1.value
            player_diffence = (real_time2 - real_time1) - (player_time2 - player_time1)
            rebuffer += player_diffence / 1000.
        if rebuffer < 0:
            rebuffer = 0

        # Addjust for variance while treating all timestamps as a single one
        if len(player_times) > 1:
            t1 = player_times[0].timestamp
            t2 = player_times[-1].timestamp
            t3 = player_times[0].value
            t4 = player_times[-1].value
            once = ((t2 - t1) - (t4 - t3)) / 1000.
            maxerr = abs(once - rebuffer)
            rebuffer = rebuffer + maxerr

        # if buffer_levels are all much bigger then the player difference,
        # we only encounter variance
        if len(buffer_levels) > 0 and min([l.value for l in buffer_levels]) >= rebuffer * delay_snapshot * 2:
            rebuffer = 0

        # ---------------------- Raw qoe logic --------------------------------
        quality = get_video_bit_rate(self.video, segment.quality)
        switch = (abs(get_video_bit_rate(self.video, self.segments[-1].quality)
                      - get_video_bit_rate(self.video, self.segments[-2].quality))
                  if len(self.segments) > 1 else 0)

        # Compute raw qoe
        raw_qoe = (quality / K_in_M * BOOST_QUALITY
                   - PENALTY_REBUF * rebuffer
                   - PENALTY_QSWITCH * switch / K_in_M)

        # ---------------------- Vmaf qoe logic --------------------------------
        # Get vmaf
        vmaf = get_vmaf(self.video, self.index, quality)

        # Compute vmaf qoe
        reward_vmaf = (vmaf
                       - REBUF_PENALITY_QOE * rebuffer
                       - SWITCING_PENALITY_QOE * abs(vmaf - self.vmaf_previous))
        self.vmaf_previous = vmaf

        # ---------------------- Bw estimation qoe -----------------------------
        # Current bw estimate - note this is an estiamte because the backend can transmit
        # 2 segments at the same time: hence the actual value may be around 20% bigger/smaller
        segment_size = 8 * get_chunk_size(self.video, segment.quality, self.index - 1)
        time = timestamp - last_timestamp
        if time <= 0:
            time = 1
        bw = segment_size / time / 1000. # mbps

        # Appending the segment
        self.segments.append(segment)

        # Sending the metrics
        return {
            'index' : self.index,
            'quality' : quality,
            'rebuffer' : rebuffer,
            'raw_qoe' : raw_qoe,
            'vmaf' : vmaf,
            'vmaf_qoe' : reward_vmaf,
            'bw' : bw,
            'timestamp' : timestamp - self.timestamps[1],
        }

    def compute_qoe(self, metrics: Metrics) -> Generator[Dict[str, float], None, None]:
        """Fold a new metrics batch in; yield derived metrics for each new segment.

        Quality corrections for the current segment are logged but NOT yielded
        — only genuinely new segment loads produce output.
        """
        self.metrics.append(metrics)
        for segment in metrics.segments:
            if segment.index >= self.index + 1 and segment.loading:
                if self.logging:
                    self.log('Current segment: ', segment)
                yield self.advance(segment)
            elif segment.loading:
                maybe_check_quality = self.check_quality(segment)
                if maybe_check_quality is not None:
                    self.log('Quality check successful: ', maybe_check_quality)
class Monitor(Component):
    """
    Data monitor component that processes front-end incoming metrics.
    It orders the front-end stream, logs the metrics and uses a MetricsProcessor to compute
    derived metrics.
    """
    video: str
    path: Path                     # log file: <path>/<name>_metrics.log
    name: str                      # experiment/run name attached to every value
    processor: MetricsProcessor

    def __init__(self,
                 video: str,
                 path: Path,
                 name: str,
                 plot: bool = False,            # forward values to the plotting server
                 request_port: Optional[int] = None,   # plotting server port
                 port: Optional[int] = None,           # port for 'complete' notifications
                 training: bool = False,        # also post rewards to the training server
                 log_to_file: bool = True,
                 ) -> None:
        super().__init__()
        self.path = path / f'{name}_metrics.log'
        self.port = port
        self.name = name
        self.processor = MetricsProcessor(video)
        self.plot = plot
        self.request_port = request_port
        self.log_to_file = log_to_file
        self.training = training

    async def log_path(self, metrics: Metrics) -> None:
        # Append the raw metrics batch as one JSON line.
        if self.log_to_file:
            with open(self.path, 'a') as f:
                f.write(json.dumps(metrics.json))
                f.write('\n')

    async def advance(self, processed_metrics: Dict[str, float]) -> None:
        """Fan a processed-metrics dict out to the plot server and, optionally,
        post the VMAF QoE as a reward to the training server."""
        if not self.plot:
            return
        rebuffer = processed_metrics.get('rebuffer', None)
        quality = processed_metrics.get('quality', None)
        raw_qoe = processed_metrics.get('raw_qoe', None)
        vmaf = processed_metrics.get('vmaf', None)
        vmaf_qoe = processed_metrics.get('vmaf_qoe', None)
        bw = processed_metrics.get('bw', None)

        idx = processed_metrics['index']
        port = self.request_port
        timestamp = int(processed_metrics['timestamp']/1000)

        # Most series are plotted against the segment index; bandwidth is
        # plotted against time instead.
        make_value = lambda value: {
            'name': self.name, 'timestamp': timestamp, 'value': Value(value, idx).json
        }
        make_bw = lambda value: {
            'name': self.name, 'timestamp': timestamp, 'value': Value(value, timestamp).json
        }

        if rebuffer is not None: post_after_async(make_value(rebuffer), 0, "/rebuffer", port=port)
        if quality is not None: post_after_async(make_value(quality), 0, "/quality", port=port)
        if raw_qoe is not None: post_after_async(make_value(raw_qoe), 0, "/raw_qoe", port=port)
        if vmaf is not None: post_after_async(make_value(vmaf), 0, "/vmaf", port=port)
        if vmaf_qoe is not None: post_after_async(make_value(vmaf_qoe), 0, "/vmaf_qoe", port=port)
        if bw is not None: post_after_async(make_bw(bw), 0, "/bw", port=port)

        if self.training:
            # Rewards go synchronously to the RL training server.
            await post_after(
                data = {
                    'name' : self.name,
                    'qoe' : vmaf_qoe,
                    'index' : idx,
                },
                wait = 0,
                resource = '/reward',
                port = TRAING_SERVER_PORT,
                ssl = False,
            )

    async def compute_qoe(self, metrics: Metrics) -> None:
        # Push each newly derived per-segment dict through advance().
        for processed_metrics in self.processor.compute_qoe(metrics):
            await self.advance(processed_metrics)

    async def process(self, json: JSONType) -> JSONType:
        """HTTP entry point: unwrap the payload, log + derive metrics, and
        forward 'complete' notifications."""
        if 'json' in json:
            json = json['json']
        if 'stats' in json:
            metrics = Metrics.from_json(json['stats'])
            self.log('Processing ', metrics)
            # Fire-and-forget: logging and QoE computation run concurrently.
            asyncio.gather(*[
                self.log_path(metrics),
                self.compute_qoe(metrics),
            ])
        if 'complete' in json:
            post_after_async(json, 0, "/complete", port=self.port)
        return 'OK'
|
# -*- coding: utf-8 -*-
"""ERP - Product Search"""
import json
import logging
from json import JSONDecodeError
from random import choice
from types import BuiltinFunctionType, ModuleType
from typing import List, Tuple
from retrying import retry
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from directory_tests_shared import URLs
from directory_tests_shared.enums import PageType, Service
from directory_tests_shared.exceptions import PageLoadTimeout
from directory_tests_shared.utils import evaluate_comparison
from pages import common_selectors
from pages.common_actions import (
Selector,
check_for_sections,
check_url,
find_element,
find_elements,
find_selector_by_name,
go_to_url,
is_element_present,
wait_for_page_load_after_action,
)
from pages.erp import product_detail
NAME = "Product search"
SERVICE = Service.ERP
TYPE = PageType.FORM
URL = None  # no single canonical URL; the page has per-user-type variants (SubURLs)
PAGE_TITLE = ""

# Page variants keyed by the user type shown in feature files; keys are
# lower-cased below because lookups use lower-case page names.
SubURLs = {
    f"{NAME} (Developing country)": URLs.ERP_DEVELOPING_COUNTRY_PRODUCT_SEARCH.absolute,
    f"{NAME} (UK business)": URLs.ERP_BUSINESS_PRODUCT_SEARCH.absolute,
    f"{NAME} (UK consumer)": URLs.ERP_CONSUMER_PRODUCT_SEARCH.absolute,
    f"{NAME} (UK importer)": URLs.ERP_IMPORTER_PRODUCT_SEARCH.absolute,
}
SubURLs = {key.lower(): val for key, val in SubURLs.items()}
NAMES = list(SubURLs.keys())

# Links that expand a product category in the search results.
PRODUCT_CATEGORIES_SELECTOR = Selector(By.CSS_SELECTOR, "#content form a.govuk-link")
# Buttons that select a concrete commodity code from the results.
PRODUCT_CODES_SELECTOR = Selector(
    By.CSS_SELECTOR, "button.search-product-select-button"
)

# Named page sections used by should_see_sections()/find_selector_by_name().
SELECTORS = {}
SELECTORS.update(common_selectors.ERP_HEADER)
SELECTORS.update(common_selectors.ERP_BETA)
SELECTORS.update(common_selectors.ERP_BACK)
SELECTORS.update(common_selectors.ERP_SEARCH_FORM)
SELECTORS.update(common_selectors.ERP_SEARCH_RESULTS)
SELECTORS.update(common_selectors.ERP_HIERARCHY_CODES)
SELECTORS.update(common_selectors.ERP_FOOTER)
# This is only for an exporter from developing country
SELECTORS.update(common_selectors.ERP_SAVE_FOR_LATER)
def visit(driver: WebDriver, *, page_name: str = None):
    """Navigate to the product-search page variant identified by page_name."""
    go_to_url(driver, SubURLs[page_name], page_name or NAME)
def should_be_here(driver: WebDriver, *, page_name: str = None):
    """Assert the browser is on the variant identified by page_name (prefix match)."""
    check_url(driver, SubURLs[page_name], exact_match=False)
def should_see_sections(driver: WebDriver, names: List[str]):
    """Assert every named page section from SELECTORS is present."""
    check_for_sections(driver, all_sections=SELECTORS, sought_sections=names)
def raised_page_load_timeout(exception) -> bool:
    """Retry predicate for @retry: retry only on PageLoadTimeout."""
    return isinstance(exception, PageLoadTimeout)
@retry(
    stop_max_attempt_number=2,
    retry_on_exception=raised_page_load_timeout,
    wrap_exception=False,
)
def click_and_wait(driver: WebDriver, element: WebElement, timeout: int = 5):
    """Click element and wait for the resulting page load; retry once on timeout."""
    with wait_for_page_load_after_action(driver, timeout=timeout):
        element.click()
def drill_down_hierarchy_tree(
    driver: WebDriver, *, use_expanded_category: bool = False
) -> Tuple[ModuleType, dict]:
    """Randomly walk the commodity-code hierarchy tree until a product code
    can be selected, then select one at random.

    When use_expanded_category is True, continue from the deepest already
    expanded category instead of starting at a random top-level section.

    Returns the expected next page module (product_detail) and the selected
    code's value (parsed from JSON when possible, else the raw string; an
    empty dict when no 'Select' button was ever found).
    """
    if use_expanded_category:
        last_expanded_level = Selector(
            By.CSS_SELECTOR, "li.app-hierarchy-tree__parent--open:last-of-type"
        )
        last_opened_levels = find_elements(driver, last_expanded_level)
        opened_first_level = last_opened_levels[-1]
        first_id = opened_first_level.get_property("id")
        logging.debug(f"Commencing from: {first_id} -> {opened_first_level.text}")
    else:
        first_level_selector = Selector(
            By.CSS_SELECTOR, "ul.app-hierarchy-tree li.app-hierarchy-tree__section"
        )
        first_level = find_elements(driver, first_level_selector)
        first = choice(first_level)
        first_id = first.get_property("id")
        logging.debug(f"First level: {first_id} -> {first.text}")
        click_and_wait(driver, first)

    select_code_selector = Selector(
        By.CSS_SELECTOR, "button[name=product-search-commodity]"
    )
    is_select_product_button_present = is_element_present(driver, select_code_selector)
    logging.debug(
        f"Is Select product code button present: {is_select_product_button_present}"
    )

    # Descend one random child per iteration until a 'Select' button appears
    # or the branch has no further children.
    current_parent_id = first_id
    while not is_select_product_button_present:
        child_level_selector = Selector(
            By.CSS_SELECTOR, f"#{current_parent_id} ul li.app-hierarchy-tree__chapter"
        )
        child_level = find_elements(driver, child_level_selector)
        if not child_level:
            logging.debug("No more child level elements")
            break
        logging.debug(f"Child elements of '{current_parent_id}' are: {child_level}")
        child = choice(child_level)
        current_parent_id = child.get_property("id")
        logging.debug(f"Selected child: {current_parent_id}")
        click_and_wait(driver, child)
        is_button_present = is_element_present(driver, select_code_selector)
        logging.debug(f"Is Select product code button present: {is_button_present}")
        is_select_product_button_present = is_button_present

    if is_select_product_button_present:
        select_codes = find_elements(driver, select_code_selector)
        select = choice(select_codes)
        selected_code_value = select.get_attribute("value")
        try:
            # button values are usually JSON blobs; fall back to the raw string
            selected_code_value = json.loads(selected_code_value)
        except JSONDecodeError:
            pass
        logging.debug(f"Selected product code: {selected_code_value}")
        click_and_wait(driver, select)
    else:
        logging.error("Strange! Could not find 'Select' product codes button")
        selected_code_value = {}

    return product_detail, selected_code_value
def search(driver: WebDriver, phrase: str):
    """Type phrase into the product search box and submit the form."""
    box = find_element(driver, find_selector_by_name(SELECTORS, "search"))
    submit = find_element(
        driver, find_selector_by_name(SELECTORS, "search button")
    )
    box.clear()
    box.send_keys(phrase)
    submit.click()
def should_see_number_of_product_codes_to_select(
    driver: WebDriver, comparison_details: Tuple[BuiltinFunctionType, int]
):
    """Assert the count of selectable product-code buttons satisfies comparison_details."""
    buttons = find_elements(driver, PRODUCT_CODES_SELECTOR)
    description = f"number of product codes (on {driver.current_url}) to be"
    evaluate_comparison(description, len(buttons), comparison_details)
def should_see_number_of_product_categories_to_expand(
    driver: WebDriver, comparison_details: Tuple[BuiltinFunctionType, int]
):
    """Assert the count of expandable product-category links satisfies comparison_details."""
    found_elements = find_elements(driver, PRODUCT_CATEGORIES_SELECTOR)
    evaluate_comparison(
        # BUG FIX: the message was missing the f-prefix, so failures printed
        # the literal "{driver.current_url}" (sibling assertion above has it).
        f"number of product categories (on {driver.current_url}) to be",
        len(found_elements),
        comparison_details,
    )
def click_on_random_search_result(driver: WebDriver, result_type: str):
    """Click a random search result of the given type ('code' or 'category')
    and wait for the resulting page load."""
    selector_by_type = {
        "code": PRODUCT_CODES_SELECTOR,
        "category": PRODUCT_CATEGORIES_SELECTOR,
    }
    candidates = find_elements(driver, selector_by_type[result_type.lower()])
    picked = choice(candidates)
    value = picked.get_property("value")
    href = picked.get_attribute("href")
    logging.debug(f"Will click on {result_type}: {value or href}")
    with wait_for_page_load_after_action(driver, timeout=5):
        picked.click()
|
import os
import datetime
import time
from django.utils import timezone
# TODO: this is out of date, but do we really need it?
def populate():
    """Populate the database with sample clients and inventory (dev fixture)."""
    clients()
    inventory()
def clients():
    """Seed demo data: Clients, Dependents, Insurances, Coverages, Claims,
    the Claim<->Coverage links, and the admin users.

    Uses the module's add_* helpers (get_or_create based), so it is safe to
    run more than once.
    """
    # Constants for the client model
    MALE = Client.MALE
    FEMALE = Client.FEMALE
    # Add Clients
    eric = add_client("Eric", "Klinger", "11408 44 ave",
                      "Edmonton", "T6J0Z2", "780 437 1514",
                      datetime.date(1988, 12, 30), MALE)
    chris = add_client("Chris", "Klinger", "11408 44 ave",
                       "Edmonton", "T6J0Z2", "780 937 1077",
                       datetime.date(1991, 6, 14), MALE)
    jay = add_client("Jason", "Mu", "4077 69ave",
                     "Edmonton", "blah", "number",
                     datetime.date(1980, 6, 14), MALE)
    dan = add_client("Danny", "Mu", "13499 70ave",
                     "Edmonton", "blah", "number",
                     datetime.date(1983, 8, 14), MALE)
    cloney = add_client("Cloney", "McStudent", "12345 42 ave",
                        "Providence", "blah", "number",
                        datetime.date(1993, 5, 22), MALE)
    jane = add_client("Jane", "Doe", "2943 69 ave",
                      "Vancouver", "blah", "number",
                      datetime.date(1985, 12, 8), FEMALE)
    john = add_client("John", "Doe", "2943 69 ave",
                      "Vancouver", "blah", "number",
                      datetime.date(1984, 8, 20), MALE)
    # Constants for Dependent model
    SPOUSE = Dependent.SPOUSE
    CHILD = Dependent.CHILD
    # Add Dependents (all attached to eric)
    kid_one = add_dependent(eric, "Kid", "one", CHILD, MALE,
                            datetime.date(1999, 1, 1))
    kid_two = add_dependent(eric, "Kid", "two", CHILD, FEMALE,
                            datetime.date(2001, 1, 1))
    wife = add_dependent(eric, "Jane", "Doe", SPOUSE, FEMALE,
                         datetime.date(1985, 12, 8))
    # Constants for insurance model
    ASSIGNMENT = Insurance.ASSIGNMENT
    NON_ASSIGNMENT = Insurance.NON_ASSIGNMENT
    # Add Insurances
    # NOTE: a stale comment here said these were being commented out while the
    # insurance scheme changed; they are active.
    eric_insurance = add_insurance(eric, "Some_provider",
                                   "PN9999", "CN9999", ASSIGNMENT)
    chris_insurance = add_insurance(chris, "Some_provider",
                                    "PN9998", "CN9998", ASSIGNMENT)
    jay_insurance = add_insurance(jay, "Some_provider",
                                  "PN9997", "CN9997", ASSIGNMENT)
    dan_insurance = add_insurance(dan, "Some_provider",
                                  "PN9996", "CN9996", ASSIGNMENT)
    cloney_insurance = add_insurance(cloney, "Some_provider",
                                     "PN9995", "CN9995", ASSIGNMENT)
    jane_insurance = add_insurance(jane, "Some_provider",
                                   "PN9994", "CN9994", ASSIGNMENT)
    # NOTE(review): john reuses jane's policy/contract numbers — confirm
    # whether that is intentional demo data.
    john_insurance = add_insurance(john, "Some_provider",
                                   "PN9994", "CN9994", NON_ASSIGNMENT)
    # Constants for coverage types model
    ORTHOTICS = Coverage.ORTHOTICS
    COMPRESSION = Coverage.COMPRESSION_STOCKINGS
    ORTHO_SHOES = Coverage.ORTHOPEDIC_SHOES
    # Add Coverages (coverage_percent, max_claim_amount, claimant)
    eric_coverage = add_coverage(
        eric_insurance, ORTHOTICS, 100, 250, eric)
    chris_coverage = add_coverage(
        chris_insurance, COMPRESSION, 100, 300, chris)
    jay_coverage = add_coverage(
        jay_insurance, ORTHO_SHOES, 100, 350, jay)
    dan_coverage = add_coverage(
        dan_insurance, ORTHO_SHOES, 100, 350, dan)
    cloney_coverage = add_coverage(
        cloney_insurance, ORTHO_SHOES, 100, 350, cloney)
    jane_coverage = add_coverage(
        jane_insurance, ORTHO_SHOES, 100, 350, jane)
    john_coverage = add_coverage(
        john_insurance, ORTHO_SHOES, 100, 350, john)
    # Add Claims — the short sleeps guarantee each claim gets a distinct
    # submitted_datetime (get_or_create keys on it).
    tz = timezone.get_current_timezone()
    eric_claim = add_claim(eric, eric_insurance, eric,
                           timezone.make_aware(datetime.datetime.now(), tz))
    time.sleep(0.01)
    chris_claim = add_claim(chris, chris_insurance, chris,
                            timezone.make_aware(datetime.datetime.now(), tz))
    time.sleep(0.01)
    jay_claim = add_claim(jay, jay_insurance, jay,
                          timezone.make_aware(datetime.datetime.now(), tz))
    time.sleep(0.01)
    dan_claim = add_claim(dan, dan_insurance, dan,
                          timezone.make_aware(datetime.datetime.now(), tz))
    time.sleep(0.01)
    cloney_claim = add_claim(cloney, cloney_insurance, cloney,
                             timezone.make_aware(datetime.datetime.now(), tz))
    time.sleep(0.01)
    jane_claim = add_claim(jane, jane_insurance, jane,
                           timezone.make_aware(datetime.datetime.now(), tz))
    time.sleep(0.01)
    john_claim = add_claim(john, john_insurance, john,
                           timezone.make_aware(datetime.datetime.now(), tz))
    # Link each claim to its coverage through the join model.
    ClaimCoverage.objects.create(
        claim=eric_claim, coverage=eric_coverage)
    ClaimCoverage.objects.create(
        claim=chris_claim, coverage=chris_coverage)
    ClaimCoverage.objects.create(
        claim=jay_claim, coverage=jay_coverage)
    ClaimCoverage.objects.create(
        claim=dan_claim, coverage=dan_coverage)
    ClaimCoverage.objects.create(
        claim=cloney_claim, coverage=cloney_coverage)
    ClaimCoverage.objects.create(
        claim=jane_claim, coverage=jane_coverage)
    ClaimCoverage.objects.create(
        claim=john_claim, coverage=john_coverage)
    # Add admin users
    # Have to hash passwords so get_or_create will work
    password = hashers.make_password("admin")
    add_admin("admin", password, "Admin")
    add_admin("jay", password, "Jay")
    add_admin("dan", password, "Dan")
    add_admin("eric", password, "Eric")
    add_admin("chris", password, "Chris")
    add_admin("airith", hashers.make_password("perfectarch"), "Andrew")
def inventory():
    """Seed the database with one demo shoe and its size/stock attributes."""
    # Shoe model constants
    womens = Shoe.WOMENS
    orderable = Shoe.ORDERABLE
    demo_shoe = add_shoe(
        "Test Shoe",
        category=womens,
        availability=orderable,
        style="Toe Shoe",
    )
    add_shoe_attributes(demo_shoe, "1", 1)
def add_admin(username, password, first_name):
    """Get or create a staff/superuser User; return it, or None on failure.

    ``password`` must already be hashed — get_or_create matches on exact
    field values, so a raw password would never match an existing row.
    """
    try:
        user, _created = User.objects.get_or_create(username=username,
                                                    password=password,
                                                    first_name=first_name,
                                                    is_staff=True,
                                                    is_superuser=True)
        return user
    # BUG FIX: the bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; keep the best-effort "return None" contract but only
    # for ordinary exceptions.
    except Exception:
        return None
def add_client(firstName, lastName, address, city,
               postalCode, phoneNumber, birthdate, gender):
    """Get or create a Client row with the given details; return the instance."""
    client, _created = Client.objects.get_or_create(
        first_name=firstName,
        last_name=lastName,
        address=address,
        city=city,
        postal_code=postalCode,
        phone_number=phoneNumber,
        birth_date=birthdate,
        gender=gender,
    )
    return client
def add_dependent(client, firstName, lastName,
                  relationship, gender, birthdate):
    """Get or create a Dependent attached to *client*; return the instance."""
    dependent, _created = Dependent.objects.get_or_create(
        client=client,
        first_name=firstName,
        last_name=lastName,
        relationship=relationship,
        gender=gender,
        birth_date=birthdate,
    )
    return dependent
def add_insurance(main_claimant, provider, policyNumber, contractNumber,
                  benefits):
    """Get or create an Insurance policy for *main_claimant*; return it."""
    insurance, _created = Insurance.objects.get_or_create(
        main_claimant=main_claimant,
        provider=provider,
        policy_number=policyNumber,
        contract_number=contractNumber,
        benefits=benefits,
    )
    return insurance
def add_coverage(insurance, coverage_type, coverage_percent,
                 max_claim_amount, claimant):
    """Get or create a Coverage under *insurance* for *claimant*; return it."""
    coverage, _created = Coverage.objects.get_or_create(
        insurance=insurance,
        coverage_type=coverage_type,
        coverage_percent=coverage_percent,
        max_claim_amount=max_claim_amount,
        claimant=claimant,
    )
    return coverage
def add_claim(client, insurance, patient, submitted_datetime):
    """Get or create a Claim and return it.

    NOTE(review): the ``client`` argument is accepted but never used — the
    Claim is keyed only on insurance/patient/submitted_datetime. Confirm
    whether ``client`` should be passed to the model or removed from callers.
    """
    c = Claim.objects.get_or_create(insurance=insurance,
                                    patient=patient,
                                    submitted_datetime=submitted_datetime)
    return c[0]
def add_shoe(name, image=None, category="", size="", availability="",
             brand="", style="", sku="", colour="", description="",
             credit_value=0, quantity=0, cost=0):
    """Get or create a Shoe and return it.

    NOTE(review): ``size`` and ``quantity`` are accepted but never forwarded
    to the model — they presumably belong to ShoeAttributes (see
    add_shoe_attributes). Confirm and drop them from this signature if so.
    """
    s = Shoe.objects.get_or_create(
        image=image, category=category, availability=availability,
        brand=brand, style=style, name=name, sku=sku, colour=colour,
        description=description, credit_value=credit_value,
        cost=cost)
    return s[0]
def add_shoe_attributes(shoe, size, quantity=0):
    """Get or create the ShoeAttributes row for (shoe, size); return it."""
    attributes, _created = ShoeAttributes.objects.get_or_create(
        shoe=shoe, size=size, quantity=quantity)
    return attributes
if __name__ == '__main__':
    print("Starting PerfectArchOrthotics database population script...")
    # Point Django at the project settings before django.setup().
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          'perfect_arch_orthotics.settings')
    import django
    django.setup()
    # Models must be imported here (not at module top) because they require a
    # fully configured Django environment.
    from django.contrib.auth.models import User
    import django.contrib.auth.hashers as hashers
    from clients.models import Client, Insurance, Claim, \
        Dependent, Coverage, ClaimCoverage
    from inventory.models import Shoe, ShoeAttributes
    populate()
    print("Finished PerfectArchOrthotics database population script.")
|
# Generated by Django 3.0.3 on 2020-03-04 09:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering Movie's image/released/runtime fields.

    NOTE(review): every ``blank='True'`` below passes the *string* ``'True'``
    rather than the boolean ``True``. Django only truth-tests this value, so
    validation behaves the same, but the value was serialized from the model
    definitions — fix the typo in models.py and regenerate rather than
    editing this generated file, or the autodetector will flag a difference.
    """

    dependencies = [
        ('extendflix', '0004_auto_20200304_0944'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='image',
            field=models.URLField(blank='True', max_length=250),
        ),
        migrations.AlterField(
            model_name='movie',
            name='released',
            field=models.IntegerField(blank='True'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='runtime',
            field=models.CharField(blank='True', max_length=8),
        ),
    ]
|
import numpy as np
import random
class perceptron:
    """Single-layer perceptron with a step activation function.

    The weight vector holds one weight per input plus one trailing bias
    weight (``weights[-1]``, applied to an implicit input of 1).
    """

    def __init__(self, NumEntradas, lr=0.3):
        """Create NumEntradas + 1 random weights in [-0.5, 0.5).

        NumEntradas: number of inputs (the extra weight is the bias).
        lr: learning rate (generalized to a parameter; default keeps the
            original hard-coded 0.3).
        """
        self.weights = [random.uniform(-0.5, 0.5) for _ in range(NumEntradas + 1)]
        self.lr = lr  # learning rate
        # Kept for backward compatibility; unused — the bias is weights[-1].
        self.umbral = random.uniform(-0.5, 0.5)
        print ("\nPesos aleatorios:", self.weights)

    def escalon(self, n):
        """Step activation: 1 when n >= 0, else 0."""
        return 1 if n >= 0 else 0

    def guess(self, inputs):
        """Return the step activation of the weighted sum plus bias."""
        total = 0.0
        for weight, value in zip(self.weights[:-1], inputs):
            total += weight * value
        # BUG FIX: the bias was previously added via the leaked loop index
        # (weights[i + 1]); address the last weight explicitly instead.
        total += self.weights[-1]
        return self.escalon(total)

    def corrige(self, inputs, target):
        """Apply one perceptron-rule update for a misclassified example.

        inputs: one training example (x1, ..., xn).
        target: expected output for that example.
        """
        error = target - self.guess(inputs)
        for i in range(len(self.weights) - 1):
            self.weights[i] = self.weights[i] + (error * inputs[i] * self.lr)
        # Bias update — its input is implicitly 1.
        self.weights[-1] = self.weights[-1] + (error * self.lr)
        print ("\n")

    def entrenamiento(self, conj, valores):
        """Train until every example in *conj* produces its value in *valores*.

        Whenever an example is misclassified the weights are corrected and
        the scan restarts from the first example. Terminates only for
        linearly separable data (perceptron convergence theorem).
        """
        i = 0
        while i < len(conj):
            estimado = self.guess(conj[i])
            if estimado != valores[i]:
                print ("\nEl valor esperado de ",conj[i]," es: ",valores[i],"y se obtuvo",estimado)
                print ("******CORRIGE PESOS***********\npesos anteriores:",self.weights)
                self.corrige(conj[i], valores[i])
                print ("Pesos actuales",self.weights,"\n******************************\n")
                # BUG FIX: the original set i = -1 here, which made the next
                # iteration inspect conj[-1] (the last example) via negative
                # indexing before resuming. Restart cleanly from index 0.
                i = 0
            else:
                print ("Se obtuvo el valor deseado de la entrada",conj[i],"con salida",valores[i])
                i = i + 1
|
from dcbase.apps import TIMEZONE_SESSION_KEY
from dcbase.decorator.profileFormView import profile_form_view
from dcbase.forms.userProfile import UserProfileForm
from dcbase.views.profile.profileEditFormView import ProfileEditFormView
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _, LANGUAGE_SESSION_KEY
@profile_form_view()
class ProfileEditGeneralView(ProfileEditFormView):
    """Profile "General" tab: edits the user profile and mirrors the chosen
    language/timezone into the session so they take effect immediately."""

    form_class = UserProfileForm
    profile_nav_name = _('General')
    profile_panel_name = _('General Settings')
    profile_edit_url = reverse_lazy('account_profile_edit_general')

    def form_valid(self, form):
        """Persist the form, then cache language/timezone in the session.

        NOTE(review): reads raw ``request.POST`` keys rather than
        ``form.cleaned_data`` — assumes the form always posts both fields
        (KeyError otherwise). Confirm against UserProfileForm.
        """
        response = super().form_valid(form)
        self.request.session[LANGUAGE_SESSION_KEY] = self.request.POST['language']
        self.request.session[TIMEZONE_SESSION_KEY] = self.request.POST['timezone']
        return response
profileEditGeneralView = login_required(ProfileEditGeneralView.as_view())
|
import requests
import dotenv
import json
import pandas as pd
import openpyxl as xl
import time
class constants:
    """Column indices of a company record in the export spreadsheet.

    Kept in the same order as the module-level ``const_list`` (JSON field
    names) and ``headigs`` (spreadsheet header titles).
    """
    name=0
    company_number=1
    Jurisdiction_code=2  # NOTE(review): inconsistent capitalisation vs siblings
    cmpnyType=3
    registry_url=4
    branch=5
    branch_status=6
    current_status=7
    # Fields 8..12 live under company['registered_address'] in the API response.
    street_add=8
    locality=9
    region=10
    postal_code=11
    country=12
# OpenCorporates JSON field names, in spreadsheet column order.
const_list=["name","company_number","jurisdiction_code", "company_type", "registry_url", "branch","branch_status","current_status","street_address", "locality","region","postal_code","country" ]
# wb=Workbook()
# sheet1=wb.add_sheet("Sheet 1")
# Spreadsheet header titles, parallel to const_list.
headigs=["Name", "Company Number", "Jurisdiction Code", "Company Type", "Registry URL", "Branch", "Branch Status", "Current Status", "Street Address", "Locality", "Region","Postal Code","Country"]
# OpenCorporates API token; empty string means unauthenticated (rate-limited).
api_token=""
# Base query: companies in NACE rev.2 industry 72.11, 100 results per page.
parameters={
    "industry_codes":"eu_nace_2-7211",
    "api_token":api_token,
    "per_page":100,
    "page":1
}
base_url="https://api.opencorporates.com/v0.4/"
companies_search_url="companies/search"
# Local JSON dump files are named <base_file_name>(<start>-<end>)<file_extn>.
base_file_name="eu_nace_2-7211"
file_extn=".json"
def get_file_name(page):
    """Return the JSON dump filename covering the result range of *page*."""
    per_page = parameters['per_page']
    end = page * per_page
    start = end - (per_page - 1)
    return f"{base_file_name}({start}-{end}){file_extn}"
def create_files(page, text):
    """Dump *text* as JSON into the file named for *page*; True on success."""
    target = get_file_name(page)
    try:
        print ('Creating file - ', target)
        # x=input('enter- ')
        with open(target, "w") as handle:
            json.dump(text, handle)
        print ('Created...')
        return True
    except Exception:
        return False
def create_excel():
    """Create excel1.xlsx with a header row on a sheet named "sheet1"."""
    workbook = xl.Workbook()
    worksheet = workbook.create_sheet("sheet1")
    for column, title in enumerate(headigs, start=1):
        worksheet.cell(1, column, title)
    workbook.save("excel1.xlsx")
def excel(dict1, page):
    """Write one page of company results from *dict1* into excel1.xlsx.

    Rows are addressed absolutely from the page number: page N fills
    spreadsheet rows (N-1)*per_page+2 .. N*per_page+1 (row 1 is the header).
    Columns follow ``const_list``; indices 0-7 come straight from the
    company object, 8-12 from its nested ``registered_address``.

    Returns 0 on success or a handled IndexError, -2 on EOFError.
    """
    # NOTE(review): get_sheet_by_name() is deprecated in openpyxl;
    # wb["sheet1"] is the modern spelling.
    wb=xl.load_workbook("excel1.xlsx")
    sheet1=wb.get_sheet_by_name("sheet1")
    try:
        # k: 0-based index into this page's companies list.
        k=0
        # j: absolute 0-based row for the record across all pages.
        for j in range(page*parameters['per_page']-(parameters['per_page']-1)-1, page*parameters['per_page']):
            for i in range(len(const_list)):
                if(i<8):
                    print ('k=', k)
                    try:
                        # Top-level company attribute -> cell (rows/cols are 1-based,
                        # +2 skips the header row).
                        sheet1.cell(j+2, i+1, dict1['results']['companies'][k]['company'][const_list[i]])
                    except IndexError:
                        # Fewer companies on this page than per_page; leave cell blank.
                        print ("Index Error")
                        pass
                if(i>=8 and i<=12):
                    try:
                        # Address attribute from the nested registered_address object.
                        sheet1.cell(j+2, i+1, dict1['results']['companies'][k]['company']['registered_address'][const_list[i]])
                    except TypeError:
                        # registered_address is null for this company.
                        print ("TypeError")
                        sheet1.cell(j+2, i+1, "Information not available")
                    except IndexError:
                        print ("Index Error")
                        pass
            k+=1
        wb.save("excel1.xlsx")
    except EOFError:
        print ("EOF Error")
        return -2
    except IndexError:
        print ("Index Error")
        return 0
    # except Exception as e:
    #     return -1
    return 0
def write_toExcel(page):
    """Load the page's JSON dump and write its companies into the workbook.

    The dump is double-encoded (a JSON string stored as JSON), so it is
    decoded twice: json.load() yields the string, json.loads() the dict.
    Returns excel()'s status code.
    """
    name = get_file_name(page)
    with open(name, encoding="utf8") as file:
        print ("loading from "+ name)
        data = json.load(file)
    # BUG FIX: json.loads() no longer accepts an ``encoding`` argument (it was
    # removed in Python 3.9 and ignored before that); the input is already str.
    # Also dropped the redundant file.close() — the with-block closes the file.
    return excel(json.loads(data), page)
def get_companies(parameter, page):
    """Fetch one page of company search results and dump it to disk.

    Returns True when the page was fetched (or a timeout was recorded for a
    later retry), False when the dump file could not be written.
    """
    try:
        response = requests.get(base_url + companies_search_url, params=parameter)
        text = json.dumps(response.json(), indent=0)
        return create_files(page, text)
    except TimeoutError:
        # Record the failed page number so it can be retried later.
        with open('erro pages.txt', 'a') as file:
            # BUG FIX: the page number is an int; ``int + " "`` raised a
            # TypeError inside this error handler. Convert it first.
            file.write(str(parameter['page']) + " ")
        return True
offline=2 #0 means offline, 1 means only online, 2 means do both
def loop_pages_api(off):
    """Fetch and/or export result pages in the range (page, endPage].

    off: 0 = offline only (export existing JSON dumps to Excel),
         1 = online only (fetch dumps from the API),
         2 = both (fetch, then immediately export, per page).

    Returns False as soon as a fetch fails, True when the range completes.
    """
    # Hard-coded single-page window (page 92); widen to resume a larger run.
    page=91
    endPage=92
    while(page<endPage):
        parameters['page']=page+1
        print ("page in loop - ", page)
        print("parameters in loop - ", parameters['page'])
        if (off==1 or off==2):
            if not get_companies(parameters, parameters['page']):
                return False
            else: print ("get companies done")
        # In "both" mode, fall through to the offline export for this page...
        if(off==2): off=0
        if(off==0):
            print ("page - ", parameters['page'])
            print (write_toExcel(parameters['page']))
            # ...then restore the configured mode for the next page.
            off=offline
        #time.sleep(3)
        page+=1
        print ("page in loop end - ", page)
    return True
print (loop_pages_api(offline))
# response=requests.get(base_url+companies_search_url, params=parameters)
# text = json.dumps(response.json(), indent=0)
# with open(file_name,"w") as file:
# json.dump(text, file)
# file.close()
# # # print ("73 - ", text[73])
# # # print ("74 - ", text[74])
# # # print ("75 - ", text[75])
# # # print ("context - ", text[40:100])
# # # # print ("to remove - ", text[4595:6615])
# # # print ("full text - ", text)
# # # text.replace(' ', '')
# # # print ("replaced text - ",text)
# with open(file_name, encoding="utf8") as file:
# data=json.load(file)
# print (data)
# dict2=json.loads(data, encoding="utf8")
# # # print (dict2['results']['companies'][1]['company']['name'])
# # """print ("dict2 - ", dict2)"""
# excel(dict2)
# # create_excel()
# dict1=pd.read_json(r'eu_nace_2-7211(1-20).json')
# dict1.to_csv(r'excel.csv', index=None)
# class Webapi:
# url=""
# parameters=None
# def __init__(self, url, parameters):
# self.url=url
# self.parameters=parameters
# def getCompanies(self,url, parameters):
# response=requests.get(url, params=parameters)
# json_response = json.dumps(response.json(), sort_keys=False, indent=4)
# return json_response
# class Localdb:
# file=None
# def __init__(self, file):
# self.file=file
# def write_json(self, text):
# with open('eu_nace_2-7211(1-20).json', 'w') as file:
# json.dump(text, file)
# file.close()
# class Repository:
# def __init__(self):
# pass
# n=1
# if(n==1):
# api=new Webapi(base_url+companies_search_url)
# response=api.getCompanies()
# with open('eu_nace_2-7211(1-20).json', 'w') as file:
# json.dump(text, file)
# file.close()
# text=""
# with open("eu_nace_2-7211(1-20).json", "r") as file:
# text=json.load(file)
# file.close()
# dict=json.loads(text)
# print (dict)
|
from django.contrib import admin
from .models import Article, Person, Update_items
from django.contrib.auth.apps import AuthConfig as _AuthConfig
from django.contrib.admin.apps import AdminConfig as _AdminConfig
from django.apps import AppConfig
class ArticleAdmin(admin.ModelAdmin):
    """Admin config for Article: list columns and searchable text fields."""
    list_display = ('title', 'pub_date', 'update_date',)
    search_fields = ('title', 'content')
class PersonAdmin(admin.ModelAdmin):
    """Admin config for Person; search additionally matches exact age."""
    list_display = ('full_name', 'age')
    search_fields = ('first_name', 'last_name', 'age',)

    def get_search_results(self, request, queryset, search_term):
        """Extend the default search: a numeric term also matches age exactly."""
        queryset, use_distinct = super(PersonAdmin, self).get_search_results(
            request, queryset, search_term)
        try:
            search_term_as_int = int(search_term)
        # BUG FIX: the bare ``except:`` swallowed every exception (including
        # KeyboardInterrupt and real DB errors); only a non-numeric search
        # term is expected here, so catch exactly that.
        except ValueError:
            pass
        else:
            queryset |= self.model.objects.filter(age=search_term_as_int)
        return queryset, use_distinct
class MyModelAdmin(admin.ModelAdmin):
    """Admin that restricts non-superusers to objects they authored."""

    def get_queryset(self, request):
        """Return everything for superusers, otherwise only the user's rows."""
        queryset = super(MyModelAdmin, self).get_queryset(request)
        if not request.user.is_superuser:
            queryset = queryset.filter(author=request.user)
        return queryset
class ItemsAdmin(admin.ModelAdmin):
    """Admin config for Update_items: list columns and name search."""
    list_display = ('items_name', 'items_place', 'items_system', 'items_resource',)
    search_fields = ('items_name',)
class AuthConfig(_AuthConfig):
    """Re-labels django.contrib.auth in the admin with a Chinese verbose name."""
    name = 'django.contrib.auth'
    verbose_name = u'用户管理'
class AdminConfig(_AdminConfig):
    """Re-labels django.contrib.admin in the admin with a Chinese verbose name."""
    name = 'django.contrib.admin'
    verbose_name = u'后台管理'
class WTCConfig(AppConfig):
    """App config for the WTC app with a Chinese verbose name."""
    name = u'WTC'
    verbose_name = u"项目管理"
# Article/Person admins are currently unregistered; kept for easy re-enabling.
# admin.site.register(Article, ArticleAdmin)
# admin.site.register(Person, PersonAdmin)
admin.site.register(Update_items, ItemsAdmin)
# Title shown at the top of every Django admin page.
admin.site.site_header = "CMDB管理后台"
|
#!python
from linkedlist import LinkedList # from folder.filename import Class
class Queue(object):
    """FIFO queue backed by a singly linked list (front = list head)."""

    def __init__(self, iterable=None):
        """Initialize this queue and enqueue the given items, if any."""
        # Underlying storage; the list head is the queue front.
        self.list = LinkedList()
        if iterable is not None:
            for item in iterable:
                self.enqueue(item)

    def __repr__(self):
        """Return a string representation of this queue."""
        return 'Queue({} items, front={})'.format(self.length(), self.front())

    def is_empty(self):
        """Return True if this queue is empty, or False otherwise."""
        # IDIOM FIX: return the comparison directly instead of
        # ``True if ... else False``.
        return self.list.size == 0

    def length(self):
        """Return the number of items in this queue."""
        return self.list.size

    def enqueue(self, item):
        """Insert the given item at the back of this queue.
        Running time: O(1) — the list keeps a tail pointer for append."""
        self.list.append(item)

    def front(self):
        """Return the item at the front of this queue without removing it,
        or None if this queue is empty."""
        head = self.list.head
        return None if head is None else head.data

    def dequeue(self):
        """Remove and return the item at the front of this queue, or raise
        ValueError if the queue is empty.
        Running time: O(1) — only the head pointer changes.

        NOTE(review): delegates to LinkedList.delete(value), which removes
        the first node whose data equals the front item; since that is the
        head, this pops the front — confirm delete() scans from the head.
        """
        item = self.front()
        # BUG FIX: compare with ``is None`` — ``== None`` invokes the item's
        # __eq__ and can misfire for stored items that compare equal to None.
        if item is None:
            raise ValueError("list is empty")
        self.list.delete(item)
        return item
|
import os, subprocess, tempfile
def run_test(infile, outfile):
    """Run ../microchess on *infile* and compare its stdout to *outfile*.

    Returns True on a match. On mismatch, prints FAILED and a unified diff
    between the expected file and the actual output (via a temp file).
    """
    print("running " + infile)
    with open(outfile) as f:
        expected = f.read()
    with os.popen("../microchess < " + infile) as f:
        actual = f.read()
    # Normalize: the expected files always end with a newline.
    if not actual.endswith('\n'):
        actual += '\n'
    if actual == expected:
        print("passed")
        return True
    print("FAILED")
    # BUG FIX: NamedTemporaryFile defaults to binary mode ('w+b'), so writing
    # the str ``actual`` raised TypeError; open it in text mode instead.
    tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
    try:
        tmpfile.write(actual)
        tmpfile.close()
        subprocess.call(["diff", "-U8", outfile, tmpfile.name])
    finally:
        os.unlink(tmpfile.name)
    return False
def main():
    """Discover *_in.txt cases next to this script and run each one,
    reporting the total number of failures at the end."""
    os.chdir(os.path.dirname(__file__))
    failure_count = 0
    for name in os.listdir(os.curdir):
        if not name.endswith("_in.txt"):
            continue
        expected_name = name[:-len("_in.txt")] + "_out.txt"
        if not run_test(name, expected_name):
            failure_count += 1
    if failure_count:
        print("\n{} test(s) failed".format(failure_count))
main()
|
from math import log
def f(x):
    """Return the natural logarithm of *x*, shifted down by 2."""
    shifted = log(x) - 2
    return shifted
def checkrange(func):
    """Decorator: only call *func* for x > 0; warn and return None otherwise.

    The guarded function keeps *func*'s metadata (name/docstring) thanks to
    functools.wraps — the original wrapper did not, which broke introspection.
    """
    from functools import wraps

    @wraps(func)
    def inner(x):
        if x <= 0:
            print("X has to be greater than 0")
            return None  # explicit: out-of-domain input yields None
        return func(x)
    return inner
#def test_checkrange():
    #return assert(f_safe(2))
# Apply the range guard: f_safe refuses non-positive inputs.
f_safe = checkrange(f) # f_safe is now a function
# First call prints the warning then "None"; second prints f(2) = log(2) - 2.
print(f_safe(-2))
print(f_safe(2))
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class aaaradiusparams(base_resource) :
"""Configuration for RADIUS parameter resource."""
    def __init__(self) :
        """Initialize every RADIUS parameter to its empty/zero default.

        Attribute semantics are documented on the corresponding properties.
        """
        # Server endpoint and handshake settings
        self._serverip = ""
        self._serverport = 0
        self._authtimeout = 0
        self._radkey = ""
        self._radnasip = ""
        self._radnasid = ""
        # Group-extraction settings
        self._radvendorid = 0
        self._radattributetype = 0
        self._radgroupsprefix = ""
        self._radgroupseparator = ""
        self._passencoding = ""
        # Attribute-extraction settings (IP / password)
        self._ipvendorid = 0
        self._ipattributetype = 0
        self._accounting = ""
        self._pwdvendorid = 0
        self._pwdattributetype = 0
        self._defaultauthenticationgroup = ""
        self._callingstationid = ""
        # Read-only fields populated from API responses
        self._groupauthname = ""
        self._ipaddress = ""
        self._builtin = []
@property
def serverip(self) :
"""IP address of your RADIUS server.<br/>Minimum length = 1."""
try :
return self._serverip
except Exception as e:
raise e
@serverip.setter
def serverip(self, serverip) :
"""IP address of your RADIUS server.<br/>Minimum length = 1
:param serverip:
"""
try :
self._serverip = serverip
except Exception as e:
raise e
@property
def serverport(self) :
"""Port number on which the RADIUS server listens for connections.<br/>Default value: 1812<br/>Minimum length = 1."""
try :
return self._serverport
except Exception as e:
raise e
@serverport.setter
def serverport(self, serverport) :
"""Port number on which the RADIUS server listens for connections.<br/>Default value: 1812<br/>Minimum length = 1
:param serverport:
"""
try :
self._serverport = serverport
except Exception as e:
raise e
@property
def authtimeout(self) :
"""Maximum number of seconds that the NetScaler appliance waits for a response from the RADIUS server.<br/>Default value: 3<br/>Minimum length = 1."""
try :
return self._authtimeout
except Exception as e:
raise e
@authtimeout.setter
def authtimeout(self, authtimeout) :
"""Maximum number of seconds that the NetScaler appliance waits for a response from the RADIUS server.<br/>Default value: 3<br/>Minimum length = 1
:param authtimeout:
"""
try :
self._authtimeout = authtimeout
except Exception as e:
raise e
@property
def radkey(self) :
"""The key shared between the RADIUS server and clients.
Required for allowing the NetScaler appliance to communicate with the RADIUS server.<br/>Minimum length = 1.
"""
try :
return self._radkey
except Exception as e:
raise e
@radkey.setter
def radkey(self, radkey) :
"""The key shared between the RADIUS server and clients.
Required for allowing the NetScaler appliance to communicate with the RADIUS server.<br/>Minimum length = 1
:param radkey:
"""
try :
self._radkey = radkey
except Exception as e:
raise e
@property
def radnasip(self) :
"""Send the NetScaler IP (NSIP) address to the RADIUS server as the Network Access Server IP (NASIP) part of the Radius protocol.<br/>Possible values = ENABLED, DISABLED."""
try :
return self._radnasip
except Exception as e:
raise e
@radnasip.setter
def radnasip(self, radnasip) :
"""Send the NetScaler IP (NSIP) address to the RADIUS server as the Network Access Server IP (NASIP) part of the Radius protocol.<br/>Possible values = ENABLED, DISABLED
:param radnasip:
"""
try :
self._radnasip = radnasip
except Exception as e:
raise e
@property
def radnasid(self) :
"""Send the Network Access Server ID (NASID) for your NetScaler appliance to the RADIUS server as the nasid part of the Radius protocol."""
try :
return self._radnasid
except Exception as e:
raise e
@radnasid.setter
def radnasid(self, radnasid) :
"""Send the Network Access Server ID (NASID) for your NetScaler appliance to the RADIUS server as the nasid part of the Radius protocol.
:param radnasid:
"""
try :
self._radnasid = radnasid
except Exception as e:
raise e
@property
def radvendorid(self) :
"""Vendor ID for RADIUS group extraction.<br/>Minimum length = 1."""
try :
return self._radvendorid
except Exception as e:
raise e
@radvendorid.setter
def radvendorid(self, radvendorid) :
"""Vendor ID for RADIUS group extraction.<br/>Minimum length = 1
:param radvendorid:
"""
try :
self._radvendorid = radvendorid
except Exception as e:
raise e
@property
def radattributetype(self) :
"""Attribute type for RADIUS group extraction.<br/>Minimum length = 1."""
try :
return self._radattributetype
except Exception as e:
raise e
@radattributetype.setter
def radattributetype(self, radattributetype) :
"""Attribute type for RADIUS group extraction.<br/>Minimum length = 1
:param radattributetype:
"""
try :
self._radattributetype = radattributetype
except Exception as e:
raise e
@property
def radgroupsprefix(self) :
"""Prefix string that precedes group names within a RADIUS attribute for RADIUS group extraction."""
try :
return self._radgroupsprefix
except Exception as e:
raise e
@radgroupsprefix.setter
def radgroupsprefix(self, radgroupsprefix) :
"""Prefix string that precedes group names within a RADIUS attribute for RADIUS group extraction.
:param radgroupsprefix:
"""
try :
self._radgroupsprefix = radgroupsprefix
except Exception as e:
raise e
@property
def radgroupseparator(self) :
"""Group separator string that delimits group names within a RADIUS attribute for RADIUS group extraction."""
try :
return self._radgroupseparator
except Exception as e:
raise e
@radgroupseparator.setter
def radgroupseparator(self, radgroupseparator) :
"""Group separator string that delimits group names within a RADIUS attribute for RADIUS group extraction.
:param radgroupseparator:
"""
try :
self._radgroupseparator = radgroupseparator
except Exception as e:
raise e
@property
def passencoding(self) :
"""Enable password encoding in RADIUS packets that the NetScaler appliance sends to the RADIUS server.<br/>Default value: pap<br/>Possible values = pap, chap, mschapv1, mschapv2."""
try :
return self._passencoding
except Exception as e:
raise e
@passencoding.setter
def passencoding(self, passencoding) :
"""Enable password encoding in RADIUS packets that the NetScaler appliance sends to the RADIUS server.<br/>Default value: pap<br/>Possible values = pap, chap, mschapv1, mschapv2
:param passencoding:
"""
try :
self._passencoding = passencoding
except Exception as e:
raise e
@property
def ipvendorid(self) :
"""Vendor ID attribute in the RADIUS response.
If the attribute is not vendor-encoded, it is set to 0.
"""
try :
return self._ipvendorid
except Exception as e:
raise e
@ipvendorid.setter
def ipvendorid(self, ipvendorid) :
"""Vendor ID attribute in the RADIUS response.
If the attribute is not vendor-encoded, it is set to 0.
:param ipvendorid:
"""
try :
self._ipvendorid = ipvendorid
except Exception as e:
raise e
@property
def ipattributetype(self) :
"""IP attribute type in the RADIUS response.<br/>Minimum length = 1."""
try :
return self._ipattributetype
except Exception as e:
raise e
@ipattributetype.setter
def ipattributetype(self, ipattributetype) :
"""IP attribute type in the RADIUS response.<br/>Minimum length = 1
:param ipattributetype:
"""
try :
self._ipattributetype = ipattributetype
except Exception as e:
raise e
@property
def accounting(self) :
"""Configure the RADIUS server state to accept or refuse accounting messages.<br/>Possible values = ON, OFF."""
try :
return self._accounting
except Exception as e:
raise e
@accounting.setter
def accounting(self, accounting) :
"""Configure the RADIUS server state to accept or refuse accounting messages.<br/>Possible values = ON, OFF
:param accounting:
"""
try :
self._accounting = accounting
except Exception as e:
raise e
@property
def pwdvendorid(self) :
"""Vendor ID of the password in the RADIUS response. Used to extract the user password.<br/>Minimum length = 1."""
try :
return self._pwdvendorid
except Exception as e:
raise e
@pwdvendorid.setter
def pwdvendorid(self, pwdvendorid) :
"""Vendor ID of the password in the RADIUS response. Used to extract the user password.<br/>Minimum length = 1
:param pwdvendorid:
"""
try :
self._pwdvendorid = pwdvendorid
except Exception as e:
raise e
@property
def pwdattributetype(self) :
"""Attribute type of the Vendor ID in the RADIUS response.<br/>Minimum length = 1."""
try :
return self._pwdattributetype
except Exception as e:
raise e
@pwdattributetype.setter
def pwdattributetype(self, pwdattributetype) :
"""Attribute type of the Vendor ID in the RADIUS response.<br/>Minimum length = 1
:param pwdattributetype:
"""
try :
self._pwdattributetype = pwdattributetype
except Exception as e:
raise e
@property
def defaultauthenticationgroup(self) :
"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64."""
try :
return self._defaultauthenticationgroup
except Exception as e:
raise e
@defaultauthenticationgroup.setter
def defaultauthenticationgroup(self, defaultauthenticationgroup) :
"""This is the default group that is chosen when the authentication succeeds in addition to extracted groups.<br/>Maximum length = 64
:param defaultauthenticationgroup:
"""
try :
self._defaultauthenticationgroup = defaultauthenticationgroup
except Exception as e:
raise e
@property
def callingstationid(self) :
"""Send Calling-Station-ID of the client to the RADIUS server. IP Address of the client is sent as its Calling-Station-ID.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
try :
return self._callingstationid
except Exception as e:
raise e
@callingstationid.setter
def callingstationid(self, callingstationid) :
"""Send Calling-Station-ID of the client to the RADIUS server. IP Address of the client is sent as its Calling-Station-ID.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
:param callingstationid:
"""
try :
self._callingstationid = callingstationid
except Exception as e:
raise e
@property
def groupauthname(self) :
"""To associate AAA users with an AAA group, use the command
"bind AAA group ... -username ...".
You can bind different policies to each AAA group. Use the command
"bind AAA group ... -policy ...".
"""
try :
return self._groupauthname
except Exception as e:
raise e
@property
def ipaddress(self) :
"""IP Address."""
try :
return self._ipaddress
except Exception as e:
raise e
@property
def builtin(self) :
"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL."""
try :
return self._builtin
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		"""converts nitro response into object and returns the object array in case of get request.
		:param service: nitro_service used to decode and (on session expiry) reset the session
		:param response: raw response string from the appliance
		"""
		try :
			result = service.payload_formatter.string_to_resource(aaaradiusparams_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# NOTE(review): errorcode 444 appears to mean an expired session,
				# hence the session reset — confirm against the NITRO API docs.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity supplied at all: treat any non-zero errorcode as fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.aaaradiusparams
		except Exception as e :
			raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
	@classmethod
	def update(cls, client, resource) :
		"""Use this API to update aaaradiusparams.
		:param client: nitro_service session used to send the request
		:param resource: aaaradiusparams object whose writable fields are pushed to the appliance
		"""
		try :
			# Only the single-resource form is supported; a list argument falls
			# through and implicitly returns None.
			if type(resource) is not list :
				# Copy every writable field onto a fresh object so that only
				# known parameters are serialized into the update request.
				updateresource = aaaradiusparams()
				updateresource.serverip = resource.serverip
				updateresource.serverport = resource.serverport
				updateresource.authtimeout = resource.authtimeout
				updateresource.radkey = resource.radkey
				updateresource.radnasip = resource.radnasip
				updateresource.radnasid = resource.radnasid
				updateresource.radvendorid = resource.radvendorid
				updateresource.radattributetype = resource.radattributetype
				updateresource.radgroupsprefix = resource.radgroupsprefix
				updateresource.radgroupseparator = resource.radgroupseparator
				updateresource.passencoding = resource.passencoding
				updateresource.ipvendorid = resource.ipvendorid
				updateresource.ipattributetype = resource.ipattributetype
				updateresource.accounting = resource.accounting
				updateresource.pwdvendorid = resource.pwdvendorid
				updateresource.pwdattributetype = resource.pwdattributetype
				updateresource.defaultauthenticationgroup = resource.defaultauthenticationgroup
				updateresource.callingstationid = resource.callingstationid
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		"""Use this API to unset the properties of aaaradiusparams resource.
		Properties that need to be unset are specified in args array.
		:param client: nitro_service session used to send the request
		:param resource: ignored except for its type; no fields are copied from it
		:param args: names of the properties to reset to their defaults
		"""
		try :
			# A list argument falls through and implicitly returns None.
			if type(resource) is not list :
				unsetresource = aaaradiusparams()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		"""Use this API to fetch all the aaaradiusparams resources that are configured on netscaler.
		:param client: nitro_service session used to send the request
		:param name: (Default value = "") only the empty (fetch-all) form is
			supported; passing a name falls through and implicitly returns None
		:param option_: (Default value = "") extra query options forwarded to get_resources
		"""
		try :
			if not name :
				obj = aaaradiusparams()
				response = obj.get_resources(client, option_)
				return response
		except Exception as e :
			raise e
	class Passencoding:
		"""Valid values for the passencoding argument (RADIUS password encoding scheme)."""
		pap = "pap"
		chap = "chap"
		mschapv1 = "mschapv1"
		mschapv2 = "mschapv2"
	class Builtin:
		"""Valid values reported by the read-only builtin attribute."""
		MODIFIABLE = "MODIFIABLE"
		DELETABLE = "DELETABLE"
		IMMUTABLE = "IMMUTABLE"
		PARTITION_ALL = "PARTITION_ALL"
	class Callingstationid:
		"""Valid values for the callingstationid argument."""
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class Accounting:
		"""Valid values for the accounting argument."""
		ON = "ON"
		OFF = "OFF"
	class Radnasip:
		"""Valid values for the radnasip argument."""
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
class aaaradiusparams_response(base_response) :
	"""NITRO response wrapper for aaaradiusparams: carries the standard
	error-status fields plus the decoded list of resource objects."""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate one empty resource per expected record. (The previous
		# `self.aaaradiusparams = []` initializer was dead code: it was
		# unconditionally overwritten by this assignment.)
		self.aaaradiusparams = [aaaradiusparams() for _ in range(length)]
|
from pypokerengine.players import BasePokerPlayer
class FishPlayer(BasePokerPlayer):  # must inherit BasePokerPlayer so the engine can drive it
    """Trivial baseline bot: always calls, regardless of cards or pot."""

    def declare_action(self, valid_actions, hole_card, round_state):
        # The call option sits at index 1 of valid_actions; returning its
        # action/amount pair tells the engine to call.
        call_info = valid_actions[1]
        return call_info["action"], call_info["amount"]

    def receive_game_start_message(self, game_info):
        pass

    def receive_round_start_message(self, round_count, hole_card, seats):
        pass

    def receive_street_start_message(self, street, round_state):
        pass

    def receive_game_update_message(self, action, round_state):
        pass

    def receive_round_result_message(self, winners, hand_info, round_state):
        pass
import pypokerengine.utils.visualize_utils as U
class ConsolePlayer(BasePokerPlayer):
    """Interactive player: echoes the game state and reads actions from stdin.

    NOTE(review): uses raw_input, i.e. Python 2 — on Python 3 this raises
    NameError; confirm the intended interpreter before porting to input().
    """

    def declare_action(self, valid_actions, hole_card, round_state):
        print(U.visualize_declare_action(valid_actions, hole_card, round_state, self.uuid))
        action, amount = self._receive_action_from_console(valid_actions)
        return action, amount

    def receive_game_start_message(self, game_info):
        print(U.visualize_game_start(game_info, self.uuid))
        self._wait_until_input()

    def receive_round_start_message(self, round_count, hole_card, seats):
        print(U.visualize_round_start(round_count, hole_card, seats, self.uuid))
        self._wait_until_input()

    def receive_street_start_message(self, street, round_state):
        print(U.visualize_street_start(street, round_state, self.uuid))
        self._wait_until_input()

    def receive_game_update_message(self, new_action, round_state):
        print(U.visualize_game_update(new_action, round_state, self.uuid))
        self._wait_until_input()

    def receive_round_result_message(self, winners, hand_info, round_state):
        print(U.visualize_round_result(winners, hand_info, round_state, self.uuid))
        self._wait_until_input()

    def _wait_until_input(self):
        raw_input("Enter some key to continue ...")

    # FIXME: This code would be crash if receives invalid input.
    # So you should add error handling properly.
    def _receive_action_from_console(self, valid_actions):
        action = raw_input("Enter action to declare >> ")
        if action == 'fold': amount = 0
        # BUG FIX: the call amount lives under the 'amount' key; the old code
        # read valid_actions[1]['action'] and returned the string "call" as
        # the amount.
        if action == 'call': amount = valid_actions[1]['amount']
        if action == 'raise': amount = int(raw_input("Enter raise amount >> "))
        return action, amount
from pypokerengine.players import BasePokerPlayer
from pypokerengine.utils.card_utils import gen_cards, estimate_hole_card_win_rate
# Number of Monte-Carlo rollouts per decision.
NB_SIMULATION = 1000


class HonestPlayer(BasePokerPlayer):
    """Bot that calls when its simulated win rate beats the break-even
    point (1 / number of players) and folds otherwise."""

    def declare_action(self, valid_actions, hole_card, round_state):
        win_rate = estimate_hole_card_win_rate(
            nb_simulation=NB_SIMULATION,
            nb_player=self.nb_player,
            hole_card=gen_cards(hole_card),
            community_card=gen_cards(round_state['community_card']),
        )
        # valid_actions[1] is call, valid_actions[0] is fold.
        chosen = valid_actions[1] if win_rate >= 1.0 / self.nb_player else valid_actions[0]
        return chosen["action"], chosen["amount"]

    def receive_game_start_message(self, game_info):
        # Remember the table size for the break-even computation above.
        self.nb_player = game_info["player_num"]

    def receive_round_start_message(self, round_count, hole_card, seats):
        pass

    def receive_street_start_message(self, street, round_state):
        pass

    def receive_game_update_message(self, new_action, round_state):
        pass

    def receive_round_result_message(self, winners, hand_info, round_state):
        pass
def trail_play():
    """Run a quick three-bot demo game and print the final result."""
    from pypokerengine.api.game import setup_config, start_poker
    config = setup_config(max_round=10, initial_stack=100, small_blind_amount=5)
    for player_name in ("p1", "p2", "p3"):
        config.register_player(name=player_name, algorithm=FishPlayer())
    game_result = start_poker(config, verbose=1)
    print(game_result)
|
#!/usr/bin/env python3
import os
import logging
import argparse
import csv
# Substrings used to classify lines of the unit-test (googletest-style) log.
OK_SIGN = "OK ]"
FAILED_SIGN = "FAILED ]"
SEGFAULT = "Segmentation fault"
SIGNAL = "received signal SIG"
PASSED = "PASSED"
def get_test_name(line):
    """Return the last space-separated token of *line* without parentheses.

    gtest status lines look like "[       OK ] Suite.Test (5 ms)"; scanning
    from the right skips the "(5" / "ms)" timing tokens and lands on the
    test name. Raises if every token contains a parenthesis.
    """
    for token in reversed(line.split(" ")):
        if "(" not in token and ")" not in token:
            return token
    raise Exception("No test name in line '{}'".format(line))
def process_result(result_folder):
    """Parse <result_folder>/test_result.txt (googletest-style output).

    Returns a (status, description, summary) triple: status is "success",
    "failure" or "exception"; description is a human-readable summary;
    summary is a list of (test_name, "OK"|"FAIL") pairs.
    """
    summary = []
    total_counter = 0
    failed_counter = 0
    result_log_path = "{}/test_result.txt".format(result_folder)
    if not os.path.exists(result_log_path):
        logging.info("No output log on path %s", result_log_path)
        return "exception", "No output log", []
    status = "success"
    description = ""
    passed = False
    with open(result_log_path, "r") as test_result:
        for line in test_result:
            if OK_SIGN in line:
                logging.info("Found ok line: '%s'", line)
                test_name = get_test_name(line.strip())
                logging.info("Test name: '%s'", test_name)
                summary.append((test_name, "OK"))
                total_counter += 1
            # "ms)" and "listed below" filter out the end-of-run FAILED
            # summary block so only per-test FAILED lines are counted.
            elif FAILED_SIGN in line and "listed below" not in line and "ms)" in line:
                logging.info("Found fail line: '%s'", line)
                test_name = get_test_name(line.strip())
                logging.info("Test name: '%s'", test_name)
                summary.append((test_name, "FAIL"))
                total_counter += 1
                failed_counter += 1
            elif SEGFAULT in line:
                logging.info("Found segfault line: '%s'", line)
                status = "failure"
                description += "Segmentation fault. "
                break
            elif SIGNAL in line:
                logging.info("Received signal line: '%s'", line)
                status = "failure"
                description += "Exit on signal. "
                break
            elif PASSED in line:
                logging.info("PASSED record found: '%s'", line)
                passed = True
    # A run that never printed PASSED (e.g. crashed mid-way) is a failure
    # even when no individual test failed.
    if not passed:
        status = "failure"
        description += "PASSED record not found. "
    if failed_counter != 0:
        status = "failure"
    if not description:
        description += "fail: {}, passed: {}".format(
            failed_counter, total_counter - failed_counter
        )
    return status, description, summary
def write_results(results_file, status_file, results, status):
with open(results_file, "w") as f:
out = csv.writer(f, delimiter="\t")
out.writerows(results)
with open(status_file, "w") as f:
out = csv.writer(f, delimiter="\t")
out.writerow(status)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
    parser = argparse.ArgumentParser(
        description="ClickHouse script for parsing results of unit tests"
    )
    # Defaults match the paths used inside the CI docker image.
    parser.add_argument("--in-results-dir", default="/test_output/")
    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
    args = parser.parse_args()
    state, description, test_results = process_result(args.in_results_dir)
    logging.info("Result parsed")
    status = (state, description)
    write_results(args.out_results_file, args.out_status_file, test_results, status)
    logging.info("Result written")
|
# Webhook credentials (presumably a Messenger-style page/verify token pair —
# confirm with the caller). Left empty in source control; fill in or load
# from the environment before deploying.
_PAGE_ACCESS_TOKEN = ""
_VERIFY_TOKEN = ""
def get_page_access_token():
    """Return the configured page access token."""
    return _PAGE_ACCESS_TOKEN
def get_verify_token():
    """Return the configured webhook verification token."""
    return _VERIFY_TOKEN
|
# Copyright 2017-2023 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
from abc import abstractmethod
try:
from abc import ABC
except ImportError:
from abc import ABCMeta
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
import sys
import warnings
from .query import Query
from .compound import (
CompoundQuery,
ConjunctionQuery,
DisjunctionQuery,
ExclusiveDisjunctionQuery,
NegationQuery,
)
from .object_dialect import ObjectQuery
from .string_dialect import parse_string_dialect
from .engine import QueryEngine
from .errors import BadNumberNaryQueryArgs, InvalidQueryPath
# QueryEngine object for running the legacy "apply" methods
# (spelling of the name kept as-is: it is part of the module's public surface).
COMPATABILITY_ENGINE = QueryEngine()
class AbstractQuery(ABC):
    """Base class for all 'old-style' queries."""
    @abstractmethod
    def apply(self, gf):
        """Apply this query to a GraphFrame; implemented by subclasses."""
        pass
    def __and__(self, other):
        """Create a new AndQuery using this query and another.
        Arguments:
            other (AbstractQuery): the other query to use in constructing the AndQuery
        Returns:
            (AndQuery): a new AndQuery that performs the AND of the results of both queries
        """
        return AndQuery(self, other)
    def __or__(self, other):
        """Create a new OrQuery using this query and another.
        Arguments:
            other (AbstractQuery): the other query to use in constructing the OrQuery
        Returns:
            (OrQuery): a new OrQuery that performs the OR of the results of both queries
        """
        return OrQuery(self, other)
    def __xor__(self, other):
        """Create a new XorQuery using this query and another.
        Arguments:
            other (AbstractQuery): the other query to use in constructing the XorQuery
        Returns:
            (XorQuery): a new XorQuery that performs the XOR of the results of both queries
        """
        return XorQuery(self, other)
    def __invert__(self):
        """Create a new NotQuery using this query.
        Returns:
            (NotQuery): a new NotQuery that inverts the results of this query
        """
        return NotQuery(self)
    @abstractmethod
    def _get_new_query(self):
        """Return the equivalent 'new-style' query object; implemented by subclasses."""
        pass
class NaryQuery(AbstractQuery):
    """Base class for all compound queries that act on
    and merge N separate subqueries."""
    def __init__(self, *args):
        """Create a new NaryQuery object.
        Arguments:
            *args (AbstractQuery, str, or list): the subqueries to be performed
        """
        self.compat_subqueries = []
        # Accept the subqueries either variadically or as one tuple.
        if isinstance(args[0], tuple) and len(args) == 1:
            args = args[0]
        for query in args:
            # Lists are object-dialect queries; strings are string-dialect
            # queries; anything else must already be a query object.
            if isinstance(query, list):
                self.compat_subqueries.append(QueryMatcher(query))
            elif isinstance(query, str):
                self.compat_subqueries.append(CypherQuery(query))
            elif issubclass(type(query), AbstractQuery):
                self.compat_subqueries.append(query)
            elif issubclass(type(query), Query) or issubclass(
                type(query), CompoundQuery
            ):
                self.compat_subqueries.append(query)
            else:
                raise TypeError(
                    "Subqueries for NaryQuery must be either a\
                    high-level query or a subclass of AbstractQuery"
                )
    def apply(self, gf):
        """Applies the query to the specified GraphFrame.
        Arguments:
            gf (GraphFrame): the GraphFrame on which to apply the query
        Results:
            (list): A list of nodes representing the result of the query
        """
        true_query = self._get_new_query()
        return COMPATABILITY_ENGINE.apply(true_query, gf.graph, gf.dataframe)
    def _get_new_query(self):
        """Gets all the underlying 'new-style' queries in this object.
        Returns:
            (List[Union[Query, CompoundQuery]]): a list of all the underlying 'new-style' queries in this object
        """
        true_subqueries = []
        for subq in self.compat_subqueries:
            true_subq = subq
            # Old-style subqueries are converted recursively.
            if issubclass(type(subq), AbstractQuery):
                true_subq = subq._get_new_query()
            true_subqueries.append(true_subq)
        return self._convert_to_new_query(true_subqueries)
    @abstractmethod
    def _convert_to_new_query(self, subqueries):
        """Combine the resolved subqueries into one new-style compound query."""
        pass
class AndQuery(NaryQuery):
    """Compound query that returns the intersection of the results
    of the subqueries."""
    def __init__(self, *args):
        """Create a new AndQuery object.
        Arguments:
            *args (AbstractQuery, str, or list): the subqueries to be performed
        """
        warnings.warn(
            "Old-style queries are deprecated and will be removed in the \
            future. Please use new-style queries (e.g., \
            hatchet.query.ConjunctionQuery) instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        if sys.version_info[0] == 2:
            super(AndQuery, self).__init__(*args)
        else:
            super().__init__(*args)
        if len(self.compat_subqueries) < 2:
            raise BadNumberNaryQueryArgs("AndQuery requires 2 or more subqueries")
    def _convert_to_new_query(self, subqueries):
        """Wrap the subqueries in a new-style ConjunctionQuery."""
        return ConjunctionQuery(*subqueries)
"""Alias of AndQuery"""
IntersectionQuery = AndQuery
class OrQuery(NaryQuery):
    """Compound query that returns the union of the results
    of the subqueries"""
    def __init__(self, *args):
        """Create a new OrQuery object.
        Arguments:
            *args (AbstractQuery, str, or list): the subqueries to be performed
        """
        warnings.warn(
            "Old-style queries are deprecated and will be removed in the \
            future. Please use new-style queries (e.g., \
            hatchet.query.DisjunctionQuery) instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        if sys.version_info[0] == 2:
            super(OrQuery, self).__init__(*args)
        else:
            super().__init__(*args)
        if len(self.compat_subqueries) < 2:
            raise BadNumberNaryQueryArgs("OrQuery requires 2 or more subqueries")
    def _convert_to_new_query(self, subqueries):
        """Wrap the subqueries in a new-style DisjunctionQuery."""
        return DisjunctionQuery(*subqueries)
"""Alias of OrQuery"""
UnionQuery = OrQuery
class XorQuery(NaryQuery):
    """Compound query that returns the symmetric difference
    (i.e., set-based XOR) of the results of the subqueries"""
    def __init__(self, *args):
        """Create a new XorQuery object.
        Arguments:
            *args (AbstractQuery, str, or list): the subqueries to be performed
        """
        warnings.warn(
            "Old-style queries are deprecated and will be removed in the \
            future. Please use new-style queries (e.g., \
            hatchet.query.ExclusiveDisjunctionQuery) instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        if sys.version_info[0] == 2:
            super(XorQuery, self).__init__(*args)
        else:
            super().__init__(*args)
        if len(self.compat_subqueries) < 2:
            raise BadNumberNaryQueryArgs("XorQuery requires 2 or more subqueries")
    def _convert_to_new_query(self, subqueries):
        """Wrap the subqueries in a new-style ExclusiveDisjunctionQuery."""
        return ExclusiveDisjunctionQuery(*subqueries)
"""Alias of XorQuery"""
SymDifferenceQuery = XorQuery
class NotQuery(NaryQuery):
    """Compound query that returns all nodes in the GraphFrame that
    are not returned from the subquery."""
    def __init__(self, *args):
        """Create a new NotQuery object.
        Arguments:
            *args (AbstractQuery, str, or list): the subquery to be performed
        """
        warnings.warn(
            "Old-style queries are deprecated and will be removed in the \
            future. Please use new-style queries (e.g., \
            hatchet.query.NegationQuery) instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        if sys.version_info[0] == 2:
            super(NotQuery, self).__init__(*args)
        else:
            super().__init__(*args)
        # BUG FIX: the docstring and error message promise exactly one
        # subquery, but the old check (`< 1`) silently accepted several.
        # Enforce the documented contract.
        if len(self.compat_subqueries) != 1:
            raise BadNumberNaryQueryArgs("NotQuery requires exactly 1 subquery")
    def _convert_to_new_query(self, subqueries):
        """Wrap the single validated subquery in a new-style NegationQuery."""
        return NegationQuery(*subqueries)
class QueryMatcher(AbstractQuery):
    """Processes and applies base syntax queries and Object-based queries to GraphFrames."""
    def __init__(self, query=None):
        """Create a new QueryMatcher object.
        Arguments:
            query (list, optional): if provided, convert the Object-based query
                into its internal representation; if omitted, start an empty
                base-syntax query to be built with match()/rel()
        """
        warnings.warn(
            "Old-style queries are deprecated and will be removed in the \
            future. Please use new-style queries instead. \
            For QueryMatcher, the equivalent new-style queries are \
            hatchet.query.Query for base-syntax queries and \
            hatchet.query.ObjectQuery for the object-dialect.",
            DeprecationWarning,
            stacklevel=2,
        )
        self.true_query = None
        if query is None:
            self.true_query = Query()
        elif isinstance(query, list):
            self.true_query = ObjectQuery(query)
        else:
            raise InvalidQueryPath("Provided query is not a valid object dialect query")
    def match(self, wildcard_spec=".", filter_func=lambda row: True):
        """Start a query with a root node described by the arguments.
        Arguments:
            wildcard_spec (str, optional): the wildcard status of the node
            filter_func (Callable, optional): a callable accepting only a row from a pandas DataFrame
                that is used to filter this node in the query
        Returns:
            (QueryMatcher): the instance of the class that called this function
        """
        self.true_query.match(wildcard_spec, filter_func)
        return self
    def rel(self, wildcard_spec=".", filter_func=lambda row: True):
        """Add another edge and node to the query.
        Arguments:
            wildcard_spec (str, optional): the wildcard status of the node
            filter_func (Callable, optional): a callable accepting only a row from a pandas DataFrame
                that is used to filter this node in the query
        Returns:
            (QueryMatcher): the instance of the class that called this function
        """
        self.true_query.rel(wildcard_spec, filter_func)
        return self
    def apply(self, gf):
        """Apply the query to a GraphFrame.
        Arguments:
            gf (GraphFrame): the GraphFrame on which to apply the query
        Returns:
            (list): A list representing the set of nodes from paths that match this query
        """
        return COMPATABILITY_ENGINE.apply(self.true_query, gf.graph, gf.dataframe)
    def _get_new_query(self):
        """Get all the underlying 'new-style' query in this object.
        Returns:
            (Query or ObjectQuery): the underlying 'new-style' query in this object
        """
        return self.true_query
class CypherQuery(QueryMatcher):
    """Processes and applies String-based queries to GraphFrames."""
    def __init__(self, cypher_query):
        """Create a new CypherQuery object.
        Arguments:
            cypher_query (str): the String-based query
        """
        # NOTE: deliberately does not call super().__init__ — it emits its own
        # deprecation warning and sets true_query directly.
        warnings.warn(
            "Old-style queries are deprecated and will be removed in the \
            future. Please use new-style queries instead. \
            For CypherQuery, the equivalent new-style query is \
            hatchet.query.StringQuery.",
            DeprecationWarning,
            stacklevel=2,
        )
        self.true_query = parse_string_dialect(cypher_query)
    def _get_new_query(self):
        """Gets the underlying 'new-style' query in this object.
        Returns:
            (Query or CompoundQuery): the underlying 'new-style' query in this object
        """
        return self.true_query
def parse_cypher_query(cypher_query):
    """Parse all types of String-based queries, including multi-queries that
    leverage the curly brace delimiters.
    Arguments:
        cypher_query (str): the String-based query to be parsed
    Returns:
        (CypherQuery): a Hatchet query for this String-based query
    """
    warnings.warn(
        "Old-style queries are deprecated and will be removed in the \
        future. Please use new-style queries instead. \
        For parse_cypher_query, the equivalent new-style function is \
        hatchet.query.parse_string_dialect.",
        DeprecationWarning,
        stacklevel=2,
    )
    # CypherQuery delegates to parse_string_dialect, which handles the
    # multi-query syntax as well.
    return CypherQuery(cypher_query)
|
# Apparently the classic "Weird or Not Weird" exercise: N of 2 or 4, or an
# even N above 20, prints "Not Weird"; everything else prints "Weird".
# `N` is already an int after the first conversion, so the repeated int(N)
# calls were redundant and have been removed.
N = int(input())
if N in (2, 4):
    print('Not Weird')
elif N % 2 == 0 and N > 20:
    print('Not Weird')
else:
    print('Weird')
# Every time a user registers, their Profile is created automatically,
# without having to add it manually from the admin site.
from django.db.models.signals import post_save, pre_delete #Se apeleaza dupa ce un user este creat
from django.contrib.auth.models import User #senderul - se trimite semnalul
from django.dispatch import receiver #o fct care preia signalul si performeaza anumite taskuri pe baza acestuia
from .models import Profile
from django.db.models import signals
# Signals are hooked up in apps.py via the ready() function.
# When a user is saved, the post_save signal fires; the @receiver-decorated
# function below receives it and performs the corresponding task.
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Automatically create a Profile whenever a new User is saved.

    `created` is True only on the first save, so existing users are left
    untouched.
    """
    if created:
        # get_or_create returns a (profile, created) tuple; the old code bound
        # it to an unused variable. It is idempotent, so a duplicated signal
        # cannot create a second profile.
        Profile.objects.get_or_create(user=instance)
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("demo")
# Skip events with a missing product instead of aborting the whole job.
process.options = cms.untracked.PSet(
    SkipEvent = cms.untracked.vstring('ProductNotFound')
)
process.load("FWCore.MessageService.MessageLogger_cfi")
# Build Z candidates from opposite-charge pairs of selected PAT muons,
# keeping only pairs with invariant mass in the 81-101 GeV window.
process.ZCandidate = cms.EDProducer("CandViewShallowCloneCombiner",
    decay = cms.string("selectedPatMuons@+ selectedPatMuons@-"),
    cut = cms.string("81. < mass < 101.")
)
process.CompCandDump = cms.EDAnalyzer("ZcandHisto",
    ZCandCollection = cms.untracked.string('ZCandidate'),
    pfCandCollection = cms.untracked.string('pfNoElectronPFlow')
)
# -1 means: run over all events in the input sample.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1)
)
#process.load("AWhitbeck.JetHisto.testSample_cff")
#process.load("AWhitbeck.JetHisto.ZHbbSample_cff")
process.load("AWhitbeck.JetHisto.ZjetsSample_cff")
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('histo.root')
)
# Execution path: produce Z candidates, then run the dumper/analyzer.
process.p = cms.Path(process.ZCandidate
    + process.CompCandDump)
|
#!/usr/bin/env python
""" Rosalind project - Problem: Finding a Shared Motif
Problem
A common substring of a collection of strings is a substring of every member of
the collection. We say that a common substring is a longest common substring if a
longer common substring of the collection does not exist. For example, CG is a common
substring of ACGTACGT and AACCGGTATA, whereas GTA is a longest common substring. Note
that multiple longest common substrings may exist.
Given: A collection of k DNA strings (of length at most 1 kbp each; k=100).
Return: A longest common substring of the collection. (If multiple solutions exist,
you may return any single solution.)
Sample Dataset
GATTACA
TAGACCA
ATACA
Sample Output
AC
"""
__author__ = "Daniel J. Barnes"
__email__ = "ghen2000@gmail.com"
__status__ = "Working"


def longest_common_substring(strings):
    """Return one longest substring common to every string in *strings*.

    Candidates are all substrings of the first string, tried longest first;
    ties are broken arbitrarily (the problem allows any single solution).
    Returns "" for an empty collection or when nothing is shared.
    """
    if not strings:
        return ""
    first = strings[0]
    candidates = {first[i:j]
                  for i in range(len(first))
                  for j in range(i + 1, len(first) + 1)}
    for chunk in sorted(candidates, key=len, reverse=True):
        if all(chunk in other for other in strings[1:]):
            return chunk
    return ""


def main():
    """Read sequences (one per line) and write the motif to LCSM_output.txt."""
    filename = input("Enter input path: ")
    with open(filename) as filein:
        lines = [line.rstrip() for line in filein if line.strip()]
    consensus = longest_common_substring(lines)
    with open('LCSM_output.txt', 'w') as fileout:
        fileout.write(consensus)
    print(consensus, len(consensus), 'bp')


# Ported from Python 2 (print statements / raw_input were syntax errors on
# Python 3) and guarded so the module can be imported without prompting.
if __name__ == "__main__":
    main()
import gamestate
from lib.characters import *
from settings import Settings
if __name__ == "__main__":
    # Demo roster: two plain villagers plus one of each special role.
    state = gamestate.GameState([Villager('test0'), Villager('test1'), Doctor('test2'), Cop('test3'), Mafia('test4')], Settings())
    state.run()
state.run() |
from eppy.doc import EppDoc
class EppUpdateLaunch(EppDoc):
    """EPP <launch:update> command document for a given launch phase and
    application id."""
    _path = ('launch:update',)

    def __init__(self, phase: str, applicationid: str):
        payload = {
            'launch:update': {
                'phase': phase,
                'applicationID': applicationid,
            }
        }
        super(EppUpdateLaunch, self).__init__(dct=self.annotate(payload))
|
import numpy as np
import cv2
import os
__all__ = ['load_test_data', 'load_training_data']
# Class names; a label's list index doubles as its integer class id.
labels = ["covid", "lung_opacity", "pneumonia", "normal"]
# Every image is resized to a square of this many pixels.
img_size = 224
def load_training_data(data_dir):
    """
    Load in training data.
    input: data_dir, str, path of one folder per label (see `labels`).
    output: np.array(data), array of [grayscale image, class_id] pairs; the
        class id is the label's index in `labels`.
    """
    data = []
    # enumerate() yields the class id directly instead of the redundant
    # labels.index(label) lookup on every iteration.
    for class_num, label in enumerate(labels):
        path = os.path.join(data_dir, label)
        for img in os.listdir(path):
            if not img.endswith('.png'):
                continue
            try:
                img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                # Reshaping images to preferred size
                resized_arr = cv2.resize(img_arr, (img_size, img_size))
                data.append([resized_arr, class_num])
            except Exception as e:
                # cv2.imread returns None for unreadable files; the resize
                # then raises, and we log-and-skip that file.
                print(e)
    return np.array(data)
def load_test_data(data_dir, train=True):
    """
    Load in test data.
    input:
        data_dir, str, path of data folder.
        train, bool, currently unused — kept for backward compatibility.
    output:
        np.array(data), np.array of the loaded dataset.
        file_names, list of names of each images.
    """
    data = []
    file_names = []
    path = data_dir
    class_num = 0  # test images have no ground-truth label; 0 is a dummy
    num = len(os.listdir(path))
    # NOTE(review): assumes the folder holds exactly `num` files named
    # test_0.png .. test_{num-1}.png — any extra file shifts the count;
    # confirm against the data layout.
    for i in range(num):
        img = 'test_%d.png' % i
        try:
            img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
            # Reshaping images to preferred size
            resized_arr = cv2.resize(img_arr, (img_size, img_size))
            data.append([resized_arr, class_num])
            file_names.append(img.replace('.png', ''))
        except Exception as e:
            print(e)
    return np.array(data), file_names
|
from IPython.utils.py3compat import xrange
# Small demo script exercising str, range and set/frozenset behavior.
price = '200'
print(price.zfill(4))  # left-pad with zeros to width 4 -> "0200"
name = "sudeeppatel"
print(name.upper())
print(name.swapcase())
# swapcase() returns a new string and `name` is unchanged, so this second
# call prints the same value again (presumably a deliberate demonstration).
print(name.swapcase())
print(name.isalnum(), name.isalpha(), name.isdigit())
name = "sudeep patel"
# The embedded space makes both isalnum() and isalpha() False.
print(name.isalnum(), name.isalpha(), name.isdigit())
print(name.encode(encoding='utf_8', errors='strict'))
print((name.encode(encoding='utf_8', errors='strict')))
print(name.capitalize())
# On Python 3, range() is lazy: printing it shows "range(1, 6)", not the numbers.
listofnnum = range(1, 6)
print(listofnnum)
for i in listofnnum:
    print(i)
st = set([1, 4, 5, 33])
fst = frozenset([1, 4, 5, 33])  # immutable, hashable variant of set
print(st)
print(fst)
print(len(st), st.union(fst))
|
from neural_nlp.stimuli import load_stimuli
class TestDiverseSentences:
    """Spot-checks (counts and first/last items) for the 'diverse' stimulus sets."""
    def test_1(self):
        data = load_stimuli('diverse.1')
        assert len(data) == 384
        assert data[0] == 'An accordion is a portable musical instrument with two keyboards.'
        assert data[-1] == 'A woman has different reproductive organs than a man.'
    def test_2(self):
        data = load_stimuli('diverse.2')
        assert len(data) == 243
        assert data[0] == 'Beekeeping encourages the conservation of local habitats.'
        assert data[-1] == "This has led to more injuries, particularly to ligaments in the skier's knee."
class TestNaturalisticStories:
    """Spot-checks for the naturalistic-stories stimulus set."""
    def test_boar(self):
        data = load_stimuli('naturalistic.Boar')
        assert len(data) == 47
        assert data[0] == 'If you were to journey to the North of England, ' \
                          'you would come to a valley that is surrounded by moors as high as mountains.'
        assert data[-1] == "His fame was indeed assured, but it was not nearly as lasting " \
                           "as that of the fearsome Bradford Boar."
class TestNaturalisticStoriesNeural:
    """Spot-checks for the neural variants (full vs. reduced) of the naturalistic stories."""
    def test_boar_full(self):
        data = load_stimuli('naturalistic-neural-full.Boar')
        assert len(data) == 23
        assert data[0] == 'That all mill owners were generally busy as beavers and quite pleased with themselves for ' \
                          'being so successful and well off was known to the residents of Bradford and if you were to ' \
                          'go into the city to visit the stately City Hall you would see there the Crest of the City ' \
                          'of Bradford which those same mill owners created to celebrate their achievements.'
        assert data[-1] == "His fame was indeed assured but it was not nearly as lasting " \
                           "as that of the fearsome Bradford Boar."
    def test_boar_reduced(self):
        data = load_stimuli('naturalistic-neural-reduced.Boar')
        assert len(data) == 47
        assert data[0] == 'If you were to journey to the North of England you would ' \
                          'come to a valley that is surrounded by moors as high as'
        assert data[-1] == "His fame was indeed assured but it was not nearly as lasting " \
                           "as that of the fearsome Bradford Boar."
    def test_load_all_full(self):
        self._test_load_all(reduced=False)
    def test_load_all_reduced(self):
        self._test_load_all(reduced=True)
    def _test_load_all(self, reduced=False):
        # Shared helper: every story must load and be non-empty.
        for story in ['Boar', 'MatchstickSeller', 'KingOfBirds', 'Elvis', 'HighSchool']:
            data = load_stimuli('naturalistic-neural-{}.{}'.format('reduced' if reduced else 'full', story))
            assert data is not None
            assert len(data) > 0
|
import numpy as np
import matplotlib
#matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import torch
#import model
import config as c
#c.feature_net_file+="_trained"
# Evaluation-time configuration overrides: evaluate the pretrained feature
# net, not the additionally fine-tuned variant.
c.additionally_trained_feature_net = False
c.use_pretrained = True
#import new_net as feature_net
import feed_forward_net as feature_net
#import easy_train as feature_net
import plot_helpers
import data.prepare_data as prepare_data
import data.dataloader as dataloader
import data.data_helpers as data_helpers
import scipy
from scipy.stats import norm
import matplotlib.mlab as mlab
#from scipy.stats import norm
model = feature_net.model
feature_net.load(c.feature_net_file)
model.eval()  # inference mode: disables dropout / batch-norm updates
#print('Trainable parameters:')
#print(sum([p.numel() for p in model.params_trainable]))
#Variables:
# how many different posteriors to show:
n_plots = 5
# how many dimensions of x to use:
n_x = c.x_dim#
offset=0
# N: number of x-dimensions histogrammed by hists() below.
N=c.x_dim
print(f"Evaluates with n_plots={n_plots}, n_x={n_x}, offset={offset} and N={N}")
def concatenate_all_set(cut=None):
    """Gather every (x, y) batch from the test loader followed by the train
    loader and concatenate them into two large tensors.

    Arguments:
        cut: optional number of rows to keep from the front of the result.
    """
    xs, ys = [], []
    for loader in (c.test_loader, c.train_loader):
        for x, y in loader:
            xs.append(x)
            ys.append(y)
    x_cat = torch.cat(xs, 0)
    y_cat = torch.cat(ys, 0)
    if cut:
        x_cat, y_cat = x_cat[:cut], y_cat[:cut]
    return x_cat, y_cat
def concatenate_train_set(cut=None):
    """Concatenate all (x, y) batches of the train loader; optionally keep
    only the first `cut` rows."""
    xs, ys = [], []
    for x, y in c.train_loader:
        xs.append(x)
        ys.append(y)
    x_cat, y_cat = torch.cat(xs, 0), torch.cat(ys, 0)
    return (x_cat[:cut], y_cat[:cut]) if cut else (x_cat, y_cat)
def concatenate_test_set(cut=None):
    """Concatenate (x, y, analysis) batches of the annotated test loader.

    `cut` bounds the number of returned rows: the loop stops once roughly
    `cut` samples have been gathered and the final slice trims exactly.
    """
    x_all, y_all, ana_all = [], [],[]
    for idx, (x,y, ana) in enumerate(c.test_ana_loader): #,ana
        x_all.append(x)
        y_all.append(y)
        #print("\nx\n",model.fn_func(y.to(c.device)).detach().cpu().numpy())
        ana_all.append(ana)
        if cut:
            # Early exit once enough whole batches have been collected.
            if idx*c.batch_size>cut:
                break
    x_cat, y_cat, ana_cat = torch.cat(x_all, 0), torch.cat(y_all, 0), torch.cat(ana_all, 0) #ana_all
    if cut:
        x_cat, y_cat, ana_cat = x_cat[:cut], y_cat[:cut], ana_cat[:cut]
    return x_cat, y_cat, ana_cat
# Materialize the evaluation subset once; `position` extracts the last two
# spectrum columns, used below as map coordinates for the quality plots.
x_all, y_all, ana_all = concatenate_test_set(c.evaluation_samples)#concatenate_test_set(1000)#400)#20)#400)
ana_all = ana_all.detach().cpu().numpy()
position = prepare_data.y_to_spectra(y_all)[:,-2:]
#position=dataloader.ret_params[:c.evaluation_samples,[-2,-3]]
def hists(x, n_dims=None):
    """Compute per-dimension, max-normalized histograms.

    Arguments:
        x: 2-D array-like of shape (samples, dimensions).
        n_dims: number of leading columns to histogram; defaults to the
            module-level N (kept for backward compatibility with existing
            callers, which pass nothing).
    Returns:
        list of [bin_left_edges, normalized_counts] pairs, one per column,
        each with 100 entries; counts are scaled so the tallest bin is 1.
    """
    if n_dims is None:
        n_dims = N
    results = []
    for j in range(n_dims):
        h, b = np.histogram(x[:, j], bins=100, density=True)#range=(-2,2),
        h /= np.max(h)  # normalize so the tallest bin equals 1
        results.append([b[:-1], h])
    return results
def show_error_stats(errors, name = "CO2", show_nice = False):
    """Histogram the retrieval errors and overlay a fitted normal PDF.

    Arguments:
        errors: 1-D array of (prediction - ground truth) values in ppm.
        name: matplotlib figure name (and implicitly the quantity shown).
        show_nice: additionally draw the first panel of the larger
            'nice_nn_sol' summary figure with bigger fonts.
    """
    plt.figure(name,figsize=(8,2))
    #plt.title(f"Total {name} error")
    # Symmetric bin range around zero so the fitted mean is easy to read off.
    border = max(np.abs(errors.min()),np.abs(errors.max()))
    bins=np.linspace(-border,border,100)
    #plt.hist(error_ret, bins=bins, density=False, histtype='step',color="blue",label="retrival")
    plt.hist(errors, bins=bins, density=True, histtype='step',color="blue")
    (mu, sigma)=norm.fit(errors)
    y_fn = scipy.stats.norm.pdf(bins, mu, sigma)
    plt.plot(bins, y_fn, 'r--', linewidth=2)
    plt.title(rf"$\mu$ = {mu:.2f}, $\sigma$ = {sigma:.2f}")
    plt.xlabel("Difference to gt in ppm")
    plt.ylabel("Prob. density")
    #plt.legend()
    plt.tight_layout()
    if show_nice:
        # Same plot again, into subplot 1 of the 3-panel summary figure.
        plt.figure("nice_nn_sol",figsize=(16,5))
        plt.subplot(1, 3, 1)
        plt.hist(errors, bins=bins, density=True, histtype='step',color="blue")
        (mu, sigma)=norm.fit(errors)
        y_fn = scipy.stats.norm.pdf(bins, mu, sigma)
        plt.plot(bins, y_fn, 'r--', linewidth=2)
        plt.title(rf"$\mu$ = {mu:.2f}, $\sigma$ = {sigma:.2f}",fontsize = 18)
        plt.xlabel("Difference to gt in ppm",fontsize = 18)
        plt.ylabel("Prob. density",fontsize = 18)
        plt.tight_layout()
def show_predicted_error():
    """Placeholder; intentionally does nothing (uncertainty plotting
    currently lives inside show_feature_net_solution)."""
    return None
def show_feature_net_solution():
    """Visualise the feed-forward network's point prediction and its
    predicted uncertainty against the cached ground-truth test set.

    Produces quality maps, the total-error histogram and, per sample,
    a per-parameter grid of prior / ground-truth / prediction plots.
    """
    print("show_feature_net_solution")
    orig_prior_hists = hists(prepare_data.x_to_params(x_all))
    x = model.fn_func(y_all.to(c.device)).detach().cpu().numpy()
    y_gt = y_all  # [:n_plots]
    x_gt = x_all
    orig_x_gt = prepare_data.x_to_params(x_gt)
    orig_y_gt = prepare_data.y_to_spectra(y_gt)
    orig_x = prepare_data.x_to_params(x)
    plot_helpers.plot_quality_map((np.abs(orig_x - orig_x_gt)[:, 0]), position, "Error of network prediction")
    print("shapes:", orig_x.shape, orig_x_gt.shape)
    show_error_stats(orig_x[:, 0] - orig_x_gt[:, 0], show_nice=True)
    print("show_predicted_error")
    x_uncert = model.fn_func(y_all.to(c.device)).detach().cpu().numpy()[:, 1]
    # compute s to sigma, following the paper
    x_uncert = np.sqrt(np.exp(x_uncert))
    uncert = x_uncert * np.linalg.inv(prepare_data.w_x)[0, 0]
    plot_helpers.plot_quality_map(uncert, position, "Predicted Uncertainty")
    plot_helpers.plot_quality_map(np.abs((orig_x[:, 0] - orig_x_gt[:, 0]) / uncert), position, "Uncertainty quality")
    for i in range(n_plots):
        plt.figure(f"orig_{i}", figsize=(20, 15))
        for j in range(n_x):
            # BUG FIX: subplot counts must be integers — `n_x/4+1` is a
            # float in Python 3 and is rejected by current matplotlib.
            plt.subplot(3, n_x // 4 + 1, j + 1)
            if j == 0:
                # label only the first panel to keep one shared legend
                plt.step(*(orig_prior_hists[j]), where='post', color='grey', label="prior")
                plt.plot([orig_x_gt[i, j], orig_x_gt[i, j]], [0, 1], color='red', label="ground truth")
                plt.plot([orig_x[i, j], orig_x[i, j]], [0, 1], color='blue', label="predicted value")
                plt.legend()
            else:
                plt.step(*(orig_prior_hists[j]), where='post', color='grey')
                plt.plot([orig_x_gt[i, j], orig_x_gt[i, j]], [0, 1], color='red')
                plt.plot([orig_x[i, j], orig_x[i, j]], [0, 1], color='blue')
            plt.xlabel(f"{c.param_names[j]}")
def show_prediction_changes():
    """Compare predicted vs. true mean CO2 per visualisation year.

    For each loader in ``dataloader.loader`` (one per ``c.dc.viz_years``
    entry — TODO confirm the loaders are ordered like viz_years) the mean
    predicted and true first parameter are plotted, together with their
    offset and year-to-year increase.
    """
    #torch.manual_seed(71)
    years = c.dc.viz_years
    param_gt_mean = []
    output_param_mean = []
    #batch_size = 512
    #for _, year in enumerate(years):
    for i, loadset in enumerate(dataloader.loader):
        year = c.dc.viz_years[i]
        #sets = dataloader.loadOCODataset(year = [year], analyze=True, noise=False)
        #loadset = dataloader.DataLoader(sets,
        #                 batch_size=batch_size, shuffle=True, drop_last=True, num_workers = 1)
        x, y, _ = data_helpers.concatenate_set(loadset, 1000)
        output = model.fn_func(y.to(c.device)).detach().cpu().numpy()
        #output = torch.mean(torch.FloatTensor(outputs), dim = 1)
        #print(output.size())
        #with torch.no_grad():
        #    output = model.fn_func(y.to(c.device)).detach().cpu()
        #errors = ht.show_error(f"{year} eval", visualize=False)
        #errors.add_error(output,x)
        #errors.print_error()
        # mean of the first parameter (CO2) over the year's samples
        param_gt = prepare_data.x_to_params(x)
        output_param = prepare_data.x_to_params(output)
        param_gt_mean.append(np.mean(param_gt[:, 0]))
        output_param_mean.append(np.mean(output_param[:, 0]))
    plt.figure("Cmp_pred_true_INN")
    plt.plot(years, param_gt_mean, label="gt")
    plt.plot(years, output_param_mean, label="prediction_INN")
    plt.legend()
    plt.title("Comparison between predicted and true CO2 concentration")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.figure("Offset_per_year_INN")
    plt.title("Offset per year")
    plt.plot(years, np.subtract(output_param_mean, param_gt_mean), label="offset")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()
    plt.figure("Increases_per_year_fn")
    plt.title("Increases per year")
    plt.plot(years[1:], np.diff(output_param_mean), label="increase prediction")
    plt.plot(years[1:], [j - i for i, j in zip(param_gt_mean[:-1], param_gt_mean[1:])], label="increase gt")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()
    # large-font duplicate panels for the shared summary figure
    plt.figure("nice_nn_sol", figsize=(14, 5))
    plt.subplot(1, 3, 2)
    plt.plot(years, param_gt_mean, label="True value")
    plt.plot(years, output_param_mean, label="Prediction ")
    plt.legend(fontsize=18)
    #plt.title("Comparison between predicted and true CO2 concentration")
    plt.title("Feedforward network", fontsize=18)
    plt.xlabel("year", fontsize=18)
    plt.ylabel("CO2 in ppm", fontsize=18)
    plt.subplot(1, 3, 3)
    plt.title("Offset per year", fontsize=18)
    plt.plot(years, np.subtract(output_param_mean, param_gt_mean), label="offset")
    plt.xlabel("year", fontsize=18)
    plt.ylabel("CO2 in ppm", fontsize=18)
    #plt.legend(fontsize = 18)
    plt.tight_layout()
def uncert(model2=model, name="CO2", position=position):
    """Plot error distribution and uncertainty calibration for ``model2``.

    Left panel: histogram of prediction errors vs. predicted uncertainties.
    Right panel: calibration (error / predicted sigma) against the unit
    gaussian. Also appends the fitted calibration mu/sigma to ``c.mu`` /
    ``c.sigma``.
    """
    x2 = model2.fn_func(y_all.to(c.device)).detach().cpu().numpy()
    # even columns: point predictions, odd columns: log-variance-style s values
    params2 = prepare_data.x_to_params(x2[:, ::2])[:, 0]
    params_gt = prepare_data.x_to_params(x_all)[:, 0]
    errors2 = params2 - params_gt
    # s -> sigma, then map to parameter units without the mean shift
    uncert2 = prepare_data.x_to_params(np.sqrt(np.exp(x2[:, 1::2])), no_mu=True)[:, 0]
    calib2 = errors2 / uncert2
    plt.figure(f"Nice_uncerts_{name}", figsize=(15, 5))
    # symmetric plot range around zero
    plot_border = max(np.max(errors2), -np.min(errors2))
    bins = np.linspace(-plot_border, plot_border, 100)
    # Error distribution
    ax = plt.subplot(1, 2, 1)
    (mu_INN, sigma_INN) = norm.fit(errors2)
    errors2_clip = np.clip(errors2, -plot_border, plot_border)
    uncert2_clip = np.clip(uncert2, -plot_border, plot_border)
    ax.set_title(rf"Error distribution, $\mu$={mu_INN:.2f}, $\sigma$={sigma_INN:.2f}", fontsize=20)
    plt.hist(errors2_clip, bins=bins, density=False, histtype='step', color="blue", label="cINN")
    plt.hist(uncert2_clip, bins=bins, density=False, histtype='step', label="uncert")
    plt.xlabel("Estimated difference to gt in ppm", fontsize=20)
    plt.ylabel("Number of estimations", fontsize=20)
    # Calibration: normalized errors should follow a unit gaussian
    ax = plt.subplot(1, 2, 2)
    bins = np.linspace(-4.5, 4.5, 100)
    iNN_bins, _, _ = plt.hist(calib2, bins=bins, histtype='step', density=True, color="blue", label="cINN")
    (mu_INN, sigma_iNN) = norm.fit(calib2)
    y_best = scipy.stats.norm.pdf(bins, 0, 1)
    xi2_iNN = 0  # chi^2 against the ideal gaussian; currently disabled
    print(xi2_iNN)
    y_INN = scipy.stats.norm.pdf(bins, mu_INN, sigma_iNN)
    plt.plot(bins, y_best, color="black", linestyle="--", linewidth=2, label="optimal gaussian")
    plt.plot(bins, y_INN, 'b--', linewidth=2, label="gaussian fit on INN")
    ax.set_title(f"mu_INN:{mu_INN:.2f}, sigma_iNN: {sigma_iNN:.2f}", fontsize=20)
    # BUG FIX: mu and sigma were appended to the swapped lists
    # (c.mu received sigma_iNN and c.sigma received mu_INN).
    c.mu.append(mu_INN)
    c.sigma.append(sigma_iNN)
    plt.legend()
    plt.xlabel("Difference to gt, depending on estimated error", fontsize=20)
    plt.ylabel("Prob. density", fontsize=20)
    plt.tight_layout()
#TODO: Show whats the error for what percentage of samples.
def main():
    """Run the full evaluation: plots, CUDA cache cleanup, save results,
    then keep figures open until the user presses enter."""
    show_predicted_error()
    show_feature_net_solution()
    show_prediction_changes()
    #uncert()
    if c.device == "cuda":
        torch.cuda.empty_cache()
        # message typo fixed (was "Empties cuda chache")
        print("Emptied cuda cache")
    plot_helpers.save_results()
    plt.show(block=False)
    input("Press enter to end")
main()
def tribonacci_rec(signature, n):
    """Return the n-th (1-indexed) tribonacci number for a 3-element signature.

    Performance fix: the naive triple recursion recomputed overlapping
    subproblems in O(3**n) time and could exhaust the recursion limit;
    this iterative form is O(n) time, O(1) extra space.

    :param signature: the first three sequence elements
    :param n: 1-based index of the requested element
    :raises ValueError: if n < 1 (the original recursed without bound here)
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    if n <= 3:
        return signature[n - 1]
    a, b, c = signature
    for _ in range(n - 3):
        a, b, c = b, c, a + b + c
    return c
def tribonacci(signature, n):
    """Return the first ``n`` tribonacci numbers for a 3-element signature.

    Performance fix: the original recomputed every element through the
    exponential recursive helper, making the whole listing worse than
    O(3**n); this builds the sequence in one O(n) pass. ``n == 0`` still
    yields an empty list and the input list is never mutated.
    """
    seq = list(signature[:n])
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2] + seq[-3])
    return seq
# print(tribonacci_rec([0.5, 0.5, 0.5], 30))
# print(tribonacci([0.5, 0.5, 0.5], 30))
def trib(signature, n):
    """Iterative tribonacci: the first ``n`` terms extending ``signature``.

    Bug fix: the original appended directly to the caller's list, mutating
    the input argument as a side effect; we now extend a private copy.
    """
    seq = list(signature)
    while len(seq) < n:
        seq.append(sum(seq[-3:]))
    return seq[:n]
print(trib([0.5, 0.5, 0.5], 30))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Hamilton Kibbe <ham@hamiltonkib.be>
# Based on render_svg.py by Paulo Henrique Silva <ph.silva@gmail.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .render import GerberContext
from operator import mul
import math
import svgwrite
from ..primitives import *
SCALE = 400.
def svg_color(color):
    """Convert an RGB triple of floats in [0, 1] to an SVG 'rgb(r, g, b)' string."""
    r, g, b = (int(channel * 255) for channel in color)
    return 'rgb(%d, %d, %d)' % (r, g, b)
class GerberSvgContext(GerberContext):
    """GerberContext backend that renders primitives into an svgwrite Drawing."""

    def __init__(self):
        GerberContext.__init__(self)
        # Gerber Y grows upward, SVG Y grows downward -> negative Y scale.
        self.scale = (SCALE, -SCALE)
        self.dwg = svgwrite.Drawing()
        self.background = False

    def dump(self, filename):
        """Write the SVG document to *filename*."""
        self.dwg.saveas(filename)

    def set_bounds(self, bounds):
        """Size the viewBox and paint the background rect once from layer bounds."""
        xbounds, ybounds = bounds
        size = (SCALE * (xbounds[1] - xbounds[0]),
                SCALE * (ybounds[1] - ybounds[0]))
        if not self.background:
            vbox = '%f, %f, %f, %f' % (SCALE * xbounds[0], -SCALE * ybounds[1],
                                       size[0], size[1])
            self.dwg = svgwrite.Drawing(viewBox=vbox)
            rect = self.dwg.rect(insert=(SCALE * xbounds[0],
                                         -SCALE * ybounds[1]),
                                 size=size,
                                 fill=svg_color(self.background_color))
            self.dwg.add(rect)
            self.background = True

    def _render_line(self, line, color):
        # FIX: map() is a lazy one-shot iterator on Python 3; materialize to
        # tuples so svgwrite receives real coordinate pairs (this matches the
        # tuple(map(...)) already used in _render_arc/_render_region).
        start = tuple(map(mul, line.start, self.scale))
        end = tuple(map(mul, line.end, self.scale))
        if isinstance(line.aperture, Circle):
            # zero-diameter apertures still need a hairline stroke
            width = line.aperture.diameter if line.aperture.diameter != 0 else 0.001
            aline = self.dwg.line(start=start, end=end,
                                  stroke=svg_color(color),
                                  stroke_width=SCALE * width,
                                  stroke_linecap='round')
            aline.stroke(opacity=self.alpha)
            self.dwg.add(aline)
        elif isinstance(line.aperture, Rectangle):
            points = [tuple(map(mul, point, self.scale)) for point in line.vertices]
            path = self.dwg.path(d='M %f, %f' % points[0],
                                 fill=svg_color(color),
                                 stroke='none')
            path.fill(opacity=self.alpha)
            for point in points[1:]:
                path.push('L %f, %f' % point)
            self.dwg.add(path)

    def _render_arc(self, arc, color):
        start = tuple(map(mul, arc.start, self.scale))
        end = tuple(map(mul, arc.end, self.scale))
        radius = SCALE * arc.radius
        width = arc.aperture.diameter if arc.aperture.diameter != 0 else 0.001
        arc_path = self.dwg.path(d='M %f, %f' % start,
                                 stroke=svg_color(color),
                                 stroke_width=SCALE * width)
        large_arc = arc.sweep_angle >= 2 * math.pi
        direction = '-' if arc.direction == 'clockwise' else '+'
        arc_path.push_arc(end, 0, radius, large_arc, direction, True)
        self.dwg.add(arc_path)

    def _render_region(self, region, color):
        points = [tuple(map(mul, point, self.scale)) for point in region.points]
        region_path = self.dwg.path(d='M %f, %f' % points[0],
                                    fill=svg_color(color),
                                    stroke='none')
        region_path.fill(opacity=self.alpha)
        for point in points[1:]:
            region_path.push('L %f, %f' % point)
        self.dwg.add(region_path)

    def _render_circle(self, circle, color):
        # FIX: tuple() for Python 3 map() laziness (see _render_line).
        center = tuple(map(mul, circle.position, self.scale))
        acircle = self.dwg.circle(center=center,
                                  r=SCALE * circle.radius,
                                  fill=svg_color(color))
        acircle.fill(opacity=self.alpha)
        self.dwg.add(acircle)

    def _render_rectangle(self, rectangle, color):
        center = tuple(map(mul, rectangle.position, self.scale))
        size = tuple(map(mul, (rectangle.width, rectangle.height), map(abs, self.scale)))
        insert = center[0] - size[0] / 2., center[1] - size[1] / 2.
        arect = self.dwg.rect(insert=insert, size=size,
                              fill=svg_color(color))
        arect.fill(opacity=self.alpha)
        self.dwg.add(arect)

    def _render_obround(self, obround, color):
        # an obround is two circles joined by a rectangle
        self._render_circle(obround.subshapes['circle1'], color)
        self._render_circle(obround.subshapes['circle2'], color)
        self._render_rectangle(obround.subshapes['rectangle'], color)

    def _render_drill(self, circle, color):
        # FIX: tuple() for Python 3 map() laziness (see _render_line).
        center = tuple(map(mul, circle.position, self.scale))
        hit = self.dwg.circle(center=center, r=SCALE * circle.radius,
                              fill=svg_color(color))
        self.dwg.add(hit)
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import numpy as np
import serial
import time
ser = serial.Serial('/dev/ttyACM0', 115200)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (480, 320)
camera.framerate = 32
camera.hflip = False
camera.vflip = True
rawCapture = PiRGBArray(camera, size=(480, 320))
# allow the camera to warm up and initialise the serial communication
time.sleep(2)
# c1/c2/c3: line centroid x position over the last three frames (newest first)
c1 = 0
c2 = 0
c3 = 0
cx = 0
cy = 0
# capture frames from the camera
# 3 windows: original video, gray video, black-white video
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array
    # only process the lower half of the frame
    cro = image[160:320, 0:480]
    im_gray = cv2.cvtColor(cro, cv2.COLOR_BGR2GRAY)
    # threshold: gray levels below 220 become black, the rest white
    retval, im_at_fixed = cv2.threshold(im_gray, 220, 255, cv2.THRESH_BINARY)
    cv2.imshow("black", im_at_fixed)
    cv2.imshow("gray", im_gray)
    # NOTE(review): OpenCV 3.x returns 3 values here; this 2-value unpacking
    # assumes OpenCV 2.x or 4.x — confirm the installed version.
    contours, hierarchy = cv2.findContours(im_at_fixed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # find the centroid of any sufficiently large contour
    area = 0
    for i in range(len(contours)):
        area = cv2.contourArea(contours[i])
        if area > 2000:
            M = cv2.moments(contours[i])
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            cv2.drawContours(cro, contours[i], -1, (0, 255, 0), 2)
    print(cx, cy)
    # shift the centroid history
    c3 = c2
    c2 = c1
    c1 = cx
    cv2.circle(cro, (c1, cy), 5, (0, 0, 255), 5)
    cv2.circle(cro, (c2, cy), 5, (0, 255, 0), 5)
    cv2.circle(cro, (c3, cy), 5, (255, 0, 0), 5)
    # BUG FIX: the original condition `abs(c1-c2)<=5&abs(c2-c3)<=5` used
    # bitwise `&`, which binds tighter than `<=`, so it parsed as the chained
    # comparison `abs(c1-c2) <= (5 & abs(c2-c3)) <= 5` — not "both deltas
    # small". Use boolean `and`.
    # NOTE(review): on Python 3, pyserial requires bytes (b'z'); these str
    # writes only work on Python 2 — confirm the target interpreter.
    if abs(c1 - c2) <= 5 and abs(c2 - c3) <= 5:
        ser.write('z')
    else:
        if c1 < c2:
            if c2 < c3:
                ser.write('d')
        elif c1 > c2:
            if c2 > c3:
                ser.write('q')
        else:
            ser.write('z')
    cv2.imshow("frame", image)
    # show the frame
    key = cv2.waitKey(1) & 0xFF
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
ser.write('i')
import numpy as np
import vertex_screen as vt
# layout: u (n_dim) f (1) n_frames * [r (3) t(3) e (n_exp-1)]
def f_id(x, m_vertex, gt_landmarks, f, rte_guess, f_blend_shape, w_reg):
    """Residual vector for optimising the identity coefficients ``u``.

    Per the layout comment above, ``rte_guess`` packs per frame
    [r (3), t (3), e (n_exp-1)]. The first expression weight is
    reconstructed so the weights sum to 1 per frame. Returns the flattened
    landmark error plus one regularisation term keeping ||u||^2 near 1.
    """
    u = x
    p = rte_guess
    e = p[:, 6:]
    # implicit first expression weight: weights sum to 1 per frame
    e = np.c_[1 - np.sum(e, axis=1), e]
    blend_shape = f_blend_shape(u)  # (dim_exp, n_v_flat)
    vertexes = e @ blend_shape  # (n_frame,n_v_flat)
    vertexes = vertexes + m_vertex
    # add w=1
    vertexes = vt.rt_multiframe(vertexes, p[:, 0:6], f)
    e_landmarks_1 = vertexes - gt_landmarks
    return np.concatenate([e_landmarks_1.flatten(), w_reg * np.array((np.sum(u ** 2) - 1,))])
def f_p_2(x, m_vertex, gt_landmarks, f, blend_shape, w_reg):
    """Pose/expression residual for a single frame.

    ``x`` packs [r (3), t (3), e (n_exp-1)]; the first expression weight is
    reconstructed so the weights sum to 1. Returns the flattened landmark
    error concatenated with the L2-regularised expression weights.
    """
    e = x[6:]
    # implicit first expression weight: weights sum to 1
    e = np.concatenate([np.array(1 - np.sum(e))[None], e])
    vertexes = e @ blend_shape  # (n_frame,n_v_flat)
    vertexes = vertexes + m_vertex
    # add w=1
    vertexes = vt.rt_multiframe(vertexes, x[None, 0:6], f)
    e_landmarks_1 = vertexes - gt_landmarks
    return np.concatenate([e_landmarks_1.flatten(), e * w_reg])
def f_p_fp(x, m_vertex, gt_landmarks, f, blend_shape, w_reg, rp, w_regrp):
    """Residual of ``f_p_2`` extended by a proximity penalty pulling ``x``
    toward the reference parameters ``rp`` (weighted by ``w_regrp``)."""
    base_residual = f_p_2(x, m_vertex, gt_landmarks, f, blend_shape, w_reg)
    prior_residual = w_regrp * (rp - x)
    return np.concatenate([base_residual, prior_residual])
def f_p_f_2(x, m_vertex, gt_landmarks, blend_shape, w_reg):
    """Like ``f_p_2`` but with the scalar ``f`` inside the optimised vector.

    ``x`` packs [r (3), t (3), f (1), e (n_exp-1)]; ``x[6]`` is the scalar
    ``f`` forwarded to ``vt.rt_multiframe`` — presumably a focal/projection
    parameter, confirm against vertex_screen.
    """
    e = x[7:]
    f = x[6]
    # implicit first expression weight: weights sum to 1
    e = np.concatenate([np.array(1 - np.sum(e))[None], e])
    vertexes = e @ blend_shape  # (n_frame,n_v_flat)
    vertexes = vertexes + m_vertex
    # add w=1
    vertexes = vt.rt_multiframe(vertexes, x[None, 0:6], f)
    e_landmarks_1 = vertexes - gt_landmarks
    return np.concatenate([e_landmarks_1.flatten(), e * w_reg])
def f_sq(x, fp, *args):
    """Scalar objective: squared L2 norm of the residual vector ``fp(x, *args)``."""
    residual = fp(x, *args)
    return np.sum(residual * residual)
|
def main():
    """Read an integer n from stdin and print n*(n-1)/2.

    (Algebraically identical to the original (n*(n+1))//2 - n; both
    products are always even, so integer division is exact.)
    """
    n = int(input())
    print(n * (n - 1) // 2)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Project: torch-Slim-Detection-Landmark
# @Author : panjq
# @E-mail : pan_jinquan@163.com
# @Date : 2020-04-03 18:38:34
# --------------------------------------------------------
"""
from __future__ import print_function
import os, sys
sys.path.append("..")
sys.path.append(os.path.dirname(__file__))
sys.path.append("../..")
sys.path.append(os.getcwd())
import argparse
import torch
import cv2
import numpy as np
from models import nets
from models.anchor_utils.prior_box import PriorBox
from models.anchor_utils import anchor_utils
from models.anchor_utils.nms import py_cpu_nms
from utils import image_processing, file_processing, torch_tools
def get_parser():
    """Build and parse the command-line arguments for the detection demo.

    Cleanup: the original assigned a first set of card-model defaults
    (image_dir/model_path/net_type/priors_type) and immediately shadowed
    every one of them with the face_person values; the dead assignments
    are removed. Parsed defaults are unchanged.
    """
    input_size = [320, 320]
    image_dir = "data/test_image"
    model_path = "work_space/RFB_face_person/RFB1.0_face_person_320_320_MPII_v2_ssd_20210624100518/model/best_model_RFB_168_loss2.8330.pth"
    net_type = "rfb"
    priors_type = "face_person"
    parser = argparse.ArgumentParser(description='Face Detection Test')
    parser.add_argument('-m', '--model_path', default=model_path, type=str, help='model file path')
    parser.add_argument('--net_type', default=net_type, help='Backbone network mobile0.25 or slim or RFB')
    parser.add_argument('--priors_type', default=priors_type, help='Backbone network mobile0.25 or slim or RFB')
    parser.add_argument('--prob_threshold', default=0.5, type=float, help='confidence_threshold')
    parser.add_argument('--iou_threshold', default=0.3, type=float, help='iou_threshold')
    parser.add_argument('--image_dir', default=image_dir, type=str, help='directory or image path')
    parser.add_argument('--input_size', nargs='+', help="--input size [600(W),600(H)]", type=int, default=input_size)
    parser.add_argument('--num_classes', help="num_classes", type=int, default=2)
    parser.add_argument('--device', default="cuda:0", type=str, help='device')
    args = parser.parse_args()
    print(args)
    return args
class Detector(object):
    """SSD-style detector wrapper: builds the network and prior boxes, loads
    checkpoint weights, and runs pre-process -> forward -> decode/NMS."""

    def __init__(self,
                 model_path,
                 net_type="RFB",
                 priors_type="face",
                 input_size=None,
                 prob_threshold=0.6,
                 iou_threshold=0.4,
                 freeze_header=True,
                 device="cuda:0"):
        """
        :param model_path: checkpoint file (.pth) to load
        :param net_type: backbone network, e.g. "RFB" / "mobile0.25" / "slim"
        :param priors_type: prior-box preset name, e.g. "face", "face_person"
        :param input_size: model input size [W, H]; defaults to [320, 320]
        :param prob_threshold: confidence threshold for keeping detections
        :param iou_threshold: IoU threshold used by NMS
        :param freeze_header: forwarded to PriorBox
        :param device: torch device string, e.g. "cuda:0" or "cpu"
        """
        self.device = device
        self.net_type = net_type
        self.priors_type = priors_type
        self.prob_threshold = prob_threshold
        self.iou_threshold = iou_threshold
        self.model_path = model_path
        self.top_k = 5000
        self.keep_top_k = 750
        # FIX: [320, 320] was a mutable default argument; a None sentinel
        # keeps the same effective default without the shared-list pitfall.
        self.input_size = [320, 320] if input_size is None else input_size
        self.freeze_header = freeze_header
        self.model, self.prior_boxes = self.build_net(self.net_type, self.priors_type)
        self.class_names = self.prior_boxes.class_names
        self.priors_cfg = self.prior_boxes.get_prior_cfg()
        self.priors = self.prior_boxes.priors.to(self.device)
        self.model = self.load_model(self.model, model_path)
        print('Finished loading model!')

    def build_net(self, net_type, priors_type, version="v2"):
        """Construct the detection network and its PriorBox.

        :param version: "v1" or "v2" network builder variant
        :return: (model, priorbox)
        """
        priorbox = PriorBox(input_size=self.input_size, priors_type=priors_type, freeze_header=self.freeze_header)
        if version.lower() == "v1".lower():
            model = nets.build_net_v1(net_type, priorbox, width_mult=1.0, phase='test', device=self.device)
        else:
            model = nets.build_net_v2(net_type, priorbox, width_mult=1.0, phase='test', device=self.device)
        model = model.to(self.device)
        return model, priorbox

    def load_model(self, model, model_path):
        """Load checkpoint weights into ``model`` and switch it to eval mode."""
        state_dict = torch_tools.load_state_dict(model_path, module=False)
        model.load_state_dict(state_dict)
        model = model.to(self.device)
        model.eval()
        return model

    # @debug.run_time_decorator("pre_process")
    def pre_process(self, image, input_size, mean=(127.0, 127.0, 127.0), std=(128.0, 128.0, 128.0)):
        """
        Resize and normalise an image into an NCHW float tensor.
        :param image: HWC image array
        :param input_size: model input size [W,H]
        :param mean: per-channel value subtracted before scaling
        :param std: per-channel divisor
        :return: image tensor of shape [1, channels, H, W]
        """
        out_image = image_processing.resize_image(image, resize_height=input_size[1], resize_width=input_size[0])
        out_image = np.float32(out_image)
        out_image -= mean
        out_image /= std
        out_image = out_image.transpose(2, 0, 1)  # HWC -> CHW
        image_tensor = torch.from_numpy(out_image).unsqueeze(0)
        return image_tensor

    def pose_process(self, output, image_size):
        """
        Decode raw network output into NMS-filtered detections.
        :param output: (bboxes, scores) from the network forward pass
        :param image_size: [W, H] of the original image, used to rescale boxes
        :return: (dets, labels); dets rows are [x1, y1, x2, y2, score]
        """
        bboxes, scores = output
        bboxes_scale = np.asarray(image_size * 2)
        # get boxes
        if not self.prior_boxes.freeze_header:
            variances = [self.prior_boxes.center_variance, self.prior_boxes.size_variance]
            bboxes = anchor_utils.decode(bboxes, self.priors, variances)
        bboxes = bboxes[0].cpu().numpy()
        scores = scores[0].cpu().numpy()
        bboxes = bboxes * bboxes_scale
        scores = scores[:, 1:]  # column 0 is the background class; no NMS needed for it
        dets, labels = py_cpu_nms.bboxes_nms(bboxes, scores,
                                             prob_threshold=self.prob_threshold,
                                             iou_threshold=self.iou_threshold,
                                             top_k=self.top_k,
                                             keep_top_k=self.keep_top_k)
        labels = labels + 1  # index+1: shift back since background column was stripped
        return dets, labels

    # @debug.run_time_decorator("inference")
    def inference(self, input_tensor):
        """Forward pass without gradients."""
        with torch.no_grad():
            input_tensor = input_tensor.to(self.device)
            # loc, conf, landms-> boxes,scores,landms
            output = self.model(input_tensor)
        return output

    # @debug.run_time_decorator("predict")
    def predict(self, rgb_image, isshow=False):
        """
        Run the full detection pipeline on one RGB image.
        :param rgb_image: HWC RGB image array
        :param isshow: display the detections when True
        :return: (dets, labels); dets: <np.ndarray> (num_boxes, 5)
        """
        shape = rgb_image.shape
        input_tensor = self.pre_process(rgb_image, input_size=self.input_size)
        output = self.inference(input_tensor)
        dets, labels = self.pose_process(output, image_size=[shape[1], shape[0]])
        if isshow:
            self.show_image(rgb_image, dets, labels)
        return dets, labels

    def detect_image_dir(self, image_dir, isshow=True):
        """
        Run detection on every image found under ``image_dir``.
        :param image_dir: directory or image file path
        :param isshow: <bool> display each result
        """
        image_list = file_processing.get_files_lists(image_dir)
        for img_path in image_list:
            image = cv2.imread(img_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # image = image_processing.resize_image(image, 800)
            self.predict(image, isshow=isshow)

    def show_image(self, image, dets, labels, landms=None):
        """
        Draw detections (and optional landmarks) and display the image.
        :param image: RGB image
        :param dets: (num_boxes, 5) rows of [x1, y1, x2, y2, score]
        :param labels: class index per box
        :param landms: optional landmark array, reshaped to (n, -1, 2)
        """
        # idiom fix: `not landms is None` -> `landms is not None`
        if landms is not None and len(landms) > 0:
            landms = landms.reshape(len(landms), -1, 2)
            image = image_processing.draw_landmark(image, landms, vis_id=False)
        print("dets:{}".format(dets))
        print("landms:{}".format(landms))
        if len(dets) > 0:
            bboxes = dets[:, 0:4]
            scores = dets[:, 4:5]
            image = image_processing.draw_image_detection_bboxes(image, bboxes, scores, labels)
        image_processing.cv_show_image("image", image)
if __name__ == '__main__':
    # Parse CLI args, build the detector, then run it over the image directory.
    args = get_parser()
    detector = Detector(args.model_path,
                        net_type=args.net_type,
                        priors_type=args.priors_type,
                        prob_threshold=args.prob_threshold,
                        iou_threshold=args.iou_threshold,
                        input_size=args.input_size,
                        device=args.device)
    detector.detect_image_dir(args.image_dir, isshow=True)
|
from __future__ import print_function
from __future__ import division
import numpy as np
from . import _preload_lattice
class Permutohedral_fast(object):
    """Thin wrapper around the compiled permutohedral-lattice implementation.

    All heavy lifting happens in ``_preload_lattice.Permutohedral_p``; this
    class only adapts the array layout (note the ``.T`` transposes — the
    native side apparently expects the transposed layout; confirm against
    the extension module).
    """

    def __init__(self, N, M, d, with_blur=True):
        # N, M, d and with_blur are forwarded verbatim to the native constructor.
        self._impl = _preload_lattice.Permutohedral_p(N, M, d, with_blur)

    def init_with_val(self, features, in_tmp, with_blur):
        """Initialise the lattice with features and initial values (transposed)."""
        return self._impl.init_with_val(features.T, in_tmp.T, with_blur)

    def apply(self, out, feature):
        """Apply the lattice filter; results go through the transposed ``out``."""
        return self._impl.apply(out.T, feature.T)
|
from flask import Blueprint
# Account blueprint: all routes below are served under the /ac prefix.
ac = Blueprint('ac', __name__, url_prefix='/ac')


@ac.route('/login')
def login():
    """Placeholder login endpoint; returns a plain-text marker."""
    return 'login'


@ac.route('/logout')
def logout():
    """Placeholder logout endpoint; returns a plain-text marker."""
    return 'logout'
from django.test import TestCase, Client
from django.urls import reverse
class Test_home_page(TestCase):
    """Smoke tests for the news-list view (URL name 'lista')."""

    def setUp(self):
        self.client = Client()
        self.url = reverse('lista')

    def tearDown(self):
        pass

    def test_status_code(self):
        """The list page responds with HTTP 200."""
        response = self.client.get(self.url)
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(response.status_code, 200)

    def test_template_used(self):
        """The list page renders the expected template."""
        response = self.client.get(self.url)
        self.assertTemplateUsed(response, 'news_list.html')
|
#!usr/bin/python
import sys
from key_generator import *
from fast_exponentiaton import *
def encrypt(data, pub_key, n):
    """RSA-style encryption: return data**pub_key mod n."""
    return pow(data, pub_key, n)
def decrypt(message=5000000):
    """Round-trip ``message`` through a freshly generated RSA key pair.

    Generalised: the plaintext 5000000 was hard-coded; it is now a defaulted
    parameter, so existing ``decrypt()`` callers are unaffected. A correct
    key pair makes the return value equal to ``message``.
    """
    pub_key, pri_key, n = generating_keys()
    cipher = encrypt(message, pub_key, n)
    decipher = pow(cipher, pri_key, n)
    return decipher
print(decrypt())
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import threading
from bs4 import BeautifulSoup
import random
import sys
import os
import re
import sqlite3
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Alignment
import yaml
import atexit
import time
import win32api
import signal
def signal_handler(signal, frame):
    """No-op SIGINT handler: once installed, Ctrl-C is silently ignored.

    Note: the parameter ``signal`` shadows the imported ``signal`` module
    inside this function body.
    """
    pass
# Install the no-op handler above: from here on Ctrl-C is ignored.
signal.signal(signal.SIGINT, signal_handler)
# Lotte duty-free search endpoint; {0} is filled with the search keyword.
search_url = 'http://chn.lottedfs.cn/kr/search?comSearchWord={0}&comCollection=GOODS&comTcatCD=&comMcatCD=&comScatCD=&comPriceMin=&comPriceMax=&comErpPrdGenVal_YN=&comHsaleIcon_YN=&comSaleIcon_YN=&comCpnIcon_YN=&comSvmnIcon_YN=&comGiftIcon_YN=&comMblSpprcIcon_YN=&comSort=RANK%2FDESC&comListCount=20&txtSearchClickCheck=Y'
targeturl = 'http://icanhazip.com/'  # URL used to verify that a proxy IP works
# make the parent directories importable
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
sys.path.append("..")
# load the scraper configuration from config.yaml
yamlPath = 'config.yaml'
_yaml = open(yamlPath, 'r', encoding='utf-8')
cont = _yaml.read()
yaml_data = yaml.load(cont, Loader=yaml.FullLoader)
# matches runs of digits (used to pull numbers out of scraped text)
pattern = re.compile('[0-9]+')
# 返回一个随机的请求头 headers
def getheaders():
    """Return a requests-style headers dict with a randomly chosen User-Agent.

    Rotating the User-Agent per request makes the scraper's traffic look less
    uniform and therefore less likely to be rate-limited or blocked.
    """
    user_agent_list = [
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36',
        'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1866.237 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36',
        'Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36',
        'Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
        'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15',
        'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14',
        'Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16',
        'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
        'Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14',
        'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',
        'Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00',
        'Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00',
        'Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00',
        'Opera/12.0(Windows NT 5.1;U;en)Presto/22.9.168 Version/12.00',
        'Mozilla/5.0 (Windows NT 5.1) Gecko/20100101 Firefox/14.0 Opera/12.0',
        'Opera/9.80 (Windows NT 6.1; WOW64; U; pt) Presto/2.10.229 Version/11.62',
        'Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.10.229 Version/11.62',
        'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',
        'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; de) Presto/2.9.168 Version/11.52',
        'Opera/9.80 (Windows NT 5.1; U; en) Presto/2.9.168 Version/11.51',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; de) Opera 11.51',
        'Opera/9.80 (X11; Linux x86_64; U; fr) Presto/2.9.168 Version/11.50',
        'Opera/9.80 (X11; Linux i686; U; hu) Presto/2.9.168 Version/11.50',
        'Opera/9.80 (X11; Linux i686; U; ru) Presto/2.8.131 Version/11.11',
        'Opera/9.80 (X11; Linux i686; U; es-ES) Presto/2.8.131 Version/11.11',
        'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/5.0 Opera 11.11',
        'Opera/9.80 (X11; Linux x86_64; U; bg) Presto/2.8.131 Version/11.10',
        'Opera/9.80 (Windows NT 6.0; U; en) Presto/2.8.99 Version/11.10',
        'Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10',
        'Opera/9.80 (Windows NT 6.1; Opera Tablet/15165; U; en) Presto/2.8.149 Version/11.1',
        'Opera/9.80 (X11; Linux x86_64; U; Ubuntu/10.10 (maverick); pl) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (X11; Linux i686; U; ja) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (X11; Linux i686; U; fr) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; sv) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; en-US) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.1; U; cs) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 6.0; U; pl) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 5.1; U;) Presto/2.7.62 Version/11.01',
        'Opera/9.80 (Windows NT 5.1; U; cs) Presto/2.7.62 Version/11.01',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101213 Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01',
        'Mozilla/5.0 (Windows NT 6.1; U; nl; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01',
        'Mozilla/5.0 (Windows NT 6.1; U; de; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.01',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; de) Opera 11.01',
        'Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (X11; Linux i686; U; it) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.6.37 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; pl) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; ko) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; fi) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1; U; en-GB) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00',
        'Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
        'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
        'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20130401 Firefox/31.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20120101 Firefox/29.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0',
        'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
        'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:27.0) Gecko/20121011 Firefox/27.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
        'Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0',
        'Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:23.0) Gecko/20131011 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.2; rv:22.0) Gecko/20130405 Firefox/22.0',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:22.0) Gecko/20130328 Firefox/22.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:22.0) Gecko/20130405 Firefox/22.0',
        'Mozilla/5.0 (Microsoft Windows NT 6.2.9200.0); rv:22.0) Gecko/20130405 Firefox/22.0',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1',
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/21.0.1',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:21.0.0) Gecko/20121011 Firefox/21.0.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (X11; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20130514 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.2; rv:21.0) Gecko/20130326 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130401 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130331 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20130330 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130401 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20130328 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130401 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20130331 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.1; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 5.0; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0',
        'Mozilla/5.0 (Windows NT 6.2; Win64; x64;) Gecko/20100101 Firefox/20.0',
        'Mozilla/5.0 (Windows x86; rv:19.0) Gecko/20100101 Firefox/19.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/19.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/18.0.1',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 7.0; InfoPath.3; .NET CLR 3.1.40767; Trident/6.0; en-IN)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
        'Mozilla/4.0 (Compatible; MSIE 8.0; Windows NT 5.2; Trident/6.0)',
        'Mozilla/4.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/5.0)',
        'Mozilla/1.22 (compatible; MSIE 10.0; Windows 3.1)',
        'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))',
        'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; Media Center PC 6.0; InfoPath.3; MS-RTC LM 8; Zune 4.7',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; InfoPath.3; MS-RTC LM 8; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; chromeframe/12.0.742.112)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 4.0; Tablet PC 2.0; InfoPath.3; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; yie8)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET CLR 1.1.4322; .NET4.0C; Tablet PC 2.0)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; FunWebProducts)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/13.0.782.215)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; chromeframe/11.0.696.57)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.1; SV1; .NET CLR 2.8.52393; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; chromeframe/11.0.696.57)',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/4.0; GTB7.4; InfoPath.3; SV1; .NET CLR 3.1.76908; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.2; SV1; .NET CLR 3.3.69573; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.8.36217; WOW64; en-US)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; .NET CLR 2.7.58687; SLCC2; Media Center PC 5.0; Zune 3.4; Tablet PC 3.6; InfoPath.3)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; Media Center PC 4.0; SLCC1; .NET CLR 3.0.04320)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 1.1.4322)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; SLCC1; .NET CLR 1.1.4322)',
        'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 3.0.04506.30)',
        'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.0; Trident/4.0; FBSMTWB; .NET CLR 2.0.34861; .NET CLR 3.0.3746.3218; .NET CLR 3.5.33652; msn OptimizedIE8;ENUS)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.2; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; Media Center PC 6.0; InfoPath.2; MS-RTC LM 8',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; Media Center PC 6.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.3; .NET4.0C; .NET4.0E; .NET CLR 3.5.30729; .NET CLR 3.0.30729; MS-RTC LM 8)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 3.0)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
        'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10',
        'Mozilla/5.0 (iPad; CPU OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko ) Version/5.1 Mobile/9B176 Safari/7534.48.3',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; de-at) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; da-dk) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; tr-TR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; ko-KR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; fr-FR) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; cs-CZ) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; zh-cn) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_5_8; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; zh-cn) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; sv-se) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; ko-kr) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; ja-jp) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; it-it) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; fr-fr) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; es-es) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-gb) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; de-de) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; sv-SE) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; hu-HU) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; de-DE) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ja-JP) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it-IT) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-us) AppleWebKit/534.16+ (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; fr-ch) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15+ (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; ar) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Android 2.2; Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-HK) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; tr-TR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; nb-NO) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; zh-cn) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5']
    # One random UA per call; the dict is passed straight to requests.get().
    headers = {'User-Agent': random.choice(user_agent_list)}
    return headers
class ConnectSqlite:
    """Thin wrapper around a sqlite3 connection (used here as the proxy-IP store).

    The connection is opened with ``check_same_thread=False`` because the
    crawler threads spawned in ``Main.getip`` share this single connection.
    """
    def __init__(self, dbName="./sqlite3Test.db"):
        """
        Open the connection -- remember to call close_con() when finished.
        :param dbName: database file name; note, it should end with '.db'
        """
        self._conn = sqlite3.connect(dbName, timeout=3, isolation_level=None, check_same_thread=False)
        self._cur = self._conn.cursor()
        # Timestamp prefix used by every log line.
        # NOTE(review): ``sqlite3.datetime`` only works because the sqlite3
        # module happens to import datetime internally; a direct
        # ``import datetime`` would be more robust -- left unchanged here.
        self._time_now = "[" + sqlite3.datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S') + "]"
    def close_con(self):
        """
        Close cursor and connection -- must be called explicitly by the owner.
        :return:
        """
        self._cur.close()
        self._conn.close()
    def create_tabel(self, sql):  # (sic: method name kept as "tabel" -- callers may depend on it)
        """
        Create a table.
        :param sql: the CREATE TABLE statement
        :return: True on success, False on error (error is printed)
        """
        try:
            self._cur.execute(sql)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[CREATE TABLE ERROR]", e)
            return False
    def drop_table(self, table_name):
        """
        Drop a table.
        :param table_name: table name (interpolated directly into the SQL --
            only pass trusted, code-controlled names)
        :return: True on success, False on error
        """
        try:
            self._cur.execute('DROP TABLE {0}'.format(table_name))
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[DROP TABLE ERROR]", e)
            return False
    def delete_table(self, sql):
        """
        Delete rows from a table; refuses statements that do not contain DELETE.
        :param sql: the DELETE statement
        :return: True or False
        """
        try:
            if 'DELETE' in sql.upper():
                self._cur.execute(sql)
                self._conn.commit()
                return True
            else:
                print(self._time_now, "[EXECUTE SQL IS NOT DELETE]")
                return False
        except Exception as e:
            print(self._time_now, "[DELETE TABLE ERROR]", e)
            return False
    def fetchall_table(self, sql, limit_flag=True):
        """
        Run a SELECT and fetch results.
        :param sql: the SELECT statement
        :param limit_flag: False fetches one row, True fetches all rows
        :return: list of rows when non-empty; a warning *string* when the
            result is empty; None when the query itself raised.
            NOTE: callers (e.g. Main.getIp) rely on this string-on-empty
            contract via isinstance checks -- do not "fix" it casually.
        """
        try:
            self._cur.execute(sql)
            war_msg = self._time_now + ' The [{}] is empty or equal None!'.format(sql)
            if limit_flag is True:
                r = self._cur.fetchall()
                return r if len(r) > 0 else war_msg
            elif limit_flag is False:
                r = self._cur.fetchone()
                return r if len(r) > 0 else war_msg
        except Exception as e:
            print(self._time_now, "[SELECT TABLE ERROR]", e)
    def insert_update_table(self, sql):
        """
        Insert or update table rows.
        :param sql: the INSERT/UPDATE statement
        :return: True on success, False on error
        """
        try:
            self._cur.execute(sql)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[INSERT/UPDATE TABLE ERROR]", e)
            return False
    def insert_table_many(self, sql, value):
        """
        Insert many rows at once.
        :param sql: parameterized INSERT statement
        :param value: list of parameter tuples: [(), ()]
        :return: True on success, False on error
        """
        try:
            self._cur.executemany(sql, value)
            self._conn.commit()
            return True
        except Exception as e:
            print(self._time_now, "[INSERT MANY TABLE ERROR]", e)
            return False
# Output workbook: use the configured path or a default; reopen an existing
# file so previous results are kept, or start a fresh workbook otherwise.
out_path = yaml_data.get('OUT_FILE_PATH')
out_path = out_path if out_path else 'data/网上最新价格.xlsx'
try:
    wb = load_workbook(out_path)
except FileNotFoundError:  # removed unused ``as e`` binding
    wb = Workbook()
# Shared proxy-IP database connection and resume bookmark (spreadsheet row).
conn = ConnectSqlite("./sqlite3Ip.db")
notes_row = 2
@atexit.register
def exit_handle():
    # Runs at interpreter exit: report progress, persist the last processed
    # row (notes.number for id=520) so the next run can resume, flush the
    # workbook and close the shared DB connection.
    print('匹配到第 {} 件商品结束'.format(notes_row))
    conn.insert_update_table('''UPDATE notes SET number={0} WHERE id={1}'''.format(notes_row, '520'))
    wb.save(out_path)
    conn.close_con()
def on_close(sig):
    # Windows console close/ctrl handler: same persistence as exit_handle,
    # then exit explicitly -- presumably because atexit handlers may not run
    # when the console window is closed (TODO confirm on target OS).
    conn.insert_update_table('''UPDATE notes SET number={0} WHERE id={1}'''.format(notes_row, '520'))
    wb.save(out_path)
    conn.close_con()
    sys.exit()
# Register the handler for console control events (Windows-only API).
win32api.SetConsoleCtrlHandler(on_close, True)
class Main:
    def __init__(self):
        # No per-instance state; all shared state lives at module level.
        pass
def getIp(self):
ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
if isinstance(ip_list, list) and len(ip_list) <= 1:
self.getip()
while True:
ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
if len(ip_list) >= 10:
break
time.sleep(10)
elif isinstance(ip_list, str):
self.getip()
while True:
ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
if len(ip_list) >= 10:
break
time.sleep(10)
if isinstance(ip_list, list) and len(ip_list) > 0:
return random.choice(ip_list)
else:
raise RuntimeError('程序异常结束')
    def requests_process(self, row, sku, commodity_name):
        # Search the shop for *sku* through a random proxy and return a
        # two-element list: [status message, price] (price 0 on failure).
        # On 403 or network errors the bad proxy is removed and the call
        # recurses with a fresh proxy.
        # NOTE(review): the recursion has no depth limit -- if every proxy
        # fails this can recurse until the pool or the stack is exhausted.
        headers = getheaders()  # randomized User-Agent headers
        ip_tuple = self.getIp()
        if not isinstance(ip_tuple, tuple):
            return ['Ip代理获取失败', 0]
        proxies = {"http": "http://" + ip_tuple[0], "https": "http://" + ip_tuple[0]}  # proxy ip
        requests.adapters.DEFAULT_RETRIES = 10
        s = requests.session()
        s.keep_alive = False
        try:
            # NOTE(review): verify=False disables TLS certificate checking.
            response = requests.get(url=search_url.format(sku), proxies=proxies, headers=headers, timeout=10, verify=False)
            if int(response.status_code) == 200:
                soup = BeautifulSoup(response.text, 'lxml')
                all_span = soup.select('#searchTabPrdList .imgType .listUl .productMd .price span')
                if len(all_span) > 1:
                    # More than one result row: ambiguous match.
                    return ['商品搜索条数错误', 0]
                elif len(all_span) == 1:
                    match = pattern.findall(all_span[0].get_text())
                    if match:
                        # Regular price found in the .price span.
                        return ['搜索成功', re.search(r'\d+(\.\d+)?', all_span[0].get_text()).group()]
                    else:
                        # No digits in the price span: fall back to the discount price.
                        all_strong = soup.select('#searchTabPrdList .imgType .listUl .productMd .discount strong')
                        return ['搜索成功', re.search(r'\d+(\.\d+)?', all_strong[0].get_text()).group()]
                else:
                    return ['商品没有搜到', 0]
            elif int(response.status_code) == 403:
                # Proxy got blocked: drop it from the pool and retry recursively.
                print('第 {0} 件商品搜索失败---sku号为:{1}--商品名称为:{2}--错误为:403错误'.format(row, sku, commodity_name))
                conn.delete_table('''DELETE FROM proxyip WHERE ip_port='{0}';'''.format(ip_tuple[0]))
                return self.requests_process(row, sku, commodity_name)
            else:
                return ['商品搜索失败', 0]
        except Exception:
            # Timeout / proxy failure: drop the proxy and retry recursively.
            print('第 {0} 件商品匹配失败---sku号为:{1}--商品名称为:{2}--错误为:超时/代理错误'.format(row, sku, commodity_name))
            conn.delete_table('''DELETE FROM proxyip WHERE ip_port='{0}';'''.format(ip_tuple[0]))
            return self.requests_process(row, sku, commodity_name)
    def process(self, rs, ws):
        # Walk the input sheet *rs* row by row, look up each SKU's online
        # price via requests_process, and append the results to output
        # sheet *ws*, saving the workbook after every row.  Progress is
        # resumable through the module-level notes_row bookmark.
        global notes_row
        start_row = 2
        # Resume point persisted in the notes table (id=520).
        start_row_list = conn.fetchall_table('''select number from notes where id = '520';''')
        if len(start_row_list) > 0 and start_row_list[0][0]:
            start_row = start_row_list[0][0]
            notes_row = start_row
        if start_row == 2:
            # Fresh run: clear the output sheet.
            # NOTE(review): deleting rows while counting up shifts the
            # remaining rows, so this loop likely removes only about half of
            # them -- verify before relying on a fully cleared sheet.
            for n in range(1, ws.max_row + 1):
                ws.delete_rows(n)
            wb.save(out_path)
        if ws.max_row <= 1:
            # Write the header row once.
            data_list = ['序号', 'sku', '品牌', '名称', '原价', '搜索结果', '网上价格']
            ws.append(data_list)
            wb.save(out_path)
        for row in range(start_row, rs.max_row + 1):
            # Column indices come from config.yaml with 1-based defaults.
            sku_column = yaml_data.get('SKU_COLUMN')  # sku
            sku_column = sku_column if sku_column else 1
            brand_column = yaml_data.get('BRAND_COLUMN')  # brand
            brand_column = brand_column if brand_column else 2
            commodity_name_column = yaml_data.get('COMMODITY_NAME_COLUMN')  # commodity name
            commodity_name_column = commodity_name_column if commodity_name_column else 3
            original_price_column = yaml_data.get('ORIGINAL_PRICE_COLUMN')  # original price
            original_price_column = original_price_column if original_price_column else 4
            sku = rs.cell(row=row, column=sku_column).value
            brand = rs.cell(row=row, column=brand_column).value
            commodity_name = rs.cell(row=row, column=commodity_name_column).value
            original_price = rs.cell(row=row, column=original_price_column).value
            if sku and original_price:
                data = self.requests_process(row, sku, commodity_name)
                print('第 {0} 件商品处理结果为:{1}---sku号为:{2}---商品名称为:{3}---表格价格:{4}---网上价格为:{5}'.format(
                    row, data[0], sku, commodity_name, original_price, data[1]))
                data_list = [row, sku, brand, commodity_name, original_price, data[0], data[1]]
                ws.append(data_list)
                wb.save(out_path)
            else:
                # Missing sku or price: record the row as invalid and move on.
                print('第 {0} 件商品匹配失败---sku号为:{1}---商品名称为:{2} --->sku非法或者价格非法'.format(row, sku, commodity_name))
                data_list = [row, sku, brand, commodity_name, original_price, 'sku非法或者价格非法', 0]
                ws.append(data_list)
                wb.save(out_path)
            notes_row = row  # advance the resume bookmark after each row
        print('总共匹配了 {0} 件商品价格'.format(notes_row))
    # ------------------------------------------- check whether a proxy IP is usable -------------------------------------------
def checkip(self, ip):
headers = getheaders() # 定制请求头
proxies = {"http": "http://" + ip, "https": "http://" + ip} # 代理ip
requests.adapters.DEFAULT_RETRIES = 3
thisIP = "".join(ip.split(":")[0:1])
try:
response = requests.get(url=targeturl, proxies=proxies, headers=headers, timeout=5)
if thisIP in response.text:
return True
else:
return False
except Exception:
return False
# ----------------------------- proxy harvesting -----------------------------
# Free proxies scraped from XiciDaili
def findip(self, type, pagenum):  # proxy type, page number
    """Scrape one XiciDaili listing page and store every working proxy.

    type: 1-4 selects the listing (https / high-anonymity / plain / foreign).
    pagenum: 1-based page index within that listing.
    Every "host:port" entry that passes checkip() is inserted into the
    proxyip table.
    """
    # NOTE(review): local name ``list`` (and ``all`` below) shadows a builtin;
    # left unchanged here.
    list = {'1': 'http://www.xicidaili.com/wn/',  # domestic https proxies
            '2': 'http://www.xicidaili.com/nn/',  # domestic high-anonymity proxies
            '3': 'http://www.xicidaili.com/nt/',  # domestic plain proxies
            '4': 'http://www.xicidaili.com/wt/'}  # foreign http proxies
    url = list[str(type)] + str(pagenum)  # listing URL for the requested page
    headers = getheaders()  # randomized request headers
    try:
        html = requests.get(url=url, headers=headers, timeout=5).text
        soup = BeautifulSoup(html, 'lxml')
        # One <tr class="odd"> per proxy entry on the listing page.
        all = soup.find_all('tr', class_='odd')
        for i in all:
            t = i.find_all('td')
            ip = t[1].text + ':' + t[2].text  # host:port
            is_avail = self.checkip(ip)
            if is_avail:
                sql = """INSERT INTO proxyip VALUES ('{0}');""".format(ip)
                print('代理Ip: {0} 插入成功'.format(ip) if conn.insert_update_table(sql) else '代理Ip: {0} 插入失败'.format(ip))
    except Exception:
        # Best-effort scrape: the site may block our current address.
        print('代理Ip请求失败,可能Ip被禁止访问,请刷新网络Ip重启exe文件')
# ----------------------- multithreaded proxy-crawl entry -----------------------
def getip(self):
    """Spawn one crawler thread per (proxy type, page): 4 types x 5 pages = 20 threads."""
    workers = [
        threading.Thread(target=self.findip, args=(kind, page))
        for kind in range(1, 5)
        for page in range(1, 6)
    ]
    for worker in workers:
        worker.start()
# ----------------------------- read the input excel -----------------------------
def readFile(self):
    """Open the configured source workbook and run the price-matching process."""
    ws = wb.active
    # Widen/center the index and product-name columns of the output sheet.
    ws.column_dimensions['A'].width = 12
    ws.column_dimensions['A'].alignment = Alignment(horizontal='center', vertical='center')
    ws.column_dimensions['C'].width = 36
    ws.column_dimensions['C'].alignment = Alignment(horizontal='center', vertical='center')
    # Input path from YAML, with a bundled default file.
    file_path = yaml_data.get('FILE_PATH')
    file_path = file_path if file_path else 'data/欧美韩免原价.xlsx'
    rb = load_workbook(file_path)
    sheets = rb.sheetnames
    sheet = sheets[0]  # first sheet holds the price data
    rs = rb[sheet]
    self.process(rs, ws)
# ----------------------------------- startup -----------------------------------
if __name__ == '__main__':
    # Ensure the proxy table exists.
    sql = '''CREATE TABLE `proxyip` (
    `ip_port` VARCHAR(25) DEFAULT NULL PRIMARY KEY
    )'''
    print('创建代理表成功' if conn.create_tabel(sql) else '创建代理表失败')
    # Ensure the resume-pointer table exists and seed it at the first data row.
    sql1 = '''CREATE TABLE `notes` (
    `id` VARCHAR(5) DEFAULT NULL PRIMARY KEY,
    `number` int(6) DEFAULT NULL
    )'''
    if conn.create_tabel(sql1):
        print('创建记录表成功')
        conn.insert_update_table('''INSERT INTO notes VALUES ('520', 2);''')
    else:
        print('创建记录表失败')
    ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
    m = Main()
    # Harvest more proxies when the pool is small, or when the query failed
    # (failures are reported as a str rather than a list).
    if isinstance(ip_list, list) and len(ip_list) <= 10:
        m.getip()
    elif isinstance(ip_list, str):
        m.getip()
    # Block until at least 5 working proxies are available.
    while True:
        ip_list = conn.fetchall_table('SELECT * FROM proxyip;')
        if isinstance(ip_list, list) and len(ip_list) >= 5:
            break
        time.sleep(3)
        if isinstance(ip_list, list):
            print('---Ip代理数量不够,正常5个,等待数量满足开始匹配,当前代理Ip个数为:({0})---'.format(len(ip_list)))
        else:
            print('---Ip代理数量不够,正常5个,等待数量满足开始匹配,当前代理Ip个数为:({0})---'.format(0))
    m.readFile()
    input('点击右上角关闭')
    # Keep the console window open after completion (packaged-exe behavior).
    while True:
        time.sleep(60)
|
from flask import Flask, send_file
import time
# Flask app serving the latest alarm snapshot and a timestamp page.
app = Flask(__name__)
@app.route('/date/')
def datePage():
    """Serve the server's current local time as plain text."""
    current = time.ctime()
    return current
@app.route('/')
def alarmPage():
    """Serve the most recent alarm snapshot image."""
    # Path is relative to the directory the server was started from.
    return send_file('./Soc_Prosjekt/CameraImage/alarmphoto.jpg', mimetype='image/jpg')
if __name__=='__main__':
    # Bind to the machine's LAN address so everyone on the network can reach the server.
    app.run(host='128.39.113.212')
|
# ECE 5725
# Michael Xiao (mfx2) and Thomas Scavella (tbs47)
# 3D scanner software
import cv2
import numpy as np
import math
from picamera import PiCamera
from time import sleep
import RPi.GPIO as GPIO
import time
import os
from gpiozero import LED
from gpiozero import PWMLED
from gpiozero import Button
import smtplib
import serial
# # pins of the stepper motor (unused: motor is now driven over serial below)
# out1 = 13
# out2 = 16
# out3 = 5
# out4 = 12
#
# # additional GPIO pins
# button = Button(23)
# led = PWMLED(18)
# placeholder variable to track status of stepper motor
i=0
# # GPIO setup
# GPIO.setmode(GPIO.BCM)
# GPIO.setup(out1,GPIO.OUT)
# GPIO.setup(out2,GPIO.OUT)
# GPIO.setup(out3,GPIO.OUT)
# GPIO.setup(out4,GPIO.OUT)
# Serial link to the external motor controller (Pi UART, 9600 baud).
ser = serial.Serial('/dev/ttyS0', 9600)
class vertex:
    """A 3-D point, serializable as a Wavefront OBJ vertex line."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def write(self):
        """Return the OBJ representation, e.g. ``v 1 2 3``."""
        return "v {0} {1} {2}".format(self.x, self.y, self.z)
class face:
    """A triangle referencing three 1-based vertex indices (OBJ convention)."""

    def __init__(self, v1, v2, v3):
        self.v1 = v1
        self.v2 = v2
        self.v3 = v3

    def write(self):
        """Return the OBJ representation, e.g. ``f 1 2 3``."""
        return "f {0} {1} {2}".format(self.v1, self.v2, self.v3)
# transforms cylindrical coordinates into rectangular coordinates
def getVertex(pCoord):
    """Convert a cylindrical-coordinate vertex (x=height, y=angle rad, z=radius)
    into a rectangular-coordinate ``vertex`` with integer components."""
    height, angle, radius = pCoord.x, pCoord.y, pCoord.z
    rect_x = radius * math.cos(angle)
    rect_y = radius * math.sin(angle)
    return vertex(int(rect_x), int(rect_y), int(height))
def order_points(pts):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left].

    Uses the classic heuristic: the top-left corner has the smallest x+y sum,
    the bottom-right the largest; the top-right has the smallest y-x
    difference, the bottom-left the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    sums = pts.sum(axis=1)
    diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(sums)]   # top-left: smallest x+y
    ordered[2] = pts[np.argmax(sums)]   # bottom-right: largest x+y
    ordered[1] = pts[np.argmin(diffs)]  # top-right: smallest y-x
    ordered[3] = pts[np.argmax(diffs)]  # bottom-left: largest y-x
    return ordered
def step(step):
    """Command the external motor controller over serial to advance ``step`` steps.

    The controller protocol is "R" followed by a zero-padded step count
    (e.g. b"R005"). NOTE(review): the parameter shadows the function name, so
    step() cannot recurse; kept for interface compatibility.
    """
    # Busy-wait until the serial port reports writable.
    while ser.writable() is not True:
        sleep(0.1)
        print("sleep")
    # Zero-pad to three digits — identical output to the old <10/<100/else
    # branches ("R005", "R042", "R400"); also drops the dead `b = 0x00` store.
    command = "R{:03d}".format(step)
    print(command)
    payload = str.encode(command)
    sleep(0.1)
    ser.write(payload)
def four_point_transform(image, pts):
    """Warp the quadrilateral described by *pts* into a top-down rectangle.

    image: source image as read by cv2.
    pts: 4x2 array of corner points in any order.
    Returns the perspective-corrected crop.
    """
    # obtain a consistent order of the points and unpack them individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # output width: the larger of the two horizontal edge lengths
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    # output height: the larger of the two vertical edge lengths
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    # destination corners of the "birds eye view", in tl, tr, br, bl order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # return the warped image
    return warped
while (1):
    # angular resolution: number of scan lines per full revolution
    numItt = 20
    # current rotation angle (degrees)
    theta = 0
    thetaInc = 360.0/numItt
    # motor position
    motorPos = 0
    motorPosI = 400.0/numItt
    # data accumulated over one full revolution
    meshPoints = []
    lineLenth = []
    # while loop for system
    # wait for start button
    # turn on status light
    while(theta < 360):
        # capture one frame of the laser line at the current angle
        camera = PiCamera()
        camera.start_preview()
        sleep(1)
        camera.capture('lineDetection.jpg')
        camera.close()
        img = cv2.imread('lineDetection.jpg')
        #---------- Preview the picture ----------------
        cv2.imshow("perspective", img)
        cv2.waitKey(0)
        #get perspective
        # tlp = (375.0,275.0)
        # trp = (1090.0,420.0)
        # brp = (1090.0,915.0)
        # blp = (375.0,1060.0)
        # pts = np.array([tlp,trp,brp,blp])
        # img = four_point_transform(img, pts)
        #---------- Preview the PERSPECTIVE picture ----------------
        cv2.imshow("perspective", img)
        cv2.waitKey(0)
        # filter: keep only pixels bright enough to be the laser line
        lowerb = np.array([0, 0, 20])
        upperb = np.array([255, 255, 255])
        #1200,1600
        red_line = cv2.inRange(img, lowerb, upperb)
        ##red_line = cv2.resize(red_line, (60,80), interpolation = cv2.INTER_AREA)
        #---------- Preview the filtered picture ----------------
        cv2.imshow("perspective", red_line)
        cv2.waitKey(0)
        #print red_line.shape
        h,w = np.shape(red_line)
        backG = np.zeros((h, w))
        #print backG
        # mark the first bright pixel of each row; remember the lowest such row
        bottomR = 0
        r = 0
        for cIndex in np.argmax(red_line, axis=1):
            if red_line[r,cIndex] != 0:
                backG[r,cIndex] = 1
                bottomR = r
            r += 1
        #---------- Preview the processed picture ----------------
        cv2.imshow("perspective", backG)
        cv2.waitKey(0)
        # convert the marked pixels to cylindrical coords (height, angle, radius)
        tempV = []
        r = 0
        centerC = 420.0 #center column
        for cIndex in np.argmax(backG,axis=1):
            if(backG[r,cIndex] == 1):
                #intvi = 0
                H = r-bottomR
                dist = cIndex - centerC
                coord = vertex(H,np.radians(theta),dist)
                tempV.append(coord)
            r += 1
        # vertical resolution: subsample the scan line down to ~20 points
        intv = 20
        intv = len(tempV)/intv
        if(len(tempV) != 0 and intv != 0):
            V = []
            V.append(tempV[0])
            for ind in range(1,len(tempV)-2):
                if(ind % intv == 0):
                    V.append(tempV[ind])
            V.append(tempV[(len(tempV)-1)])
            meshPoints.append(V)
            print(str(len(V)))
            # negated length so argmax below selects the SHORTEST line
            lineLenth.append(-1*len(V))
        theta += thetaInc
        # advance the turntable one step via the serial motor controller
        ser.reset_output_buffer()
        sleep(0.5)
        ser.write(b'R001')
        sleep(0.01)
        theta = theta + 1
        time.sleep(0.3)
    # for row in meshPoints:
    #     for coord in row:
    #         print getVertex(coord).write()
    print(lineLenth)
    # trim every scan line to the shortest one so rows can be stitched pairwise
    shortest = len(meshPoints[np.argmax(lineLenth)])
    for line in meshPoints:
        while(len(line) > shortest):
            line.pop(len(line)-2)
    # build the triangle mesh: connect each scan line to the previous one
    points = []
    faces = []
    firstRow = []
    prevRow = []
    for index in range(1,len(meshPoints[0])+1):
        points.append(getVertex(meshPoints[0][index-1]))
        firstRow.append(index)
    prevRow = firstRow
    for col in range(0,len(meshPoints)):
        if col != 0:
            indexS = prevRow[-1]
            currentRow = []
            for point in range(0,len(meshPoints[col])-1):
                # quad between this line and the previous one, as two triangles
                tl = indexS + point + 1
                bl = tl + 1
                tr = prevRow[point]
                br = prevRow[point + 1]
                f1 = face(tl,tr,bl)
                f2 = face(bl,tr,br)
                faces.append(f1)
                faces.append(f2)
                points.append(getVertex(meshPoints[col][point]))
                currentRow.append(tl)
                if(point == len(meshPoints[col])-2):
                    points.append(getVertex(meshPoints[col][point+1]))
                    currentRow.append(bl)
                # last column: also stitch back to the first row to close the loop
                if col == (len(meshPoints)-1):
                    tr = tl
                    br = bl
                    tl = firstRow[point]
                    bl = firstRow[point+1]
                    f1 = face(tl,tr,bl)
                    f2 = face(bl,tr,br)
                    faces.append(f1)
                    faces.append(f2)
            prevRow = currentRow
    #---------- debugging prints ----------------
    # for point in points:
    #     print(point.write())
    # for face in faces:
    #     print(face.write())
    # write the mesh out as a Wavefront OBJ file
    filetowrite='3d.obj'
    with open(filetowrite, 'w') as file:
        for point in points:
            file.write(point.write() + "\n")
        for f in faces:
            file.write(f.write() + "\n")
    file.close()  # NOTE(review): redundant — the with-block already closed it
    # turn status light off
    # TODO: send data online
    # TODO: clear variables
    GPIO.cleanup()
|
# coding: UTF-8
from lxml.builder import E, ET
from copy import copy
import urllib
from functools import wraps
from werkzeug.wrappers import BaseResponse
def CLASSES(*args):
    """Build an lxml attribute dict joining the given CSS class names."""
    joined = ' '.join(args)
    return {'class': joined}
# Site-wide constants.
SITE_TITLE = 'ahri.net'
SITE_URL = 'ahri.net'
AUTHOR_NAME = 'Adam Piper'
AUTHOR_EMAIL = 'adam@ahri.net'
YEAR = 2011
# Shared page skeleton: header (title + nav links), page title, content
# area, and footer. Requests work on a copy of this tree (see embed_nodes).
HTML = E.html(
    E.head(
        E.title(SITE_TITLE),
        E.meta(**{'http-equiv': 'Content-Type', 'content': 'text/html; charset=UTF-8'})
    ),
    E.body(
        E.div(
            E.div(
                E.a(SITE_TITLE, href=SITE_URL),
                id='site-title',
            ),
            E.div(id='links'),
            id='header',
        ),
        E.h1('', id='page-title'),
        E.div(id='content'),
        E.div(
            E.hr(),
            E.p(
                u'Copyright © ',
                E.a(
                    AUTHOR_NAME,
                    href='mailto:' + AUTHOR_EMAIL,
                ),
                ' ' + str(YEAR),
            ),
            id='footer',
        ),
    ),
)
# Convenience handles into the shared skeleton.
HEAD = HTML.xpath("/html/head")[0]
BODY = HTML.xpath("/html/body")[0]
HEADER = HTML.xpath("//*[@id='header']")[0]
def stylesheet(url):
    """Create a <link> element pulling in the screen stylesheet at *url*."""
    attrs = {'rel': 'stylesheet', 'type': 'text/css', 'media': 'screen', 'href': url}
    return E.link(**attrs)
def javascript(url):
    """Create a <script> element loading the javascript file at *url*."""
    attrs = {'type': 'text/javascript', 'src': url}
    return E.script('', **attrs)
def embed_nodes(flask_app, title, route, methods=None, css=[], js=[], removals=[], **kwargs):
    """
    Decorator to embed nodes inside the HTML node tree
    Pass URLs for css/js to have them added appropriately
    Pass xpaths to be removed

    NOTE(review): the mutable list defaults are shared across calls — safe
    only because they are never mutated here.
    """
    def magic(f):
        @flask_app.route(route, methods=methods, **kwargs)
        @wraps(f)
        def call(*args, **kwargs):
            # Work on a copy so the shared skeleton stays pristine per request.
            html = copy(HTML)
            content = html.xpath("//html/body/*[@id='content']")[0]
            res = f(*args, **kwargs)
            # Raw responses (redirects etc.) pass straight through untouched.
            if type(res) is BaseResponse:
                return res
            for item in res:
                content.append(item)
            if title is not None:
                html.xpath("//title")[0].text += (' - ' + title)
                html.xpath("//*[@id='page-title']")[0].text = '> %s' % title
            # Local HEAD/BODY shadow the module-level handles on purpose: they
            # must point into this request's copy of the tree.
            HEAD = html.xpath("/html/head")[0]
            BODY = html.xpath("/html/body")[0]
            for url in css:
                HEAD.append(stylesheet(url))
            for url in js:
                BODY.append(javascript(url))
            for remove in removals:
                try:
                    node = html.xpath(remove)[0]
                    node.getparent().remove(node)
                except IndexError:
                    print "Could not find %s to remove!" % remove
            # make sure all fields take UTF-8
            for element in html.xpath("//input | //textarea"):
                element.attrib['accept-charset'] = 'UTF-8'
            return '<!DOCTYPE HTML>\n%s' % ET.tostring(html, pretty_print=True)
        return call
    return magic
# Add css
for url in ['/static/css/base.css',
            ]:
    HEAD.append(stylesheet(url))
# Add google fonts
for font in ['Crimson Text', 'Droid Sans Mono', 'Lobster']:
    HEAD.append(stylesheet('http://fonts.googleapis.com/css?family=%s' % urllib.quote_plus(font)))
# Add navigation links to the header bar
for name, url in [('Blog', 'http://www.ahri.net'),
                  ('Photos', 'http://photos.ahri.net'),
                  ('Code Repos', 'http://github.com/ahri'),
                  ('Regex Guide', '/regex'),
                  ('Apostrophe Guide', '/apostrophes'),
                  ('Music', '/music'),
                  ('Login', '/login'),
                  ]:
    HEADER.xpath("//div[@id='links']")[0].append(E.a('[%s]' % name, href=url))
# Template for one blog post: title, timestamp, body.
POST = E.div(
    E.div(
        E.h4(CLASSES('text')),
        CLASSES('title'),
    ),
    E.div(CLASSES('datetime')),
    E.div(CLASSES('content')),
    CLASSES('post'),
)
# Horizontal-rule spacer placed between posts.
SPACER = E.div(
    E.hr(),
    CLASSES('spacer'),
)
# Form used to submit a new post.
POST_FORM = E.form(
    E.dl(
        E.dt('Title'),
        E.dd(E.input(type='text', name='title')),
        E.dt('Body'),
        E.dd(E.textarea('', name='body')),
        E.dt('Tags'),
        E.dd(E.input(type='text', name='tags')),
        E.dt(E.input(type='submit', value='Post')),
    )
)
# Structured CV data, rendered into HTML elsewhere.
CV = E.cv(
    E.date("2011-07-05"),
    E.person("Adam Piper",
             dob="18 January 1983",
             address="Apt. 320, 5 Ludgate Hill, Manchester, M4 4TJ",
             phone="07989572270",
             email="adam@ahri.net"),
    E.goal("Senior developer involving a high level of responsibility and team leadership duties"),
    E.profile(
        # Typo fix: was "with experiencerience in all aspects".
        E.item("13 years' experience in software development, with experience in all aspects of the project life-cycle."),
        E.item("Over 3 years' exposure to the Energy and Utilities, Oil and Gas industry."),
        E.item("Well versed in agile methodologies including Test-Driven Development, scrum, pair programming."),
        E.item("Extensive technical skill set with an advanced understanding of OO concepts and expertise in a number of development tools, including .NET, Java, SQL and many open source tools such as vim, git, svn."),
        E.item("Excellent communication skills with experience of working closely with clients and third party contractors."),
        E.item("Confident in leading teams to development, manage and deliver innovative business solutions in a timely manner with an emphasis on quality and efficiency."),
        # Typo fix: was "to compliment projects".
        E.item("Highly motivated and detail-oriented individual with excellent analytical and problem solving skills. Performs effectively under pressure and uses initiative to develop efficiency enhancing tools beyond specification to complement projects."),
    ),
    E.skillset(
        E.expert(
            E.item("Python"),
            E.item("PHP"),
            E.item("HTML"),
            E.item("SQL"),
            E.item("Linux"),
            E.item("Shell Scripting (UNIX)"),
            E.item("Data Modelling (UML)"),
            E.item("Apache Web Server"),
        ),
        E.experienced(
            E.item("C"),
            E.item("C#"),
            E.item("Java"),
            E.item("Ada"),
            E.item("OpenRoad (4GL)"),
            E.item("Ingres (ServerSQL)"),
            E.item("Javascript"),
            E.item("MS Office"),
        ),
        E.exposure(
            E.item("Perl"),
            E.item("Visio"),
            E.item("Microsoft SQL Server"),
            E.item("Visual Studio"),
            E.item("Ruby"),
            E.item("Scheme (Lisp)"),
            E.item("Haskell"),
        ),
    ),
    E.career(
        E.role("Web/Embedded Developer", begin="February 1998", end="August 2006", company="Redileads Anglesey Ltd."),
        E.role("Freelance Web Developer", begin="October 2006", end="January 2009", company="SevenPointSix.com"),
        E.role("Senior Analyst/Developer", begin="July 2007", end="Present", company="Logica"),
    ),
    E.details(
        E.job(),
    ),
    E.education(),
    E.activities(),
    E.skills()
)
|
def _ask_int_in_range(prompt, low, high):
    """Prompt repeatedly until the user enters an int in [low, high]; return it."""
    value = int(input(prompt))
    while value < low or value > high:
        value = int(input(prompt))
    return value


def main():
    """Interactively collect age, graduation credits, AH-course count, and D&D stats.

    All answers are validated against their allowed ranges; the same
    prompt-until-valid pattern is factored into _ask_int_in_range.
    """
    age = _ask_int_in_range("What is your age? ", 0, 150)
    credits = _ask_int_in_range("How many credits have your earned toward graduation (120 if done)?", 0, 120)
    arts_and_humanities_courses = _ask_int_in_range("How many of your two required AH courses have you taken?", 0, 2)
    dnd_stats = ["strength", "dexterity", "charisma", "constitution", "wisdom", "intelligence"]
    user_stats = []
    # D&D ability scores are constrained to the classic 3-18 range.
    for stat in dnd_stats:
        prompt = "What would your " + stat + " be if you were a dungeons and dragons character?"
        user_stats.append(_ask_int_in_range(prompt, 3, 18))


# Guarded so importing this module no longer triggers the interactive survey.
if __name__ == "__main__":
    main()
list_1 = [0,1,2,3,4,5,6,7,8,9]
list_2 = [0,2,3,4,7,8,12]

# Count how many values from list_1 also occur in list_2.
counter = sum(1 for value in list_1 if value in list_2)
print(counter)
def parse_args():
    """Parse the training command line; -> argparse.Namespace."""
    import argparse
    parser = argparse.ArgumentParser(description="train a darc parser.")
    # required I/O
    parser.add_argument('--verbose', '-v', action='count', help="maximum verbosity: -vv")
    parser.add_argument('--model', required=True, help="npy model file to save")
    parser.add_argument('--train', required=True, nargs='+', help="conllu files for training")
    # embeddings
    parser.add_argument('--form-w2v', help="word2vec file for form embeddings")
    parser.add_argument('--lemm-w2v', help="word2vec file for lemma embeddings")
    parser.add_argument('--w2v-is-binary', action='store_true')
    parser.add_argument('--proj', action='store_true', help="train a projective parser")
    parser.add_argument('--upos-embed-dim', type=int, default=12, help="default: 12")
    parser.add_argument('--drel-embed-dim', type=int, default=16, help="default: 16")
    parser.add_argument('--feat-embed-dim', type=int, default=32, help="default: 32")
    # network shape
    parser.add_argument('--hidden-layers', type=int, default=2, help="default: 2")
    parser.add_argument('--hidden-units', type=int, default=256, help="default: 256")
    parser.add_argument('--activation', default='relu', help="default: relu")
    parser.add_argument('--init', default='he_uniform', help="default: he_uniform")
    parser.add_argument('--embed-init-max', type=float, default=0.5, help="default: 0.5")
    parser.add_argument('--embed-const', default='unitnorm', help="default: unitnorm")
    parser.add_argument('--embed-dropout', type=float, default=0.25, help="default: 0.25")
    parser.add_argument('--hidden-const', default='none', help="default: none")
    parser.add_argument('--hidden-dropout', type=float, default=0.25, help="default: 0.25")
    parser.add_argument('--output-const', default='none', help="default: none")
    # training schedule
    parser.add_argument('--optimizer', default='adamax', help="default: adamax")
    parser.add_argument('--batch', type=int, default=32, help="default: 32")
    parser.add_argument('--epochs', type=int, default=16, help="default: 16")
    parser.add_argument('--save-for-each', action='store_true')
    args = parser.parse_args()
    # Remap CLI flag count to Keras-style verbosity:
    # none -> 0 (silent), -v -> 2 (one line per epoch), -vv+ -> 1 (progress bar).
    if not args.verbose:
        args.verbose = 0
    elif args.verbose == 1:
        args.verbose = 2
    else:
        args.verbose = 1
    return args
def make_setup(train, proj, form_w2v, lemm_w2v, w2v_is_binary, verbose):
    """-> Setup: load training conllu files and word2vec embeddings, build the Setup.

    train: list of conllu file paths.
    form_w2v / lemm_w2v: word2vec files (lemm_w2v optional).
    w2v_is_binary: whether the word2vec files are in binary format.
    """
    if verbose:
        print("training a", "projective" if proj else "non-projective", "parser")
        print("loading", *train, "....")
    import src_conllu as conllu
    # Renamed the loop variable: the original `for train in train` shadowed
    # the parameter.
    sents = [sent for train_file in train for sent in conllu.load(train_file)]
    if verbose:
        print("loading", form_w2v, "....")
    from gensim.models.keyedvectors import KeyedVectors
    form_w2v = KeyedVectors.load_word2vec_format(form_w2v, binary=w2v_is_binary)
    if lemm_w2v:
        if verbose:
            print("loading", lemm_w2v, "....")
        # BUG FIX: was KeyedVectors.load_word2vec_lemmat — no such method;
        # lemma embeddings load with the same word2vec-format reader.
        lemm_w2v = KeyedVectors.load_word2vec_format(lemm_w2v, binary=w2v_is_binary)
    else:
        lemm_w2v = None
    if verbose:
        print("preparing training data ....")
    from src_setup import Setup
    return Setup.cons(sents=sents, proj=proj, form_w2v=form_w2v, lemm_w2v=lemm_w2v)
# Script entry point: build the training setup, construct the model, train, save.
if '__main__' == __name__:
    args = parse_args()
    setup = make_setup(
        train=args.train,
        form_w2v=args.form_w2v,
        lemm_w2v=args.lemm_w2v,
        w2v_is_binary=args.w2v_is_binary,
        proj=args.proj,
        verbose=args.verbose)
    model = setup.model(
        upos_embed_dim=args.upos_embed_dim,
        drel_embed_dim=args.drel_embed_dim,
        feat_embed_dim=args.feat_embed_dim,
        hidden_units=args.hidden_units,
        hidden_layers=args.hidden_layers,
        activation=args.activation,
        init=args.init,
        embed_init_max=args.embed_init_max,
        embed_const=args.embed_const,
        embed_dropout=args.embed_dropout,
        hidden_const=args.hidden_const,
        hidden_dropout=args.hidden_dropout,
        output_const=args.output_const,
        optimizer=args.optimizer)
    if args.save_for_each:
        # Train one epoch at a time, saving a checkpoint after each.
        for epoch in range(args.epochs):
            setup.train(model, batch_size=args.batch, epochs=1, verbose=args.verbose)
            model_path = "{}-e{:0>2d}.npy".format(args.model, epoch)
            setup.save(model_path, model, with_data=False)
            if args.verbose:
                print("saved model", model_path)
    else:
        # Train all epochs in one call, then save once.
        setup.train(model, batch_size=args.batch, epochs=args.epochs, verbose=args.verbose)
        setup.save(args.model, model, with_data=False)
        if args.verbose:
            print("saved model", args.model)
|
def minimumOnStack(operations):
    """Process "push <n>" / "pop" / "min" operations on a stack; return the
    values reported by each "min" operation.

    operations: list of strings; "push <n>" carries a non-negative decimal.
    Returns the list of minimums observed at each "min" request.
    """
    ans = []
    stack = []
    # mins[0] always holds the current minimum; equal minimums are recorded
    # once per push so popping one duplicate does not lose the others.
    mins = []
    for op in operations:
        if op == "min":
            ans.append(mins[0])
        elif op == "pop":
            num = stack.pop()
            if num == mins[0]:
                mins.pop(0)
        else:  # "push <n>"
            num = int(op[5:])
            stack.append(num)
            # BUG FIX: was a strict `>` comparison, so pushing a duplicate of
            # the current minimum went unrecorded and a later pop of that
            # duplicate dropped the minimum entirely (wrong "min"/IndexError).
            if not mins or mins[0] >= num:
                mins.insert(0, num)
    return ans
def changer(String):
    """Convert a string of decimal digits to its integer value (0 for "")."""
    zero = ord("0")
    total = 0
    # Horner's scheme: same polynomial the original evaluated right-to-left
    # with explicit powers of ten.
    for ch in String:
        total = total * 10 + (ord(ch) - zero)
    return total
|
##########
#
# timesOfIndia.py
# By Aadarsha Shrestha (aadarsha.shrestha.nepal@gmail.com, aadarsha@tutanota.com)
#
# Returns the current day's headlines from Times Of India
# API: (RSS) http://timesofindia.indiatimes.com/rss.cms
#
# NOTE:
# - url_formatter(), extractor() are to be changed according to need
# - Do not change the modules: get_json(), scrapper()
#
#
# Date: 19-03-2017
#
##########
# !/usr/bin/python3
import datetime, os, requests, feedparser
SOURCE_CODE = "timesOfIndia"
'''
# Formats the Request
# Returns base URL
def url_formatter():
cur_date = time.strftime("%Y%m%d")
api_key = code['']
url = '' + cur_date + '' + cur_date + '&api-key=' + api_key
return url
'''
# Fetches JSON data and parses it
# Returns JSON object
def get_json(url):
    """Fetch *url* over HTTP and return the decoded JSON body."""
    response = requests.get(url)
    return response.json()
# Headline extractor
# Appends headlines to the caller's list
def extractor(headlines):
    """Append the current headlines from each Times Of India RSS feed to *headlines*.

    headlines: list mutated in place with one title string per news item.
    """
    feed_urls = [
        'http://timesofindia.indiatimes.com/rssfeeds/-2128936835.cms',  # National
        'http://timesofindia.indiatimes.com/rssfeeds/296589292.cms',    # World
        'http://timesofindia.indiatimes.com/rssfeeds/-2128833038.cms',  # Bangalore
    ]
    # BUG FIX: the old loop ran range(2) over a 3-element list, silently
    # skipping the Bangalore feed; iterate the whole list instead.
    for url in feed_urls:
        feed = feedparser.parse(url)
        for single_news in feed['entries']:
            headlines.append(single_news["title"])
# Module to be called from extractorRunner.py
# Returns file populated with news headlines
def scrapper():
    """Scrape today's headlines and write them to ./data/<source>/<date>/<date>.txt.

    Returns the path of the file that was written.
    """
    # Initialize headlines list
    headlines = []
    extractor(headlines)
    # Compute file path, creating the per-day directory when missing.
    today = str(datetime.date.today())
    directory = "./data/" + SOURCE_CODE + "/" + today
    if not os.path.exists(directory):
        os.makedirs(directory)
    file = directory + "/" + today + ".txt"
    # Write one headline per line.
    with open(file, "w") as tf:
        for headline in headlines:
            try:
                tf.write(headline + "\n")
            except:
                # NOTE(review): bare except silently drops headlines the
                # platform's default encoding cannot represent — presumably a
                # deliberate best-effort choice; confirm before narrowing.
                pass
    return file
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""
Modelo productor-consumidor con conditional variables
porque siempre se libera el lock después de lista.pop()
"""
import threading
from time import time, sleep, clock
from random import seed, uniform
# Global variables #
lista = list([])            # shared buffer between producer and consumer
evento = threading.Event()  # producer signals, consumer waits on this
ITERMAX = 5                 # iterations each thread performs
UNIMAX = 1                  # upper bound for the produced uniform values
##################
def consume():
    """Wait (up to 15 s) for the producer's signal, then pop and return one value.

    Returns None when the shared list is still empty after the wait.
    """
    print("[consume] Waiting...")
    # NOTE(review): time.clock was removed in Python 3.8 — this only runs on
    # older interpreters; time.perf_counter is the modern replacement.
    t1 = clock()
    evento.wait(15.0)
    t2 = clock()
    print("[consume] Time elapsed: %s " % str(t2 - t1) )
    if len(lista) > 0:
        result = lista.pop()
    else:
        print("[consume] Aucun valeur a retourner.")
        return
    #End IF#
    print("[consume] value consumed: %s" % str(result) )
    print("[consume] lista: %s" % str(lista) )
    print("[consume] End")
    return result
### END consume ###
def produce():
    """Append one uniform random value to the shared list and set the event.

    Side effects: mutates the module-level ``lista`` and signals ``evento``.
    """
    print("[produce] getting uniform value")
    value = uniform(0, UNIMAX)
    print("[produce] value: %s " % str(value))
    print("[produce] Appending value to the list.")
    lista.append(value)
    print("[produce] Value appended.")
    print("[produce] list: %s " % str(lista) )
    print("[produce] Notifying.")
    # Wake any consumer blocked in evento.wait().
    evento.set()
    print("[produce] Threads Notified.")
### END produce ###
def thread(type):
    """Run ITERMAX iterations of produce() or consume().

    type can be "producer" or "consumer".
    NOTE(review): the parameter shadows the builtin ``type``; left unchanged.
    """
    print("[thread %s] Begin" % str(type))
    for i in range(0, ITERMAX):
        print("[thread %s] Iteration: %s" % (str(type), i) )
        if (type == "producer"):
            produce()
        elif (type == "consumer"):
            consume()
    #End FOR#
### End thread ###
def main():
    """Seed the RNG and launch one producer and one consumer thread."""
    seed(time())
    workers = [
        threading.Thread(target = thread, name = "Producer", args = ("producer", )),
        threading.Thread(target = thread, name = "Consumer", args = ("consumer", )),
    ]
    for worker in workers:
        worker.start()
### End main ###
# Run the producer/consumer demo when executed as a script.
if __name__ == '__main__':
    main()
|
import arcpy
arcpy.env.overwriteOutput = True
#---------------------------------
# Spatial reference: State Plane Feet HARN83 Florida East (EPSG 2881)
#--------------------------------
SR = arcpy.SpatialReference(2881)
def get_SpatialA():
    """Check out the Spatial Analyst extension; abort the script when unavailable.

    Returns the availability string reported by arcpy.CheckExtension.
    """
    # NOTE(review): the license is checked IN before availability is tested —
    # presumably to release a stale checkout; confirm this is intended.
    arcpy.CheckInExtension("Spatial")
    # Check out the ArcGIS Spatial Analyst extension license
    availability = arcpy.CheckExtension("Spatial")
    if availability == "Available":
        arcpy.CheckOutExtension("Spatial")
    else:
        arcpy.AddError("%s extension is not available (%s)"%("Spatial Analyst Extension",availability))
        arcpy.AddError("Please ask someone who has it checked out but not using to turn off the extension")
        exit()
    return(availability)
# Geodatabases to process — one per alternative scenario, each holding
# water-level difference rasters from the NPB LECsR model.
DiffMapGDB = []
DiffMapGDB.append(r'//ad.sfwmd.gov/dfsroot/data/wsd/GIS/GISP_2012/WorkingDirectory/KAR/ModflowProcessing/PythonTools/NPALM/NPBFWOvsALT2.gdb')
DiffMapGDB.append(r'//ad.sfwmd.gov/dfsroot/data/wsd/GIS/GISP_2012/WorkingDirectory/KAR/ModflowProcessing/PythonTools/NPALM/NPBFWOvsALT5.gdb')
DiffMapGDB.append(r'//ad.sfwmd.gov/DFSRoot/data/wsd/GIS/GISP_2012/WorkingDirectory/KAR/ModflowProcessing/PythonTools/NPALM/NPBFWOvsALT10.gdb')
DiffMapGDB.append(r'//ad.sfwmd.gov/dfsroot/data/wsd/GIS/GISP_2012/WorkingDirectory/KAR/ModflowProcessing/PythonTools/NPALM/NPBFWOvsALT12.gdb')
DiffMapGDB.append(r'//ad.sfwmd.gov/dfsroot/data/wsd/GIS/GISP_2012/WorkingDirectory/KAR/ModflowProcessing/PythonTools/NPALM/NPBFWOvsALT13.gdb')
# Acquire the Spatial Analyst license before any raster math below.
get_SpatialA()
#--------------------------------
# Loop through list of geodatabases each with raster layers representing water level differences from the NPB LECsR model
#
for dMap in DiffMapGDB:
print dMap
outGDB = dMap
tempASCII = "h:/RasterT_rasterc1.TXT"
tempPoly = outGDB + '/TempPoly'
#--------------------------------
# Loop through list of Rasters in the current geodatabase and convert it to a polygon feature class
#--------------------------------
arcpy.env.workspace = dMap
fRasList = arcpy.ListRasters()
for f in fRasList:
print f
rasFloat = arcpy.Raster(f)
rasFloat = arcpy.sa.Times(rasFloat,100)
arcpy.RasterToASCII_conversion(rasFloat, out_ascii_file=tempASCII)
iRas = outGDB +'/' + f
iPoly = outGDB + '/' + f + '_poly'
arcpy.ASCIIToRaster_conversion(in_ascii_file=tempASCII,out_raster=iRas,data_type="INTEGER")
arcpy.RasterToPolygon_conversion(in_raster=iRas,out_polygon_features=tempPoly,raster_field="Value")
arcpy.Dissolve_management(in_features=tempPoly, out_feature_class=iPoly, dissolve_field="gridcode", statistics_fields="",
multi_part="MULTI_PART", unsplit_lines="DISSOLVE_LINES")
arcpy.AddField_management(iPoly,"WL_diff","FLOAT")
arcpy.CalculateField_management(in_table=fc, field="WL_diff",
expression="!gridcode! / 100.0",
expression_type="PYTHON_9.3", code_block="") |
import os
import json
import pandas as pd
from docx import Document
import openpyxl
from openpyxl import load_workbook
from pathlib import Path
from xlrd import open_workbook
from xlutils.copy import copy
import xlsxwriter
class TempleteGenerator():
def __init__(self):
self.Keydictionary = ["REPLACE_NAME" , "REPLACE_LASTNAME", "REPLACE_CITY", "REPLACE_DIRECTION" , "REPLACE_POSSTALCODE" , "REPLACE_PHONENUM", "REPLACE_SOCIALCODE",
"REPLACE_EMAIL" , "REPLACE_COUNTRY" , "REPLACE_CARDNUM" , "REPLACE_EXPIRATION" , "REPLACE_CVV" , "REPLACE_ACTIVES" , "REPLACE_PASIVES" ,
"REPLACE_HERITAGE" , "REPLACE_COMPANY" , "REPLACE_DATE" , "REPLACE_ACTIVITY" , "REPLACE_RUC" , "REPLACE_ACCOUNT" , "REPLACE_POSITION",
"REPLACE_INSTITUTION" , "REPLACE_PERIOD" , "REPLACE_ABA" , "REPLACE_SWIFT"]
self.json_data = {"REPLACE_NAME":"NOMBRE" , "REPLACE_LASTNAME":"APELLIDO", "REPLACE_CITY":"CIUDAD", "REPLACE_DIRECTION":"DIRECCION" , "REPLACE_POSSTALCODE":"CODIGO POSTAL", "REPLACE_PHONENUM":"TELEFONO", "REPLACE_SOCIALCODE":"SEGURO SOCIAL",
"REPLACE_EMAIL":"MAIL" , "REPLACE_COUNTRY":"PAIS" , "REPLACE_CARDNUM":"CARD NUMBER" , "REPLACE_EXPIRATION":"CADUCIDAD" , "REPLACE_CVV":"CVV" , "REPLACE_ACTIVES":"ACTIVOS" , "REPLACE_PASIVES": "PASIVOS" ,
"REPLACE_HERITAGE":"PATRIMONIO" , "REPLACE_COMPANY":"EMPRESA" , "REPLACE_DATE":"FECHA" , "REPLACE_ACTIVITY":"ACTIVIDAD" , "REPLACE_RUC":"RUC" , "REPLACE_ACCOUNT":"CUENTA" , "REPLACE_POSITION": "CARGO",
"REPLACE_INSTITUTION": "INSTITUCION" , "REPLACE_PERIOD":"PERIODO" , "REPLACE_ABA":"ABA" , "REPLACE_SWIFT":"SWIFT"}
self.db =pd.read_csv('DP_Ecuador_10.csv')
self.file = open("DP_Ecuador_10.csv")
self.data_count = len(self.file.readlines()) - 1
self.file.close()
self.Generated_folder_name = "\\Generated"
self.path = str(os.getcwd() + self.Generated_folder_name)
if not os.path.isdir(os.getcwd() + self.Generated_folder_name):
os.mkdir(os.getcwd() + self.Generated_folder_name)
def replacedParograph(self , parograph , key , data_to_replace):
try:
replaced = parograph.replace(key , data_to_replace)
except TypeError as _:
replaced = ""
return replaced
def get_Data(self , i , key):
return str(self.db[self.json_data[key]].iloc[i])
def isfloat(self , value):
try:
float(value)
return True
except ValueError:
return False
def replaceContentXls(self ,documentName ):
for i in range(self.data_count):
document = load_workbook(documentName)
for sheet in document._sheets:
for row in range(1,sheet.max_row + 1):
for col in range(1,sheet.max_column + 1):
par = str(sheet.cell(row,col).value)
if par == 'None':
par = ""
for key in self.Keydictionary:
if key in par:
data_to_replace = self.get_Data(i , key)
par = self.replacedParograph(par , key , data_to_replace )
else:
if self.isfloat(par):
float_value = float(par)
sheet.cell(row, col).value = float_value
else:
sheet.cell(row, col).value = par
if self.isfloat(par):
float_value = float(par)
sheet.cell(row, col).value = float_value
else:
sheet.cell(row, col).value = par
path, file = os.path.split(documentName)
file_name , file_extension = os.path.splitext(file)
path = Path(path)
path = str(path.parent)
document.save(self.path + "\\"+ file_name + "_" + str(i) + file_extension)
def replaceContentDoc(self , documentName ):
for i in range(self.data_count):
document = Document(documentName)
for paragraph in document.paragraphs:
for key in self.Keydictionary:
if key in paragraph.text:
data_to_replace = self.get_Data(i , key)
paragraph.text = self.replacedParograph(paragraph.text , key , data_to_replace)
for table in document.tables:
for row in table.rows:
for cell in row.cells:
for paragraph in cell.paragraphs:
for key in self.Keydictionary:
if key in paragraph.text:
data_to_replace = self.get_Data(i , key)
paragraph.text = self.replacedParograph(paragraph.text , key , data_to_replace)
path, file = os.path.split(documentName)
file_name , file_extension = os.path.splitext(file)
path = Path(path)
path = str(path.parent)
document.save(self.path + "\\"+ file_name + "_" + str(i) + file_extension)
|
# Dictionary demo: build the mapping entry by entry.
houses = {}
houses["Jesus"] = "Salvation"
houses["Success"] = "Divine Help + Human Effort"
houses["Peace"] = "Jesus"
print(f"My peace is in {houses['Peace']}")
import pandas as pd
from datetime import datetime
from commons.csv_writer import CSVWriter
from driver.webdriver import WebDriver
from selenium.webdriver.common.keys import Keys
import random
from time import sleep
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
import constants
import id
def login(driver):
    """Sign in to LinkedIn with credentials from the environment.

    Reads the ``email`` and ``password`` environment variables, types them
    into the sign-in form and submits, pausing between steps so the page
    can keep up.
    """
    find = driver.find_element
    find(id.login_email, 'css_selector').send_keys(os.environ.get('email'))
    sleep(2)
    find(id.login_password, 'css_selector').send_keys(os.environ.get('password'))
    sleep(2)
    find(id.signin_button).click()
    sleep(5)
def getUserDetails(i, link):
    """Scrape name, description and contact info from the profile at *link*.

    Writes the scraped fields into row *i* of the module-level ``df`` and
    persists the CSV via ``connectionsCSV``.

    NOTE(review): relies on module globals (``dr``, ``df``,
    ``connectionsCSV``) rather than parameters — callable only from this
    script's main flow.

    Fixes: the two bare ``except:`` clauses were narrowed to
    ``except Exception`` so SystemExit/KeyboardInterrupt are no longer
    swallowed while scraping contact details.
    """
    name = dr.find_element(id.name, 'class_name').text
    description = dr.find_element(id.description, 'class_name').text
    dr.load_url(link + 'detail/contact-info/')
    try:
        phone = dr.find_element(id.phone_number, 'class_name').text
        # Drop the leading label token, then strip the type suffixes.
        phone = ''.join(phone.split()[1:])
        phone = phone.replace('(Mobile)', '')
        phone = phone.replace('(Home)', '')
    except Exception:  # profile may expose no phone number
        phone = ""
    try:
        email = dr.find_element(id.email, 'class_name').text
        email = email.split()[1]
    except Exception:  # profile may expose no email address
        email = ""
    df.at[i, "Description"] = description
    df.at[i, "Email Address"] = email
    df.at[i, "Phone Number"] = phone
    df.at[i, 'Attempt Date'] = datetime.today().date()
    df.at[i, 'Profile link'] = link
    print("###########################")
    print("Name: " + name)
    print("Description: " + description)
    print("Email: " + email)
    print("Phone: " + phone)
    print()
    connectionsCSV.save_csv()
if __name__ == "__main__":
    # Boot the browser and log in before touching any profiles.
    dr = WebDriver()
    dr.load_url('https://www.linkedin.com/')
    connectionsCSV = CSVWriter(constants.CSV_FileName)
    login(dr)
    # Randomized daily cap on how many profiles to scrape this run.
    limit=random.randint(constants.User_MIN_WAIT,constants.User_MAX_WAIT)
    count=int(connectionsCSV.totalToday())
    df=connectionsCSV.read_csv()
    for i in range(len(df)):
        try:
            if count>limit:
                break
            # Only rows not yet attempted and without a recorded error.
            if pd.isnull(df.loc[i,'Attempt Date']) and not 'Error' in df.loc[i,'Message']:
                # Build the search query; 'nan' fragments come from empty
                # CSV cells concatenated as strings.
                query=df.loc[i,'First Name']+' '+df.loc[i,'Last Name']+' '+df.loc[i,'Company']
                query=query.replace('nan', '')
                dr.find_element(id.search).clear()
                dr.find_element(id.search).send_keys(query)
                sleep(2)
                dr.find_element(id.search).send_keys(Keys.ENTER)
                # Randomized waits to look less like a bot.
                sleep(random.randint(5, 10))
                dr.find_element(id.profile_link_search,'class_name').click()
                sleep(random.randint(7, 15))
                getUserDetails(i,dr.currentURL)
                count += 1
                sec=random.randint(constants.User_MIN_WAIT,constants.User_MAX_WAIT)
                print("Sleep for " + str(sec) + " seconds")
                sleep(sec)
        except KeyboardInterrupt:
            # Manual stop: report progress and exit the loop cleanly.
            print('Thank You')
            print(f"Total {count} user scrapped successfully today")
            break
        except Exception as e:
            # Record the failure in the CSV row so it is skipped next run.
            df.at[i,'Message']='Error: '+str(e)
            print(e)
|
import sqlite3

# DDL for the two application tables; IF NOT EXISTS makes the script
# safe to re-run against an existing database file.
TABLE_DDL = (
    'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY,username text,password text)',
    'CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY,name text,price real)',
)

connection = sqlite3.connect('data.db')
cursor = connection.cursor()
for ddl in TABLE_DDL:
    cursor.execute(ddl)
connection.commit()
connection.close()
# Convert a length read in metres to centimetres and report both values.
m = float(input("Digite uma medida em metros: "))
cm = 100 * m
print(m, "metros equivalem a", cm, "centímetros")
|
"""
Burak Himmetoglu, 2019
Testers for elasticnet regression
"""
from sklearn.datasets import make_regression
from mlbook.utils.losses import mse
from mlbook.linear_regression.enet import *
from mlbook.utils.scalers import standardize
from mlbook.linear_regression.common import init_coef
if __name__ == "__main__":
    # Synthetic regression problem: 500 samples, 5 standardized features.
    n_samples, n_features = 500, 5
    X, Y = make_regression(n_samples=n_samples, n_features=n_features, random_state=1)
    X = standardize(X)

    # ElasticNet via coordinate descent, warm-started from univariate fits.
    beta0_init, beta_init = init_coef(X, Y)
    fit_summary = fit(
        X, Y,
        lam=0.01, al=0.5, tol=1e-6, maxIter=500,
        initBeta0=beta0_init, initBeta=beta_init,
    )
    print(fit_summary)

    # In-sample predictions and error.
    Yhat = predict(X, fit_summary['beta0Hat'], fit_summary['betaHat'])
    mse_ = mse(Y, Yhat)
    print("ElasticNet coord. descent: MSE = {:.4f}".format(mse_))
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import ButtonHolder, Submit
from crispy_forms_foundation.layout import Column, Fieldset, Layout, Row
from django import forms
from bpp.util import formdefaults_html_after, formdefaults_html_before
from import_list_if.models import ImportListIf
class NowyImportForm(forms.ModelForm):
    """Upload form for a new IF-list import: the XLS file and the year."""

    class Meta:
        model = ImportListIf
        fields = ["plik_xls", "rok"]

    def __init__(self, *args, **kwargs):
        # crispy-forms helper controls rendering of the whole form.
        self.helper = FormHelper()
        self.helper.form_class = "custom"
        self.helper.form_action = "."

        parameters = Fieldset(
            "Wybierz parametry",
            formdefaults_html_before(self),
            Row(
                Column("plik_xls", css_class="large-12 small-12"),
                Column("rok", css_class="large-12 small-12"),
            ),
            formdefaults_html_after(self),
        )
        submit_row = ButtonHolder(
            Submit(
                "submit",
                "Utwórz import",
                css_id="id_submit",
                css_class="submit button",
            ),
        )
        self.helper.layout = Layout(parameters, submit_row)

        super().__init__(*args, **kwargs)
|
import turtle

# Black canvas with 0-255 RGB colour components.
turtle.Screen().bgcolor("black")
turtle.Screen().colormode(255)

# First turtle: walk up and across, then stamp at each corner of a square.
turtle_stamp = turtle.Turtle()
turtle_stamp.shape('turtle')
turtle_stamp.color(121, 186, 78)
turtle_stamp.penup()
turtle_stamp.left(90)
turtle_stamp.forward(100)
turtle_stamp.right(90)
turtle_stamp.forward(100)
turtle_stamp.stamp()
for _ in range(3):
    turtle_stamp.left(90)
    turtle_stamp.forward(100)
    turtle_stamp.stamp()

# Second turtle: stamp a rainbow trail moving left to right.
turtle_stamp = turtle.Turtle()
turtle_stamp.shape('turtle')
turtle_stamp.penup()
turtle.colormode(255)
turtle_stamp.back(300)

rainbow = [
    (235, 73, 113),   # red
    (237, 109, 24),   # orange
    (252, 231, 91),   # yellow
    (57, 191, 23),    # green
    (23, 143, 191),   # blue
    (183, 96, 214),   # violet
]
for shade in rainbow:
    turtle_stamp.forward(30)
    turtle_stamp.color(shade)
    turtle_stamp.stamp()
|
"""
Module providing flow control in simulation
"""
import random
import logging
from time import time
import copy
import cPickle
class Simulation(object):
    """Drive the simulation loop: pick agents, let them interact, snapshot state.

    NOTE(review): Python 2 code (``cPickle``, ``xrange``); it must be
    ported before running under Python 3.
    """
    # Class-level environment registries (not used in the visible code).
    global_environment = None
    environments = {}

    def __init__(self, graph = None, interaction = None, agents = None):
        """Store the topology (*graph*), the *interaction* rule object and
        the population of *agents* (frozen into a tuple)."""
        self.graph = graph
        self.interaction = interaction
        self.agents = tuple(agents)
        self.statistic = []  # list of (iteration, deep-copied agents) snapshots

    def dump_results(self, iter_num):
        """Snapshot all agents at iteration *iter_num*.

        Appends the (iter_num, deep copy of agents) pair to
        ``self.statistic`` and pickles it to ``<iter_num>.pout``.
        """
        cc = copy.deepcopy(self.agents)
        kr = (iter_num, cc)
        self.statistic.append(kr)
        f = open(str(iter_num)+".pout", "wb")
        cPickle.dump(kr, f)
        f.close()

    def run(self, iterations = 1000, dump_freq = 10):
        """Run the simulation.

        iterations -- number of interaction rounds to perform
        dump_freq  -- snapshot the population every *dump_freq* rounds

        Returns the accumulated ``self.statistic`` list of snapshots.
        """
        start_time = time()
        logging.info("Simulation start...")
        n = len(self.agents)  # NOTE(review): unused in the visible body
        self.dump_results(0)
        for i in xrange(iterations):
            if self.interaction.num_agents() == 2:
                # Pairwise rule: a random agent interacts with one of its
                # graph neighbours; both record their results.
                a = random.choice(self.agents)
                b = self.graph.get_random_neighbour(a)
                r1, r2 = self.interaction.interact(a, b)
                a.add_inter_result(r1)
                b.add_inter_result(r2)
            else :
                # Single-agent rule.
                a = random.choice(self.agents)
                r = self.interaction.interact(a)
                a.add_inter_result(r)
            if (i+1) % dump_freq == 0:
                self.dump_results(i+1)
        logging.info("Simulation end. Total time: "+str(time()-start_time))
        return self.statistic
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.