text stringlengths 8 6.05M |
|---|
from ml import *
def main():
    """Dump Shift-JIS strings from the Trails in the Sky FC executable dump to JSON.

    Reads "<hex addr> <text>" lines from ed6_fc_text2.txt, seeks to each
    address inside the dumped binary, re-reads the original Shift-JIS string
    found there, and writes all entries to ed6_fc_text2.json.
    """
    # NOTE(review): fileio/console/Try/OrderedDict come from `from ml import *`,
    # an opaque helper library; behavior is inferred from the call sites here.
    fs = fileio.FileStream(r"D:\Game\Steam\steamapps\common\Trails in the Sky FC\ed6_win_dump")
    lines = []
    # xml/root mirror the (commented-out) xmltodict export format below.
    xml = OrderedDict()
    root = OrderedDict()
    xml['ed6fc'] = root
    root['text'] = lines
    for l in fileio.readLines(r'ed6_fc_text2.txt'):
        if not l:
            continue
        # Each input line: "<hex address> <text>".
        addr, text = l.split(' ', 1)
        # addr, *_, length = addr.split(' ')
        addr = int(addr, 16)
        # addr -= 0x400000
        fs.Position = addr
        original = fs.ReadMultiByte('shiftjis')
        lines.append(OrderedDict([
            ('rva', '%08X' % addr),
            ('original', original),
            # Translation starts out as a copy of the original text.
            ('translation', original),
            # ('original', {'#text': '%s' % fs.ReadMultiByte('shiftjis')}),
            # ('translation', {'#text': ''}),
        ]))
    # lines[:] = sorted(lines, key = lambda e: int(e['rva'], 16))
    # open('ed6_fc_text.xml', 'wb').write(xmltodict.unparse(xml, pretty = True, indent = ' ').encode('utf_8_sig'))
    open('ed6_fc_text2.json', 'wb').write(json.dumps(lines, ensure_ascii = False, indent = ' ').encode('utf_8_sig'))
    console.pause('done')
if __name__ == '__main__':
    Try(main)
|
"""
Purpose of this script is to extract the list of pdb files required for download.
Output of this script are the downloaded pdb files from the chosen pdb website.
"""
# STEP 1 — necessary packages
import os
from selenium import webdriver
import time
import csv

# STEP 2 — collect the RNA/protein identifiers to download.
main_folder = r'D:\PHML B factor estimation\02 Project'
folder = r'00 Basic information'
sources = ["Train_list", "Test_list"]
RNAproteins = []
for src in sources:
    pathway = main_folder + "\\" + folder + "\\" + src
    # Context manager guarantees the handle is closed
    # (the original `open(pathway).read()` leaked it).
    with open(pathway, "r") as fh:
        RNAproteins.extend(fh.read().split("\n"))

# The PDB website only accepts protein names such as 1asy instead of the
# original 1asy_R, so drop empty/short entries and strip the "_chain" suffix.
RNAproteins = [protein for protein in RNAproteins if len(protein) > 1]
RNAproteins_clean = [protein.split("_")[0] for protein in RNAproteins]

# STEP 3 — web scraping: download each .pdb through the browser.
# PDB download URL: https://files.rcsb.org/download/<protein name>.pdb
website = r"https://files.rcsb.org/download/"
for protein in RNAproteins_clean:
    full_website = website + protein + ".pdb"
    driver = webdriver.Chrome()
    driver.get(full_website)
    time.sleep(15)  # give the browser time to finish the download
    driver.close()

# STEP 4 — list the pdb files that actually arrived on disk.
folder = r'01 Download and extract\01 Raw PDB files'
downloaded_files = []
for file in os.listdir(main_folder + "\\" + folder):
    if file.endswith(".pdb"):
        downloaded_files.append(file.split(".")[0])

# STEP 5 — report identifiers that were requested but never downloaded.
missing_files = [f for f in RNAproteins_clean if f not in downloaded_files]
with open("Missing pdb.csv", 'w') as myfile:
    writer = csv.writer(myfile, lineterminator='\n')
    for val in missing_files:
        writer.writerow([val])
|
#import sys
#input = sys.stdin.readline
Q = 10**9 + 7
def main():
N = int( input())
A = list( map( int, input().split()))
T = [[0]*60 for _ in range(N)]
P = [0]*60
for i in range(N):
a = A[i]
for j in range(60):
T[i][j] = a%2
P[j] += a%2
a //= 2
ans = 0
for i in range(60):
plus = P[i]
minus = N - plus
ans = (ans + plus*minus%Q*pow(2,i,Q))%Q
print(ans)
if __name__ == '__main__':
main()
|
from PIL import Image
import requests
from io import BytesIO
# Company UUID provided to you
COMPANY_ID = 9
# Some sample token. Instead replace with the token returned by the
# authentication endpoint.
# NOTE(review): a hard-coded bearer token is a credential committed to source
# control — prefer loading it from an environment variable or secrets store.
JWT_TOKEN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0IiwiaWFkIjoxLCJhY3AiOm51bGwsInRicCI6bnVsbCwiaWF0IjoxNTg4MTk1MjA1fQ.A6QVTjGLaYAwxOYN0khYxls1_xf6hHHb4VSg5nqZsVc'
def make_read_request(what_to_select, table, conditions):
    """Issue a GET against the query/read endpoint and return the response.

    what_to_select -- comma-separated column list to fetch
    table          -- name of the table to read from
    conditions     -- SQL-style condition string the rows must satisfy
    """
    # Web API host URL (change if needed)
    # BASE_HOST_URL = 'http://127.0.0.1:8000'
    BASE_HOST_URL = 'http://www.appspesa.it/api'
    ENDPOINT_ROUTE = '/v1/query/read'
    # Bearer-token authorization header.
    headers = {"Authorization": "Bearer " + JWT_TOKEN}
    # Query-string parameters understood by the endpoint.
    params = {
        'what_to_select': what_to_select,
        'which_table': table,
        'conditions_to_satisfy': conditions,
    }
    target = BASE_HOST_URL + ENDPOINT_ROUTE
    return requests.get(target, headers=headers, params=params)
# Fetch this company's profile row from the company table
# (the original comment claimed "products" — it reads company info).
r = make_read_request('Company_Name, about, Address1, Address2, latitude, longitude',
                      'company', 'Company_ID = ' + str(COMPANY_ID))
# If the request was successful
if r.status_code == 200:
    # First matching row holds this company's info.
    company_info = r.json()['rows'][0]
    # Print list containing company info
    print(company_info)
|
from djangorestframework.renderers import TemplateRenderer
from shopback.base.renderers import BaseJsonRenderer
class AsyncPrintHtmlRenderer(TemplateRenderer):
    """
    Renderer that serves the async print-commit page as HTML.
    (The original docstring claimed "serializes to JSON", which contradicted
    the text/html media type below.)
    """
    # Content type advertised for the rendered response.
    media_type = 'text/html'
    format = 'html'
    # Template used to render the async print-commit task page.
    template = 'asynctask/async_print_commit.html'
from _typeshed.wsgi import StartResponse, WSGIApplication, WSGIEnvironment
from collections.abc import Iterable
from datetime import datetime
from depot.io.interfaces import StoredFile
class FileServeApp:
    """Type stub: WSGI app serving a single StoredFile with caching/ETag support."""
    file: StoredFile
    filename: str
    last_modified: datetime
    content_length: int
    content_type: str
    cache_expires: int
    replace_wsgi_filewrapper: bool
    def __init__(self, storedfile: StoredFile, cache_max_age: int, replace_wsgi_filewrapper: bool = False) -> None: ...
    def generate_etag(self) -> str: ...
    def parse_date(self, value: str) -> datetime: ...
    @classmethod
    def make_date(cls, d: datetime | float) -> str: ...
    def has_been_modified(self, environ: WSGIEnvironment, etag: str, last_modified: datetime) -> bool: ...
    def __call__(self, environ: WSGIEnvironment, start_response: StartResponse) -> Iterable[bytes]: ...
class DepotMiddleware:
    """Type stub: WSGI middleware that mounts depot file serving under *mountpoint*."""
    app: WSGIApplication
    mountpoint: str
    cache_max_age: int
    replace_wsgi_filewrapper: bool
    def __init__(self, app: WSGIApplication, mountpoint: str = "/depot", cache_max_age: int = 604800, replace_wsgi_filewrapper: bool = False) -> None: ...
    def url_for(self, path: str) -> str: ...
    def __call__(self, environ: WSGIEnvironment, start_response: StartResponse) -> Iterable[bytes]: ...
|
def js_tag(url):
    """Return an HTML <script> tag whose src attribute is *url*."""
    tag = "<script src=\"%s\"></script>" % url
    return tag
def css_tag(url):
    """Return an HTML stylesheet <link> tag whose href attribute is *url*."""
    tag = "<link href=\"%s\" rel=\"stylesheet\">" % url
    return tag
def js_asset_tag(base_url, path):
    """Script tag for a JS file located under <base_url>/static/js/."""
    asset = "%s/static/js/%s" % (base_url, path)
    return js_tag(asset)
def css_asset_tag(base_url, path):
    """Stylesheet tag for a CSS file located under <base_url>/static/css/."""
    asset = "%s/static/css/%s" % (base_url, path)
    return css_tag(asset)
def url(base_url, path):
    """Join *base_url* and *path* with a single slash."""
    joined = "%s/%s" % (base_url, path)
    return joined
# shortcut
def home_url(base_url):
    """Shortcut: URL of the site index page."""
    return url(base_url, "index.html")
def archives_url(base_url):
    """Shortcut: URL of the archives page."""
    return url(base_url, "archives.html")
# youtube
def youtube_tag(id, width=420, height=315):
    """Return an embedded-player <iframe> for a YouTube video.

    *id* may be a bare video id or a full watch URL, in which case the
    v= query parameter is extracted.

    Fixed: the embed URL scheme was "http//" (missing colon), which browsers
    treat as a relative path rather than an absolute URL.
    """
    if id.startswith("http"):
        id = _extract_query(id, "v")
    return "<iframe width=\"%s\" height=\"%s\" src=\"http://www.youtube.com/embed/%s\" frameborder=\"0\" allowfullscreen></iframe>" % (width, height, id)
def _extract_query(url, key):
pos = url.find("?")+1
qs = url[pos:].split("=")
print qs
for k,v in qs:
if key == k:
return v
return ""
def string2cls(s):
    """Lower-case *s* and remove every space character."""
    return s.replace(" ", "").lower()
|
# palindrome_recursive.py asks the user for a string and determines if it is palindromic using a recursive function
# import function for cleaning strings
from string_cleaner import strip_whitespace_and_punctuation_and_make_lowercase
def check_if_palindrome_recursively(phrase):
    """Return True if the string is a palindrome, checking recursively.

    Case is normalized and whitespace/punctuation stripped (via the imported
    cleaner) before each comparison step.
    """
    phrase = phrase.lower()
    phrase = strip_whitespace_and_punctuation_and_make_lowercase(phrase)
    # Empty and single-character strings are palindromes by definition.
    if len(phrase) <= 1:
        return True
    # Mismatched outer characters: not a palindrome.
    if phrase[0] != phrase[-1]:
        return False
    # Outer characters match; recurse on the interior.
    return check_if_palindrome_recursively(phrase[1:-1])
# Ask the user for a string to test.
user_input = input("Enter a phrase, a sentence, or multiple sentences to see if it is a palindrome! ")
# Report whether the cleaned-up input reads the same in both directions.
if check_if_palindrome_recursively(user_input):
    print(user_input, "is a palindrome")
else:
    print(user_input, "is not a palindrome")
#!/usr/bin/env python
# Funtion:
# Filename:
import socket
import os

# Simple file-download client: sends a command such as "get <filename>" to the
# server on localhost:9999 and writes the received bytes to "<filename>.new".
client = socket.socket()
client.connect(('localhost', 9999))
while True:
    cmd = input(">> ").strip()
    if cmd == '':
        continue
    client.send(cmd.encode('utf-8'))
    # Server first replies with the total file size (or an error string).
    tol_file_size = client.recv(1024).decode()
    if tol_file_size == 'file not exist':
        print('file not exist')
    else:
        # Acknowledge so the server starts streaming the file body.
        client.send('OK'.encode('utf-8'))
        received_size = 0
        print(tol_file_size)
        tol_file_size = int(tol_file_size)
        # NOTE(review): assumes cmd is "<verb> <filename>"; an IndexError is
        # raised for a one-word command — confirm the expected protocol.
        f = open(cmd.split(' ')[1]+'.new', 'wb')
        while tol_file_size - received_size > 0:
            # Never request more than remains, so the server's next reply is
            # not swallowed into this file.
            if tol_file_size - received_size > 1024:
                size = 1024
            else:
                size = tol_file_size - received_size
            data = client.recv(size)
            received_size += len(data)
            # print(data.decode())
            f.write(data)
        else:
            # while/else: runs once the transfer loop completes normally.
            print('recv done...')
        f.close()
client.close()
# Step the pair upward until either loop bound is hit (x reaches 50 first).
x = 40
y = 60
while x < 50 and y < 100:
    x, y = x + 1, y + 1
    print(x, y)
|
# Greet the user by the name they type in.
nam=input('who are you?\n')
print('welcome',nam)
|
##############b#############
# astring = input('请输入一个字符串a: ')
# bsting = input('请输入一个字符串b: ')
#
# if len(astring) != len(bsting):
#     print('no')
#     exit()
#
#
# for i, j in zip(astring, bsting):
#     if i is not j:
#         print('no')
#         exit()
# else:
#     print('yes')
##############c###############
# str1 = input('请输入一个字符串: ')
# if int(len(str1)%2) != 0:
#     print('no')
#     exit()
# elif str1[0:int(len(str1)/2)] == str1[int(len(str1)/2):int(len(str1))]:
#     print('yes')
#     exit(1)
# else:
#     print('not')
#############d#########################
str1 = input('่ฏท่พๅ
ฅไธไธชๅญ็ฌฆไธฒ: ')
str2 = str1
i = int(len(str1))
while i >= 1:
str2 = str2 + str1[i-1]
i -= 1
print(str2)
|
import chessBoard as cb
import unittest
class chessTest(unittest.TestCase):
    """Unit tests for chessBoard.turnValid, one piece type per test.

    Each test places a single black piece at grid (1,1) and checks a few
    candidate destination squares.  Positions are two-element [row, col]
    lists, as turnValid expects.  Several of the original inline coordinate
    comments contradicted the values actually appended; they are corrected
    below to match the code.
    """

    def testPawn(self):
        self.board = cb.chessBoard()
        currPos = []
        currPos.append(1)  # (1,1)
        currPos.append(1)
        pieceType = 1  # Pawn
        color = "Black"
        self.board.grid[currPos[0]][currPos[1]] = pieceType
        endPos1 = []  # (2,4) : False
        endPos1.append(2)
        endPos1.append(4)
        endPos2 = []  # (1,2) : False
        endPos2.append(1)
        endPos2.append(2)
        endPos3 = []  # (2,1) : True  (comment fixed: appended values are 2,1)
        endPos3.append(2)
        endPos3.append(1)
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos1, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos2, color))
        self.assertTrue(self.board.turnValid(self.board.grid, currPos, endPos3, color))

    def testKnight(self):
        self.board = cb.chessBoard()
        currPos = []
        currPos.append(1)  # (1,1)
        currPos.append(1)
        pieceType = 2  # Knight
        color = "Black"
        self.board.grid[currPos[0]][currPos[1]] = pieceType
        endPos1 = []  # (7,7) : False
        endPos1.append(7)
        endPos1.append(7)
        endPos2 = []  # (1,2) : False  (comment fixed: appended values are 1,2)
        endPos2.append(1)
        endPos2.append(2)
        endPos3 = []  # (2,3) : True  (comment fixed: appended values are 2,3)
        endPos3.append(2)
        endPos3.append(3)
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos1, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos2, color))
        self.assertTrue(self.board.turnValid(self.board.grid, currPos, endPos3, color))

    def testBishop(self):
        self.board = cb.chessBoard()
        currPos = []
        currPos.append(1)  # (1,1)
        currPos.append(1)
        pieceType = 3  # Bishop
        color = "Black"
        self.board.grid[currPos[0]][currPos[1]] = pieceType
        endPos1 = []  # (1,2) : False
        endPos1.append(1)
        endPos1.append(2)
        endPos2 = []  # (4,5) : False
        endPos2.append(4)
        endPos2.append(5)
        endPos3 = []  # (3,3) : True
        endPos3.append(3)
        endPos3.append(3)
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos1, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos2, color))
        self.assertTrue(self.board.turnValid(self.board.grid, currPos, endPos3, color))

    def testRook(self):
        self.board = cb.chessBoard()
        currPos = []
        currPos.append(1)  # (1,1)
        currPos.append(1)
        pieceType = 4  # Rook
        color = "Black"
        self.board.grid[currPos[0]][currPos[1]] = pieceType
        endPos1 = []  # (2,5) : False
        endPos1.append(2)
        endPos1.append(5)
        endPos2 = []  # (7,2) : False
        endPos2.append(7)
        endPos2.append(2)
        endPos3 = []  # (5,1) : True
        endPos3.append(5)
        endPos3.append(1)
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos1, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos2, color))
        self.assertTrue(self.board.turnValid(self.board.grid, currPos, endPos3, color))

    def testQueen(self):
        self.board = cb.chessBoard()
        currPos = []
        currPos.append(1)  # (1,1)
        currPos.append(1)
        pieceType = 5  # Queen
        color = "Black"
        self.board.grid[currPos[0]][currPos[1]] = pieceType
        endPos1 = []  # (2,5) : False
        endPos1.append(2)
        endPos1.append(5)
        endPos2 = []  # (7,2) : False
        endPos2.append(7)
        endPos2.append(2)
        endPos3 = []  # (2,1) : True  (comment fixed: appended values are 2,1)
        endPos3.append(2)
        endPos3.append(1)
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos1, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos2, color))
        self.assertTrue(self.board.turnValid(self.board.grid, currPos, endPos3, color))

    def testKing(self):
        self.board = cb.chessBoard()
        currPos = []
        currPos.append(1)  # (1,1)
        currPos.append(1)
        pieceType = 0  # King
        color = "Black"
        self.board.grid[currPos[0]][currPos[1]] = pieceType
        endPos1 = []  # (2,5) : False
        endPos1.append(2)
        endPos1.append(5)
        endPos2 = []  # (7,2) : False
        endPos2.append(7)
        endPos2.append(2)
        endPos3 = []  # (1,1) : False  (comment fixed: appended values are 1,1,
        #                 i.e. the king's own square, and the assert is False)
        endPos3.append(1)
        endPos3.append(1)
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos1, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos2, color))
        self.assertFalse(self.board.turnValid(self.board.grid, currPos, endPos3, color))
if __name__ == '__main__':
    # Run all chessTest cases when executed directly.
    unittest.main()
|
from copy import deepcopy
N, K = map( int, input().split())
A = list( map( int, input().split()))
ans = N
for i in range(N):
B = [0]
V = [0]*(K+1)
for j in range(N):
if j == i:
continue
a = A[j]
C = []
for b in B:
if b + a <= K:
if V[b+a] == 0:
V[b+a] = b+a
C.append(b+a)
B = deepcopy(C)
v = max(V)
if v < K and K <= v+A[i]:
ans -= 1
print(ans)
|
from django.shortcuts import render
from django.core.cache import cache
from django.utils import timezone
from scheduler.forms import FeedbackForm
from datetime import datetime, timedelta
from .schedalgo.schedule import sched
from .models import Course, Request
from .organize_data import organize, organize_output, organize_request
import json
import pickle
import os
import hashlib
# Shared page header shown on every template.
site_hdr = "Course Scheduler"
# Upper bound on the number of sections a course may request.
max_sections = 5
# Directory where pickled schedule results are kept.
history_data_path = "scheduler/history_schedule_data/"
def index(request):
    """Render the landing page listing every course, ordered by name."""
    context = {
        'course_list': Course.objects.all().order_by('cname'),
        'header': site_hdr,
        'max_sections': range(max_sections + 1),
    }
    return render(request, 'index.html', context)
def about(request):
    """Render the static About page."""
    context = {'header': site_hdr}
    return render(request, 'about.html', context)
# This feedback form is old and will be redone using a model form.
def feedback(request):
    """Render the feedback page, saving the submission when it validates.

    NOTE(review): the form is bound to request.POST even on GET requests; on
    GET the empty bound form fails is_valid() so nothing is saved, but the
    page renders with validation errors — confirm before restructuring.
    """
    form = FeedbackForm(request.POST)
    if form.is_valid():
        form.save()
    return render(request, 'feedback.html', {'header': site_hdr, 'form': form})
def requirements(request):
    """Render the requirements page."""
    ctx = {'header': site_hdr}
    return render(request, 'requirements.html', ctx)
def add_filter(request, kwargs, get_name, kwarg_name):
    """Append every non-empty value of the GET parameter *get_name* to *kwargs*.

    *kwarg_name* is unused by the current implementation but kept for
    interface compatibility with callers.
    """
    for value in request.GET.getlist(get_name):
        if value != '':
            kwargs.append(value)
def schedule(request):
    """Run the scheduler on a POSTed course selection and render the result.

    NOTE(review): nothing is returned for non-POST requests (Django raises
    "view didn't return an HttpResponse") — confirm whether GET should
    redirect to the index instead.
    """
    if request.method == "POST":
        # Organize the input data into the scheduler's required format.
        data_in = organize_request(request)
        data = organize(data_in)
        # Schedule the courses; sched() speaks JSON in and out.
        ret_data = sched(json.dumps(data))
        ret_dict = json.loads(ret_data)
        scheduled = ret_dict['scheduled']
        unscheduled = ret_dict['unscheduled']
        # Change the data into the front-end's required format.
        ret_scheduled = organize_output(scheduled)
        # Store the historical data in the db and the local file system.
        record_history(ret_scheduled, unscheduled)
        return render(
            request, 'schedule.html', {
                'scheduled': ret_scheduled,
                'unscheduled': unscheduled,
                'header': site_hdr
            })
def record_history(ret_scheduled, unscheduled):
    """Persist one schedule result and periodically prune old history.

    The (ret_scheduled, unscheduled) pair is pickled under a hash-named file
    and referenced by a new Request row.  On every 7th day of the month, rows
    older than a week (and any orphaned pickle files) are removed.

    Fixes: the pickle file handle was never closed (now a `with` block); the
    orphan check did O(n) list membership per file (now a set); the loop
    variable shadowed the conventional `request` name.
    """
    new_request = Request()
    now = timezone.now()
    # Hash the timestamp so near-simultaneous requests get distinct names.
    path = history_data_path + hashlib.sha256(repr(now).encode('utf-8')).hexdigest() + '.pkl'
    with open(path, 'wb') as f:
        pickle.dump((ret_scheduled, unscheduled), f)
    new_request.date_time = now
    new_request.path = path
    new_request.save()
    if int(now.day) % 7 == 0:
        # Remove records older than one week, plus their pickle files.
        outdated = now - timedelta(days=7)
        for req in Request.objects.filter(date_time__lte=outdated):
            if os.path.isfile(req.path):
                os.remove(req.path)
            req.delete()
        # Remove pickle files no longer referenced by any Request row.
        known_paths = {str(req.path) for req in Request.objects.all()}
        for file_name in os.listdir(history_data_path):
            file_path = history_data_path + file_name
            if file_path not in known_paths:
                os.remove(file_path)
def request_history(request):
    """Render the list of past schedule-request timestamps, newest first.

    Fix: the original filtered each element by membership in the very same
    sequence (`filter(lambda x: x in all_requests, all_requests)`), a no-op;
    materializing the queryset is all that is needed.
    """
    all_requests = Request.objects.values_list('date_time', flat=True).order_by('-date_time')
    return render(request, 'request_history.html', {
        'requests': list(all_requests),
        'header': site_hdr
    })
def resubmit(request):
    """Re-render a historical schedule result identified by its timestamp.

    Results are cached under hash(date_time); on a miss the pickled
    (scheduled, unscheduled) pair is reloaded from disk and the cache
    repopulated.

    Fix: the pickle file handle was never closed (now a `with` block).
    """
    request_date = request.GET['req']
    res = cache.get(hash(request_date))
    if not res:
        record = Request.objects.get(date_time=request_date)
        with open(record.path, 'rb') as f:
            res = pickle.load(f)
        # NOTE(review): stored under hash(record.date_time) (a datetime) but
        # looked up under hash(request_date) (a string) — these differ, so the
        # cache may never hit; confirm the intended key before changing it.
        cache.set(hash(record.date_time), res)
    return render(request, 'schedule.html', {
        'scheduled': res[0],
        'unscheduled': res[1],
        'header': site_hdr
    })
|
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import sys
sys.path.append('..')
import os
import plotly.graph_objects as go
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output,ALL,MATCH
import json
import pickle
import re
import mat4py
import numpy as np
import pandas as pd
import dash_bootstrap_components as dbc
import plotly.express as px
from app import app
from apps.point_cloud_app import display_description
from navbar import Navbar
from modules.timeSeriesModels.TimeSeriesAnalysis import TensorDecomp,MatrixProfile,ChangePoint
# Dropdown options for the two available sensor datasets.
dataset_dropdown_choices = [
    {
        'label': "Aircraft",
        'value': 'aircraft'
    },
    {
        'label': "Building",
        'value': 'building'
    },
]
# Dropdown options for the analysis models (tensor decomposition disabled).
model_dropdown = [
    {
        'label': 'Matrix Profile',
        'value': 'matrix_profile'
    },
    {
        'label': 'Change Point',
        'value': 'change_point'
    },
    #{
    #    'label': 'Tensor Decomposition',
    #    'value': 'tensor_decomp'
    #},
]
# Left-hand control card: dataset picker, model picker, sensor checklist.
controls = dbc.Card(
    [
        dbc.FormGroup(
            [
                dbc.Label("Dataset"),
                dcc.Dropdown(
                    id="timeseries-dataset",
                    options=dataset_dropdown_choices,
                    value=dataset_dropdown_choices[0]['value']
                ),
            ]
        ),
        dbc.FormGroup(
            [
                dbc.Label("Model"),
                dcc.Dropdown(
                    id="timeseries-model",
                    options=model_dropdown,
                    value=model_dropdown[0]['value'],
                ),
            ]
        ),
        dbc.FormGroup(
            [
                dbc.Label("Sensor Selection"),
                # Options are filled dynamically by timeseries_sensor_option().
                dcc.Checklist(
                    value = [1],
                    labelStyle =
                    {'display': 'inline-block'},
                    id='timeseries-sensor'
                )
            ]
        ),
        # NOTE(review): "may taken" is a user-visible typo; fixing it changes
        # rendered text, so it is left untouched here.
        html.P(
            "This may taken a few seconds to update.",
            className="card-text",
            style={
                'margin-bottom': 10
            })
    ],
    body=True,
)
# Number of sensors available per dataset.
dataset_info = {
    'aircraft': {
        'nsensors':5
    },
    'building': {
        'nsensors':24
    },
}
X_list = []
y_list = []
# Eagerly load every sensor's .mat file for both datasets.
data_path = os.path.abspath(os.path.join('../data/sensor_data')) + '/'
building_dataset = np.array([
    mat4py.loadmat(data_path + f'Building_Sensor{i}.mat')['X'] for i in range(1,dataset_info['building']['nsensors']+1)
])
aircraft_dataset = np.array([
    mat4py.loadmat(data_path + f'Aircraft_Sensor{i}.mat')['X'] for i in range(1,dataset_info['aircraft']['nsensors']+1)
])
# Card that shows a heading + paragraph describing the selected dataset.
data_description = dbc.Card(
    [
        dbc.Container([
            html.H4(
                id="timeseries-data-descript-heading",
                className="card-title",
                style={
                    'margin-top': 10
                }),
            html.P(
                id="timeseries-data-descript-body",
                className="card-text",
                style={
                    'margin-bottom': 10
                }),
        ])
    ])
# Page layout: navbar, controls + description row, then the two plot areas.
layout = html.Div([
    dbc.Container([
        Navbar("/timeseries"),
        html.Div([
            dbc.Row(
                [
                    dbc.Col(controls, md=4),
                    dbc.Col(data_description, md=8),
                ],
                align="top",
                style={
                    'padding-top': 10
                }),
        ]),
        dcc.Loading(html.Div(id="timeseries-data-plot"),type="circle",),
        html.H4("Results"),
        dcc.Loading(
            children=html.Div(id="timeseries-data-result"),
            type="circle",),
    ]),
])
@app.callback(
    Output('timeseries-data-descript-heading', 'children'),
    Output('timeseries-data-descript-body', 'children'),
    Input('timeseries-dataset', 'value'),)
def timeseries_dataset_description(*args,**kwargs):
    """Delegate the dataset description card to the point-cloud app's helper."""
    return display_description(*args,**kwargs)
@app.callback(
    Output('timeseries-sensor', 'options'),
    Input('timeseries-dataset', 'value'),)
def timeseries_sensor_option(timeseries_dataset):
    """Return one checklist option per sensor of the selected dataset.

    Fix: the original used a bare `except:` that swallowed *any* error (not
    just a missing dataset key) and re-raised a message-less ValueError.
    Catch only KeyError and chain it so the cause is preserved; still raises
    ValueError, so callers' handling is unchanged.
    """
    try:
        nsensors = dataset_info[timeseries_dataset]['nsensors']
    except KeyError as exc:
        raise ValueError(f"unknown dataset: {timeseries_dataset!r}") from exc
    return [{'label': i, 'value': i} for i in range(1, nsensors + 1)]
@app.callback(
    Output('timeseries-data-plot', 'children'),
    Input('timeseries-dataset', 'value'),
    Input('timeseries-sensor', 'value'),)
def plot_timeseries_data(timeseries_dataset,timeseries_sensor):
    """Render the raw time-series plot for the selected dataset/sensors.

    NOTE(review): a second function below reuses this name; Dash captured
    this one via the decorator at definition time, but the module-level name
    is shadowed — consider renaming one of them.
    """
    # A single selected sensor arrives as a bare int; normalize to a list.
    if isinstance(timeseries_sensor,int):
        timeseries_sensor = [timeseries_sensor]
    fig = plot_dataset(timeseries_dataset,timeseries_sensor)
    return dcc.Graph(figure=fig)
@app.callback(
    Output('timeseries-data-result', 'children'),
    Input('timeseries-dataset', 'value'),
    Input('timeseries-model', 'value'),
    Input('timeseries-sensor', 'value'),)
def plot_timeseries_data(timeseries_dataset,timeseries_model,timeseries_sensor):
    """Dispatch to the selected analysis model and return its result graphs.

    NOTE(review): this def shadows the plot callback of the same name above
    (harmless for Dash's registry, confusing for readers).
    """
    # A single selected sensor arrives as a bare int; normalize to a list.
    if isinstance(timeseries_sensor,int):
        timeseries_sensor = [timeseries_sensor]
    #if timeseries_model == 'tensor_decomp':
    #    graphs = tensor_decomp(timeseries_dataset,timeseries_sensor)
    if timeseries_model == 'change_point':
        graphs = change_point(timeseries_dataset,timeseries_sensor)
    elif timeseries_model == 'matrix_profile':
        graphs = matrix_profile(timeseries_dataset,timeseries_sensor)
    else:
        # Unknown model value: fail loudly rather than return nothing.
        raise ValueError
    return graphs
def matrix_profile(timeseries_dataset, timeseries_sensor):
    """Fit a MatrixProfile model and return its result graphs.

    Returns [significant-motif plot, matrix-profile plot, semantic-segmenter
    plot] as dcc.Graph components, one trace per selected sensor plus a
    barycentre trace on the latter two figures.
    """
    MP = MatrixProfile(data_path = data_path)
    MP.fit(
        data_type = timeseries_dataset.title(), sensor_nums = timeseries_sensor)
    graphs = []
    # Plot the most significant motif found by the profile.
    motif_fig = px.line(
        x = MP.motif_listX[0],
        y = MP.motif_listy[0],
        title = "Matrix Profile Significant Motif")
    motif_fig.update_layout(
        xaxis_title="",
        yaxis_title="",
        width=500,
        height=400,
    )
    motif_fig.update_xaxes(showticklabels=False)
    motif_fig.update_yaxes(showticklabels=False)
    graphs.append(
        dcc.Graph(
            figure = motif_fig,))
    matrix_figure = go.Figure()
    semantic_figure = go.Figure()
    # One trace per selected sensor on each figure.
    for i in range(len(MP.mp_dict)):
        matrix_figure.add_trace(
            go.Scatter(
                x=np.arange(len(MP.mp_list[i])),
                y=MP.mp_list[i],
                mode='lines',
                name=f"Sensor {timeseries_sensor[i]}"
            ))
        semantic_figure.add_trace(
            go.Scatter(
                x=np.arange(len(MP.mp_list[i])),
                y=MP.cac_list[i],
                mode='lines',
                name=f"Sensor {timeseries_sensor[i]}"
            ))
    ## add the barycenter
    # NOTE(review): indentation was ambiguous in the original source; the
    # barycentre traces are emitted once after the loop (they reuse the final
    # loop index i only for the x-axis length) — confirm against upstream.
    matrix_figure.add_trace(
        go.Scatter(
            x=np.arange(len(MP.mp_list[i])),
            y=MP.mp_bary_list[0],
            mode='lines',
            name=f"Barycentre",
            line=dict(
                width = 5,
                color = 'black'
            ),
        ))
    semantic_figure.add_trace(
        go.Scatter(
            x=np.arange(len(MP.mp_list[i])),
            y=MP._moving_average(MP.cac_bary_list[0],100),
            mode='lines',
            name=f"Barycentre",
            line=dict(
                width = 5,
                color = 'black'
            ),
        ))
    # Range sliders on both figures.
    semantic_figure.update_layout(
        xaxis=dict(
            autorange=True,
            rangeslider=dict(
                autorange=True,
            ),
        ))
    matrix_figure.update_layout(
        xaxis=dict(
            autorange=True,
            rangeslider=dict(
                autorange=True,
            ),
        ))
    semantic_figure.update_layout(
        title="Semantic Segmenter",
    )
    matrix_figure.update_layout(
        title="Matrix Profiles",
    )
    graphs.append(
        dcc.Graph(figure=matrix_figure)
    )
    graphs.append(
        dcc.Graph(figure=semantic_figure)
    )
    return graphs
def tensor_decomp(timeseries_dataset, timeseries_sensor):
    """Scatter-plot the 2-D tensor decomposition of the selected sensors.

    One trace per damage class, labelled per dataset.

    Robustness fix: an unknown dataset name previously fell through both
    branches and crashed later with UnboundLocalError on `nlabels`; it now
    raises an explicit ValueError up front.
    """
    TD = TensorDecomp(num_dims=2, data_path=data_path)
    TD.fit(
        data_type=timeseries_dataset.title(), sensor_nums=timeseries_sensor)
    y_labels = TD.arr_stacked_y.flatten()
    if timeseries_dataset == 'building':
        nlabels = 5
        legend = ['Healthy', 'Damage Level 1',
                  'Damage Level 2', 'Damage Level 3',
                  'Damage Level 4']
    elif timeseries_dataset == 'aircraft':
        nlabels = 3
        legend = ['Healthy Take-Off', 'Healthy Climb',
                  'Damage Climb']
    else:
        raise ValueError(f"unknown dataset: {timeseries_dataset!r}")
    fig = go.Figure()
    for i in range(nlabels):
        # NOTE(review): every class uses the same marker color ('blue'), so
        # classes are only distinguishable via the legend — confirm intent.
        fig.add_trace(
            go.Scatter(
                x=TD.dim1_x[y_labels == i],
                y=TD.dim1_y[y_labels == i],
                mode='markers',
                marker=dict(
                    color='blue',
                ),
                name=legend[i]
            ))
    fig.update_layout(
        legend_title="Damage Type",
        xaxis_title="Magnitude",
        yaxis_title="Magnitude",
        legend_x=0.01,
        legend_y=0.99,
        title="Tensor Decomposition",
    )
    return dcc.Graph(figure=fig)
def plot_dataset(timeseries_dataset,timeseries_sensor):
    """Line-plot the selected sensors' raw signals with annotated phases.

    NOTE(review): a dataset name other than "aircraft"/"building" leaves
    `data`/`max_time` unbound (UnboundLocalError) — inputs are presumably
    restricted to the two dropdown values; confirm.
    """
    if timeseries_dataset == "aircraft":
        # Flatten per-sensor matrices to one column per sensor.
        data = aircraft_dataset.reshape((aircraft_dataset.shape[0],-1)).T
        max_time = data.shape[0]
    elif timeseries_dataset == "building":
        data = building_dataset.reshape((building_dataset.shape[0],-1)).T
        max_time = 13170
    columns = [f"Sensor {ele}" for ele in timeseries_sensor]
    # Sensor numbers are 1-based; column indices are 0-based.
    temp = np.array(timeseries_sensor)-1
    df = pd.DataFrame(data[:,temp],columns=columns)
    df["Time"] = np.arange(1,df.shape[0]+1)
    fig = px.line(df[:max_time], x='Time', y=df.columns,
        title=f"{timeseries_dataset.title()} dataset")
    fig.update_traces(line=dict(width=0.5))
    fig.update_xaxes(rangeslider_visible=True)
    if timeseries_dataset == "aircraft":
        # Annotate the three flight phases.
        fig.add_vrect(x0=0, x1=2000,
            annotation_text="Take off Phase", annotation_position="top left",line_width=0)
        fig.add_vrect(x0=2000, x1=4000,
            annotation_text="Climb Phase", annotation_position="top left",
            fillcolor="green", opacity=0.1, line_width=0)
        fig.add_vrect(x0=4000, x1=6000,
            annotation_text="Climb Phase (Damaged)", annotation_position="top left",
            fillcolor="red", opacity=0.1, line_width=0)
    if timeseries_dataset == "building":
        # Shade each 2000-sample damage-level window, darker per level.
        break_points = [5170,7170,9170,11170]
        for i,ele in enumerate(break_points):
            fig.add_vrect(x0=ele, x1=ele+2000,
                annotation_text=f"Damage Level {i+1}", annotation_position="top left",line_width=0,
                fillcolor = "red", opacity=0.1+i*0.1)
    fig.update_layout(
        legend_title="Sensor",
        yaxis_title="Magnitude",
    )
    return fig
def change_point(timeseries_dataset,timeseries_sensor):
    """Overlay detected change points on the raw dataset plot."""
    CP = ChangePoint(data_path=data_path)
    CP.fit(
        data_type = timeseries_dataset.title(),
        sensor_nums = timeseries_sensor,
        min_size=200)
    # NOTE(review): the return value is unused, but predict() presumably
    # populates CP.chg_pts as a side effect — do not remove this call
    # without confirming.
    chg_pt_locs = CP.predict()
    # Ignore early change points (first 2000 samples) and the final one.
    relevant_chg_pts = CP.chg_pts[CP.chg_pts > 2000][:-1]
    fig = plot_dataset(timeseries_dataset,timeseries_sensor)
    for ele in relevant_chg_pts:
        fig.add_vline(
            ele,
            line_width=5,
            line_dash='dash',
            line_color="blue",name='Change Points')
    fig.update_layout(
        title="Change Point",
    )
    return [dcc.Graph(figure=fig)]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import matplotlib as mpl
mpl.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
from ising import IsingAnim
import threading
import time
from ext.colors import rundark
def main():
    """Animate the Ising model when the requested lattice is 2-D."""
    if len(args.shape) != 2:
        print("Only 2D lattices can be animated.")
        return
    animate_evolution()
def animate_evolution():
    """Animate a 2-D Ising lattice while it evolves on a worker thread.

    The evolution runs on a background thread; the main thread redraws the
    lattice image every args.s1 seconds until the worker finishes.

    Fix: Thread.isAlive() was removed in Python 3.9 — use is_alive().
    """
    plt.ion()
    fig1 = plt.figure()
    ax = fig1.add_subplot(111)
    i = IsingAnim(args.shape, args.iterations, temperature=args.T, aligned=args.aligned,
                  algorithm=args.algorithm)
    grid_2d = i.grid.reshape(args.shape[0], args.shape[1])
    if args.nointerpolate:
        im = ax.imshow(grid_2d, cmap=mpl.cm.binary, origin='lower', vmin=0,
                       vmax=1, interpolation='None')
    else:
        im = ax.imshow(grid_2d, cmap=mpl.cm.binary, origin='lower', vmin=0,
                       vmax=1)

    def worker():
        # Runs on the background thread: advance the simulation.
        i.evolve(sleep=args.s2)
        plt.draw()

    evolvegrid = threading.Thread(target=worker)
    evolvegrid.start()
    while evolvegrid.is_alive():
        time.sleep(args.s1)
        g = i.grid.reshape(args.shape[0], args.shape[1])
        im.set_array(g)
        ax.set_title(str(i.sweepcount))
        fig1.canvas.draw()
def get_arguments():
    """Parse and return the command-line options for the simulation.

    Fix: the --iterations help text claimed a default of 100000 while the
    actual default is 1000000; the text now matches the code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--algorithm', choices=['metropolis','wolff'],
                        default='wolff')
    parser.add_argument('-i', '--iterations', default=1000000, type=int,
                        help="Number of iterations, default: 1000000")
    parser.add_argument('--shape', default=[200, 200], type=int,
                        nargs='+', help="Lattice size")
    parser.add_argument('--aligned', action='store_true')
    parser.add_argument('--nointerpolate', action='store_true')
    parser.add_argument('-T', default=2.3, type=float)
    parser.add_argument('--s1', default=0.1, type=float, help="image redraw interval")
    parser.add_argument('--s2', default=0.000001, type=float)
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    # Parse CLI options into the module-global `args` used by main() and
    # animate_evolution().
    args = get_arguments()
    print(args)
    rundark()
    main()
|
# !/usr/bin/python
"""
-----------------------------------------------
Auto Layer Cast Light
Written By: Colton Fetters
Version: 1.0
First release: 9/2017
Production tool designed to add shadow light to
utility shadow layer
-----------------------------------------------
"""
# import modules
import maya.cmds as cmds
import maya.OpenMaya as om
class Core(object):
    """Adds (or reuses) a directional 'Key_Cast_Shadow' light on the current
    render layer and aligns it with the render camera and the scene key light.
    """

    def run(self):
        """Entry point: ensure the shadow light exists, add it to the current
        render layer, then orient it against the camera and key light."""
        lightStatus = cmds.objExists('Key_Cast_Shadow')
        if not lightStatus:
            light = self.create_light()
        else:
            light = cmds.ls('Key_Cast_Shadow')[0]
        cmds.select(light)
        cLayer = cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True)
        cmds.editRenderLayerMembers(cLayer, light, noRecurse=True)
        # NOTE(review): the returned status messages are unused here.
        lightInfo1, camLightInfo1 = self.match_camera_key_position(light)
        cmds.select(d=True)

    def create_light(self):
        """Create the raytraced directional shadow light; return its transform."""
        shape = cmds.directionalLight(name='Key_Cast_Shadow')
        # Derive the transform name by stripping "Shape" from the shape name.
        light = shape.split("Shape")
        light = "".join(light)
        shape = cmds.rename(shape, light + 'Shape')
        cmds.setAttr(light + ".scaleX", 25)
        cmds.setAttr(light + ".scaleY", 25)
        cmds.setAttr(light + ".scaleZ", 25)
        cmds.setAttr(light + ".rotateX", -90)
        cmds.setAttr(shape + ".intensity", 1)
        cmds.setAttr(shape + ".emitSpecular", 0)
        cmds.setAttr(shape + ".lightAngle", 2.5)
        cmds.setAttr(shape + ".shadowRays", 64)
        cmds.setAttr(shape + ".useRayTraceShadows", 1)
        return light

    def match_camera_key_position(self, shadowLight):
        """Snap the shadow light to the render camera, then orient it to the
        best-matching key light (EXT > Character > any other Key light).

        Returns (light status message, camera status message); both stay as
        empty lists when no camera or key light was found.
        """
        self.findSetLayer()
        cam = self.get_render_camera()
        light = shadowLight
        if cam:
            # Temporarily parent-constrain the light to the camera to copy its
            # transform, then offset it up/behind in the light's own space.
            cmds.select(cam, light)
            pct = cmds.parentConstraint(mo=False)
            cmds.delete(pct)
            cmds.select(light)
            cmds.xform(light, r=True, os=True, t=(0, 2, -15))
        # match to a characters key light
        allLights = cmds.ls(lights=True, type='VRayLightRectShape')
        extLights = []
        charLights = []
        otherLight = []
        lightInfo = []
        camLightInfo = []
        for each in allLights:
            if light not in each:
                if 'Key' in each:
                    if 'EXT' in each:  # Key is the common word, EXT or Character are more precise
                        transform = cmds.listRelatives(each, p=True)
                        extLights.append(transform)
                    elif 'Character' in each:
                        transform = cmds.listRelatives(each, p=True)
                        charLights.append(transform)
                    else:
                        transform = cmds.listRelatives(each, p=True)
                        otherLight.append(transform)
        if cam:
            if extLights:
                matchLight = extLights[0][0]
                lightInfo, camLightInfo = self.final_cam_constraints(cam, light, matchLight)
            elif charLights:
                matchLight = charLights[0][0]
                lightInfo, camLightInfo = self.final_cam_constraints(cam, light, matchLight)
            elif otherLight:
                matchLight = otherLight[0][0]
                lightInfo, camLightInfo = self.final_cam_constraints(cam, light, matchLight)
            else:
                om.MGlobal.displayInfo('No Key Light to match Orientations to')
        else:
            om.MGlobal.displayInfo('No Custom Cameras to connect light to')
        return lightInfo, camLightInfo

    def final_cam_constraints(self, cam, light, matchLight):
        """Orient the light to *matchLight*, point-constrain it to *cam*, and
        return human-readable status messages for both operations."""
        lightMessage = light + ' Oriented to ---> ' + matchLight
        camLightMessage = light + ' Point Constrained to ---> ' + cam
        cmds.select(matchLight, light)
        oc = cmds.orientConstraint(mo=False)
        cmds.delete(oc)
        cmds.select(cam, light)
        cmds.pointConstraint(mo=True)
        return lightMessage, camLightMessage

    def findSetLayer(self):
        """Activate the 'ST' (non-FX) render layer(s).

        NOTE(review): returns the *last* layer iterated, not necessarily the
        one activated, and raises NameError if no render layers exist —
        confirm whether callers rely on the return value (run() ignores it).
        """
        for clayer in cmds.ls(type='renderLayer'):
            if 'FX' not in clayer:
                if 'ST' in clayer:
                    cmds.editRenderLayerGlobals(currentRenderLayer=clayer)
            else:
                pass
        return clayer

    def get_render_camera(self, *args):
        """Return the first non-default camera's transform, or None (implicit)
        when only Maya's built-in cameras exist."""
        allCams = cmds.ls(type='camera')
        cam = []
        for each in allCams:
            # Skip Maya's default viewport cameras.
            if 'front' in each or 'persp' in each or 'side' in each or 'top' in each:
                pass
            else:
                cam.append(each)
        if cam:
            camShape = cam[0]
            cam = cmds.listRelatives(camShape, p=True)[0]
            return cam
|
# Read track length l and N positions, one per line.
l, N = map( int, input().split())
X = [ int( input()) for _ in range(N)]
# L[i] / R[i]: doubling-accumulated cost over the first i positions taken
# from the left end / from the right end (RX mirrors X about l) respectively.
L = [0]*(N+1)
R = [0]*(N+1)
RX = [ l - X[i] for i in range(N-1,-1,-1)]
for i in range(N):
    L[i+1] = L[i]*2 + X[i]
    R[i+1] = R[i]*2 + RX[i]
# (translated from Japanese) Case: sweeping alternately right and left from
# the starting position.
# NOTE(review): the two prints below look like leftover debug output.
print(L)
print(R)
ans = L[N//2] + R[(N+1)//2]
print(ans)
|
# Generated by Django 3.1.6 on 2021-02-11 07:13
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated schema migration for the blogger app."""

    dependencies = [
        ('blogger', '0005_auto_20210210_0718'),
    ]

    operations = [
        # Blog.category foreign key now cascades on delete.
        migrations.AlterField(
            model_name='blog',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogger.category'),
        ),
        # NOTE(review): the default was captured once at makemigrations time
        # (a fixed datetime), not evaluated per row -- the model likely meant
        # timezone.now / auto_now_add.  Harmless inside the migration itself.
        migrations.AlterField(
            model_name='blog',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 11, 7, 13, 33, 794218, tzinfo=utc)),
        ),
    ]
|
# -*- coding: utf-8 -*-
{
'name': "Optesis Sale Order Custom Validation Date",
'summary': """
La date saisie dans le sale.order doit etre considรฉrรฉe comme de confirmation et date prรฉvue du stock.picking associรฉ au sale order""",
'description': """
""",
'author': "Optesis SA, by Robilife",
'website': "http://www.optesis.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/13.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Sales',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'sale_management'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/sale_order_inherit_views.xml',
],
# only loaded in demonstration mode
'demo': [
],
}
|
#!/usr/bin/env python
# coding: utf-8
# # แ
แแแถแแแแแแถแแแขแแแแปแ (Binary Classification)
# แแแแ
แแธแแถแแแทแแแแถแแธแแแแถแแแแแแแแแถแแขแแแแแธแแฌแ
แแแพแแแถแแแแแแแผแแแแแแแแแแแแแแ แแถแแแแแ
แแแแทแแแแแแแแแแแแแแแแพแขแแแแแแแแแแแแแแแถแแถแแแแปแแ แ
แแถแ
แแแถแแแแแแถแแแแแแปแ(classification)แ แแถแแแทแแ
แแแถแแแแแแถแแแแแแปแ แแแขแถแ
แแแแ แถแแแถแแแถแแแแแแแแแผแแแแแแแแแแแแแแแแแแแ แแแแแแแถแแแแแแแแแแแแขแแแแแแแแ
แแทแแแแแแแแแแผแแถแแแพแแแแปแแ
แแแฝแแแทแแกแพแ แแแแปแแแ
แแทแแแบแแแแแแแแแถแ
แแแผแ
แแถ{0,1}แแถแแพแแ
# แแแแปแแขแแแแแแแแแแพแแแนแแแแแถแแแแแธแแแแขแแแแแแแแ
แแแแแแแแแแแธแแแแแแแแแแแแพแแแแแแแ แ
แแถแ
แแแถแแแแแแถแแแขแแแแปแ(binary classification)แ แงแแถแ แแแแแแแถแแขแแปแแแแแ
แแแถแแแแแแถแแแขแแแแปแแแแแปแแแธแแแถแแแถแแแผแ
แแถแ แแถแแแแแแแขแแแแแแแแถแแแถแแขแแกแทแ
แแแแผแแทแ
แแแแถแ(spam or not) แแถแแแทแแถแแแแแแบแแถแแแแแแแแแแแแถ(แแถแแแแแแบแฌแแแแถแ) แแถแแแแแแแแแถแแแแแแแแแแแแแแแ(แแถแแแฌแแแแถแแ)แแถแแพแแ
#
# แแถแงแแถแ แแแแแพแแแนแแแพแแแแแถแแแแแแแขแแแแแแแแถแแแถแแขแแกแทแ
แแแแผแแทแ
แแแแถแ(spam or not)แแแแแแ แถแแแพแแแแธแแแแแแแแแแแแแแแแธแแแแแแแแแถแแแแแปแแแแแ แถแ
แแแถแแแแแแถแแแแแแปแแแถแแแแ machine learning แ แแผแแแธแกแแแแ แถแแแธแแแแพแแแถแแแแแถแแแแแแแแถแแแแแถแแ
#
# 
#
# แแถแแแแแแแแแถแแแถแแแแแปแแแแแขแแแแขแถแ
แแแแ แแแแถแแนแแแนแแแพแแแปแแแแแบแแถแแแแแแแแแแผแแแถแแแแแแแ
แแแพแแแแแพแแแแถแแแแแแปแspam mailแย
# แแถแงแแถแ แแแแแพแแขแถแ
แแแแแแแผแ
แแแแแแแแถแแแแแแแ
# In[1]:
def checkSpam(text):
    """Return True if *text* contains any known spam phrase.

    Naive rule-based classifier used to motivate the machine-learning
    approach that follows: each phrase is a hand-written rule.
    """
    spam_phrases = (
        'search for friend',
        'check my attached file for more photos',
        'Hey honey',
        # and many conditions
        # and many many conditions
        # .....
    )
    return any(text.find(phrase) != -1 for phrase in spam_phrases)
# In[2]:
# Contains the phrase 'search for friend' -> flagged as spam (True).
checkSpam('I am Mariya. I search for friend. Check my profile here!')

# In[3]:
# No spam phrase present -> False.
checkSpam('Please submit your abstract before 1st March')
# แแผแ
แแแแขแแแแขแถแ
แแแแแแแแถแแแแถแย แแถแแแแแแแแแแแแแแแแแแแแแแถแแแถแแแแแแแแถแแทแแแแแแถแแแ
แแแพแแแแแปแแแถแแแแแแแแแแถแแแแแแแแแแแแแโแแแแแถแแแแแแถแแทแแแถแแแถแแแแแแแแแแแแแแแแกแพแแย แแถแแแแแแแแแถแแแพแแแนแแแแแถแแขแแแธแแทแแธแแถแแแแแแแแแพแ
แแแถแแแแแแถแแแแแแปแแแแแแแแพMachine Learningแย แแ
แแธแแแแแถแแปแ
แผแแแบแแถแขแแแแแแแถแแแธแฏแแแแแแแแบแแถแแถแแแแแแทแแแแถแแแถแแพแแถspam mailแฌแแแถแแแถแย
# ## แ
แแแถแแแแแแถแแแขแแแแปแแแแแแแผแแแแแธแแแขแแแ
# แแแผแแแแแธแแแขแแแแแแ
แแแถแแแแแแถแแแขแแแแปแแแบแแถแแแผแแแแแแแแแแแแแแถแแแแแแแแแแแทแแแแแแ
# $\hat{y}\in\left\{0,1\right\}$แแแแแแแแแแถแแแแแแแแทแแแแแถแแฌแขแแทแแแแแถแแแแแแแปแแแแแถแแแแแถแแแถแแปแ
แผแ(input)$\pmb{x}\in\mathbb{R}^d $ แแทแแแแถแแแถแแแแแแแแแผแแแ$\pmb{w}\in\mathbb{R}^d $แ
#
# $$
# \hat{y}=\left\{\begin{matrix}
# 1 & \pmb{x}^{\top}\pmb{w}>0\\
# 0 & \pmb{x}^{\top}\pmb{w}\leq 0
# \end{matrix}\right.
# $$
#
# แแแแปแแแแแธแแถแแแแแแspam mail แแพแแขแถแ
แแแแแแแถแแแแแแแแแผแแแแแธแแแขแแแแแถแแแพแแถแแแแแแแแแแแ \hat{y}=1 แแแแแถแแspam mail แแทแ \hat{y}=0แแแแแถแแแแถแแแแแแแถแ
# แแพแแแแธแแถแแแแแฝแแแแแแธแแแผแแแแแแ แแพแแแแแพแแงแแถแ แแแแแถแแแฝแแแแแปแแแถแแแแแแspam mailแแผแ
แแถแแแแแแแ
#
# แแแแแแแถ แแแทแ
แแแแแถแแปแ
แผแ(แขแแแแแแแถแ)แแถแแแทแแถแแแ10แ แแพแแแแแแแแแทแ
แแแแแแแแแแ แถแแแธแแแแแแถแแแแแถแแแแแแแแนแแ
แแแฝแแกแ แแแแแแแแแแ0 แฌ 1 แแแแแแแถแแแแแแแแนแแแทแแฝแแแแแแแทแแแพแแถแแแแแแแแนแแแแแแแแถแแฌแแถแแแแแแแถแแแแแปแแขแแแแแแแถแแ
#
# แงแแถแ แแแ แแถแแแแแแแแนแแแแแแแแ
แแแปแแแถแ โassignmentโ, โboyโ, โfileโ, โhelloโ, โloveโ, โmyโ, โphotoโ, โpasswordโ, โschoolโ , โtextโแ แแแแปแแแแแธแแแ แขแแแแแแแถแ โHello my boy, I sent you my photo in the attached fileโ แขแถแ
แแแแ แถแแแถแแแแแแแแแทแ
แแแ $\pmb{x}\in\mathbb{R}^d$ แแถแแแผแ
แแถแแแแแแ
#
# $$
# \pmb{x}=\left(\begin{matrix}\begin{matrix}\begin{matrix}0&1\\\end{matrix}&\begin{matrix}1&1\\\end{matrix}\\\end{matrix}&\begin{matrix}\begin{matrix}0&1\\\end{matrix}&\begin{matrix}\begin{matrix}1&0\\\end{matrix}&\begin{matrix}0&0\\\end{matrix}\\\end{matrix}\\\end{matrix}\\\end{matrix}\right)^\top
# $$
#
# แ
แแแแแแถแแแแถแแแแแแถแแแแแแแผแแแแแธแแแขแแแแแถแแแพ แ
แแแแแแแถแแแถแแแแแแ$\pmb{w}=\left(\begin{matrix}w_1&\cdots&w_{10}\\\end{matrix}\right)^\top $ แแแแปแแแแแถแแแแแแปแแแแแธแขแแแแแแแถแแแถแแแพแแแแผแแแถแแแแแถแแผแ
แแถแแแแแแแแแแแปแแแแแธแแแแแแแแทแแแพแแแแแแแแแปแแแแแถแแแแแแแทแแแแแถแแแแ แขแแแแแแแแแแแแผแแแถแแแแแแแแถแแถspam mailแ
#
# $$
# \pmb{x}^\top\pmb{w}=\left(\begin{matrix}\begin{matrix}\begin{matrix}0&1\\\end{matrix}&\begin{matrix}1&1\\\end{matrix}\\\end{matrix}&\begin{matrix}\begin{matrix}0&1\\\end{matrix}&\begin{matrix}\begin{matrix}1&0\\\end{matrix}&\begin{matrix}0&0\\\end{matrix}\\\end{matrix}\\\end{matrix}\\\end{matrix}\right)\left(\begin{matrix}\begin{matrix}w_1\\w_2\\\end{matrix}\\\vdots\\\begin{matrix}w_9\\w_{10}\\\end{matrix}\\\end{matrix}\right)=w_2+w_3+w_4+w_6+w_7
# $$
#
# แแแปแแแแแแผแ
แแแแขแแแแแแแแแแแถแแแแถแ แแแแ แถแแแแแถแแแแพแแแบแแถ แแพแแนแแแแแผแแแแแแแแแแแแแแแแถแแแถแแแแแแแแแแแแแผแแแแแแแแแแแแถแ แแถแแแทแแธแแถแแแแแแแถแ
แแแพแแแแแผแแแถแแแแแพ แแแแแแปแแขแแแแแแแแแแพแแแนแแแแแถแแแแผแแแแแแแแแแแแแแLogisticแ
#
# ## แแแแแแแแแแแLogistic (Logistic Regression)
# แแแแแแแแแแแLogisticแแบแแถแแแผแแแแแธแแแขแแแแแแ
แแแถแแแแแแถแแแขแแแแปแแแฝแแแแแแแ แแแแแแแผแแถแแแถแ
# แแแแแแแแ$p\left(y|\pmb{x}\right)$แแแแแบแแแแผแแถแแแแแแแแแแแแแแทแแแแแแyแแแแผแแแถแแแแแแแแแถแแ
แแแแแขแแแแแแแแแ$ \pmb{x} $แแแแผแแแถแแแแแถแแผแ
แแถแแแแแแแ
#
# $$
# P\left(\hat{y}=1|\pmb{x}\right)=\sigma\left(\pmb{x}^\top\pmb{w}\right)=\frac{\exp{\left(\pmb{x}^\top\pmb{w}\right)}}{1+\exp{\left(\pmb{x}^\top\pmb{w}\right)}}=\frac{1}{1+\exp{\left(-\pmb{x}^\top\pmb{w}\right)}}
# $$
# $$
# P\left(\hat{y}=0|\pmb{x}\right)=1-P\left(\hat{y}=1|\pmb{x}\right)=1-\sigma\left(\pmb{x}^\top\pmb{w}\right)=\sigma\left(-\pmb{x}^\top\pmb{w}\right)
# $$
# แแ
แแธแแแ $\sigma\left(x\right)$ แแถแขแแปแแแแsigmaแแแแแแแผแแแถแแแแแถแแผแ
แแถแแแแแแแ
#
# $$
# \sigma\left(x\right)=\frac{\exp{\left(x\right)}}{1+\exp{\left(x\right)}}=\frac{1}{1+\exp{\left(-x\right)}}
# $$
#
# In[4]:
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic function sigma(x) = 1 / (1 + e^-x), vectorized via numpy."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# Plot the sigmoid over [-8, 8]: an S-curve saturating at 0 and 1,
# crossing 0.5 at x = 0.
x = np.linspace(-8,8,100)
plt.plot(x,sigmoid(x),label='sigmoid')
plt.grid()
plt.ylim([-0.1,1.1])
plt.title("Sigmoid Function")
plt.show()
# แ
แแแแแแทแแแแแแ$\pmb{x}$ แแแแปแแแแแธแแแแแแแผแแถแ$P\left(\hat{y}=1|\pmb{x}\right)>0.5 $แแแแแทแแแแแแแแแแผแแแถแแแแแแแแแถแแแถแแแแปแแ
แแแถแแแแแแถแแแแแแปแ$\hat{y}=1 $แแแถแแแแแแแแแแแแแแบแแแแผแแแแแถแแนแแแแแแแแแแแแแปแแแแแถแแแแทแแแแแถแแแแแแถแแแแแ แถแแแถแแแพแแ
#
# $$
# P\left(\hat{y}=1|\pmb{x}\right)>0.5\Leftrightarrow\frac{1}{1+\exp{\left(-\pmb{x}^\top\pmb{w}\right)}}>\frac{1}{2}\ \Leftrightarrow\exp{\left(-\pmb{x}^\top\pmb{w}\right)}<1\ \Leftrightarrow\ \pmb{x}^\top\pmb{w}>0
# $$
#
# ## แแแแแทแแแถแแแแแแแทแแแแแแ Likelihood
# แงแแแถแแถ แแแถแแแถแแแแแแแแแแแผแแแแแแแแแแแแแแLogisticแแแแผแแแถแแแแแแแแผแ
แแถแแแแแแแ
#
# $$
# \pmb{w}=\left(\begin{matrix}\begin{matrix}\begin{matrix}-2&1\\\end{matrix}&\begin{matrix}1&0\\\end{matrix}\\\end{matrix}&\begin{matrix}\begin{matrix}1&0\\\end{matrix}&\begin{matrix}\begin{matrix}1&1\\\end{matrix}&\begin{matrix}-1&0\\\end{matrix}\\\end{matrix}\\\end{matrix}\\\end{matrix}\right)^\top
# $$
#
# แแแแปแแแแแธแแแ แแแแแแแแแปแแแแแถแแแแแถแแแแทแ
แแแแขแแแแแแแแแแแแแทแแแแแแ$ \pmb{x} $แแทแแแแถแแแถแแแแแแแขแถแ
# แแแแถแแทแแแแแแแถแแแถแแแแแแแแแแผแแถแแแผแ
แแถแแแแแแแ
#
# $$
# \pmb{x}^\top\pmb{w}=\left(\begin{matrix}\begin{matrix}\begin{matrix}0&1\\\end{matrix}&\begin{matrix}1&1\\\end{matrix}\\\end{matrix}&\begin{matrix}\begin{matrix}0&1\\\end{matrix}&\begin{matrix}\begin{matrix}1&0\\\end{matrix}&\begin{matrix}0&0\\\end{matrix}\\\end{matrix}\\\end{matrix}\\\end{matrix}\right)\left(\begin{matrix}\begin{matrix}-2\\1\\\end{matrix}\\\vdots\\\begin{matrix}-1\\0\\\end{matrix}\\\end{matrix}\right)=3
# $$
#
# $$
# P\left(\hat{y}=1|\pmb{x}\right)=\sigma\left(3\right)=\frac{1}{1+\exp{\left(-3\right)}}=0.95
# $$
#
# แ
แแแแแแแแแแแแแแแพแแขแถแ
แแแแแแถแแแถ แ
แแแแแขแแแแแแแถแแแแแแถแแแแแแ แแแผแแแแแถแแแแถแแแแแแถแแแถแแถspam mailแแแแแแแแแแแแผแแถแ0.95แแแแแแแแแแแแแถแ0.5 แ แแแปแแแแแพแแแถแแแผแแแแแแแแแแแถแแแถแแถแแถspam mailแ
#
# แแแแปแแแถแแแทแแถแแแแถแแถแแแพแแแแแแแแแนแแแแ แ
แแแแแแทแแแแแแ$\pmb{x}$แแทแแแแถแแแถแแแแแแ$\pmb{w} $ แแแแแแแผแแแถแแแแแแแฒแแแแถแแแแถแแแแแแถแแแแแแแแผแแแแแแแผแแแถแแแแแถแแถแแแทแแธแแแแแถแแแแแแถแแแแถแแแพแ แแแแแแแแพแแแทแแทแแแแแแแธแแแแแแแแแแแทแแแแแแแแแแผแแแถแแแแแแแแถแแแแถแแ แแแแแถแแแถแแแแแแแขแถแ
แแแแผแแแถแแแแแถแแแแแแผแแ แแ
แแธแแแ แแพแแแทแแแแถแแพแแแแแทแแแถแแแแแแแทแแแแแแแแแแแแผแแแแแแถแแแแแแถแแแถแแแแแแแแแแแถแแแแแแแแแผแแถแ${\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)$แแผแ
แแถแแแแแแ แแทแแแแแแแ แ
แแถ แแแแแทแแแถแแแ Likelihoodแแแแแแบแแแแแทแแแแแแแผแแแแขแถแ
แแแถแแแแแแถแแแถแแแแแนแแแแแผแ(แแทแแแถแแถแแแ)แแพแแแแแแแแทแแแแแแแแทแแฝแแแ
#
# $$
# {\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)=P\left(\hat{y}=y|\pmb{x}\right)
# $$
#
# แงแแถแ แแแแแแแปแแแแแธแขแแแแแแแถแแแถแแแพ แแแแแแแถแแแแแแแแทแแแแแแแแทแแแบ$y=1$ แแ
แแแแแแแถแแแถแแแแแแ$\pmb{w}$ แแแแแแแผแแแถแแแแแแแฒแแ แแถแแแแถแแแแแแถแแแแแแแแผแแแแแถแแแพแแบ
#
# $$
# P\left(\hat{y}=1|\pmb{x}\right)=0.95
# $$
# แ แแแปแแแแแแแแทแแแถแแแLikelihood :
#
# $$
# {\hat{l}}_{\left(\pmb{x},1\right)}\left(\pmb{w}\right)=P\left(\hat{y}=1|\pmb{x}\right)=0.95
# $$
#
# แแแแแถแแแแแแถ แแแผแแแแขแถแ
แแแแ
แแแแถแแถspam mailแแถแแแแแนแแแแแผแแแแแปแแแแแแทแ95%(แแแแผแแถแ0.95)แ แแแแปแแแ
แแทแ แงแแแถแแถแแพแแแถแแขแแแแแแแถแแแทแแแแspam mail แแฝแแแแแแ โPlease submit your assignment file by tomorrow morningโ แ แแแแแแทแ
แแแ
#
# $$
# \pmb{x}=\left(\begin{matrix}\begin{matrix}\begin{matrix}1&0\\\end{matrix}&\begin{matrix}1&0\\\end{matrix}\\\end{matrix}&\begin{matrix}\begin{matrix}0&0\\\end{matrix}&\begin{matrix}\begin{matrix}0&0\\\end{matrix}&\begin{matrix}0&0\\\end{matrix}\\\end{matrix}\\\end{matrix}\\\end{matrix}\right)^\top
# $$
#
# แ
#
# $$
# \pmb{x}^\top\pmb{w}=\left(\begin{matrix}\begin{matrix}\begin{matrix}1&0\\\end{matrix}&\begin{matrix}1&0\\\end{matrix}\\\end{matrix}&\begin{matrix}\begin{matrix}0&0\\\end{matrix}&\begin{matrix}\begin{matrix}0&0\\\end{matrix}&\begin{matrix}0&0\\\end{matrix}\\\end{matrix}\\\end{matrix}\\\end{matrix}\right)\left(\begin{matrix}\begin{matrix}-2\\1\\\end{matrix}\\\vdots\\\begin{matrix}-1\\0\\\end{matrix}\\\end{matrix}\right)=-1
# $$
#
# $$
# P\left(\hat{y}=1|\pmb{x}\right)=\sigma\left(-1\right)=\frac{1}{1+\exp{\left(1\right)}}=0.27
# $$
#
# แแแแ
แแแแพแแแทแแแบแแทแแแแแแถspam mail (y=0) แ แแแปแแแแแแแแทแแแถแแแLikelihood :
#
# $$
# {\hat{l}}_{\left(\pmb{x},0\right)}\left(\pmb{w}\right)=P\left(\hat{y}=0|\pmb{x}\right)=1-P\left(\hat{y}=1|\pmb{x}\right)=1-0.27=0.73
# $$
#
# แแแแแถแแแแแแถแแแผแแแแขแถแ
แแแแ
แแแแถแแทแแแแspam mailแแถแแแแแนแแแแแผแแแแแปแแแแแแทแ73%(แแแแผแแถแ0.73)แ
#
# แ
แแแแแแแแแแแแแแแแแทแแแแแแ แแพแแขแถแ
แแแแแแแแแแแแแแแแทแแแถแแแแแถแแแแแแแแแแแแ
#
# $$
# {\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)=P\left(\hat{y}=y|\pmb{x}\right)=\left\{\begin{matrix}
# P(\hat{y}=1|x)& (y=1)\\
# P(\hat{y}=0|x)& (y=0)
# \end{matrix}\right. =ฯ^{y}(1-ฯ)^{1-y}
# $$
#
# แแแ
#
# $$\pi=P\left(\hat{y}=1|\pmb{x}\right)=\sigma\left(\pmb{x}^\top\pmb{w}\right)$$
#
# แ
#
# ## แแถแแแแถแแแแแแถแแแแแปแแแแแแแแแแแแแแถแ Maximum Likelihood
# แแ
แแแแปแแแแผแแแแแแแแแแแแแแแแธแแแขแแแ แแพแแแถแแแแแแแแแแแแแแแปแแแแแแแแแแแแแแแแแแแพแขแแแ
# แแแแถแแแแแแพแแแแแแแแแแทแแแแแขแแแแแถแแแแแแแแแถแแแแแแถแแแแแแแผแแแแแทแแแแแแแแทแแแแแถแแแแแขแแแแแแแแ
แแแแแแแแแแแถแแแ แแแแปแแแแผแแแแแแแแแแแแแแLogistic แแพแแขแถแ
แแแแแแแแแแแแแแแถแแแถแแแแแแแแแแแแแผแแแแแแแแแแพแขแแทแแแแถแแแแแแพแแแแแทแแแถแแแแแแแแแทแแแแแแแแแแแแผแแแแขแถแ
แแแถแแแแแแถแแแถแแ แแทแแธแแถแแแแแแแแแแแแผแแแถแแแแ แ
แแถ Maximum Likelihood Estimation(MLE)แ
#
# แ
แแแแแแแแปแแแทแแแแแแแแถแแแขแแ$\mathcal{D}$แแแแแถแแ
แแแฝแ$N$ แแพแแแแแแแแแแแทแแแถแแแแแแแทแแแแแแ(likelihood)แแแแแบแแแแแทแแแแแแแผแแแแขแถแ
แแแถแแแแแแถแแแถแแแแแนแแแแแผแแ
แแแแแแแแแแแทแแแแแแแแถแแแขแแแแแแแแแแแแแถแแแแแแแ แแ
แแธแแแแแพแแแแแแแแถแแแถแแแแแแแแแแทแแแแแแแแถแแแขแแแแบแฏแแแถแแแแแทแแแถแแฏแแแแแแถแแแถแ(i.i.d : independent and identically dristributed)แ
#
# $$
# {\hat{L}}_\mathcal{D}\left(\pmb{w}\right)=\prod_{i=1}^{N}{{\hat{l}}_{\left(\pmb{x}_i,y_i\right)}\left(\pmb{w}\right)}
# $$
#
# แแแแแถแ แแแแแทแแแถแแแแแแแทแแแแแแ(likelihood)แแบแแถแแแแแแแแแผแแถแ แ แแแปแแแแแแแแแแแแแแถแแผแ
แแแแถแแ แแแแแแแพแฑแแแแแแแแแแแปแแแถแแแแแแผแ
แแแแถแแแแแแ
แแแฝแแแทแแแแแแแแถแแ
แแแพแแ แแพแแแแธแแแแ
แแแแผแแแแแ แถแแแแแแแผแ
แแแแแแแปแแแถแแแแแถแแถแแฝแแแปแแแแแผแแแ แแ
แแธแแแแแพแแแทแแแแถแแแแถแแแแแแพแแแแแแแแแถแแธแแแแแแแถแ แแถแแแแแพแแแแแแแแทแแแแแแถแแแแแแแถแแแแแพแแแแถแแแแแกแพแ แแแแแแขแแปแแแแแแแแถแแธแแแถแขแแปแแแแแแพแแแถแ
แแแถแแ
#
# $$
# \log{{\hat{L}}_\mathcal{D}\left(\pmb{w}\right)}=\log{\prod_{i=1}^{N}{{\hat{l}}_{\left(\pmb{x}_i,y_i\right)}\left(\pmb{w}\right)}}=\sum_{i=1}^{N}\log{{\hat{l}}_{\left(\pmb{x}_i,y_i\right)}\left(\pmb{w}\right)}
# $$
#
# แแพแแแแธแแถแแแแแฝแแแแแปแแแถแแแแแแแแถแแแแแ แถแแแแถแแแแ แแพแแแแแผแแแธแแถแแแแแพแขแแทแแแแถแแแแแแพLikelihood แแ
แแถแแถแแแแแพแขแแแแแแแถแแแแแแแแแปแแแแแแแแแถแแแพ -1 แ
#
# $$
# {\hat{\mathcal{L}}}_\mathcal{D}\left(\pmb{w}\right)=-\log{{\hat{L}}_\mathcal{D}\left(\pmb{w}\right)}=-\sum_{i=1}^{N}\log{{\hat{l}}_{\left(\pmb{x}_i,y_i\right)}\left(\pmb{w}\right)}
# $$
#
# แแแแปแแแแแธแแพแแ
แแแแทแแแแถแแแแแแแแแแแแแ
แผแแแแแแ Regularization (Ridge) แ
แผแแแแแปแแแแผแแแ Likehoodแแแแแแแผแแแแแพแแแแถแแแแ แขแถแ
แแแแผแแแ
แแแแแแแถแแแแแแแแถแแแแแแแ
#
# $$
# {\hat{\mathcal{L}}}_\mathcal{D}\left(\pmb{w}\right)=-\log{{\hat{L}}_\mathcal{D}\left(\pmb{w}\right)}+\alpha\left||\pmb{w}|\right|_2^2=-\sum_{i=1}^{N}\log{\hat{l}_{(x_i,y_i)}w}+ฮฑ\left||w|\right|_2^2 ใ(ฮฑ>0)
# $$
#
# ## แแถแแแแแแแแถแแแถแแแแแแทแแธ SGD
# แแพแแแแธแแแแพแขแแแแแแแถแแแแ${\hat{\mathcal{L}}}_\mathcal{D}\left(\pmb{w}\right) $แแพแแแนแแแแแพแแแแถแแแแทแแธSGDแแแแแถแแแทแแแแถแแแแปแแขแแแแแแแปแแ
# แแถแแแแผแแแพแแแทแแทแแแแแพแขแแปแแแแแแแแธแแ $\frac{\partial}{\partial\pmb{w}}\log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)} $แ
#
# $$
# \log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)}=\log{\left(\pi^y\left(1-\pi\right)^{1-y}\right)}=y\log{\pi}+\left(1-y\right)\log{\left(1-\pi\right)}
# $$
#
# $$
# \frac{\partial}{\partial\pmb{w}}\log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)}=\frac{y}{\pi}\frac{\partial\pi}{\partial\pmb{w}}+\frac{1-y}{1-\pi}\times\left(-\frac{\partial\pi}{\partial\pmb{w}}\right)=\frac{y-\pi}{\pi\left(1-\pi\right)}\frac{\partial\pi}{\partial\pmb{w}}
# $$
#
# แแแแแถแแแแธแแแ แแพแแแแธแแแแถ $\frac{\partial\pi}{\partial\pmb{w}}$ แแพแแแทแแทแแแแแพแแแแธแแแแแขแแปแแแแSigmoidแ
#
# $$
# \frac{\partial}{\partial a}\sigma\left(a\right)=\frac{\partial}{\partial a}\left\{\frac{1}{1+\exp{\left(-a\right)}}\right\}=-\frac{\frac{\partial}{\partial a}\exp{\left(-a\right)}}{\left(1+\exp{\left(-a\right)}\right)^2}=\frac{1}{1+\exp{\left(-a\right)}}\times\frac{\exp{\left(-a\right)}}{1+\exp{\left(-a\right)}}
# $$
#
# $$
# \frac{\partial}{\partial a}\sigma\left(a\right)=\sigma\left(a\right)\left(1-\sigma\left(a\right)\right)
# \frac{\partial\pi}{\partial\pmb{w}}=\frac{\partial}{\partial\pmb{w}}P\left(\hat{y}=1|\pmb{x}\right)=\frac{\partial}{\partial\pmb{w}}\sigma\left(\pmb{x}^\top\pmb{w}\right)=\sigma\left(\pmb{x}^\top\pmb{w}\right)\left(1-\sigma\left(\pmb{x}^\top\pmb{w}\right)\right)=\pi\left(1-\pi\right)
# $$
#
# แแ
แแธแแแ
# $$
# a=\pmb{x}^\top\pmb{w}\ ,\ \frac{\partial a}{\partial\pmb{w}}=\frac{\partial}{\partial\pmb{w}}\left(\pmb{x}^\top\pmb{w}\right)=\pmb{x}
# $$
#
# แ แแแปแแแ
#
# $$
# \frac{\partial}{\partial\pmb{w}}\log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)}=\frac{y-\pi}{\pi\left(1-\pi\right)}\frac{\partial\pi}{\partial\pmb{w}}=\frac{y-\pi}{\pi\left(1-\pi\right)}\frac{\partial\pi}{\partial a}\frac{\partial a}{\partial\pmb{w}}=\frac{y-\pi}{\pi\left(1-\pi\right)}\pi\left(1-\pi\right)\pmb{x}
# $$
#
# $$
# \frac{\partial}{\partial\pmb{w}}\log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)}=\left(y-\pi\right)\pmb{x}
# $$
#
# แแผแ
แแแ แแถแแแแแแทแแธSGD แแแแแแแแแแถแแแถแแแแแแ$ \pmb{w} $แแแแแแแพแแแแถแแแแแแพแแแแแแแแแแทแแแถแแแแแแแทแแแแแแแแแแผแแแถแแแแแถแแแแแแแถแแแแแแผแแแแแแแแผแ
แแถแแแแแแแ
#
# $$
# \pmb{w}^{\left(t+1\right)}=\pmb{w}^{\left(t\right)}-\eta_t\left.\frac{\partial}{\partial\pmb{w}}\left\{-\log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)}\right\}\right|_{\pmb{w}=\pmb{w}^{\left(\pmb{t}\right)}}
# $$
#
# $$
# \pmb{w}^{\left(t+1\right)}=\pmb{w}^{\left(t\right)}+\eta_t\left.\frac{\partial}{\partial\pmb{w}}\log{{\hat{l}}_{\left(\pmb{x},y\right)}\left(\pmb{w}\right)}\right|_{\pmb{w}=\pmb{w}^{\left(\pmb{t}\right)}}
# $$
#
# $$
# \pmb{w}^{\left(t+1\right)}=\pmb{w}^{\left(t\right)}+\eta_t\left(y-\pi^{\left(t\right)}\right)\pmb{x}
# $$
#
# แแ
แแธแแแ $\pi^{\left(t\right)}$ แแถแแแแแแแแแผแแถแแแแแแแแถแแแแแแผแแแแแถแแฝแแแแแแแแแถแแแถแแแแแแแแ
แแแแถแแแแถแ$t$ แแแแถแแแแแถแแแแแแผแแแแแแแแแถแแแถแแแแแแ แแแแแบ $\pi^{\left(t\right)}=\sigma\left(\pmb{x}^\top\pmb{w}^{\left(t\right)}\right)$แ$ \eta_t$ แแถlearning-rate แแทแ $y $แแถแ
แแแถแแแแแแถแแแแแแปแแแทแแแแแถแแแแแแทแแแแแแ$\pmb{x}$แ
#
# แแแแปแแแแแธแแแแพRidge Regularization แแแแแแแแถแแแพแแนแแแแแแแ
แแถแแแแแแแแถแแแแแแแ
#
# $$
# \pmb{w}^{\left(t+1\right)}=\left(1-\frac{2\alpha\eta_t}{N}\right)\pmb{w}^{\left(t\right)}+\eta_t\left(y-\pi^{\left(t\right)}\right)\pmb{x}
# $$
# ## แแถแแแถแแแแแแ
# แแถแแแธแแทแแแแถแแแผแแแแแแแแแแแแแแแแธแแแขแแแ แแพแแแถแแแแแแแแแผแแแแแถแแแแแแแแแแแแแแทแแแแแขแแ แฌ
# แแแแปแR^2 แ แแ
แแแแธแแแผแแแLogisticแ
แแแแแแแแ แถแ
แแแถแแแแแแถแแแแแแปแแแแ แแพแแขแถแ
แแถแแแแแแแแถแแแแแแแแแLikelihood แแถแแ แแแปแแแแแแถแแแทแแแแถแแพแแแแแLikelihood แแถแแแถแแแทแแถแแแแแปแแแถแแแแแแแถแแแแแถแแแแนแแแธแแแถแแแแแแ
แแแแแแพแแแ แแแปแแแแแแแปแแแแแ แถแ
แแแถแแแแแแถแแแแแแปแแแแแแถแแแแแแแถแแแแแแแถแแแแแแแแพแแแผแแแแแแแผแแแถแแแแแถแแผแ
แแถแแแแแแแ
# | | $y=1$ | $y=0$ | Total |
# |- |- |- |- |
# | $\hat{y}=1$ | TP<br>(true positive) | FP<br>(false positive) | แ
แแแฝแแแแแธแแแแแแแผแแแถแแแแแแแแแถแแแถ$\hat{y}=1$ |
# | $\hat{y}=0$ | FN<br>(false negative) | TN<br>(true negative) | แ
แแแฝแแแแแธแแแแแแแผแแแถแแแแแแแแแถแแแถ$\hat{y}=0$ |
# | Total | แ
แแแฝแแแทแแแแแแ$y=1$ | แ
แแแฝแแแทแแแแแแ$y=0$ | แ
แแแฝแแแทแแแแแแแแแปแ |
# Accuracyแแบแแแแ แถแแแธแขแแแแถแแแแถแแแแแแแแแถแแแถแแแแแนแแแแแผแแแแแแแแผแแแแแแแแทแแแแแ
แแแ
แแแถแแแแแแถแแแแแแปแแแแแแแทแแแแแแแแขแแแแถแแแแแผแ
แแแแถแแทแแแทแแแแปแแแแขแแแแแแแพแแแแแฝแแแถแแแแแนแแแแแผแแแแแปแแแถแแแแแกแแแถแแฝแแแแแ
#
# Precisionแแบแแแแ
แแแแขแแแแถแแแแแแธแแแแแทแแแถแแ
แแแแปแแแแแปแ$ y=1 $แแแแแแแปแแ
แแแแแแแแธแแแแแแผแแแแแถแแแแแแแแแถแแแถแแแแทแแแแแปแแแแแปแ$y=1$แ Recallแแแแ
แแแแขแแแแถแแแแแแธแแแแแแผแแแแแถแแแแแแแแแถแแแถแแแแทแแแแแปแแแแแปแ$y=1$ แแแแปแแ
แแแแแแทแแแแแแแแแแปแแแแแปแ$y=1$แแแปแแ
#
# แแถแแผแแ
แแถแแถแแถแแแแแถแแแแแปแแแถแแแแแแพแแแแผแแแแแแแแถแแแถแแPrecisionแแทแRecallแแแแแแแผแ
แแแแถ(trade-off relation)แแ แแแปแแแแแพแแแแธแแถแแแแแแแแฝแแแพแแแแแถแแแแถแแแแธแแแแแแถแแแแแพแแแแแแแแแแแแพแแแแแแแถแแแแธแแแแแผแแแถแแแแแพแแแแแบแแแแแ F1-scoreแ
#
# ## แขแแปแแแแแแพแแถแแแแแพแ
แแแถแแแแแแถแแแแธแแแแแปแ : spam mail classification
# In[5]:
import math
import random
import numpy as np
import matplotlib.pyplot as plt
# ### แแแแ
แแแทแแแแแแ
#
# แแพแแแถแแแ Dataset แแธย https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip
# In[6]:
# Download the SMS Spam Collection dataset (UCI ML repository).
get_ipython().system('wget https://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip')

# In[7]:
get_ipython().system('unzip smsspamcollection.zip')

# In[11]:
# Peek at the first lines: each row is "<label>\t<message text>".
get_ipython().system('head SMSSpamCollection')
# spam $\to$ y=1
#
# ham $\to$ y=0
# In[12]:
def tokenize(s):
    """Split *s* on single spaces and strip trailing periods from each token."""
    return [token.rstrip('.') for token in s.split(' ')]
# Parse the SMS Spam Collection: each line is "<label>\t<text>".
# D becomes a list of (token_list, label) pairs with spam -> 1, ham -> 0.
D = []
with open('SMSSpamCollection') as fi:
    for line in fi:
        fields = line.strip('\n').split('\t')
        x = tokenize(fields[1])
        y = 1 if fields[0] == 'spam' else 0
        D.append((x, y))
print("Number of data N=",len(D))
print("Example:",D[10])
# แแแแ
แแแทแแแแแแแแแแแถแแแแแแแแแแถแแแถแแแแแแแแแผแแแ(training data) แแทแแแแแแถแแแแถแแแแแแ(test data)
# In[13]:
# Shuffle, then hold out everything after the first 5000 rows for testing.
import random
random.shuffle(D)
Dtrain = D[:5000]
Dtest = D[5000:]
# ### Logistic Regression Model with Python
# แแแแแแแแแแแแแแผแแแแแแถแแแถแแแแแแแแแแแแแผแแแแแแย 0. แแแแแแแแแแผแแแแแแแแแถ
# In[14]:
# Weight vector as a dict: one zero-initialised weight per distinct token
# seen in the training data (a sparse representation of w).
W = {}
for x, y in Dtrain:
    for a in x:
        W.setdefault(a, 0.)
# In[15]:
def score(W, x):
    """Linear score x^T w: the sum of weights of the tokens in *x*.

    Tokens absent from *W* (unseen at training time) contribute 0.
    """
    return sum((W.get(token, 0.) for token in x), 0.)
# In[16]:
def sigmoid(a):
    """Numerically stable logistic function 1 / (1 + e^-a).

    For a < 0 the algebraically equivalent form 1 - 1/(1 + e^a) is used so
    that exp() never receives a large positive argument (no overflow).
    """
    if a < 0:
        return 1. - 1 / (1 + math.exp(a))
    return 1 / (1 + math.exp(-a))
# แแแแแแแแแแแแแถแแแถแแแแแแแแแแแแแพ SGD
# In[17]:
# Train with SGD: for each example, nudge every active token's weight by
# eta * (y - pi) -- the per-example gradient of the logistic log-likelihood.
# Fix: dropped the dead `loss = 0.` accumulator, which was never updated
# nor read anywhere.
eta = 0.1
for t in range(1000):
    for x, y in Dtrain:
        pi = sigmoid(score(W, x))
        for a in x:
            W[a] += eta * (y - pi)
# แแแแพย Accuracy แแถแแแแแถแแแแแแแถแแแแถแแแแแแแแแผแแแ
# In[18]:
def accuracy(W, Dtest):
    """Classification accuracy of the sign rule (score > 0 => predict 1).

    *Dtest* is an iterable of (token_list, label) pairs with labels in {0, 1}.
    """
    hits = 0
    for x, y in Dtest:
        predicted = 1 if score(W, x) > 0 else 0
        if predicted == y:
            hits += 1
    return hits / len(Dtest)
# In[19]:
accuracy(W, Dtest)

# Inspect the learned weights: which tokens push a message toward / away
# from the spam class.
# In[24]:
F = sorted(W.items(), key=lambda x:x[1])
#
# Tokens with the most negative weights: least indicative of spam mail.
# In[25]:
F[:20]

# Tokens with the most positive weights: most indicative of spam mail.
# In[26]:
F[-20:]
# In[ ]:
|
from setuptools import setup
# Packaging metadata for the `reorg` command-line tool.
setup(
    name="reorg",
    version="0.1.0",
    license="MIT",
    author="Michael Hwang",
    description="Command-line to reorganize documents stored in CLB6 structure to..",
    packages=["reorg"],
    install_requires=[],  # stdlib-only: no runtime dependencies
    entry_points={
        # Installs a `reorg` console command mapped to reorg.cli:run.
        "console_scripts": [
            "reorg = reorg.cli:run"
        ]
    }
)
|
import os,sys
import numpy as np
import scipy.sparse as sp
caffe_root = os.environ["CAFFE_ROOT"]
sys.path.insert(0, caffe_root + 'python')
os.chdir(caffe_root)
import caffe
def dump2file(mat, filename):
    """Serialize a dense float32 matrix to *filename* in a raw CSR binary layout.

    File layout: int32 header [rows, cols, nnz], then the CSR arrays in
    order: data (float32), indptr, indices (integer dtype as produced by
    scipy).  Asserts the input is float32 so the on-disk layout is fixed.
    """
    assert mat.dtype == np.float32
    csr_m = sp.csr_matrix(mat)
    # `with` guarantees the handle is closed even if a tofile() call raises
    # (the original opened/closed the file manually and could leak it).
    with open(filename, 'wb') as f:
        m, n = mat.shape
        np.array([m, n, csr_m.getnnz()], dtype='int32').tofile(f)
        csr_m.data.tofile(f)
        csr_m.indptr.tofile(f)
        csr_m.indices.tofile(f)
# Fully-connected layers to export, per pruned network checkpoint.
layers = {'lenet5':['ip1', 'ip2'],
          'lenet_300':['ip1', 'ip2', 'ip3'],
          'alexnet':['fc6', 'fc7', 'fc8'],
          'vgg': ['fc6', 'fc7', 'fc8']}

nets = layers.keys()

for net_name in nets:
    # Map each network name to its deploy prototxt and model checkpoint.
    if net_name == 'lenet5':
        prototxt = '3_prototxt_solver/lenet5/train_val.prototxt'
        caffemodel = '4_model_checkpoint/lenet5/lenet5.caffemodel'
    elif net_name == 'alexnet':
        prototxt = '3_prototxt_solver/L2/train_val.prototxt'
        caffemodel = '4_model_checkpoint/alexnet/alexnet9x.caffemodel'
    elif net_name == 'vgg':
        prototxt = '3_prototxt_solver/vgg16/train_val.prototxt'
        caffemodel = '4_model_checkpoint/vgg16/vgg16_13x.caffemodel'
    elif net_name == 'lenet_300':
        prototxt = '3_prototxt_solver/lenet_300_100/train_val.prototxt'
        caffemodel = '4_model_checkpoint/lenet_300_100/lenet300_100_9x.caffemodel'

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    net.forward()  # one forward pass so blob activations are populated
    for layer in layers[net_name]:
        data_dir = '/home/maohz12/dnn_simulator/cusparse/data/%s_%s'%(net_name, layer)
        os.system('mkdir ' + data_dir)
        # Export the layer's weight matrix both dense and CSR-sparse.
        mat = net.params[layer][0].data
        mat.tofile(data_dir + '/dense.dat')
        dump2file(mat, data_dir + '/matrix.dat')
        # Input activations for this layer = output blob of the previous one.
        # NOTE(review): dict .keys() with .index() only works on Python 2
        # (dict_keys has no .index on Python 3); this script appears to
        # target Python-2-era Caffe.
        all_layers = net.blobs.keys()
        layer_previous = all_layers[all_layers.index(layer)-1]
        if len(net.blobs[layer_previous].data.shape) == 1: # In case of lenet300-100
            layer_previous = 'data'
        act = net.blobs[layer_previous].data.flatten()
        # (x + |x|) / 2 == max(x, 0): apply ReLU to the activations.
        act_relu = (act + abs(act)) / 2
        act_relu.tofile(data_dir + '/act.dat')
|
"""
Andrew Olin
axo4762
File to kill unwanted Services
"""
import os
# Snapshot running services to a file, show them, then stop the ones chosen.
os.system("service --status-all > currentServices.txt")

# Fix: read the snapshot ONCE into memory.  The original iterated the same
# open file handle twice; the second loop never ran because the handle was
# already exhausted, and the handle itself was never closed.
with open("currentServices.txt", "r") as snapshot:
    service_lines = snapshot.readlines()

print("Current running services:")
for line in service_lines:
    # 'service --status-all' lines look like " [ + ]  name"; index 3 is the
    # run-state flag.  Assumes every line is at least 4 chars -- TODO confirm.
    if line[3] == '+':
        print(line)

processes = input("What processes should be killed? EX.(cron allOfThem): ")
processes = processes.split(" ")

# Stop cron explicitly when it is running (this was the intent of the
# original's dead second pass over the file).
for line in service_lines:
    if line[3] == '+':
        fields = line.split(" ")
        if fields[5] == 'cron':
            os.system("sudo systemctl stop cron")

for item in processes:
    os.system("sudo systemctl stop " + item)
import os
import sys
# Pin the working directory and import path to the project root.
os.chdir('/home/peitian_zhang/Codes/News-Recommendation')
sys.path.append('/home/peitian_zhang/Codes/News-Recommendation')

import torch
from utils.utils import evaluate,train,prepare,load_hparams,test

if __name__ == "__main__":
    # Base hyper-parameters; load_hparams presumably merges in command-line
    # overrides including 'select', 'mode' and 'device' -- TODO confirm.
    hparams = {
        'name':'baseline-mha-cnn',
        'dropout_p':0.2,
        'query_dim':200,
        'embedding_dim':300,
        'value_dim':16,
        'head_num':16,
    }

    hparams = load_hparams(hparams)
    device = torch.device(hparams['device'])
    vocab, loaders = prepare(hparams)

    # Model variant is chosen by the candidate-selection strategy.
    if hparams['select'] == 'greedy':
        from models.baseline_MHA_MHA import GCAModel_greedy
        gcaModel = GCAModel_greedy(vocab=vocab,hparams=hparams).to(device)
    elif hparams['select'] == 'pipeline':
        from models.baseline_MHA_MHA import GCAModel_pipeline
        gcaModel = GCAModel_pipeline(vocab=vocab,hparams=hparams).to(device)

    # Dispatch on run mode: dev evaluation, training, or test inference.
    if hparams['mode'] == 'dev':
        evaluate(gcaModel,hparams,loaders[0],load=True)
    elif hparams['mode'] == 'train':
        train(gcaModel, hparams, loaders)
    elif hparams['mode'] == 'test':
        test(gcaModel, hparams, loaders[0])
# Read two integers and print the largest of their sum, difference and product.
a, b = map(int, input().split())
best = max(a + b, a - b, a * b)
print("{}".format(best))
|
#!/bin/python3
def richie_rich(s, k):
    """HackerRank 'Highest Value Palindrome'.

    Given *s* as a list of digit ints and a budget of *k* single-digit
    changes, return (as a string) the lexicographically largest palindrome
    reachable within budget, or -1 when no palindrome is possible.
    """
    n = len(s)
    half = n // 2
    left = s[:half]
    # Mirror of the right half, so index i pairs left[i] with mirrored[i].
    mirrored = list(reversed(s[half:] if n % 2 == 0 else s[half + 1:]))
    forced = [0] * half

    # Pass 1: every mismatched pair costs one mandatory change.
    for i in range(half):
        if left[i] != mirrored[i]:
            if k < 1:
                return -1
            forced[i] = 1
            k -= 1

    # Pass 2: greedily upgrade pairs to 9, refunding a pair's forced change
    # before deciding whether the upgrade is affordable.
    for i in range(half):
        k += forced[i]
        if k >= 2:
            for side in (left, mirrored):
                if side[i] != 9:
                    side[i] = 9
                    k -= 1
        elif k == 1 and forced[i] == 1:
            best = max(left[i], mirrored[i])
            left[i] = best
            mirrored[i] = best
            k -= 1

    # Odd-length strings have a free-standing middle digit.
    if n % 2 != 0:
        middle = '9' if k > 0 else str(s[half])
    else:
        middle = ''

    return (''.join(str(d) for d in left)
            + middle
            + ''.join(str(d) for d in reversed(mirrored)))
def main():
    """Read 'n k' and the digit string from stdin, print the best palindrome."""
    n, k = map(int, input().strip().split(' '))
    s = [int(x) for x in input().strip()]
    print(richie_rich(s, k))

main()
|
from django.urls import path
from .views import home_page_view

# Route the site root ("") to the home page view.
urlpatterns=[path("",home_page_view,name="home")]
|
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
class GameDriver():
    """Selenium harness that boots a local Dark Forest game client and
    exposes the JS helpers defined in code.js to Python callers.

    Assumes a game server at http://localhost:8081/game1 and a chromedriver
    binary at df_gym/envs/utils/chromedriver -- TODO confirm paths.
    """

    def __init__(self, address, privatekey):
        # code.js holds the in-browser helpers (disablePopups, sendEnergy,
        # randomove, ...) that are prepended to every execute_script call.
        with open("code.js", "r") as text_file:
            self.js = text_file.read()
        driver = webdriver.Chrome('df_gym/envs/utils/chromedriver')
        self.driver = driver
        driver.get("http://localhost:8081/game1")
        assert "Dark Forest" in driver.title
        time.sleep(4)
        # The game's terminal input box.
        textbox = driver.find_element_by_css_selector("textarea[class^='InputTextArea']")
        assert textbox
        time.sleep(2)

        def typetext(k, length=0.5):
            # Type into the terminal, press ENTER, then wait *length* seconds.
            textbox.send_keys(k)
            textbox.send_keys(Keys.RETURN)
            time.sleep(length)

        # "i": import an account by private key.
        typetext("i")
        self._waitToSee("Enter the 0x-prefixed private key")
        assert "Enter the 0x-prefixed private key" in driver.page_source
        typetext(
            privatekey,
            length=3
        )
        time.sleep(5)
        assert "Press ENTER to find a home planet" in driver.page_source
        self._callJsFunc(f"disablePopups('{address}')")
        time.sleep(1)
        textbox.send_keys(Keys.RETURN)
        time.sleep(3)
        # start game
        textbox.send_keys(Keys.RETURN)
        time.sleep(5)
        self._callJsFunc("stopExplore()")
        time.sleep(5)
        # Poll until the client reports at least one owned planet.
        loading_complete = False
        while not loading_complete:
            time.sleep(1)
            loading_complete = self.getPlanetCount() > 0
        assert "No results found." not in driver.page_source

    def _waitToSee(self, text):
        """Block until *text* appears in the page source (10 s timeout)."""
        start = time.time()
        can_see = False
        while not can_see:
            time.sleep(1)
            can_see = text in self.driver.page_source
            if time.time() - start > 10:
                assert False, f"10 seconds has passed and we didn't see '{text}'"

    def _callJsFunc(self, function_name):
        """Execute *function_name* in the browser with code.js prepended."""
        return self.driver.execute_script(self.js + "\n" + function_name)

    def move(self):
        """Trigger a random in-game move (JS helper randomove)."""
        self._callJsFunc("randomove()")

    def sendEnergy(self, from_planet_id, to_planet_id, percent_amount):
        """Send *percent_amount* of energy from one planet to another."""
        print(f"sendEnergy('{from_planet_id}', '{to_planet_id}', {percent_amount})")
        self._callJsFunc(f"sendEnergy('{from_planet_id}', '{to_planet_id}', {percent_amount})")

    def getPlanetCount(self):
        """Return the number of planets owned by the current account."""
        return self._callJsFunc("return df.getMyPlanets().length")

    def getAllReachablePlanets(self):
        """Return all planets reachable by the player (JS helper)."""
        return self._callJsFunc("return getAllReachablePlanets()")

    def getEnergyScore(self):
        """Return the current player's total energy."""
        return self._callJsFunc("return df.getEnergyOfPlayer(df.account)")

    def close(self):
        """Shut down the browser window."""
        self.driver.close()

    def screenshot(self):
        """Return a PNG screenshot of the current page as bytes."""
        return self.driver.get_screenshot_as_png()
|
import numpy as np
import matplotlib.pyplot as plt
# Draw a red circle of radius 1 centred at (1, 1) on the current axes.
circle = plt.Circle((1, 1), 1, color='r')
plt.gcf().gca().add_artist(circle)
# Reference diagonal through the origin for scale.
plt.plot([0, 1, 2, 3], [0, 1, 2, 3])
plt.show()
|
"""open_pipelines URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from app.views.index import IndexView
from app.views.repos import ReposJsonView
from app.views.repos import RepoByPathJsonView
from app.views.builds import BuildByUUIDView
from app.views.builds import BuildByUUIDJsonView
from app.views.webhook import WebhookByUUIDView
from app.views.login import LoginView
from app.views.login import LoginWithServiceView
from app.views.logout import LogoutView
urlpatterns = [
    # Index
    url(r'^$',
        view = IndexView.as_view(),
        name = "index"
    ),

    # Repos
    url(r'^repos\.json$',
        view = ReposJsonView.as_view(),
        name = "repos_json"
    ),

    # Repo by Path (greedy capture, e.g. repos/owner/name.json)
    url(r'^repos\/(.+)\.json$',
        view = RepoByPathJsonView.as_view(),
        name = "repo_by_path_json"
    ),

    # Build by UUID (capture group matches the 8-4-4-4-12 UUID form)
    url(r'^builds\/([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})$',
        view = BuildByUUIDView.as_view(),
        name = "build_by_uuid"
    ),

    # Build by UUID JSON
    url(r'^builds\/([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\.json$',
        view = BuildByUUIDJsonView.as_view(),
        name = "build_by_uuid_json"
    ),

    # Webhook
    url(r'^webhook\/([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})$',
        view = WebhookByUUIDView.as_view(),
        name = "webhook_by_uuid"
    ),

    # Login
    url(r'^login$',
        view = LoginView.as_view(),
        name = "login"
    ),

    # Login - With Service (service name captured from the path)
    url(r'^login\/(.+)$',
        view = LoginWithServiceView.as_view(),
        name = "login_with_service"
    ),

    # Logout
    url(r'^logout$',
        view = LogoutView.as_view(),
        name = "logout"
    ),
]
|
def generate_n_triangle_num(n):
    """Return the n-th triangular number n*(n+1)/2.

    Uses integer (floor) division: n*(n+1) is always even, so nothing is
    lost, and on Python 3 the result stays an exact int instead of becoming
    a float (true division would lose precision for large n).
    """
    return (n * (n + 1)) // 2
def count_divisors(n):
    """Return the number of positive divisors of n (n >= 1).

    Fixes in this version:
    - range(1, root) excluded the root itself, undercounting;
    - perfect squares had their root counted twice (36 -> 10 instead of 9);
    - len(filter(...)) raises TypeError on Python 3 (filter is lazy).
    """
    root = int(n ** 0.5)
    # Each divisor d <= sqrt(n) pairs with the distinct divisor n // d.
    count = sum(1 for d in range(1, root + 1) if n % d == 0) * 2
    # For perfect squares the root pairs with itself: remove the duplicate.
    if root * root == n:
        count -= 1
    return count
# Project Euler 12: find the first triangular number with >= 500 divisors.
n = 0
while True:
    n += 1
    div_count = count_divisors(generate_n_triangle_num(n))
    if div_count >= 500:
        break
# print() call form works on both Python 2 and 3 for a single argument
# (the original used the Python-2-only print statement).
print(generate_n_triangle_num(n))
|
class MusicalInstrument:
    """Base type: every instrument can play the 12 major keys."""
    no_of_major_keys = 12


class StringInsturment(MusicalInstrument):
    """A stringed instrument; its body is built from tonewood."""
    type_of_wood = 'Tonewood'


class Guitar(StringInsturment):
    """Six-string guitar that announces its build details on creation."""

    def __init__(self):
        self.no_of_strings = 6
        # Attribute lookup walks the MRO: strings from the instance,
        # wood from StringInsturment, key count from MusicalInstrument.
        template = 'This guitar consists of {} strings. It is made up of {} and it can play {} keys.'
        print(template.format(self.no_of_strings, self.type_of_wood, self.no_of_major_keys))


guitar = Guitar()
def count_words(arr):
    """Return a frequency map of the purely-alphabetic strings in *arr*.

    Non-string items and strings containing non-letters are ignored.
    """
    from collections import Counter  # local import keeps the one-file script self-contained
    # Counter makes this a single O(n) pass; the original called
    # arr.count(word) for every element, which is O(n^2) and re-counted
    # duplicates.  isinstance replaces the non-idiomatic type(...) ==.
    return dict(Counter(w for w in arr if isinstance(w, str) and w.isalpha()))
print(count_words([1,]))  # non-string items are ignored, so this prints {}
# Python 2 for-loop demo (print statements are py2-only syntax).
# Iterate a literal tuple of ints.
for joel in 1,2,3,4,5:
    print "current joel: ",joel
print"------------------"

fruits = ['banana','Apple','Mango']
# Iterate list values directly.
for fruit in fruits:
    print "current fruit: ", fruit
print"------------------"

# Index-based iteration (enumerate would be the idiomatic choice).
for index in range(len(fruits)):
    print "current fruit: ",fruits[index]
print "Good Bye"
|
# coding: utf-8
# flake8: noqa
"""
LoRa App Server REST API
For more information about the usage of the LoRa App Server (REST) API, see [https://docs.loraserver.io/lora-app-server/api/](https://docs.loraserver.io/lora-app-server/api/). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from swagger_client.models.api_activate_device_request import ApiActivateDeviceRequest
from swagger_client.models.api_add_device_to_multicast_group_request import ApiAddDeviceToMulticastGroupRequest
from swagger_client.models.api_add_organization_user_request import ApiAddOrganizationUserRequest
from swagger_client.models.api_application import ApiApplication
from swagger_client.models.api_application_list_item import ApiApplicationListItem
from swagger_client.models.api_branding_response import ApiBrandingResponse
from swagger_client.models.api_create_application_request import ApiCreateApplicationRequest
from swagger_client.models.api_create_application_response import ApiCreateApplicationResponse
from swagger_client.models.api_create_device_keys_request import ApiCreateDeviceKeysRequest
from swagger_client.models.api_create_device_profile_request import ApiCreateDeviceProfileRequest
from swagger_client.models.api_create_device_profile_response import ApiCreateDeviceProfileResponse
from swagger_client.models.api_create_device_request import ApiCreateDeviceRequest
from swagger_client.models.api_create_fuota_deployment_for_device_request import ApiCreateFUOTADeploymentForDeviceRequest
from swagger_client.models.api_create_fuota_deployment_for_device_response import ApiCreateFUOTADeploymentForDeviceResponse
from swagger_client.models.api_create_gateway_profile_request import ApiCreateGatewayProfileRequest
from swagger_client.models.api_create_gateway_profile_response import ApiCreateGatewayProfileResponse
from swagger_client.models.api_create_gateway_request import ApiCreateGatewayRequest
from swagger_client.models.api_create_http_integration_request import ApiCreateHTTPIntegrationRequest
from swagger_client.models.api_create_influx_db_integration_request import ApiCreateInfluxDBIntegrationRequest
from swagger_client.models.api_create_multicast_group_request import ApiCreateMulticastGroupRequest
from swagger_client.models.api_create_multicast_group_response import ApiCreateMulticastGroupResponse
from swagger_client.models.api_create_network_server_request import ApiCreateNetworkServerRequest
from swagger_client.models.api_create_network_server_response import ApiCreateNetworkServerResponse
from swagger_client.models.api_create_organization_request import ApiCreateOrganizationRequest
from swagger_client.models.api_create_organization_response import ApiCreateOrganizationResponse
from swagger_client.models.api_create_service_profile_request import ApiCreateServiceProfileRequest
from swagger_client.models.api_create_service_profile_response import ApiCreateServiceProfileResponse
from swagger_client.models.api_create_things_board_integration_request import ApiCreateThingsBoardIntegrationRequest
from swagger_client.models.api_create_user_request import ApiCreateUserRequest
from swagger_client.models.api_create_user_response import ApiCreateUserResponse
from swagger_client.models.api_device import ApiDevice
from swagger_client.models.api_device_activation import ApiDeviceActivation
from swagger_client.models.api_device_keys import ApiDeviceKeys
from swagger_client.models.api_device_list_item import ApiDeviceListItem
from swagger_client.models.api_device_profile import ApiDeviceProfile
from swagger_client.models.api_device_profile_list_item import ApiDeviceProfileListItem
from swagger_client.models.api_device_queue_item import ApiDeviceQueueItem
from swagger_client.models.api_downlink_frame_log import ApiDownlinkFrameLog
from swagger_client.models.api_downlink_tx_info import ApiDownlinkTXInfo
from swagger_client.models.api_encrypted_fine_timestamp import ApiEncryptedFineTimestamp
from swagger_client.models.api_enqueue_device_queue_item_request import ApiEnqueueDeviceQueueItemRequest
from swagger_client.models.api_enqueue_device_queue_item_response import ApiEnqueueDeviceQueueItemResponse
from swagger_client.models.api_enqueue_multicast_queue_item_request import ApiEnqueueMulticastQueueItemRequest
from swagger_client.models.api_enqueue_multicast_queue_item_response import ApiEnqueueMulticastQueueItemResponse
from swagger_client.models.api_fuota_deployment import ApiFUOTADeployment
from swagger_client.models.api_fuota_deployment_device_list_item import ApiFUOTADeploymentDeviceListItem
from swagger_client.models.api_fuota_deployment_device_state import ApiFUOTADeploymentDeviceState
from swagger_client.models.api_fuota_deployment_list_item import ApiFUOTADeploymentListItem
from swagger_client.models.api_gateway import ApiGateway
from swagger_client.models.api_gateway_board import ApiGatewayBoard
from swagger_client.models.api_gateway_list_item import ApiGatewayListItem
from swagger_client.models.api_gateway_profile import ApiGatewayProfile
from swagger_client.models.api_gateway_profile_extra_channel import ApiGatewayProfileExtraChannel
from swagger_client.models.api_gateway_profile_list_item import ApiGatewayProfileListItem
from swagger_client.models.api_gateway_stats import ApiGatewayStats
from swagger_client.models.api_get_application_response import ApiGetApplicationResponse
from swagger_client.models.api_get_device_activation_response import ApiGetDeviceActivationResponse
from swagger_client.models.api_get_device_keys_response import ApiGetDeviceKeysResponse
from swagger_client.models.api_get_device_profile_response import ApiGetDeviceProfileResponse
from swagger_client.models.api_get_device_response import ApiGetDeviceResponse
from swagger_client.models.api_get_fuota_deployment_device_response import ApiGetFUOTADeploymentDeviceResponse
from swagger_client.models.api_get_fuota_deployment_response import ApiGetFUOTADeploymentResponse
from swagger_client.models.api_get_gateway_profile_response import ApiGetGatewayProfileResponse
from swagger_client.models.api_get_gateway_response import ApiGetGatewayResponse
from swagger_client.models.api_get_gateway_stats_response import ApiGetGatewayStatsResponse
from swagger_client.models.api_get_http_integration_response import ApiGetHTTPIntegrationResponse
from swagger_client.models.api_get_influx_db_integration_response import ApiGetInfluxDBIntegrationResponse
from swagger_client.models.api_get_last_ping_response import ApiGetLastPingResponse
from swagger_client.models.api_get_multicast_group_response import ApiGetMulticastGroupResponse
from swagger_client.models.api_get_network_server_response import ApiGetNetworkServerResponse
from swagger_client.models.api_get_organization_response import ApiGetOrganizationResponse
from swagger_client.models.api_get_organization_user_response import ApiGetOrganizationUserResponse
from swagger_client.models.api_get_random_dev_addr_response import ApiGetRandomDevAddrResponse
from swagger_client.models.api_get_service_profile_response import ApiGetServiceProfileResponse
from swagger_client.models.api_get_things_board_integration_response import ApiGetThingsBoardIntegrationResponse
from swagger_client.models.api_get_user_response import ApiGetUserResponse
from swagger_client.models.api_global_search_response import ApiGlobalSearchResponse
from swagger_client.models.api_global_search_result import ApiGlobalSearchResult
from swagger_client.models.api_http_integration import ApiHTTPIntegration
from swagger_client.models.api_http_integration_header import ApiHTTPIntegrationHeader
from swagger_client.models.api_influx_db_integration import ApiInfluxDBIntegration
from swagger_client.models.api_influx_db_precision import ApiInfluxDBPrecision
from swagger_client.models.api_integration_kind import ApiIntegrationKind
from swagger_client.models.api_integration_list_item import ApiIntegrationListItem
from swagger_client.models.api_list_application_response import ApiListApplicationResponse
from swagger_client.models.api_list_device_profile_response import ApiListDeviceProfileResponse
from swagger_client.models.api_list_device_queue_items_response import ApiListDeviceQueueItemsResponse
from swagger_client.models.api_list_device_response import ApiListDeviceResponse
from swagger_client.models.api_list_fuota_deployment_devices_response import ApiListFUOTADeploymentDevicesResponse
from swagger_client.models.api_list_fuota_deployment_response import ApiListFUOTADeploymentResponse
from swagger_client.models.api_list_gateway_profiles_response import ApiListGatewayProfilesResponse
from swagger_client.models.api_list_gateway_response import ApiListGatewayResponse
from swagger_client.models.api_list_integration_response import ApiListIntegrationResponse
from swagger_client.models.api_list_multicast_group_queue_items_response import ApiListMulticastGroupQueueItemsResponse
from swagger_client.models.api_list_multicast_group_response import ApiListMulticastGroupResponse
from swagger_client.models.api_list_network_server_response import ApiListNetworkServerResponse
from swagger_client.models.api_list_organization_response import ApiListOrganizationResponse
from swagger_client.models.api_list_organization_users_response import ApiListOrganizationUsersResponse
from swagger_client.models.api_list_service_profile_response import ApiListServiceProfileResponse
from swagger_client.models.api_list_user_response import ApiListUserResponse
from swagger_client.models.api_login_request import ApiLoginRequest
from swagger_client.models.api_login_response import ApiLoginResponse
from swagger_client.models.api_multicast_group import ApiMulticastGroup
from swagger_client.models.api_multicast_group_list_item import ApiMulticastGroupListItem
from swagger_client.models.api_multicast_group_type import ApiMulticastGroupType
from swagger_client.models.api_multicast_queue_item import ApiMulticastQueueItem
from swagger_client.models.api_network_server import ApiNetworkServer
from swagger_client.models.api_network_server_list_item import ApiNetworkServerListItem
from swagger_client.models.api_organization import ApiOrganization
from swagger_client.models.api_organization_link import ApiOrganizationLink
from swagger_client.models.api_organization_list_item import ApiOrganizationListItem
from swagger_client.models.api_organization_user import ApiOrganizationUser
from swagger_client.models.api_organization_user_list_item import ApiOrganizationUserListItem
from swagger_client.models.api_ping_rx import ApiPingRX
from swagger_client.models.api_profile_response import ApiProfileResponse
from swagger_client.models.api_profile_settings import ApiProfileSettings
from swagger_client.models.api_rate_policy import ApiRatePolicy
from swagger_client.models.api_service_profile import ApiServiceProfile
from swagger_client.models.api_service_profile_list_item import ApiServiceProfileListItem
from swagger_client.models.api_stream_device_event_logs_response import ApiStreamDeviceEventLogsResponse
from swagger_client.models.api_stream_device_frame_logs_response import ApiStreamDeviceFrameLogsResponse
from swagger_client.models.api_stream_gateway_frame_logs_response import ApiStreamGatewayFrameLogsResponse
from swagger_client.models.api_things_board_integration import ApiThingsBoardIntegration
from swagger_client.models.api_update_application_request import ApiUpdateApplicationRequest
from swagger_client.models.api_update_device_keys_request import ApiUpdateDeviceKeysRequest
from swagger_client.models.api_update_device_profile_request import ApiUpdateDeviceProfileRequest
from swagger_client.models.api_update_device_request import ApiUpdateDeviceRequest
from swagger_client.models.api_update_gateway_profile_request import ApiUpdateGatewayProfileRequest
from swagger_client.models.api_update_gateway_request import ApiUpdateGatewayRequest
from swagger_client.models.api_update_http_integration_request import ApiUpdateHTTPIntegrationRequest
from swagger_client.models.api_update_influx_db_integration_request import ApiUpdateInfluxDBIntegrationRequest
from swagger_client.models.api_update_multicast_group_request import ApiUpdateMulticastGroupRequest
from swagger_client.models.api_update_network_server_request import ApiUpdateNetworkServerRequest
from swagger_client.models.api_update_organization_request import ApiUpdateOrganizationRequest
from swagger_client.models.api_update_organization_user_request import ApiUpdateOrganizationUserRequest
from swagger_client.models.api_update_service_profile_request import ApiUpdateServiceProfileRequest
from swagger_client.models.api_update_things_board_integration_request import ApiUpdateThingsBoardIntegrationRequest
from swagger_client.models.api_update_user_password_request import ApiUpdateUserPasswordRequest
from swagger_client.models.api_update_user_request import ApiUpdateUserRequest
from swagger_client.models.api_uplink_frame_log import ApiUplinkFrameLog
from swagger_client.models.api_uplink_rx_info import ApiUplinkRXInfo
from swagger_client.models.api_user import ApiUser
from swagger_client.models.api_user_list_item import ApiUserListItem
from swagger_client.models.api_user_organization import ApiUserOrganization
from swagger_client.models.common_location import CommonLocation
from swagger_client.models.common_location_source import CommonLocationSource
from swagger_client.models.common_modulation import CommonModulation
from swagger_client.models.gw_delay_timing_info import GwDelayTimingInfo
from swagger_client.models.gw_downlink_timing import GwDownlinkTiming
from swagger_client.models.gw_fsk_modulation_info import GwFSKModulationInfo
from swagger_client.models.gw_fine_timestamp_type import GwFineTimestampType
from swagger_client.models.gw_gps_epoch_timing_info import GwGPSEpochTimingInfo
from swagger_client.models.gw_immediately_timing_info import GwImmediatelyTimingInfo
from swagger_client.models.gw_lo_ra_modulation_info import GwLoRaModulationInfo
from swagger_client.models.gw_plain_fine_timestamp import GwPlainFineTimestamp
from swagger_client.models.gw_uplink_tx_info import GwUplinkTXInfo
from swagger_client.models.protobuf_any import ProtobufAny
from swagger_client.models.runtime_stream_error import RuntimeStreamError
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from xml.sax.saxutils import escape as _escape
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
class PangoMarkupRenderer(Renderer):
    '''Powerline Pango markup segment renderer.'''

    @staticmethod
    def hlstyle(*args, **kwargs):
        # Pango <span> elements are self-contained, so no explicit
        # attribute-reset markup is needed between segments.
        return ''

    def hl(self, contents, fg=None, bg=None, attrs=None, **kwargs):
        '''Wrap *contents* in a Pango <span> carrying the highlight attributes.'''
        span_attrs = []
        if fg is not None and fg is not False and fg[1] is not False:
            span_attrs.append('foreground="#{0:06x}"'.format(fg[1]))
        if bg is not None and bg is not False and bg[1] is not False:
            span_attrs.append('background="#{0:06x}"'.format(bg[1]))
        if attrs is not None and attrs is not False:
            if attrs & ATTR_BOLD:
                span_attrs.append('font_weight="bold"')
            if attrs & ATTR_ITALIC:
                span_attrs.append('font_style="italic"')
            if attrs & ATTR_UNDERLINE:
                span_attrs.append('underline="single"')
        return '<span ' + ' '.join(span_attrs) + '>' + contents + '</span>'

    escape = staticmethod(_escape)
renderer = PangoMarkupRenderer
|
from mailu import db, models
from mailu.internal import internal
import flask
@internal.route("/postfix/domain/<domain_name>")
def postfix_mailbox_domain(domain_name):
    """Resolve *domain_name* as a hosted domain (or alternative) for postfix.

    Returns the canonical domain name as JSON, or aborts with 404 when the
    domain is unknown.
    """
    domain = models.Domain.query.get(domain_name)
    if not domain:
        domain = models.Alternative.query.get(domain_name)
    if not domain:
        flask.abort(404)
    return flask.jsonify(domain.name)
@internal.route("/postfix/mailbox/<email>")
def postfix_mailbox_map(email):
    """Confirm *email* is a known mailbox; 404 otherwise."""
    user = models.User.query.get(email)
    if not user:
        flask.abort(404)
    return flask.jsonify(user.email)
@internal.route("/postfix/alias/<alias>")
def postfix_alias_map(alias):
    """Resolve a postfix alias lookup.

    *alias* may be a bare domain (echoed back) or a full address, which is
    resolved first against Alias entries, then against concrete users.
    Aborts with 404 when nothing matches.
    """
    # A bare domain (no '@') yields localpart=None.
    localpart, domain = alias.split('@', 1) if '@' in alias else (None, alias)
    alternative = models.Alternative.query.get(domain)
    if alternative:
        # Map an alternative domain back onto its canonical domain.
        domain = alternative.domain_name
    email = '{}@{}'.format(localpart, domain)
    if localpart is None:
        return flask.jsonify(domain)
    else:
        alias_obj = models.Alias.resolve(localpart, domain)
        if alias_obj:
            # Alias destinations are joined into postfix's comma-separated form.
            return flask.jsonify(",".join(alias_obj.destination))
        user_obj = models.User.query.get(email)
        if user_obj:
            # NOTE(review): `user_obj.destination` looks suspicious -- Alias
            # has a `destination` attribute, but for a concrete user the
            # expected value would be user_obj.email; confirm against the
            # User model.
            return flask.jsonify(user_obj.destination)
        return flask.abort(404)
@internal.route("/postfix/transport/<email>")
def postfix_transport(email):
    """Return the SMTP relay transport for *email*'s domain, or 404."""
    # Accept either a full address or a bare domain.
    domain = email.split('@', 1)[1] if '@' in email else email
    relay = models.Relay.query.get(domain)
    if not relay:
        flask.abort(404)
    return flask.jsonify("smtp:[{}]".format(relay.smtp))
@internal.route("/postfix/sender/<sender>")
def postfix_sender(sender):
    """ Simply reject any sender that pretends to be from a local domain
    """
    # Accept either a full address or a bare domain.
    domain_name = sender.split('@', 1)[1] if '@' in sender else sender
    if models.Domain.query.get(domain_name):
        return flask.jsonify("REJECT")
    if models.Alternative.query.get(domain_name):
        return flask.jsonify("REJECT")
    # Unknown domain: not ours to police.
    return flask.abort(404)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Python 2 competitive-programming solution: reads n pairs (x, y), sums the
# left column into l and the right column into r, and looks for the single
# pair whose swap best reduces the imbalance |l - r|.  Prints 0 when no swap
# improves the balance, otherwise the 1-based index of the pair to swap.
n=int(raw_input())
a=[]
l=r=ans=chk=0
# p tracks the pair with the largest x-y surplus, q the largest y-x surplus;
# each is [x, y, 1-based index].
p,q=[0]*3,[0]*3
for i in range(n):
    x,y=map(int,raw_input().split())
    l,r=l+x,r+y
    if x>y:
        if x-y>p[0]-p[1]:
            p=[x,y,i+1]
    if x<y:
        if y-x>q[1]-q[0]:
            q=[x,y,i+1]
# ll / rr: imbalance after hypothetically swapping pair q / pair p.
ll=abs((l-q[0]+q[1])-(r-q[1]+q[0]))
rr=abs((l-p[0]+p[1])-(r-p[1]+p[0]))
# 0 when keeping things as-is is already (weakly) best, else the better swap.
print 0 if abs(l-r)==max(abs(l-r),ll,rr) else q[2] if ll>rr else p[2]
|
import requests
class Fetcher(object):
    """Abstract content fetcher; subclasses override fetch()."""

    def fetch(self):
        """Return the fetched content; the base implementation yields an empty string."""
        return ""
class FilesystemFetcher(Fetcher):
    """Fetcher that reads its content from a local file."""

    def __init__(self, path):
        # The path is stored unvalidated; errors surface at fetch() time.
        self.path = path

    def fetch(self):
        """Return the file's entire text content."""
        with open(self.path) as handle:
            return handle.read()
class HTTPFetcher(Fetcher):
    """Fetcher that downloads content from a URL via HTTP GET."""

    def __init__(self, url):
        # Target URL; nothing is fetched until fetch() is called.
        self.url = url

    def fetch(self):
        # NOTE(review): resp.content is *bytes*, while Fetcher/FilesystemFetcher
        # return str -- confirm callers expect bytes here (resp.text would
        # match the sibling classes).  Also, no timeout is set, so a stalled
        # server blocks indefinitely.
        resp = requests.get(self.url)
        return resp.content
|
#!/usr/bin/env python
"""Resolve which file holds a git repository's current HEAD ref.

argv[1] is the path to a HEAD file.  Prints the ref path it points at when
a loose ref file exists, "packed-refs" when the ref is packed, or an empty
line for a detached HEAD.  Handles submodules, whose `.git` is a plain file
containing "gitdir: <path>" instead of a directory.
"""
import os
import re
from sys import argv

parts = argv[1].rpartition("/.git/")
# A submodule stores a `.git` *file* next to the work tree.
is_submodule = os.path.isfile(parts[0] + "/.git")
if is_submodule:
    # `with` closes the handle even if readline() raises; the original used
    # explicit open()/close() pairs, which leak on exceptions.
    with open(parts[0] + "/.git", 'r') as f:
        line = f.readline()
    gitdir = re.match(r'gitdir: (.*)', line).group(1)
    if not os.path.isabs(gitdir):
        gitdir = os.path.normpath(os.path.join(parts[0], gitdir))
    headfile = gitdir + "/" + parts[2]
else:
    headfile = argv[1]
    gitdir = os.path.dirname(headfile)

with open(headfile, 'r') as f:
    head = f.readline()

match = re.match(r'ref: (.*)', head)
if match and os.path.isfile(gitdir + "/" + match.group(1)):
    # Loose ref file exists; watch that.
    print(match.group(1))
elif match:
    # Symbolic ref without a loose file: it lives in packed-refs.
    print("packed-refs")
else:
    # Detached HEAD (raw commit hash): nothing further to watch.
    print("")
|
import tcod
from input_handlers import handle_main_menu
from graphics.scene.main_menu import MainMenuScene
from globals import GameStates, RenderOrder, CONFIG
from game_map import GameMap
from components.fighter import Fighter
from components.inventory import Inventory
from components.equipment import Equipment
from components.purse import Purse
from components.ai import BasicMonster
from message_log import MessageLog
from entity import Entity
def component(name):
    """Factory for entity components, keyed by component *name*.

    Each call returns a fresh instance.  The original built every component
    on every call just to return one (and carried a TODO asking for a proper
    factory); the lazy lambdas below construct only the requested component.

    Raises:
        KeyError: if *name* is not a known component.
    """
    component_map = {
        "PLAYER": lambda: Fighter(hp=80, defense=5, power=7, magic=5),
        "ORC": lambda: Fighter(hp=10, defense=0, power=3, xp=35),
        "TROLL": lambda: Fighter(hp=16, defense=1, power=4, xp=100),
        "BASIC": lambda: BasicMonster(),
        "INVENTORY": lambda: Inventory(26),
        "EQUIPMENT": lambda: Equipment(),
        "PURSE": lambda: Purse(),
    }
    return component_map[name]()
def get_game_variables():
    """Build the initial world state for a new game.

    Returns:
        tuple: (player, entities, game_map, message_log, game_state), ready
        to be handed to the game stage.
    """
    # Player starts at (0, 0); presumably make_map() repositions it into the
    # first generated room -- confirm in GameMap.make_map.
    player = Entity(0, 0,
                    '@',
                    tcod.black,
                    'Player',
                    True,
                    render_order=RenderOrder.ACTOR,
                    inventory=component("INVENTORY"),
                    equipment=component("EQUIPMENT"),
                    purse=component("PURSE"),
                    fighter=component("PLAYER"))
    entities = [player]
    game_map = GameMap(CONFIG.get('MAP_WIDTH'), CONFIG.get('MAP_HEIGHT'))
    # make_map receives `entities` and the component factory, presumably to
    # populate the map with monsters/items in place -- verify in GameMap.
    game_map.make_map(
        CONFIG.get('MAX_ROOMS'),
        CONFIG.get('ROOM_MIN_SIZE'),
        CONFIG.get('ROOM_MAX_SIZE'),
        player,
        entities,
        CONFIG.get('MAX_MONSTERS'),
        CONFIG.get('MAX_ITEMS'),
        component)
    message_log = MessageLog(CONFIG.get("MESSAGE_X"),
                             CONFIG.get("MESSAGE_WIDTH"),
                             CONFIG.get("MESSAGE_HEIGHT"))
    # New games open on the instructions screen.
    game_state = GameStates.INSTRUCTIONS
    return player, entities, game_map, message_log, game_state
class MainMenuInputHandler:
    """Translates raw key events into main-menu action dicts."""

    def on_key(self, key, state=None):
        # `state` is accepted for interface parity with other handlers but
        # is unused here.
        return handle_main_menu(key)
class MainMenuState:
    """State machine for the main menu.

    turn() consumes an action dict from the input handler and returns an
    event dict: {'next_stage': ..., 'args': ...} to start a game,
    {'exit_game': True} to quit, or {} when nothing happens.
    """

    def __init__(self):
        # Set when loading a saved game failed; cleared on the next action.
        self.error = False

    def turn(self, action):
        """Process one menu action and return the resulting event dict."""
        new_game = action.get('new_game')
        load_saved_game = action.get('load_game')
        exit_game = action.get('exit')

        if self.error and (new_game or load_saved_game or exit_game):
            # Any menu action dismisses the load-error message first.
            self.error = False
            return {}
        elif new_game:
            # get_game_variables() already returns GameStates.INSTRUCTIONS;
            # the original redundantly reassigned game_state here.
            player, entities, game_map, message_log, game_state = get_game_variables()
            return {
                'next_stage': 'game',
                'args': (player, entities, game_map, message_log, game_state)
            }
        elif load_saved_game:
            try:
                # TODO: load_game() is not implemented/imported, so this path
                # currently always fails and shows the error message.
                # player, entities, game_map, message_log, game_state = load_game()
                return {
                    'next_stage': 'game',
                    'args': (player, entities, game_map, message_log, game_state)
                }
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                self.error = True
        elif exit_game:
            return { 'exit_game': True }
        return {}
class EmptyResultProcess:
    """No-op result processor: passes the event queue through unchanged.

    Exists so every stage can uniformly run its events through a processor.
    """

    def process(self, event_queue):
        # Identity pass-through.
        return event_queue
class MainMenuStage:
    """Bundles the scene, input handler, state, and result processing that
    make up the main-menu stage of the game loop."""

    def __init__(self):
        self.scene = MainMenuScene()
        self.input_handler = MainMenuInputHandler()
        self.state = MainMenuState()
        self.result_processor = EmptyResultProcess()
        # Last event dict produced by run(); stays empty until a key is handled.
        self.event_queue = {}
        self.name = "Main Menu"

    def run(self, events):
        """Render the menu and process one frame's worth of input events.

        Returns the (possibly empty) event dict describing what should
        happen next (stage change, exit, ...).
        """
        key, mouse_move, mouse_click = events
        self.scene.show(self.state)
        if key:
            action = self.input_handler.on_key(key)
            self.event_queue = self.state.turn(action)
            self.event_queue = self.result_processor.process(self.event_queue)
        return self.event_queue
|
# Load the persisted unfollow log for this account and read back the list
# of people queued for unfollowing.
from unfollowLogManager import UnfollowLogManager

logHandler = UnfollowLogManager("niclasguenther")
peopleToUnfollow = logHandler.getDataFromUnfollowLog()
|
def hi(name):
    """Print a three-line greeting for *name* and return the name unchanged."""
    for line in (name, "hello", "how are you?"):
        print(line)
    return name
# Demonstrate that hi() both prints the greeting and returns its argument.
a=hi("sri")
print(a)
|
# dict.popitem demo
# popitem is similar to list.pop: list.pop returns the list's last element,
# whereas popitem returns an arbitrary key/value pair, because a plain dict
# has no user-facing notion of a "last element".
d={'adam':89,'lisa':67,'bart':27,'paul':56,'name':'dcy','age':67}
print(len(d))
while len(d)!=0:
    key,value=d.popitem()# popitem removes one key/value pair and returns it as a tuple, unpacked into key and value
    print(key,value)
print(d.keys())# keys() returns a view of the dict's keys (empty here: the loop drained d)
print(d.values())# values() returns a view of the dict's values
print(d)
x={'names':'hadescat'}
d.update(x)# update() merges another dict's items in, overwriting entries whose keys already exist
print(d)
pass
pass
|
# Count overlapping occurrences of "bob" in s.
#
# The original retained two large blocks of commented-out experiments (a
# vowel counter and a state-machine version); they are removed here.  The
# scan below is a single pass with str.find instead of the original
# accumulate-and-test approach, which rebuilt a growing string and ran an
# O(len) substring test on every character.
s = 'bobobobobobobobobob'

counter = 0
pos = s.find('bob')
while pos != -1:
    counter += 1
    # Advance by one so consecutive matches may overlap on the shared 'b'.
    pos = s.find('bob', pos + 1)

print('Number of times bob occurs is: ' + str(counter))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 14:22:16 2018
Multilayer Perceptron Implementation
with Tensorflow Framework
@author: zhaoyu
"""
import tensorflow as tf
import numpy as np
def loss_cross_entropy(pred, y):
    """Mean softmax cross-entropy between logits *pred* and one-hot labels *y*.

    NOTE(review): tf.nn.softmax_cross_entropy_with_logits was deprecated in
    favor of the _v2 variant in later TF 1.x releases -- confirm the TF
    version in use.
    """
    return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
def one_hot_encoding(y, n_class):
    """Return a (len(y), n_class) float32 one-hot matrix for labels *y*.

    Vectorized with integer-array indexing instead of the original
    per-row Python loop.

    Args:
        y: 1-D sequence of integer class labels in [0, n_class).
        n_class: total number of classes (width of the output).
    Returns:
        np.ndarray of dtype float32 with exactly one 1.0 per row.
    """
    labels = np.asarray(y, dtype=np.intp)
    one_hot = np.zeros((len(labels), n_class), dtype='float32')
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot
def predict(prob):
    """Return the arg-max class index per row of *prob*, cast to int32
    so it matches the label placeholder's dtype."""
    return tf.cast(tf.argmax(prob, axis=1), tf.int32)
def accuracy(y, y_pred):
    """Return the fraction of positions where *y* and *y_pred* agree
    (elementwise equality averaged as float32)."""
    return tf.reduce_mean(tf.cast(tf.equal(y, y_pred), tf.float32))
class MultilayerPerceptron:
    """Feed-forward multilayer perceptron classifier on TensorFlow 1.x.

    Typical use: mlp.fit(X, y, [100, 50]) infers sizes from the data,
    builds the placeholder/variable graph, and trains with mini-batch SGD;
    test() evaluates loss and accuracy on held-out data.
    """
    def __init__(self):
        # All real setup happens in fit(); the constructor is intentionally empty.
        pass
    def init_params(self):
        # Derive dataset-dependent sizes, then allocate the TF variables.
        self.init_input_params()
        self.init_tf_param()
    def init_input_params(self):
        """Derive sizes and containers from self.X / self.y (set by fit())."""
        self.n_samp, self.n_feat = self.X.shape
        self.n_class = len(set(self.y))
        self.n_layers = len(self.n_hiddens) + 1 #zero hidden layer is just 1-layer mlp, that is LR
        self.y_onehot = one_hot_encoding(self.y, self.n_class)
        self.w_list = []
        self.b_list = []
    def init_tf_param(self):
        '''
        # one-hot encoding in tf.float32 is too wierd!
        '''
        self.input_X = tf.placeholder(dtype=tf.float32, shape=[None, self.n_feat])
        self.input_y = tf.placeholder(dtype=tf.int32, shape=[None])
        self.input_y_onehot = tf.placeholder(dtype=tf.float32, shape=[None, self.n_class])
        '''
        # n_hiddens is a list containing the number of neurons on each layer
        # thus a list of weight matrices and bias vectors are required to represent each layer
        '''
        nrow = self.n_feat
        for each in self.n_hiddens:
            w = tf.Variable(tf.random_normal([nrow, each], 0, 1), name='w')
            b = tf.Variable(tf.random_normal([each], 0, 1), name='b')
            self.w_list.append(w)
            self.b_list.append(b)
            nrow = each
        '''
        # the output layer is diff from the hidden layer
        # and need to be specially treated.
        '''
        w = tf.Variable(tf.random_normal([nrow, self.n_class], 0, 1), name='w')
        # NOTE(review): "b = b =" is a duplicated assignment -- harmless but
        # likely a typo.
        b = b = tf.Variable(tf.random_normal([self.n_class], 0, 1), name='b')
        self.w_list.append(w)
        self.b_list.append(b)
    def forward(self):
        '''
        # forward-propagation procedure for prediction
        '''
        f = self.input_X
        # Sigmoid activations on all hidden layers...
        for (w, b) in zip(self.w_list[:-1], self.b_list[:-1]):
            f = tf.nn.sigmoid(tf.matmul(f, w)+b)
        # ...but the output layer stays linear (raw logits); softmax is
        # applied separately in def_graph / inside the loss.
        f = tf.matmul(f, self.w_list[-1])+self.b_list[-1]
        # f = tf.nn.softmax(tf.matmul(f, self.w_list[-1])+self.b_list[-1])
        return f
    def def_graph(self):
        '''
        # define placeholder (contained for input parameters), variables,
        # loss functions, esitamation functions and predictions.
        # Symbols and logics defined for computin graph in Tensorflow.
        '''
        self.pred = self.forward()
        self.prob = tf.nn.softmax(self.pred)
        self.loss = loss_cross_entropy(self.pred, self.input_y_onehot)
        self.y_pred = predict(self.prob)
        self.acc = accuracy(self.input_y, self.y_pred)
        # gama is the (fixed) learning rate for plain gradient descent.
        self.gama = 1
        self.optimizer = tf.train.GradientDescentOptimizer(self.gama).minimize(self.loss)
    def fit(self, X_data, y_data, hidden_list=[]):
        '''
        # Just mimic the fitting procedure in sklearn
        # here, it is back-propagation.
        # Details are shown in the naive-numpy version of MLP
        '''
        # NOTE(review): mutable default `hidden_list=[]` is benign here
        # (never mutated) but is a known Python pitfall.
        self.X, self.y, self.n_hiddens = X_data, y_data, hidden_list
        self.init_params()
        self.def_graph()
        self.s = tf.Session()
        self.s.run(tf.global_variables_initializer())
        self.mini_batch()
    def mini_batch(self):
        '''
        # Stochatic Gradient Descent
        # I will implement all versions of optimizer in the naive version
        '''
        loops_cnt = 100
        batch_size = 32
        for i in range(loops_cnt):
            # Fresh random sample order each outer loop.
            seq = np.arange(self.n_samp)
            np.random.shuffle(seq)
            # batch_num = int(np.ceil(self.n_samp/batch_size))
            for epoch in range(100):
                scope = seq[range(epoch*batch_size, min((epoch+1)*batch_size, self.n_samp))]
                X_batch = self.X[scope, :]
                y_batch_onehot = self.y_onehot[scope, :]
                self.s.run(self.optimizer,
                           {self.input_X:X_batch, self.input_y_onehot:y_batch_onehot})
            # Evaluate loss/accuracy on the full training set once per outer loop.
            loss_i, acc_i, prob = self.s.run([self.loss, self.acc, self.prob],
                                             {self.input_X:self.X, self.input_y:self.y, self.input_y_onehot:self.y_onehot})
            # print(float(0) in prob) # check NaN in logits
            print(i, ' : ', loss_i, acc_i)
    def test(self, X_test, y_test):
        """Evaluate loss and accuracy on held-out data and print them."""
        y_test_onehot = one_hot_encoding(y_test, self.n_class)
        loss, acc, prob = self.s.run([self.loss, self.acc, self.prob], {self.input_X:X_test, self.input_y:y_test, self.input_y_onehot:y_test_onehot})
        print(loss, acc)
'''
# procedural-oriented programming
def mlp_tf(X, y):
n_samp, n_feat = X.shape
n_class = len(set(y))
y_onehot = one_hot_encoding(y, n_class)
input_X = tf.placeholder(dtype=tf.float32, shape=[None, n_feat])
input_y = tf.placeholder(dtype=tf.int32, shape=[None])
input_y_onehot = tf.placeholder(dtype=tf.float32, shape=[None, n_class])
# w = tf.Variable(tf.random_normal([n_feat, n_class], 0, 1), name='w')
# b = tf.Variable(tf.random_normal([n_class], 0, 1), name='b')
# prob = tf.nn.softmax(tf.matmul(input_X, w)+b)
n_neuron = 50
w0 = tf.Variable(tf.random_normal([n_feat, n_neuron], 0, 1), name='w0')
b0 = tf.Variable(tf.random_normal([n_neuron], 0, 1), name='b0')
w1 = tf.Variable(tf.random_normal([n_neuron, n_class], 0, 1), name='w1')
b1 = tf.Variable(tf.random_normal([n_class], 0, 1), name='b1')
prob = tf.nn.softmax(tf.matmul(tf.nn.sigmoid(tf.matmul(input_X, w0)+b0), w1)+b1)
L = loss_cross_entropy(input_y_onehot, prob)
y_pred = predict(prob)
acc = accuracy(input_y, y_pred)
gama = 1
optimizer = tf.train.GradientDescentOptimizer(gama).minimize(L)
s = tf.Session()
s.run(tf.global_variables_initializer())
batch_size = 32
batch_num = int(np.ceil(n_samp/batch_size))
for epoch in range(batch_num):
scope = range(epoch*batch_size, min((epoch+1)*batch_size, n_samp))
X_batch = X[scope, :]
y_batch_onehot = y_onehot[scope, :]
loss_i, acc_i = s.run([L, acc],
{input_X:X, input_y:y, input_y_onehot:y_onehot})
print(epoch, ' : ', loss_i, acc_i)
s.run(optimizer, {input_X:X_batch, input_y_onehot:y_batch_onehot})
'''
if __name__ == '__main__':
    # Smoke-train on the MNIST-style csv.  NOTE: read_mnist is neither
    # defined nor imported in this file; it is expected to come from elsewhere.
    print('tensorflow implemented mlp')
    training_csv = './data/train.csv'
    X_train, y_train = read_mnist(training_csv, True)
    classifier = MultilayerPerceptron()
    classifier.fit(X_train, y_train, [100, 50])
import math

import numpy as np
class Agent:
    """Tabular Q-learning agent with a softmax (Boltzmann) policy.

    Q maps (state, action) -> value estimates; pi maps (state, action) -> the
    softmax-over-Q probability.  C is initialized but never used in this class.
    Requires numpy as `np` (used in get_optimal_a).
    """
    def __init__(self,env):
        self.Q={}
        # NOTE(review): C is never read or written elsewhere in this class --
        # confirm whether it is still needed.
        self.C={}
        self.pi={}
        self.gamma=0.95  # discount factor for future rewards
        # Constant learning rate; the step count d is ignored.
        self.alpha=lambda d: 0.8
        self.actions=env.actions
        self.t=0  # total number of update() calls so far
    def get_a_exp(self,state,det=None):
        """
        This function will return an action given a stationary policy given the current state.
        :param state: The current state
        :param det: Whether action is deterministic
        :return: The action
        """
        if det==None:
            det=False
        # Exploration currently just delegates to the policy sampler.
        return self.get_optimal_a(state,det)
    def get_optimal_a(self, state, det=None):
        """
        This function will return an action given a stationary policy given the current state.
        :param state: The current state
        :param policy: The current policy
        :return: The action
        """
        # NOTE(review): `det==None` should idiomatically be `det is None`.
        if det==None: det=False
        s = tuple(state)
        probs = []
        if not det:
            # Stochastic: sample an action from the (renormalized) policy.
            for a in self.actions:
                # a = tuple(action)
                probs.append(self.get_pi(s,a,self.pi))
            # NOTE(review): np.seterr is a process-global setting; it stays
            # in effect after this call.
            np.seterr(all='raise')
            try:
                # L1-normalize so the probabilities sum to 1 for np.random.choice.
                probs = probs / np.linalg.norm(probs, ord=1)
            except Exception:
                print('ERROR')
                print('State' + str(state))
                print(probs)
            index = np.random.choice(range(len(self.actions)), p=probs)
            a_star = self.actions[index]
        else:
            # Deterministic: pick the action with the highest policy probability.
            for a in self.actions:
                # a = tuple(action)
                probs.append(self.get_pi(s,a,self.pi))
            a_star = self.actions[probs.index(max(probs))]
        return a_star
    def get_pi(self,s,a,func=None):
        """
        Generic getter function for all policy dictionaries
        :param state: The current state
        :param action: The current action
        :return: policy
        """
        if func==None:
            func=self.pi
        if (s,a) in func.keys():
            return func[s,a]
        else:
            # Unseen (state, action): fall back to a uniform distribution.
            return 1/len(self.actions)
    def get_vals(self,s,a,func):
        """
        Generic getter function for all dictionaries with keys (s,a)
        :param state: The current state
        :param action: The current action
        :return: value
        """
        if (s,a) in func.keys():
            return func[s,a]
        else:
            return 0
    def get_val(self,s,func):
        """
        Generic getter function for all dictionaries with keys (s)
        :param state: The current state
        :param action: The current action
        :return: value
        """
        if s in func.keys():
            return func[s]
        else:
            return 0
    def update(self, exp):
        """
        Update Q function and policy
        :param exp: Experience tuple from Env
        :return: void
        """
        # exp = (state, action, next_state, reward, done)
        s=exp[0]
        a=exp[1]
        s_=exp[2]
        r=exp[3]
        done=exp[4]
        self.t=self.t+1
        # q update: standard Q-learning target r + gamma * max_a' Q(s', a');
        # terminal transitions use the bare reward.
        n_keys=[(s_,act) for act in self.actions]
        q_n=[self.get_vals(k[0],k[1],self.Q) for k in n_keys]
        q_target=(r+self.gamma*max(q_n)) if not done else r
        # print('prev q '+str((s,a))+' : '+str(self.get_vals(s,a,self.Q)))
        self.Q[s,a]=self.get_vals(s,a,self.Q)+self.alpha(self.t)*(q_target-self.get_vals(s,a,self.Q))
        # print('new q '+str((s,a))+' : '+str(self.get_vals(s,a,self.Q)))
        # Refresh the softmax policy over the updated Q-values for state s.
        keys=[(s,act) for act in self.actions]
        tot_q=sum([math.exp(self.get_vals(k[0],k[1],self.Q)) for k in keys])
        for k in keys:
            self.pi[k]=math.exp(self.get_vals(k[0],k[1],self.Q))/tot_q
|
#!/usr/bin/env python
#coding: utf-8
from google.appengine.ext import db
from webapp import webHandler
from api import datastore_api as api
from cgi import escape
import datastore
class ShoutBugHandler(webHandler):
    """Show the bug-report form to a logged-in user (token cookie required)."""
    def get(self):
        # 'in' replaces the long-deprecated dict.has_key(); anonymous
        # visitors (no token cookie) are bounced to the top page.
        if "token" in self.request.cookies:
            token = self.request.cookies.get("token")
        else:
            self.redirect("/")
            return 1
        user_name = api.get_user_from_token(token)
        self.write("""
<body>
<h3>ใใฐๅ ฑๅใใ้กใใใพใ</h3>
ใฆใผใถใผ : %s
<form action="/api/bug" method="POST">
<textarea name="description" style="width:500px;height:300px;"></textarea>
<input type="submit" value="ใใฐใๅ ฑๅใใ" />
</form>
""" % user_name.encode("utf-8")
        )
class RegisterBugHandler(webHandler):
    """Persist a POSTed bug description to the ShoutBug datastore model."""
    def get(self):
        # GET is not supported for this endpoint; send the user home.
        self.redirect("/")
        return 1
    def post(self):
        # 'in' replaces the long-deprecated dict.has_key().
        if "token" in self.request.cookies:
            token = self.request.cookies.get("token")
        else:
            self.redirect("/")
            return 1
        user_name = api.get_user_from_token(token)
        # escape() defends against HTML injection when the bug is re-rendered.
        bugstring = escape(self.request.get("description"))
        datastore.ShoutBug(
            user_name = user_name,
            bug = bugstring
        ).put()
        self.write("ใใฐๅ ฑๅใใใใจใใใใใพใ!!")
class OutputBugHandler(webHandler):
    """Admin-only view: render every recorded bug report as an HTML table."""
    def get(self):
        # 'in' replaces the long-deprecated dict.has_key().
        if "token" in self.request.cookies:
            token = self.request.cookies.get("token")
        else:
            self.redirect("/")
            return 1
        user_name = api.get_user_from_token(token)
        # Hard-coded administrator account gate.
        if user_name != "Alice1017":
            self.write("ใใฎๆฉ่ฝใฏ็ฎก็ไบบใใไฝฟใใพใใ")
            return 1
        else:
            self.write("<table border='1' cellspacing='0' cellpadding='5'>")
            self.write("<tr><td>ใฆใผใถใผๅ</td><td>ใใฐ</td></tr>")
            for model in db.Query(datastore.ShoutBug):
                data = {"user_name":model.user_name.encode("utf-8"), "bug":model.bug.encode("utf-8")}
                self.write("""
<tr>
<td>%(user_name)s</td>
<td>%(bug)s</td>
</tr>
""" % data)
            self.write("</table>")
#class DeleteBugHandler(webHandler):
# def get(self):
# if self.request.cookies.has_key("token"):
# token = self.request.cookies.get("token")
#
# else:
# self.redirect("/")
# return 1
#
# user_name = api.get_user_from_token(token)
# if user_name != "Alice1017":
# self.write("ใใฎๆฉ่ฝใฏ็ฎก็ไบบใใไฝฟใใพใใ")
# return 1
#
# else:
# user = self.request.get("user")
# bug = self.request.get("bug")
# filter = db.Query(datastore.ShoutBug).filter("user_name =",user_name).filter("bug =",bug)
# self.redirect("/shout/bug/output")
|
# Challenge - Classes Exercise
# Add a method to the Car class called age
# that returns how old the car is (2019 - year)
# *Be sure to return the age, not print it
class Car:
    """A simple car record (year/make/model) with an age calculation."""

    def __init__(self, year, make, model):
        self.year = year
        self.make = make
        self.model = model

    def compute_age(self):
        """Return the car's age in years relative to the fixed year 2019."""
        return (2019 - self.year)

    def age(self):
        """Return the car's age (2019 - year), as the exercise requires."""
        return self.compute_age()
# Demo: relative to the fixed 2019 reference, a 1996 car is 23 years old.
myCar = Car(1996, "Ford", "Everest")
print("The age of my car is " + str(myCar.compute_age()) + " years old.")
from copy import deepcopy
from bt_scheme import PartialSolution, BacktrackingSolver, State, Solution
from typing import *
from random import random, seed
def horse_solve(tablero: "List[Tuple[int, int], ...]"):
    """
    Enumerate closed knight's tours over the given board via backtracking.

    :param tablero: tuple of (x, y) squares forming the board
    :return: whatever BacktrackingSolver.solve yields (an iterable of tours)
    """
    class KnapsackPS(PartialSolution):
        def __init__(self, solucion: Tuple[Tuple[int, int], ...], x: int, y: int):
            # solucion: squares visited so far; (x, y): current knight position.
            self.solucion = solucion
            self.x = x
            self.y = y
            self.n = len(self.solucion)

        def is_solution(self) -> bool:
            # A tour is complete when every square is visited and the knight
            # can jump back to the start square (closed tour).
            return self.n == len(tablero) and (0, 0) in self.getSiguientes()

        def get_solution(self) -> Solution:
            return self.solucion

        def successors(self) -> Iterable["KnapsackPS"]:
            if not self.is_solution():
                for pos in self.getSiguientes():
                    if pos in tablero and pos not in self.solucion:
                        if self.noDisjunto(pos):
                            yield KnapsackPS(self.solucion + (pos,), pos[0], pos[1])

        def getSiguientes(self):
            # All eight knight moves from the current position.
            return [(self.x-1, self.y-2), (self.x-2, self.y-1), (self.x-1, self.y+2), (self.x-2, self.y+1), (self.x+1, self.y-2), (self.x+2, self.y-1), (self.x+1, self.y+2), (self.x+2, self.y+1)]

        def noDisjunto(self, pos):
            # Pruning: after moving to pos, the remaining unvisited squares
            # must still form a single connected component (BFS reachability).
            # BUG FIX: the original compared the lengths with 'is', which only
            # works by accident for CPython's small-int cache; use '=='.
            if len(self.solucion) + 1 == len(tablero):
                return True
            for i in tablero:
                if i not in self.solucion + (pos,):
                    aux = i
                    break
            suma = 1
            vertices = []
            queue = []
            seen = set()
            queue.append(aux)
            seen.add(aux)
            while len(queue) > 0:
                v = queue[0]
                suma += 1
                queue.remove(v)
                vertices.append(v)
                for suc in self.getSiguientes2(v[0], v[1]):
                    if suc not in seen and suc != pos and suc not in self.solucion and suc in tablero:
                        seen.add(suc)
                        queue.append(suc)
            return (suma + self.n) == len(tablero)

        def getSiguientes2(self, x, y):
            # Knight moves from an arbitrary square (used by the BFS check).
            return [(x-1, y-2), (x-2, y-1), (x-1, y+2), (x-2, y+1), (x+1, y-2), (x+2, y-1), (x+1, y+2), (x+2, y+1)]

    initialPS = KnapsackPS(((0, 0),), 0, 0)
    return BacktrackingSolver.solve(initialPS)
# Main program ------------------------------------------------
if __name__ == "__main__":
    # Build an 8x8 board as a tuple of (row, col) squares.
    tablero = ()
    for i in range(8):
        for j in range(8):
            tablero = tablero + ((i,j),)
    print(tablero)
    print("\n<SOLUCIONES>")
    # Context manager guarantees the output file is flushed and closed
    # (the original left the handle open for the whole search).
    with open("Caballos.txt", "w") as ficheroF:
        for sol in horse_solve(tablero):
            print (sol)
            ficheroF.write(str(sol))
            ficheroF.write("\n")
    print("\n<TERMINADO>")
|
from rest_framework import serializers
from apiAnalisis import models
class LibroSerializer(serializers.ModelSerializer):
    """Serializes Libro instances (id, titulo, descripcion)."""
    class Meta:
        model = models.Libro
        fields = ('id', 'titulo', 'descripcion')
class ClienteSerializer(serializers.ModelSerializer):
    """Serializes Cliente instances (id, cedula, edad, tipoCliente)."""
    class Meta:
        model = models.Cliente
        fields = ('id', 'cedula', 'edad', 'tipoCliente')
class GraficaSerializer(serializers.ModelSerializer):
    """Serializes Grafica instances (id, imagen, titulo)."""
    class Meta:
        model = models.Grafica
        fields = ('id', 'imagen', 'titulo')
import warnings
import itertools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
warnings.filterwarnings("ignore")
plt.style.use('fivethirtyeight')
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
##............................DATA PREPROCESSING and CLEANING.................................##
def parseDate(x, fmt="%Y-%m-%d"):
    """
    Parse a date string into a datetime.

    :param x: the date string (default format: ISO YYYY-MM-DD)
    :param fmt: strptime format; defaults to the original hard-coded format
    :return: datetime.datetime parsed from x
    """
    return datetime.strptime(x, fmt)
# Convert a time series into a supervised learning problem.
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """
    Frame a (multivariate) time series as a supervised-learning dataset.

    :param data: list or 2-D array/DataFrame of observations
    :param n_in: number of lag time steps to use as input (t-n .. t-1)
    :param n_out: number of future time steps to predict (t .. t+n_out-1)
    :param dropnan: drop rows made incomplete by the shifting
    :return: DataFrame with columns named 'var<j>(t-<i>)' / 'var<j>(t+<i>)'
    """
    # isinstance() is the idiomatic type test (was: type(data) is list).
    n_vars = 1 if isinstance(data, list) else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = [], []
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
    # Put it all together side by side and name the columns.
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # Shifting introduces NaNs at the edges; usually dropped for training.
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# Transform a series into train and test sets for supervised learning.
def prepare_data(series, n_test, n_lag, n_seq):
    """Frame *series* for supervised learning and split off a test tail."""
    supervised = series_to_supervised(series.values, n_lag, n_seq)
    print(supervised.info())
    framed = supervised.values
    # The final n_test rows are held out for evaluation.
    train, test = framed[:-n_test], framed[-n_test:]
    return train, test, supervised
# fit an LSTM network to training data
def fit_lstm(train, test, n_lag, n_seq, n_batch, nb_epoch, n_neurons):
    """
    Build and train a single-layer LSTM on pre-framed supervised data.

    NOTE(review): n_batch, nb_epoch and n_neurons are accepted but never
    used — layer size (150), epochs (100) and batch size (50) are
    hard-coded below. Confirm the intended values before wiring the
    parameters through; the caller also appears to pass arguments in a
    different order than this signature declares.

    :param train: 2-D array; first n_lag*n_seq columns are inputs, rest targets
    :param test: 2-D array with the same layout, used for validation
    :return: the fitted Keras model
    """
    # reshape training into [samples, timesteps, features]
    train_X, train_Y = train[:, 0:n_lag*n_seq], train[:, n_lag*n_seq:]
    test_X, test_Y = test[:, 0:n_lag*n_seq], test[:, n_lag*n_seq:]
    print(train_X.shape,train_Y.shape,test_X.shape,test_Y.shape)
    train_X = train_X.reshape(train_X.shape[0], 1, train_X.shape[1])
    test_X = test_X.reshape(test_X.shape[0], 1, test_X.shape[1])
    # design network: one LSTM layer feeding a dense output per target column
    model = Sequential()
    model.add(LSTM(150, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(train_Y.shape[1]))
    model.compile(loss='mean_squared_error', optimizer='adam',metrics=['accuracy'])
    # fit network (shuffle=False preserves temporal order between batches)
    history = model.fit(train_X, train_Y, epochs=100, validation_data=(test_X, test_Y),batch_size=50, verbose=2, shuffle=False)
    # plot history
    # plt.plot(history.history['loss'], label='train')
    # plt.plot(history.history['val_loss'], label='test')
    # plt.legend()
    # plt.show()
    return model
# Forecast target and feature values for the requested number of weeks.
def make_forecast(data, weeks, shift_by, n_lag, n_seq, predictor=None):
    """
    Iteratively roll the last supervised row forward to forecast *weeks* steps.

    :param data: 2-D array of supervised rows (lag features then targets)
    :param weeks: number of future steps to generate
    :param shift_by: positions to rotate the window left per step
    :param n_lag: lag window length
    :param n_seq: values per time step
    :param predictor: object with .predict(); defaults to the module-level
        ``model`` for backward compatibility (the original read the global).
    :return: list of forecast vectors, one per week
    """
    mdl = predictor if predictor is not None else model
    predictions = []
    temp = data[len(data) - 1, :]
    for i in range(weeks):
        # Rotate left so the newest values become the lag features.
        testVec = np.roll(temp, -shift_by)
        test = testVec[0:n_lag * n_seq]
        test = test.reshape(1, 1, test.size)
        prediction = mdl.predict(test)
        # Overwrite the tail with the freshly predicted values.
        testVec[n_lag * n_seq:] = prediction
        predictions.append(testVec)
        temp = testVec
    return predictions
# ----- Load and clean the raw per-part time series -----
df_timeseries = pd.read_csv("sample_data_arw.csv",low_memory=False,parse_dates=['date'])
# Sort first on component (part) and then on date.
df_timeseries=df_timeseries.sort_values(by=['part','date'],ascending=[1,1])
# Handle missing and zero values by forward- then backward-filling.
df_timeseries=df_timeseries.fillna(method='ffill')
df_timeseries=df_timeseries.fillna(method='bfill')
df_timeseries=df_timeseries.replace(to_replace=0,method='ffill')
df_timeseries=df_timeseries.replace(to_replace=0,method='bfill')
df_timeseries=df_timeseries.reset_index()
# Drop columns that are not used as model features.
df_timeseries=df_timeseries.drop('featureG_avg',axis=1)
df_timeseries=df_timeseries.drop('Unnamed: 0',axis=1)
df_timeseries=df_timeseries.drop('index',axis=1)
df_timeseries=df_timeseries.drop(['part_category_1','part_category_2','part_category_3'],axis=1)
df_timeseries=df_timeseries.drop(['featureA_max','featureB_max','featureC_max','featureD_max','featureE_max','featureF_max'],axis=1)
cols=df_timeseries.columns
nums=[0,1,2,3,4,5,6,7,8]  # NOTE(review): appears unused — confirm and remove
# Integer-encode categorical columns (currently disabled below).
encoder=preprocessing.LabelEncoder()
vals=df_timeseries.values
#vals[:,0]=encoder.fit_transform(vals[:,0])
# Column indices of everything except 'part' (0) and 'date' (1).
selector=[x for x in range(vals.shape[1]) if x!= 0 and x!=1]
# Scale the features and target only (not part/date).
scaler=preprocessing.MinMaxScaler(feature_range=(0,1))
vals_wo_date=scaler.fit_transform(vals[:,selector])
df_timeseries2=pd.DataFrame(vals_wo_date)
vals_temp=vals[:,[0,1]]
df_temp=pd.DataFrame(vals_temp)
# Re-attach part/date to the scaled columns, restoring the original names.
df_timeseries=pd.concat([df_temp,df_timeseries2],axis=1)
df_timeseries.columns=cols
print(df_timeseries.info())
# Build one matrix per part (component).
grouped=df_timeseries.groupby('part')
l_grouped=list(grouped)
# Supervised-framing / forecasting hyperparameters.
n_lag = 4
n_seq = 1
n_test = 10
n_features=7
n_weeks=24
joined_df=pd.DataFrame()
# Train one model per part and forecast n_weeks ahead.
# NOTE(review): only the first 20 parts are processed — confirm intent.
for i in range(20):
    # prepare data
    print(l_grouped[i][1].describe())
    parts=l_grouped[i][1]['part']
    l_grouped[i][1].drop(['part','date'],axis=1,inplace=True)
    #l_grouped[0][1]=l_grouped[0][1].reindex(sorted(l_grouped[0][1].columns),axis=1)
    train, test,supervised = prepare_data(l_grouped[i][1], n_test, n_lag, n_seq)
    # fit model
    # NOTE(review): positional arguments here do not match fit_lstm's
    # declared parameter order (n_lag/n_seq vs n_features) — verify.
    model = fit_lstm(train,test, n_features, n_lag, 1, 10, 1)
    forecasts=make_forecast(test,n_weeks,n_features,n_lag,n_features)
    # Append the forecasts to the original supervised series.
    predictFrame=pd.DataFrame(forecasts)
    predictFrame.columns=supervised.columns
    predictFrame=pd.concat([supervised,predictFrame])
    predictVals=predictFrame.values
    # Undo the MinMax scaling on the target block before exporting.
    predictVals=scaler.inverse_transform(predictVals[:,n_lag*n_features:])
    predictFrame=pd.DataFrame(predictVals)
    predictFrame.columns=l_grouped[i][1].columns
    # Keep only the forecast horizon for the CSV export.
    csvVals=predictFrame.values[-n_weeks:,:]
    csv_df=pd.DataFrame(csvVals[-n_weeks:,:])
    csv_df.columns=predictFrame.columns
    csv_df=csv_df.reset_index(drop=True)
    new_df=pd.DataFrame(parts[:n_weeks],columns=['part'])
    new_df=new_df.reset_index(drop=True)
    csv_df=pd.concat([csv_df,new_df],axis=1)
    csv_df=csv_df.reset_index(drop=True)
    joined_df=joined_df.reset_index(drop=True)
    joined_df=pd.concat([joined_df,csv_df],axis=0)
    # Rewritten every iteration so partial results survive a crash.
    joined_df.to_csv('forecast.csv')
    # Plot the change in target over time (disabled).
    # fig, ax = plt.subplots()
    # plt.plot(predictFrame['target'][:-n_weeks],color='black')
    # plt.plot(predictFrame['target'][-n_weeks:],color='red')
    # plt.legend()
    # plt.show()
    print (predictFrame)
|
# Generated by Django 2.2.5 on 2019-11-11 11:20
from django.db import migrations
class Migration(migrations.Migration):
    """Rename model appointbed -> AllotBed and its field bedid -> bed."""
    dependencies = [
        ('appointments', '0002_auto_20191106_1714'),
        ('bms', '0003_auto_20191106_1837'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='appointbed',
            new_name='AllotBed',
        ),
        migrations.RenameField(
            model_name='allotbed',
            old_name='bedid',
            new_name='bed',
        ),
    ]
|
from __future__ import unicode_literals
import os, threading, tinify
from urlparse import urlparse
from django.db.models.signals import post_save
from django.templatetags.static import StaticNode
from django.conf import settings
# Third-party service credentials, all optional, sourced from the environment.
aws_key_id = os.getenv('AWS_ACCESS_KEY_ID')
aws_secret = os.getenv('AWS_SECRET_ACCESS_KEY')
aws_bucket = os.getenv('AWS_STORAGE_BUCKET_NAME')
aws_region = os.getenv('AWS_REGION')
cf_api_key = os.getenv('CF_API_KEY')
cf_api_email = os.getenv('CF_API_EMAIL')
cf_zone_id = os.getenv('CF_ZONE_ID')
tinify.key = os.getenv('TINYPNG_API_KEY')
# Only import CloudFlare when the purge call could actually run.
# BUG FIX: the original condition tested cf_api_key twice and never
# cf_zone_id, even though the purge below requires all three settings.
if cf_api_key and cf_api_email and cf_zone_id:
    import CloudFlare
class TinyPngOptimizeThread(threading.Thread):
    """
    Background thread that pushes an uploaded image through the TinyPNG API.

    With AWS credentials configured, TinyPNG fetches the image by URL and
    stores the optimized result back to S3 (purging Cloudflare caches when
    configured); otherwise the local file is optimized in place.
    """
    def __init__(self, instance, **kwargs):
        # instance: model instance whose .file/.url points at the image.
        self.instance = instance
        super(TinyPngOptimizeThread, self).__init__(**kwargs)
    def run(self):
        # Guard clause: without an API key there is nothing to do.
        # ('is None' replaces '!= None'; print() is Py2/Py3 compatible.)
        if tinify.key is None:
            print("No tinify key")
            return
        if aws_key_id and aws_secret and aws_bucket and aws_region:
            source_url_http = urlparse(StaticNode.handle_simple(self.instance.file.name), scheme='http').geturl()
            source_url_https = urlparse(StaticNode.handle_simple(self.instance.file.name), scheme='https').geturl()
            source = tinify.from_url(source_url_https)
            path = "%s/%s" % (aws_bucket, self.instance.file.name)
            source.store(service='s3',aws_access_key_id=aws_key_id,aws_secret_access_key=aws_secret,region=aws_region,path=path)
            # Invalidate both scheme variants of the cached image.
            if cf_zone_id and cf_api_key and cf_api_email:
                cf = CloudFlare.CloudFlare()
                cf.zones.purge_cache.delete(cf_zone_id, data={'files':[source_url_http, source_url_https]})
        else:
            # No AWS: optimize the local copy and overwrite it in place.
            path = os.getcwd() + self.instance.url
            source = tinify.from_file(path)
            source.to_file(path)
|
# Generated by Django 2.1.4 on 2019-01-10 06:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Post.message: TextField with max_length=145 and a verbose_name."""
    dependencies = [
        ('polls', '0003_post_title'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='message',
            # NOTE(review): the verbose_name below appears mojibake'd and is
            # split across lines in this copy — verify against the original
            # migration file before editing.
            field=models.TextField(max_length=145, verbose_name='ๆ็จฟๅ
ๅฎน'),
        ),
    ]
|
# calculate age using Python
def _age_from_birth_year(birth_year, current_year=2021):
    """Return the age for someone born in *birth_year* (reference year 2021)."""
    return current_year - birth_year

while True:
    # Re-prompt until the user supplies a parseable year.
    try:
        age = int(input('Please enter the year you were born. for example = 2001 '))
        age = _age_from_birth_year(age)
        print(f'You are {age} old')
    except ValueError:
        # int() raises ValueError on non-numeric input.
        # (The original also caught ZeroDivisionError, which no line here
        # can raise — removed as unreachable.)
        print('Please enter a number')
    else:
        print('thankyou')
        break
|
#Simple script to publish data to a subscriber.
#Uses the REP messaging pattern.
import zmq
import time
from random import *
def main():
    """
    Reply-side loop: wait for each request, then send back a counter value.

    NOTE(review): conventionally the REP socket bind()s and REQ connect()s;
    here the roles are reversed (the peer must bind) — confirm peer setup.
    """
    context = zmq.Context()
    socket= context.socket(zmq.REP)
    socket.connect("tcp://127.0.0.1:5200")#Here the REP can be used to connect
    for i in range(100,200):
        msg= socket.recv()  # request payload is ignored; only arrival matters
        # NOTE(review): on Python 3 send() requires bytes — str(i) suggests
        # Python 2; send_string() would be portable across both.
        socket.send(str(i))
        print(i)
        x = randint(1,10)#generate random numbers
        time.sleep(x)  # random delay before serving the next request
    socket.close()
if __name__=="__main__":
    main()
|
# Generated by Django 3.0.2 on 2020-04-22 15:40
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Add Games.highestscore and refresh the frozen datetime defaults of
    several fields.  NOTE: the literal datetimes below are snapshots taken
    when makemigrations ran — normal for auto-generated migrations.
    """
    dependencies = [
        ('obsapp', '0029_auto_20200419_0006'),
    ]
    operations = [
        migrations.AddField(
            model_name='games',
            name='highestscore',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='gamerating',
            name='reviewtime',
            field=models.DateField(default=datetime.datetime(2020, 4, 22, 21, 10, 44, 224477)),
        ),
        migrations.AlterField(
            model_name='notificationz',
            name='notificationtime',
            field=models.DateTimeField(default=datetime.datetime(2020, 4, 22, 21, 10, 44, 222475)),
        ),
        migrations.AlterField(
            model_name='requestexchange',
            name='datetimeofrequest',
            field=models.DateTimeField(default=datetime.datetime(2020, 4, 22, 21, 10, 44, 221475)),
        ),
    ]
|
from abc import ABC, abstractmethod, abstractproperty
import os, sys
from shlex import split
import argparse
from re import sub as substitute
class AbsHandler(ABC):
    """
    Base class for command handlers: wires progress/finish callbacks around
    an abstract ``handle`` method and provides argparse helper utilities.
    """
    def __init__(self, info_handler = None, finish_handler = None, handling_params_string : str = None, handle_immediately : bool = False) -> None:
        self.is_running = False
        self.set_notifications(info_handler, finish_handler)
        if handle_immediately:
            self.start_handling(handling_params_string)
    def set_notifications(self, info_handler = None, finish_handler = None) -> None:
        # Fall back to print so the handler works without a UI attached.
        self.publish_info = info_handler if info_handler else print
        self.publish_finish_info = finish_handler if finish_handler else print
    def start_handling(self, handling_params_string) -> None:
        """Run handle() and announce completion unless it was cancelled."""
        self.is_running = True
        r = self.handle(handling_params_string)
        # Only announce completion if stop_handling() wasn't called meanwhile.
        if self.is_running:
            self.publish_finish_info(' '.join(["Done!", r]))
            self.is_running = False
    def stop_handling(self) -> None:
        """Cancel an in-flight handle(); suppresses the finish notification."""
        if self.is_running:
            self.is_running = False
    def handle_sysragv(self, sys_input_params):
        # BUG FIX: the original ignored ``sys_input_params`` and always
        # joined sys.argv ('if (sys.argv)' is always truthy).  Use the
        # supplied argument, falling back to sys.argv for compatibility.
        params = sys_input_params if sys_input_params else sys.argv
        handling_params_string = ' '.join(params) if params else None
        self.start_handling(handling_params_string)
    def get_args_from_params_string(self, handling_params_string, argparser):
        """Shlex-split a parameter string and parse it with *argparser*."""
        params = split(handling_params_string)
        args = argparser.parse_args(params)
        return args
    def get_description_from_argparser(self, argparser):
        """Return the argparser usage text with the 'usage...' prefix trimmed."""
        command_description = substitute(r'^usage.*-h', r'[-h', argparser.format_usage())
        return command_description
    @abstractmethod
    def get_name(self) -> str:
        pass
    @abstractmethod
    def get_description(self) -> str:
        pass
    @abstractmethod
    def handle(self, handling_params_string) -> str:
        pass
|
#! usr/bin/python3
# -*- coding = utf-8 -*-
from RuntimeDecorate import runtime
# Module-level memo for rob(); keyed by the exact remaining-values tuple.
data = {}


@runtime
def rob(nums: list) -> int:
    """
    House-robber: maximum sum of non-adjacent values (recursive + memoized).

    BUG FIX: the original memo was keyed only by len(nums), so results from
    one input list were incorrectly reused for any other list of the same
    length (including across separate calls).  Key by the value tuple.
    :param nums: list of non-negative amounts
    :return: maximum obtainable sum without robbing adjacent houses
    """
    if not nums:
        return 0
    l = len(nums)
    if l == 1:
        return nums[0]
    elif l == 2:
        return max(nums[0], nums[1])
    elif l == 3:
        return max(nums[1], nums[0] + nums[2])
    key = tuple(nums)
    if key not in data:
        data[key] = max(nums[0] + rob(nums[2:]), nums[1] + rob(nums[3:]))
    return data[key]
@runtime
def rob2(nums: list) -> int:
    """Iterative house-robber DP in O(1) extra space."""
    prev_best = 0
    best = 0
    for num in nums:
        # Debug trace of the rolling DP state (kept from the original).
        print("cur:%d,pre:%d" % (best, prev_best))
        # Either rob this house (prev_best + num) or skip it (best).
        prev_best, best = best, max(prev_best + num, best)
    return best
# Sample inputs of increasing size; only the recursive version is exercised.
q0 = [1, 1, 1, 1]
q1 = [2, 7, 9, 3, 1]
q = [155, 44, 52, 58, 250, 225, 109, 118, 211, 73, 137, 96, 137, 89, 174, 66,
134, 26, 25, 205, 239, 85, 146, 73, 55, 6, 122, 196, 128, 50, 61, 230, 94,
208, 46, 243, 105, 81, 157, 89, 205, 78, 249, 203, 238, 239, 217, 212,
241, 242, 157, 79, 133, 66, 36, 165]
ans = rob(q)
|
# sheldon woodward
# 2/10/19
from collections import defaultdict
def find_anagram(words):
    """
    Group *words* into anagram sets.

    Words made of the same letters share the same sorted-character key, so
    grouping by that key collects anagrams together.  defaultdict avoids
    the explicit "create the key if missing" dance of a plain dict.

    :param words: a list of words to find anagrams of.
    :return: a list of anagram lists (one list per anagram group).
    """
    anagrams = defaultdict(list)
    for word in words:
        # The sorted characters of a word form its canonical anagram key.
        anagrams[''.join(sorted(word))].append(word)
    # BUG FIX: the docstring promises a list but the original returned a
    # dict view; materialize it so callers get a real list.
    return list(anagrams.values())
|
# La leyenda de Filius Bonacci
# Espiral Fibonacci
# Imprima ๐ nรบmeros de la sucesion de Fibonacci
# 0, 1, 1, 2, 3, 5, 8, 13, 21 ...
def fibonacci(n):
n_anterior = 0
n_actual = 1
sucesion = ""
for i in range(n):
if i == 0:
sucesion += "0"
elif i == 1:
sucesion += ", 1"
else:
aux = n_actual
n_actual = n_actual + n_anterior
n_anterior = aux
sucesion += ", " + str(n_actual)
print(sucesion) |
from flask import flash, redirect, url_for
from flask_admin import Admin, BaseView, expose
from ..admin import AdminPermissionRequiredMixin
from . import models
class DiscourseView(AdminPermissionRequiredMixin, BaseView):
    """Flask-Admin view for Discourse integration (admin permission required)."""
    @expose('/')
    def index(self):
        # Landing page for the Discourse admin section.
        return self.render('admin/discourse_index.html')
    @expose('/topics/recache', methods=['POST'])
    def recache_topics(self):
        """Refresh the cached Discourse topics, then bounce back to the index."""
        models.DiscourseTopicEvent.update()
        flash('Discourse topics recached.')
        return redirect(url_for('discourse_admin.index'))
|
__author__ = 'Lucas Amaral'
class Pessoa:
    """A person with a name and birth date, plus simple accessors."""

    # Class-level defaults, overridden per instance by __init__.
    _nome = "asdf"
    _nascimento = "12345"

    def __init__(self, new_name, data_nasc):
        self._nome = new_name
        self._nascimento = data_nasc

    def setNome(self, new_name):
        """Replace the stored name."""
        self._nome = new_name

    def getNome(self):
        """Return the stored name."""
        return self._nome

    def isAlive(self):
        """Always True in this toy model."""
        return True
|
# Read Ferrers-diagram row heights until a blank/invalid line, then print
# the product of the heights (the "factorial" over the given diagram).
print('ะะฒะตะดััั ะฒะธัะพัั ะบะพะถะฝะพั ะดัะฐะณัะฐะผะธ ะคะตััะต, ะดะปั ะทะฐะบัะฝัะตะฝะฝั ะดะฒััั ะฝะฐะถะธะผะฐะนัะต enter')
rices = []
while True:
    try:
        a = int(input('-->> '))
    except (ValueError, EOFError):
        # Narrowed from a bare 'except:' — only bad numeric input or
        # end-of-input terminates the loop.  This also fixes the original's
        # inconsistency where an invalid FIRST entry crashed the script
        # (the first input() was outside the try block).
        break
    rices.append(a)
val = 1
for i in rices:
    val *= i
print("ะัะปัะบัััั ััะฐัะบัะพััะน ะฝะฐ ะทะฐะดะฐะฝัะน ะดัะฐะณัะฐะผั ะคะตััะต:")
print(val)
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from .forms import *
from reports.models import *
from django.shortcuts import render_to_response, render, redirect
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.models import User
from django.utils import timezone
from django.contrib.postgres.search import SearchVector
from django.http import HttpResponse
from wsgiref.util import FileWrapper
from registration.models import UserProfile
from django.core.files import File
import os
from django.conf import settings
import mimetypes
import ast
from django.utils.encoding import smart_str
from Crypto.PublicKey import *
from django.db.models import Q
import geoip2.database
from groupmanagement.models import *
from django.contrib.auth.models import User, Group
import json
# Create your views here.
def _attach_document(files, newdoc, doc_field, enc_field):
    """Save one uploaded document slot for *newdoc*, encrypting first if asked."""
    if files.cleaned_data[enc_field] == True:
        f = files.cleaned_data[doc_field]
        # Encrypt with the report owner's public RSA key, then replace the
        # plaintext upload with the encrypted file on disk and in the DB.
        pubKey = UserProfile.objects.get(user_id=newdoc.username_id_id).publicKey
        pubKeyOb = RSA.importKey(pubKey)
        newfile = Document(document=f, report_document=newdoc, name=f, is_encrypted=files.cleaned_data[enc_field])
        newfile.save()
        enc = encrypt_file(pubKeyOb, str(newfile.document))
        Document.objects.filter(name=f).delete()
        os.remove(settings.MEDIA_ROOT + '/' + str(newfile.document))
        newfile2 = Document(document=enc, report_document=newdoc, name=enc, is_encrypted=files.cleaned_data[enc_field])
        newfile2.save()
    elif files.cleaned_data[doc_field]:
        # Plain (unencrypted) attachment.
        f = files.cleaned_data[doc_field]
        newfile = Document(document=f, report_document=newdoc, name=f, is_encrypted=files.cleaned_data[enc_field])
        newfile.save()


@csrf_exempt
def createReport(request):
    """
    Create a report from POSTed form data: geolocate the submitter's IP for
    the report's location and store up to three (optionally encrypted)
    attachments.  GET renders an empty form.
    """
    if request.method == 'POST':
        form = ReportForm(request.POST, request.FILES)
        files = FileForm(request.POST, request.FILES)
        print(files)
        if form.is_valid() and files.is_valid():
            reader = geoip2.database.Reader(settings.BASE_DIR + '/geoip/GeoLite2-City.mmdb')
            ip = request.META.get('REMOTE_ADDR', None)
            # Local development has no routable address; geolocate a fixed IP.
            if ip == '127.0.0.1':
                response = reader.city('128.143.22.36')
                city = response.city.name
            else:
                response = reader.city(ip)
                city = response.city.name
            checked = False
            if request.POST.get("is_private", False):
                checked = True
            newdoc = report(title=form.cleaned_data['title'],
                            timestamp=timezone.now(),
                            short_description=form.cleaned_data['short_description'],
                            detailed_description=form.cleaned_data['detailed_description'],
                            is_private = checked,
                            location=city,
                            username_id= request.user)
            newdoc.save()
            # The three attachment slots shared three byte-identical code
            # paths in the original; they are factored into one helper.
            _attach_document(files, newdoc, 'document', 'is_encrypted')
            _attach_document(files, newdoc, 'document2', 'is_encrypted2')
            _attach_document(files, newdoc, 'document3', 'is_encrypted3')
    else:
        form = ReportForm()
        files = FileForm()
    variables = RequestContext(request, {
        'form': form,
        'files': files
    })
    return render_to_response(
        'reports/createReports.html',
        variables,
    )
@csrf_exempt
def encrypt_file(key, filename):
    """
    Encrypt a media file with the given RSA key, writing '<filename>.enc'.

    NOTE(review): this uses PyCrypto's raw (textbook) RSA .encrypt() with no
    padding, and encrypts the whole file as one chunk — it will fail for
    files larger than the key modulus and is not semantically secure.
    Confirm whether a hybrid scheme (AES + RSA-OAEP) was intended.

    :param key: imported RSA public-key object
    :param filename: path relative to MEDIA_ROOT
    :return: media-relative path of the '.enc' output file
    """
    print(key)
    file = settings.MEDIA_ROOT + "/" + filename
    with open(file, 'rb') as in_file:
        # Output is opened in text mode and receives str(enc_data) — the
        # decrypt side must parse that repr back; preserved as-is.
        with open(file + '.enc','w') as out_file:
            chunk = in_file.read()
            #chunk = bytes(chunk, 'utf-8')
            #chunk = key.encrypt(chunk, 32)
            enc_data = key.encrypt(chunk, 32)
            out_file.write(str(enc_data))
    file = '/documents/' + filename + '.enc'
    return file
@csrf_exempt
def createFolder(request):
    """
    Create a folder for the current user and attach the selected reports.

    NOTE(review): reports are looked up by title, which assumes titles are
    unique — confirm the model constraint.  Also no redirect is issued on
    success; the creation form is re-rendered.
    """
    reports = report.objects.all()
    username_id = request.user
    if request.method == 'POST':
        form = FolderForm(request.POST, request.FILES)
        selected = request.POST.getlist('selected_report[]')
        if form.is_valid():
            folder_object = folder.objects.create(
                title=form.cleaned_data['title'], username_id=username_id
            )
            # Attach each selected report to the freshly created folder.
            for report_selected in selected:
                re = report.objects.get(title=report_selected)
                folder_object.added_reports.add(re)
    else:
        form = FolderForm()
    variables = RequestContext(request, {
        'form': form, 'reports':reports, 'username_id':username_id,
    })
    return render_to_response(
        'reports/createFolder.html',
        variables,
    )
@csrf_exempt
def addToFolder(request):
    """
    Add the POSTed reports to the selected folder, then re-render the folder
    description with the user's reports that are not yet in the folder.
    """
    folder_title = request.POST.get('selected_folder')
    f = folder.objects.get(id=request.POST.get('selected_folder'))
    # Reports already in the folder are excluded from the pick list.
    added = f.added_reports.all().values_list('id', flat=True)
    added = list(added)
    reports = report.objects.all().filter(username_id_id=request.user.id).values("title").exclude(id__in=added)
    #filteredReports = list(reports)
    #temp = []
    username_id = request.user
    if request.POST.get('selected_report[]'):
        # form = FolderForm(request.POST, request.FILES)
        folder_title = request.POST.get('selected_folder')
        selectedReport = request.POST.getlist('selected_report[]')
        # NOTE(review): lookup by title assumes report titles are unique.
        for sr in selectedReport:
            r = report.objects.get(title=sr)
            f.added_reports.add(r)
        f.save()
        # Recompute the exclusion list after the additions above.
        added = f.added_reports.all().values_list('id', flat=True)
        added = list(added)
        reports = report.objects.all().filter(username_id_id=request.user.id).values("title").exclude(id__in=added)
    else:
        form = FolderForm()
    variables = RequestContext(request, {'reports':reports, 'folder_title': folder_title})
    return render_to_response('reports/viewFolderDescription.html', variables,)
@csrf_exempt
def renameFolder(request):
    """
    Rename the selected folder(s) to the POSTed title.

    NOTE(review): if multiple folders are selected they all receive the SAME
    new title, and lookups are by title (uniqueness assumed) — confirm intent.
    """
    folders = folder.objects.all()
    selected = request.POST.getlist('selected_folder[]')
    if request.method == 'POST':
        form = FolderForm(request.POST)
        if form.is_valid():
            title=form.cleaned_data['title']
            for folder_selected in selected:
                print(folder_selected)
                fs = folder.objects.get(title=folder_selected)
                fs.title = title
                fs.save()
    else:
        form = FolderForm()
    variables = RequestContext(request, {
        'form': form, 'folders':folders
    })
    return render_to_response(
        'reports/renameFolder.html',
        variables,
    )
@csrf_exempt
def deleteFolder(request):
    """Delete every folder whose title was selected in the POSTed form."""
    folders = folder.objects.all()
    chosen = request.POST.getlist('selected_folder[]')
    if request.method == 'POST':
        # Titles are assumed unique; each selected folder is removed.
        for chosen_title in chosen:
            folder.objects.get(title=chosen_title).delete()
    variables = RequestContext(request, {'folders': folders})
    return render_to_response('reports/deleteFolder.html', variables,)
def viewFolderContent(request):
    """
    Render the reports inside the selected folder; when the 'remove' flag is
    POSTed, first detach the selected report from the folder.
    """
    if request.POST.get("remove"):
        folder_title = request.POST.get("selected_folder")
        report_title = request.POST.get("selected_report")
        removeReports(request, folder_title, report_title)
    # 'selected_folder' actually carries the folder's id, not its title.
    folder_title = request.POST.get("selected_folder")
    f = folder.objects.get(id=folder_title)
    print(f)
    rep = f.added_reports.all().values_list('id', flat=True)
    rep = list(rep)
    r = report.objects.all().filter(id__in=rep)
    return render(request, 'reports/viewFolderContent.html', {'r':r, 'folder_title': folder_title})
def viewFolderDescription(request):
    """
    Render the folder-description page for the POSTed folder with every
    report available for adding.  (Removed the original's unused locals
    ``user`` and ``selected``.)
    """
    folder_title = request.POST.get("selected_folder")
    reports = report.objects.all()
    return render(request, 'reports/viewFolderDescription.html', {'folder_title':folder_title, 'reports':reports})
def removeReports(request, folder_title, report_title):
    """
    Detach a report (matched by title) from a folder (matched by id).

    NOTE(review): only the first id matching the title is removed — if
    titles are not unique this silently picks an arbitrary report.
    """
    f = folder.objects.get(id=folder_title)
    r = report.objects.all().filter(title=report_title).values_list('id', flat=True)
    print(r)
    #r = r.get('id')
    f.added_reports.remove(r[0])
    f.save()
    # return redirect(viewFolderContent)
@csrf_exempt
def viewFolder(request):
    """List the folders owned by the current user."""
    current_user = request.user
    owned_folders = folder.objects.all().filter(username_id_id=current_user.id)
    return render(request, 'reports/viewFolders.html', {'folders': owned_folders, 'user': current_user})
@csrf_exempt
def viewReport(request):
    """
    List reports visible to a user: superusers see everything, everyone
    else only public reports.  When 'username' is POSTed (API mode) a JSON
    list of titles is returned instead of the HTML page.
    """
    user = request.user
    if 'username' in request.POST:
        user = User.objects.get(username=request.POST.get('username'))
    if user.is_superuser:
        reports = report.objects.all()
    else:
        reports = report.objects.all().filter(is_private=False)
    print(reports)
    folders = folder.objects.all()
    if 'username' in request.POST:
        reps = []
        for rep in reports:
            reps.append(rep.title)
        return HttpResponse(json.dumps(reps), status=200)
    return render(request, 'reports/viewReports.html', {'user': user, 'reports': reports, 'folders':folders})
@csrf_exempt
def viewReports(request):
    """
    Show one report's details and attachments.  The report is selected via
    'reportname' (API mode, returns JSON) or 'selected_report' (HTML mode).
    NOTE(review): lookup is by title, assuming titles are unique.
    """
    user = request.user
    if 'reportname' in request.POST:
        title = request.POST.get('reportname')
    else:
        title = request.POST.get("selected_report")
    rs = report.objects.get(title=title)
    files = Document.objects.all().filter(report_document=rs.id)
    owner = User.objects.get(id=rs.username_id_id)
    print(owner.username)
    if 'reportname' in request.POST:
        # API mode: serialize the report metadata plus attachment names.
        filenames = []
        for file in files:
            filenames.append(file.document.name)
        return HttpResponse(json.dumps({'files' : filenames, 'owner' : owner.username, 'title' : rs.title,
                                        's_desc' : rs.short_description, 'd_desc' : rs.detailed_description,
                                        'isPrivate' : rs.is_private}))
    return render(request, 'reports/viewReportDescription.html', {'rs': rs, 'user': user, 'files': files, 'owner': owner})
@csrf_exempt
def download(request, file_name):
    """
    Stream a stored media file back to the client as an attachment.

    NOTE(review): *file_name* is interpolated into a filesystem path
    directly, which permits path traversal ('../..') — confirm upstream
    URL-routing constraints or sanitize here.
    :param file_name: name of the file under MEDIA_ROOT
    :return: HttpResponse serving the file with a download disposition
    """
    file_path = settings.MEDIA_ROOT + '/' + file_name
    file_wrapper = FileWrapper(open(file_path, 'rb'))
    # BUG FIX: mimetypes.guess_type returns a (type, encoding) tuple; the
    # original passed the whole tuple as content_type, yielding an invalid
    # Content-Type header.  Use the type element only.
    file_mimetype = mimetypes.guess_type(file_path)[0]
    response = HttpResponse(file_wrapper, content_type=file_mimetype)
    response['X-Sendfile'] = file_path
    response['Content-Disposition'] = 'attachment; filename="%s"' % smart_str(file_name)
    return response
@csrf_exempt
def viewYourReports(request):
    """
    List the reports a user owns or can access via group membership.
    Superusers see every report.  When 'username' is POSTed (API mode) a
    JSON list of titles is returned instead of the HTML page.
    """
    user = request.user
    if 'username' in request.POST:
        user = User.objects.get(username=request.POST.get('username'))
    reports = report.objects.all().filter(username_id=user)
    folders = folder.objects.all().filter(username_id=user)
    listOfR = []
    if user.is_superuser:
        # BUG FIX: the original assigned the QuerySet itself here and then
        # called .append() on it below, raising AttributeError for every
        # superuser request.  Materialize it as a list.
        # NOTE(review): the loops below then re-append the user's own and
        # group reports, so superusers may see duplicates — confirm intent.
        listOfR = list(report.objects.all())
    for each in report.objects.filter(username_id=user):
        listOfR.append(each)
    # Reports shared with the user through any of their groups.
    for group in user.groups.all():
        for reportSet in group.groupreports_set.all():
            listOfR.append(reportSet.report_document)
    AllReports = []
    for rep in listOfR:
        AllReports.append(rep.title)
    if 'username' in request.POST:
        return HttpResponse(json.dumps(AllReports), status=200)
    return render(request, 'reports/viewYourReports.html', {'reports' : listOfR, 'user': user, 'folders':folders })
@csrf_exempt
def editReport(request):
    """Update a report's fields when the form signals 'updated'.

    The report is located by its pre-edit title (POST 'original'); the new
    title, descriptions and privacy flag come from the other POST fields.
    """
    user = request.user
    title = request.POST.get("title")
    short = request.POST.get("short")
    detailed = request.POST.get("detailed")
    is_private = request.POST.get("private")
    original = request.POST.get("original")
    if request.POST.getlist('updated'):
        target = report.objects.get(title=original)
        target.title = title
        target.short_description = short
        target.detailed_description = detailed
        target.is_private = (is_private == "private")
        target.save()
    return render(request, 'reports/editReport.html',
                  {'user': user, 'title': title, 'short': short,
                   'detailed': detailed, 'private': is_private})
@csrf_exempt
def deleteReport(request):
    """Delete the report whose primary key arrives in POST['id']."""
    user = request.user
    report_id = request.POST.get("id")  # renamed: avoid shadowing builtin id
    report.objects.filter(id=report_id).delete()
    return render(request, 'reports/viewYourReports.html', {'user': user})
@csrf_exempt
def searchReports(request):
    """Search public reports by full-text query ('q'), optionally narrowed
    by 'location' and a 'start-date'/'end-date' range (all GET parameters).

    Private reports are always excluded; results are ordered by timestamp.
    """
    query_string = request.GET.get('q')
    loc = request.GET.get('location')
    # BUG FIX: the original tested request.GET.get('end=date') (typo for
    # 'end-date'), so start_date/end_date were never assigned and the date
    # branches below raised NameError.
    if request.GET.get('start-date') and request.GET.get('end-date'):
        start_date = request.GET.get('start-date') + ' 00:00:00.000000-00'
        end_date = request.GET.get('end-date') + ' 00:00:00.000000-00'
    if request.GET.get('q'):
        results = report.objects.annotate(
            search=SearchVector('title', 'short_description', 'detailed_description'),
        ).filter(search=query_string).exclude(is_private=True).order_by('timestamp')
        if request.GET.get('location'):
            results = results.filter(location=loc)
            # Date range only applies on top of a location filter, matching
            # the original nesting.
            if request.GET.get('start-date') and request.GET.get('end-date'):
                results = results.filter(timestamp__range=(start_date, end_date))
        return render(request, 'reports/searchReports.html', {'results': results})
    if request.GET.get('location'):
        results = report.objects.all().filter(location=loc).exclude(is_private=True).order_by('timestamp')
        if request.GET.get('start-date') and request.GET.get('end-date'):
            results = results.filter(timestamp__range=(start_date, end_date))
        return render(request, 'reports/searchReports.html', {'results': results})
    if request.GET.get('start-date') and request.GET.get('end-date'):
        results = report.objects.all().filter(timestamp__range=(start_date, end_date)).exclude(is_private=True).order_by('timestamp')
        return render(request, 'reports/searchReports.html', {'results': results})
    return render(request, 'reports/searchReports.html', {})
@csrf_exempt
def reportHome(request):
    """Render the reports landing page, flagging suspended accounts."""
    user = request.user
    profile = UserProfile.objects.get(id=user.id)
    return render_to_response("reports/reportHome.html",
                              {"user": user, 'Suspended': profile.isSuspended})
@csrf_exempt
def folderHome(request):
    """Render the folders landing page, flagging suspended accounts."""
    user = request.user
    profile = UserProfile.objects.get(id=user.id)
    return render_to_response("reports/folderHome.html",
                              {"user": user, 'Suspended': profile.isSuspended})
import pytest
from page.home_page import *
from page.notice_list_page import *
import time
'''Tests for the company notice list page.'''
class TestNoticeList():
    """UI tests for the company notice list page.

    NOTE(review): the source of this block was garbled by text extraction
    (multi-byte comments and string literals split across lines); the CJK
    string literals below were reconstructed by rejoining the fragments --
    confirm against the original file.
    """
    def enter_in_notice_page(self,driver,host,pkUser,pkCompany):
        '''Open the home page and navigate into the company notice page.'''
        HomePage(driver).open(host,pkUser,pkCompany)
        list=NoticeListPage(driver)
        list.enter_in_page()
        return list

    # def test_operation_column_exist(self,driver,host):
    #     '''With manage-company permission the "operation" column is shown.'''
    #     list = self.enter_in_notice_page(driver, host, "d43c2133-a058-487d-b271-ade608548bfb",
    #                                      "68dc0e06-82b7-4024-9a5b-ae921cc53914")
    #     result=list.check_operation_column_exist()
    #     assert result==True
    #
    # def test_operation_column_not_exist(self,driver,host):
    #     '''Without manage-company permission the "operation" column is hidden.'''
    #     list=self.enter_in_notice_page(driver, host, "a01ef2d3-e87f-4910-9c37-288a3dca325e",
    #                                    "68dc0e06-82b7-4024-9a5b-ae921cc53914")
    #     result=list.check_operation_column_exist()
    #     assert result==False

    # def test_top_notice(self,driver,host):
    #     '''Pin (top) the last notice on the current page.'''
    #     list = self.enter_in_notice_page(driver, host, "d43c2133-a058-487d-b271-ade608548bfb",
    #                                      "68dc0e06-82b7-4024-9a5b-ae921cc53914")
    #     article_title=list.top_or_cancel_top_notice(-1)
    #     assert list.get_toast_text()=="็ฝฎ้กถๆๅ"
    #     index=list.get_title_index(article_title)  # index of that title
    #     assert list.get_top_icon_title_attribute(index)=="ๅๆถ็ฝฎ้กถ"  # button flips to "unpin"
    #     assert list.check_top_icon_exist(index) == True  # pinned icon present
    #
    # def test_cancel_top_notice(self,driver,host):
    #     '''Unpin the first notice on the current page.'''
    #     list = self.enter_in_notice_page(driver, host, "d43c2133-a058-487d-b271-ade608548bfb",
    #                                      "68dc0e06-82b7-4024-9a5b-ae921cc53914")
    #     article_title = list.top_or_cancel_top_notice(0)
    #     assert list.get_toast_text() == "ๅๆถ็ฝฎ้กถๆๅ"
    #     index = list.get_title_index(article_title)  # index of that title
    #     assert list.get_top_icon_title_attribute(index) == "็ฝฎ้กถ"  # button flips back to "pin"
    #     assert list.check_top_icon_exist(index) == False  # pinned icon removed

    def test_refresh(self,driver,host):
        '''Refresh both a non-pinned and a pinned notice and verify the
        toast, the updated timestamp and the resulting list position.'''
        list = self.enter_in_notice_page(driver, host, "d43c2133-a058-487d-b271-ade608548bfb",
                                         "68dc0e06-82b7-4024-9a5b-ae921cc53914")
        # Refresh a non-pinned notice (the last row on the page).
        top_notice_len = list.get_top_notice_length()  # number of pinned notices
        toast_text,time_difference,index_after= list.check_refresh_notice(-1)
        assert toast_text=="ๅฌๅๅทๆฐๆๅ"
        assert time_difference==0.0
        # A refreshed non-pinned notice moves to just below the pinned block.
        assert index_after==top_notice_len
        # Refresh a pinned notice (the last row of the pinned block).
        toast_text,time_difference,index_after=list.check_refresh_notice(top_notice_len-1)
        assert toast_text == "ๅฌๅๅทๆฐๆๅ"
        assert time_difference == 0.0
        # A refreshed pinned notice moves to the very top.
        assert index_after == 0
|
import eqparser
def createTreeCopy(root):
    """Return a deep copy of the parse tree rooted at *root*.

    A node's single (non-list) child is treated as a one-element child list.
    Returns None for an empty tree.
    """
    if root is None:  # BUG FIX: was `== None`
        return None
    newRoot = eqparser.Node(root.type, [], root.leaf)
    # BUG FIX: the original rebound root.children on the *source* tree while
    # copying it; normalize into a local instead so the input is untouched.
    children = root.children if isinstance(root.children, list) else [root.children]
    for child in children:
        newRoot.children.append(createTreeCopy(child))
    return newRoot
# Main Function ****************************************************************
if __name__ == "__main__":
    # Python 2 REPL: parse each typed equation, deep-copy its tree, mutate
    # only the copy, and print both trees to demonstrate the copy is deep.
    while 1:
        try:
            s = raw_input('eq > ')   # use input() on Python 3
        except EOFError:
            print
            break
        root = eqparser.parse(s)
        newRoot = createTreeCopy(root)
        # Mutate the copy only; 'Old Tree' below must remain unchanged.
        newRoot.children[0].leaf = 'a'
        print "Old Tree = " + repr(root)
        print "New Tree = " + repr(newRoot)
|
import sys

# For each of T test cases (each preceded by a blank line), read N integers
# and report whether their sum is evenly divisible by N, i.e. whether the
# average is a whole number.
cases = int(sys.stdin.readline())
for _ in range(cases):
    sys.stdin.readline()  # consume the blank separator line
    n = int(sys.stdin.readline())
    total = sum(int(sys.stdin.readline()) for _ in range(n))
    print("YES" if total % n == 0 else "NO")
|
#-*- coding:utf-8 -*-
import datetime
import time
import json
from celery.task import task
from celery.task.sets import subtask
from shopback import paramconfig as pcfg
from shopback.items.tasks import updateUserItemsTask,updateUserProductSkuTask
from shopback.fenxiao.tasks import saveUserFenxiaoProductTask
from shopback.orders.tasks import saveUserDuringOrdersTask
from shopback.fenxiao.tasks import saveUserPurchaseOrderTask
@task()
def initSystemDataFromAuthTask(visitor_id):
    """Bootstrap a newly-authorized user's shop data.

    The first three sync calls run inline inside this task; the two order
    syncs are dispatched as separate asynchronous tasks via .delay().
    """
    # Refresh the user's items currently on display.
    updateUserItemsTask(visitor_id)
    # Refresh product SKU details.
    updateUserProductSkuTask(visitor_id)
    # Refresh distribution (fenxiao) product details.
    saveUserFenxiaoProductTask(visitor_id)
    # Refresh shop orders waiting for the seller to ship.
    saveUserDuringOrdersTask.delay(visitor_id,status=pcfg.WAIT_SELLER_SEND_GOODS)
    # Refresh purchase (distribution) orders waiting for shipment.
    saveUserPurchaseOrderTask.delay(visitor_id,status=pcfg.WAIT_SELLER_SEND_GOODS)
|
def composite_trapezoidal(fn, a, b, M):
    """Approximate the integral of fn over [a, b] using the composite
    trapezoidal rule on M equal subintervals."""
    h = (b - a) / M
    interior = sum(fn(a + h * k) for k in range(1, M))
    return h * interior + h / 2 * (fn(a) + fn(b))
def composite_simpson(fn, a, b, M):
    """Approximate the integral of fn over [a, b] using the composite
    Simpson rule with 2*M subintervals of width h = (b-a)/(2*M)."""
    h = (b - a) / (2 * M)
    # Interior even-indexed nodes (weight 2) and odd-indexed nodes (weight 4).
    even_sum = sum(fn(a + 2 * h * k) for k in range(1, M))
    odd_sum = sum(fn(a + h * (2 * k - 1)) for k in range(1, M + 1))
    return 2 * h / 3 * even_sum + 4 * h / 3 * odd_sum + h / 3 * (fn(a) + fn(b))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# By Antoine Maziรจres -- http://ant1.cc/
# For CorText Project -- http://cortext.fr/
# CC-BY-CA
#
# This script query a Seeks server ($seeks_node) and return a list of seeds URL to crawl
# usage for debug : python ./make_seeds.py | tee log.txt && wc -l log.txt
import json
import urllib
import pprint
import sys
import os
import string
# tweak a textmate bug
reload(sys)
sys.setdefaultencoding("utf-8")
#
# tweak a textmate bug
reload(sys)
sys.setdefaultencoding("utf-8")
#
num_results = 1000
seeks_node = '67.23.28.136'
result = '%s' % urllib.urlopen('http://%s/search.php/search/txt/q=volcan?output=json&rpp=%d&expansion=%d' % ( seeks_node, num_results, num_results )).read()
result_utf8 = unicode(result, errors='ignore')
result_json = json.loads('%s' % result_utf8)
for each in result_json['snippets']:
print each['cite']
|
import itertools
if __name__ == '__main__':
    # Read "<string> <r>" and print every r-length permutation of the
    # string's characters in sorted order, one per line.
    data, r = input().split()
    words = sorted(''.join(p) for p in itertools.permutations(data, int(r)))
    for word in words:
        print(word)
|
import os #this library provides an interface between Python and an operating system
def add_preamble(directory):
    """Prepend "preamble" to every .py file under *directory*.

    Files under 'vendor'/'vendor-local' directories and this script itself
    are skipped. When the first line is a shebang or coding declaration it
    stays at the top and the preamble is inserted right after it. Empty
    files are left untouched.
    """
    for path, names, files in os.walk(directory):
        # Prune vendor trees in place so os.walk never descends into them.
        # (BUG FIX: the original tested `names != 'vendor' or ...`, which is
        # always true because `names` is a list of directory names.)
        names[:] = [d for d in names if d not in ('vendor', 'vendor-local')]
        for afile in files:
            if not afile.endswith('.py') or afile == 'addpreamble.py':
                continue
            afile_path = os.path.join(path, afile)
            with open(afile_path, 'r') as f:
                all_lines = f.readlines()
            if not all_lines:
                continue
            first_line = all_lines[0]
            if first_line == '#!/usr/bin/env python\n' or first_line.startswith('# coding') or first_line.startswith('coding: utf-8'):
                # Keep the interpreter/encoding line first.
                # (BUG FIX: the original wrote str(all_lines[1:]) -- the repr
                # of a list -- into the file and appended the preamble at the
                # end; it also read from an already-closed handle in the
                # other branch.)
                new_lines = [first_line, 'preamble\n\n'] + all_lines[1:]
            else:
                new_lines = ['preamble\n\n'] + all_lines
            with open(afile_path, 'w') as f:
                f.writelines(new_lines)
#--------------------------------------------------------------------------------
# WARNING: runs against the current working directory -- every .py file below
# the directory this script is launched from gets rewritten in place.
add_preamble('.') #calls add_preamble on the directory from which python addpreamble.py is run
|
# Ask for the user's age and print it 50 years from now.
# BUG FIX: input() returns a string in Python 3, so `age + 50` raised
# TypeError (the trailing comments already noted the confusion); convert
# to int before doing arithmetic.
age = int(input("Enter your age: "))
new_age = age + 50
print(new_age)
|
from django.shortcuts import render
import numpy as np
import pandas as pd
from . import forms
import pickle
import os
# Create your views here.
def readData(filepath='data/water_potability.csv'):
    """Load a CSV dataset and split it into features and target.

    Returns (X, y): X holds every column except the last, y is the last
    column (assumed to be the label).
    """
    frame = pd.read_csv(filepath)
    X = frame.iloc[:, :-1]
    y = frame.iloc[:, -1]
    return X, y
def evaluateUserInput(data_features, X_test):
    """Score one sample against every pickled model under app/result/models
    and combine their votes, weighted by each model's stored accuracy.

    Returns [per-model results dict, weighted score in [-1, 1], class label].
    NOTE(review): data_features is unused here -- presumably kept for
    signature parity with getUserInput; confirm before removing.
    """
    # X_test = getUserInput(data_features)
    # Scale the single sample with the scaler fitted at training time.
    with open(f'app/result/scaler/MinMaxScaler.sav', 'rb') as f:
        scaler = pickle.load(f)
    X_test = scaler.transform([X_test])
    # Per-model accuracies, in the same order the evaluation CSV was written.
    # NOTE(review): assumes os.listdir order below matches this CSV's row
    # order -- verify, listdir order is filesystem-dependent.
    model_accuracy_scores = pd.read_csv(
        'app/result/model_evaluation/ModelEvaluationScores.csv'
    )['Accuracy']
    model_list=[]
    test_results = []
    prediction_score = 0
    model_files = os.listdir('app/result/models')
    for model_file in model_files:
        with open(f'app/result/models/{model_file}', 'rb') as f:
            model = pickle.load(f)
            model_list.append(model)
            # Model display name = repr up to the first '(',
            # e.g. "RandomForestClassifier".
            test_results.append([str(model)[ : str(model).index('(')]])
    for index, classifier in enumerate(model_list):
        y_pred = classifier.predict(X_test)
        y_pred = 'Positive' if y_pred == 1 else 'Negative'
        # Accuracy-weighted vote: more accurate models move the score more.
        if y_pred == 'Positive':
            prediction_score += 1 * model_accuracy_scores[index]
        else:
            prediction_score -= 1 * model_accuracy_scores[index]
        test_results[index].append(y_pred)
    # Normalize the weighted vote into [-1, 1].
    prediction_score = prediction_score / sum(model_accuracy_scores)
    if prediction_score < 0:
        prediction_class = 'Negative'
    elif prediction_score > 0:
        prediction_class = 'Positive'
    else:
        prediction_class = 'Indeterminate'
    # Map model name -> its individual verdict.
    results = {}
    for test_result in test_results:
        results[test_result[0]] = test_result[1]
    final_result = []
    final_result.append(results)
    final_result.append(prediction_score)
    final_result.append(prediction_class)
    return final_result
def getUserInput(data_features):
    """Prompt on stdin for one value per feature column and return the raw
    input strings in column order."""
    values = []
    for column in data_features.columns:
        print(f'Enter value for {column}')
        print(f'Range is {min(data_features[column])} to {max(data_features[column])}')
        values.append(input('Your input: '))
    return values
def Home(request):
    """Render the input form; on a valid POST, score the submitted sample
    with every stored model and show the combined prediction."""
    X, y = readData(filepath='app/data/parkinsons.csv')
    if request.method == 'POST':
        form = forms.InputDataForm(request.POST)
        if form.is_valid():
            # Field order must match the feature order the scaler/models
            # were trained with.
            field_order = ['ph', 'hardness', 'solids', 'chloramines',
                           'sulphate', 'conductivity', 'organic_carbon',
                           'trihalomethanes', 'turbidity']
            user_input = [form.cleaned_data[name] for name in field_order]
            results = evaluateUserInput(X, user_input)
            return render(request, 'result.html',
                          {'model_results': results[0],
                           'prediction_score': results[1],
                           'prediction_class': results[2]})
        # Invalid POST: fall through and re-render the bound form below.
    else:
        form = forms.InputDataForm()
    return render(request, 'home.html', {'form': form})
|
# python 2.7.3
import sys
import math
# Reads one integer expression from stdin (Python 2 input() evaluates it)
# and prints n*(n+1)*(n+2)/2 -- integer division under Python 2 semantics.
n = input()
print n * (n + 1) * (n + 2) / 2
|
# Find the largest Fibonacci number smaller than 10,000.
fn1, fn2 = 1, 1
fn3 = 42  # seed: any value below 10000 so the loop body runs at least once
while fn3 < 10000:
    fn3 = fn1 + fn2
    fn1, fn2 = fn2, fn3
# On exit fn2 (== fn3) is the first Fibonacci >= 10000, so fn1 is the answer.
print(fn1)
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import shutil
import torch
class Logger:
    """Collects training artifacts for a GAN run under one output directory:
    model weights, sample images/distributions, and loss-curve plots."""

    def __init__(self, output_dir):
        """Wipe and recreate output_dir plus its artifact subdirectories."""
        # Remove and recreate output_dirs
        shutil.rmtree(output_dir, ignore_errors=True)
        os.makedirs(output_dir)
        self.output_dir = output_dir
        self.model_weights_dir = "{0}/model_weights".format(output_dir)
        self.real_images_dir = "{0}/real_images".format(output_dir)
        self.generated_images_dir = "{0}/generated_images".format(output_dir)
        self.real_distributions_dir = "{0}/real_distributions".format(output_dir)
        self.generated_distributions_dir = "{0}/generated_distributions".format(output_dir)
        os.makedirs(self.model_weights_dir)
        os.makedirs(self.real_images_dir)
        os.makedirs(self.generated_images_dir)
        os.makedirs(self.real_distributions_dir)
        os.makedirs(self.generated_distributions_dir)
        # Loss histories, appended once per logged training step.
        self.critic_losses = []
        self.generator_losses = []

    def save_model_weights(self, generator, critic):
        """Persist both networks' state dicts under model_weights/."""
        torch.save(generator.state_dict(), "{0}/generator".format(self.model_weights_dir))
        torch.save(critic.state_dict(), "{0}/critic".format(self.model_weights_dir))

    def log_training_step(self, training_step, num_training_steps, elapsed_training_time, critic_loss, generator_loss):
        """Record the step's losses, refresh the loss plot on disk, and
        print progress with an ETA (elapsed_training_time is treated as
        seconds per step)."""
        self.critic_losses.append(critic_loss)
        self.generator_losses.append(generator_loss)
        plt.plot(self.critic_losses)
        plt.plot(self.generator_losses)
        plt.legend(['Critic Loss', 'Generator Loss'])
        plt.savefig('{0}/losses'.format(self.output_dir))
        plt.close()
        # Estimated time remaining
        num_training_steps_remaining = num_training_steps - training_step
        estimated_minutes_remaining = (num_training_steps_remaining*elapsed_training_time)/60.0
        print("===== TRAINING STEP {} | ~{:.0f} MINUTES REMAINING =====".format(training_step, estimated_minutes_remaining))
        print("CRITIC LOSS: {0}".format(critic_loss))
        print("GENERATOR LOSS: {0}\n".format(generator_loss))

    def visualize_generated_data(self, real_images, fake_images, training_step):
        """Save 28x28 image grids and flattened-pixel distribution plots for
        both the real and generated batches at this training step."""
        # Images:
        generator_sample_images = fake_images.data.cpu().numpy().reshape(-1, 28, 28)
        Logger.visualize_ten_images(generator_sample_images, '{0}/step_{1}'.format(self.generated_images_dir, training_step))
        real_sample_images = real_images.data.cpu().numpy().reshape(-1, 28, 28)
        Logger.visualize_ten_images(real_sample_images, '{0}/step_{1}'.format(self.real_images_dir, training_step))
        # Distributions
        generator_sample_distributions = fake_images.data.cpu().numpy().reshape(-1, 784)
        Logger.visualize_ten_distributions(generator_sample_distributions, '{0}/step_{1}'.format(self.generated_distributions_dir, training_step))
        real_sample_distributions = real_images.data.cpu().numpy().reshape(-1, 784)
        Logger.visualize_ten_distributions(real_sample_distributions, '{0}/step_{1}'.format(self.real_distributions_dir, training_step))

    # BUG FIX: these two helpers take no `self` but were plain methods; they
    # only worked because they were always invoked via the class object.
    # Marking them @staticmethod also makes instance access safe.
    @staticmethod
    def visualize_ten_images(images, output_path):
        """Plot the first ten images in a 3x4 grid and save to output_path."""
        plt.figure()
        for i in range(10):
            ax = plt.subplot(3, 4, i+1)
            im = ax.imshow(images[i], cmap="gray")
        plt.tight_layout()
        plt.savefig(output_path)
        plt.close()

    @staticmethod
    def visualize_ten_distributions(distributions, output_path):
        """Plot the first ten flattened samples as line plots and save."""
        plt.figure()
        for i in range(10):
            ax = plt.subplot(3, 4, i+1)
            ax.set_ylim([-1.1, 1.1])
            ax.plot(distributions[i])
        plt.tight_layout()
        plt.savefig(output_path)
        plt.close()
|
import weakref
from .._compat.typing import Callable, Any
from types import MethodType
__all__ = ['weak_method']
def weak_method(method: Callable) -> Callable:
    """Wrap a bound method without keeping its instance alive.

    The returned callable forwards to the method while the instance (and
    underlying function) are alive, and silently returns None once either
    has been garbage-collected.
    """
    assert isinstance(method, MethodType)
    # weakref.WeakMethod is the stdlib equivalent of the original's two
    # separate refs to __self__ and __func__ (dies when either dies).
    method_ref = weakref.WeakMethod(method)

    def wrapped(*args: Any, **kwargs: Any) -> Any:
        target = method_ref()
        if target is not None:
            return target(*args, **kwargs)

    return wrapped
|
# Sets drop duplicates: the repeated 1, 2 and 3 collapse to single members.
set1 = set((1, 2, 3, 5, "Print", 5.3, 1, 2, 3))
print(set1)
|
from django.shortcuts import render
from .models import Publicacion
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from .forms import PostForm
from django.shortcuts import redirect
# Create your views here.
def listar_pub(request):
    """List publications already published (publish date <= now), oldest first."""
    published = Publicacion.objects.filter(
        fecha_publicacion__lte=timezone.now()
    ).order_by('fecha_publicacion')
    return render(request, 'blog/listar_pub.html', {'pubs': published})
def post_detail(request, pk):
    """Show a single publication; 404 when pk does not exist."""
    publication = get_object_or_404(Publicacion, pk=pk)
    return render(request, 'blog/post_detail.html', {'pubss': publication})
def post_new(request):
    """Create a publication authored by the current user.

    GET renders an empty form; an invalid POST re-renders the bound form.
    """
    if request.method != "POST":
        return render(request, 'blog/post_edit.html', {'form': PostForm()})
    form = PostForm(request.POST)
    if not form.is_valid():
        return render(request, 'blog/post_edit.html', {'form': form})
    post = form.save(commit=False)
    post.autor = request.user
    post.save()
    return redirect('post_detail', pk=post.pk)
def post_edit(request, pk):
    """Edit an existing publication; re-render the form on GET or on errors."""
    post = get_object_or_404(Publicacion, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.autor = request.user
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
def post_draft_list(request):
    """List unpublished drafts (no publish date), oldest creation first."""
    drafts = Publicacion.objects.filter(
        fecha_publicacion__isnull=True
    ).order_by('fecha_creacion')
    return render(request, 'blog/post_draft_list.html', {'posts': drafts})
def post_publish(request, pk):
    """Mark the publication as published and show its detail page."""
    publication = get_object_or_404(Publicacion, pk=pk)
    publication.publicar()
    return redirect('post_detail', pk=pk)
def post_remove(request, pk):
    """Delete the publication and return to the public listing."""
    publication = get_object_or_404(Publicacion, pk=pk)
    publication.delete()
    return redirect('listar_pub')
|
# Practice exercise (CTI 110, Javonte Woods, 11/8/2018):
# ask the user's age and report how old they will be next year.
print("What is your age")
age = int(input("Enter age: "))
yourAgeIs = age + 1
print("Your age is" , yourAgeIs)
|
import FWCore.ParameterSet.Config as cms
## CMSSW configuration: build processed jet trees (AK5/AK7 PF jets) from a
## 2012 MinimumBias AOD file, applying Winter14_V8 JEC text files and an
## HLT prefilter, via the ProcessedTreeProducer analyzer.
process = cms.Process("Ana")
process.load('FWCore.MessageService.MessageLogger_cfi')
##-------------------- Communicate with the DB -----------------------
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'PHYS14_25_V4::All'
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
##process.load('Configuration.StandardSequences.Geometry_cff')
process.load('Configuration.Geometry.GeometryIdeal_cff')
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.load('RecoJets.Configuration.RecoJets_cff')
process.load('CommonTools/RecoAlgos/HBHENoiseFilterResultProducer_cfi')
##-------------------- Import the JEC services -----------------------
process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
############# Set the number of events #############
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1000)
)
############# Format MessageLogger #################
process.MessageLogger.cerr.FwkReport.reportEvery = 100
############# Define the source file ###############
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring("root://eoscms//eos/cms/store/user/ilknur/rootfiles_for_testing/2012_Data/Run2012A_MinimumBias_AOD_13Jul2012_v1.root")
)
############# processed tree producer ##################
process.TFileService = cms.Service("TFileService",fileName = cms.string('ProcessedTree_data.root'))
## AK7 PF-jet tree producer; the AK5 module below is cloned from this one.
process.ak7 = cms.EDAnalyzer('ProcessedTreeProducer',
    ## jet collections ###########################
    pfjets          = cms.InputTag('ak7PFJets'),
    ##calojets        = cms.InputTag('ak7CaloJets'),
    ## database entry for the uncertainties ######
    PFPayloadName   = cms.string('AK7PF'),
    ##CaloPayloadName = cms.string('AK7Calo'),
    jecUncSrc       = cms.string('Winter14_V8_DATA_UncertaintySources_AK7PF.txt'),
    ############# add for JEC 53X ##################
    jecL1FastFile   = cms.string('Winter14_V8_DATA_L1FastJet_AK7PF.txt'),
    jecL2RelativeFile = cms.string('Winter14_V8_DATA_L2Relative_AK7PF.txt'),
    jecL3AbsoluteFile = cms.string('Winter14_V8_DATA_L3Absolute_AK7PF.txt'),
    jecL2L3ResidualFile = cms.string('Winter14_V8_DATA_L2L3Residual_AK7PF.txt'),
    jecUncSrcNames  = cms.vstring('AbsoluteStat','AbsoluteScale','AbsoluteFlavMap','AbsoluteMPFBias',
                                  'Fragmentation','SinglePionECAL','SinglePionHCAL','FlavorQCD',
                                  'TimeEta','TimePt','RelativeJEREC1','RelativeJEREC2',
                                  'RelativeJERHF','RelativePtBB','RelativePtEC1','RelativePtEC2',
                                  'RelativePtHF','RelativeFSR','RelativeStatFSR','RelativeStatEC2',
                                  'RelativeStatHF','PileUpDataMC','PileUpPtRef','PileUpPtBB','PileUpPtEC1',
                                  'PileUpPtEC2','PileUpPtHF','PileUpMuZero','PileUpEnvelope',
                                  'SubTotalPileUp','SubTotalRelative','SubTotalPt','SubTotalScale','SubTotalAbsolute',
                                  'SubTotalMC','Total','TotalNoFlavor','TotalNoTime','TotalNoFlavorNoTime','FlavorZJet',
                                  'FlavorPhotonJet','FlavorPureGluon','FlavorPureQuark','FlavorPureCharm','FlavorPureBottom',
                                  'TimeRunA','TimeRunB','TimeRunC','TimeRunD','CorrelationGroupMPFInSitu',
                                  'CorrelationGroupIntercalibration','CorrelationGroupbJES','CorrelationGroupFlavor',
                                  'CorrelationGroupUncorrelated'
                                  ##52X JEC Uncertainty Sources
                                  ##'Absolute','HighPtExtra','SinglePion','Flavor','Time',
                                  ##'RelativeJEREC1','RelativeJEREC2','RelativeJERHF',
                                  ##'RelativeStatEC2','RelativeStatHF','RelativeFSR',
                                  ##'PileUpDataMC','PileUpOOT','PileUpPt','PileUpBias','PileUpJetRate'
                                  ),
    ## calojet ID and extender for the JTA #######
    ##calojetID       = cms.InputTag('ak7JetID'),
    ##calojetExtender = cms.InputTag('ak7JetExtender'),
    ## set the conditions for good Vtx counting ##
    offlineVertices = cms.InputTag('offlinePrimaryVertices'),
    goodVtxNdof     = cms.double(4),
    goodVtxZ        = cms.double(24),
    ## rho #######################################
    ##srcCaloRho      = cms.InputTag('kt6CaloJets','rho'),
    srcPFRho        = cms.InputTag('kt6PFJets','rho'),
    ## preselection cuts #########################
    maxY            = cms.double(5.0),
    minPFPt         = cms.double(5),
    minPFFatPt      = cms.double(10),
    maxPFFatEta     = cms.double(2.5),
    minTwrEt        = cms.double(0.),
    minTwrEta       = cms.double(0.0),
    maxTwrEta       = cms.double(5.0),
    ##minCaloPt       = cms.double(5),
    minNPFJets      = cms.int32(1),
    ##minNCaloJets    = cms.int32(1),
    minJJMass       = cms.double(-1),
    ## trigger ###################################
    printTriggerMenu = cms.untracked.bool(True),
    processName     = cms.string('HLT'),
    triggerName     = cms.vstring('HLT_L1SingleJet16_v1','HLT_L1SingleJet16_v2','HLT_L1SingleJet16_v3','HLT_L1SingleJet16_v4','HLT_L1SingleJet16_v5','HLT_L1SingleJet16_v6',
                                  'HLT_L1SingleJet36_v1','HLT_L1SingleJet36_v2','HLT_L1SingleJet36_v3','HLT_L1SingleJet36_v4','HLT_L1SingleJet36_v5','HLT_L1SingleJet36_v6',
                                  'HLT_SingleForJet15_v2','HLT_SingleForJet25_v2','HLT_ZeroBias_v6','HLT_ZeroBiasPixel_DoubleTrack_v1',
                                  'HLT_PFJet40_v1','HLT_PFJet80_v1','HLT_PFJet140_v1','HLT_PFJet200_v1','HLT_PFJet260_v1','HLT_PFJet320_v1','HLT_PFJet400_v1',
                                  'HLT_PFJet40_v2','HLT_PFJet80_v2','HLT_PFJet140_v2','HLT_PFJet200_v2','HLT_PFJet260_v2','HLT_PFJet320_v2','HLT_PFJet400_v2',
                                  'HLT_PFJet40_v3','HLT_PFJet80_v3','HLT_PFJet140_v3','HLT_PFJet200_v3','HLT_PFJet260_v3','HLT_PFJet320_v3','HLT_PFJet400_v3',
                                  'HLT_PFJet40_v4','HLT_PFJet80_v4','HLT_PFJet140_v4','HLT_PFJet200_v4','HLT_PFJet260_v4','HLT_PFJet320_v4','HLT_PFJet400_v4',
                                  'HLT_PFJet40_v5','HLT_PFJet80_v5','HLT_PFJet140_v5','HLT_PFJet200_v5','HLT_PFJet260_v5','HLT_PFJet320_v5','HLT_PFJet400_v5'
                                  ),
    triggerResults  = cms.InputTag("TriggerResults","","HLT"),
    triggerEvent    = cms.InputTag("hltTriggerSummaryAOD","","HLT")
    ## jec services ##############################
    ##pfjecService    = cms.string('ak7PFL1FastL2L3Residual'),
    ##calojecService  = cms.string('ak7CaloL1L2L3Residual')
)
## AK5 variant: identical settings except the jet collection, JEC payload
## and correction files, and the trigger-menu printout is suppressed.
process.ak5 = process.ak7.clone(
    pfjets           = 'ak5PFJets',
    ##calojets         = 'ak5CaloJets',
    PFPayloadName    = 'AK5PF',
    ##CaloPayloadName  = 'AK5Calo',
    jecUncSrc        = 'Winter14_V8_DATA_UncertaintySources_AK5PF.txt',
    ##calojetID        = 'ak5JetID',
    ##calojetExtender  = 'ak5JetExtender',
    ## Add JEC 53X Winter ##############################
    jecL1FastFile   = cms.string('Winter14_V8_DATA_L1FastJet_AK5PF.txt'),
    jecL2RelativeFile = cms.string('Winter14_V8_DATA_L2Relative_AK5PF.txt'),
    jecL3AbsoluteFile = cms.string('Winter14_V8_DATA_L3Absolute_AK5PF.txt'),
    jecL2L3ResidualFile = cms.string('Winter14_V8_DATA_L2L3Residual_AK5PF.txt'),
    ##pfjecService     = 'ak5PFL1FastL2L3Residual',
    ##calojecService   = 'ak5CaloL1L2L3Residual',
    printTriggerMenu = False
)
############# hlt filter #########################
process.hltFilter = cms.EDFilter('HLTHighLevel',
    TriggerResultsTag  = cms.InputTag('TriggerResults','','HLT'),
    HLTPaths           = cms.vstring('HLT_L1SingleJet16_v*',
                                     'HLT_L1SingleJet36_v*',
                                     'HLT_SingleForJet15_v*',
                                     'HLT_SingleForJet25_v*',
                                     'HLT_ZeroBias_v*',
                                     'HLT_ZeroBiasPixel_DoubleTrack_v*',
                                     'HLT_PFJet40_v*',
                                     'HLT_PFJet80_v*',
                                     'HLT_PFJet140_v*',
                                     'HLT_PFJet200_v*',
                                     'HLT_PFJet260_v*',
                                     'HLT_PFJet320_v*',
                                     'HLT_PFJet400_v*'
                                     ),
    eventSetupPathsKey = cms.string(''),
    andOr              = cms.bool(True), #----- True = OR, False = AND between the HLTPaths
    throw              = cms.bool(False)
)
## Execution order: HLT prefilter, HBHE noise flag producer, then both trees.
process.path = cms.Path(process.hltFilter * process.HBHENoiseFilterResultProducer * process.ak5 * process.ak7)
|
#!/usr/bin/env python3
import re
def word_frequencies(filename="src/alice.txt"):
    """Count word occurrences in *filename*.

    Words are whitespace-separated tokens with surrounding ASCII punctuation
    stripped; counting is case-sensitive. Returns a dict token -> count.
    """
    counts = {}
    with open(filename, "r") as f:
        for row in f:
            for w in row.split():  # redundant list(...) removed
                w = w.strip("""!"#$%&'()*,-./:;?@[]_""")
                # dict.get avoids the `if w in d.keys()` double lookup
                counts[w] = counts.get(w, 0) + 1
    return counts
def main():
    # Runs the counter against the default corpus; the return value is
    # discarded, so this is effectively a smoke test of the file read.
    word_frequencies()
if __name__ == "__main__":
    main()
|
import operator as op

# Demo of the operator module: function forms of +, *, `in`, and indexing.
print(op.add(4, 5))
print(op.mul(4, 5))
print(op.contains([1, 2, 3], 4))  # same as: 4 in [1, 2, 3]

x = [1, 2, 3]
f = op.itemgetter(2)  # f(x) == x[2]; itemgetter("key") would index a dict
print(f(x))
import math
from typing import NamedTuple
from shared.vector import Vec2
class Ray(NamedTuple):
    """A half-line: every point origin + t*direction with t >= 0."""
    origin: Vec2
    direction: Vec2
class LineSegment(NamedTuple):
    """A segment between endpoints a and b (inclusive)."""
    a: Vec2
    b: Vec2
# https://stackoverflow.com/questions/563198/how-do-you-detect-where-two-line-segments-intersect/
def get_intersection(ray: Ray, line_segment: LineSegment):
p = ray.origin
r = ray.direction
q = line_segment.a
s = line_segment.b - line_segment.a
num = (q - p).cross(s)
denom = r.cross(s)
if denom == 0 and num == 0:
t0 = (q - p).dot(r)/r.dot(r)
t1 = t0 + s.dot(r)/r.dot(r)
if t0 < 0 and t1 < 0:
return None
if t0 < 0:
return t1
return t0
if denom == 0 and num != 0:
return None
t = num / denom
if t < 0:
return None
u = (p - q).cross(r) / s.cross(r)
if 0 <= u <= 1:
return p + t*r
return None
# Smoke test: the segment is collinear with the ray but lies entirely behind
# its origin (t0 = -1, t1 = -0.01), so this prints None.
t_ray = Ray(Vec2(0, 0,), Vec2(1, 0))
segment = LineSegment(Vec2(-1, 0), (Vec2(-0.01, 0)))
print(get_intersection(t_ray, segment))
|
from os import getcwd
from sys import path
# Make the local Library package importable from the launch directory.
# NOTE(review): the backslash separator is Windows-only -- confirm this
# script is Windows-specific before porting.
path.insert(1,getcwd()+"\\Library\\")
import Library
# Replay loop: run the game, then prompt until a valid Y/N answer arrives.
# vask acts as an "answer accepted" flag for the inner validation loop.
vloop = "Y"
vask = "N"
while (vloop=="Y") or (vloop=="y"):
    Library.main.main()
    while (vask=="N"):
        vloop= input ("\nDo you want to try another word? (Y/N) ")
        if (vloop=="Y") or (vloop=="N") or (vloop=="y") or (vloop=="n"):
            vask="Y"
        else:
            print ("Incorrect option")
            vask="N"
    vask = "N"
#CSCI 1133 Homework 2
#Sid Lin
#Problem 2A
#fibonacci function
def newFib(first, second, term):
    """Print terms 3..term of the Fibonacci-like series seeded by the two
    given values, space-separated (the caller prints the first two)."""
    # -2 because main() already printed the first two terms.
    for _ in range(max(0, term - 2)):
        first, second = second, first + second
        print(second, end=" ")
def main():
    """Read the first two terms and the series length, then print the series."""
    a = int(input("Enter the first term of the series:"))
    b = int(input("Enter the second term of the series:"))
    n = int(input("Enter the number of terms you want to see:"))
    print(a, b, end=" ")
    newFib(a, b, n)

if __name__ == "__main__":
    main()
|
import os
import subprocess
import re
import argparse
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# Label columns of the toxic-comment dataset, in training order.
y_names = ['toxic', 'severe_toxic', 'obscene',
           'threat', 'insult', 'identity_hate']
# Placeholder id assigned to synthetic (upsampled) rows.
upsample_id = 'xxxxxxxxxxxxxxxx'
def upsample():
    ''' upsample rare classes
        - severe_toxic: x10
        - obscene: x2
        - threat: x30
        - insult: x2
        - identity_hate: x10
    '''
    def gen_comment(comments):
        # Build a synthetic comment by stitching together pairs of sentences
        # sampled from existing comments of the target class.
        new_comment = ''
        # 10 to 20 sentences
        n_sentences = np.random.randint(5, 11)
        for _ in range(n_sentences):
            comment = np.random.choice(comments, 1)[0]
            sentences = re.split('[.!?;\n]', comment)
            if len(sentences) == 0:
                continue
            # pick 2 consecutive sentences from first 10
            idx = np.random.randint(0, min(10, len(sentences)))
            new_comment += sentences[idx]
            if (idx+1) < len(sentences):
                new_comment += sentences[idx+1]
            new_comment += '. '
        return new_comment

    def upsample1(df, col, ratio):
        # Append (ratio-1) * current count synthetic rows labelled only
        # with `col`; all other labels on the new rows are 0.
        print('upsamping', col)
        cnt = int(df[col].sum() * (ratio-1))
        df_new = pd.DataFrame(index=np.arange(cnt), columns=df.columns)
        for i in range(cnt):
            comments = df[df[col]==1]['comment_text'].values
            comment = gen_comment(comments)
            df_new.loc[i] = [upsample_id, comment, 0, 0, 0, 0, 0, 0]
        df_new[col] = 1
        # BUG FIX: DataFrame.append was deprecated and removed in pandas
        # 2.0; pd.concat is the supported equivalent.
        return pd.concat([df, df_new], ignore_index=True, verify_integrity=True)

    df = pd.read_csv('dataset/train.csv')
    df = upsample1(df, 'severe_toxic', 10)
    df = upsample1(df, 'obscene', 2)
    df = upsample1(df, 'threat', 30)
    df = upsample1(df, 'insult', 2)
    df = upsample1(df, 'identity_hate', 10)
    df.to_csv('dataset/train-upsample.csv', index=False)
def capital_ratio(s):
    """Fraction of ASCII letters in s that are uppercase.

    Returns 0.0 when s contains no ASCII letters at all.
    """
    uppers = len(re.findall('[A-Z]', s))
    lowers = len(re.findall('[a-z]', s))
    letters = uppers + lowers
    return (1.0 * uppers) / letters if letters else 0.0
def gen_X(df):
    """Build a (n_rows, 3) float32 feature matrix from df['comment_text'].

    Columns: word count capped at 200, '!' count capped at 10, and the
    uppercase-letter ratio (via capital_ratio).
    """
    texts = df['comment_text']
    word_counts = texts.apply(lambda t: min(len(t.split()), 200))
    bang_counts = texts.apply(lambda t: min(t.count('!'), 10))
    caps = texts.apply(capital_ratio)
    feats = np.empty((df.shape[0], 3), dtype=np.float32)
    feats[:, 0] = word_counts
    feats[:, 1] = bang_counts
    feats[:, 2] = caps
    return feats
def get_split_indices(num, split_cnt):
    """Randomly partition the indices 0..num-1 into split_cnt groups.

    The first split_cnt-1 groups get num // split_cnt indices each (at
    least 1); the final group receives whatever remains (possibly
    empty, possibly larger). Uses np.random's global state.
    """
    shuffled = np.arange(num)
    np.random.shuffle(shuffled)
    chunk = num // split_cnt if num > split_cnt else 1
    groups = []
    start = 0
    for _ in range(split_cnt - 1):
        groups.append(shuffled[start:start + chunk])
        start += chunk
    # last group takes the remainder
    groups.append(shuffled[start:])
    return groups
def do_split(df, split_cnt):
    """Stratified split of df's index into split_cnt folds.

    For every value of the combined-label column 'xxx' (one per
    possible label combination), the matching rows are shuffled and
    spread across the folds, so each fold keeps the label mix.
    """
    buckets = [[] for _ in range(split_cnt)]
    for combo in range(1 << len(y_names)):
        sub = df[df['xxx'] == combo]
        for i, idx in enumerate(get_split_indices(sub.shape[0], split_cnt)):
            buckets[i] += sub.index[idx].tolist()
    return buckets
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # --seed: RNG seed for upsampling/splitting; --splits: fold count
    parser.add_argument('--seed', type=int, metavar='N', default=19)
    parser.add_argument('--splits', type=int, metavar='N', default=5)
    # --upsample: train on the synthetically upsampled csv instead
    parser.add_argument('--upsample', action='store_true')
    # --force: regenerate the text embedding even if it already exists
    parser.add_argument('--force', action='store_true')
    args = parser.parse_args()
    np.random.seed(args.seed)
    train_csv = 'dataset/train.csv'
    if args.upsample:
        train_csv = 'dataset/train-upsample.csv'
        # Build the upsampled csv only once; reuse it on later runs.
        if os.path.exists(train_csv):
            print('Upsampled dataset exists: %s' % (train_csv))
        else:
            print('Upsampling train dataset...')
            upsample()
    f = 'dataset/text-embedding.npz'
    if not args.force and os.path.exists(f):
        print('Text embedding exists: %s' % (f))
    else:
        print('Generating text embedding: %s' % (f))
        # Embedding generation is delegated to a separate script.
        rc = subprocess.call(['python', './text-embedding.py', train_csv])
        # NOTE: assert is stripped under `python -O`; a failed subprocess
        # would then go unnoticed.
        assert(rc == 0)
    f = 'dataset/train-split.npz'
    print('Splitting train and validation sets: %s' % (f))
    df = pd.read_csv(train_csv)
    # one hot to integer: pack the 6 labels into a single value in
    # [0, 63] so do_split can stratify on full label combinations
    df['xxx'] = 0
    for col in y_names:
        df['xxx'] *= 2
        df['xxx'] += df[col]
    # split (stratified per combined-label value)
    indices = do_split(df, args.splits)
    np.savez(f, indices=indices, seed=args.seed)
    # copy=False scales arrays in place, avoiding an extra copy
    sc = MinMaxScaler(copy=False)
    print('Generating train file: dataset/train.npz')
    df = pd.read_csv(train_csv)
    X = gen_X(df)
    y = df[y_names].values.astype(np.float32)
    # Fit the scaler on the train features only...
    sc.fit_transform(X)
    np.savez('dataset/train.npz', X=X, y=y)
    print('Generating test file: dataset/test.npz')
    df = pd.read_csv('dataset/test.csv')
    X = gen_X(df)
    # NOTE(review): `id` shadows the builtin; harmless here but worth renaming.
    id = df['id']
    # ...and reuse the train-fitted scaling on the test set.
    sc.transform(X)
    np.savez('dataset/test.npz', X=X, id=id)
|
#can sort string with sorted()
from collections import defaultdict
def isPermutation1(str1, str2):
    """Return True iff str1 is a permutation of str2 (case-sensitive).

    Sorts both strings and compares -- O(n log n).
    """
    if len(str1) != len(str2):
        # Bug fix: was `return false` (undefined name -> NameError).
        return False
    # Two empty strings are trivially permutations of each other.
    if str1 == "" and str2 == "":
        return True
    return sorted(str1) == sorted(str2)
def isPermutation2(str1, str2):
    """Return True iff str1 is a permutation of str2 (case-sensitive),
    using character counts -- O(n).
    """
    if len(str1) != len(str2):
        return False
    # Bug fix: plain dicts raised KeyError on `ledger[char] += 1`;
    # defaultdict(int) starts missing keys at 0.
    ledger1 = defaultdict(int)
    ledger2 = defaultdict(int)
    for char in str1:
        ledger1[char] += 1
    for char in str2:
        ledger2[char] += 1
    # Bug fix: the original built the tallies but never returned a result.
    return ledger1 == ledger2
# NOTE(review): ledger1 is local to isPermutation2, so this module-level
# print raises NameError as written -- it probably belonged inside the
# function; confirm the intended indentation.
print(ledger1)
print(isPermutation1("Cat", "tac"))
print(isPermutation2("Cat", "tac"))
from mysql import connector
import mysql.connector.errors as CE
from urllib2 import urlopen
from multiprocessing import pool
import json
from time import sleep
from pprint import pprint
#db connection object
# Module-level MySQL connection and cursor shared by getTripData's
# inserts below. Credentials are hard-coded for the local dev database.
conn = connector.connect(host='localhost',user='root',passwd='root',db='cubito')
cursor = conn.cursor()
def getDetail(trip_id,current_state):
location = []
temp=[]
while (current_state=="RUNNING"):
trip_detail = json.load(urlopen("http://cubito.co.in/assignment/gpslocation.php?trip_id="+trip_id))
print trip_detail["status"]+":"+trip_detail["trip_id"]
#location.append([str(trip_detail["trip_id"])+","+str(++trip_detail["location"]["latitude"])+","+str(trip_detail["location"]["longitude"])])
#del temp[0:3]
temp.append(str("TRIP : "+str(trip_detail["trip_id"])))
temp.append(str(float(trip_detail["location"]["latitude"])))
temp.append(str(float(trip_detail["location"]["longitude"])))
#print temp
location.append(list(temp))
del temp[:]
#pprint(location)
sleep(10)
#Fail Safe
if trip_detail["status"] == "COMPLETED":
current_state="COMPLETED"
json_loc= json.dumps(location)
break
return json_loc
#get trip ids
def getTripData():
flag=1
for id in range(2):
trip_id = json.load(urlopen("http://cubito.co.in/assignment/gpslocation.php"))
current_state = "RUNNING"
print str(trip_id["trip_id"])+"START"
dummy = json.load(urlopen("http://cubito.co.in/assignment/gpslocation.php?trip_id="+trip_id["trip_id"]))
current_state = dummy["status"]
location = getDetail(trip_id["trip_id"],current_state)
print location
try:
cursor.execute("insert into trips values('"+trip_id["trip_id"]+"','"+location+"')")
conn.commit()
except (CE.Error ,Exception) as e:
print e
print str(trip_id["trip_id"])+"STOP"
getTripData()
|
import brownie
import pytest
from brownie import Wei
# Far-future unix timestamp, presumably used as a "never expires"
# deadline argument; unused in the tests visible here -- TODO confirm.
DEADLINE = 9999999999999
# The zero (burn) address.
ZERO_ADDRESS = "0x0000000000000000000000000000000000000000"
@pytest.fixture()
def liquid_lgt(lgt, accounts):
    # LGT token pre-seeded with pool liquidity from two accounts:
    # accounts[0] mints 50 and adds 51 tokens / 0.05 ether,
    # accounts[1] mints 80 and adds 50 tokens / 0.049 ether.
    # NOTE(review): the addLiquidity positional args look like
    # (minLiquidity, maxTokens, deadline) -- confirm against the ABI.
    lgt.mint(50, {'from': accounts[0]})
    lgt.addLiquidity(1, 51, 99999999999, {'from': accounts[0], 'value': "0.05 ether"})
    lgt.mint(80, {'from': accounts[1]})
    lgt.addLiquidity(1, 50, 99999999999, {'from': accounts[1], 'value': "0.049 ether"})
    yield lgt
def test_transfer_liquidity(liquid_lgt, accounts):
    """Pool shares move between accounts and the TransferLiquidity
    event mirrors the transfer."""
    src, dst = accounts[:2]
    amount = Wei("0.01 ether")
    src_before = liquid_lgt.poolBalanceOf(src)
    dst_before = liquid_lgt.poolBalanceOf(dst)
    tx = liquid_lgt.poolTransfer(dst, amount, {'from': src})
    assert tx.return_value
    ev = tx.events['TransferLiquidity']
    assert ev["from"] == src
    assert ev["to"] == dst
    assert ev["value"] == amount
    # balances changed by exactly the transferred amount
    assert liquid_lgt.poolBalanceOf(src) == src_before - amount
    assert liquid_lgt.poolBalanceOf(dst) == dst_before + amount
def test_transfer_liquidity_insufficient_reverts(liquid_lgt, accounts):
    """Transfers exceeding the sender's pool balance revert and leave
    both balances untouched."""
    src, dst = accounts[:2]
    src_before = liquid_lgt.poolBalanceOf(src)
    dst_before = liquid_lgt.poolBalanceOf(dst)
    with brownie.reverts("LGT: transfer exceeds balance"):
        liquid_lgt.poolTransfer(dst, Wei("1 ether"), {'from': src})
    # state unchanged after the revert
    assert liquid_lgt.poolBalanceOf(src) == src_before
    assert liquid_lgt.poolBalanceOf(dst) == dst_before
def test_transfer_liquidity_self_reverts(liquid_lgt, accounts):
    """Sending pool shares to the token contract itself reverts."""
    src = accounts[0]
    src_before = liquid_lgt.poolBalanceOf(src)
    contract_before = liquid_lgt.poolBalanceOf(liquid_lgt)
    with brownie.reverts("dev: can't transfer liquidity to token contract"):
        liquid_lgt.poolTransfer(liquid_lgt, Wei("0.001 ether"), {'from': src})
    # nothing moved
    assert liquid_lgt.poolBalanceOf(src) == src_before
    assert liquid_lgt.poolBalanceOf(liquid_lgt) == contract_before
def test_transfer_liquidity_zero_reverts(liquid_lgt, accounts):
    """Pool transfers to the zero address are rejected."""
    src = accounts[0]
    src_before = liquid_lgt.poolBalanceOf(src)
    zero_before = liquid_lgt.poolBalanceOf(ZERO_ADDRESS)
    with brownie.reverts("dev: can't transfer liquidity to zero address"):
        liquid_lgt.poolTransfer(ZERO_ADDRESS, Wei("0.005 ether"), {'from': src})
    # nothing moved
    assert liquid_lgt.poolBalanceOf(src) == src_before
    assert liquid_lgt.poolBalanceOf(ZERO_ADDRESS) == zero_before
|
import random as r
import os, sys, time, threading, multiprocessing
# NOTE(review): computed but never used in this script.
numberOfCores=multiprocessing.cpu_count()
def task(cmd):
    """Simulate work for thread `cmd` by sleeping 2-5 seconds.

    `cmd` is accepted for the thread API but not otherwise used.
    """
    time.sleep(r.randint(2, 5))
    return
# Run Multiple Thread
# Launch 16 worker threads, throttling so that no more than 6 workers
# (total thread count minus the main thread) are alive at once.
for i in range(16):
    cmd=str(i+1)
    msg="...Thread %s start...."%(cmd)
    print(msg)
    t = threading.Thread(target=task , args=(cmd,))
    t.start()
    # Poll once a second until enough workers have finished to start
    # the next one.
    while True:
        print(threading.activeCount())
        if threading.activeCount()-1 <= 6:
            break
        time.sleep(1)
# Waiting to finish the thread
while True:
    if threading.activeCount() == 1:
        break
    time.sleep(1)
    # NOTE(review): prints count minus 2, while the throttle above
    # subtracts 1 for the main thread -- one of the two is likely off
    # by one; confirm intent.
    print ("Thread Left ... ",threading.activeCount() - 2)
print("\n...All Thread ends....")
# Schema descriptor for the `bill` table: the table name plus one entry
# per column with its name and SQL attribute list.
# NOTE(review): consumed via PDBConst keys defined elsewhere in the
# project, presumably by a table-creation helper -- confirm.
{
    PDBConst.Name: "bill",
    PDBConst.Columns: [
        {
            # Surrogate primary key.
            PDBConst.Name: "ID",
            PDBConst.Attributes: ["int", "not null", "auto_increment", "primary key"]
        },
        {
            # NOTE(review): presumably a foreign key to a person/payer
            # table -- confirm.
            PDBConst.Name: "PID",
            PDBConst.Attributes: ["int", "not null"]
        },
        {
            PDBConst.Name: "Datetime",
            PDBConst.Attributes: ["datetime", "not null"]
        },
        {
            PDBConst.Name: "Amount",
            PDBConst.Attributes: ["double(12,2)", "not null"]
        },
        {
            # Currency code, defaulting to 1.
            PDBConst.Name: "Currency",
            PDBConst.Attributes: ["tinyint", "not null", "default 1"]
        },
        {
            PDBConst.Name: "Category",
            PDBConst.Attributes: ["tinyint"]
        },
        {
            PDBConst.Name: "PaymentMode",
            PDBConst.Attributes: ["tinyint"]
        },
        {
            # Free-form note.
            PDBConst.Name: "Note",
            PDBConst.Attributes: ["varchar(255)"]
        }]
}
|
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
from presentNN import genPic,save2Pic
import math

# Output directory for the generated figures.
BASE_DIR_PIC = './picDir/'
# Snapshot range: render from step BASE to MAX in increments of STEP.
BASE = 0
MAX = 6000
STEP = 1000
# Viewing angles in radians (0 and 10 degrees).
THETA1 = 0
THETA2 = 10*2*math.pi/360
# Render one picture series per viewing angle.
# NOTE(review): plt, Axes3D, pickle and genPic are imported but unused
# here -- presumably needed by save2Pic's module or leftovers; confirm.
save2Pic(BASE_DIR_PIC,BASE,MAX,STEP,THETA1)
save2Pic(BASE_DIR_PIC,BASE,MAX,STEP,THETA2)
from bs4 import BeautifulSoup
import requests
import re
## Time Pattern
# "H:MM" optionally followed by an AM/PM marker (dots/space tolerated).
TIME = re.compile('(\d{1,2}):(\d\d)\s*([AaPp]\.?\s*[Mm]\.?)?')
# "H:MM <days>" as used by the exam-schedule table's first column.
TIME_DAYS = re.compile('(\d{1,2}):(\d\d)\s+([A-Za-z]+)')
## Normalization for day
def norm_days(days: str) -> int:
    """Encode a meeting-days string (e.g. 'MWF', 'TuTh') as a 5-bit int.

    Bit positions are Monday..Friday from most- to least-significant,
    so 'MWF' -> 0b10101 = 21.
    """
    text = days.upper()
    bits = ['0'] * 5
    # Check 'TU'/'TH' before the bare-'T' fallback so they are consumed
    # first; each matched code is removed from the working string.
    for code, pos in [('M', 0), ('TU', 1), ('W', 2), ('TH', 3), ('F', 4)]:
        if code in text:
            bits[pos] = '1'
            text = text.replace(code, '')
    # A leftover lone 'T' means Tuesday.
    if 'T' in text:
        bits[1] = '1'
        text = text.replace('T', '')
    return int(''.join(bits), 2)
## Base for extract exam schedule
from typing import Dict, Tuple
from typing import Optional
def norm_time(hour: str, minute: str, period: Optional[str] = None) -> int:
    """Normalize a clock time to a 24-hour HHMM integer.

    hour/minute may be strings or ints. period, when given, is an AM/PM
    marker ('PM', 'p.m.', ...); only its first letter is inspected.
    Without a period the hour is taken as already 24-hour.
    """
    hr = int(hour)
    mi = int(minute)
    if period:
        p = period[0].upper()
        # Bug fix: the original added 12 for any PM hour, turning
        # 12:30 PM into 2430. Correct 12-hour conversion: 12 PM stays
        # 12xx and 12 AM becomes 00xx.
        if p == 'P' and hr != 12:
            hr += 12
        elif p == 'A' and hr == 12:
            hr = 0
    return hr * 100 + mi
def extract_exam_schedule(url) -> Dict[Tuple[int, int], Tuple[str, str, str]]:
    """Scrape the registrar's exam table into a lookup dict.

    Keys are (normalized class time, day bitmask); values are the
    (exam day, exam date, exam time) strings from the table row. Rows
    whose first cell does not match the "H:MM Days" pattern are skipped.
    """
    page = BeautifulSoup(requests.get(url).text, 'html.parser')
    schedule = {}
    for row in page.find('tbody').find_all('tr'):
        cells = row.find_all('td')
        match = TIME_DAYS.match(cells[0].string.strip())
        if not match:
            continue
        key = (norm_time(int(match.group(1)), int(match.group(2))),
               norm_days(match.group(3)))
        schedule[key] = (cells[1].string.strip(),
                         cells[2].string.strip(),
                         cells[3].string.strip())
    return schedule
## Base for extract class schedule
def extract_class_schedule(url) -> Dict[int, Tuple[str, str, str, str, int, str, str, str]]:
    """Scrape the class-schedule page into a dict keyed by opus number.

    Each value is (program, number, section, title, opus, days, time,
    instructor). Rows with fewer than 10 cells or no meeting days are
    skipped.

    NOTE(review): the cell indices (0,1,2,5,6,9 and nested 0,1,3) are
    tied to the current page layout -- verify against the live markup
    before reuse.
    """
    r = requests.get(url)
    html = BeautifulSoup(r.text, 'html.parser')
    schedule = {}
    for tr in html.find_all('tr'):
        td1 = tr.find_all('td')
        if len(td1) < 10: continue  # header or malformed row
        program = td1[0].string.strip()
        number = td1[1].string.strip()
        section = td1[2].string.strip()
        title = td1[5].text.strip()
        opus = int(td1[6].string)
        # meeting info lives in a nested table inside the 10th cell
        td2 = td1[9].find_all('td')
        days = td2[0].string
        if days is None:
            # no scheduled meeting days (e.g. online/TBA) -- skip row
            continue
        else:
            days = days.strip()
        time = td2[1].text.strip()
        instructor = td2[3].string
        schedule[opus] = (program, number, section, title, opus, days, time, instructor)
    return schedule
## Function for get exam schedule
def get_exam_schedule(opus: int, exam_schedule: Dict[Tuple[int, int], Tuple[str, str, str]], class_schedule: Dict[int, Tuple[str, str, str, str, int, str, str, str]]) -> Tuple[str, str, str]:
    """Look up a class's exam slot by its opus number.

    Returns the (exam day, exam date, exam time) tuple, or None when
    the opus is unknown or no exam slot matches its meeting pattern.
    """
    record = class_schedule.get(opus, None)
    if record is None:
        return None
    # record[5] holds the meeting days, record[6] the meeting time text.
    matched = TIME.match(record[6])
    lookup_key = (norm_time(matched.group(1), matched.group(2)),
                  norm_days(record[5]))
    return exam_schedule.get(lookup_key, None)
# Course id pattern: <program letters><3-digit number + suffix>-<section>
ID = re.compile('([A-Za-z]+)(\d\d\d[A-Za-z]*)-([A-Za-z]*\d*)')

def get_class_opus_id(coursename: str, class_schedule: Dict[int, Tuple[str, str, str, str, int, str, str, str]]) -> int:
    """Resolve '<program><number>-<section>' (e.g. 'QTM385-1') to its
    opus number by matching program/number/section in class_schedule."""
    parsed = ID.match(coursename)
    wanted = (parsed.group(1), parsed.group(2), parsed.group(3))
    matches = [opus for opus, info in class_schedule.items()
               if (info[0], info[1], info[2]) == wanted]
    return matches[0]
def print_exam_schedule(course_id: str):
    """
    Returns the exam schedule of the input course as a comma-joined
    string "day, date, time". (Despite the name, this returns rather
    than prints.)

    :param course_id: `<program><number>-<section>` (e.g., `QTM385-1`)

    NOTE(review): both pages are re-scraped on every call, and a course
    with no matching exam slot makes the final join raise TypeError
    (joining None) -- confirm whether callers rely on that.
    """
    url_exam = 'http://registrar.emory.edu/faculty-staff/exam-schedule/spring-2019.html'
    url_class = 'http://atlas.college.emory.edu/class-schedules/spring-2019.php'
    exam_schedule = extract_exam_schedule(url_exam)
    class_schedule = extract_class_schedule(url_class)
    course_opus = get_class_opus_id(course_id, class_schedule)
    exam_opus_schedule = get_exam_schedule(course_opus, exam_schedule, class_schedule)
    return ', '.join(list(exam_opus_schedule))
def lambda_handler(event, context):
    """AWS Lambda entry point: return the exam slot string for
    event['course']; `context` is unused."""
    return print_exam_schedule(event['course'])
import click
import pkgutil
import shutil
import os.path
from datetime import datetime
from slackviewer.constants import SLACKVIEWER_TEMP_PATH
from slackviewer.utils.click import envvar, flag_ennvar
from slackviewer.reader import Reader
from slackviewer.archive import get_export_info
from jinja2 import Environment, PackageLoader
# Root click command group; subcommands attach via @cli.command().
# (Kept docstring-free on purpose: click would surface a docstring as
# the group's --help text.)
@click.group()
def cli():
    pass
@cli.command(help="Cleans up any temporary files (including cached output by slack-export-viewer)")
@click.option("--wet", "-w", is_flag=True,
              default=flag_ennvar("SEV_CLEAN_WET"),
              help="Actually performs file deletion")
def clean(wet):
    # Dry-run by default; only delete when --wet is given.
    if not wet:
        print("Run with -w to remove {}".format(SLACKVIEWER_TEMP_PATH))
        return
    if not os.path.exists(SLACKVIEWER_TEMP_PATH):
        print("Nothing to remove! {} does not exist.".format(SLACKVIEWER_TEMP_PATH))
        return
    print("Removing {}...".format(SLACKVIEWER_TEMP_PATH))
    shutil.rmtree(SLACKVIEWER_TEMP_PATH)
@cli.command(help="Generates a single-file printable export for an archive file or directory")
@click.argument('archive_dir')
def export(archive_dir):
    """Render the whole archive into one self-contained HTML file,
    written as <stripped archive name>.html with inlined CSS."""
    css = pkgutil.get_data('slackviewer', 'static/viewer.css').decode('utf-8')
    tmpl = Environment(loader=PackageLoader('slackviewer')).get_template("export_single.html")
    export_file_info = get_export_info(archive_dir)
    r = Reader(export_file_info["readable_path"])
    # Channels sorted by name for a stable document order.
    channel_list = sorted(
        [{"channel_name": k, "messages": v} for (k, v) in r.compile_channels().items()],
        key=lambda d: d["channel_name"]
    )
    html = tmpl.render(
        css=css,
        generated_on=datetime.now(),
        workspace_name=export_file_info["workspace_name"],
        source_file=export_file_info["basename"],
        channels=channel_list
    )
    # Bug fix: the file was opened in text mode but written with
    # html.encode('utf-8') (bytes -> TypeError), and never closed.
    # Write the str directly with an explicit encoding inside a context
    # manager so the handle is flushed and closed.
    with open(export_file_info["stripped_name"] + '.html', 'w', encoding='utf-8') as outfile:
        outfile.write(html)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.