__author__ = 'chenjensen'
from BeautifulSoup import BeautifulSoup
from PageGetter import PageGetter


class NextCrawler:
    def __init__(self):
        self.hrefInfoList = []
        self.nameInfoList = []
        self.describeInfoList = []
        getter = PageGetter('http://next.36kr.com/posts')
        page = getter.getPage()
        soup = BeautifulSoup(page)
        # the post container and the product links inside it
        self.newProduct = soup.find(attrs={'class': 'post'})
        self.productInfo = self.newProduct.findAll(attrs={'class': 'post-url'})

    def getNameInfo(self):
        for info in self.productInfo:
            self.nameInfoList.append(info.text)
        return self.nameInfoList

    def getUrlInfo(self):
        for info in self.productInfo:
            self.hrefInfoList.append(info['href'])
        return self.hrefInfoList

    def getDescribe(self):
        productDescribe = self.newProduct.findAll(attrs={'class': 'post-tagline'})
        for info in productDescribe:
            self.describeInfoList.append(info.text)
        return self.describeInfoList
|
a,b,c=input().split()
a=a[int(c):]
print(a[int(b)-1])
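# Worked trace (hypothetical input "abcdef 2 1"):
#   a = "abcdef"[int("1"):]   -> "bcdef"
#   a[int("2") - 1] == a[1]   -> "c" is printed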
|
'''
Single neuron with Numpy dot product
'''
import numpy as np
inputs = [1.0, 2.0, 3.0, 2.5]
weights = [0.2, 0.8, -0.5, 1.0]
bias = 2.0
output = np.dot(weights, inputs) + bias
print(output)
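
# A minimal extension sketch (an assumption, not part of the original): the same
# dot product generalizes to a layer by stacking one weight row per neuron.
layer_weights = np.array([[0.2, 0.8, -0.5, 1.0],
                          [0.5, -0.91, 0.26, -0.5],
                          [-0.26, -0.27, 0.17, 0.87]])
layer_biases = np.array([2.0, 3.0, 0.5])
layer_output = np.dot(layer_weights, inputs) + layer_biases  # one output per neuron
print(layer_output)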
|
# Generated by Django 2.0.7 on 2018-08-09 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twitterapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='uefa',
name='Teams',
field=models.CharField(max_length=64),
),
]
|
import os
import sys
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def get_path(main_folder = 'files', a = '', b = ''):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', main_folder, a, b))
def make_directory(main_folder = 'files'):
"""Modificar descargas con path completo"""
os.makedirs(get_path(main_folder = main_folder, a = 'PML', b='MDA'))
os.makedirs(get_path(main_folder = main_folder, a = 'PML', b = 'MTR'))
os.makedirs(get_path(main_folder = main_folder, a = 'PND', b = 'MDA'))
os.makedirs(get_path(main_folder = main_folder, a = 'PND', b = 'MTR'))
os.makedirs(get_path(main_folder = main_folder, a = 'generation', b = 'real'))
os.makedirs(get_path(main_folder = main_folder, a = 'generation', b = 'forecast'))
os.makedirs(get_path(main_folder = main_folder, a = 'consumption', b = 'real'))
os.makedirs(get_path(main_folder = main_folder, a = 'consumption', b = 'forecast'))
os.makedirs(get_path(main_folder = main_folder, a = 'descargas'))
def wait_download(directorio, file_number, download_folder):
    """Block until a single download finishes, so files are downloaded one at a
    time. A '.part' file is searched for in download_folder; when it disappears,
    the download is complete. Returns nothing."""
    while directorio == os.listdir(download_folder):
        # Waiting for the download to begin
        time.sleep(1)
print(f'File {file_number}.', end = '')
wait = True
# Looking for a .part file in download_folder directory
while wait:
wait = False
for file in os.listdir(download_folder):
if ".part" in file:
time.sleep(0.5)
wait = True
print('.', end = '')
sys.stdout.flush()
print('Done')
def open_browser(download_folder):
"""Function description..."""
profile = webdriver.FirefoxProfile()
# Do not use default download folder
profile.set_preference("browser.download.folderList", 2)
# Use selected download folder
profile.set_preference("browser.download.dir", download_folder)
# Do not show download popup for selected mime-type files
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/octet-stream, application/zip")
print('Opening Browser.')
driver = webdriver.Firefox(firefox_profile=profile)
return driver
def download_by_xpath(driver, folder_path, xpath):
""""""
# Find element
download_button = WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH, xpath)))
# Get before-download directory content
directory = os.listdir(folder_path)
# Click button and begin download
download_button.click()
return directory
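
# Hedged usage sketch (driver, xpath and folder_path are assumptions):
#   directory = download_by_xpath(driver, folder_path, xpath)
#   wait_download(directory, file_number=1, download_folder=folder_path)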
def postgres_password(file_path = 'psql_password.txt'):
with open(file_path, 'r') as file:
params = {
'host':file.readline()[:-1],
'user':file.readline()[:-1],
'password':file.readline()[:-1],
'port':int(file.readline())
}
return params
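
# Assumed psql_password.txt layout, one value per line: host, user, password, port.
# Hedged usage sketch with psycopg2 (not imported in this module):
#   import psycopg2
#   conn = psycopg2.connect(dbname='somedb', **postgres_password())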
def textbox_fill(driver, xpath, date_string, attribute):
textbox = WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH, xpath)))
textbox.send_keys(date_string)
textbox.send_keys(Keys.TAB)
time.sleep(0.1)
return textbox.get_attribute(attribute)
def get_folder(subfolder_1 = '', subfolder_2 = ''):
"""This function returns folder,files wher folder is the folder to look for files in the selected system and data, files is a list with the name of all the files available"""
folder = get_path(a = subfolder_1, b = subfolder_2)
# folder = f'{folder_frame}\\{subfolder_1}'
# if subfolder_2:
# folder = f'{folder_frame}\\{subfolder_1}\\{subfolder_2}'
files = os.listdir(folder)
return folder,files
def upload_file_to_database(folder, cursor, table_name, sep = ','):
files = get_files_names(folder, table_name)
table = table_name
for file in files:
print(f"Uploading {file} to table {table}...", end='')
sys.stdout.flush()
file_path = f"{folder}\\{file}"
# print(file_path)
with open(file_path, 'rb') as f:
cursor.copy_from(f, table.lower(), sep=sep)
print('Done')
def get_files_names(folder, string):
"""This function returns folder,files wher folder is the folder to look for files in the selected system and data, files is a list with the name of all the files available"""
files_list = os.listdir(folder)
files = [file for file in files_list if string in file]
return files
def delete_files(folder, subfolder=''):
# folder = f'{folder}\\{subfolder}'
print(f'Deleting {folder}')
    files = os.listdir(folder)
for file in files:
os.remove(f'{folder}\\{file}')
def get_download_file_name(file_name = 'dashboard_energia_mexico_datos'):
folder = get_path(a = 'descargas')
files = os.listdir(folder)
i = 1
keep = True
while keep:
keep = False
for file in files:
if file_name in file:
if f'({i-1})' in file_name:
file_name = file_name.replace(f"({i-1})", '')
file_name += f'({i})'
i += 1
keep = True
break
return file_name + '.csv'
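
# Trace (assuming 'descargas' already holds x.csv and x(1).csv, file_name='x'):
# pass 1 matches x.csv -> 'x(1)'; pass 2 matches x(1).csv -> 'x(2)';
# no file contains 'x(2)', so 'x(2).csv' is returned.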
if __name__ == '__main__':
pass
# print(postgres_password())
|
print("Hola")
print("Cómo te llamas?")
myName=input()
print("Qué tal " + myName + "?")
print("Tu nombre tiene " + str(len(myName)) + " letras")
print('Cuál es tu edad?')
#print(int('99.9')) #Not possible to print
yourAge=int(input())
#yourAge=int(yourAge) #can't evaluate value as an integer, better try yourAge=int(input())
#print(int(1.99)) # int() accepts floats (truncates toward zero)
print("Tienes "+ str(yourAge) +" años " + myName) # converted to str just for the print
print("El siguiente año tendrás "+ str(yourAge + 1) + " años")
if yourAge < 4:
print("Felicidades, eres una Chiquisaurio")
elif myName=="Edwin":
print("Es usted un agradable sujeto")
elif yourAge> 18:
print("Eres una Nathisaurio")
|
from sdc.crypto.key_store import KeyStore
TEST_DO_NOT_USE_SR_PRIVATE_PEM = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAt8LZnIhuOdL/BC029GOaJkVUAqgp2PcmbFr2Qwhf/514DUUQ
9sKJ1rvwvbmmW2zE8JRtdY3ey0RXGtMn5UZHs8NReHzMxvsmHN4VuaGEnFmPwO82
1Tkvg0LpKsLkotcw793FD/fut44N2lhpTSW2Sc82uG0p9A+Kud8HCIaWaluosghk
9rbMGYDzZQk8cA91GtKJRmIOED4PorB/dexDf37qhuWNQgzyNyTti1DTDUIWyzQQ
Jp926vLbkOip6Fc2R13hOFNETe68Rrw/h3hXEFS17uPFZHsxvm9PFXX9KZMS25oh
qbNh97I94LL4o4wybl6LaE6lJEHiD6docD0B6wIDAQABAoIBADRQGyUtzbtWbtTg
jlF6fWrBt83d07P5gA2+w3aHztR1HrUgYVjuPtFLxZgtXseOKm6WwqaBkdhqByYS
0Lu7izQIuYvEc4N+Afab3rFu4tKjyIHTV9fRpM4IYVqUCwS0oDDZAH2wRlwo65aq
LqgQwVk3zUspgJUDS6nobRcnQXDbVaQ54JU0zSXrFJqZygrUR5TDuPnE7Ehbb9Ru
L1YNkxn2wVT9iOHdyaxr9co7x1z01hHCgdf3SUyGTCOCqp9rJYXtm+GPpZMRpwv7
CdsMfDxpkNKC2X/hBHz5ux9sC8kRA/JcTKGvbKbPVpedWyIYwKjJ8H1A0zuSQX9Q
rZU1a0kCgYEA3EyNsBwtllLSzA2sb1yf6qc+0mvXcPPrTt7WWG7bUI1/43Nx2rMW
XxRGdJlOdA9GnxIK2Ir75pPXj6VVL2U2CCh87td5bnVDr8DMA8nj7efpjMpAUEtU
QX/qKHtzkr3nRjLLkrL9IhQ6m9rNVtyKqWLTnBv6Uflq2UlYHh2xBi0CgYEA1Yp3
DycqKDkkGHi9wuZeyAhQDsKHyVEukS5bflVjnkbkdX7Z4efMzKdFknNk6O/YtgYh
Ti/XheojCkCVMSEJ3kndsotIsEf1kXYIvfSSBPO0J8GWma7meGbUn61Tq8Kj10LI
8k6KsXiT67+r79wOYcRclIBGNm3nR4rMMpKAj3cCgYAB6oCI+ZXD6vB+adgIF+wk
JFQ9jEaRau2u/+0pU72Ak9p65fQljM0zAoAiX3r5M3DPzV5ex8atGLgVPcDh6qVv
qLp9cU5TEZ4HF0wu9ECRPyUe3lt011LiRvSIaZp1ukUarTJsEjZ1Z2ujE2IZ0U07
b+qbPvsMX3j4btTfXi69+QKBgFZvAHgKsz6quliJbs3X719qNfVzegDbskyjhfch
2vuy2EBSwyB0ceoYfsmjmaHLi11KJ+r85HDY76vzri+/nr3yCiF9zUNFLTnem/U/
bGdCuZYp/qpgJ/tuK/wh7S8lzqmP58RkVDE3jDAtWgvxd4TNNWgKb+ESJT5JCRQj
RpRLAoGALFlPzTd/ifWCcV0Pn5U/ESzX1az2kfll30VxjfGVFsRJ4ZvezxgI+vwo
OZrki4MBTK/6GFkHLFkF6w2Le+Y5Nos9O2UUZs45lwLEYbQ4yKcx2KlWGLZOypB8
i7/6TB95Ej2i5KgaSlcJjOyOx7g20TwDD1THtLXgY54d0Yr9T/U=
-----END RSA PRIVATE KEY-----
"""
TEST_DO_NOT_USE_UPSTREAM_PUBLIC_PEM = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvZzMraB96Wd1zfHS3vW3
z//Nkqz+9HfwViNje2Y5L6m3K/7raA0kUsWD1f6X7/LIJfkCEctCEj9q19+cX30h
0pi6IOu92MlIwdH/L6CTuzYnG4PACKT8FZonLw0NYBqh8p4vWS8xtNHNjTWua/FF
TlxdtYnEb9HbUZkg7dXAtnikozlE/ZZSponq7K00h3Uh9goxQIavcK1QI8pw5V+T
8V8Ue7k98W8LpbYQWm7FPOZayu1EoJWUZefdOlYAdeVbDS4tjrVF+3za+VX3q73z
JEfyLEM0zKrkQQ796gfYpkzDYwJvkiW7fb2Yh1teNHpFR5tozzMwUxkREl/TQ4U1
kwIDAQAB
-----END PUBLIC KEY-----"""
VALID_SIGNED_JWT = "eyJraWQiOiI3MDllYjQyY2ZlZTU1NzAwNThjZTA3MTFmNzMwYmZiYjdkNGM4YWRlIiwiYWxnIjoiUlMyNTYiLCJ0eXAiOiJqd3" \
"QifQ.eyJ1c2VyIjoiamltbXkiLCJpYXQiOjE0OTgxMzc1MTkuMTM1NDc5LCJleHAiOjEuMDAwMDAwMDAwMDAxNDk4MmUrMjF9.tXGcIZf" \
"bTIgxrd7ILj_XqcoiRLtmgjnJ0WORPBJ4M9Kd3zKTBkoIM6pN5XWdqsfvdby53mxQzi3_DZS4Ab4XvF29Wce49GVv7k69ZZJ-5g2NX9iJ" \
"y4_Be8uTZNKSwMpfrnkRrsbaWAGrXe9NKC3WC_Iq4UuE3KM7ltvOae4be-2863DP7_QEUtaAtXSwUkjPcgkvMPns-SurtFNXgFFVToNnw" \
"IuJ9UWsY8JlX1UB56wfqu68hbl88lenIf9Ym0r5hq0DlOZYNtjVizVDFciRx_52d4oeKMSzwJ1jB5aZ7YKRNHTo38Kltb5FkHRcIkV1Ae" \
"68-5dZeE9Yu_JHPMi_hw"
VALID_JWE = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00iLCJraWQiOiJlMTkwOTEwNzJmOTIwY2JmM2NhOWY0MzZjZWJhMzA5ZTdkODE0Y" \
"TYyIn0.SZG8UMNXYGnjppgGf1ok_O93Z_5qzKEZmn35pbStiDzAYdrUgg4Aa04B6ivDzPaZu-ROYTRw8UsroK8OEkySMDuHw0s63Z2AOZ" \
"K6qviFfobxQFGnndEro9HrDTYMM9dpOt-_uFO0Ezuxyo7dkvRnAnRv4wy7Tqwu0CXtHcv5wzeUlGzh2OGR9nNK_6_2eRF8Lu3wuV5INa2" \
"VSppU3xeQZQsuc1e-XoHi_fNzr8Lckmv9Cl5Z19BeC5DPhQb1IK8rRKyxIU8h65yoDEGfsD0Mf62wvdTFOldQ_gwCjSw3Piez_V2g9FUv" \
"entQKVH28_pqBAZrUBj-Ma9FfNuWrJJo-w.1fsxK2D0kHa5RXW8.xO6V9QtVbKkBd9n75Bs0MugZ85oXVSqiKqwXEOc-_BqM0_1LtBbx9" \
"Q6hsvwZ84f3vakIy4AiFPKhEY_ofbokEqMnFPEg0s2U7oux-vZcNU5Db4F_TO_3bMEetyUoPiOJeJztTI-an2A4oQjSB0rniXaaAI3buD" \
"D43CvfS-SBuWHDQ6CD7ntca2hWzcO8YpnZsSKJad9FquHW_VpOj1nXnNh73q_qHXuB6USF5l3IPndep0KRwj8fUQTF9l358uWChJ2VtLK" \
"_gvw_H7PSMdgHzpj1o4Nv22boVhnhtG7ns-tP53Lec01C_qAbRGnQ30eHZsbdpnAeIrOl9_2p_rjOO6ua5K5tnD2fQp1_8MXf1Ezbr1pc" \
"p_gfk4eDJCxKblpn3Q22YtsF3qCtPS3Xz7izPz0UCK7EJy6yRU3UcLQ3YyTfCVRK1RJpgpyltCsABS6IRuw0OXmXHNy-GKB0w19hVeXU-" \
"gcY7FH9ldespOEnruTaOSWB7tcMoKyAgH3nZqZbx0NMJiAcXFJowWSzcLtrfUOZ5nU5hnXretpD0VD45mnze4TVfvt1lCY-EGMoWM1HmW" \
"YIdIo013famiRIrs2peofThYZ3aGq-WatXHuBT1SJO_CV8gT8ifOLJX0UqH1wwVKjgfxelwtNOFNDe7Hq0iu2p-skwsI8P_N87RiByCue" \
"Pw2HLVu4kzag21xtXnDz9rcPgeWiAS4ji9g.IM-8SjLJH-NFBLkg5EkAmg"
TOO_FEW_TOKENS_JWE = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00iLCJraWQiOiJlMTkwOTEwNzJmOTIwY2JmM2NhOWY0MzZjZWJhMzA5ZTdkODE0Y" \
"TYyIn0.SZG8UMNXYGnjppgGf1ok_O93Z_5qzKEZmn35pbStiDzAYdrUgg4Aa04B6ivDzPaZu-ROYTRw8UsroK8OEkySMDuHw0s63Z2AOZ" \
"K6qviFfobxQFGnndEro9HrDTYMM9dpOt-_uFO0Ezuxyo7dkvRnAnRv4wy7Tqwu0CXtHcv5wzeUlGzh2OGR9nNK_6_2eRF8Lu3wuV5INa2" \
"VSppU3xeQZQsuc1e-XoHi_fNzr8Lckmv9Cl5Z19BeC5DPhQb1IK8rRKyxIU8h65yoDEGfsD0Mf62wvdTFOldQ_gwCjSw3Piez_V2g9FUv" \
"entQKVH28_pqBAZrUBj-Ma9FfNuWrJJo-w.1fsxK2D0kHa5RXW8.xO6V9QtVbKkBd9n75Bs0MugZ85oXVSqiKqwXEOc-_BqM0_1LtBbx9" \
"Q6hsvwZ84f3vakIy4AiFPKhEY_ofbokEqMnFPEg0s2U7oux-vZcNU5Db4F_TO_3bMEetyUoPiOJeJztTI-an2A4oQjSB0rniXaaAI3buD" \
"D43CvfS-SBuWHDQ6CD7ntca2hWzcO8YpnZsSKJad9FquHW_VpOj1nXnNh73q_qHXuB6USF5l3IPndep0KRwj8fUQTF9l358uWChJ2VtLK" \
"_gvw_H7PSMdgHzpj1o4Nv22boVhnhtG7ns-tP53Lec01C_qAbRGnQ30eHZsbdpnAeIrOl9_2p_rjOO6ua5K5tnD2fQp1_8MXf1Ezbr1pc" \
"p_gfk4eDJCxKblpn3Q22YtsF3qCtPS3Xz7izPz0UCK7EJy6yRU3UcLQ3YyTfCVRK1RJpgpyltCsABS6IRuw0OXmXHNy-GKB0w19hVeXU-" \
"gcY7FH9ldespOEnruTaOSWB7tcMoKyAgH3nZqZbx0NMJiAcXFJowWSzcLtrfUOZ5nU5hnXretpD0VD45mnze4TVfvt1lCY-EGMoWM1HmW" \
"YIdIo013famiRIrs2peofThYZ3aGq-WatXHuBT1SJO_CV8gT8ifOLJX0UqH1wwVKjgfxelwtNOFNDe7Hq0iu2p-skwsI8P_N87RiByCue" \
"Pw2HLVu4kzag21xtXnDz9rcPgeWiAS4ji9g"
TEST_DO_NOT_USE_UPSTREAM_PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAvZzMraB96Wd1zfHS3vW3z//Nkqz+9HfwViNje2Y5L6m3K/7r
aA0kUsWD1f6X7/LIJfkCEctCEj9q19+cX30h0pi6IOu92MlIwdH/L6CTuzYnG4PA
CKT8FZonLw0NYBqh8p4vWS8xtNHNjTWua/FFTlxdtYnEb9HbUZkg7dXAtnikozlE
/ZZSponq7K00h3Uh9goxQIavcK1QI8pw5V+T8V8Ue7k98W8LpbYQWm7FPOZayu1E
oJWUZefdOlYAdeVbDS4tjrVF+3za+VX3q73zJEfyLEM0zKrkQQ796gfYpkzDYwJv
kiW7fb2Yh1teNHpFR5tozzMwUxkREl/TQ4U1kwIDAQABAoIBAHXiS1pTIpT/Dr24
b/rQV7RIfF2JkoUZIGHdZJcuqbUZVdlThrXNHd0cEWf0/i9fCNKa6o93iB9iMCIA
Uu8HFAUjkOyww/pIwiRGU9ofglltRIkVs0lskZE4os3c1oj+Zds6P4O6FLQvkBUP
394aRZV/VX9tJKTEmw8zHcbgEw0eBpiY/EMELcSmZYk7lhB80Y+idTrZcHoV4AZo
DhQwyF0R63mMphuOV4PwaCdCYZKgd/tr2uUHglLpYbQag3iEzoDfxdFcxnRkBdOi
a/wcNo0JRlMsxXmtJ+HrZar+6ObUx5SgLGz7dQnKvP/ZgenTk0yyohwikh2b2KOS
M3M2oUkCgYEA9+olFPDZxtM1fwmlXcymBtokbiki/BJQGJ1/5RMqvdsSeq8icl/i
Qk5AoNbWEcsAxeBftb1IfnxJsRthRyp0NX5HOSsBFiIfdSF225nmBpktwPjJmvZZ
G2MQCVqw9Y40Cia0LZnRo8417ahSfVf8/IoggnAwkswJ3fkktt/FlW8CgYEAw8vi
7hWxehiUaZO4RO7GuV47q4wPZ/nQvcimyjJuXBkC/gQay+TcA7CdXQTgxI2scMIk
UPas36mle1vbAp+GfWcNxDxhmSnQvUke4/wHF6sNZ3BwKoTRqJqFcFUHm+2uo6A4
HCBtXM83Z1nDYkHUrfng99U+zgGDz2XKPko9OB0CgYAtVVOSkLhB8z1FDa5/iHyT
pDAlNMCA95hN5/8LFIYsUXL/nCbgY0gsd8K5po9ekZCCnpTh1sr61h9jk24mZUz6
uyyq94IrWfIGqSfi4DF/42LKdrPm8kU5DNRR4ZOaU3aQpKMt84KyQXL7ElyDLyPD
yj5Hm9xF+6mSPYzJJAItYQKBgHzUZXbzf7ZfK2fwVSAlt68BJDvnzP62Z95Hqgbp
hjDThXPbvBXYcGkt1fYzIPZPeOxe6nZv/qGOcEGou4X9nOogpMdC09qprTqw/q/N
w9vUI3SaW/jPuzeqZH7Mx1Ajhh8uC/fquK7eMe2Dbi0b2XOeB08atrLyhk3ZEMsL
2+IFAoGAUbmo0idyszcarBPPsiEFQY2y1yzHMajs8OkjUzOVLzdiMkr36LF4ojgw
UCM9sT0g1i+eTfTcuOEr3dAxcXld8Ffs6INSIplvRMWH1m7wgXMRpPCy74OuxlDQ
xwPp/1IVvrMqVgnyS9ezAeE0p9u8zUdZdwHz1UAggwbtHR6IbIA=
-----END RSA PRIVATE KEY-----
"""
TEST_DO_NOT_USE_SR_PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt8LZnIhuOdL/BC029GOa
JkVUAqgp2PcmbFr2Qwhf/514DUUQ9sKJ1rvwvbmmW2zE8JRtdY3ey0RXGtMn5UZH
s8NReHzMxvsmHN4VuaGEnFmPwO821Tkvg0LpKsLkotcw793FD/fut44N2lhpTSW2
Sc82uG0p9A+Kud8HCIaWaluosghk9rbMGYDzZQk8cA91GtKJRmIOED4PorB/dexD
f37qhuWNQgzyNyTti1DTDUIWyzQQJp926vLbkOip6Fc2R13hOFNETe68Rrw/h3hX
EFS17uPFZHsxvm9PFXX9KZMS25ohqbNh97I94LL4o4wybl6LaE6lJEHiD6docD0B
6wIDAQAB
-----END PUBLIC KEY-----
"""
TEST_DO_NOT_USE_EQ_PRIVATE_KEY = """-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwgSIPwv72JfGe87Jf+gI5HSzZfWRJEzAynv6g94rr78spbag
+4Q/63Zl1EBfKnOqZBBmDbBMoSFpGRWchW8YkYvo3jJx74ns0LkxDvDEXfKHAu64
w5AwvGjodSy2FP1vjz7U5rpAmhtB4hv5TVlMhCdLlXm5Xh66mpmRtGfVHrSfrqLs
RecGg2IOGstGRBcykBL2cewWEaW0ORm+L1zkUIUzrtdcGtX5iFrTd/Q5AUYXS8hf
4lIOkZc24nzj/ZGA+u8/fEKyHk9rNNHndRgQlivlorbF2L8+LF01V7GhkrwXV+gB
itIo7c2bGJjVVKIlJNK8aYqm2vnyli/J8ClSvQIDAQABAoIBADXYJCe7H63AkWkS
voEs2Cru6ErHl/xHAMoxFhk6s00W9UEYKh0jWsnyFdiN9NtHNmaG1ou9/cZKC2zW
vpWZe2wJNBtWTKB52qsieib3Usfv4uBBeC1t+tiPFNRQEEhK/Yb3nQZbckpSfjpO
ISYCPmX+sc9N9M/WH1uAextiJZdbdanuGC3TETj0qugb+3UGX/z4hVZKEPRVGxlf
oVULcbM9auKv9OGJJcNGlIva1nZeapb+jhlgmfwJVCDr7vNtKC1D6sziU+HGj0dP
3A4+FaGU9akfQPDUkYt7tfXNiGcYa5CEYyzBwZ7RQ1RnZoUjA0m0Nhb3VrQoQblA
5a7BqNECgYEA4+V/R9HPz9RKWsaWTtqB8PpO/XuxmGta8TME2YVdtSMTrlL/riza
OlXVTFK+dlyT+9WpDgmQStK8DBAh1nmu4EqdDrYvOtYUd6SHMNC40szvS3jMbfNp
AXEmoqToabGTASqWv55sbQMA0OZE0QIEHoIYNiUVDDUqIe0I85Tiwi8CgYEA2fGC
pgfyhNRH5V6U9yxNShh9K3r+ioI7AW0vtezCOmZgQ1D+I5PRMXttJvL/kPgQn3eR
7tB/u5Kra/yGLlj7hKxShwPvT10G+IxOfpfX1u3aJIWd25UWPvuMIUmCstTufw1l
P6fA6HFuV9N6p4gGdUG6sj/91CNSLm/M8Jj9mtMCgYEAxwRT8tQ3Nredd0iVWqdX
cqok8RhkL0cRVDHJumvNObI4LbQttF1W9jqe2tgnnBWc5f/gcnHHoJAHyEEOS85X
+WcvYPmYpTjvBsyXgvnDbdOp5a7IV/yJZsj5hG+exy5bwlj+7Lfc2BYXUFbHIf8w
ubPCkQYxK0gCUz484vrSS+ECgYBxAdeau7g2w9PbzSU03RXee8A7kXT24PwziygY
DwHPQlJb1V1RmU35eGRqs8lspBQKe/eBez8gRbb5MWFqGt2gN7I7LAEkh7obmrUA
0z8pxP89vMLTnwR/9/L7N6C7lclsu8dqMFPIszhh9dg9kjy3BDQIRUIag44TYglE
IDAv3QKBgQC6ZH412yYMqGz3VKpCnE8NfoSMpUhFl1tpS12NNohVarJPABspn6rX
mYWGeHCFOvLLeeW9SI3T8R+uar4cCyRVtCCi1n05D/Gmy10Enf8QyZFx3mMwuWLq
5QIaYe1+U/9+2rdrEt7XL3Q8gbIJ98sebY+/on1AYEKEU2YpQ+v2ng==
-----END RSA PRIVATE KEY-----
"""
TEST_DO_NOT_USE_EQ_PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwgSIPwv72JfGe87Jf+gI
5HSzZfWRJEzAynv6g94rr78spbag+4Q/63Zl1EBfKnOqZBBmDbBMoSFpGRWchW8Y
kYvo3jJx74ns0LkxDvDEXfKHAu64w5AwvGjodSy2FP1vjz7U5rpAmhtB4hv5TVlM
hCdLlXm5Xh66mpmRtGfVHrSfrqLsRecGg2IOGstGRBcykBL2cewWEaW0ORm+L1zk
UIUzrtdcGtX5iFrTd/Q5AUYXS8hf4lIOkZc24nzj/ZGA+u8/fEKyHk9rNNHndRgQ
livlorbF2L8+LF01V7GhkrwXV+gBitIo7c2bGJjVVKIlJNK8aYqm2vnyli/J8ClS
vQIDAQAB
-----END PUBLIC KEY-----
"""
# jwt.io public key signed
TEST_DO_NOT_USE_PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3Wojg
GHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlv
dbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GU
nKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB
-----END PUBLIC KEY-----"""
def get_mock_key_store(key_purpose):
return KeyStore(
{
"keys": {
"e19091072f920cbf3ca9f436ceba309e7d814a62": {
"purpose": key_purpose,
"type": "private",
"value": TEST_DO_NOT_USE_SR_PRIVATE_PEM,
},
"EQ_USER_AUTHENTICATION_SR_PRIVATE_KEY": {
"purpose": key_purpose,
"type": "private",
"value": TEST_DO_NOT_USE_SR_PRIVATE_PEM,
"service": "some-service",
},
"EDCRRM": {
"purpose": key_purpose,
"type": "public",
"value": TEST_DO_NOT_USE_PUBLIC_KEY,
"service": "some-service",
},
"709eb42cfee5570058ce0711f730bfbb7d4c8ade": {
"purpose": key_purpose,
"type": "public",
"value": TEST_DO_NOT_USE_UPSTREAM_PUBLIC_PEM,
"service": "some-service",
},
"KID_FOR_EQ_V2": {
"purpose": key_purpose,
"type": "public",
"value": TEST_DO_NOT_USE_PUBLIC_KEY,
"service": "eq_v2",
},
}
}
)
|
import pygame
pygame.init()
canvas = pygame.display.set_mode([500, 250])
# to draw text you need the pygame.font module
# SysFont(name, size, bold=False, italic=False)
font = pygame.font.SysFont("Arial", 24)
# render(text, antialias, color, background=None)
antialias = True
text = "Yes"
text_surface = font.render(text, antialias, (255, 255, 255))
canvas.blit(text_surface, (250, 125))
text = "No"
text_surface = font.render(text, antialias, (255, 255, 255))
canvas.blit(text_surface, (300, 125))
pygame.display.flip()
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from contest.models import *
from contest.functions import *
# Create your views here.
def index(request, tag=None):
    tags = ContestTag.objects.order_by()
    if tag is not None:
        contests = Contest.objects.filter(tags__tag=tag).order_by('name')
        title_add = tags.filter(tag=tag).all()[0].name
    else:
        contests = Contest.objects.order_by('name')
        title_add = None
    for contest in contests:
        contest.levelFrom = lksh_lvl(contest.levelFrom)
        contest.levelTo = lksh_lvl(contest.levelTo)
    return render(request, "index.html", {
        "title": u"Контесты",
        "title_add": title_add,
        "contest_tags": tags,
        "contest_list": contests,
    })
|
import unittest
from conans.test.utils.tools import TestClient
from conans.util.files import save
import os
class SettingConstraintTest(unittest.TestCase):
def settings_constraint_test(self):
conanfile = """from conans import ConanFile
class Test(ConanFile):
name = "Hello"
version = "0.1"
settings = {"compiler": {"gcc": {"version": ["7.1"]}}}
def build(self):
self.output.info("Compiler version!: %s" % self.settings.compiler.version)
"""
test = """from conans import ConanFile
class Test(ConanFile):
requires = "Hello/0.1@user/channel"
def test(self):
pass
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test})
default_profile = os.path.join(client.base_folder, ".conan/profiles/default")
save(default_profile, "[settings]\ncompiler=gcc\ncompiler.version=6.3")
error = client.run("test_package", ignore_error=True)
self.assertTrue(error)
self.assertIn("Invalid setting '6.3' is not a valid 'settings.compiler.version'",
client.user_io.out)
client.run("test_package -s compiler=gcc -s compiler.version=7.1")
self.assertIn("Hello/0.1@user/channel: Compiler version!: 7.1", client.user_io.out)
self.assertIn("Hello/0.1@user/channel: Generating the package", client.user_io.out)
|
import unittest
from gita_md_writer import mdcumulate, groupadja
class MDChapterTest(unittest.TestCase):
def test_adjacent_paras_of_same_style_are_grouped(self):
adjacent_paras = [
{"para": "para1.1", "style": "style1"},
{"para": "para1.2", "style": "style1"},
{"para": "para2", "style": "style2"}
]
grouped_paras = groupadja(adjacent_paras)
self.assertEqual(len(grouped_paras), 2)
self.assertEqual(grouped_paras[0]["style"], "style1")
self.assertEqual(len(grouped_paras[0]["paras"]), 2)
self.assertEqual(grouped_paras[0]["paras"][0]["para"], "para1.1")
self.assertEqual(grouped_paras[0]["paras"][1]["para"], "para1.2")
self.assertEqual(len(grouped_paras[1]["paras"]), 1)
self.assertEqual(grouped_paras[1]["paras"][0]["para"], "para2")
def test_sample_chapter_written_as_markdown(self):
gulpables = mdcumulate(sample_chapter_paras)
first_title = list(gulpables.keys())[0]
firstchapmd = gulpables[first_title]
self.assertEqual(first_title, "Chapter 2")
second_title = list(gulpables.keys())[1]
shlokamd = gulpables[second_title]
self.assertEqual(second_title, "2-1 to 2-3")
with open(f'{first_title}.test.md', 'w', encoding='utf8') as mdfile:
mdfile.write(firstchapmd)
with open(f'{second_title}.test.md', 'w', encoding='utf8') as mdfile:
mdfile.write(shlokamd)
print(f'see aesthetics: {first_title}.test.md and {second_title}.test.md')
sample_chapter_paras = [{
"chapter": "Chapter 2", "shloka": "",
"content": [{
"type": "anchor", "name": "_Chapter_2", "content": ""
}, {
"type": "text", "content": "Chapter 2"
}], "style": "heading1"
}, {
"chapter": "Chapter 2", "shloka": "2-1 to 2-3",
"content": [{
"type": "text", "content": "2-1 to 2-3"
}], "style": "heading2"
}, {
"chapter": "Chapter 2", "shloka": "2-1 to 2-3",
"content": [{
"type": "text",
"content": "tam tathA kr`payAviShTam ashrupUrNAkulEkShaNam |"
}], "style": "shloka"
}, {
"chapter": "Chapter 2", "shloka": "2-1 to 2-3",
"content": [{
"type": "text",
"content": "[ththA] Then, [madhusUdhana:] Krishna [idam vAkyam uvAcha] said this sentence [tam] to Arjuna, [kr`payAviShTam] who was overcome with pity, [ashrupUrNAkulEkShaNam] whose eyes were full of tears: "
}], "style": "explnofshloka"
}
]
if __name__ == '__main__':
unittest.main()
|
"""Hermes MQTT server for Rhasspy TTS using Google Wavenet"""
import asyncio
import hashlib
import io
import logging
import os
import shlex
import subprocess
import typing
import wave
from pathlib import Path
from uuid import uuid4
from google.cloud import texttospeech
from rhasspyhermes.audioserver import AudioPlayBytes, AudioPlayError, AudioPlayFinished
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.tts import GetVoices, TtsError, TtsSay, TtsSayFinished, Voice, Voices
_LOGGER = logging.getLogger("rhasspytts_wavenet_hermes")
# -----------------------------------------------------------------------------
class TtsHermesMqtt(HermesClient):
"""Hermes MQTT server for Rhasspy TTS using Google Wavenet."""
def __init__(
self,
client,
credentials_json: Path,
cache_dir: Path,
voice: str = "en-US-Wavenet-C",
sample_rate: int = 22050,
play_command: typing.Optional[str] = None,
site_ids: typing.Optional[typing.List[str]] = None,
):
super().__init__("rhasspytts_wavenet_hermes", client, site_ids=site_ids)
self.subscribe(TtsSay, GetVoices, AudioPlayFinished)
self.credentials_json = credentials_json
self.cache_dir = cache_dir
self.voice = voice
self.sample_rate = int(sample_rate)
self.play_command = play_command
self.play_finished_events: typing.Dict[typing.Optional[str], asyncio.Event] = {}
# Seconds added to playFinished timeout
self.finished_timeout_extra: float = 0.25
self.wavenet_client: typing.Optional[texttospeech.TextToSpeechClient] = None
# Create cache directory in profile if it doesn't exist
self.cache_dir.mkdir(parents=True, exist_ok=True)
if (not self.wavenet_client) and self.credentials_json.is_file():
_LOGGER.debug("Loading credentials at %s", self.credentials_json)
# Set environment var
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(
self.credentials_json.absolute()
)
self.wavenet_client = texttospeech.TextToSpeechClient()
# -------------------------------------------------------------------------
async def handle_say(
self, say: TtsSay
) -> typing.AsyncIterable[
typing.Union[
TtsSayFinished,
typing.Tuple[AudioPlayBytes, TopicArgs],
TtsError,
AudioPlayError,
]
]:
"""Run TTS system and publish WAV data."""
wav_bytes: typing.Optional[bytes] = None
try:
# Try to pull WAV from cache first
sentence_hash = self.get_sentence_hash(say.text)
cached_wav_path = self.cache_dir / f"{sentence_hash.hexdigest()}.wav"
if cached_wav_path.is_file():
# Use WAV file from cache
_LOGGER.debug("Using WAV from cache: %s", cached_wav_path)
wav_bytes = cached_wav_path.read_bytes()
if not wav_bytes:
# Run text to speech
assert self.wavenet_client, "No Wavenet Client"
_LOGGER.debug(
"Calling Wavenet (voice=%s, rate=%s)",
self.voice,
self.sample_rate,
)
synthesis_input = texttospeech.SynthesisInput(text=say.text)
voice_params = texttospeech.VoiceSelectionParams(
language_code = '-'.join(self.voice.split('-')[:2]),
name=self.voice,
)
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.LINEAR16,
sample_rate_hertz=self.sample_rate,
)
response = self.wavenet_client.synthesize_speech(
request={
"input": synthesis_input,
"voice": voice_params,
"audio_config": audio_config,
}
)
wav_bytes = response.audio_content
assert wav_bytes, "No WAV data received"
_LOGGER.debug("Got %s byte(s) of WAV data", len(wav_bytes))
if wav_bytes:
finished_event = asyncio.Event()
# Play WAV
if self.play_command:
try:
# Play locally
play_command = shlex.split(
self.play_command.format(lang=say.lang)
)
_LOGGER.debug(play_command)
subprocess.run(play_command, input=wav_bytes, check=True)
# Don't wait for playFinished
finished_event.set()
except Exception as e:
_LOGGER.exception("play_command")
yield AudioPlayError(
error=str(e),
context=say.id,
site_id=say.site_id,
session_id=say.session_id,
)
else:
# Publish playBytes
request_id = say.id or str(uuid4())
self.play_finished_events[request_id] = finished_event
yield (
AudioPlayBytes(wav_bytes=wav_bytes),
{"site_id": say.site_id, "request_id": request_id},
)
# Save to cache
with open(cached_wav_path, "wb") as cached_wav_file:
cached_wav_file.write(wav_bytes)
try:
                    # Wait for audio to finish playing or time out
wav_duration = TtsHermesMqtt.get_wav_duration(wav_bytes)
wav_timeout = wav_duration + self.finished_timeout_extra
_LOGGER.debug("Waiting for play finished (timeout=%s)", wav_timeout)
await asyncio.wait_for(finished_event.wait(), timeout=wav_timeout)
except asyncio.TimeoutError:
_LOGGER.warning("Did not receive playFinished before timeout")
except Exception as e:
_LOGGER.exception("handle_say")
yield TtsError(
error=str(e),
context=say.id,
site_id=say.site_id,
session_id=say.session_id,
)
finally:
yield TtsSayFinished(
id=say.id, site_id=say.site_id, session_id=say.session_id
)
# -------------------------------------------------------------------------
async def handle_get_voices(
self, get_voices: GetVoices
) -> typing.AsyncIterable[typing.Union[Voices, TtsError]]:
"""Publish list of available voices."""
voices: typing.List[Voice] = []
try:
if self.wavenet_client:
response = self.wavenet_client.list_voices()
voicelist = sorted(response.voices, key=lambda voice: voice.name)
for item in voicelist:
voice = Voice(voice_id=item.name)
voice.description = texttospeech.SsmlVoiceGender(item.ssml_gender).name
voices.append(voice)
except Exception as e:
_LOGGER.exception("handle_get_voices")
yield TtsError(
error=str(e), context=get_voices.id, site_id=get_voices.site_id
)
# Publish response
yield Voices(voices=voices, id=get_voices.id, site_id=get_voices.site_id)
# -------------------------------------------------------------------------
async def on_message(
self,
message: Message,
site_id: typing.Optional[str] = None,
session_id: typing.Optional[str] = None,
topic: typing.Optional[str] = None,
) -> GeneratorType:
"""Received message from MQTT broker."""
if isinstance(message, TtsSay):
async for say_result in self.handle_say(message):
yield say_result
elif isinstance(message, GetVoices):
async for voice_result in self.handle_get_voices(message):
yield voice_result
elif isinstance(message, AudioPlayFinished):
# Signal audio play finished
finished_event = self.play_finished_events.pop(message.id, None)
if finished_event:
finished_event.set()
else:
_LOGGER.warning("Unexpected message: %s", message)
# -------------------------------------------------------------------------
def get_sentence_hash(self, sentence: str):
"""Get hash for cache."""
m = hashlib.md5()
m.update(
"_".join(
[
sentence,
self.voice,
str(self.sample_rate),
]
).encode("utf-8")
)
return m
@staticmethod
def get_wav_duration(wav_bytes: bytes) -> float:
"""Return the real-time duration of a WAV file"""
with io.BytesIO(wav_bytes) as wav_buffer:
wav_file: wave.Wave_read = wave.open(wav_buffer, "rb")
with wav_file:
width = wav_file.getsampwidth()
rate = wav_file.getframerate()
# getnframes is not reliable.
# espeak inserts crazy large numbers.
guess_frames = (len(wav_bytes) - 44) / width
return guess_frames / float(rate)
|
from jamesbot.data_loader import DataLoader
|
import os
import re
import sys
import json
import shutil
from optparse import OptionParser
from subprocess import check_call
CONDA_ENV_SH = """#!/bin/bash
if [ -z "${CDH_PYTHON}" ]; then
export CDH_PYTHON=${PARCELS_ROOT}/${PARCEL_DIRNAME}/bin/python
fi
if [ -n "${R_HOME}" ]; then
export R_HOME="${PARCELS_ROOT}/${PARCEL_DIRNAME}/lib"
fi
if [ -n "${RHOME}" ]; then
export RHOME="${PARCELS_ROOT}/${PARCEL_DIRNAME}/lib/conda-R"
fi
if [ -n "${R_SHARE_DIR}" ]; then
export R_SHARE_DIR="${PARCELS_ROOT}/${PARCEL_DIRNAME}/lib/R/share"
fi
if [ -n "${R_INCLUDE_DIR}" ]; then
export R_INCLUDE_DIR="${PARCELS_ROOT}/${PARCEL_DIRNAME}/lib/R/include"
fi
"""
def metadata(name, version, os_version, prefix):
# $PREFIX/meta
meta_dir = os.path.join(prefix, "meta")
if not os.path.exists(meta_dir):
os.mkdir(meta_dir)
# Write parcel.json
packages = get_package_list(prefix)
data = get_parcel_json(name, version, packages, os_version)
with open(os.path.join(meta_dir, "parcel.json"), "w") as f:
json.dump(data, f, indent=4, sort_keys=True)
# Write parcel env scripts
with open(os.path.join(meta_dir, "conda_env.sh"), "w") as f:
f.write(CONDA_ENV_SH)
def get_package_list(prefix):
"""Get packages from an anaconda installation
"""
packages = []
# Get the (set of canonical names) of linked packages in prefix
meta_dir = os.path.join(prefix, "conda-meta")
pkg_list = set(fn[:-5] for fn in os.listdir(meta_dir) if fn.endswith(".json"))
# print(pkgs)
for dist in sorted(pkg_list):
name, version, build = dist.rsplit("-", 2)
packages.append({
"name": name,
"version": "%s-%s" % (version, build),
})
return packages
def get_parcel_json(name, version, packages, os_version):
_ = {
"schema_version": 1,
"name": name,
"version": version,
"provides": [
"conda",
],
"scripts": {
"defines": "conda_env.sh",
},
"packages": packages,
"setActiveSymlink": True,
"extraVersionInfo": {
# "fullVersion":"%s-%s" % (version, os_version),
"baseVersion": version,
"patchCount": "p0",
},
"components": [{
"name": name,
"version": version,
"pkg_version": version,
}],
"users": {},
"groups": [],
}
return _
if __name__ == "__main__":
params = OptionParser(
usage="usage: %prog [options] NAME VERSION OS_VERSION PREFIX",
description="Create parcel metadata for a conda installation")
opts, args = params.parse_args()
if len(args) != 4:
params.error("Exactly 4 arguments expected")
name = args[0]
version = args[1]
os_version = args[2]
prefix = args[3]
metadata(name, version, os_version, prefix)
|
from flask import Blueprint
bp = Blueprint("base_routes", __name__)
from . import delete, get, patch # noqa: F401, E402
|
# @Title: 键盘行 (Keyboard Row)
# @Author: 2464512446@qq.com
# @Date: 2019-10-08 16:59:59
# @Runtime: 24 ms
# @Memory: 11.5 MB
class Solution(object):
def findWords(self, words):
set1 = set('qwertyuiop')
set2 = set('asdfghjkl')
set3 = set('zxcvbnm')
res = []
for i in words:
x = i.lower()
setx = set(x)
            if setx <= set1 or setx <= set2 or setx <= set3:  # subset test: every letter on one row
res.append(i)
return res
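
# Example: Solution().findWords(["Hello", "Alaska", "Dad", "Peace"])
# -> ["Alaska", "Dad"]; every letter of each kept word lies on a single row.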
|
from multiprocessing import Manager,Queue,Pool
# 1. Inter-process communication
q = Queue(3)           # a Queue that stores at most three put messages
q.put("haha1")         # put any data type (blocks once a fourth item is added)
q.qsize()              # number of messages currently in the queue
q.get()                # FIFO: fetch the first message (blocks if the queue is empty)
q.empty()              # True if the queue has no messages
q.full()               # True if the queue is full
try:
    q.get_nowait()     # does not block, but raises queue.Empty, so wrap it in try/except
except Exception:
    pass
q.put_nowait("haha2")  # does not block, but raises queue.Full if the queue is full
|
import cv2
import numpy as np
img = cv2.imread("H:/Github/OpenCv/Research/images/opencv.jpg")
# cv2.imshow("Original",img )
grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(grey,75,127)
ret,thresh = cv2.threshold(edges,70,255,0)
thresh = cv2.subtract(255, thresh)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
# cv2.fillPoly(thresh, pts =[c], color=(45,240,100))
cv2.polylines(thresh,[c],True,(0,255,255),2)
# print(c)
cv2.imshow("output",thresh )
cv2.imwrite('output' + '.jpg' ,thresh )
cv2.waitKey(0)
cv2.destroyAllWindows()
# edges = cv2.Canny(thresh,0,255)
# cv2.imshow("output",thresh )
# cv2.imwrite('output' + '.jpg' ,thresh )
# dst = cv2.inpaint(thresh,img,3,cv2.INPAINT_TELEA)
# kernel = np.ones((2,2),np.uint8)
# dilation = cv2.dilate(thresh,kernel,iterations = 2)
#
#
|
'''
Multiples of 3 print 'Fizz', multiples of 5 print 'Buzz', and multiples of both print 'FizzBuzz'.
Now this is just a test to push the code.
'''
import os,sys
from flask import Flask
app = Flask(__name__)
@app.route("/")
#class FizzBuzz:
def numb():
a=[]
for i in range(1,101):
if (i%3==0) and (i%5==0):
a.append('FizzBuzz')
elif (i%5)==0:
a.append('Buzz')
elif (i%3==0):
a.append('Fizz')
else:
a.append(i)
return str(a)
port = os.getenv('VCAP_APP_PORT', '5000')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(port))
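
# Hypothetical local check: run this script, then GET http://localhost:5000/
# to receive the stringified list "[1, 2, 'Fizz', 4, 'Buzz', ...]".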
|
# This file should contain the main codes that controls the whole
# behaviour of the package.
#
# To import modules from different files, just add here:
# from <package_name>.module import functions, classes
def main():
pass
|
# -*- coding: utf-8 -*-
#############
#
# Copyright - Nirlendu Saha
#
# author - nirlendu@gmail.com
#
#############
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
from base import *
import dj_database_url
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*", ]
WSGI_APPLICATION = 'core.wsgi.heroku.application'
# Static files (CSS, JavaScript, Images) during deployment
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'var/www/static')
# url to access the static files
STATIC_URL = '/static/'
# static files during development wrt base directory
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'templates/include/static'),
)
MEDIA_URL = 'http://s3.ap-south-1.amazonaws.com/the-thing/'
AWS_STORAGE_BUCKET_NAME = 'the-thing'
# place where media files are served wrt base directory
#MEDIA_ROOT = os.path.join('http://the-thing.s3-website.ap-south-1.amazonaws.com', 'media')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
os.environ['DJANGO_SETTINGS_MODULE'] = 'core.settings.heroku'
# For the Postgre
DATABASES = {'default': dj_database_url.config(default=os.getenv('DATABASE_URL'))}
# For the Graph
GRAPHDB_URL = os.environ['GRAPHENEDB_URL']
|
import os, glob
import numpy as np
import scipy.stats as stats
import pandas
class _Sampler:
def __init__(self, sbml, config):
self.sbml = sbml
self.config = config
def sample(self):
raise NotImplementedError
def _sample1dist(self):
        raise NotImplementedError
class MonteCarloSampler(_Sampler):
def sample(self):
if self.config['settings']['sampler'] != 'monte_carlo':
raise ValueError('Must set "settings.sampler = monte_carlo" to use '
'monte carlo sampling')
species = self.config['species']
# check for special case when all distributions and parameters to the distributions are equal
df = pandas.DataFrame(species).transpose()
        if len(set(df['distribution'])) == 1 and len(set(df['loc'])) == 1 and len(set(df['scale'])) == 1:
return list(self._sample1dist())[0]
        sample_list = []
        for k, v in species.items():
            params = dict(v)  # copy so the config is not mutated
            dist = params.pop('distribution')
            # draw one sample per species from its frozen scipy distribution
            sample_list.append(dist(**params).rvs(size=1)[0])
        return sample_list
def _sample1dist(self):
"""
Sample the required number of time from a single distribution.
For the special case when only one distribution with one set of parameters is being used
it is more efficient to sample using the inbuilt 'size' argument.
Returns:
"""
species = self.config['species']
n = len(species)
keys = list(species.keys())
dist = species[keys[0]]['distribution']
loc = species[keys[0]]['loc']
scale = species[keys[0]]['scale']
samples = dist(loc=loc, scale=scale).rvs(n)
yield samples
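
# A hedged example of the config shape these samplers expect (field names are
# inferred from the code above; the sbml argument is a placeholder here):
if __name__ == '__main__':
    config = {
        'settings': {'sampler': 'monte_carlo'},
        'species': {
            'S1': {'distribution': stats.norm, 'loc': 0, 'scale': 1},
            'S2': {'distribution': stats.norm, 'loc': 0, 'scale': 1},
        },
    }
    print(MonteCarloSampler(sbml=None, config=config).sample())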
|
from collections import namedtuple
from simlammps.bench.util import get_particles
from simlammps.testing.md_example_configurator import MDExampleConfigurator
from simphony.bench.util import bench
from simphony.core.cuba import CUBA
from simphony.engine import lammps
_Tests = namedtuple(
'_Tests', ['method', 'name'])
def configure_wrapper(wrapper, state_data, particles, number_time_steps):
""" Configure wrapper
Parameters:
-----------
wrapper : ABCModelingEngine
wrapper to be configured
state_data : StateData
state data (materials)
particles : ABCParticles
particles to use
number_time_steps : int
number of time steps to run
"""
materials = [material for material in state_data.iter_materials()]
configurator = MDExampleConfigurator(materials=materials,
number_time_steps=number_time_steps)
configurator.set_configuration(wrapper)
wrapper.add_dataset(particles)
def run(wrapper):
wrapper.run()
def run_iterate(wrapper):
wrapper.run()
for particles_dataset in wrapper.iter_datasets():
for particle in particles_dataset.iter(item_type=CUBA.PARTICLE):
pass
def run_update_run(wrapper):
wrapper.run()
for particles_dataset in wrapper.iter_datasets():
for particle in particles_dataset.iter(item_type=CUBA.PARTICLE):
particles_dataset.update([particle])
wrapper.run()
def describe(name, number_particles, number_steps, is_internal):
wrapper_type = "INTERNAL" if is_internal else "FILE-IO"
result = "{}__{}_particles_{}_steps_{}:".format(name,
number_particles,
number_steps,
wrapper_type)
return result
def run_test(func, wrapper):
func(wrapper)
if __name__ == '__main__':
run_wrapper_tests = [_Tests(method=run,
name="run"),
_Tests(method=run_iterate,
name="run_iterate"),
_Tests(method=run_update_run,
name="run_update_run")]
for is_internal in [True, False]:
for y_range in [3000, 8000]:
# test different run scenarios
particles, state_data = get_particles(y_range)
number_particles = sum(p.count_of(
CUBA.PARTICLE) for p in particles)
number_time_steps = 10
SD = "DUMMY - TODO"
for test in run_wrapper_tests:
lammps_wrapper = lammps.LammpsWrapper(
use_internal_interface=is_internal)
configure_wrapper(lammps_wrapper,
state_data,
particles,
number_time_steps=number_time_steps)
results = bench(lambda: run_test(test.method, lammps_wrapper),
repeat=1,
adjust_runs=False)
print(describe(test.name,
number_particles,
number_time_steps,
is_internal), results)
# test configuration
lammps_wrapper = lammps.LammpsWrapper(
use_internal_interface=is_internal)
results = bench(lambda: configure_wrapper(lammps_wrapper,
state_data,
particles,
number_time_steps),
repeat=1,
adjust_runs=False)
print(describe("configure_wrapper",
number_particles,
number_time_steps,
is_internal), results)
|
#!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow"
### points mixed together--separate them so we can give them different colors
### in the scatterplot and identify them visually
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(grade_slow, bumpy_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
#plt.show()
################################################################################
print "Initial view complete"
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
#K nearest neighbors
from sklearn import neighbors
knn_clf= neighbors.KNeighborsClassifier(n_neighbors=5)
knn_clf.fit(features_train,labels_train)
pred=knn_clf.predict(features_test)
from sklearn.metrics import accuracy_score
knn_accuracy=accuracy_score(pred,labels_test)
print ("k nearest neighbors with accuracy is %f " %(knn_accuracy))
#Naive Bayes
from sklearn.naive_bayes import GaussianNB
nb_clf = GaussianNB()
nb_clf.fit(features_train,labels_train)
nb_predict=nb_clf.predict(features_test)
print ("Naive Bayes: %f" %(accuracy_score(nb_predict,labels_test)))
# Decision Tree
from sklearn import tree
dt_classifier = tree.DecisionTreeClassifier(min_samples_split= 50)
dt_classifier.fit(features_train,labels_train)
pred = dt_classifier.predict(features_test)
from sklearn.metrics import accuracy_score
acc_min_samples_split_2= accuracy_score(pred,labels_test)
print ("Decision Tree: %f" %(accuracy_score(pred,labels_test)))
# SVM
from sklearn.svm import SVC
svm_classifier = SVC(kernel="rbf", C=10000.0)
svm_classifier.fit(features_train, labels_train)
svm_prediction = svm_classifier.predict(features_test)
from sklearn.metrics import accuracy_score
print("SVM (RBF kernel) accuracy: %s" % (accuracy_score(svm_prediction, labels_test)))
# adaboost
from sklearn.ensemble import AdaBoostClassifier
adb_classifier = AdaBoostClassifier()
adb_classifier.fit(features_train,labels_train)
adb_pred = adb_classifier.predict(features_test)
print("Adaboost accuracy: %f" %(accuracy_score(adb_pred,labels_test)))
#random forest
from sklearn.ensemble import RandomForestClassifier
rf_classifier = RandomForestClassifier()
rf_classifier.fit(features_train,labels_train)
rf_pred = rf_classifier.predict(features_test)
print("Random forest accuracy: %f" %(accuracy_score(rf_pred,labels_test)))
def drawPicture(clf):
    try:
        print("Before calling pretty picture")
        prettyPicture(clf, features_test, labels_test)
        print("After calling pretty picture")
    except NameError:
        pass
|
/Users/karshenglee/anaconda3/lib/python3.6/fnmatch.py
|
'''
Task 1 - write a script that:
- prints the current time every 30 seconds,
- spends the rest of the time waiting for input,
- exits when a space is entered.
'''
import time
from threading import Thread
from threading import Event
class TimerThread(Thread):
def __init__(self, event):
Thread.__init__(self)
self.stopped = event
def run(self):
print(time.ctime())
        while not self.stopped.wait(30):  # wait(30) returns False on timeout, True once the event is set
print(time.ctime())
if __name__ == '__main__':
stopFlag = Event()
thread = TimerThread(stopFlag)
thread.start()
while True:
if input() == ' ':
stopFlag.set()
break
|
import os.path
import photo
import calibrate
from os import path
import numpy as np
def read_cam_params():
with np.load('pose/webcam_calibration_params.npz') as X:
mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]
return mtx, dist
def prepare_env():
if path.exists("pose/webcam_calibration_params.npz"):
        return read_cam_params()
else:
photo.take_photos()
calibrate.calibrate()
        return read_cam_params()
mtx,dist = prepare_env()
|
#import sys
#input = sys.stdin.readline
from collections import Counter
Q = 10**9+7
def main():
N = int(input())
A = list(map(int,input().split()))
if A[0] > 0:
print(0)
return
CA = Counter(A)
if CA[0] > 1:
print(0)
return
B = list(set(A))
B.sort()
for i, b in enumerate(B):
if i != b:
print(0)
return
now = 1
ans = 1
invtwo = pow(2,Q-2,Q)
for c in B:
b = CA[c]
ans *= pow((pow(2,now,Q)-1),b,Q)
# print(c, now, pow((pow(2,now,Q)-1),b,Q))
ans %= Q
# t = 1
# p = 1
# for i in range(0,b-1,2):
# # print(b,i,b-i,b-i-1, (b-i)*(b-i-1)*invtwo%Q)
# p *= (b-i)*(b-i-1)*invtwo%Q
# p %= Q
# t += p
# t %= Q
# print(b,t)
# ans *= t
ans *= pow(2, b*(b-1)//2,Q)
ans %= Q
now = b
print(ans)
if __name__ == '__main__':
main()
|
# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import os
import sys
import grpc
from .base import BaseService as BS, MessageHandler
from ..proto import gnes_pb2
class GRPCService(BS):
handler = MessageHandler(BS.handler)
def post_init(self):
self.channel = grpc.insecure_channel(
'%s:%s' % (self.args.grpc_host, self.args.grpc_port),
options=[('grpc.max_send_message_length', self.args.max_message_size * 1024 * 1024),
('grpc.max_receive_message_length', self.args.max_message_size * 1024 * 1024)])
foo = self.PathImport().add_modules(self.args.pb2_path, self.args.pb2_grpc_path)
# build stub
self.stub = getattr(foo, self.args.stub_name)(self.channel)
def close(self):
self.channel.close()
super().close()
@handler.register(NotImplementedError)
def _handler_default(self, msg: 'gnes_pb2.Message'):
yield getattr(self.stub, self.args.api_name)(msg)
class PathImport:
@staticmethod
def get_module_name(absolute_path):
module_name = os.path.basename(absolute_path)
module_name = module_name.replace('.py', '')
return module_name
def add_modules(self, pb2_path, pb2_grpc_path):
(module, spec) = self.path_import(pb2_path)
sys.modules[spec.name] = module
(module, spec) = self.path_import(pb2_grpc_path)
sys.modules[spec.name] = module
return module
def path_import(self, absolute_path):
module_name = self.get_module_name(absolute_path)
spec = importlib.util.spec_from_file_location(module_name, absolute_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[spec.name] = module
return module, spec
|
import cv2
import os
from scipy import ndimage
i = 0
for filename in os.listdir("path\\to\\folder\\of\\images\\"):
img = cv2.imread("path\\to\\folder\\of\\images\\"+filename)
rotated = ndimage.rotate(img, 270)
cv2.imwrite("path\\to\\folder\\for\\saving\\images\\"+filename, rotated)
print(i)
i += 1
cv2.waitKey(0)
cv2.destroyAllWindows()
|
'''
Specified insensitivity to IC phase
'''
import warnings
warnings.simplefilter("ignore", UserWarning)
# Import the necessary python library modules
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import minimize
import os
import sys
import pdb
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
# Import my python modules
import InputShaping as shaping
import Boom_Crane as BC
import Generate_Plots as genplt
# Use lab plot style
plt.style.use('Crawlab')
# define constants
DEG_TO_RAD = np.pi / 180
Boom=4.
Cable=0.3*Boom
Amax=174.0
Vmax=17.4
Luff_init=30.0
Luff_fin=60.0
Tmax=15.
Tstep=0.01
normalized_amp=0.5
phase=90.
d_y = 0.5*Boom
d_z = 0.25 * Boom
p = BC.init_crane( Boom,
Cable,
Amax,
Vmax,
Luff_init,
Luff_fin,
Tmax,
Tstep,
normalized_amp,
phase
)
[Amax,Vmax], l, r, StartTime, gamma_init,gamma_fin, t_step,t,X0,Distance = p
# Start generating responses
################################################
################################################
# Get the UM-ZV-IC shaped response
icsi_response = BC.response(p,'IC-SI Phase')
umzvic_response = BC.response(p,'UMZVIC-UMZVIC')
# Plot all of the relevant response values
################################################
################################################
# Determine the folder where the plots will be saved
folder = 'Figures/{}/Luff_{}_{}/norm_{}({}_{})_phase_{}'.format(
sys.argv[0],
Luff_init,Luff_fin,
normalized_amp,
np.round(X0[0]/DEG_TO_RAD).astype(int),
np.round(X0[1]/DEG_TO_RAD).astype(int),
phase
)
# Convert the response values into degrees
icsi_response[:,0] /= DEG_TO_RAD
umzvic_response[:,0] /= DEG_TO_RAD
genplt.compare_responses(t,
icsi_response[:,0],'IC-SI',
umzvic_response[:,0],'UM-ZV-IC',
name_append='UMZVIC_Swing',
xlabel='Time (s)',ylabel='Swing Angle (deg)',
folder=folder,grid=False,save_data=False
)
genplt.compare_responses(t,
icsi_response[:,2],'IC-SI',
name_append='UMZVIC_Displacement',
xlabel='Time (s)',ylabel='Luff Angle (deg)',
folder=folder,grid=False,save_data=False
)
genplt.compare_responses(t,
icsi_response[:,3],'IC-SI',
name_append='UMZVIC_Velocity',
xlabel='Time (s)',ylabel='Luff Velocity (deg/s)',
folder=folder,grid=False,save_data=False
)
|
import time
import sys,os
import curses
import datetime
import math
import json
from dateutil.parser import *
import urllib2
def check_wind():
    WindGust = 0  # default in case any of the steps below fails
    try:
try:
f = urllib2.urlopen('http://api.wunderground.com/api/c76852885ada6b8a/conditions/q/Ijsselstein.json')
except:
print('[NOK] Could not open website')
try:
json_string = f.read()
#print(json_string)
parsed_json = json.loads(json_string)
print(parsed_json)
except:
print('[NOK] Could not parse wunderground json')
try:
Wind = int(float(parsed_json['current_observation']['wind_kph']))
WindGust = int(float(parsed_json['current_observation']['wind_gust_kph']))
WindDir = parsed_json['current_observation']['wind_dir']
WindDirAngle = int(float(parsed_json['current_observation']['wind_degrees']))
except:
print('[NOK] Could not convert wunderground data')
except:
print('[NOK] Wunderground not found...')
WindGust=0
return WindGust
print('Read wind speed: '+str(check_wind()))
|
import vk_api, json
from vk_api import VkUpload
from vk_api.longpoll import VkLongPoll, VkEventType
#from si
vk_session = vk_api.VkApi(token="ac4a1efc08aba9faa25bb28e290debf62e3d5c2932430a57020cb07fa37698472f69a06b33bad06bad251")
vk = vk_session.get_api()
longpoll = VkLongPoll(vk_session)
upload = VkUpload(vk_session)
def get_but(text, color):
return {
"action": {
"type": "text",
"payload": "{\"button\": \"" + "1" + "\"}",
"label": f"{text}"
},
"color": f"{color}"
}
k = {
"one_time": False,
"buttons": [
[get_but('Расписание пар', 'secondary'),],
]
}
k = json.dumps(k, ensure_ascii=False).encode('utf-8')
k = str(k.decode('utf-8'))
k1 = {
"one_time": False,
"buttons": [
[get_but('первый курс', 'secondary'), get_but('второй курс', 'secondary')],
[get_but('третий курс', 'secondary'), get_but('четвёртый курс', 'secondary'), get_but('Назад', 'negative')]
]
}
k1 = json.dumps(k1, ensure_ascii=False).encode('utf-8')
k1 = str(k1.decode('utf-8'))
k2 = {
"one_time": False,
"buttons": [
[get_but('ИБС-125', 'secondary'), get_but('Курсы', 'negative')]
]
}
k2 = json.dumps(k2, ensure_ascii=False).encode('utf-8')
k2 = str(k2.decode('utf-8'))
k3 = {
"one_time": False,
"buttons": [
[get_but('понедельник', 'secondary')],
[get_but('вторник', 'secondary'), get_but('среда', 'secondary')],
[get_but('четверг', 'secondary'), get_but('пятница', 'secondary'), get_but('группы', 'negative')]
]
}
k3 = json.dumps(k3, ensure_ascii=False).encode('utf-8')
k3 = str(k3.decode('utf-8'))
def sender(id, text):
vk_session.method('messages.send', {'user_id': id, 'message': text, 'random_id': 0, 'keyboard': k})
def sender2(id, text):
vk_session.method('messages.send', {'user_id': id, 'message': text, 'random_id': 0, 'keyboard': k1})
def sender3(id, text):
vk_session.method('messages.send', {'user_id': id, 'message': text, 'random_id': 0, 'keyboard': k2})
def sender4(id, text):
vk_session.method('messages.send', {'user_id': id, 'message': text, 'random_id': 0, 'keyboard': k3})
def main():
for event in longpoll.listen():
if event.type == VkEventType.MESSAGE_NEW and event.to_me:
if event.to_me:
name = vk_session.method("users.get", {"user_ids": event.user_id})
name0 = name[0]["first_name"]
name1 = name[0]["last_name"]
request = event.text.lower()
if request == "расписание пар":
sender2(event.user_id, "\nВыбери курс ")
if request == "начать":
sender(event.user_id, "\nКаничива,я твой босс ," + name1 +" "+ name0 )
if request == "назад":
sender(event.user_id, "\nТы вернулся назад!")
if request == "первый курс":
sender3(event.user_id, "\nВыбери свою группу!")
if request == "курсы":
sender2(event.user_id, "\nКурсы!")
if request == "группы":
sender3(event.user_id, "\nГруппы!")
if request == "ибс-125":
sender4(event.user_id, "\nВыбери день!")
if request == "понедельник":
sender4(event.user_id, "Каничива🔥🔥🔥🔥🔥.""Твоё расписание \n 1)ОИБ\n2)Алгоритмизация\n3)ТСИ\n4)Электротехника")
if request == "вторник":
sender4(event.user_id, "Каничива🔥🔥🔥🔥.""Твоё расписание \n1)Электротехника\n2)Электротехника\n 3)ОИБ\n4)Англ")
if request == "среда":
sender4(event.user_id, "Каничива🔥🔥🔥.""Твоё расписание \n 1)Физра\n2)История\n3)Логика\n4)Алгоритмизация")
if request == "четверг":
sender4(event.user_id, "Каничива🔥🔥.""Твоё расписание \n 1)ОИБ\n2)Англ")
if request == "пятница":
sender4(event.user_id, "Каничива🔥.""Твоё расписание \n 1)Мат\n2)Мат\n 3)ТСИ\n 4)Алгоритмизация")
if request == "привет":
sender(event.user_id, "С возвращением семпай ," + name1 +" "+ name0 )
print("Дорогой/ая " + name1 +" "+ name0 + " написал/а сообщение: " + request)
main()
|
templates_list = dict(
photo="photo_message.jinja2",
document="document_message.jinja2",
voice="voice_message.jinja2",
video_note="video_note_message.jinja2",
sticker="sticker_message.jinja2",
animation="animation_message.jinja2",
_="base_message.jinja2",
)
def get_template(message, templates=templates_list):
for key in message.keys():
if template := templates.get(key):
return template
return templates.get("_")
__all__ = ["get_template", "templates_list"]
|
from pytuning.scales import create_edo_scale
edo_12_scale = create_edo_scale(12)
print((edo_12_scale[1] * 440).evalf(8))
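
# edo_12_scale[1] is 2**(1/12), so with A4 = 440 Hz this prints one
# equal-tempered semitone up: about 466.16 Hz (A#4/Bb4).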
|
from django.contrib import admin
from user.models import User
# Register your models here.
class UserAdmin(admin.ModelAdmin):
    list_display = ('username', 'password')  # show username and password columns in the admin user list
admin.site.register(User, UserAdmin)
|
import torch
import torch.nn as nn
class GCAModel(nn.Module):
def __init__(self,hparams,vocab):
super().__init__()
self.cdd_size = (hparams['npratio'] + 1) if hparams['npratio'] > 0 else 1
self.device = torch.device(hparams['device'])
self.embedding = vocab.vectors.to(self.device)
self.batch_size = hparams['batch_size']
self.signal_length = hparams['title_size']
self.his_size = hparams['his_size']
self.dropout_p = hparams['dropout_p']
self.filter_num = hparams['filter_num']
self.embedding_dim = hparams['embedding_dim']
# elements in the slice along dim will sum up to 1
self.softmax = nn.Softmax(dim=-1)
self.ReLU = nn.ReLU()
self.DropOut = nn.Dropout(p=hparams['dropout_p'])
self.CNN = nn.Conv1d(in_channels=self.embedding_dim,out_channels=self.filter_num,kernel_size=3,padding=1)
self.SeqCNN = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3,3), padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(3,3), stride=(3,3)),
nn.Conv2d(in_channels=32, out_channels=16, kernel_size=(3,3), padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(3,3), stride=(3,3))
)
# 64 is derived from SeqCNN
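        # (16 channels x 2 x 2 spatial after the two 3x3/stride-3 max-pools,
        # which assumes signal_length // 9 == 2, i.e. a title_size of roughly 18-26)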
self.learningToRank = nn.Linear(64, 1)
# self.learningToRank = nn.Linear(self.repr_dim * self.his_size, 1)
def _scaled_dp_attention(self,query,key,value):
""" calculate scaled attended output of values
Args:
query: tensor of [*, query_num, key_dim]
key: tensor of [batch_size, *, key_num, key_dim]
value: tensor of [batch_size, *, key_num, value_dim]
Returns:
attn_output: tensor of [batch_size, *, query_num, value_dim]
"""
# make sure dimension matches
assert query.shape[-1] == key.shape[-1]
key = key.transpose(-2,-1)
attn_weights = torch.matmul(query,key)/torch.sqrt(torch.tensor([self.embedding_dim],dtype=torch.float,device=self.device))
attn_weights = self.softmax(attn_weights)
attn_output = torch.matmul(attn_weights,value)
return attn_output
def _news_encoder(self,news_batch):
""" encode batch of news with 1d-CNN
Args:
news_batch: tensor of [batch_size, *]
Returns:
            news_embedding: tensor of [batch_size, *, filter_num]
"""
news_embedding = self.embedding[news_batch].transpose(-2,-1).view(-1,self.embedding_dim,news_batch.shape[-1])
news_embedding = self.CNN(news_embedding).transpose(-2,-1).view(news_batch.shape + (self.filter_num,))
news_embedding = self.ReLU(news_embedding)
if self.dropout_p > 0:
news_embedding = self.DropOut(news_embedding)
return news_embedding
def _fusion(self, cdd_news_embedding, his_news_embedding):
""" concatenate candidate news title and history news title
Args:
cdd_news_embedding: tensor of [batch_size, cdd_size, signal_length, filter_num]
his_news_embedding: tensor of [batch_size, his_size, signal_length, filter_num]
Returns:
fusion_news: tensor of [batch_size, cdd_size, his_size, signal_length, signal_length]
"""
fusion_matrices = torch.matmul(cdd_news_embedding.unsqueeze(dim=2), his_news_embedding.unsqueeze(dim=1).transpose(-2,-1)).view(self.batch_size * self.cdd_size * self.his_size, 1, self.signal_length, self.signal_length)
fusion_vectors = self.SeqCNN(fusion_matrices).view(self.batch_size, self.cdd_size, self.his_size, -1)
fusion_vectors = torch.mean(fusion_vectors, dim=-2)
return fusion_vectors
def _click_predictor(self,fusion_vectors):
""" calculate batch of click probability
Args:
fusion_vectors: tensor of [batch_size, cdd_size, repr_dim]
Returns:
score: tensor of [batch_size, cdd_size]
"""
score = self.learningToRank(fusion_vectors).squeeze(dim=-1)
if self.cdd_size > 1:
score = nn.functional.log_softmax(score,dim=1)
else:
score = torch.sigmoid(score)
return score
def forward(self,x):
if x['candidate_title'].shape[0] != self.batch_size:
self.batch_size = x['candidate_title'].shape[0]
cdd_news_embedding = self._news_encoder(x['candidate_title'].long().to(self.device))
his_news_embedding = self._news_encoder(x['clicked_title'].long().to(self.device))
fusion_vectors = self._fusion(cdd_news_embedding, his_news_embedding)
score_batch = self._click_predictor(fusion_vectors)
return score_batch
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-14 07:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0003_urls'),
]
operations = [
migrations.RenameField(
model_name='property',
old_name='loc_level',
new_name='location_level',
),
migrations.AddField(
model_name='property',
name='publisher',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
import datetime
from haystack import indexes
from periodicals.models import Article
class ArticleIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
pub_date = indexes.DateTimeField(model_attr='issue__pub_date')
# pregenerate the search result HTML for an Article
# this avoids any database hits when results are processed
# at the cost of storing all the data in the Haystack index
result_text = indexes.CharField(indexed=False, use_template=True)
def get_model(self):
return Article
def index_queryset(self, using=None):
return self.get_model().objects.filter(issue__pub_date__lte=datetime.datetime.now())
|
"""
Specification objects and functions for the ``Date`` built-in.
"""
from __future__ import absolute_import
import time
import math
import operator
from .base import ObjectInstance, FunctionInstance
from .function import define_native_method
from ..exceptions import ESRangeError, ESTypeError
from ..literals import LiteralParser, LiteralParseError
from ..types import (
    NaN, inf, Undefined, Null, StringType, ObjectType, NumberType,
    is_callable, get_arguments, get_primitive_type
)
# primitive_value is a number, representing ms since unix epoch
DIGITS = set('0123456789')
MS_PER_DAY = 86400000
AVERAGE_DAYS_PER_YEAR = 365.2425
HOURS_PER_DAY = 24
MINUTES_PER_HOUR = 60
SECONDS_PER_MINUTE = 60
MS_PER_SECOND = 1000
MS_PER_MINUTE = 60000
MS_PER_HOUR = 3600000
WEEKDAY_NAMES = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
MONTH_NAMES = [
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec'
]
MONTH_START_DAYS = [
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
]
MONTH_START_DAYS_LEAP_YEAR = [
0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335
]
#
# Internal specification helper functions
#
def finite(x):
"""
Is the given value a non-infinite number?
"""
return not (math.isnan(x) or x == inf or x == -inf)
def day(t):
"""
Number of days represented by the given milliseconds.
15.9.1.2
"""
return t // MS_PER_DAY
def time_within_day(t):
"""
The remainder when converting ms to number of days.
15.9.1.2
"""
return t % MS_PER_DAY
def day_from_year(y):
"""
The day number of the first day of the given year.
15.9.1.3
"""
return 365 * (y - 1970) + ((y - 1969) // 4) - ((y - 1901) // 100) + ((y - 1601) // 400)
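# e.g. day_from_year(1970) == 0, day_from_year(1971) == 365 and
# day_from_year(1973) == 1096 (the leap year 1972 contributes 366 days)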
def days_in_year(y):
"""
The number of days in the given year.
15.9.1.3
"""
if (y % 4) != 0:
return 365
if (y % 4) == 0 and (y % 100) != 0:
return 366
if (y % 100) == 0 and (y % 400) != 0:
return 365
if (y % 400) == 0:
return 366
return 365
def time_from_year(y):
"""
The time value at the start of the given year.
15.9.1.3
"""
return MS_PER_DAY * day_from_year(y)
def year_from_time(t):
"""
The year that the given time falls within.
15.9.1.3
"""
y = int(((float(t) / float(MS_PER_DAY)) / AVERAGE_DAYS_PER_YEAR) + 1970)
t2 = time_from_year(y)
if t2 > t:
y = y - 1
elif (t2 + MS_PER_DAY * days_in_year(y)) <= t:
y = y + 1
return y
def in_leap_year(t):
"""
Is the year that the given time falls within a leap year?
15.9.1.3
"""
y = year_from_time(t)
return int(days_in_year(y) == 366)
def day_within_year(t):
"""
The number of the day the given time is in relative to the start of the
year the time falls within.
15.9.1.4
"""
return day(t) - day_from_year(year_from_time(t))
def month_from_time(t):
"""
The 0-based number of the month in the year the given time falls within.
15.9.1.4
"""
leap_year = in_leap_year(t)
day_in_year = day_within_year(t)
    month_start_days = leap_year and MONTH_START_DAYS_LEAP_YEAR or MONTH_START_DAYS
for i, start_day in enumerate(month_start_days[1:]):
if day_in_year < start_day:
return i
return 11
def date_from_time(t):
"""
    The 1-based day of the month that the given time falls within.
15.9.1.5
"""
day_in_year = day_within_year(t) + 1 # Adjust to 1-based
month = month_from_time(t)
month_start_days = in_leap_year(t) and MONTH_START_DAYS_LEAP_YEAR or MONTH_START_DAYS
month_start_day = month_start_days[month]
return day_in_year - month_start_day
def days_in_month(month, in_leap):
"""
Given the 0-based index of the month, return the number of days in the
month in the given in_leap context.
"""
month = month % 12
if month in (3, 5, 8, 10):
return 30
elif month in (0, 2, 4, 6, 7, 9, 11):
return 31
elif in_leap and month == 1:
return 29
elif month == 1:
return 28
def week_day(t):
"""
Return the 0-based index of the weekday the time falls within.
15.9.1.6
"""
return (day(t) + 4) % 7
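# Day 0 (1 January 1970) was a Thursday, so week_day(0) == 4 with Sunday == 0.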
def hour_from_time(t):
"""
The 0-based hour in the day the given time falls within.
15.9.1.10
"""
return (t // MS_PER_HOUR) % HOURS_PER_DAY
def min_from_time(t):
"""
The 0-based minute in the hour the given time falls within.
15.9.1.10
"""
return (t // MS_PER_MINUTE) % MINUTES_PER_HOUR
def sec_from_time(t):
"""
The 0-based second in the minute the given time falls within.
15.9.1.10
"""
return (t // MS_PER_SECOND) % SECONDS_PER_MINUTE
def ms_from_time(t):
"""
    The 0-based millisecond in the second the given time falls within.
15.9.1.10
"""
return t % MS_PER_SECOND
def local_tza():
"""
Return the local standard timezone adjustment in milliseconds.
15.9.1.7
"""
return -(time.timezone * MS_PER_SECOND)
def make_date(day, time):
"""
Return the ms representation of the given date number and ms within that
given date.
15.9.1.13
"""
if not finite(day) or not finite(time):
return NaN
return day * MS_PER_DAY + time
def make_time(hour, minute, sec, ms, to_integer=None):
"""
Calculate the milliseconds represented by the given time parts.
15.9.1.11
"""
if not finite(hour) or not finite(minute) or not finite(sec) or not finite(ms):
return NaN
if to_integer is None:
to_integer = lambda x: int(x)
return to_integer(hour) * MS_PER_HOUR + to_integer(minute) * MS_PER_MINUTE + to_integer(sec) * MS_PER_SECOND + to_integer(ms)
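# e.g. make_time(1, 2, 3, 4) == 3723004 (1 h + 2 min + 3 s + 4 ms in milliseconds)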
def make_day(year, month, date, to_integer=None):
"""
Calculate the day number represented by the given date parts.
Note that the ``to_integer`` parameter may be given to provide the
appropriate conversion for an ES execution context.
15.9.1.12
"""
if not finite(year) or not finite(month) or not finite(date):
return NaN
if to_integer is None:
to_integer = int
year = to_integer(year)
month = to_integer(month)
date = to_integer(date)
ym = year + (month // 12)
mn = month % 12
    sign = ym < 1970 and -1 or 1
    t = ym < 1970 and 1 or 0
    y = ym < 1970 and 1969 or 1970
    compare = (sign == -1) and operator.ge or operator.lt
    while compare(y, ym):
t = t + (sign * days_in_year(y) * MS_PER_DAY)
y = y + sign
for i in range(mn):
leap = in_leap_year(t)
t = t + days_in_month(i, leap) * MS_PER_DAY
if not year_from_time(t) == ym:
return NaN
if not month_from_time(t) == mn:
return NaN
if not date_from_time(t) == 1:
return NaN
return day(t) + date - 1
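# e.g. make_day(1970, 0, 1) == 0 (the epoch) and make_day(1970, 1, 1) == 31 (1 Feb 1970)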
def time_clip(time, to_integer=None):
"""
Convert the ECMAScript number value to a number of milliseconds.
15.9.1.14
"""
if to_integer is None:
to_integer = int
if not finite(time):
return NaN
if abs(time) > 8.64e15:
return NaN
return to_integer(time)
def next_sunday(t):
"""
Compute the next calendar Sunday from the given time.
"""
day = week_day(t)
if day != 0:
t = t + (7 - day) * MS_PER_DAY
return t
def in_dst(t, to_integer=None):
    """
    Determine whether the given time is in an alternate timezone.
    """
    if to_integer is not None:
        _make_time = lambda h, m, s, ms: make_time(h, m, s, ms, to_integer=to_integer)
        _make_day = lambda y, m, d: make_day(y, m, d, to_integer=to_integer)
    else:
        _make_time = make_time
        _make_day = make_day
    year = year_from_time(t)
    switch_time = _make_time(2, 0, 0, 0)
    # approximate US rules: before 2007 DST ran April..October, from 2007 on
    # it runs from mid-March to early November
    if year <= 2006:
        start = next_sunday(make_date(_make_day(year, 3, 1), switch_time))
        end = next_sunday(make_date(_make_day(year, 9, 24), switch_time))
    else:
        start = next_sunday(make_date(_make_day(year, 2, 7), switch_time))
        end = next_sunday(make_date(_make_day(year, 10, 1), switch_time))
    return start <= t < end
def daylight_saving_ta(t):
"""
The offset for the effective daylight saving time the time falls within.
15.9.1.8
"""
# time_in_year = t - time_from_year(year_from_time(t))
# leap_year = in_leap_year(t)
# year_start_week_day = week_day(time_from_year(year_from_time(t)))
ta = 0
# if in_dst(t):
# ta = -((time.altzone - time.timezone) * 1000)
return ta
def local_time(t):
"""
Compute the local time from the given UTC time.
15.9.1.9
"""
return t + local_tza() + daylight_saving_ta(t)
def utc(t):
"""
Compute the UTC time from the given local time.
"""
return t - local_tza() + daylight_saving_ta(t - local_tza())
#
# Parser objects for the ECMAScript date time string format
#
class DateTimeParser(LiteralParser):
"""
Parse ECMAScript date time formatted strings.
15.9.1.15
"""
def expect_digit(self):
if self.peek() not in DIGITS:
raise LiteralParseError()
return self.advance()
def consume_int(self, digits):
return int(u''.join([self.expect_digit() for i in range(digits)]))
def parse_date(self):
"""
YYYY[-MM[-DD]]
"""
year = self.consume_int(4)
month = 1
day = 1
if self.peek() == '-':
self.expect('-')
month = self.consume_int(2)
else:
return (year, month, day)
if self.peek() == '-':
self.expect('-')
day = self.consume_int(2)
return (year, month, day)
    def parse_time(self):
        """
        THH:mm[:ss[.sss]]
        """
        self.expect('T')
        hour = self.consume_int(2)
        self.expect(':')
        minutes = self.consume_int(2)
        seconds = 0
        ms = 0
        if self.peek() == ':':
            self.expect(':')
            seconds = self.consume_int(2)
            if self.peek() == '.':
                self.expect('.')
                ms = self.consume_int(3)
        return (hour, minutes, seconds, ms)
def parse_offset(self):
"""
Z | (+|-)HH:mm
"""
sign = 1
hours = 0
minutes = 0
next_char = self.peek()
if next_char == 'Z':
self.expect('Z')
self.expect('')
return 0
elif next_char == '-':
self.expect('-')
sign = -1
elif next_char == '+':
self.expect('+')
sign = 1
else:
raise LiteralParseError()
hours = self.consume_int(2)
self.expect(':')
minutes = self.consume_int(2)
        offset = (hours * MS_PER_HOUR) + (minutes * MS_PER_MINUTE)
return sign * offset
def parse(self):
"""
Return the time represented by the ECMAScript date time formatted
string.
"""
try:
year, month, day = self.parse_date()
result = make_date(make_day(year, month - 1, day), 0)
next_char = self.peek()
if next_char == 'T':
hour, minutes, seconds, ms = self.parse_time()
result = result + make_time(hour, minutes, seconds, ms)
elif next_char != '':
return NaN
next_char = self.peek()
if next_char == 'Z' or next_char == '+' or next_char == '-':
offset = self.parse_offset()
                result = result - offset
next_char = self.peek()
if next_char != '':
return NaN
return result
except LiteralParseError:
return NaN
def parse_datetime(string):
parser = DateTimeParser(string)
return parser.parse()
#
# Specification objects
#
class DateInstance(ObjectInstance):
"""
The specialized ``Date`` class object.
15.9.6
"""
es_class = "Date"
def __init__(self, interpreter, primitive_value):
super(DateInstance, self).__init__(interpreter)
self.primitive_value = primitive_value
def default_value(self, hint='String'):
"""
8.12.8
"""
if hint is None:
hint = 'String'
return super(DateInstance, self).default_value(hint=hint)
class DateConstructor(FunctionInstance):
"""
The ``Date`` constructor function.
15.9.2 & 15.9.3
"""
def __init__(self, interpreter):
super(DateConstructor, self).__init__(interpreter)
self.prototype = interpreter.FunctionPrototype
define_native_method(self, 'parse', self.parse_method, 1)
define_native_method(self, 'UTC', self.utc_method, 7)
define_native_method(self, 'now', self.now_method)
def time_clip(self, t):
"""
"""
return time_clip(t, to_integer=self.interpreter.to_integer)
def make_date_instance(self, primitive_value):
"""
"""
obj = DateInstance(self.interpreter, primitive_value)
obj.prototype = self.interpreter.DatePrototype
obj.set_property('prototype', self.interpreter.DatePrototype)
return obj
#
# Internal Specification Methods
#
def call(self, this, arguments):
"""
15.9.2
"""
obj = self.construct([])
return self.interpreter.to_string(obj)
def construct(self, arguments):
"""
15.9.3.1
"""
to_number = self.interpreter.to_number
num_args = len(arguments)
v = None
if num_args == 0:
return self.now_method(None, [])
elif num_args == 1:
v = self.interpreter.to_primitive(arguments[0])
if get_primitive_type(v) is StringType:
return self.parse_method(None, [v])
else:
v = to_number(v)
primitive_value = self.time_clip(v)
else:
def get_arguments(arguments, defaults):
values = []
num_arguments = len(arguments)
for i in range(7):
if i < num_arguments:
v = to_number(arguments[i])
else:
v = defaults[i]
values.append(v)
return values
year, month, date, hours, minutes, seconds, ms = get_arguments(
arguments, [Undefined, 0, 1, 0, 0, 0, 0]
)
if not math.isnan(year) and 0 <= year <= 99:
year = 1900 + year
final_date = make_date(
make_day(year, month, date),
make_time(hours, minutes, seconds, ms)
)
primitive_value = self.time_clip(utc(final_date))
return self.make_date_instance(primitive_value)
#
# Method property implementations
#
def parse_method(self, this, arguments):
"""
``Date.parse`` method implementation.
15.9.4.2
"""
string = Undefined
if arguments:
string = arguments[0]
string = self.interpreter.to_string(string)
primitive_value = parse_datetime(string)
primitive_value = self.time_clip(primitive_value)
primitive_value = utc(primitive_value)
return self.make_date_instance(primitive_value)
def utc_method(self, this, arguments):
"""
``Date.UTC`` method implementation.
15.9.4.3
"""
        def get_arguments(arguments, defaults):
            to_number = self.interpreter.to_number
            values = []
            num_arguments = len(arguments)
            for i in range(7):
                if i < num_arguments:
                    v = to_number(arguments[i])
                else:
                    v = defaults[i]
                values.append(v)
            return values
year, month, date, hours, minutes, seconds, ms = get_arguments(
arguments, [Undefined, Undefined, 1, 0, 0, 0, 0]
)
        if not math.isnan(year) and 0 <= year <= 99:
year = 1900 + year
final_date = make_date(
make_day(year, month, date),
make_time(hours, minutes, seconds, ms)
)
primitive_value = self.time_clip(final_date)
return self.make_date_instance(primitive_value)
def now_method(self, this, arguments):
"""
``Date.now`` method implementation.
15.9.4.4
"""
primitive_value = self.time_clip(int(time.time() * 1000))
return self.make_date_instance(primitive_value)
class DatePrototype(DateInstance):
"""
The prototype object assigned to ``Date`` instances.
15.9.5
"""
def __init__(self, interpreter):
super(DatePrototype, self).__init__(interpreter, NaN)
self.prototype = interpreter.ObjectPrototype
define_native_method(self, 'toString', self.to_string_method)
define_native_method(self, 'toDateString', self.to_date_string_method)
define_native_method(self, 'toTimeString', self.to_time_string_method)
define_native_method(self, 'toLocaleString', self.to_locale_string_method)
define_native_method(self, 'toLocaleDateString', self.to_locale_date_string_method)
define_native_method(self, 'toLocaleTimeString', self.to_locale_time_string_method)
define_native_method(self, 'valueOf', self.value_of_method)
define_native_method(self, 'getTime', self.get_time_method)
define_native_method(self, 'getFullYear', self.get_full_year_method)
define_native_method(self, 'getUTCFullYear', self.get_utc_full_year_method)
define_native_method(self, 'getMonth', self.get_month_method)
define_native_method(self, 'getUTCMonth', self.get_utc_month_method)
define_native_method(self, 'getDate', self.get_date_method)
define_native_method(self, 'getUTCDate', self.get_utc_date_method)
define_native_method(self, 'getDay', self.get_day_method)
define_native_method(self, 'getUTCDay', self.get_utc_day_method)
define_native_method(self, 'getHours', self.get_hours_method)
define_native_method(self, 'getUTCHours', self.get_utc_hours_method)
define_native_method(self, 'getMinutes', self.get_minutes_method)
define_native_method(self, 'getUTCMinutes', self.get_utc_minutes_method)
define_native_method(self, 'getSeconds', self.get_seconds_method)
define_native_method(self, 'getUTCSeconds', self.get_utc_seconds_method)
define_native_method(self, 'getMilliseconds', self.get_milliseconds_method)
define_native_method(self, 'getUTCMilliseconds', self.get_utc_milliseconds_method)
define_native_method(self, 'getTimezoneOffset', self.get_timezone_offset_method)
define_native_method(self, 'setTime', self.set_time_method, 1)
define_native_method(self, 'setMilliseconds', self.set_milliseconds_method, 1)
define_native_method(self, 'setUTCMilliseconds', self.set_utc_milliseconds_method, 1)
define_native_method(self, 'setSeconds', self.set_seconds_method, 2)
define_native_method(self, 'setUTCSeconds', self.set_utc_seconds_method, 2)
define_native_method(self, 'setMinutes', self.set_minutes_method, 3)
define_native_method(self, 'setUTCMinutes', self.set_utc_minutes_method, 3)
define_native_method(self, 'setHours', self.set_hours_method, 4)
define_native_method(self, 'setUTCHours', self.set_utc_hours_method, 4)
define_native_method(self, 'setDate', self.set_date_method, 1)
define_native_method(self, 'setUTCDate', self.set_utc_date_method, 1)
define_native_method(self, 'setMonth', self.set_month_method, 2)
define_native_method(self, 'setUTCMonth', self.set_utc_month_method, 2)
define_native_method(self, 'setFullYear', self.set_full_year_method, 3)
define_native_method(self, 'setUTCFullYear', self.set_utc_full_year_method, 3)
define_native_method(self, 'toUTCString', self.to_utc_string_method)
define_native_method(self, 'toISOString', self.to_iso_string_method)
define_native_method(self, 'toJSON', self.to_json_method, 1)
#
# Internal helper methods
#
def time_clip(self, time):
"""
"""
return time_clip(time, to_integer=self.interpreter.to_integer)
def get_value(self, obj):
"""
"""
if get_primitive_type(obj) is not ObjectType or obj.es_class != 'Date':
string = self.interpreter.to_string(obj)
raise ESTypeError('%s is not a Date object' % string)
return obj.primitive_value
def make_time_replace(self, t, arguments, possible_count):
"""
"""
to_number = self.interpreter.to_number
num_given = len(arguments)
defaults = [hour_from_time, min_from_time, sec_from_time, ms_from_time]
to_use = min(num_given, possible_count)
args = [default(t) for default in defaults[:-possible_count]]
args.extend(to_number(arg) for arg in arguments[:to_use])
args.extend(default(t) for default in defaults[len(defaults)-possible_count+to_use:])
return make_time(*args, to_integer=self.interpreter.to_integer)
def set_time_component(self, this, arguments, possible_count, local=True):
"""
"""
value = Undefined
if arguments:
value = arguments[0]
value = self.interpreter.to_number(value)
t = self.get_value(this)
if local:
t = local_time(t)
time = self.make_time_replace(t, arguments, possible_count)
d = make_date(day(t), time)
if local:
d = utc(d)
u = self.time_clip(d)
this.primitive_value = u
return u
def make_day_replace(self, t, arguments, possible_count):
"""
"""
to_number = self.interpreter.to_number
num_given = len(arguments)
to_use = min(num_given, possible_count)
defaults = [year_from_time, month_from_time, date_from_time]
args = [default(t) for default in defaults[:-possible_count]]
args.extend(to_number(arg) for arg in arguments[:to_use])
args.extend(default(t) for default in defaults[len(defaults)-possible_count+to_use:])
return make_day(*args, to_integer=self.interpreter.to_integer)
def set_date_component(self, this, arguments, possible_count, local=True):
"""
"""
primitive_value = self.get_value(this)
if math.isnan(primitive_value):
t = 0
else:
t = primitive_value
if local:
t = local_time(t)
d = make_date(
self.make_day_replace(t, arguments, possible_count),
time_within_day(t)
)
if local:
d = utc(d)
u = self.time_clip(d)
this.primitive_value = u
return u
#
# Method property implementations
#
def to_string_method(self, this, arguments):
"""
``Date.prototype.toString`` method implementation.
15.9.5.2
"""
t = local_time(self.get_value(this))
if math.isnan(t):
return u'Invalid Date'
year, month, day = year_from_time(t), month_from_time(t), date_from_time(t)
hour, minutes, seconds = hour_from_time(t), min_from_time(t), sec_from_time(t)
day_of_week = week_day(t)
return u'%s %s %02d %d %02d:%02d:%02d' % (WEEKDAY_NAMES[day_of_week], MONTH_NAMES[month], day, year, hour, minutes, seconds)
def to_date_string_method(self, this, arguments):
"""
``Date.prototype.toDateString`` method implementation.
15.9.5.3
"""
t = local_time(self.get_value(this))
if math.isnan(t):
return u'Invalid Date'
year, month, day = year_from_time(t), month_from_time(t), date_from_time(t)
day_of_week = week_day(t)
return u'%s %s %02d %d' % (WEEKDAY_NAMES[day_of_week], MONTH_NAMES[month], day, year)
def to_time_string_method(self, this, arguments):
"""
``Date.prototype.toTimeString`` method implementation.
15.9.5.4
"""
t = local_time(self.get_value(this))
if math.isnan(t):
return u'Invalid Date'
hour, minutes, seconds = hour_from_time(t), min_from_time(t), sec_from_time(t)
return u'%02d:%02d:%02d' % (hour, minutes, seconds)
def to_locale_string_method(self, this, arguments):
"""
``Date.prototype.toLocaleString`` method implementation.
15.9.5.5
"""
        return self.to_string_method(this, arguments)
def to_locale_date_string_method(self, this, arguments):
"""
``Date.prototype.toLocaleDateString`` method implementation.
15.9.5.6
"""
        return self.to_date_string_method(this, arguments)
def to_locale_time_string_method(self, this, arguments):
"""
``Date.prototype.toLocaleTimeString`` method implementation.
15.9.5.7
"""
        return self.to_time_string_method(this, arguments)
def value_of_method(self, this, arguments):
"""
``Date.prototype.valueOf`` method implementation.
15.9.5.8
"""
return self.get_value(this)
def get_time_method(self, this, arguments):
"""
``Date.prototype.getTime`` method implementation.
15.9.5.9
"""
return self.get_value(this)
def get_full_year_method(self, this, arguments):
"""
``Date.prototype.getFullYear`` method implementation.
15.9.5.10
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return year_from_time(local_time(t))
def get_utc_full_year_method(self, this, arguments):
"""
``Date.prototype.getUTCFullYear`` method implementation.
15.9.5.11
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return year_from_time(t)
def get_month_method(self, this, arguments):
"""
``Date.prototype.getMonth`` method implementation.
15.9.5.12
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return month_from_time(local_time(t))
def get_utc_month_method(self, this, arguments):
"""
``Date.prototype.getUTCMonth`` method implementation.
15.9.5.13
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return month_from_time(t)
def get_date_method(self, this, arguments):
"""
``Date.prototype.getDate`` method implementation.
15.9.5.14
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return date_from_time(local_time(t))
def get_utc_date_method(self, this, arguments):
"""
``Date.prototype.getUTCDate`` method implementation.
15.9.5.15
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return date_from_time(t)
def get_day_method(self, this, arguments):
"""
``Date.prototype.getDay`` method implementation.
15.9.5.16
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return week_day(local_time(t))
def get_utc_day_method(self, this, arguments):
"""
``Date.prototype.getUTCDay`` method implementation.
15.9.5.17
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return week_day(t)
def get_hours_method(self, this, arguments):
"""
``Date.prototype.getHours`` method implementation.
15.9.5.18
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return hour_from_time(local_time(t))
def get_utc_hours_method(self, this, arguments):
"""
``Date.prototype.getUTCHours`` method implementation.
15.9.5.19
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return hour_from_time(t)
def get_minutes_method(self, this, arguments):
"""
``Date.prototype.getMinutes`` method implementation.
15.9.5.20
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return min_from_time(local_time(t))
def get_utc_minutes_method(self, this, arguments):
"""
``Date.prototype.getUTCMinutes`` method implementation.
15.9.5.21
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return min_from_time(t)
def get_seconds_method(self, this, arguments):
"""
``Date.prototype.getSeconds`` method implementation.
15.9.5.22
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return sec_from_time(local_time(t))
def get_utc_seconds_method(self, this, arguments):
"""
``Date.prototype.getUTCSeconds`` method implementation.
15.9.5.23
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return sec_from_time(t)
def get_milliseconds_method(self, this, arguments):
"""
``Date.prototype.getMilliseconds`` method implementation.
15.9.5.24
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return ms_from_time(local_time(t))
def get_utc_milliseconds_method(self, this, arguments):
"""
``Date.prototype.getUTCMilliseconds`` method implementation.
15.9.5.25
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return ms_from_time(t)
def get_timezone_offset_method(self, this, arguments):
"""
``Date.prototype.getTimezoneOffset`` method implementation.
15.9.5.26
"""
t = self.get_value(this)
if math.isnan(t):
return NaN
return (t - local_time(t)) / MS_PER_MINUTE
def set_time_method(self, this, arguments):
"""
``Date.prototype.setTime`` method implementation.
15.9.5.27
"""
t = get_arguments(arguments, count=1)
v = self.time_clip(self.interpreter.to_number(t))
this.primitive_value = v
return v
def set_milliseconds_method(self, this, arguments):
"""
``Date.prototype.setMilliseconds`` method implementation.
15.9.5.28
"""
return self.set_time_component(this, arguments, 1)
def set_utc_milliseconds_method(self, this, arguments):
"""
``Date.prototype.setUTCMilliseconds`` method implementation.
15.9.5.29
"""
return self.set_time_component(this, arguments, 1, local=False)
def set_seconds_method(self, this, arguments):
"""
``Date.prototype.setSeconds`` method implementation.
15.9.5.30
"""
return self.set_time_component(this, arguments, 2)
def set_utc_seconds_method(self, this, arguments):
"""
``Date.prototype.setUTCSeconds`` method implementation.
15.9.5.31
"""
return self.set_time_component(this, arguments, 2, local=False)
def set_minutes_method(self, this, arguments):
"""
``Date.prototype.setMinutes`` method implementation.
15.9.5.32
"""
return self.set_time_component(this, arguments, 3)
def set_utc_minutes_method(self, this, arguments):
"""
``Date.prototype.setUTCMinutes`` method implementation.
15.9.5.33
"""
return self.set_time_component(this, arguments, 3, local=False)
def set_hours_method(self, this, arguments):
"""
``Date.prototype.setHours`` method implementation.
15.9.5.34
"""
return self.set_time_component(this, arguments, 4)
def set_utc_hours_method(self, this, arguments):
"""
``Date.prototype.setUTCHours`` method implementation.
15.9.5.35
"""
return self.set_time_component(this, arguments, 4, local=False)
def set_date_method(self, this, arguments):
"""
``Date.prototype.setDate`` method implementation.
15.9.5.36
"""
return self.set_date_component(this, arguments, 1)
def set_utc_date_method(self, this, arguments):
"""
``Date.prototype.setUTCDate`` method implementation.
15.9.5.37
"""
return self.set_date_component(this, arguments, 1, local=False)
def set_month_method(self, this, arguments):
"""
``Date.prototype.setMonth`` method implementation.
15.9.5.38
"""
return self.set_date_component(this, arguments, 2)
def set_utc_month_method(self, this, arguments):
"""
        ``Date.prototype.setUTCMonth`` method implementation.
15.9.5.39
"""
return self.set_date_component(this, arguments, 2, local=False)
def set_full_year_method(self, this, arguments):
"""
``Date.prototype.setFullYear`` method implementation.
15.9.5.40
"""
return self.set_date_component(this, arguments, 3)
def set_utc_full_year_method(self, this, arguments):
"""
``Date.prototype.setUTCFullYear`` method implementation.
15.9.5.41
"""
return self.set_date_component(this, arguments, 3, local=False)
def to_utc_string_method(self, this, arguments):
"""
``Date.prototype.toUTCString`` method implementation.
15.9.5.42
"""
t = self.get_value(this)
if math.isnan(t):
return u'Invalid Date'
year, month, day = year_from_time(t), month_from_time(t), date_from_time(t)
hour, minutes, seconds = hour_from_time(t), min_from_time(t), sec_from_time(t)
day_of_week = week_day(t)
return u'%s %s %02d %d %02d:%02d:%02d' % (WEEKDAY_NAMES[day_of_week], MONTH_NAMES[month], day, year, hour, minutes, seconds)
def to_iso_string_method(self, this, arguments):
"""
``Date.prototype.toISOString`` method implementation.
15.9.5.43
"""
t = self.get_value(this)
if not finite(t):
raise ESRangeError('Invalid time value')
t = utc(t)
year, month, day = year_from_time(t), month_from_time(t), date_from_time(t)
hour, minutes, seconds = hour_from_time(t), min_from_time(t), sec_from_time(t)
return u'%04d-%02d-%02dT%02d:%02d:%02dZ' % (year, month, day, hour, minutes, seconds)
def to_json_method(self, this, arguments):
"""
``Date.prototype.toJSON`` method implementation.
15.9.5.44
"""
o = self.interpreter.to_object(this)
tv = self.interpreter.to_primitive(o, 'Number')
if get_primitive_type(tv) is NumberType and not finite(tv):
return Null
to_iso = o.get('toISOString')
if is_callable(to_iso) is False:
raise ESTypeError('toISOString is not a function')
return to_iso.call(this, [])
|
from unittest import TestCase
import create_food
class TestFieldObjects(TestCase):
def test_coordinates(self):
s = create_food.save_terrain()
# self.assertTrue(isinstance(s, basestring))
|
# Restrict attribute types: People.name may only be a str, People.age only an int
# data descriptor
class Typed():
    def __init__(self, key, expected_type):
        self.key = key
        self.expected_type = expected_type
    def __get__(self, instance, owner):
        print('**__get__ method***')
        # print('**instance argument [%s] ***' % instance)
        # print('**owner argument [%s] ***' % owner)
        return instance.__dict__[self.key]
    def __set__(self, instance, value):
        print('**__set__ method***')
        # print('**instance argument [%s] ***' % instance)
        # print('**value argument [%s] ***' % value)
        # type check: if the value is not of the expected type, reject it
        if not isinstance(value, self.expected_type):
            print('Your data does not conform')
            raise TypeError('%s: the value passed in is not %s' % (self.key, self.expected_type))
        instance.__dict__[self.key] = value
    def __delete__(self, instance):
        print('**__delete__ method***')
        instance.__dict__.pop(self.key)
def deco(**kwargs):
def Wrapper(obj):
for key,val in kwargs.items():
print('====>',key,val)
# val = Typed(key,val)
            setattr(obj, key, Typed(key, val))  # set the descriptor as a class attribute
return obj
return Wrapper
@deco(name=str,age=int,salary=float,gender=str) # Deco() -->@Wrapper--> People = Wrapper(People)
class People:
# name = Typed('name',str)
# age = Typed('age',int)
    def __init__(self,name,age,salary,gender,height):
self.name=name
self.age=age
self.salary=salary
self.gender=gender
        self.height=height
p1 = People('安其拉',18,3000.00,'男',312)
# print(p1.__dict__)
print(People.__dict__)
print(p1.__dict__)
# print(p1.name)
p1.name = '妲己'
# print(p1.name)
#
# # print(p1.__dict__)
# del p1.name
# print(p1.__dict__)
# p2 = People(1212,18,3000)
# print(p2.__dict__)
|
__author__ = 'Jan Pecinovsky, Roel De Coninck'
"""
A sensor generates a single data stream.
It can have a parent device, but a sensor may also stand alone in a site.
It is an abstract class definition which has to be overridden (e.g. by a Fluksosensor).
This class contains all metadata concerning the function and type of the sensor (e.g. electricity - solar, ...).
"""
from opengrid.library import misc
from opengrid import ureg
import pandas as pd
import tmpo, sqlite3
class Sensor(object):
def __init__(self, key=None, device=None, site=None, type=None,
description=None, system=None, quantity=None, unit=None,
direction=None, tariff=None, cumulative=None):
self.key = key
self.device = device
self.site = site
self.type = type
self.description = description
self.system = system
self.quantity = quantity
self.unit = unit
self.direction = direction
self.tariff = tariff
self.cumulative = cumulative
def __repr__(self):
return """
{}
Key: {}
Type: {}
""".format(self.__class__.__name__,
self.key,
self.type
)
def get_data(self, head=None, tail=None, resample='min'):
"""
Return a Pandas Series with measurement data
Parameters
----------
head, tail: timestamps for the begin and end of the interval
Notes
-----
This is an abstract method, because each type of sensor has a different way of fetching the data.
Returns
-------
Pandas Series
"""
raise NotImplementedError("Subclass must implement abstract method")
def _get_default_unit(self, diff=True, resample='min'):
"""
Return a string representation of the default unit for the requested operation
If there is no unit, returns None
Parameters
----------
diff : True (default) or False
If True, the original data has been differentiated
resample : str (default='min')
Sampling rate, if any. Use 'raw' if no resampling.
Returns
-------
target : str or None
String representation of the target unit, eg m3/h, kW, ...
"""
if self.type in ['electricity', 'gas', 'heat', 'energy']:
if diff:
target = 'W'
else:
target = 'kWh'
elif self.type == 'water':
if diff:
target = 'l/min'
else:
target = 'liter'
elif self.type == 'temperature':
target = 'degC'
elif self.type == 'pressure':
target = 'Pa'
elif self.type in ['battery']:
target = 'V'
elif self.type in ['current']:
target = 'A'
elif self.type in ['light']:
target = 'lux'
elif self.type == 'humidity':
target = 'percent'
elif self.type in ['error', 'vibration', 'proximity']:
target = ''
else:
target = None
return target
def _unit_conversion_factor(self, diff=True, resample='min', target='default'):
"""
Return a conversion factor to convert the obtained data
The method starts from the unit of the sensor, and takes
into account sampling, differentiation (if any) and target unit.
        For gas, a default calorific value of 10 Wh/liter (10 kWh/m3) is used.
For some units, unit conversion does not apply, and 1.0 is returned.
Parameters
----------
diff : True (default) or False
If True, the original data has been differentiated
resample : str (default='min')
Sampling rate, if any. Use 'raw' if no resampling.
target : str , default='default'
String representation of the target unit, eg m3/h, kW, ...
If None, 1.0 is returned
Returns
-------
cf : float
Multiplication factor for the original data to the target unit
"""
# get the target
if target == 'default':
target = self._get_default_unit(diff=diff, resample=resample)
if target is None:
return 1.0
if resample == 'raw':
if diff:
raise NotImplementedError("Differentiation always needs a sampled dataframe")
# get the source
if not self.type == 'gas':
if not diff:
source = self.unit
else:
# differentiation. Careful, this is a hack of the unit system.
# we have to take care manually of some corner cases
if self.unit:
source = self.unit + '/' + resample
else:
source = self.unit
return misc.unit_conversion_factor(source, target)
else:
            # for gas, we need to take into account the calorific value
            # as of now, we use 10 Wh/liter (10 kWh/m3) by default
            CALORIFICVALUE = 10
q_src = 1 * ureg(self.unit)
q_int = q_src * ureg('Wh/liter')
if not diff:
source = str(q_int.units) # string representing the unit, mostly kWh
else:
source = str(q_int.units) + '/' + resample
return CALORIFICVALUE * misc.unit_conversion_factor(source, target)
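    # Worked example (a sketch relying on misc.unit_conversion_factor): a
    # cumulative electricity counter in 'Wh', differentiated at resample='min',
    # has source unit 'Wh/min'; converting to the default target 'W' gives 60.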
def last_timestamp(self, epoch=False):
"""
Get the last timestamp for a sensor
Parameters
----------
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
raise NotImplementedError("Subclass must implement abstract method")
class Fluksosensor(Sensor):
def __init__(self, key=None, token=None, device=None, type=None,
description=None, system=None, quantity=None, unit=None,
direction=None, tariff=None, cumulative=None, tmpos=None):
# invoke init method of abstract Sensor
super(Fluksosensor, self).__init__(key=key,
device=device,
site=device.site if device else None,
type=type,
description=description,
system=system,
quantity=quantity,
unit=unit,
direction=direction,
tariff=tariff,
cumulative=cumulative)
if token != '':
self.token = token
else:
self.token = device.mastertoken
if self.unit == '' or self.unit is None:
if self.type in ['water', 'gas']:
self.unit = 'liter'
elif self.type == 'electricity':
self.unit = 'Wh'
elif self.type == 'pressure':
self.unit = 'Pa'
elif self.type == 'temperature':
self.unit = 'degC'
elif self.type == 'battery':
self.unit = 'V'
elif self.type == 'light':
self.unit = 'lux'
elif self.type == 'humidity':
self.unit = 'percent'
elif self.type in ['error', 'vibration', 'proximity']:
self.unit = ''
if self.cumulative == '' or self.cumulative is None:
if self.type in ['water', 'gas', 'electricity', 'vibration']:
self.cumulative = True
else:
self.cumulative = False
self._tmpos = tmpos
@property
def tmpos(self):
if self._tmpos is not None:
return self._tmpos
elif self.device is not None:
return self.device.tmpos
else:
raise AttributeError('TMPO session not defined')
@property
def has_data(self):
"""
Checks if a sensor actually has data by checking the length of the
tmpo block list
Returns
-------
bool
"""
tmpos = self.site.hp.get_tmpos()
return len(tmpos.list(self.key)[0]) != 0
def get_data(self, head=None, tail=None, diff='default', resample='min', unit='default', tz='UTC'):
"""
Connect to tmpo and fetch a data series
Parameters
----------
        head, tail : timestamps
            Can be epoch, datetime or pd.Timestamp, with or without timezone (default=UTC)
diff : bool or 'default'
If True, the original data will be differentiated
If 'default', the sensor will decide: if it has the attribute
cumulative==True, the data will be differentiated.
resample : str (default='min')
Sampling rate, if any. Use 'raw' if no resampling.
unit : str , default='default'
String representation of the target unit, eg m**3/h, kW, ...
tz : str, default='UTC'
Specify the timezone for the index of the returned dataframe
Returns
-------
Pandas Series with additional attribute 'unit' set to
the string representation of the unit of the data.
"""
if head is None:
head = 0
if tail is None:
tail = 2147483647 # tmpo epochs max
data = self.tmpos.series(sid=self.key, head=head, tail=tail)
if data.dropna().empty:
# Return an empty dataframe with correct name
return pd.Series(name=self.key)
data = data.tz_convert(tz)
if resample != 'raw':
if resample == 'hour':
rule = 'H'
elif resample == 'day':
rule = 'D'
else:
rule = resample
# interpolate to requested frequency
newindex = data.resample(rule).first().index
data = data.reindex(data.index.union(newindex))
data = data.interpolate(method='time')
data = data.reindex(newindex)
if diff == 'default':
diff = self.cumulative
if diff:
data = data.diff()
# unit conversion
if unit == 'default':
unit = self._get_default_unit(diff=diff, resample=resample)
ucf = self._unit_conversion_factor(diff=diff, resample=resample, target=unit)
data *= ucf
data.unit = unit
return data
def last_timestamp(self, epoch=False):
"""
Get the theoretical last timestamp for a sensor
It is the mathematical end of the last block, the actual last sensor stamp may be earlier
Parameters
----------
epoch : bool
default False
If True return as epoch
If False return as pd.Timestamp
Returns
-------
pd.Timestamp | int
"""
tmpos = self.site.hp.get_tmpos()
return tmpos.last_timestamp(sid=self.key, epoch=epoch)
|
import zeroone_hash
from binascii import unhexlify, hexlify
import unittest
# zeroone block #1
# user@b1:~/zeroone$ zeroone-cli getblockhash 1
# 000005e9eeef7185898754d08dbfd6ecc167cfa83c4e15dcb1dcc0d79cc13fbf
# user@b1:~/zeroone$ zeroone-cli getblock 000005e9eeef7185898754d08dbfd6ecc167cfa83c4e15dcb1dcc0d79cc13fbf
# {
# "hash": "000005e9eeef7185898754d08dbfd6ecc167cfa83c4e15dcb1dcc0d79cc13fbf",
# "confirmations": 80391,
# "size": 179,
# "height": 1,
# "version": 536870912,
# "merkleroot": "a4298441592013a2b6265ac312aebc245fe53b3ce2c243598c89c4f70f17e6ae",
# "tx": [
# "a4298441592013a2b6265ac312aebc245fe53b3ce2c243598c89c4f70f17e6ae"
# ],
# "time": 1517407356,
# "mediantime": 1517407356,
# "nonce": 80213,
# "bits": "1e0ffff0",
# "difficulty": 0.000244140625,
# "chainwork": "0000000000000000000000000000000000000000000000000000000000200020",
# "previousblockhash": "00000c8e2be06ce7e6ea78cd9f6ea60e22821d70f8c8fbb714b6baa7b4f2150c",
# "nextblockhash": "00000aeb1683851ca7b40dea400cafe986116d904a93bae004341ea52a0930ab"
# }
header_hex = ("00000020" + # version
"0c15f2b4a7bab614b7fbc8f8701d82220ea66e9fcd78eae6e76ce02b8e0c0000" + # reverse-hex previousblockhash
"aee6170ff7c4898c5943c2e23c3be55f24bcae12c35a26b6a2132059418429a4" + # reverse-hex merkleroot
"7ccc715a" + # reverse-hex time
"f0ff0f1e" + # reverse-hex bits
"55390100") # reverse-hex nonce
best_hash = 'bf3fc19cd7c0dcb1dc154e3ca8cf67c1ecd6bf8dd05487898571efeee9050000' # reverse-hex block hash
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.block_header = unhexlify(header_hex)
self.best_hash = best_hash
def test_zeroone_hash(self):
self.pow_hash = hexlify(zeroone_hash.getPoWHash(self.block_header))
self.assertEqual(self.pow_hash.decode(), self.best_hash)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Gabriel Santos IS-211 9/12/2020
import urllib.request
import re
import logging
import csv
import argparse
import datetime
import requests
hours = {hour: 0 for hour in range(24)}
def downloadData(url):
"""Part I Pull
Down Web Log File
Your program should download the web log file from the location provided by a url
parameter. This is just
like the previous assignment (remember to use agrparse). The URL you can use for testing is located here:
TODO .
Accepts a URL as a string and opens it.
Parameters:
url (string): the url to be opened
Example:
>>> downloadData('http://s3.amazonaws.com/cuny-is211-spring2015/weblog.csv')
"""
file = requests.get(url)
csvFile = file.content.decode()
return csvFile
def processData(data):
"""Part II Process
File Using CSVThe file should then be processed, using the CSV module from this week. Here is an example line from the
file, with an explanation as to what each fields represents:
/images/test.jpg, 01/27/2014 03:26:04, Mozilla/5.0 (Linux) Firefox/34.0, 200, 346547
When broken down by column, separated by commas, we have:
path to file, datetime accessed, browser, status of request, request size in bytes
Processes data from the contents of a CSV file line by line.
Parameters:
data - the contents of the CSV file
Example:
>>> processData(downloadedData)
"""
lines = 0
images = 0
browsers = {'Firefox': 0,
'Google Chrome': 0,
'Internet Explorer': 0,
'Safari': 0}
file = csv.reader(data.splitlines())
"""Part III Search
for Image Hits
After processing the file, your next task will be to search for all hits that are for an image file. To check if a hit
is for an image file or not, we will simply check that the file extension is either .jpg, .gif or .png. Remember to
use regular expressions for this. Once you have found all the hits relating to images, print out how many
hits, percentagewise,
are for images. As an example, your program should print to the screen something
like “Image requests account for 45.3% of all requests”
"""
for line in file:
lines += 1
        if re.search(r'\.(jpe?g|gif|png)$', line[0], re.IGNORECASE):
images += 1
"""Part IV Finding
Most Popular Browser
Once Part III is done, your program should find out which browser people are using is the most popular. The
third column of the file stores what is known as the UserAgent,
which is a string web browser’s use to
identify themselves. The program should use a regular expression to determine what kind of browser
created each hit, and print out which browser is the most popular that day. For this exercise, all you need to
do is determine if the browser is Firefox, Chrome, Internet Explorer or Safari.
"""
if re.search("Firefox", line[2]):
browsers['Firefox'] += 1
elif re.search("Chrome", line[2]):
browsers['Google Chrome'] += 1
elif re.search("MSIE", line[2]):
browsers['Internet Explorer'] += 1
elif re.search("Safari[^Chrome]", line[2]):
browsers['Safari'] += 1
"""Part V Extra Credit
For extra credit, your program should output a list of hours of the day sorted by the total number of hits that
occurred in that hour. The datetime is given by the second column, which you can extract the hour from
using the Datetime module from last week. Using that information, your program should print to the screen
something like:
"""
HoursSorted(line)
print("Files that are images: " + str(images))
    imagePct = (images / lines) * 100
    print("Image requests account for {:.1f}% of all requests".format(imagePct))
for browser in browsers:
print(browser + " usage: " + str(browsers[browser]))
topB = max(browsers, key=browsers.get)
print("{} is the most popular broswer with {} uses.".format(topB, browsers[topB]))
for hour in hours:
print("Hour {} has {} hits.".format(hour, hours[hour]))
def HoursSorted(line):
    hour = (datetime.datetime.strptime(line[1], "%m/%d/%Y %H:%M:%S")).hour
hours[hour] += 1
def main():
try:
# Pull file from internet
source = input('File Source: ')
csvData = downloadData(source)
except ValueError:
print('Invalid URL.')
exit()
processData(csvData)
if __name__ == '__main__':
main()
# In[ ]:
|
#Array range
class Stack:
def __init__(self):
self.stack = list()
def isEmpty(self):
return self.stack == []
def peek(self):
assert not self.isEmpty() , "Cannot peek from empty stack"
return self.stack[-1]
def pop(self):
assert not self.isEmpty() , "Cannot pop from empty stack"
return self.stack.pop()
def push(self , val):
self.stack.append(val)
def overLapStack(arr):
#[(2, 6), (3, 5), (7, 25), (20, 23)]
# {{1,3}, {2,4}, {5,7}, {6,8} }
arr.sort()
stack = Stack()
stack.push(arr[0])
    for i in range(1, len(arr)):
        print(stack.stack)
        top = stack.peek()
        if arr[i][0] <= top[1]:
            # overlapping: merge by extending the end of the top interval
            temp = stack.pop()
            stack.push((temp[0], max(temp[1], arr[i][1])))
        else:
            stack.push(arr[i])
print(stack.stack)
def overLapBF(arr):
    new_arr = []
    for i in range(0, len(arr)):
        for j in range(i + 1, len(arr)):
            # with arr sorted by start, intervals i and j overlap when j starts before i ends
            if arr[j][0] <= arr[i][1]:
                new_arr.append(arr[i])
    print(new_arr)
#arr = [(2, 6), (3, 5), (7, 21), (20, 21)]
#arr = [(1, 3), (2, 4), (5, 7), (6, 8)]
arr = [[1,3], [2,4], [5,7], [6,8]]
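# Expected merged intervals for this input: [(1, 4), (5, 8)]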
overLapStack(arr)
|
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
""" Path to media monitoring data (monthly) """
DATA_PATH = os.path.join(PROJECT_ROOT, 'data/')
""" Segments """
segment1 = {"label": "Undecided", "value": "UND"}
segment2 = {"label": "Abstainer", "value": "ABS"}
segment3 = {"label": "PJD", "value": "PJD"}
segment4 = {"label": "PAM", "value": "PAM"}
segment5 = {"label": "RNI", "value": "RNI"}
segment6 = {"label": "Istiqlal", "value": "IST"}
segment7 = {"label": "Other", "value": "OTH"}
SEGMENT_LIST = [segment1, segment2, segment3, segment4, segment5, segment6, segment7]
DEFAULT_CLUSTERS = 'cluster_Forgotten|cluster_Aspirational Youth|cluster_Snowflakes|cluster_Average|cluster_PJD|cluster_OTH|cluster_PAM|cluster_Empty Nest Mothers|cluster_IST|cluster_RNI|cluster_Urban Professional'
""" Hover-over explanations """
big_five = """
The Big Five personality
traits, per segment,
normalised to the population mean.
"""
segment_tilt = """
The most prominent features within each segment, normalised against the population mean.
"""
feature_importance = """
Normalised chi2 statistic for all the features.
"""
|
"""Advent of Code 2019 Day 20 - Donut Maze."""
from collections import defaultdict, deque
def maze_bfs(maze, start, end, portals, recursive=False):
"""BFS from entrance to exit of maze with portals.
    Args:
        maze (dict): {Coords: Value} dictionary representing the maze.
        start (tuple): Coords of the entrance (x, y).
        end (tuple): Coords of the exit (x, y).
        portals (dict): Dictionary mapping portal coords to their destinations.
recursive (bool): Treat portals like recursive mazes.
Returns:
Length of the shortest path (int).
"""
x_edges = (2, max([x for x, y in maze.keys()]) - 2)
y_edges = (2, max([y for x, y in maze.keys()]) - 2)
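    # Coordinates on these outer rings are outer portals: in the recursive maze
    # they lead one level up, while inner portals lead one level down.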
visited = set()
visited.add((start, 0))
queue = deque()
queue.append((start, 0, 0))
while queue:
coords, steps, level = queue.popleft()
if coords in portals:
warp_to = portals[coords]
            if not recursive:
                new_level = level
            elif coords[0] in x_edges or coords[1] in y_edges:
                new_level = level - 1
            else:
                new_level = level + 1
if (warp_to, new_level) not in visited and new_level >= 0:
visited.add((warp_to, new_level))
queue.append((warp_to, steps + 1, new_level))
x, y = coords
next_nodes = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
for node in next_nodes:
if (node, level) in visited:
continue
if node == end:
if not recursive or level == 0:
return steps + 1
else:
queue.append((node, steps + 1, level))
tile_value = maze.get(node)
if tile_value == '.':
visited.add((node, level))
queue.append((node, steps + 1, level))
return False
with open('input.txt') as f:
input_map = [line.strip('\n') for line in f.readlines()]
maze_dict = {}
for y in range(len(input_map)):
for x in range(len(input_map[0])):
maze_dict[(x, y)] = input_map[y][x]
portal_locations = defaultdict(list)
for coords, tile in maze_dict.items():
if tile in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
portal_code = tile
x, y = coords
neighbours = {
'l': (x - 1, y), 'r': (x + 1, y),
'd': (x, y + 1), 'u': (x, y - 1)
}
for direction, new_coords in neighbours.items():
portal_coords = None
value = maze_dict.get(new_coords, '#')
if value in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
if direction == 'l':
portal_code = value + portal_code
elif direction == 'r':
portal_code += value
elif direction == 'u':
portal_code = value + portal_code
elif direction == 'd':
portal_code += value
elif value == '.':
portal_coords = new_coords
else:
continue
if not portal_coords:
new_x, new_y = new_coords
adjacents = [
(new_x - 1, new_y), (new_x + 1, new_y),
(new_x, new_y - 1), (new_x, new_y + 1)
]
for adjacent in adjacents:
value = maze_dict.get(adjacent)
if value == '.':
portal_coords = adjacent
break
if portal_coords not in portal_locations[portal_code]:
if portal_coords:
portal_locations[portal_code].append(portal_coords)
portal_links = {}
for code, coords in portal_locations.items():
if code == 'AA':
entrance = coords[0]
elif code == 'ZZ':
maze_exit = coords[0]
else:
portal_links[coords[0]] = coords[1]
portal_links[coords[1]] = coords[0]
# Answer One
fewest_steps = maze_bfs(maze_dict, entrance, maze_exit, portal_links)
print("Fewest steps required to navigate the maze:", fewest_steps)
# Answer Two
fewest_steps = maze_bfs(maze_dict, entrance, maze_exit, portal_links, True)
print("Fewest steps required to navigate the recursive maze:", fewest_steps)
|
from functools import cached_property
from onegov.activity import Activity, PeriodCollection, Occasion
from onegov.activity import BookingCollection
from onegov.core.elements import Link, Confirm, Intercooler, Block
from onegov.core.elements import LinkGroup
from onegov.core.utils import linkify, paragraphify
from onegov.feriennet import _
from onegov.feriennet import security
from onegov.feriennet.collections import BillingCollection
from onegov.feriennet.collections import NotificationTemplateCollection
from onegov.feriennet.collections import OccasionAttendeeCollection
from onegov.feriennet.collections import VacationActivityCollection
from onegov.feriennet.const import OWNER_EDITABLE_STATES
from onegov.feriennet.models import InvoiceAction, VacationActivity
from onegov.org.layout import DefaultLayout as BaseLayout
from onegov.pay import PaymentProviderCollection
from onegov.ticket import TicketCollection
class DefaultLayout(BaseLayout):
@property
def is_owner(self):
return security.is_owner(self.request.current_username, self.model)
@property
def is_editable(self):
if self.request.is_admin:
return True
if not self.request.is_organiser:
return False
if isinstance(self.model, Activity):
return self.model.state in OWNER_EDITABLE_STATES
if isinstance(self.model, Occasion):
return self.model.activity.state in OWNER_EDITABLE_STATES
return True
def offer_again_link(self, activity, title):
return Link(
text=title,
url=self.request.class_link(
VacationActivity,
{'name': activity.name},
name="offer-again"
),
traits=(
Confirm(
_(
'Do you really want to provide "${title}" again?',
mapping={'title': activity.title}
),
_("You will have to request publication again"),
_("Provide Again"),
_("Cancel")
),
Intercooler(
request_method="POST",
redirect_after=self.request.class_link(
VacationActivity, {'name': activity.name},
)
)
),
attrs={'class': 'offer-again'}
)
def linkify(self, text):
return linkify(text)
def paragraphify(self, text):
return paragraphify(text)
class VacationActivityCollectionLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(_("Activities"), self.request.class_link(
VacationActivityCollection)),
]
@property
def organiser_links(self):
if self.app.active_period:
yield Link(
text=_("Submit Activity"),
url=self.request.link(self.model, name='new'),
attrs={'class': 'new-activity'}
)
            offer_again = self.offer_again_links
            if offer_again:
                yield offer_again
@property
def offer_again_links(self):
q = self.app.session().query(VacationActivity)
q = q.filter_by(username=self.request.current_username)
q = q.filter_by(state='archived')
q = q.with_entities(
VacationActivity.title,
VacationActivity.name,
)
q = q.order_by(VacationActivity.order)
activities = tuple(q)
if activities:
return LinkGroup(
_("Provide activity again"),
tuple(self.offer_again_link(a, a.title) for a in activities),
right_side=False,
classes=('provide-activity-again', )
)
@cached_property
def editbar_links(self):
if not self.request.is_organiser:
return None
links = []
if self.request.is_organiser:
links.extend(self.organiser_links)
return links
class BookingCollectionLayout(DefaultLayout):
def __init__(self, model, request, user=None):
super().__init__(model, request)
self.user = user or request.current_user
def rega_link(self, attendee, period, grouped_bookings):
if not any((period, attendee, grouped_bookings)):
return
if self.request.app.org.meta['locales'] == 'de_CH':
return 'https://www.rega.ch/partner/' \
'das-pro-juventute-engagement-der-rega'
if self.request.app.org.meta['locales'] == 'it_CH':
return 'https://www.rega.ch/it/partner/' \
'limpegno-pro-juventute-della-rega'
return 'https://www.rega.ch/fr/partenariats/' \
'lengagement-de-la-rega-en-faveur-de-pro-juventute'
@cached_property
def title(self):
wishlist_phase = self.app.active_period \
and self.app.active_period.wishlist_phase
if self.user.username == self.request.current_username:
return wishlist_phase and _("Wishlist") or _("Bookings")
elif wishlist_phase:
return _("Wishlist of ${user}", mapping={
'user': self.user.title
})
else:
return _("Bookings of ${user}", mapping={
'user': self.user.title
})
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(self.title, self.request.link(self.model))
]
class GroupInviteLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
wishlist_phase = self.app.active_period \
and self.app.active_period.wishlist_phase
if self.request.is_logged_in:
return [
Link(_("Homepage"), self.homepage_url),
Link(
wishlist_phase and _("Wishlist") or _("Bookings"),
self.request.class_link(BookingCollection)
),
Link(_("Group"), '#')
]
else:
return [
Link(_("Homepage"), self.homepage_url),
Link(_("Group"), '#')
]
class VacationActivityFormLayout(DefaultLayout):
def __init__(self, model, request, title):
super().__init__(model, request)
self.include_editor()
self.title = title
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(_("Activities"), self.request.link(self.model)),
Link(self.title, '#')
)
@cached_property
def editbar_links(self):
return None
class OccasionFormLayout(DefaultLayout):
def __init__(self, model, request, title):
assert isinstance(model, Activity)
super().__init__(model, request)
self.title = title
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(_("Activities"), self.request.class_link(
VacationActivityCollection)),
Link(self.model.title, self.request.link(self.model)),
Link(self.title, '#')
)
@cached_property
def editbar_links(self):
return None
class VacationActivityLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(_("Activities"), self.request.class_link(
VacationActivityCollection)),
Link(self.model.title, self.request.link(self.model))
)
@cached_property
def latest_request(self):
return self.model.latest_request
@cached_property
def ticket(self):
if self.latest_request:
tickets = TicketCollection(self.request.session)
return tickets.by_handler_id(self.latest_request.id.hex)
@cached_property
def attendees(self):
if self.request.app.default_period:
return OccasionAttendeeCollection(
self.request.session,
self.request.app.default_period,
self.model
)
@cached_property
def editbar_links(self):
links = []
period = self.request.app.active_period
if self.request.is_admin or self.is_owner:
if self.model.state == 'archived' and period:
links.append(
self.offer_again_link(self.model, _("Provide Again")))
if self.is_editable:
if self.model.state == 'preview':
if not period:
links.append(Link(
text=_("Request Publication"),
url='#',
attrs={'class': 'request-publication'},
traits=(
Block(
_(
"There is currently no active period. "
"Please retry once a period has been "
"activated."
),
no=_("Cancel")
),
)
))
elif self.model.has_occasion_in_period(period):
links.append(Link(
text=_("Request Publication"),
url=self.request.link(self.model, name='propose'),
attrs={'class': 'request-publication'},
traits=(
Confirm(
_(
"Do you really want to request "
"publication?"
),
_("This cannot be undone."),
_("Request Publication")
),
Intercooler(
request_method="POST",
redirect_after=self.request.link(self.model)
)
)
))
else:
links.append(Link(
text=_("Request Publication"),
url='#',
attrs={'class': 'request-publication'},
traits=(
Block(
_(
"Please add at least one occasion "
"before requesting publication."
),
no=_("Cancel")
),
)
))
if not self.model.publication_requests:
links.append(Link(
text=_("Discard"),
url=self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(_(
'Do you really want to discard "${title}"?',
mapping={'title': self.model.title}
), _(
"This cannot be undone."
), _(
"Discard Activity"
), _(
"Cancel")
),
Intercooler(
request_method="DELETE",
redirect_after=self.request.class_link(
VacationActivityCollection
)
)
)
))
links.append(Link(
text=_("Edit"),
url=self.request.link(self.model, name='edit'),
attrs={'class': 'edit-link'}
))
if not self.request.app.periods:
links.append(Link(
text=_("New Occasion"),
url='#',
attrs={'class': 'new-occasion'},
traits=(
Block(
_("Occasions cannot be created yet"),
_(
"There are no periods defined yet. At least "
"one period needs to be defined before "
"occasions can be created."
),
_("Cancel")
)
)
))
else:
links.append(Link(
text=_("New Occasion"),
url=self.request.link(self.model, 'new-occasion'),
attrs={'class': 'new-occasion'}
))
if self.request.is_admin or self.is_owner:
if self.attendees:
links.append(Link(
text=_("Attendees"),
url=self.request.link(self.attendees),
attrs={'class': 'show-attendees'}
))
if self.request.is_admin:
if self.model.state != 'preview' and self.ticket:
links.append(Link(
text=_("Show Ticket"),
url=self.request.link(self.ticket),
attrs={'class': 'show-ticket'}
))
return links
class PeriodCollectionLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(_("Manage Periods"), '#')
)
@cached_property
def editbar_links(self):
return (
Link(
_("New Period"),
self.request.link(self.model, 'new'),
attrs={'class': 'new-period'}
),
)
class PeriodFormLayout(DefaultLayout):
def __init__(self, model, request, title):
super().__init__(model, request)
self.title = title
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(
_("Manage Periods"),
self.request.class_link(PeriodCollection)
),
Link(self.title, '#')
)
@cached_property
def editbar_links(self):
return None
class MatchCollectionLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(_("Matches"), '#')
)
class BillingCollectionLayout(DefaultLayout):
@property
def families(self):
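        # One row per manually booked invoice-item "family": a label with the
        # average amount in CHF, a representative item id, the item count, and
        # whether any item in the family was already paid online (used below to
        # block wholesale removal of such families).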
yield from self.app.session().execute("""
SELECT
text
|| ' ('
|| replace(avg(unit * quantity)::money::text, '$', '')
|| ' CHF)'
AS text
,
MIN(id::text) AS item,
COUNT(*) AS count,
family IN (
SELECT DISTINCT(family)
FROM invoice_items
WHERE source IS NOT NULL and source != 'xml'
) AS has_online_payments
FROM invoice_items
WHERE family IS NOT NULL
GROUP BY family, text
ORDER BY text
""")
@property
def family_removal_links(self):
attrs = {
'class': ('remove-manual', 'extend-to-family')
}
for record in self.families:
text = _('Delete "${text}"', mapping={
'text': record.text,
})
url = self.csrf_protected_url(
self.request.class_link(InvoiceAction, {
'id': record.item,
'action': 'remove-manual',
'extend_to': 'family'
})
)
if record.has_online_payments:
traits = (
Block(
_(
"This booking cannot be removed, at least one "
"booking has been paid online."
),
_(
"You may remove the bookings manually one by one."
),
_("Cancel")
),
)
else:
traits = (
Confirm(
_('Do you really want to remove "${text}"?', mapping={
'text': record.text
}),
_("${count} bookings will be removed", mapping={
'count': record.count
}),
_("Remove ${count} bookings", mapping={
'count': record.count
}),
_("Cancel")
),
Intercooler(request_method='POST')
)
yield Link(text=text, url=url, attrs=attrs, traits=traits)
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(_("Billing"), '#')
)
@cached_property
def editbar_links(self):
return (
Link(
_("Import Bank Statement"),
self.request.link(self.model, 'import'),
attrs={'class': 'import'}
),
Link(
_("Synchronise Online Payments"),
self.request.return_here(
self.request.class_link(
PaymentProviderCollection, name='sync')),
attrs={'class': 'sync'},
),
LinkGroup(
title=_("Accounting"),
links=[
Link(
text=_("Manual Booking"),
url=self.request.link(
self.model,
name='booking'
),
attrs={'class': 'new-booking'},
traits=(
Block(_(
"Manual bookings can only be added "
"once the billing has been confirmed."
), no=_("Cancel")),
) if not self.model.period.finalized else tuple()
),
*self.family_removal_links
]
)
)
class OnlinePaymentsLayout(DefaultLayout):
def __init__(self, *args, **kwargs):
self.title = kwargs.pop('title')
super().__init__(*args, **kwargs)
@cached_property
def editbar_links(self):
return (
Link(
_("Synchronise Online Payments"),
self.request.return_here(
self.request.class_link(
PaymentProviderCollection, name='sync')),
attrs={'class': 'sync'},
),
)
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(
_("Billing"),
self.request.class_link(BillingCollection)
),
Link(self.title, '#')
)
class BillingCollectionImportLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(_("Billing"), self.request.link(self.model)),
Link(_("Import Bank Statement"), '#')
)
class BillingCollectionManualBookingLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(_("Billing"), self.request.link(self.model)),
Link(_("Manual Booking"), '#')
)
class BillingCollectionPaymentWithDateLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(_("Billing"), self.request.link(self.model)),
Link(_("Payment with date"), '#')
)
class InvoiceLayout(DefaultLayout):
def __init__(self, model, request, title):
super().__init__(model, request)
self.title = title
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(self.title, '#')
)
class DonationLayout(DefaultLayout):
def __init__(self, model, request, title):
super().__init__(model, request)
self.title = title
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(_("Invoices"), self.request.link(self.model)),
Link(_("Donation"), self.title)
)
class OccasionAttendeeLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return (
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(
self.model.activity.title,
self.request.link(self.model.activity)
),
Link(_("Attendees"), '#')
)
class NotificationTemplateCollectionLayout(DefaultLayout):
def __init__(self, model, request, subtitle=None):
super().__init__(model, request)
self.subtitle = subtitle
@cached_property
def breadcrumbs(self):
links = [
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(
_("Notification Templates"),
self.request.class_link(NotificationTemplateCollection)
)
]
if self.subtitle:
links.append(Link(self.subtitle, '#'))
return links
@cached_property
def editbar_links(self):
if not self.subtitle:
return (
Link(
_("New Notification Template"),
self.request.link(self.model, 'new'),
attrs={'class': 'new-notification'}
),
)
class NotificationTemplateLayout(DefaultLayout):
def __init__(self, model, request, subtitle=None):
super().__init__(model, request)
self.subtitle = subtitle
@cached_property
def breadcrumbs(self):
links = [
Link(_("Homepage"), self.homepage_url),
Link(
_("Activities"),
self.request.class_link(VacationActivityCollection)
),
Link(
_("Notification Templates"),
self.request.class_link(NotificationTemplateCollection)
),
Link(
self.model.subject,
self.request.link(self.model)
)
]
if self.subtitle:
links.append(Link(self.subtitle, '#'))
return links
class VolunteerLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(_("Volunteers"), self.request.link(self.model))
]
class VolunteerFormLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return [
Link(_("Homepage"), self.homepage_url),
Link(
_("Join as a Volunteer"),
self.request.class_link(
VacationActivityCollection, name='volunteer'
)
),
Link(
_("Register as Volunteer"),
'#'
)
]
class HomepageLayout(DefaultLayout):
@property
def editbar_links(self):
if self.request.is_manager:
return [
Link(
_("Sort"),
self.request.link(self.model, 'sort'),
attrs={'class': ('sort-link')}
)
]
|
from rest_framework.response import Response
from rest_framework.views import APIView
from employee_core.api.serializers import EmployeeObjectSerializer
from employee_core.models import Employee
class EmployeeObjectView(APIView):
def get(self, request, *args, **kwargs):
qs = Employee.objects.all()
serializer = EmployeeObjectSerializer(qs, many=True)
data = {
"data": serializer.data,
}
return Response(data=data)
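# A hypothetical urls.py entry for wiring this view up might look like the
# following (the module path and route are assumptions, not part of this app):
#
#   from django.urls import path
#   from employee_core.api.views import EmployeeObjectView
#
#   urlpatterns = [
#       path('api/employees/', EmployeeObjectView.as_view(), name='employee-list'),
#   ]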
|
import time
class Solution:
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
n = len(s)
nums = []
if s == '':
return 0
if len(s) == 1:
return 1
for i in range(n):
#use a string here instead of a list, for time-complexity (speed) reasons
#temp = [s[i]]
temp = s[i]
for j in range(i + 1, n):
if s[j] not in temp:
#updating via list append here fails the time limit.
# temp.append(s[j])
temp += (s[j])
#once the last character is reached, stop searching: this substring is already the longest extension from i, so there is nothing further to scan
if j == (n - 1):
nums.append(len(temp))
return max(nums)
else:
nums.append(len(temp))
break
return max(nums)
#
# class Solution:
# def lengthOfLongestSubstring(self, s):
# if s == '':
# return 0
# if len(s) == 1:
# return 1
# subcount = set()
# substrings = set()
# for i in range(len(s)-1):
# substring = s[i]
# for j in range(i+1, len(s)):
# if s[j] not in substring:
# substring += s[j]
# if j == len(s)-1:
# substrings.add(substring)
# subcount.add(len(substring))
# return max(subcount)
# else:
# substrings.add(substring)
# subcount.add(len(substring))
# break
# return max(subcount)
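# For comparison, a sliding-window approach runs in O(n): each character enters
# and leaves the window at most once. A minimal sketch (not the submitted
# solution above):
def length_of_longest_substring_linear(s):
    last_seen = {}  # char -> index of its most recent occurrence
    best = 0
    left = 0        # left edge of the current duplicate-free window
    for right, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= left:
            left = last_seen[ch] + 1  # jump past the previous occurrence
        last_seen[ch] = right
        best = max(best, right - left + 1)
    return best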
start = time.time()
s = Solution()
a = s.lengthOfLongestSubstring('pwwkew')
end = time.time()
print(a)
print(end - start)
|
import socket
import random
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 4445))
random_number = random.randrange(100)
s.send(str(random_number).encode())
s.close()
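# A minimal peer for local testing (an assumption about what the server at
# 127.0.0.1:4445 does; kept commented out so this file stays a runnable client):
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind(('127.0.0.1', 4445))
# server.listen(1)
# conn, addr = server.accept()
# print(conn.recv(1024).decode())
# conn.close()
# server.close()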
|
#importing necessary libraries
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
from torchvision import datasets, models, transforms
import torch.nn.functional as F
import torch.utils.data
import pandas as pd
from collections import OrderedDict
from PIL import Image
import argparse
import json
# define Mandatory and Optional Arguments for the script
parser = argparse.ArgumentParser (description = "Parser of training script")
parser.add_argument ('data_dir', help = 'Provide data directory. Mandatory argument', type = str)
parser.add_argument ('--save_dir', help = 'Provide saving directory. Optional argument', type = str)
parser.add_argument ('--arch', help = 'Vgg13 can be used if this argument specified, otherwise Alexnet will be used', type = str)
parser.add_argument ('--lrn', help = 'Learning rate, default value 0.001', type = float)
parser.add_argument ('--hidden_units', help = 'Hidden units in Classifier. Default value is 2048', type = int)
parser.add_argument ('--epochs', help = 'Number of epochs', type = int)
parser.add_argument ('--GPU', help = "Option to use GPU", type = str)
#setting values data loading
args = parser.parse_args ()
data_dir = args.data_dir
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
#defining device: either cuda or cpu
if args.GPU == 'GPU':
device = 'cuda'
else:
device = 'cpu'
#data loading
if data_dir: #making sure we do have value for data_dir
# Define your transforms for the training, validation, and testing sets
train_data_transforms = transforms.Compose ([transforms.RandomRotation (30),
transforms.RandomResizedCrop (224),
transforms.RandomHorizontalFlip (),
transforms.ToTensor (),
transforms.Normalize ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
valid_data_transforms = transforms.Compose ([transforms.Resize (255),
transforms.CenterCrop (224),
transforms.ToTensor (),
transforms.Normalize ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
test_data_transforms = transforms.Compose ([transforms.Resize (255),
transforms.CenterCrop (224),
transforms.ToTensor (),
transforms.Normalize ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
# Load the datasets with ImageFolder
train_image_datasets = datasets.ImageFolder (train_dir, transform = train_data_transforms)
valid_image_datasets = datasets.ImageFolder (valid_dir, transform = valid_data_transforms)
test_image_datasets = datasets.ImageFolder (test_dir, transform = test_data_transforms)
# Using the image datasets and the transforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(train_image_datasets, batch_size = 64, shuffle = True)
valid_loader = torch.utils.data.DataLoader(valid_image_datasets, batch_size = 64, shuffle = True)
test_loader = torch.utils.data.DataLoader(test_image_datasets, batch_size = 64, shuffle = True)
#end of data loading block
#mapping from category label to category name
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
def load_model (arch, hidden_units):
if arch == 'vgg13': #setting model based on vgg13
model = models.vgg13 (pretrained = True)
for param in model.parameters():
param.requires_grad = False
if hidden_units: #in case hidden_units were given
classifier = nn.Sequential (OrderedDict ([
('fc1', nn.Linear (25088, 4096)),
('relu1', nn.ReLU ()),
('dropout1', nn.Dropout (p = 0.3)),
('fc2', nn.Linear (4096, hidden_units)),
('relu2', nn.ReLU ()),
('dropout2', nn.Dropout (p = 0.3)),
('fc3', nn.Linear (hidden_units, 102)),
('output', nn.LogSoftmax (dim =1))
]))
else: #if hidden_units not given
classifier = nn.Sequential (OrderedDict ([
('fc1', nn.Linear (25088, 4096)),
('relu1', nn.ReLU ()),
('dropout1', nn.Dropout (p = 0.3)),
('fc2', nn.Linear (4096, 2048)),
('relu2', nn.ReLU ()),
('dropout2', nn.Dropout (p = 0.3)),
('fc3', nn.Linear (2048, 102)),
('output', nn.LogSoftmax (dim =1))
]))
else: #setting model based on the default alexnet
arch = 'alexnet' #will be used for checkpoint saving, so should be explicitly defined
model = models.alexnet (pretrained = True)
for param in model.parameters():
param.requires_grad = False
if hidden_units: #in case hidden_units were given
classifier = nn.Sequential (OrderedDict ([
('fc1', nn.Linear (9216, 4096)),
('relu1', nn.ReLU ()),
('dropout1', nn.Dropout (p = 0.3)),
('fc2', nn.Linear (4096, hidden_units)),
('relu2', nn.ReLU ()),
('dropout2', nn.Dropout (p = 0.3)),
('fc3', nn.Linear (hidden_units, 102)),
('output', nn.LogSoftmax (dim =1))
]))
else: #if hidden_units not given
classifier = nn.Sequential (OrderedDict ([
('fc1', nn.Linear (9216, 4096)),
('relu1', nn.ReLU ()),
('dropout1', nn.Dropout (p = 0.3)),
('fc2', nn.Linear (4096, 2048)),
('relu2', nn.ReLU ()),
('dropout2', nn.Dropout (p = 0.3)),
('fc3', nn.Linear (2048, 102)),
('output', nn.LogSoftmax (dim =1))
]))
model.classifier = classifier #the classifier can be set once here, since the if/else branches are mutually exclusive
return model, arch
# Defining validation Function. will be used during training
def validation(model, valid_loader, criterion):
model.to (device)
valid_loss = 0
accuracy = 0
for inputs, labels in valid_loader:
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
valid_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return valid_loss, accuracy
#loading model using the function defined above
model, arch = load_model (args.arch, args.hidden_units)
#Actual training of the model
#initializing criterion and optimizer
criterion = nn.NLLLoss ()
if args.lrn: #if learning rate was provided
optimizer = optim.Adam (model.classifier.parameters (), lr = args.lrn)
else:
optimizer = optim.Adam (model.classifier.parameters (), lr = 0.001)
model.to (device) #device can be either cuda or cpu
#setting number of epochs to be run
if args.epochs:
epochs = args.epochs
else:
epochs = 7
print_every = 40
steps = 0
#running through epochs
for e in range (epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate (train_loader):
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad () #where optimizer is working on classifier paramters only
# Forward and backward passes
outputs = model.forward (inputs) #calculating output
loss = criterion (outputs, labels) #calculating loss (cost function)
loss.backward ()
optimizer.step () #performs single optimization step
running_loss += loss.item () # loss.item () returns scalar value of Loss function
if steps % print_every == 0:
model.eval () #switching to evaluation mode so that dropout is turned off
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
valid_loss, accuracy = validation(model, valid_loader, criterion)
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Valid Loss: {:.3f}.. ".format(valid_loss/len(valid_loader)),
"Valid Accuracy: {:.3f}%".format(accuracy/len(valid_loader)*100))
running_loss = 0
# Make sure training is back on
model.train()
#saving trained Model
model.to ('cpu') #no need to use cuda for saving/loading model.
# Save the checkpoint
model.class_to_idx = train_image_datasets.class_to_idx #saving the mapping between class names and their numeric indices
#creating dictionary for model saving
checkpoint = {'classifier': model.classifier,
'state_dict': model.state_dict (),
'arch': arch,
'mapping': model.class_to_idx
}
#saving trained model for future use
if args.save_dir:
torch.save (checkpoint, args.save_dir + '/checkpoint.pth')
else:
torch.save (checkpoint, 'checkpoint.pth')
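# For later inference, the checkpoint saved above can be restored along these
# lines (a sketch that simply mirrors the dictionary keys used when saving):
def load_checkpoint (path):
    checkpoint = torch.load (path)
    if checkpoint ['arch'] == 'vgg13': #rebuild the backbone named at save time
        model = models.vgg13 (pretrained = True)
    else:
        model = models.alexnet (pretrained = True)
    for param in model.parameters ():
        param.requires_grad = False
    model.classifier = checkpoint ['classifier']
    model.load_state_dict (checkpoint ['state_dict'])
    model.class_to_idx = checkpoint ['mapping']
    return model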
|
from .mplbasewidget import MatplotlibBaseWidget
from .mplcurvewidget import MatplotlibCurveWidget
from .mplerrorbarwidget import MatplotlibErrorbarWidget
from .mplimagewidget import MatplotlibImageWidget
from .mplbarwidget import MatplotlibBarWidget
__all__ = [
'MatplotlibBaseWidget',
'MatplotlibCurveWidget',
'MatplotlibErrorbarWidget',
'MatplotlibImageWidget',
'MatplotlibBarWidget',
]
|
import os, re
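# NOTE: this script targets Python 2 (print statements and bytes-mode file
# handling below will not run unchanged under Python 3).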
invertInput_arr = [
{
"IMCR" : "28",
"Name" : "eMIOS_0_emios_y_in_28"
},
{
"IMCR" : "29",
"Name" : "eMIOS_0_emios_y_in_29"
},
{
"IMCR" : "30",
"Name" : "eMIOS_0_emios_y_in_30"
},
{
"IMCR" : "31",
"Name" : "eMIOS_0_emios_y_in_31"
},
{
"IMCR" : "64",
"Name" : "eMIOS_1_emios_y_in_28"
},
{
"IMCR" : "65",
"Name" : "eMIOS_1_emios_y_in_29"
},
{
"IMCR" : "66",
"Name" : "eMIOS_1_emios_y_in_30"
},
{
"IMCR" : "67",
"Name" : "eMIOS_1_emios_y_in_31"
}
]
################## CHANGE HERE ONLY ##################
# Type of file that you want to update
# typeFile = PinSettingsPrg,
# IncItem,
# PropertyModelConfigurationXml,
# SignalConfigurationXml,
# HalPortBridgeV2Prg,
# All
# typeFile = "HalPortBridgeV2Prg"
# Family chip
family = "C55"
# The name of Package
all_packages = [
"MPC5744B_100",
"MPC5744B_176",
"MPC5744B_256",
"MPC5745B_100",
"MPC5745B_176",
"MPC5745B_256",
"MPC5746B_100",
"MPC5746B_176",
"MPC5746B_256",
"MPC5744C_100",
"MPC5744C_176",
"MPC5744C_256",
"MPC5745C_100",
"MPC5745C_176",
"MPC5745C_256",
"MPC5746C_100",
"MPC5746C_176",
"MPC5746C_256",
]
# The path of repository
ksdk_path = "e:/C55SDK/sdk_codebase1/"
# Output line ending (note: '\r\n' is actually the Windows/CRLF convention, despite the variable name)
unix_standard = '\r\n'
######################################################
def update_and_write_data_to_file(fFile, wdata, line_ending):
for line in wdata:
line = line.replace("\r\n", line_ending)
fFile.write(line)
######################################################
def pinsettings_create_data(mdata):
raw_data = []
invertExist = 1
line_index = 0
temp_imcr =""
for line in mdata:
temp_data = re.search(r'^.*%:count=%get_item_config_sequence\((.*)_inputInversionSelect,PinMuxInit\).*', mdata[line_index], re.M|re.I)
if temp_data:
temp_imcr = temp_data.group(1)
temp_invert = 0
for pin in invertInput_arr:
if temp_imcr == pin["Name"]:
temp_invert = 1
break
if temp_invert == 1:
print temp_imcr
else:
invertExist = 0
if invertExist == 0:
for i in range(0,2):
mdata[line_index+i] = ""
invertExist = 1
raw_data.append(mdata[line_index])
line_index += 1
return raw_data
######################################################
def incIteam_create_data(mdata):
raw_data = []
invertExist = 1
line_index = 0
temp_imcr =""
for line in mdata:
if mdata[line_index].count(' <GrupItem>'):
temp_data = re.search(r'^.*<Symbol>(.*)_inputInversionSelect</Symbol>.*', mdata[line_index+3], re.M|re.I)
if temp_data:
temp_imcr = temp_data.group(1)
temp_invert = 0
for pin in invertInput_arr:
if temp_imcr == pin["Name"]:
temp_invert = 1
break
if temp_invert == 1:
print temp_imcr
else:
invertExist = 0
if invertExist == 0:
for i in range(0,21):
mdata[line_index+i] = ""
invertExist = 1
raw_data.append(mdata[line_index])
line_index += 1
return raw_data
######################################################
def signal_xml_create_data(mdata):
raw_data = []
invertExist = 1
line_index = 0
temp_imcr =""
for line in mdata:
if mdata[line_index].count(' <functional_property id="inputInversionSelect">'):
temp_data = re.search(r'^.*<enum_property id="(.*)_inputInversionSelect" default="doNotInvert">.*', mdata[line_index+2], re.M|re.I)
if temp_data:
temp_imcr = temp_data.group(1)
temp_invert = 0
for pin in invertInput_arr:
if temp_imcr == pin["Name"]:
temp_invert = 1
break
if temp_invert == 1:
print temp_imcr
else:
invertExist = 0
if invertExist == 0:
for i in range(0,15):
mdata[line_index+i] = ""
invertExist = 1
raw_data.append(mdata[line_index])
line_index += 1
return raw_data
######################################################
def Make_Pinsettings(package):
file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Drivers/" + family + "/PinSettings_" + package + ".prg")
file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Drivers/" + family + "/PinSettings_" + package + "_new.prg"
# print file_path_import
# print file_path_export
directory_export = os.path.dirname(file_path_export)
print directory_export
if not os.path.exists(directory_export):
print "File does not exist"
else:
file_import = open(file_path_import, "rb").readlines()
file_export = open(file_path_export, "wb")
local_data = pinsettings_create_data(file_import)
update_and_write_data_to_file(file_export, local_data, unix_standard)
file_export.close()
os.renames(directory_export + "/PinSettings_" + package + ".prg", directory_export + "/PinSettings_" + package + "_old.prg")
os.renames(directory_export + "/PinSettings_" + package + "_new.prg", directory_export + "/PinSettings_" + package + ".prg")
print "Done Make_Pinsettings"
def Make_IncIteam(package):
file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Inc" + package + ".item")
file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Inc" + package + "_new.item"
directory_export = os.path.dirname(file_path_export)
print directory_export
if not os.path.exists(directory_export):
print "File does not exist"
else:
file_import = open(file_path_import, "rb").readlines()
file_export = open(file_path_export, "wb")
local_data = incIteam_create_data(file_import)
update_and_write_data_to_file(file_export, local_data, unix_standard)
file_export.close()
os.renames(directory_export + "/Inc" + package + ".item", directory_export + "/Inc" + package + "_old.item")
os.renames(directory_export + "/Inc" + package + "_new.item", directory_export + "/Inc" + package + ".item")
print "Done Make_IncIteam"
def Make_SignalXml(package):
file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/CPUs/" + package + "/signal_configuration.xml")
file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/CPUs/" + package + "/signal_configuration_new.xml"
directory_export = os.path.dirname(file_path_export)
print directory_export
if not os.path.exists(directory_export):
print "File does not exist"
else:
file_import = open(file_path_import, "rb").readlines()
file_export = open(file_path_export, "wb")
local_data = signal_xml_create_data(file_import)
update_and_write_data_to_file(file_export, local_data, unix_standard)
file_export.close()
os.renames(directory_export + "/signal_configuration.xml", directory_export + "/signal_configuration_old.xml")
os.renames(directory_export + "/signal_configuration_new.xml", directory_export + "/signal_configuration.xml")
print "Done Make_SignalXml"
def Make_All(package):
Make_Pinsettings(package) #Prg File
Make_IncIteam(package) #item File
Make_SignalXml(package) #signal File
for pk in all_packages:
print ">>>>>>>>>>>>> Start " + pk + " <<<<<<<<<<<<<"
Make_All(pk)
print ">>>>>>>>>>>>> Finish " + pk + " <<<<<<<<<<<<<"
|
# Generated by Django 3.0.7 on 2020-10-09 09:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cl_table', '0021_auto_20201009_0859'),
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('emp_no', models.AutoField(db_column='Emp_no', primary_key=True, serialize=False)),
('emp_code', models.CharField(blank=True, db_column='Emp_code', max_length=20, null=True)),
('emp_name', models.CharField(blank=True, db_column='Emp_name', max_length=60, null=True)),
('emp_nric', models.CharField(blank=True, db_column='Emp_nric', max_length=20, null=True)),
('emp_sexes', models.CharField(blank=True, db_column='Emp_sexes', max_length=50, null=True)),
('emp_marital', models.CharField(blank=True, db_column='Emp_marital', max_length=50, null=True)),
('emp_race', models.CharField(blank=True, db_column='Emp_race', max_length=20, null=True)),
('emp_religion', models.CharField(blank=True, db_column='Emp_religion', max_length=20, null=True)),
('emp_phone1', models.CharField(blank=True, db_column='Emp_phone1', max_length=20, null=True)),
('emp_phone2', models.CharField(blank=True, db_column='Emp_phone2', max_length=20, null=True)),
('emp_nationality', models.CharField(blank=True, db_column='Emp_nationality', max_length=40, null=True)),
('emp_address', models.CharField(blank=True, db_column='Emp_address', max_length=255, null=True)),
('emp_jobpost', models.CharField(blank=True, db_column='Emp_jobpost', max_length=40, null=True)),
('emp_isactive', models.BooleanField(db_column='Emp_isactive')),
('emp_emer', models.CharField(blank=True, db_column='Emp_emer', max_length=60, null=True)),
('emp_emerno', models.CharField(blank=True, db_column='Emp_emerno', max_length=20, null=True)),
('emp_salary', models.FloatField(blank=True, db_column='Emp_salary', null=True)),
('emp_commission_type', models.CharField(blank=True, db_column='Emp_Commission_Type', max_length=20, null=True)),
('emp_dob', models.DateTimeField(blank=True, db_column='Emp_DOB', null=True)),
('emp_joindate', models.DateTimeField(blank=True, db_column='Emp_JoinDate', null=True)),
('emp_email', models.CharField(blank=True, db_column='Emp_email', max_length=40, null=True)),
('emp_socso', models.CharField(blank=True, db_column='Emp_SOCSO', max_length=20, null=True)),
('emp_epf', models.CharField(blank=True, db_column='Emp_EPF', max_length=20, null=True)),
('emp_target', models.FloatField(blank=True, db_column='Emp_Target', null=True)),
('emp_targetbas', models.IntegerField(blank=True, db_column='Emp_TargetBas', null=True)),
('itemsite_code', models.CharField(blank=True, db_column='ItemSite_Code', max_length=10, null=True)),
('emp_barcode', models.CharField(blank=True, db_column='Emp_Barcode', max_length=20, null=True)),
('emp_barcode2', models.CharField(blank=True, db_column='Emp_Barcode2', max_length=20, null=True)),
('emp_leaveday', models.CharField(blank=True, db_column='Emp_LeaveDay', max_length=50, null=True)),
('emp_pic', models.TextField(blank=True, db_column='Emp_PIC', null=True)),
('annual_leave', models.IntegerField(blank=True, db_column='Annual_Leave', null=True)),
('marriage_leave', models.IntegerField(blank=True, db_column='Marriage_Leave', null=True)),
('compassiolnate_leave', models.IntegerField(blank=True, db_column='Compassiolnate_leave', null=True)),
('national_service', models.IntegerField(blank=True, db_column='National_Service', null=True)),
('maternity_leave', models.IntegerField(blank=True, db_column='Maternity_Leave', null=True)),
('unpay_leave', models.IntegerField(blank=True, db_column='Unpay_Leave', null=True)),
('mc_leave', models.IntegerField(blank=True, db_column='MC_Leave', null=True)),
('emergency_leave', models.IntegerField(blank=True, db_column='Emergency_Leave', null=True)),
('emp_isboss', models.BooleanField(blank=True, db_column='Emp_IsBoss', null=True)),
('itemsite_refcode', models.CharField(blank=True, db_column='ITEMSITE_REFCODE', max_length=20, null=True)),
('emp_type', models.CharField(blank=True, db_column='EMP_TYPE', max_length=20, null=True)),
('emp_refcode', models.CharField(blank=True, db_column='EMP_REFCODE', max_length=20, null=True)),
('display_name', models.CharField(blank=True, db_column='Display_Name', max_length=20, null=True)),
('show_in_appt', models.BooleanField(db_column='Show_In_Appt')),
('emp_address1', models.CharField(blank=True, db_column='Emp_address1', max_length=255, null=True)),
('emp_address2', models.CharField(blank=True, db_column='Emp_address2', max_length=255, null=True)),
('emp_address3', models.CharField(blank=True, db_column='Emp_address3', max_length=255, null=True)),
('age_range0', models.BooleanField(db_column='Age_Range0')),
('age_range1', models.BooleanField(db_column='Age_Range1')),
('age_range2', models.BooleanField(db_column='Age_Range2')),
('age_range3', models.BooleanField(db_column='Age_Range3')),
('age_range4', models.BooleanField(db_column='Age_Range4')),
('type_code', models.CharField(blank=True, db_column='Type_Code', max_length=20, null=True)),
('emp_address4', models.CharField(blank=True, db_column='Emp_address4', max_length=255, null=True)),
('attn_password', models.CharField(blank=True, db_column='Attn_Password', max_length=50, null=True)),
('max_disc', models.FloatField(blank=True, db_column='Max_Disc', null=True)),
('disc_type', models.BooleanField(blank=True, db_column='Disc_Type', null=True)),
('disc_amt', models.FloatField(blank=True, db_column='Disc_Amt', null=True)),
('ep_allow', models.BooleanField(blank=True, db_column='EP_Allow', null=True)),
('ep_amttype', models.BooleanField(blank=True, db_column='EP_AmtType', null=True)),
('ep_startdate', models.DateTimeField(blank=True, db_column='EP_StartDate', null=True)),
('ep_discamt', models.FloatField(blank=True, db_column='EP_DiscAmt', null=True)),
('ep_amt', models.FloatField(blank=True, db_column='EP_Amt', null=True)),
('bonus_level', models.CharField(blank=True, db_column='Bonus_Level', max_length=50, null=True)),
('bonus_scale_code', models.CharField(blank=True, db_column='Bonus_Scale_Code', max_length=50, null=True)),
('has_product_comm', models.BooleanField(blank=True, db_column='Has_Product_Comm', null=True)),
('ser_level', models.CharField(blank=True, db_column='Ser_Level', max_length=50, null=True)),
('ser_scale_code', models.CharField(blank=True, db_column='Ser_Scale_Code', max_length=50, null=True)),
('treat_level', models.CharField(blank=True, db_column='Treat_Level', max_length=50, null=True)),
('treat_scale_code', models.CharField(blank=True, db_column='Treat_Scale_code', max_length=50, null=True)),
('emp_target_bonus', models.FloatField(blank=True, db_column='Emp_Target_Bonus', null=True)),
('extra_percent', models.FloatField(blank=True, db_column='Extra_Percent', null=True)),
('site_code', models.CharField(blank=True, db_column='Site_Code', max_length=10, null=True)),
('emp_pic_b', models.BinaryField(blank=True, db_column='Emp_Pic_B', null=True)),
('getsms', models.BooleanField(db_column='GetSMS')),
('emp_comm', models.BooleanField(blank=True, db_column='Emp_Comm', null=True)),
('show_in_sales', models.BooleanField(db_column='Show_In_Sales')),
('show_in_trmt', models.BooleanField(db_column='Show_In_Trmt')),
('emp_edit_date', models.DateTimeField(blank=True, db_column='Emp_Edit_Date', null=True)),
('emp_seq_webappt', models.IntegerField(blank=True, db_column='Emp_Seq_WebAppt', null=True)),
('employeeapptype', models.CharField(blank=True, db_column='employeeAppType', max_length=40, null=True)),
('treat_exp_day_limit', models.IntegerField(blank=True, db_column='Treat_Exp_Day_Limit', null=True)),
('defaultsitecode', models.CharField(blank=True, db_column='defaultSiteCode', max_length=10, null=True)),
('queue_no', models.IntegerField(db_column='Queue_No', null=True)),
('emp_pic_b1', models.CharField(db_column='Emp_Pic_B1', max_length=250, null=True)),
],
options={
'db_table': 'Employee',
},
),
]
|
import requests
import http.client
import numpy as np
from flask import Flask
from flask import request, render_template
app = Flask(__name__)
def getData(ID, ID_Data):
""" Converts the data sent by the server into the original message
Parameters
----------
ID : list(:float)
ID of the client
ID_Data: list(:float)
Array containing ID embedded with data
Returns
-------
str
The data which was published by the client with the specified ID
"""
# print(ID_Data)
# print(ID)
data = np.multiply(np.array(ID_Data) - np.array(ID), np.array(ID))
curr = ""
data[np.isclose(data, -1)] = 0
# print(data)
dataBinary = []
for i in range(len(data)):
    if i and i % 8 == 0:
        dataBinary.append(curr)
        curr = ""
    curr += str(int(data[i]))
if curr:
    dataBinary.append(curr)  # flush the final byte, otherwise the last character is lost
print(dataBinary)
dataString1 = list(map(lambda x: chr(int(x, 2)), dataBinary))
return "".join(dataString1)
def fetchDataFromPubServer(ID_arr):
""" Sends the client ID to the Pub/Sub server to fetch the published data
Parameters
----------
ID_arr : list(:float)
ID of the client
Returns
-------
str
The data which was published by the client with the specified ID
"""
pubSubURL = "http://localhost:7001/fetchData"
myData = {"ID": ID_arr}
clientIDData = requests.post(pubSubURL, json=myData).json()["data"]
print(clientIDData)
if clientIDData:
data = getData(ID_arr, clientIDData)
else:
data = None
print(data)
return data
def fetchIDFromClientURL(clientURL: str):
""" Fetches Client ID from the specified client URL
Parameters
----------
clientURL : str
URL of the client to fetch ID from
Returns
-------
list(:float)
A float array containing the ID
"""
conn = http.client.HTTPConnection(clientURL)
conn.request('GET', '/')
resp = conn.getresponse()
content = resp.read()
conn.close()
text = content.decode('utf-8')
ID_arr = text.split(" ")
ID_arr = list(map(float, ID_arr))
return ID_arr
@app.route("/")
def index():
"""
The function index is a callback for when a user lands on the homepage URL: 127.0.0.1:6001
It loads an input form to enter the URL of the client. It then fetches the ID from that URL
and queries the Publish/Subscribe server to fetch the data
"""
clientURL = request.args.get("ClientURL", "")
data = ""
if clientURL:
ID_arr = fetchIDFromClientURL(clientURL)
data = fetchDataFromPubServer(ID_arr)
# return (
# """<form action="" method="get">
# <input type="text" name="ClientURL">
# <input type="submit" value="Fetch Data">
# </form>"""
# + clientURL
# + (data if data else "No client data found")
# )
return render_template("index.html", data=data)
if __name__ == "__main__":
app.run(host="127.0.0.1", port=6001, debug=True)
|
import math
N = 1
for n in range(N):
    y = (math.sin(float(n)/float(N)*math.pi*2.0)+1.0)/2.0*(8*16-1)
    sy = int(round((y % 16)/3))
    cy = int(round(int(y / 16)))
    print("; x = %d y = %f" % (n, y))
    print("db %d" % cy)
    print("db %d" % sy)
    #print(n, y, cy*16+sy*3)
|
###### ITC 106 - Jarryd Keir - Student Number 11516086
#### Variable Section - ensure that variables hold the correct starting values so that the main part of the code runs correctly ####
inputMarkAss1 = -1
inputMarkAss2 = -1
inputMarkExam = -1
outputMarkAss1 = 0
outputMarkAss2 = 0
outputMarkExam = 0
AssWeight1 = 20
AssWeight2 = 30
ExamWeight = 50
TotalWeightAssMark = 0
WeightedTotalMark = 0
#### Main Code ####
print("-----------------------------------------------------------------------------------------\nThe Innovation University of Australia (IUA) Grade System\n-----------------------------------------------------------------------------------------\n")
print("Please enter all marks out of 100.")
#### While loop to ensure that input from screen prompt is a valid number and can be converted to an int
while isinstance(inputMarkAss1,str) or inputMarkAss1 < 0:
inputMarkAss1 = input("Please enter the marks for Assignment 1: ") #get input from user and store input in inputMarkAss1
try: #Attempt to convert the input to an int
inputMarkAss1 = int(inputMarkAss1)
if inputMarkAss1 > 100 or inputMarkAss1 < 0: #TRUE - ensure that it's within 0 to 100 (inclusive of 0 and 100) then ensure that inputMarkAss1 is -1 still to ensure that the while loop continues ### FALSE - will step to the next input of mark for Assessment2
print("Please enter a value between 0 and 100!")
inputMarkAss1 = -1
except ValueError: #if the input that is currently stored in inputMarkAss1 fails to be cast to an int then catch the error here and display error message
print("Please enter all marks out of 100.")
#### While loop to ensure that input from screen prompt is a valid number and can be converted to an int
while isinstance(inputMarkAss2,str) or inputMarkAss2 < 0:
inputMarkAss2 = input("Please enter the marks for Assignment 2: ") #get input from user and store input in inputMarkAss2
try: #Attempt to convert the input to an int
inputMarkAss2 = int(inputMarkAss2)
if inputMarkAss2 > 100 or inputMarkAss2 < 0: #TRUE - ensure that it's within 0 to 100 (inclusive of 0 and 100) then ensure that inputMarkAss2 is -1 still to ensure that the while loop continues ### FALSE - will step to the next input of mark for Final Exam
print("Please enter a value between 0 and 100!")
inputMarkAss2 = -1
except ValueError:#if the input that is currently stored in inputMarkAss2 fails to be cast to an int then catch the error here and display error message
print("Please enter all marks out of 100.")
#### While loop to ensure that input from screen prompt is a valid number and can be converted to an int
while isinstance(inputMarkExam,str) or inputMarkExam < 0:
inputMarkExam = input("Please enter the marks for the Final Exam: ") #get input from user and store input in inputMarkExam
try: #Attempt to convert the input to an int
inputMarkExam = int(inputMarkExam)
if inputMarkExam > 100 or inputMarkExam < 0: #TRUE - ensure that it's within 0 to 100 (inclusive of 0 and 100) then ensure that inputMarkExam is -1 still to ensure that the while loop continues ### FALSE - will step to the calculation of the weighted marks and output
print("Please enter a value between 0 and 100!")
inputMarkExam = -1
except ValueError: #if the input that is currently stored in inputMarkExam fails to be cast to an int then catch the error here and display error message
print("Please enter all marks out of 100.")
print("\nThank you!\n") #print Thank You!
outputMarkAss1 = inputMarkAss1 * (AssWeight1/100) #calculate the weighted mark for Assignment 1, which carries a weight of 20%
outputMarkAss2 = inputMarkAss2 * (AssWeight2/100) #calculate the weighted mark for Assignment 2, which carries a weight of 30%
outputMarkExam = inputMarkExam * (ExamWeight/100) #calculate the weighted mark for the Exam, which carries a weight of 50%
TotalWeightAssMark = outputMarkAss1 + outputMarkAss2 #calculate the combined weighted mark for Assignments 1 & 2
WeightedTotalMark = outputMarkAss1 + outputMarkAss2 + outputMarkExam #calculate the combined weighted mark for Assignments 1 & 2 and the Exam
print("Weighted mark for Assignment 1: ", int(outputMarkAss1)) #output the weighted mark for Assessment 1
print("Weighted mark for Assignment 2: ", int(outputMarkAss2)) #output the weighted mark for Assessment 2
print("Total weighted mark of the assignments: ", int(TotalWeightAssMark), "\n") #calculate the combine weighted mark for Assessment 1 & 2
print("Weighted mark for the Final Exam is: ", int(outputMarkExam)) #output the weighted mark for Exam
print("Total weighted mark for the subject: ", int(WeightedTotalMark), "\n") #output the combine weighted mark for Assessment 1, 2, & Exam
print("Goodbye.") #print Goodbye.
#end
|
# Generated by Django 3.2.5 on 2021-08-12 03:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('erp', '0004_auto_20210718_1958'),
]
operations = [
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=500, verbose_name='Categoria'),
),
]
|
from flask import Flask, render_template
from random import randrange
app = Flask(__name__)
@app.route("/")
def home():
return render_template("home.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/fun")
def fun():
return render_template("fun.html")
@app.route("/bunnies")
def bunnies():
place = randrange(4)
d = {}
d['Miniature Lion Lop'] = "Lopped ears and mane of Lionhead. Has a bib!"
d['Jersey Wooly'] = "About 3 pounds, docile, with easy-care wool fur!"
d['American Sable'] = "Result of Chinchilla rabbit crosses. One named Luna is prizewinning!"
d['Continental Giant'] = "Also known as the German Giant, and originally bred for meat, the largest of these bunnies is about 4 feet 4 inches and 53 pounds!"
d['Miniature Lop'] = "With a maximum weight of 1.6 kilograms, they are small and easily handled!"
keez = d.keys()
return render_template("bunnies.html", d = d, place = place, keez = keez)
if __name__ == "__main__":
app.debug = True
app.run(host = '0.0.0.0',port=8000)
|
# making anagrams
from collections import Counter
s="abc"
s1="cde"
a=Counter(s)
b=Counter(s1)
print(a-b)
print(a)
print(b)
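# Counter subtraction keeps only positive counts, so (a - b) holds the letters
# to delete from s and (b - a) those to delete from s1 to make the two anagrams:
deletions = sum((a - b).values()) + sum((b - a).values())
print(deletions)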
|
# --------------------------------------------------------------------
import re
import os
# *** Matching chars ***
""" MetaCharacters: . ^ $ * + ? { } [ ] \ | ( )
Class [] or set of characters
[abc] or [a-c]
[abc$] $ is not special here!
[^5] complement. Any char but 5. [5^] has no meaning
[a-zA-Z0-9_] = \w
\d Matches any decimal digit; this is equivalent to the class [0-9].
\D Matches any non-digit character; this is equivalent to the class [^0-9].
\s Matches any whitespace character; this is equivalent to the class [ \t\n\r\f\v].
\S Matches any non-whitespace character; this is equivalent to the class [^ \t\n\r\f\v].
\w Matches any alphanumeric character; this is equivalent to the class [a-zA-Z0-9_].
\W Matches any non-alphanumeric character; this is equivalent to the class [^a-zA-Z0-9_].
Can be combined with classes: [\s,abc]
. matches any char except newline. re.DOTALL matches newline as well
"""
p = re.compile (r'a[\S]*')                # raw string avoids invalid-escape warnings
print (r'a[\S]', p.search ('abcbd n'))    # Matches till d. After d there is a space char
p = re.compile (r'a[\D]*')                # Non-decimal digits
print (r'a[\D]', p.search ('abc5bd1n'))   # Matches till c
p = re.compile ('a[^0-9]*')               # Non-decimal digits
print ('a[^0-9]', p.search ('abc5bd1n'))  # Matches till c. Inside a set, ^ means complement
# *** Repeating things ***
"""
* matches the previous char 0 or more times
ca*t will match 'ct' (0 'a' characters), 'cat' (1 'a'), 'caaat' (3 'a' characters)
* is greedy. Goes as far as it can.
a[bcd]*b tries to match 'abcbd'
'a' is matched against 'a' so it tries to match the next part of regexp: [bcd*]
It goes till the end because the letter 'd' matches [bcd*] but then it fails because regexp part 3 'b' does not match the string as the string is finished
So it back tracks. 'd' does not match 'b' so it back tracks again. Finally the regexp 'b' (last bit of the regexp) matches 'b'
"""
p = re.compile ('a[bcd]*b')
print (p.match ('abcbd')) # match () only succeeds at the beginning of the string
# matches 'abcb'. Span: [0-4]
'''
+ matches previous char 1 or more times
ca+t will match 'cat' (1 'a'), 'caaat' (3 'a's), but won’t match 'ct'
? matches the previous char 0 or once. Means 'optional'
home-?brew matches either 'homebrew' or 'home-brew'.
{m,n} matches previous char at least m times but at most n times
a/{1,3}b will match 'a/b', 'a//b', and 'a///b'. It won’t match 'ab', which has no slashes, or 'a////b', which has four
'''
'''
Backslash in regexp is '\\' but Python also requires escaping so '\\\\' is required to match a single '\'
or use 'r' meaning raw string
'''
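# Quick check of the escaping rules above (a raw string keeps the backslash literal):
print (re.search (r'\\', r'a\b')) # pattern r'\\' is one backslash; it matches the '\' in a\b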
# --------------------------------------------------------------------
'''
match() Determine if the RE matches at the beginning of the string.
search() Scan through a string, looking for any location where this RE matches.
findall() Find all substrings where the RE matches, and returns them as a list.
finditer() Find all substrings where the RE matches, and returns them as an iterator.
'''
p = re.compile ('[bcd]*b')
print (p.match ('abcbdabmbertdb')) # None: match () anchors at position 0, and the leading 'a' fits neither [bcd]* nor the final b
print (p.findall ('abcbdabmbertdb')) # bcb b b db as it is match 0 or more times
print (p.search ('abcbdabmbertdb')) # bcb first occurrence
m = p.search ('abcbdabmbertdb')
print ('')
print ("Group:", m.group (), "Start:", m.start (), "End:", m.end (), "Span:", m.span ())
print ('-')
p = re.compile ('[bcd]+b')
print (p.match ('abcbdabmbertdb')) # None: match () anchors at position 0, and the leading 'a' cannot be consumed by [bcd]+b
print (p.findall ('abcbdabmbertdb')) # bcb db as it has to match at least once
print (p.search ('abcbdabmbertdb')) # bcb first occurrence
m = p.search ('abcbdabmbertdb')
print ('')
print ("Group:", m.group (), "Start:", m.start (), "End:", m.end (), "Span:", m.span ())
if m:
print ("Match found!")
else:
print ("No match")
print (re.match ('[bcd]+b', 'abcbdabmbertdb')) # Implicit compilation and calls the function. No need for pattern object
p = re.compile ('a[bcd]+b', re.IGNORECASE) # Compilation flags. MULTILINE affects ^ and $ as they are applied after each newline
print (p.match ('ABCBDBMBERTDB'))
# --------------------------------------------------------------------
# More metacharacters
# | is the OR operator
print (re.findall ('a|b', 'karbon'))
# ^ at the beginning
print (re.findall ('^(abs|bra)', 'absolute'))
print (re.findall ('^(abs|bra)', 'brass'))
# $ at the end
print (re.findall ('(abs|bra)$', 'pre-abs'))
print (re.findall ('(abs|bra)$', 'abra'))
print (re.findall ('(abs|bra)$', 'abrak'))
# --------------------------------------------------------------------
# Search and replace
p = re.compile('(blue|white|red)')
cc = p.sub ('colour', 'blue socks and red shoes')
p = re.compile (r'^(create.+table).+udm\.')
m = p.search ('create table udm.claims as (')
if m:
p = re.compile (r'udm\.')
newline = p.sub ('ProjectName.', 'create table udm.claims as (')
print (newline)
else:
print ("No match!")
#os.system ("pause")
# exit ()
# Greedy vs non-greedy
# Greedy goes as far as it can
s = '<html><head><title>Title</title>'
print (re.match('<.*>', s).span()) # (0, 32) goes all the way. <html>'s first < and </title>'s >
print (re.match('<.*?>', s).group()) # returns <html> Stops as early as it can
# Practice
print (re.search ('abcm*y', 'abcy')) # abcy
print (re.search ('abc[opk*]y', 'abcy')) # None. Inside a class, * is literal: exactly one of o/p/k/* must come before y, and 'y' is none of them
print (re.search ('abc[opky]*y', 'abcpypyoy')) # abcpypyoy. abc, then any of opky greedily to the end, backtracking one char so the final y can match
print (re.search ('abc(opk)*y', 'abcpy')) # None. (opk) repeats the literal sequence 'opk'; with zero repetitions, the 'p' after abc is not y
print (re.search ('a[bcd]*b', 'abcbd')) # abcb. Starts with a, any of bcd zero or more times. Finds d at the end as it is greedy. Backtracks and finds b
print (re.search ('a[bcd]', 'abcbd')) # ab. Starts with a, any of bcd. Finds b and stops. Non-greedy
print (re.search ('a[bcd]d', 'abcbd')) # None. a, then one of bcd (matches b), then d is required but c follows
print (re.search ('a[bcd]d', 'abdbd')) # abd. Starts with a, any of bcd. Finds b then d. Non-greedy
# --------------------------------------------------------------------
# Replace strings in a file
fin = open ("inputfile.txt", "rt")
fout = open ("outfile.txt", "wt")
p1 = re.compile (r'^(create.+table).+udm\.')
p2 = re.compile (r'varchar')
p3 = re.compile (r'integer')
# Keep the substitution patterns separate from the search patterns: re-binding
# p1/p2/p3 inside the loop would silently change what later lines are matched
# against (e.g. every line containing "udm." would start to qualify).
sub1 = re.compile (r'udm\.')
sub2 = re.compile (r'varchar.*?,|varchar.*?\n')
sub3 = re.compile (r'integer,|integer')
for line in fin:
    if p1.search (line):
        line = sub1.sub ('ProjectName', line)
    if p2.search (line):
        line = sub2.sub ('string,', line)
    if p3.search (line):
        line = sub3.sub ('int64', line)
    fout.write (line)
fin.close()
fout.close()
# --------------------------------------------------------------------
api_url = 'https://aventri.com/v2/ereg/listEvents/?accesstoken=a547f5deA32CA8013Ab849faB90&lastmodified-gt=2022-02-15&limit=1' # First param
api_url2 = 'https://aventri.com/v2/ereg/listEvents/?lastmodified-gt=2022-02-15&limit=1&accesstoken=a547f5deA32CA8013Ab849faB90&offset=100' # Middle param
api_url3 = 'https://aventri.com/v2/ereg/listEvents/?accesstoken=a547f5deA32CA8013Ab849faB90' # Only param
api_url4 = 'https://aventri.com/v2/ereg/listEvents/?lastmodified-gt=2022-02-15&limit=1&accesstoken=a547f5deA32CA8013Ab849faB90' # Last param
s = re.sub (r"accesstoken=([a-zA-Z0-9])+", r"*at*", api_url, count=1).replace ("*at*", "accesstoken=***Access token***")
s2 = re.sub (r"accesstoken=([a-zA-Z0-9])+", r"*at*", api_url2, count=1).replace ("*at*", "accesstoken=***Access token***")
s3 = re.sub (r"accesstoken=([a-zA-Z0-9])+", r"*at*", api_url3, count=1).replace ("*at*", "accesstoken=***Access token***")
s4 = re.sub (r"accesstoken=([a-zA-Z0-9])+", r"*at*", api_url4, count=1).replace ("*at*", "accesstoken=***Access token***")
print (s)
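# The same masking works in one step by substituting the whole token directly,
# without the intermediate "*at*" placeholder (an alternative sketch):
s5 = re.sub (r"accesstoken=[a-zA-Z0-9]+", "accesstoken=***Access token***", api_url2, count=1)
print (s5)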
pattern = r"Cook"
sequence = "Cookie"
if re.match (pattern, sequence):
print ("Match!")
else: print ("Not a match!")
s = re.search (r'Co.k.e', 'Cookie').group () # Without group it is just a match object!
s = re.search (r'^Eat', "Eat cake!").group () # Match at the beginning of the string
s= re.search (r'cake$', "Cake! Let's eat cake").group () # Match at the end
# group () without parameters is the whole matched string
s = re.search (r'[0-6]', 'Number: 5').group () # 5
s = re.search (r'[abc]', 'x-ray').group () # a
s = re.search(r'Not a\sregular character', 'Not a regular character').group () # \ = escaping. \s = any whitespace char
'''
Character(s) What it does
. A period. Matches any single character except the newline character.
^ A caret. Matches a pattern at the start of the string.
\A Uppercase A. Matches only at the start of the string.
$ Dollar sign. Matches the end of the string.
\Z Uppercase Z. Matches only at the end of the string.
[ ] Matches the set of characters you specify within it.
\ ∙ If the character following the backslash is a recognized escape character, then the special meaning of the term is taken.
∙ Else the backslash () is treated like any other character and passed through.
∙ It can be used in front of all the metacharacters to remove their special meaning.
\w Lowercase w. Matches any single letter, digit, or underscore.
\W Uppercase W. Matches any character not part of \w (lowercase w).
\s Lowercase s. Matches a single whitespace character like: space, newline, tab, return.
\S Uppercase S. Matches any character not part of \s (lowercase s).
\d Lowercase d. Matches decimal digit 0-9.
\D Uppercase D. Matches any character that is not a decimal digit.
\t Lowercase t. Matches tab.
\n Lowercase n. Matches newline.
\r Lowercase r. Matches return.
\b Lowercase b. Matches only the beginning or end of the word.
+ Checks if the preceding character appears one or more times.
* Checks if the preceding character appears zero or more times.
? ∙ Checks if the preceding character appears exactly zero or one time.
∙ Specifies a non-greedy version of +, *
{ } Checks for an explicit number of times.
( ) Creates a group when performing matches.
< > Creates a named group when performing matches.
'''
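# Quick demonstrations of a few metacharacters from the table above (illustrative):
assert re.search (r'\d+', 'Order 66').group () == '66'         # \d+ : one or more digits
assert re.search (r'^\w+', 'Hello world').group () == 'Hello'  # ^\w+ : word chars at the start
assert re.search (r'colou?r', 'color').group () == 'color'     # ?   : optional preceding char
assert re.search (r'a{2,3}', 'caaake').group () == 'aaa'       # {}  : explicit repetition count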
# --------------------------------------------------------------------
os.system ("pause")
|
'''tests ensuring that *the* way of doing things works'''
import datetime
from icalendar import Calendar, Event
import pytest
def test_creating_calendar_with_unicode_fields(calendars, utc):
''' create a calendar with events that contain unicode characters in their fields '''
cal = Calendar()
cal.add('PRODID', '-//Plönë.org//NONSGML plone.app.event//EN')
cal.add('VERSION', '2.0')
cal.add('X-WR-CALNAME', 'äöü ÄÖÜ €')
cal.add('X-WR-CALDESC', 'test non ascii: äöü ÄÖÜ €')
cal.add('X-WR-RELCALID', '12345')
event = Event()
event.add('DTSTART', datetime.datetime(2010, 10, 10, 10, 0, 0, tzinfo=utc))
event.add('DTEND', datetime.datetime(2010, 10, 10, 12, 0, 0, tzinfo=utc))
event.add('CREATED', datetime.datetime(2010, 10, 10, 0, 0, 0, tzinfo=utc))
event.add('UID', '123456')
event.add('SUMMARY', 'Non-ASCII Test: ÄÖÜ äöü €')
event.add('DESCRIPTION', 'icalendar should be able to de/serialize non-ascii.')
event.add('LOCATION', 'Tribstrül')
cal.add_component(event)
# test_create_event_simple
event1 = Event()
event1.add('DTSTART', datetime.datetime(2010, 10, 10, 0, 0, 0, tzinfo=utc))
event1.add('SUMMARY', 'åäö')
cal.add_component(event1)
# test_unicode_parameter_name
# test for issue #80 https://github.com/collective/icalendar/issues/80
event2 = Event()
event2.add('DESCRIPTION', 'äöüßÄÖÜ')
cal.add_component(event2)
assert cal.to_ical() == calendars.created_calendar_with_unicode_fields.raw_ics
|
#!/usr/bin/env python
import pygtk
pygtk.require("2.0")
import gtk
class Base:
def combo_text(self,widget):
self.win.set_title(widget.get_active_text())
def textchange(self,widget):
self.win.set_title(self.textbox.get_text())
def relabel(self,widget):
self.label.set_text('xxxxxxxx')
self.textbox.set_text('uuuu')
def myhide(self,widget):
self.button.hide()
self.combo.append_text(self.textbox.get_text())
def destroy(self,widget,data=None):
print "dsafasdf"
gtk.main_quit()
def __init__(self):
self.win=gtk.Window(gtk.WINDOW_TOPLEVEL)
self.win.set_position(gtk.WIN_POS_CENTER)
self.win.set_size_request(333,1333)
self.win.set_title("my title")
self.win.set_tooltip_text("dasfsad\nuuuuuu")
self.button=gtk.Button("exit")
self.button.set_tooltip_text("aasaaa")
self.button.connect("clicked",self.destroy)
self.label=gtk.Label("dbalalalal")
self.button2=gtk.Button("hide")
self.button2.connect("clicked",self.myhide)
self.button4=gtk.Button("relabel")
self.button4.connect("clicked",self.relabel)
#fixed=gtk.Fixed();
#fixed.put(self.button,20,33)
#fixed.put(self.button2,120,33)
self.textbox=gtk.Entry()
self.textbox.connect("changed",self.textchange)
self.combo=gtk.combo_box_entry_new_text()
self.combo.connect("changed",self.combo_text)
self.combo.append_text("111")
self.combo.append_text("222")
self.pix=gtk.gdk.pixbuf_new_from_file_at_size("/home/roya/small.png",122,133)
self.image=gtk.Image()
self.image.set_from_pixbuf(self.pix)
self.box=gtk.VBox()
self.box.pack_start(self.button2)
self.box.pack_start(self.button)
self.box.pack_start(self.label)
self.box.pack_start(self.button4)
self.box.pack_start(self.image)
self.box2=gtk.HBox()
self.box2.pack_start(self.box)
self.box2.pack_start(self.textbox)
self.box2.pack_start(self.combo)
self.win.add(self.box2)
self.win.show_all()
self.win.connect("destroy",self.destroy)
def main(self):
gtk.main()
if __name__ == "__main__":
base=Base()
base.main()
|
from operator import itemgetter
import re
import numpy as np
def levenshtein_distance(s, t):
"""
computes the Levenshtein distance between the strings
s and t using dynamic programming
Returns
----------
dist(int): the Levenshtein distance between s and t
"""
rows = len(s) + 1
cols = len(t) + 1
# create matrix and initialise first line and column
dist = [[0 for _ in range(cols)] for _ in range(rows)]
for i in range(1, rows):
dist[i][0] = i
for i in range(1, cols):
dist[0][i] = i
# use the recursion relation
    # lev(a[:i], b[:j]) = min(lev(a[:i-1], b[:j]) + 1, lev(a[:i], b[:j-1]) + 1,
    #                         lev(a[:i-1], b[:j-1]) + (1 if a[i-1] != b[j-1] else 0))
for col in range(1, cols):
for row in range(1, rows):
if s[row - 1] == t[col - 1]:
cost = 0
else:
cost = 1
dist[row][col] = min(dist[row - 1][col] + 1, # deletion
dist[row][col - 1] + 1, # insertion
dist[row - 1][col - 1] + cost) # substitution
return dist[rows - 1][cols - 1]
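# quick sanity check (classic example): "kitten" -> "sitting" needs 3 edits
assert levenshtein_distance('kitten', 'sitting') == 3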
class OOVhandler(object):
def __init__(self, pcfg, words, embeddings):
""" initialize out of vocabulary handler
vocabulary: the vocabulary of the corpus
words: words used from a bigger corpus(provided by polyglot)
embeddings: the vector representation of words"""
print('creating out of vocabulary handler:')
self.words = words
self.word_id = {word: i for i, word in enumerate(self.words)}
self.embeddings = embeddings
self.terminals = [terminal.symb for terminal in pcfg.terminals]
print('Keeping only common words that have embeddings')
self.embedded_terminals = [terminal for terminal in self.terminals if terminal in words]
self.transformed_embeddings = np.array([self.embeddings[self.word_id[w]] for w in self.embedded_terminals])
self.transformed_embeddings = self.transformed_embeddings.T / \
(np.sum(self.transformed_embeddings ** 2, axis=1) ** 0.5)
def closer_levenshtein(self, word):
"""
returns the closest word in the word embedding using the levenshtein distance
"""
word_distances = [(w, levenshtein_distance(word, w)) for w in self.words]
return min(word_distances, key=itemgetter(1))[0]
def case_normalizer(self, word):
""" In case the word is not available in the vocabulary,
we can try multiple case normalizing procedure.
We consider the best substitute to be the one with the lowest index,
which is equivalent to the most frequent alternative."""
w = word
lower = (self.word_id.get(w.lower(), 1e12), w.lower())
upper = (self.word_id.get(w.upper(), 1e12), w.upper())
title = (self.word_id.get(w.title(), 1e12), w.title())
results = [lower, upper, title]
results.sort()
index, w = results[0]
if index != 1e12:
return w
return word
def normalize(self, word):
""" Find the closest alternative in case the word is OOV."""
digits = re.compile("[0-9]", re.UNICODE)
if word not in self.words:
word = digits.sub("#", word)
# if the word is not in the vocabulary try different normalizations
if word not in self.words:
word = self.case_normalizer(word)
# if the word is still not in the vocabulary replace it by the closest word
# using the levenshtein distance
if word not in self.words:
return self.closer_levenshtein(word)
return word
def nearest_cosine(self, word):
""" Sorts words according to their Euclidean distance.
To use cosine distance, embeddings has to be normalized so that their l2 norm is 1.
Returns
----------
word: closest word in the embedded terminals to the word in input
"""
e = self.embeddings[self.word_id[word]]
# normalise e and the embedding matrix
e = e / np.linalg.norm(e)
distances = e @ self.transformed_embeddings
return self.embedded_terminals[max(enumerate(distances), key=itemgetter(1))[0]]
def replace(self, oov_word):
"""Replace an out of the vocabulary word with another terminal word
Returns
----------
word: string.
most similar word in the terminal embedded words
"""
if oov_word in self.terminals:
return oov_word
else:
# first find the closest word in the vocabulary using levenshtein distance
word = self.normalize(oov_word)
# find the closest terminal using the cosine similarity.
return self.nearest_cosine(word)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import numpy as np
from ..common._apply_operation import apply_abs, apply_cast, apply_mul
from ..common._apply_operation import apply_add, apply_div
from ..common._apply_operation import apply_reshape, apply_sub, apply_topk
from ..common._apply_operation import apply_pow, apply_concat, apply_transpose
from ..common._registration import register_converter
from ..proto import onnx_proto
def _calculate_weights(scope, container, unity, distance):
"""
weights = 1 / distance
Handle divide by 0.
"""
weights_name = scope.get_unique_variable_name('weights')
ceil_result_name = scope.get_unique_variable_name('ceil_result')
floor_result_name = scope.get_unique_variable_name('floor_result')
mask_sum_name = scope.get_unique_variable_name('mask_sum')
bool_mask_sum_name = scope.get_unique_variable_name('bool_mask_sum')
ceil_floor_sum_name = scope.get_unique_variable_name('ceil_floor_sum')
distance_without_zero_name = scope.get_unique_variable_name(
'distance_without_zero')
not_ceil_floor_sum_name = scope.get_unique_variable_name(
'not_ceil_floor_sum')
bool_ceil_floor_sum_name = scope.get_unique_variable_name(
'bool_ceil_floor_sum')
bool_not_ceil_floor_sum_name = scope.get_unique_variable_name(
'bool_not_ceil_floor_sum')
mask_sum_complement_name = scope.get_unique_variable_name(
'mask_sum_complement')
mask_sum_complement_float_name = scope.get_unique_variable_name(
'mask_sum_complement_float')
masked_weights_name = scope.get_unique_variable_name('masked_weights')
final_weights_name = scope.get_unique_variable_name('final_weights')
container.add_node('Ceil', distance, ceil_result_name,
name=scope.get_unique_operator_name('Ceil'))
container.add_node('Floor', distance, floor_result_name,
name=scope.get_unique_operator_name('Floor'))
apply_add(scope, [ceil_result_name, floor_result_name],
ceil_floor_sum_name, container, broadcast=0)
apply_cast(scope, ceil_floor_sum_name, bool_ceil_floor_sum_name, container,
to=onnx_proto.TensorProto.BOOL)
container.add_node('Not', bool_ceil_floor_sum_name,
bool_not_ceil_floor_sum_name,
name=scope.get_unique_operator_name('Not'))
apply_cast(scope, bool_not_ceil_floor_sum_name, not_ceil_floor_sum_name,
container, to=onnx_proto.TensorProto.FLOAT)
apply_add(scope, [distance, not_ceil_floor_sum_name],
distance_without_zero_name, container, broadcast=0)
apply_div(scope, [unity, distance_without_zero_name],
weights_name, container, broadcast=1)
container.add_node('ReduceSum', not_ceil_floor_sum_name,
mask_sum_name, axes=[1],
name=scope.get_unique_operator_name('ReduceSum'))
apply_cast(scope, mask_sum_name, bool_mask_sum_name, container,
to=onnx_proto.TensorProto.BOOL)
container.add_node('Not', bool_mask_sum_name,
mask_sum_complement_name,
name=scope.get_unique_operator_name('Not'))
apply_cast(scope, mask_sum_complement_name, mask_sum_complement_float_name,
container, to=onnx_proto.TensorProto.FLOAT)
apply_mul(scope, [weights_name, mask_sum_complement_float_name],
masked_weights_name, container, broadcast=1)
apply_add(scope, [masked_weights_name, not_ceil_floor_sum_name],
final_weights_name, container, broadcast=0)
return final_weights_name
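# For reference, the graph assembled above mirrors this NumPy logic (an
# illustrative sketch only, not used by the converter): rows that contain an
# exact-match neighbour (distance == 0) get weight 1 on the zero entries and 0
# elsewhere; all other rows use plain 1 / distance.
def _weights_reference(distance):
    zero_mask = (distance == 0).astype(float)             # "not_ceil_floor_sum"
    weights = 1.0 / (distance + zero_mask)                # shift zeros to avoid divide-by-zero
    row_has_zero = zero_mask.sum(axis=1, keepdims=True) > 0
    return np.where(row_has_zero, zero_mask, weights)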
def _get_weights(scope, container, topk_values_name, distance_power):
"""
Get the weights from an array of distances.
"""
unity_name = scope.get_unique_variable_name('unity')
root_power_name = scope.get_unique_variable_name('root_power')
nearest_distance_name = scope.get_unique_variable_name(
'nearest_distance')
actual_distance_name = scope.get_unique_variable_name(
'actual_distance')
container.add_initializer(unity_name, onnx_proto.TensorProto.FLOAT,
[], [1])
container.add_initializer(root_power_name,
onnx_proto.TensorProto.FLOAT,
[], [1 / distance_power])
apply_abs(scope, topk_values_name, nearest_distance_name,
container)
apply_pow(scope, [nearest_distance_name, root_power_name],
actual_distance_name, container)
weights_name = _calculate_weights(scope, container, unity_name,
actual_distance_name)
return weights_name
def _get_probability_score(scope, container, operator, weights,
topk_values_name, distance_power, topk_labels_name,
classes):
"""
Calculate the class probability scores, update the second output of
KNeighboursClassifier converter with the probability scores and
return it.
"""
labels_name = [None] * len(classes)
output_label_name = [None] * len(classes)
output_cast_label_name = [None] * len(classes)
output_label_reduced_name = [None] * len(classes)
for i in range(len(classes)):
labels_name[i] = scope.get_unique_variable_name(
'class_labels_{}'.format(i))
container.add_initializer(labels_name[i],
onnx_proto.TensorProto.INT32, [], [i])
output_label_name[i] = scope.get_unique_variable_name(
'output_label_{}'.format(i))
output_cast_label_name[i] = scope.get_unique_variable_name(
'output_cast_label_{}'.format(i))
output_label_reduced_name[i] = scope.get_unique_variable_name(
'output_label_reduced_{}'.format(i))
if weights == 'distance':
weights_val = _get_weights(
scope, container, topk_values_name, distance_power)
for i in range(len(classes)):
weighted_distance_name = scope.get_unique_variable_name(
'weighted_distance')
container.add_node('Equal', [labels_name[i], topk_labels_name],
output_label_name[i])
apply_cast(scope, output_label_name[i], output_cast_label_name[i],
container, to=onnx_proto.TensorProto.FLOAT)
apply_mul(scope, [output_cast_label_name[i], weights_val],
weighted_distance_name, container, broadcast=0)
container.add_node('ReduceSum', weighted_distance_name,
output_label_reduced_name[i], axes=[1])
else:
for i in range(len(classes)):
container.add_node('Equal', [labels_name[i], topk_labels_name],
output_label_name[i])
apply_cast(scope, output_label_name[i], output_cast_label_name[i],
container, to=onnx_proto.TensorProto.INT32)
container.add_node('ReduceSum', output_cast_label_name[i],
output_label_reduced_name[i], axes=[1])
concat_labels_name = scope.get_unique_variable_name('concat_labels')
cast_concat_labels_name = scope.get_unique_variable_name(
'cast_concat_labels')
normaliser_name = scope.get_unique_variable_name('normaliser')
apply_concat(scope, output_label_reduced_name,
concat_labels_name, container, axis=1)
apply_cast(scope, concat_labels_name, cast_concat_labels_name,
container, to=onnx_proto.TensorProto.FLOAT)
container.add_node('ReduceSum', cast_concat_labels_name,
normaliser_name, axes=[1],
name=scope.get_unique_operator_name('ReduceSum'))
apply_div(scope, [cast_concat_labels_name, normaliser_name],
operator.outputs[1].full_name, container, broadcast=1)
return operator.outputs[1].full_name
def _convert_k_neighbours_classifier(scope, container, operator, classes,
class_type, training_labels,
topk_values_name, topk_indices_name,
distance_power, weights):
"""
Convert KNeighboursClassifier model to onnx format.
"""
classes_name = scope.get_unique_variable_name('classes')
predicted_label_name = scope.get_unique_variable_name(
'predicted_label')
final_label_name = scope.get_unique_variable_name('final_label')
training_labels_name = scope.get_unique_variable_name(
'training_labels')
topk_labels_name = scope.get_unique_variable_name('topk_labels')
container.add_initializer(classes_name, class_type,
classes.shape, classes)
container.add_initializer(
training_labels_name, onnx_proto.TensorProto.INT32,
training_labels.shape, training_labels.ravel())
container.add_node(
'ArrayFeatureExtractor', [training_labels_name, topk_indices_name],
topk_labels_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
proba = _get_probability_score(scope, container, operator,
weights, topk_values_name, distance_power,
topk_labels_name, classes)
container.add_node('ArgMax', proba,
predicted_label_name,
name=scope.get_unique_operator_name('ArgMax'), axis=1)
container.add_node(
'ArrayFeatureExtractor', [classes_name, predicted_label_name],
final_label_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
if class_type == onnx_proto.TensorProto.INT32:
reshaped_final_label_name = scope.get_unique_variable_name(
'reshaped_final_label')
apply_reshape(scope, final_label_name, reshaped_final_label_name,
container, desired_shape=(-1,))
apply_cast(scope, reshaped_final_label_name,
operator.outputs[0].full_name, container,
to=onnx_proto.TensorProto.INT64)
else:
apply_reshape(scope, final_label_name,
operator.outputs[0].full_name, container,
desired_shape=(-1,))
def _convert_k_neighbours_regressor(scope, container, new_training_labels,
new_training_labels_shape,
topk_values_name, topk_indices_name,
distance_power, weights):
"""
Convert KNeighboursRegressor model to onnx format.
"""
training_labels_name = scope.get_unique_variable_name(
'training_labels')
topk_labels_name = scope.get_unique_variable_name('topk_labels')
container.add_initializer(
training_labels_name, onnx_proto.TensorProto.FLOAT,
new_training_labels_shape,
new_training_labels.ravel().astype(float))
container.add_node(
'ArrayFeatureExtractor', [training_labels_name, topk_indices_name],
topk_labels_name, op_domain='ai.onnx.ml',
name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
weighted_labels = topk_labels_name
final_op_type = 'ReduceMean'
if weights == 'distance':
weighted_distance_name = scope.get_unique_variable_name(
'weighted_distance')
reduced_weights_name = scope.get_unique_variable_name(
'reduced_weights')
weighted_labels_name = scope.get_unique_variable_name(
'weighted_labels')
weights_val = _get_weights(
scope, container, topk_values_name, distance_power)
apply_mul(scope, [topk_labels_name, weights_val],
weighted_distance_name, container, broadcast=0)
container.add_node(
'ReduceSum', weights_val, reduced_weights_name,
name=scope.get_unique_operator_name('ReduceSum'), axes=[1])
apply_div(scope, [weighted_distance_name, reduced_weights_name],
weighted_labels_name, container, broadcast=1)
weighted_labels = weighted_labels_name
final_op_type = 'ReduceSum'
return final_op_type, weighted_labels
def convert_sklearn_knn(scope, operator, container):
"""
Converter for KNN models to onnx format.
"""
# Computational graph:
#
# In the following graph, variable names are in lower case characters only
# and operator names are in upper case characters. We borrow operator names
# from the official ONNX spec:
# https://github.com/onnx/onnx/blob/master/docs/Operators.md
# All variables are followed by their shape in [].
# Note that KNN regressor and classifier share the same computation graphs
# until the top-k nearest examples' labels (aka `topk_labels` in the graph
# below) are found.
#
# Symbols:
# M: Number of training set instances
# N: Number of features
# C: Number of classes
# input: input
# output: output
# output_prob (for KNN Classifier): class probabilities
#
# Graph:
#
# input [1, N] --> SUB <---- training_examples [M, N]
# |
# V
# sub_results [M, N] ----> POW <---- distance_power [1]
# |
# V
# reduced_sum [M] <-- REDUCESUM <-- distance [M, N]
# |
# V
# length -> RESHAPE -> reshaped_result [1, M]
# |
# V
# n_neighbors [1] ----> TOPK
# |
# / \
# / \
# | |
# V V
# topk_indices [K] topk_values [K]
# |
# V
# ARRAYFEATUREEXTRACTOR <- training_labels [M]
# |
# V (KNN Regressor)
# topk_labels [K] ---------------------> REDUCEMEAN --> output [1]
# |
# |
# | (KNN Classifier)
# |
# |------------------------------------------------.
# /|\ (probability calculation) |
# / | \ |
# / | \ (label prediction) V
# / | \ CAST
# / | \__ |
# | | | V
# V V V cast_pred_label [K, 1]
# label0 -> EQUAL EQUAL ... EQUAL <- label(C-1) |
# | | | |
# V V V |
# output_label_0 [C] ... output_label_(C-1) [C] |
# | | | V
# V V V pred_label_shape [2] -> RESHAPE
# CAST CAST ... CAST |
# | | | V
# V V V reshaped_pred_label [K, 1]
# output_cast_label_0 [C] ... output_cast_label_(C-1) [C] |
# | | | |
# V V V |
# REDUCESUM REDUCESUM ... REDUCESUM |
# | | | |
# V V V |
# output_label_reduced_0 [1] ... output_label_reduced_(C-1) [1] |
# \ | / |
# \____ | ____/ |
# \ | ___/ |
# \ | / |
# \|/ |
# V |
# CONCAT --> concat_labels [C] |
# | |
# V |
# ARGMAX --> predicted_label [1] |
# | |
# V |
# output [1] <--- ARRAYFEATUREEXTRACTOR <- classes [C] |
# |
# |
# |
# ohe_model --> ONEHOTENCODER <-------------------------------------'
# |
# V
# ohe_result [n_neighbors, C] -> REDUCEMEAN -> reduced_prob [1, C]
# |
# V
# output_probability [1, C] <- ZipMap
knn = operator.raw_operator
training_examples = knn._fit_X.astype(float)
distance_power = knn.p if knn.metric == 'minkowski' else (
2 if knn.metric == 'euclidean' or knn.metric == 'l2' else 1)
if operator.type != 'SklearnNearestNeighbors':
training_labels = knn._y
training_examples_name = scope.get_unique_variable_name(
'training_examples')
sub_results_name = scope.get_unique_variable_name('sub_results')
abs_results_name = scope.get_unique_variable_name('abs_results')
distance_name = scope.get_unique_variable_name('distance')
distance_power_name = scope.get_unique_variable_name('distance_power')
reduced_sum_name = scope.get_unique_variable_name('reduced_sum')
topk_values_name = scope.get_unique_variable_name('topk_values')
topk_indices_name = scope.get_unique_variable_name('topk_indices')
reshaped_result_name = scope.get_unique_variable_name('reshaped_result')
negate_name = scope.get_unique_variable_name('negate')
negated_reshaped_result_name = scope.get_unique_variable_name(
'negated_reshaped_result')
container.add_initializer(
training_examples_name, onnx_proto.TensorProto.FLOAT,
training_examples.shape, training_examples.flatten())
container.add_initializer(distance_power_name,
onnx_proto.TensorProto.FLOAT,
[], [distance_power])
container.add_initializer(negate_name, onnx_proto.TensorProto.FLOAT,
[], [-1])
apply_sub(scope, [operator.inputs[0].full_name, training_examples_name],
sub_results_name, container, broadcast=1)
apply_abs(scope, sub_results_name, abs_results_name, container)
apply_pow(scope, [abs_results_name, distance_power_name], distance_name,
container)
container.add_node('ReduceSum', distance_name, reduced_sum_name,
name=scope.get_unique_operator_name('ReduceSum'),
axes=[1])
apply_reshape(scope, reduced_sum_name, reshaped_result_name, container,
desired_shape=[1, -1])
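    # TopK selects the k *largest* values, so the distances are negated first to
    # pick the k smallest ones (the nearest neighbours); apply_abs is used later
    # wherever the raw distance magnitudes are needed.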
apply_mul(scope, [reshaped_result_name, negate_name],
negated_reshaped_result_name, container, broadcast=1)
apply_topk(scope, negated_reshaped_result_name,
[topk_values_name, topk_indices_name], container,
k=knn.n_neighbors)
if operator.type == 'SklearnKNeighborsClassifier':
classes = knn.classes_
class_type = onnx_proto.TensorProto.STRING
if np.issubdtype(knn.classes_.dtype, np.floating):
class_type = onnx_proto.TensorProto.INT32
classes = classes.astype(np.int32)
elif np.issubdtype(knn.classes_.dtype, np.signedinteger):
class_type = onnx_proto.TensorProto.INT32
else:
classes = np.array([s.encode('utf-8') for s in classes])
_convert_k_neighbours_classifier(
scope, container, operator, classes, class_type, training_labels,
topk_values_name, topk_indices_name, distance_power,
knn.weights)
elif operator.type == 'SklearnKNeighborsRegressor':
multi_reg = (len(training_labels.shape) > 1 and
(len(training_labels.shape) > 2 or
training_labels.shape[1] > 1))
if multi_reg:
shape = training_labels.shape
irange = tuple(range(len(shape)))
new_shape = (shape[-1],) + shape[:-1]
perm = irange[-1:] + irange[:-1]
new_training_labels = training_labels.transpose(perm)
perm = irange[1:] + (0,)
shape = new_shape
else:
shape = training_labels.shape
new_training_labels = training_labels
final_op_type, weighted_labels = _convert_k_neighbours_regressor(
scope, container, new_training_labels, shape,
topk_values_name, topk_indices_name, distance_power, knn.weights)
if multi_reg:
means_name = scope.get_unique_variable_name('means')
container.add_node(
final_op_type, weighted_labels, means_name,
name=scope.get_unique_operator_name(final_op_type), axes=[1])
apply_transpose(scope, means_name, operator.output_full_names,
container, perm=perm)
else:
container.add_node(
final_op_type, weighted_labels, operator.output_full_names,
name=scope.get_unique_operator_name(final_op_type), axes=[1])
elif operator.type == 'SklearnNearestNeighbors':
container.add_node(
'Identity', topk_indices_name, operator.outputs[0].full_name,
name=scope.get_unique_operator_name('Identity'))
apply_abs(scope, topk_values_name, operator.outputs[1].full_name,
container)
register_converter('SklearnKNeighborsClassifier', convert_sklearn_knn)
register_converter('SklearnKNeighborsRegressor', convert_sklearn_knn)
register_converter('SklearnNearestNeighbors', convert_sklearn_knn)
|
import os
class Config(object):
SECRET_KEY = os.environ.get("SECRET_KEY")
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = os.environ.get('MAIL_PORT')
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS')
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = [""]
USERS_PER_PAGE = 1
FIELD_PER_ROWS = 3
LOG_TO_STDOUT = 1
WEB_NAME = '<YOUR WEB APP NAME>'
LANGUAGES = ['en', 'ru', 'uk']
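# Typical usage (sketch; assumes an application factory elsewhere in the project):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)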
|
#!/usr/bin/python
import os
import urllib2
import json
import commands
import re
import boto3
from boto3 import session
#Retrieving Instance Details such as Instance ID and Region from EC2 metadata service
instance_details = json.loads(urllib2.urlopen('http://169.254.169.254/latest/dynamic/instance-identity/document').read())
instanceid=instance_details['instanceId']
REGION=instance_details['region']
# Getting the AWS credentials from the IAM role
session = session.Session()
credentials = session.get_credentials()
def Activation(InstanceID):
#Getting Activation ID and Code from parameter store
ssm = boto3.client('ssm',region_name=REGION)
activation_id = ssm.get_parameter(Name='ActivationId')
ActivationID=activation_id['Parameter']['Value']
activation_code = ssm.get_parameter(Name='ActivationCode')
ActivationCode=activation_code['Parameter']['Value']
# Registering Instance to Activation and storing ManagedInstanceID for tagging
status_stop_service, Output_stop_service =commands.getstatusoutput("sudo stop amazon-ssm-agent")
cmd="sudo amazon-ssm-agent -register -y -code %s -id %s -region %s"%(ActivationCode,ActivationID,REGION)
status, output = commands.getstatusoutput(cmd)
m = re.search('(mi-)\w{17}',output.splitlines()[-1])
ManagedInstanceID=m.group(0)
if status==0:
status_start_service, Output_start_service =commands.getstatusoutput("sudo start amazon-ssm-agent")
print ManagedInstanceID
# Creating Tag for ManagedInstanceID tag
    ec2 = boto3.client('ec2', region_name=REGION)  # create the client locally rather than relying on the module-level global defined further down
    create_tags = ec2.create_tags(Resources=[str(InstanceID)],Tags=[{'Key':'managedinstanceid','Value':ManagedInstanceID}])
# Checking if Instance already has ManagedInstanceID Tag
ec2=boto3.client('ec2',region_name=REGION)
ec2_attached_tags = ec2.describe_instances(Filters=[{'Name': 'tag-key','Values': ['managedinstanceid']}],InstanceIds=[instanceid])
if not ec2_attached_tags['Reservations']:
Activation(instanceid)
else:
print "Instance is already registered to an Activation/Account"
|
from Pages.ContentPages.BasePage import Page
import time
from Pages.ServicePages import AuthPage
import pytest
import config
class User(object):
URL = 'http://{login}:{pas}@{url}/user/login'. \
format(login=config.http_login, pas=config.http_pass, url = config.domain)
login = 'adyaxadmin'
password = config.admin_pass
pages = [
['admin/config/system', 200],
['admin/modules', 200],
['admin/appearance', 200],
['admin/config/system/site-information', 200],
['admin/structure/webform', 200],
['node/26/webform/results/submissions', 200],
['admin/config/system/google-analytics', 200],
['admin/config/regional', 200],
['admin/config/search/metatag', 200],
['admin/config/services/addtoany', 200],
['admin/config/regional/region_switcher', 200],
['admin/people', 200],
['admin/people/roles', 200],
['admin/people/permissions', 200],
['admin/people/permissions', 200],
['admin/config/people/saml', 200],
['admin/config/services/captcha/recaptcha', 200],
['admin/config/system/shield', 200],
['admin/config/system/cron/jobs', 200],
['admin/config/system/lightning', 200],
['admin/config/system/file_mdm', 200],
['admin/config/system/acquia-connector', 200],
['admin/config/content', 200],
['admin/config/user-interface', 200],
['admin/config/development', 200],
['admin/config/media', 200],
['admin/config/services/rss-publishing', 200],
['admin/config/workflow', 200],
['admin/config/search', 200],
['admin/config/regional', 200],
['admin/reports', 200],
['admin/help', 200],
['admin/structure/block', 200],
['admin/structure/menu', 200],
['admin/config/search/path', 200],
['node/286/edit', 200], # home page
['media/446/edit', 200], # media asset
['node/1861/edit', 200], # common node
['admin/content', 200],
['admin/content/media', 200],
['admin/structure/menu', 200],
['admin/structure/taxonomy', 200],
['admin/config/services/map/google/settings', 200],
]
@pytest.allure.step('Login')
def log_in(self, driver):
driver.get(self.URL)
auth = AuthPage.AuthPage(driver)
if auth.is_popin():
auth.close_popin()
time.sleep(1)
if auth.is_cookie_banner():
auth.close_cookie_banner()
time.sleep(1)
auth.get_login_field().send_keys(self.login)
auth.get_pass_field().send_keys(self.password)
auth.get_auth_button().click()
@staticmethod
@pytest.allure.step('Logout')
def log_out(driver):
page = Page(driver)
page.open_add_content_page()
page.get_user_button().click()
page.get_logout_button().click()
@staticmethod
@pytest.allure.step('Logout')
def log_out_by_link(driver):
page = Page(driver)
page.open_url('/user/logout')
class ContentContributor(User):
login = 'content_contributor'
password = config.admin_pass
pages = [
['admin/config/system', 403],
['admin/modules', 403],
['admin/appearance', 403],
['admin/config/system/site-information', 403],
['admin/structure/webform', 403],
['node/26/webform/results/submissions', 403],
['admin/config/system/google-analytics', 403],
['admin/config/regional', 403],
['admin/config/search/metatag', 403],
['admin/config/services/addtoany', 403],
['admin/config/regional/region_switcher', 403],
['admin/people', 403],
['admin/people/roles', 403],
['admin/people/permissions', 403],
['admin/people/permissions', 403],
['admin/config/people/saml', 403],
['admin/config/services/captcha/recaptcha', 403],
['admin/config/system/shield', 403],
['admin/config/system/cron/jobs', 403],
['admin/config/system/lightning', 403],
['admin/config/system/file_mdm', 403],
['admin/config/system/acquia-connector', 403],
['admin/config/content', 403],
['admin/config/user-interface', 403],
['admin/config/development', 403],
['admin/config/media', 403],
['admin/config/services/rss-publishing', 403],
['admin/config/workflow', 403],
['admin/config/search', 403],
['admin/config/regional',403],
['admin/reports', 403],
['admin/help', 403],
['admin/structure', 200],
['admin/structure/menu', 200],
['admin/config/search/path', 403],
['node/286/edit', 200], # home page
['media/446/edit', 200], # media asset
['node/1861/edit', 200], # common node
['admin/content', 200],
['admin/content/media', 200],
['admin/structure/taxonomy', 403],
['admin/config/services/map/google/settings', 403],
]
class SiteManager(User):
login = 'site_manager'
password = config.admin_pass
pages = [
['admin/config/system', 200],
['admin/modules', 403],
['admin/appearance', 403],
['admin/config/system/site-information', 200],
['admin/structure/webform', 200],
['node/26/webform/results/submissions', 200],
['admin/config/system/google-analytics', 200],
['admin/config/regional', 200],
['admin/config/search/metatag', 200],
['admin/config/services/addtoany', 200],
['admin/config/regional/region_switcher', 200],
['admin/people', 200],
['admin/people/roles', 403],
['admin/people/permissions', 403],
['admin/people/permissions', 403],
['admin/config/people/saml', 403],
['admin/config/services/captcha/recaptcha', 200],
['admin/config/system/shield', 200],
['admin/config/system/cron/jobs', 403],
['admin/config/system/lightning', 403],
['admin/config/system/file_mdm', 403],
['admin/config/system/acquia-connector', 403],
['admin/config/content', 403],
['admin/config/user-interface', 403],
['admin/config/development', 200],
['admin/config/media', 403],
['admin/config/services/rss-publishing', 403],
['admin/config/workflow', 403],
['admin/config/search', 200],
['admin/config/regional', 200],
['admin/reports', 403],
['admin/help', 403],
['admin/structure/block', 403],
['admin/structure/menu', 200],
['admin/config/search/path', 200],
['node/286/edit', 200], # home page
['media/446/edit', 200], # media asset
['node/1861/edit', 200], # common node
['admin/content/media', 200],
['admin/content', 200],
['admin/structure/menu', 200],
['admin/structure/taxonomy', 200],
['admin/config/services/map/google/settings', 200],
]
class SiteBuilder(User):
login = 'site_builder'
password = config.admin_pass
pages = [
['admin/config/system', 403],
['admin/modules', 403],
['admin/appearance', 403],
['admin/config/system/site-information', 403],
['admin/structure/webform', 403],
['node/26/webform/results/submissions', 403],
['admin/config/system/google-analytics', 403],
['admin/config/regional', 403],
['admin/config/search/metatag', 403],
['admin/config/services/addtoany', 403],
['admin/config/regional/region_switcher', 403],
['admin/people', 403],
['admin/people/roles', 403],
['admin/people/permissions', 403],
['admin/people/permissions', 403],
['admin/config/people/saml', 403],
['admin/config/services/captcha/recaptcha', 403],
['admin/config/system/shield', 403],
['admin/config/system/cron/jobs', 403],
['admin/config/system/lightning', 403],
['admin/config/system/file_mdm', 403],
['admin/config/system/acquia-connector', 403],
['admin/config/content', 403],
['admin/config/user-interface', 403],
['admin/config/development', 403],
['admin/config/media', 403],
['admin/config/services/rss-publishing', 403],
['admin/config/workflow', 403],
['admin/config/search', 403],
['admin/config/regional', 403],
['admin/reports', 403],
['admin/help', 403],
['admin/structure/block', 403],
['admin/structure/menu', 403],
['admin/config/search/path', 403],
['node/286/edit', 200], # home page
['media/446/edit', 403], # media asset
['node/1861/edit', 403], # common node
['admin/content', 403],
['admin/content/media', 403],
['admin/structure/menu', 403],
['admin/structure/taxonomy', 403],
['admin/config/services/map/google/settings', 403],
]
class NotAuthorized(User):
pages = [
['admin/config/system', 403],
['admin/modules', 403],
['admin/appearance', 403],
['admin/config/system/site-information', 403],
['admin/structure/webform', 403],
['node/26/webform/results/submissions', 403],
['admin/config/system/google-analytics', 403],
['admin/config/regional', 403],
['admin/config/search/metatag', 403],
['admin/config/services/addtoany', 403],
['admin/config/regional/region_switcher', 403],
['admin/people', 403],
['admin/people/roles', 403],
['admin/people/permissions', 403],
['admin/people/permissions', 403],
['admin/config/people/saml', 403],
['admin/config/services/captcha/recaptcha', 403],
['admin/config/system/shield', 403],
['admin/config/system/cron/jobs', 403],
['admin/config/system/lightning', 403],
['admin/config/system/file_mdm', 403],
['admin/config/system/acquia-connector', 403],
['admin/config/content', 403],
['admin/config/user-interface', 403],
['admin/config/development', 403],
['admin/config/media', 403],
['admin/config/services/rss-publishing', 403],
['admin/config/workflow', 403],
['admin/config/search', 403],
['admin/config/regional', 403],
['admin/reports', 403],
['admin/help', 403],
['admin/structure/block', 403],
['admin/structure/menu', 403],
['admin/config/search/path', 403],
['node/286/edit', 403], # home page
['media/446/edit', 403], # media asset
['node/1861/edit', 403], # common node
['admin/content', 403],
['admin/content/media', 403],
['admin/structure/menu', 403],
['admin/structure/taxonomy', 403],
['admin/config/services/map/google/settings', 403],
]
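# How these page/status pairs are typically consumed (sketch; the actual test
# module lives elsewhere in the suite):
#   @pytest.mark.parametrize('path, code', SiteManager.pages)
#   def test_page_access(driver, path, code):
#       ...  # open the path and assert the response status equals `code`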
|
__author__ = 'Liu Lei'
import configparser
config=configparser.ConfigParser()
config['DEFAULT']={'ServerAliveInterval':'45','sex':'girl'}
config['f']={'aslkd':'wew'}
config['topsecret.server.com']={}
topsecret = config['topsecret.server.com']
topsecret['Host Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
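# Reading the file back (round-trip check; option keys are case-insensitive and
# values from DEFAULT are inherited by every section):
config_read = configparser.ConfigParser()
config_read.read('example.ini')
print(config_read['topsecret.server.com']['host port'])  # -> 50022
print(config_read['f']['sex'])                           # -> girl (from DEFAULT)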
|
import socket
import os
import sys
import glob
def enviar(nombre):
try:
f = open(nombre,'rb')
stats = os.stat(nombre)
tam = stats.st_size
#print(tam)
s_cliente.send(str(tam).encode())
l = f.read(1024)
while (l):
s_cliente.send(l)
l = f.read(1024)
print("Enviado")
f.close()
except IOError:
s_cliente.send(str(0).encode())
print ("Error Envio")
def recibir(nombre):
t = s_cliente.recv(1024).decode()
tam = int(t)
print(tam)
if(tam != 0):
f = open(nombre,'wb')
while (tam > 0):
l = s_cliente.recv(1024)
f.write(l)
            tam -= len(l)  # count the bytes actually received; sys.getsizeof also counts object overhead
#print(tam)
f.close()
print("Archivo '"+nombre+"' recibido")
else:
print("Error Subida")
def listar():
string = ""
    for file in glob.glob('*'):
        if file != nombreServidor:  # the character-class pattern used before could not exclude the whole filename
            string = string+"\n-> "+file
return string
HOST = 'localhost'
PORT = 1025
nombreServidor = "ftpserver.py"
socketServidor = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
socketServidor.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socketServidor.bind((HOST,PORT))
socketServidor.listen(1)
while True:
print("Esperando a que un usuario se conecte...")
s_cliente, addr = socketServidor.accept()
print("Usuario conectado")
while True:
print("Esperando Opcion...")
m = s_cliente.recv(1024).decode()
#print(m)
op = int(m)
if(op == 1):
lista = listar()
s_cliente.send(lista.encode())
if(op == 2):
print("Esperando peticion...")
nombre = s_cliente.recv(1024).decode()
if(nombre == nombreServidor):
s_cliente.send(str(0).encode())
else:
enviar(nombre)
if(op == 3):
print("Esperando archivo...")
nombre = s_cliente.recv(1024).decode()
#print(nombre.decode())
recibir(nombre)
if(op == 4):
break
s_cliente.close()
socketServidor.close()
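# Minimal client sketch for manual testing (assumes the opcode protocol above;
# kept as a comment because the server loop above never returns):
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('localhost', 1025))
#   c.send('1'.encode())            # op 1: request the file listing
#   print(c.recv(1024).decode())
#   c.send('4'.encode())            # op 4: disconnect
#   c.close()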
|
from django import forms
from .models import User
from django.core.exceptions import *
import re
USERNAME_PATTERN = re.compile(r'\w{4,20}')
class UserForm(forms.ModelForm):
def clean_username(self):
username = self.cleaned_data['username']
if not USERNAME_PATTERN.fullmatch(username):
raise ValidationError('用户名由字母、数字和下划线构成且长度为4-20个字符')
return username
def clean_password(self):
password = self.cleaned_data['password']
if len(password) < 8 or len(password) > 20:
raise ValidationError('无效的密码,密码长度为8-20个字符')
return password
    class Meta:
        model = User
        fields = '__all__'  # ModelForm requires fields or exclude to be declared
|
# -*- coding:utf-8 -*-
"""
Time : 2020/11/6 11:10
Author : Kexin Guan
Desc :
"""
|
# ##################
# 1. lists of floats
# ##################
import random
from deap import base
from deap import creator
from deap import tools
# negative weights lead to minimization
# positive weights are for maximization
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
IND_SIZE = 10
toolbox = base.Toolbox()
toolbox.register("attr_float", random.random)
# will have DNA IND_SIZE long
# can use array.array or numpy.ndarray instead of tools.initRepeat
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=IND_SIZE)
# ##################
# 2. permutations
# ##################
import random
from deap import base
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
IND_SIZE = 10
toolbox = base.Toolbox()
toolbox.register("indices", random.sample, range(IND_SIZE), IND_SIZE)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.indices)
print toolbox.individual()
print "a permutation of 0 to IND_SIZE"
# ##################
# 3. arithmetic expression
# ##################
import operator
from deap import base
from deap import creator
from deap import gp
from deap import tools
pset = gp.PrimitiveSet("MAIN", arity=1)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin, pset=pset)
toolbox = base.Toolbox()
toolbox.register("expr", gp.genRamped, pset=pset, min_=1, max_=2)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
print toolbox.individual()
print "individual in the form of an arithmetic expression in the form of a prefix tree"import random
# ##################
# 4. evolution strategy
# ##################
import array
import random
from deap import base
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode="d", fitness=creator.FitnessMin, strategy=None)
creator.create("Strategy", array.array, typecode="d")
def initES(icls, scls, size, imin, imax, smin, smax):
ind = icls(random.uniform(imin, imax) for _ in range(size))
ind.strategy = scls(random.uniform(smin, smax) for _ in range(size))
return ind
IND_SIZE = 10
MIN_VALUE, MAX_VALUE = -5., 5.
MIN_STRAT, MAX_STRAT = -1., 1.
toolbox = base.Toolbox()
toolbox.register("individual", initES, creator.Individual, creator.Strategy, IND_SIZE, MIN_VALUE, MAX_VALUE, MIN_STRAT, MAX_STRAT)
print toolbox.individual()
print "complete evolution strategy and a strategy vector"import random
# ##################
# 5. particles
# ##################
from deap import base
from deap import creator
from deap import tools
creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
creator.create("Particle", list, fitness=creator.FitnessMax, speed=None, smin=None, smax=None, best=None)
def initParticle(pcls, size, pmin, pmax, smin, smax):
part = pcls(random.uniform(pmin, pmax) for _ in xrange(size))
part.speed = [random.uniform(smin, smax) for _ in xrange(size)]
part.smin = smin
part.smax = smax
return part
toolbox = base.Toolbox()
# the first arg is the name of the future method used to instantiate an individual
toolbox.register("particle", initParticle, creator.Particle, size=2, pmin=-6, pmax=6, smin=-3, smax=3)
# here the "particle" name registered just above is used
print toolbox.particle()
print "particle with speed vector maximizing two objectives"
# ##################
# 6. funky ones
# ##################
import random
from deap import base
from deap import creator
from deap import tools
creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
INT_MIN, INT_MAX = 5, 10
FLT_MIN, FLT_MAX = -0.2, 0.8
N_CYCLES = 4
toolbox.register("attr_int", random.randint, INT_MIN, INT_MAX)
toolbox.register("attr_flt", random.uniform, FLT_MIN, FLT_MAX)
toolbox.register("individual", tools.initCycle, creator.Individual,
(toolbox.attr_int, toolbox.attr_flt), n=N_CYCLES)
print toolbox.individual()  # section 6 registered "individual", not "particle"
print "Individual in the form of int float int float etc"
# ##################
# 7. bag populations
# ##################
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.population(n=100)
# ##################
# 8. grid populations - where individuals are in a grid where neighbors affect each other
# ##################
toolbox.register("row", tools.initRepeat, list, toolbox.individual, n=N_COL)
toolbox.register("population", tools.initRepeat, list, toolbox.row, n=N_ROW)
# ##################
# 9. swarm population - where each individual knows the best point the swarm has seen in the past
# ##################
creator.create("Swarm", list, gbest=None, gbestfit=creator.FitnessMax)
toolbox.register("swarm", tools.initRepeat, creator.Swarm, toolbox.particle)
# ##################
# 10. demes - sub-populations in a population. they don't affect other sub-populations
# ##################
toolbox.register("deme", tools.initRepeat, list, toolbox.individual)
# ##################
# 11. seeding populations
# ##################
DEME_SIZES = 10, 50, 100
population = [toolbox.deme(n=i) for i in DEME_SIZES]
import json
from deap import base
from deap import creator
creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
creator.create("Individual", list, fitness=creator.FitnessMax)
def initIndividual(icls, content):
return icls(content)
def initPopulation(pcls, ind_init, filename):
contents = json.load(open(filename, "r"))
return pcls(ind_init(c) for c in contents)
toolbox = base.Toolbox()
toolbox.register("individual_guess", initIndividual, creator.Individual)
toolbox.register("population_guess", initPopulation, list, toolbox.individual_guess, "my_guess.json")
population = toolbox.population_guess()
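# ##################
# 12. wiring it together (illustrative sketch, not part of the original notes)
# ##################
# evaluate/mate/mutate/select must be registered before running an algorithm;
# the evaluation below assumes the JSON individuals are lists of numbers and
# returns two values to match the FitnessMax weights (1.0, 1.0) used above.
from deap import tools
from deap import algorithms
def evaluate(individual):
    return sum(individual), max(individual)
toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
toolbox.register("select", tools.selTournament, tournsize=3)
# pop = toolbox.population_guess()
# algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=10, verbose=False)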
|
#coding=utf-8
import re
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField
from wtforms import TextAreaField, IntegerField
from wtforms import ValidationError
from wtforms.validators import Length, Email, EqualTo, DataRequired, URL, NumberRange, Optional
from jobplus.models import User, db, Company, Job
class RegisterForm(FlaskForm):
"""
求职者注册
"""
# 默认角色是用户
role = 10
username = StringField("用户名", validators=[DataRequired("请输入用户名。"),
Length(3,24, message="用户名长度要在3~24个字符之间。"),
Optional(strip_whitespace=True)])
email = StringField("邮箱", validators=[DataRequired("请输入邮箱。"),
Email(message="请输入合法的email地址。")])
password = PasswordField("密码", validators=[DataRequired("请输入密码。"),
Length(6, 24, message="密码长度要在6~24个字符之间。"),
Optional(strip_whitespace=True)
])
repeat_password = PasswordField("重复密码", validators=[DataRequired("请确认密码。"),
EqualTo("password"),
Optional(strip_whitespace=True)
])
submit = SubmitField("提交")
def create_user(self):
"""
        Create the user
"""
user = User()
user.username = self.username.data
user.email = self.email.data
user.password = self.password.data
user.role = self.role
db.session.add(user)
db.session.commit()
return user
def validate_username(self, field):
if len(re.sub("[0-9a-zA-Z_]", "", field.data)) != 0:
raise ValidationError("用户名只能包含数字、字母、下划线。")
if User.query.filter_by(username=field.data).first():
raise ValidationError("用户名已经存在。")
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError("邮箱已经存在")
class CompanyRegisterForm(RegisterForm):
""" 企业注册
"""
    # role 20 marks a company account
role = 20
username = StringField("企业名称", validators=[DataRequired("请输入用户名。"),
Length(3, 24, message="用户名长度要在3~24个字符之间。"),
Optional(strip_whitespace=True)])
email = StringField("邮箱", validators=[DataRequired("请输入邮箱。"),
Email(message="请输入合法的email地址。")])
password = PasswordField("密码", validators=[DataRequired("请输入密码。"),
Length(6, 24, message="密码长度要在6~24个字符之间。"),
Optional(strip_whitespace=True)
])
repeat_password = PasswordField("重复密码", validators=[DataRequired("请确认密码。"),
EqualTo("password"),
Optional(strip_whitespace=True)
])
submit = SubmitField("提交")
def create_user(self):
"""创建企业用户
"""
user = User()
user.username = self.username.data
user.email = self.email.data
user.password = self.password.data
user.role = self.role
db.session.add(user)
db.session.commit()
return user
def validate_username(self, field):
if len(re.sub("[0-9a-zA-Z_]", "", field.data)) != 0:
raise ValidationError("用户名只能包含数字、字母、下划线。")
if User.query.filter_by(username=field.data).first():
raise ValidationError("用户名已经存在。")
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('邮箱已经存在')
class LoginForm(FlaskForm):
email = StringField("邮箱", validators=[DataRequired(message="请输入邮箱。"),
Email(message="邮箱格式不正确。"),
Optional(strip_whitespace=True)])
password = PasswordField("密码", validators=[DataRequired("请输入密码。"),
Length(6, 24),
Optional(strip_whitespace=True)])
remember_me = BooleanField("记住我")
submit = SubmitField("提交")
def validate_email(self, field):
if field.data and not User.query.filter_by(email=field.data).first():
raise ValidationError("邮箱未注册。")
def validate_password(self, field):
        user = User.query.filter_by(email=self.email.data).first()  # look the user up by the email field, not the password
if user and not user.check_password(field.data):
raise ValidationError("密码错误。")
class UserProfileForm(FlaskForm):
"""
    User profile form
"""
username = StringField("姓名")
email = StringField("邮箱", validators=[DataRequired(), Email()])
password = PasswordField("密码(不填写保持不变)")
phone = StringField("手机号")
work_years = IntegerField("工作年限")
resume_url = StringField("简历地址")
submit = SubmitField("提交")
def validate_phone(self, field):
"""
        Simple phone-number validation
"""
phone = field.data
if not re.match("^1(3[0-9]|4[57]|5[0-35-9]|7[0135678]|8[0-9])\\d{8}$", phone):
raise ValidationError("请输入有效的手机号。")
def updated_profile(self, user):
"""
        Update the user profile
"""
user.username = self.username.data
user.email = self.email.data
if self.password.data:
user.password = self.password.data
user.phone = self.phone.data
user.work_years = self.work_years.data
user.resume_url = self.resume_url.data
db.session.add(user)
db.session.commit()
class CompanyProfileForm(FlaskForm):
"""
    Company profile form
"""
name = StringField("企业名称")
email = StringField("邮箱", validators=[DataRequired(message="请输入邮箱。"), Email(message="邮箱格式不正确。")])
password = PasswordField("密码(不填写保持不变)")
#slug = StringField("Slug", validators=[DataRequired(""), Length(3, 24, message="不要太长,也不要太短(3, 24)。")])
logo = StringField("Logo")
site = StringField("公司网站", validators=[Length(0, 64)])
location = StringField("地址", validators=[Length(0, 64)])
description = StringField("一句话描述", validators=[Length(0, 100)])
about = TextAreaField("公司详情", validators=[Length(0, 1500)])
submit = SubmitField("提交")
def validate_phone(self, field):
"""
        Simple phone-number validation
"""
phone = field.data
if not re.match("^1(3[0-9]|4[57]|5[0-35-9]|7[0135678]|8[0-9])\\d{8}$", phone):
raise ValidationError("请输入有效的手机号。")
def updated_profile(self, user):
"""
        Update the company profile
"""
user.username = self.name.data
user.email = self.email.data
if self.password.data:
user.password = self.password.data
if user.company:
company = user.company
else:
company = Company()
company.user_id = user.id
self.populate_obj(company)
db.session.add(user)
db.session.add(company)
db.session.commit()
class JobForm(FlaskForm):
name = StringField("职位名称")
salary_low = IntegerField("最低薪资")
salary_high = IntegerField("最高薪资")
location = StringField("工作地点(多个用,隔开)")
tags = StringField("职位标签(多个用,隔开)")
degree_requirement = SelectField(
"学历要求",
choices=[
("不限", "不限"),
("专科", "专科"),
("本科", "本科"),
("硕士", "硕士"),
("博士", "博士")
]
)
experience_requirement = SelectField(
"经验要求(年)",
choices=[
("不限", "不限"),
("1年", "1年"),
("2年", "2年"),
("3年", "3年"),
("1-3年", "1-3年"),
("3-5年", "3-5年"),
("5年以上", "5年以上")
]
)
description = TextAreaField("职位描述", validators=[Length(0, 1500)])
submit = SubmitField("发布")
def create_job(self, company):
job = Job()
self.populate_obj(job)
job.company_id = company.id
db.session.add(job)
db.session.commit()
return job
def update_job(self, job):
self.populate_obj(job)
db.session.add(job)
db.session.commit()
return job
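# Typical view-side usage (sketch; route and redirect target are assumptions):
#   form = RegisterForm()
#   if form.validate_on_submit():
#       form.create_user()
#       return redirect(url_for('front.login'))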
|
# symmetric difference
def symmetric_difference(num1,num2):
num1 = set(list(map(int, num1)))
num2 = set(list(map(int, num2)))
num = sorted(num1.symmetric_difference(num2), key=int, reverse=True)
for i in num[::-1]:
print(i)
nums1 = input()
nums2 = input().split()
nums3 = input()
nums4 = input().split()
symmetric_difference(nums2,nums4)
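# e.g. second input line "1 2 3" and fourth line "2 3 4" -> prints 1 then 4
# (the elements that appear in exactly one of the two sets, in ascending order)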
|
import re
import tosca.basetypes
def f_eq(v, _):
return lambda x : x == v
def f_gt(v, _):
return lambda x : x > v
def f_ge(v, _):
return lambda x : x >= v
def f_lt(v, _):
return lambda x : x < v
def f_le(v, _):
return lambda x : x <= v
def f_ir(v, t):
if isinstance(t, int) and isinstance(v, list):
r = Range.from_list(v)
return lambda x : x in r
else:
return lambda _: False
def f_vv(v, _):
return lambda x : x in v
def f_ln(v, t):
    # length/min_length/max_length take an integer value, not a list
    if isinstance(v, int):
        return lambda x : len(x) == v
    else:
        return lambda _: False
def f_mn(v, t):
    if isinstance(v, int):
        return lambda x : len(x) >= v
    else:
        return lambda _: False
def f_mx(v, t):
    if isinstance(v, int):
        return lambda x : len(x) <= v
    else:
        return lambda _: False
def f_re(v, t):
if isinstance(t, basestring) and isinstance(v, basestring):
return lambda x : re.search(v, x) is not None
else:
return lambda _: False
def parse_constraint(expr, typename):
constraint_map = {
'equal': f_eq,
'greater_than': f_gt,
'greater_or_equal': f_ge,
'less_than': f_lt,
'less_or_equal': f_le,
'in_range': f_ir,
'valid_values': f_vv,
'length': f_ln,
'min_length': f_mn,
'max_length': f_mx,
'pattern': f_re }
if isinstance(expr, dict) and len(expr) == 1:
key = expr.keys()[0]
if key in constraint_map.keys():
return constraint_map[key](expr[key], typename)
print "Error: {} is not a valid expression for a constraint".format(expr)
return lambda _ : False
def parse_constraints(list_expr, typename):
if list_expr is None:
list_expr = []
if isinstance(list_expr, list):
f_list = [ parse_constraint(expr, typename) for expr in list_expr ]
return lambda x : all(map(lambda f : f(x), f_list))
else:
print "'{}' is not a list".format(list_expr)
return lambda _ : False
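# Usage sketch (illustrative; constraint clauses follow the TOSCA grammar above):
#   check = parse_constraints([{'greater_or_equal': 0}, {'less_than': 10}], 'integer')
#   check(5)   # -> True
#   check(42)  # -> False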
|
import requests
p={'page': 2}
res=requests.get("https://reqres.in/api/users", params=p)
assert res.status_code==200, "Code does not match."
print(res.json())
print(res.headers)
print(res.encoding)
print(res.url)
json_res=res.json()
print(json_res['total'])
print(json_res['total_pages'])
assert (json_res['total_pages'])==2, "Page count does not match."
print(json_res['data'][0]['email'])
assert (json_res['data'][0]['email']).endswith('reqres.in'),"Email not matching."
print(json_res['data'][1]["last_name"])
print(json_res['support']['url'])
|
A, B, C, D = map( int, input().split())
if A+B < C+D:
print('Right')
elif A+B == C+D:
print('Balanced')
else:
print('Left')
|
# . Copyright (C) 2020 Jhonathan P. Banczek (jpbanczek@gmail.com)
#
import unittest
import os
import datetime
from utils import (
str2float,
namefile2date,
date2filename,
_format_item,
format_file,
all_files,
)
from models import Arquivo, Folha
#####################################################################
# models.py
#####################################################################
class TestModels(unittest.TestCase):
def test_Arquivo(self):
        # create the database
arquivo = Arquivo(db_name="test.sqlite3")
f = Folha(db_name="test.sqlite3")
f.close()
param = "WHERE 1"
# delete
arquivo.delete(param)
data = [("arq1", 1_001), ("arq2", 2_001)]
# insert
ultimo_id = arquivo.insert(data)
self.assertIsNot(ultimo_id, 0)
self.assertIsNot(ultimo_id, None)
fields, params = "descricao, itens", "where 1"
# select
data_db = arquivo.select(fields, params)
self.assertEqual(data, data_db)
# select
fields, params = "descricao", "where 1"
data_db = arquivo.select(fields, params)
data = [("arq1",), ("arq2",)]
self.assertEqual(data, data_db)
# select
fields, params = "descricao, itens", 'where descricao like "arq1"'
data_db = arquivo.select(fields, params)
data = [("arq1", 1_001)]
self.assertEqual(data, data_db)
# update
data = ("arquivo-atualizado", 10_000)
field_set = f"descricao = '{data[0]}', itens = {data[1]}"
params = 'where descricao like "arq1"'
arquivo.update(field_set, params)
fields, params = "descricao, itens", f"WHERE descricao like '{data[0]}'"
data_db = arquivo.select(fields, params)
self.assertEqual([data], data_db)
arquivo.close()
def test_Folha(self):
folha = Folha(db_name="test.sqlite3")
param = "WHERE 1"
folha.delete(param)
datas = [
(
"01/2012",
"orgaox",
"situacaox",
"nomex",
"cpfx",
"cargox",
1000.1,
50.5,
1235.9,
"vinculox",
"matriculax",
1
),
(
"02/2012",
"orgaoy",
"situacaoy",
"nomey",
"cpfy",
"cargoy",
1000.2,
60.5,
2222.2,
"vinculoy",
"matriculay",
2
),
(
"03/2012",
"orgaoz",
"situacaoz",
"nomez",
"cpfz",
"cargoz",
1000.3,
70.5,
3333.3,
"vinculoz",
"matriculaz",
2
),
(
"04/2012",
"orgaow",
"situacaow",
"nomew",
"cpfw",
"cargozw",
1000.4,
80.5,
4444.4,
"vinculow",
"matriculaw",
1
),
]
ultimo_id = folha.insert(datas)
self.assertIsNot(ultimo_id, 0)
self.assertIsNot(ultimo_id, None)
fields = """competencia, orgao, situacao, nome, cpf, cargo,
rem_base, outras_verbas, rem_posdeducoes,
vinculo, matricula, id_arquivo"""
params = "where 1"
data_db = folha.select(fields, params)
self.assertEqual(datas, data_db)
fields, params = "orgao", "where 1"
data_db = folha.select(fields, params)
self.assertEqual([(i[1],) for i in datas], data_db)
# update
data = ("competencia-atualizacao", 1000.50)
field_set = f"competencia = '{data[0]}', rem_base = {data[1]}"
params = 'where competencia like "04/2012"'
folha.update(field_set, params)
fields, params = "*", f"WHERE competencia like '{data[0]}'"
data_db = folha.select(fields, params)
data_db = data_db[0]
self.assertEqual(data, (data_db[1], data_db[7]))
folha.close()
#####################################################################
# utils.py
#####################################################################
class TestUtils(unittest.TestCase):
def test_str2float(self):
valor = str2float("12,2")
self.assertEqual(valor, 12.2)
valor = str2float("12345678,77")
self.assertEqual(valor, 12345678.77)
def test_namefile2date(self):
path1 = "/path/path2/path3/path4/folha-01-2000.txt"
path2 = "path/path2/folha-02-2020.txt"
path3 = "/path/path2/folha-03-2018.txt"
self.assertEqual(
datetime.datetime.strptime("01-2000", "%m-%Y"), namefile2date(path1)
)
self.assertEqual(
datetime.datetime.strptime("02-2020", "%m-%Y"), namefile2date(path2)
)
self.assertEqual(
datetime.datetime.strptime("03-2018", "%m-%Y"), namefile2date(path3)
)
def test_date2filename(self):
_path = os.getcwd() + "/arquivos"
path1 = f"{_path}/folha-02-2020.txt"
path2 = f"{_path}/folha-03-2018.txt"
path3 = f"{_path}/folha-12-2001.txt"
self.assertEqual(
date2filename(datetime.datetime.strptime("02-2020", "%m-%Y"), _path), path1
)
self.assertEqual(
date2filename(datetime.datetime.strptime("03-2018", "%m-%Y"), _path), path2
)
self.assertEqual(
date2filename(datetime.datetime.strptime("12-2001", "%m-%Y"), _path), path3
)
def test__format_item(self):
s = "1{0}/201{0};orgao{0};situacao{0};nome{0};cpf{0};cargo{0};1{0},{0};100{0},{0};200{0},3{0};vinculo{0};matricula{0}"
itens = [s.format(i) for i in range(1, 5)]
for item in itens:
a, b, c, d, e, f, g, h, i, j, k = item.split(";")
g = str2float(g)
h = str2float(h)
i = str2float(i)
resp = (a, b, c, d, e, f, g, h, i, j, k)
self.assertEqual(_format_item(item), resp)
def test_format_file(self):
s = "1{0}/201{0};orgao{0};situacao{0};nome{0};cpf{0};cargo{0};1{0},{0};100{0},{0};200{0},3{0};vinculo{0};matricula{0}"
itens = [s.format(i) for i in range(1, 5)]
resp = []
for item in itens:
a, b, c, d, e, f, g, h, i, j, k = item.split(";")
g = str2float(g)
h = str2float(h)
i = str2float(i)
resp.append((a, b, c, d, e, f, g, h, i, j, k))
self.assertEqual(format_file(itens), resp)
def test_all_files(self):
        # create the directory and the files
path_test = f"{os.getcwd()}/test-folder"
# os.rmdir(path_test)
os.mkdir(path_test)
nomes = []
for i in range(10):
with open("{0}/folha-09-200{1}.txt".format(path_test, i), "w") as file:
file.write("test")
file.flush()
nomes.append("{0}/folha-09-200{1}.txt".format(path_test, i))
nomes_arquivos = all_files(path=path_test)
self.assertEqual(nomes_arquivos, nomes)
        # remove all created files
for file in nomes:
if os.path.isfile(file):
os.remove(file)
        # delete the directory
os.rmdir(path_test)
if __name__ == "__main__":
unittest.main()
|
from django.urls import path
from .views import review, PostListView, PostDetailView, PostCreateView, ReviewCreateView, ReviewDetailView, review_comment_create, ReviewListView
urlpatterns = [
path('board', PostListView.as_view(), name='board'),
path('board/<int:pk>', PostDetailView.as_view(), name='board_detail'),
path('board/new', PostCreateView.as_view(), name='board_create'),
path('review', ReviewListView.as_view(), name='review'),
path('review/new', ReviewCreateView.as_view(), name='review_create'),
path('review/<int:pk>', ReviewDetailView.as_view(), name='review_detail'),
path('review/<int:pk>/comments', review_comment_create, name='review_comment_create'),
# path('post/<int:pk>/comments', post_comment_create, name='post_comment_create'),
]
|
import numpy as np
import os,sys
import tensorflow as tf
import cv2 as cv
sys.path.append('../')
from models.research.object_detection.utils import label_map_util
from models.research.object_detection.utils import visualization_utils as vis_util
PATH_TO_CKPT = "data/save/frozen_inference_graph.pb"
PATH_TO_LABELS = "data/label_map.pbtxt"
CLASS_NUM = 1
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=CLASS_NUM, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
cap = cv.VideoCapture(0)
while True:
_, frame = cap.read()
image_np_expanded = np.expand_dims(frame, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
frame,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=.97)
cv.imshow("detections", frame)
if cv.waitKey(1) >= 0:
break
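# Cleanup after the loop: release the capture device and close the preview window.
cap.release()
cv.destroyAllWindows()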
|
import json
from breeding.models import Source
from users.models import UserProfile
userprofile_filter = UserProfile.objects.filter(is_signup=True)
winner_list = list()
for userprofile in userprofile_filter:
source_count = Source.objects.filter(userprofile=userprofile,
qualified_status='已通過').count()
if source_count < 3:
continue
winner_dict = {'uid': str(userprofile.user_uuid),
'name': userprofile.name,
'phone': userprofile.phone}
winner_list.append(winner_dict)
with open('winners.json', 'w') as myfile:
json.dump(winner_list, myfile, ensure_ascii=False, indent=4)
|
from keras.preprocessing import text
import pandas as pd
import pickle
import util
print('loading data...')
df_train = pd.read_csv(util.train_data)
df_test = pd.read_csv(util.test_data)
df_train['comment_text'] = df_train['comment_text'].fillna('UN')
df_test['comment_text'] = df_test['comment_text'].fillna('UN')
print('df_test shape: {0}'.format(df_test.shape))
train_comments = df_train['comment_text'].tolist()
test_comments = df_test['comment_text'].tolist()
corpus = train_comments + test_comments
print('corpus size: {0}'.format(len(corpus)))
tk = text.Tokenizer(num_words=1000)
tk.fit_on_texts(corpus)
tf_idf_train = tk.texts_to_matrix(train_comments, mode='tfidf')
tf_idf_test = tk.texts_to_matrix(test_comments, mode='tfidf')
print(tf_idf_train[:10])
pickle.dump(tf_idf_train, open(util.tmp_tf_idf_train, 'wb'))
pickle.dump(tf_idf_test, open(util.tmp_tf_idf_test, 'wb'))
|
import pandas as pd
# Convert the dict into a DataFrame with the DataFrame() function and store it in df
exam_data = {'이름':['서준','우현','인아'],'수학':[90,80,70],'영어':[98,89,95],'음악':[85,95,100],'체육':[100,90,90]}
df = pd.DataFrame(exam_data)
print("# '이름'열을 새로운 인덱스로 지정하고, df객체에 변경사항 반영")
df.set_index('이름',inplace=True)
print(df)
print()
print("# 데이터프레임 df의 특정원소 1개 선택('서준'의 '음악' 점수)")
a = df.loc['서준','음악']
print(a,type(a),sep=' ')
b = df.iloc[0,2]
print(b,type(b),sep=' ')
print()
print("# 데이터프레임 df의 특정원소 2개 이상 선택('서준', '우현'의 '음악','체육' 점수")
g = df.loc[['서준','우현'],['음악','체육']]
print(g,type(g),sep='\n')
h = df.iloc[[0,1],[2,3]]
print(h,type(h),sep='\n')
i = df.loc['서준':'우현','음악':'체육']
print(i,type(i),sep='\n')
j = df.iloc[0:2, 2:]
print(j,type(j),sep='\n')
|
from download_task_center import DownloadTaskCenter
from spider_lib import log_print
info = '''
Program description:
This program collects the images from https://alpha.wallhaven.cc/random into the working directory
Author: Jerry
Website: www.jerryshell.cn
Version: v0.4
'''
log_print(info)
home_page_url = 'https://alpha.wallhaven.cc/random'
get_home_page_count = int(input('One request to the home page yields 24 images. How many requests should be made?\n>>> '))
thread_count = int(input('How many download threads should be started? (recommended: 4)\n>>> '))
task_center = DownloadTaskCenter(home_page_url, thread_count, get_home_page_count * 24)
task_center.drive_page_analyze()
|
from state import State
class StateObserver(object):
def __init__(self, delegate):
assert hasattr(delegate, 'deploy')
self._delegate = delegate
def update(self, _, payload):
if payload['old'] == State.STARTING and payload['new'] == State.RUNNING:
self._delegate.deploy()
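# A hypothetical usage sketch (Deployer is an assumption, not part of the original
# module): any object exposing a deploy() method can act as the delegate, and the
# observer fires it exactly on the STARTING -> RUNNING transition.
class Deployer(object):
    def deploy(self):
        print('deploying')

observer = StateObserver(Deployer())
observer.update(None, {'old': State.STARTING, 'new': State.RUNNING})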
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Modifications Copyright 2018 Peter Mølgaard Sørensen
# Adapted from freeze.py, to create a checkpoint file with quantized weights
#
r"""
Loads a checkpoint file and quantizes its weights based on the --bit_widths command line argument.
The quantized weights are then saved to a separate checkpoint file, which can be converted to a
GraphDef file using freeze.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import math
import tensorflow as tf
import numpy as np
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
import input_data
import models
from tensorflow.python.framework import graph_util
FLAGS = None
def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
clip_stride_ms, window_size_ms, window_stride_ms,
dct_coefficient_count, model_architecture, input_type,
model_size_info):
"""Creates an audio model with the nodes needed for inference.
Uses the supplied arguments to create a model, and inserts the input and
output nodes that are needed to use the graph for inference.
Args:
wanted_words: Comma-separated list of the words we're trying to recognize.
sample_rate: How many samples per second are in the input audio files.
clip_duration_ms: How many samples to analyze for the audio pattern.
clip_stride_ms: How often to run recognition. Useful for models with cache.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
dct_coefficient_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
"""
words_list = input_data.prepare_words_list(wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, dct_coefficient_count,100)
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.placeholder(tf.string, [], name='wav_data')
decoded_sample_data = contrib_audio.decode_wav(
wav_data_placeholder,
desired_channels=1,
desired_samples=model_settings['desired_samples'],
name='decoded_sample_data')
#input_spectrogram = tf.placeholder(tf.float32, shape=[49,513], name='speech_signal')
spectrogram = contrib_audio.audio_spectrogram(
decoded_sample_data.audio,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
#spectrogram = input_spectrogram
if (input_type == 'log-mel'):
print("log-mel energies")
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = spectrogram.shape[-1].value # magnitude_spectrograms.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 20.0, 4000.0, model_settings['dct_coefficient_count']
linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, model_settings['sample_rate'], lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrogram, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for `tf.tensordot` does not currently handle this case.
mel_spectrograms.set_shape(spectrogram.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_offset = 1e-6
log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)
fingerprint_input = log_mel_spectrograms
elif (input_type == 'MFCC'):
print('MFCC-features')
fingerprint_input = contrib_audio.mfcc(
spectrogram,
decoded_sample_data.sample_rate,
dct_coefficient_count=model_settings['dct_coefficient_count'])
#fingerprint_input = tf.placeholder(tf.float32,shape=[49,20],name='fingerprint')
fingerprint_frequency_size = model_settings['dct_coefficient_count']
fingerprint_time_size = model_settings['spectrogram_length']
reshaped_input = tf.reshape(fingerprint_input, [
-1, fingerprint_time_size * fingerprint_frequency_size
])
logits,dropout_prob = models.create_model(
reshaped_input, model_settings, model_architecture, model_size_info,
is_training=True, runtime_settings=runtime_settings)
# Create an output to use for inference.
tf.nn.softmax(logits, name='labels_softmax')
def main(_):
print(FLAGS.model_size_info)
reg_conv_bits = FLAGS.bit_widths[0]
dw_conv_bits = FLAGS.bit_widths[1]
pw_conv_bits = FLAGS.bit_widths[2]
fc_bits = FLAGS.bit_widths[3]
activations_bits = FLAGS.bit_widths[4]
print("Regular Conv-weights bit width: " +str(reg_conv_bits))
print("Depthwise Conv-weights bit width: " + str(dw_conv_bits))
print("Pointwise Conv-weights bit width: " + str(pw_conv_bits))
print("FC-weights bit width: " + str(fc_bits))
print("Activations bit width: " + str(activations_bits))
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count, 100)
clip_stride_ms = 260
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.placeholder(tf.string, [], name='wav_data')
decoded_sample_data = contrib_audio.decode_wav(
wav_data_placeholder,
desired_channels=1,
desired_samples=model_settings['desired_samples'],
name='decoded_sample_data')
# input_spectrogram = tf.placeholder(tf.float32, shape=[49,513], name='speech_signal')
spectrogram = contrib_audio.audio_spectrogram(
decoded_sample_data.audio,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
# spectrogram = input_spectrogram
if (FLAGS.input_type == 'log-mel'):
print("log-mel energies")
# Warp the linear-scale, magnitude spectrograms into the mel-scale.
num_spectrogram_bins = spectrogram.shape[-1].value # magnitude_spectrograms.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 20.0, 4000.0, model_settings['dct_coefficient_count']
linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, model_settings['sample_rate'], lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrogram, linear_to_mel_weight_matrix, 1)
# Note: Shape inference for `tf.tensordot` does not currently handle this case.
mel_spectrograms.set_shape(spectrogram.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
log_offset = 1e-6
log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)
fingerprint_input = log_mel_spectrograms
elif (FLAGS.input_type == 'MFCC'):
print('MFCC-features')
fingerprint_input = contrib_audio.mfcc(
spectrogram,
decoded_sample_data.sample_rate,
dct_coefficient_count=model_settings['dct_coefficient_count'])
# fingerprint_input = tf.placeholder(tf.float32,shape=[49,20],name='fingerprint')
fingerprint_frequency_size = model_settings['dct_coefficient_count']
fingerprint_time_size = model_settings['spectrogram_length']
reshaped_input = tf.reshape(fingerprint_input, [
-1, fingerprint_time_size * fingerprint_frequency_size
])
training = tf.placeholder(tf.bool, name='training')
logits, net_c1 = models.create_model(
reshaped_input, model_settings, FLAGS.model_architecture, FLAGS.model_size_info,
is_training=True, runtime_settings=runtime_settings)
# Create an output to use for inference.
tf.nn.softmax(logits, name='labels_softmax')
saver = tf.train.Saver(tf.global_variables())
tf.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
for v in tf.trainable_variables():
print(v.name)
v_backup = tf.trainable_variables()
eps = 0.001
# Layer information [weights, biases, channel means, channel variances, input fractional bits, output fractional bits, name for .h file]
conv_1 = ['DS-CNN/conv_1/weights', 'DS-CNN/conv_1/biases', 'DS-CNN/conv_1/batch_norm/moving_mean',
'DS-CNN/conv_1/batch_norm/moving_variance', 2, 5, 'CONV1', 'DS-CNN/conv_1/batch_norm/beta']
dw_conv_1 = ['DS-CNN/conv_ds_1/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_1/depthwise_conv/biases',
'DS-CNN/conv_ds_1/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_1/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV1',
'DS-CNN/conv_ds_1/dw_batch_norm/beta']
pw_conv_1 = ['DS-CNN/conv_ds_1/pointwise_conv/weights', 'DS-CNN/conv_ds_1/pointwise_conv/biases',
'DS-CNN/conv_ds_1/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_1/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV1', 'DS-CNN/conv_ds_1/pw_batch_norm/beta']
dw_conv_2 = ['DS-CNN/conv_ds_2/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_2/depthwise_conv/biases',
'DS-CNN/conv_ds_2/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_2/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV2',
'DS-CNN/conv_ds_2/dw_batch_norm/beta']
pw_conv_2 = ['DS-CNN/conv_ds_2/pointwise_conv/weights', 'DS-CNN/conv_ds_2/pointwise_conv/biases',
'DS-CNN/conv_ds_2/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_2/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV2', 'DS-CNN/conv_ds_2/pw_batch_norm/beta']
dw_conv_3 = ['DS-CNN/conv_ds_3/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_3/depthwise_conv/biases',
'DS-CNN/conv_ds_3/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_3/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV3',
'DS-CNN/conv_ds_3/dw_batch_norm/beta']
pw_conv_3 = ['DS-CNN/conv_ds_3/pointwise_conv/weights', 'DS-CNN/conv_ds_3/pointwise_conv/biases',
'DS-CNN/conv_ds_3/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_3/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV3', 'DS-CNN/conv_ds_3/pw_batch_norm/beta']
dw_conv_4 = ['DS-CNN/conv_ds_4/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_4/depthwise_conv/biases',
'DS-CNN/conv_ds_4/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_4/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV4',
'DS-CNN/conv_ds_4/dw_batch_norm/beta']
pw_conv_4 = ['DS-CNN/conv_ds_4/pointwise_conv/weights', 'DS-CNN/conv_ds_4/pointwise_conv/biases',
'DS-CNN/conv_ds_4/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_4/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV4', 'DS-CNN/conv_ds_4/pw_batch_norm/beta']
dw_conv_5 = ['DS-CNN/conv_ds_5/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_5/depthwise_conv/biases',
'DS-CNN/conv_ds_5/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_5/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV5',
'DS-CNN/conv_ds_5/dw_batch_norm/beta']
pw_conv_5 = ['DS-CNN/conv_ds_5/pointwise_conv/weights', 'DS-CNN/conv_ds_5/pointwise_conv/biases',
'DS-CNN/conv_ds_5/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_5/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV5', 'DS-CNN/conv_ds_5/pw_batch_norm/beta']
dw_conv_6 = ['DS-CNN/conv_ds_6/depthwise_conv/depthwise_weights', 'DS-CNN/conv_ds_6/depthwise_conv/biases',
'DS-CNN/conv_ds_6/dw_batch_norm/moving_mean',
'DS-CNN/conv_ds_6/dw_batch_norm/moving_variance', 5, 5, 'DW_CONV6',
'DS-CNN/conv_ds_6/dw_batch_norm/beta']
pw_conv_6 = ['DS-CNN/conv_ds_6/pointwise_conv/weights', 'DS-CNN/conv_ds_6/pointwise_conv/biases',
'DS-CNN/conv_ds_6/pw_batch_norm/moving_mean', 'DS-CNN/conv_ds_6/pw_batch_norm/moving_variance', 5, 5,
'PW_CONV6', 'DS-CNN/conv_ds_6/pw_batch_norm/beta']
layer_list = [conv_1, dw_conv_1, pw_conv_1, dw_conv_2, pw_conv_2, dw_conv_3, pw_conv_3, dw_conv_4, pw_conv_4,
dw_conv_5, pw_conv_5, dw_conv_6, pw_conv_6]
n_filters = 76
for layer in layer_list:
bit_width = reg_conv_bits
layer_name = layer[6]
PW = False
if (layer_name[0:2] == 'PW'):
PW = True
bit_width = pw_conv_bits
DW = False
if (layer_name[0:2] == 'DW'):
DW = True
bit_width = dw_conv_bits
print("Name of node - " + layer[6])
for v in tf.trainable_variables():
if v.name == layer[0]+':0':
v_weights = v
if v.name == layer[1]+':0':
v_bias = v
if v.name == layer[7]+':0':
v_beta = v
for v in tf.global_variables():
if v.name == layer[2]+':0':
v_mean = v
if v.name == layer[3]+':0':
v_var = v
weights = sess.run(v_weights)
bias = sess.run(v_bias)
beta = sess.run(v_beta)
mean = sess.run(v_mean)
var = sess.run(v_var)
#print("Weights shape: " + str(weights.shape))
#print("Bias shape: " + str(bias.shape))
#print("Var shape: " + str(var.shape))
#print("Mean shape: " + str(mean.shape))
#print("Beta shape: " + str(beta.shape))
w_shape = weights.shape
b_shape = bias.shape
weights = weights.squeeze()
weights_t1 = np.zeros(weights.shape)
bias_t1 = np.zeros((1, n_filters))
for i in range(0, len(bias)):
if (PW):
filter = weights[:, i]
else:
filter = weights[:, :, i]
bias_temp = bias[i]
mean_temp = mean[i]
var_temp = var[i]
beta_temp = beta[i]
new_filter = filter / math.sqrt(var_temp + eps)
new_bias = beta_temp + (bias_temp - mean_temp) / (math.sqrt(var_temp + eps))
if (PW):
weights_t1[:, i] = new_filter
else:
weights_t1[:, :, i] = new_filter
bias_t1[0, i] = new_bias
#if (i == 0):
#print('filters : ' + str(filter))
#print('Bias : ' + str(bias_temp))
#print('Mean : ' + str(mean_temp))
#print('Variance : ' + str(var_temp))
#print("New filter : " + str(new_filter))
#print("New Bias : " + str(new_bias))
min_value = weights_t1.min()
max_value = weights_t1.max()
int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
dec_bits_weight = min((bit_width-1) - int_bits, 111)
weights_quant = np.round(weights_t1 * 2 ** dec_bits_weight)
weights_quant = weights_quant/(2**dec_bits_weight)
weights_quant = weights_quant.reshape(w_shape)
#print("input fractional bits: " + str(layer[4]))
#print("Weights min value: " + str(min_value))
#print("Weights max value: " + str(max_value))
#print("Weights fractional bits: " + str(dec_bits_weight))
min_value = bias_t1.min()
max_value = bias_t1.max()
int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
dec_bits_bias = min((bit_width-1) - int_bits, 10000)
bias_quant = np.round(bias_t1 * 2 ** dec_bits_bias)
bias_quant = bias_quant/(2**dec_bits_bias)
bias_quant = bias_quant.reshape(b_shape)
bias_left_shift = layer[4] + dec_bits_weight - dec_bits_bias
#print("Bias min value: " + str(min_value))
#print("Bias max value: " + str(max_value))
#print("Bias fractional bits: " + str(dec_bits_bias))
# update the weights in tensorflow graph for quantizing the activations
updated_weights = sess.run(tf.assign(v_weights, weights_quant))
updated_bias = sess.run(tf.assign(v_bias, bias_quant))
fc_layer = ['DS-CNN/fc1/weights', 'DS-CNN/fc1/biases', 5, 3, 'FC']
for v in tf.trainable_variables():
if v.name == fc_layer[0]+':0':
v_fc_weights = v
if v.name == fc_layer[1]+':0':
v_fc_bias = v
weights = sess.run(v_fc_weights)
bias = sess.run(v_fc_bias)
w_shape = weights.shape
b_shape = bias.shape
#print("FC weights : " + str(weights.shape))
#print(weights)
#print("FC bias : " + str(bias.shape))
#print(bias)
min_value = weights.min()
max_value = weights.max()
int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
dec_bits_weight = min((fc_bits-1) - int_bits, 111)
weights_quant = np.round(weights * 2 ** dec_bits_weight)
weights_quant = weights_quant / (2 ** dec_bits_weight)
weights_quant = weights_quant.reshape(w_shape)
#print("input fractional bits: " + str(fc_layer[2]))
#print("Weights min value: " + str(min_value))
#print("Weights max value: " + str(max_value))
#print("Weights fractional bits: " + str(dec_bits_weight))
min_value = bias.min()
max_value = bias.max()
int_bits = int(np.ceil(np.log2(max(abs(min_value), abs(max_value)))))
dec_bits_bias = min((fc_bits-1) - int_bits, 10000)
bias_quant = np.round(bias * 2 ** dec_bits_bias)
#print("Bias min value: " + str(min_value))
#print("Bias max value: " + str(max_value))
#print("Bias fractional bits: " + str(dec_bits_bias))
bias_quant = bias_quant / (2 ** dec_bits_bias)
bias_quant = bias_quant.reshape(b_shape)
#print("Quantized weights: " + str(weights_quant))
#print("Quantized bias: " +str(bias_quant))
updated_weights = sess.run(tf.assign(v_fc_weights, weights_quant))
updated_bias = sess.run(tf.assign(v_fc_bias, bias_quant))
#print("bias[0] : " + str(bias[0]))
#print("bias_quant[0] : " + str(bias_quant[0]))
training_step = 30000
checkpoint_path = os.path.join(FLAGS.train_dir, 'quant',
FLAGS.model_architecture + '.ckpt')
tf.logging.info('Saving best model to "%s-%d"', checkpoint_path, training_step)
saver.save(sess, checkpoint_path, global_step=training_step)
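# A minimal standalone sketch (an assumption, not part of the original script) of the
# symmetric fixed-point quantization performed above: choose the number of fractional
# bits so the integer part of the largest magnitude still fits in (bit_width - 1) bits,
# round to that grid, then scale back to floats.
def quantize_fixed_point(w, bit_width=8):
    # w is assumed to be a non-zero numpy array; np.log2(0) would give -inf here.
    int_bits = int(np.ceil(np.log2(np.abs(w).max())))
    dec_bits = (bit_width - 1) - int_bits
    return np.round(w * 2 ** dec_bits) / 2 ** dec_bits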
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--clip_stride_ms',
type=int,
default=30,
help='How often to run recognition. Useful for models with cache.',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
'--dct_coefficient_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',)
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--output_file', type=str, help='Where to save the frozen graph.')
parser.add_argument(
'--input_type',
type=str,
default='MFCC',
help='MFCC if DCT should be applied, log_mel if not')
parser.add_argument(
'--model_size_info',
type=int,
nargs="+",
default=[128, 128, 128],
help='Model dimensions - different for various models')
parser.add_argument(
'--bit_widths',
type=int,
nargs="+",
default=[8, 8, 8, 8, 8],
help='Bit width for regular Conv-weights, Depthwise-conv weights, Pointwise-conv weights, FC-weights and activations')
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/speech_commands_train',
help='Directory to write event logs and checkpoint.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
from collections import Counter
import math
def uniqueNumber(A):
counter = Counter(A)
commons = counter.most_common()
return commons[-1][0]
def power(x, y, p):
res = 1
    x = x % p
while (y > 0):
if (y & 1):
res = (res * x) % p
y = y >> 1
x = (x * x) % p
return res
T = input()
T = int(T)
MOD = 10**9 + 7
for t in range(0,T):
N, K, R = input().split()
N = int(N)
K = int(K)
R = int(R)
A = list(map(int,input().split()))
x = uniqueNumber(A)
remainderB = 0
    fac = math.factorial(2 * R) // (math.factorial(R) * math.factorial(R))
b = str(fac)
for i in range(len(b)):
        remainderB = (remainderB * 10 + ord(b[i]) - 48) % (MOD - 1)
print(power(x, remainderB, MOD))
#for reference visit link: https://www.geeksforgeeks.org/find-abm-where-b-is-very-large/
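# Sanity check (an assumption, not part of the original solution): power() should agree
# with Python's built-in three-argument pow(), which performs modular exponentiation.
assert power(3, 20, MOD) == pow(3, 20, MOD)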
|
import json
import tweepy
# keep credentials in a separate file so they can be imported; the file is also added to '.gitignore'.
import credentials
from tweepy import OAuthHandler
# import Python's Counter Class
from collections import Counter
# to access the credentials in the file we add 'credentials.' beforehand.
auth = OAuthHandler(credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET)
auth.set_access_token(credentials.OAUTH_TOKEN, credentials.OAUTH_TOKEN_SECRET)
# Create an instance of the Tweepy API that will do the actual data access.
# In order for Twitter to allow the access to the API, you pass in the
# OAuthHandler object when instantiating it.
api = tweepy.API(auth)
count = 50
query = 'Dublin'
# Get all tweets for the search query
results = [status for status in tweepy.Cursor(api.search, q=query).items(count)]
status_texts = [ status._json['text'] for status in results ]
screen_names = [ status._json['user']['screen_name']
for status in results
for mention in status._json['entities']['user_mentions'] ]
hashtags = [ hashtag['text']
for status in results
for hashtag in status._json['entities']['hashtags'] ]
words = [ w for t in status_texts
for w in t.split()]
for entry in [screen_names, hashtags, words]:
counter = Counter(entry)
    print(counter.most_common()[:10])  # the top 10 results
    print()
|
class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
s_lst = list(s)
l = 0
r = len(s) - 1
vowels = 'aeiouAEIOU'
while True:
while l < r and s[l] not in vowels:
l += 1
while l < r and s[r] not in vowels:
r -= 1
if l < r:
s_lst[l], s_lst[r] = s_lst[r], s_lst[l]
l += 1
r -= 1
else:
break
return ''.join(s_lst)
if __name__ == "__main__":
assert Solution().reverseVowels('leetcode') == 'leotcede'
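    # One more hypothetical check: case is preserved while vowel positions are swapped.
    assert Solution().reverseVowels('aA') == 'Aa'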
|
# https://www.w3schools.com/python/python_ml_scale.asp
# Machine Learning - Scale
# Scaling features
# When your data has different values, and even different units of measurement,
# it can be hard to compare them. What is kilograms compared to meters?
# Or altitude compared to time?
# The answer to this problem is scaling.
# We can scale data into new values that are easier to compare.
# Take a look at the table below: it is the same data set we used in the
# multiple regression chapter, but this time the volume column contains values
# in liters instead of cm3 (1.0 instead of 1000).
# car - model volume - weight - CO2
# Toyota Aygo - 1.0 - 790 - 99
# It can be difficult to compare the volume 1.0 with the weight 790, but if we
# scale them both into comparable values, we can easily see how much one value
# is compared to the other.
# There are different methods for scaling data; in this tutorial we will use a
# method called standardization.
# The standardization method uses this formula: z = (x - u) / s
# where z is the new value, x is the original value, u is the mean and s is the
# standard deviation.
# If you take the weight column from the data set above, the first value is 790,
# and the scaled value will be:
# (790 - 1292.23) / 238.74 = -2.1
# The value 1292.23 was obtained with this code:
# import pandas
# import numpy
# df = pandas.read_csv("cars2.csv")
# v = df['Weight']
# Finding the mean value:
# mean = numpy.mean(v)
# print(mean)
# The value 238.74 was obtained with this code:
# import pandas
# import numpy
# df = pandas.read_csv("cars2.csv")
# v = df['Weight']
# Finding the standard deviation:
# std = numpy.std(v)
# print(std)
# If you take the volume column from the data set above, the first value is 1.0,
# and the scaled value will be:
# (1.0 - 1.61) / 0.38 = -1.59
# Now you can compare -2.1 with -1.59 instead of comparing 790 with 1.0.
# You do not have to do this manually; the Python sklearn module has a method
# called StandardScaler() that returns a Scaler object with methods for
# transforming data sets.
# Example
# Scale all values in the Weight and Volume columns:
import pandas
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
df = pandas.read_csv("cars.csv")
X = df[['Weight', 'Volume']]
scaledX = scale.fit_transform(X)
print(scaledX)
# Result: run the code above to see the output.
# Note that the first two values are -2.1 and -1.59, which matches our calculations.
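# A minimal cross-check (an assumption, not part of the original tutorial): standardizing
# the 'Weight' column by hand with z = (x - u) / s should reproduce the first column of
# scaledX, since StandardScaler applies exactly this formula per column.
import numpy
z = (X['Weight'] - numpy.mean(X['Weight'])) / numpy.std(X['Weight'])
print(z.head())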
|
#
# Copyright (C) 2012 - 2019 Satoru SATOH <satoru.satoh@gmail.com>
# Copyright (C) 2017 Red Hat, Inc.
# License: MIT
#
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
from __future__ import absolute_import
import os
import tests.backend.common as TBC
try:
import anyconfig.backend.yaml.pyyaml as TT
except ImportError:
import tests.common
tests.common.skip_test()
from .common import CNF_S, CNF
class HasParserTrait(TBC.HasParserTrait):
psr = TT.Parser()
cnf = CNF
cnf_s = CNF_S
class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait):
load_options = dict(ac_safe=True, Loader=TT.yaml.loader.Loader)
dump_options = dict(ac_safe=True)
empty_patterns = [('', {}), (' ', {}), ('[]', []),
("#%s#%s" % (os.linesep, os.linesep), {})]
class Test_20(TBC.Test_20_dump_and_load, HasParserTrait):
pass
# vim:sw=4:ts=4:et:
|
import random
import config
from enums import SideEnum, ActionEnum
from feedhandler import FeedHandler
def generate_order_message(order_id: int):
min_price = int(config.MIN_PRICE_THRESHOLD)
max_price = int(config.MAX_PRICE_THRESHOLD)
price = random.randint(min_price, max_price)
qty = random.randint(1, 100)
side = SideEnum(random.randint(int(SideEnum.S.value), int(SideEnum.B.value)))
    action = ActionEnum(random.randint(int(ActionEnum.A.value), int(ActionEnum.A.value)))  # always 'A' (add) in this simulation
return [action.name, order_id, side.name, qty, price]
if __name__ == "__main__":
counter = 0
feed_handler = FeedHandler()
while counter <= 100000:
counter += 1
feed_handler.process_message(generate_order_message(counter))
feed_handler.print_mid_quote()
feed_handler.print_recent_price_trades()
feed_handler.print_book()
|
def add(n1, n2):
return n1 + n2
def subtract(n1, n2):
return n1 - n2
def calculator(n1, n2, func):
return func(n1, n2)
result_1 = calculator(5, 3, add)
print(result_1)
result_2 = calculator(5, 3, subtract)
print(result_2)
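# The same higher-order pattern also accepts an inline lambda; a hypothetical
# multiply case for illustration:
result_3 = calculator(5, 3, lambda n1, n2: n1 * n2)
print(result_3)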
|
from tkinter import *
import math as m
import tkinter.messagebox
root = Tk()
root.title("Advanced scientific calculator")
root.configure(background="powder blue")
root.resizable(width="false", height="false")
root.geometry("480x624+20+20")
Cacl = Frame(root)
Cacl.grid()
txtDisplay = Entry(Cacl, font=('arial', 30, 'bold'), bg="powder blue", bd=30, width=28, justify=RIGHT)
txtDisplay.grid(row=0, column=0, columnspan=3, pady=1)
txtDisplay.insert(0, "0")
# ==================Numbers======================================================================================
def numberEnter(value):
    # append the pressed digit to the display, replacing the initial "0"
    if txtDisplay.get() == "0":
        txtDisplay.delete(0, END)
    txtDisplay.insert(END, value)
numberpad = "789456123"
i = 0
btn = []
for j in range(2, 5):
    for k in range(3):
        btn.append(Button(Cacl, width=6, height=2, font=('arial', 20, 'bold'), bd=4, bg="gray99", text=numberpad[i]))
        btn[i].grid(row=j, column=k, pady=1)
        btn[i]["command"] = lambda x=numberpad[i]: numberEnter(x)
        i += 1
# =============Menu=====================================================================================
def iExit():
    answer = tkinter.messagebox.askyesno("Advanced scientific calculator", "Confirm if you want to exit")
    if answer:
        root.destroy()
def Scientific():
root.resizable(width="false", height="false")
root.geometry("944x568+0+0")
def Standard():
root.resizable(width="false", height="false")
root.geometry("480x568+0+0")
menubar = Menu(Cacl)
filemenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Standard")
filemenu.add_command(label="Scientific")
filemenu.add_separator()
filemenu.add_command(label="Exit", command=iExit)
editmenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Edit", menu=editmenu)
editmenu.add_command(label="Copy")
editmenu.add_command(label="Cut")
editmenu.add_separator()
editmenu.add_command(label="Exit")
helpmenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="View help")
root.configure(menu=menubar)
root.mainloop()
|
from flask import Flask, render_template, session, request, redirect,url_for
app = Flask(__name__)
@app.route("/")
def home():
if 'logged' in session:
return redirect(url_for("secret"))
else:
return render_template("home.html")
@app.route("/secret", methods=["GET","POST"])
def secret():
if request.method == "POST":
if request.form['username'] == "hoyinho" and request.form['password'] == "hoyin":
session['logged'] = "Ho Yin"
if 'logged' not in session:
return redirect(url_for("login"))
return render_template("secret.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/logout")
def logout():
session.clear()
return redirect(url_for("about"))
@app.route ("/login")
def login(error = None):
if 'logged' in session:
return redirect(url_for("secret"))
return render_template("login.html")
if __name__ == "__main__":
app.debug = True
app.secret_key = "Some random key"
app.run(host='0.0.0.0', port = 8000)
|
from ED6ScenarioHelper import *
def main():
    # Grancel
CreateScenaFile(
FileName = 'T4214 ._SN',
MapName = 'Grancel',
Location = 'T4214.x',
MapIndex = 1,
MapDefaultBGM = "ed60017",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'希尔丹夫人', # 9
'茜亚', # 10
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH02460 ._CH', # 00
'ED6_DT07/CH02540 ._CH', # 01
'ED6_DT07/CH02230 ._CH', # 02
'ED6_DT07/CH02240 ._CH', # 03
)
AddCharChipPat(
'ED6_DT07/CH02460P._CP', # 00
'ED6_DT07/CH02540P._CP', # 01
'ED6_DT07/CH02230P._CP', # 02
'ED6_DT07/CH02240P._CP', # 03
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = 0,
TalkScenaIndex = 4,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = -1,
InitScenaIndex = -1,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
ScpFunction(
"Function_0_10A", # 00, 0
"Function_1_240", # 01, 1
"Function_2_24A", # 02, 2
"Function_3_3C7", # 03, 3
"Function_4_634", # 04, 4
"Function_5_957", # 05, 5
"Function_6_FDA", # 06, 6
"Function_7_2994", # 07, 7
)
def Function_0_10A(): pass
label("Function_0_10A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 2)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 6)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_134")
SetChrChipByIndex(0x0, 0)
SetChrChipByIndex(0x1, 2)
SetChrChipByIndex(0x138, 3)
SetChrFlags(0x0, 0x1000)
SetChrFlags(0x1, 0x1000)
SetChrFlags(0x138, 0x1000)
label("loc_134")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 2)), scpexpr(EXPR_END)), "loc_142")
OP_A3(0x3FA)
Event(0, 5)
label("loc_142")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 3)), scpexpr(EXPR_END)), "loc_150")
OP_A3(0x3FB)
Event(0, 7)
label("loc_150")
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x0), scpexpr(EXPR_END)),
(100, "loc_15C"),
(SWITCH_DEFAULT, "loc_172"),
)
label("loc_15C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 1)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_16F")
OP_A2(0x642)
Event(0, 6)
label("loc_16F")
Jump("loc_172")
label("loc_172")
OP_A2(0x639)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 7)), scpexpr(EXPR_END)), "loc_19C")
ClearChrFlags(0x8, 0x80)
SetChrPos(0x8, 64129, 0, 99150, 167)
OP_43(0x8, 0x0, 0x0, 0x2)
Jump("loc_23F")
label("loc_19C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_1C3")
ClearChrFlags(0x8, 0x80)
SetChrPos(0x8, 70620, 0, 69790, 90)
OP_43(0x8, 0x0, 0x0, 0x2)
Jump("loc_23F")
label("loc_1C3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 6)), scpexpr(EXPR_END)), "loc_1CD")
Jump("loc_23F")
label("loc_1CD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 2)), scpexpr(EXPR_END)), "loc_1F4")
ClearChrFlags(0x9, 0x80)
SetChrPos(0x9, 70630, 0, 98590, 48)
OP_43(0x9, 0x0, 0x0, 0x2)
Jump("loc_23F")
label("loc_1F4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 0)), scpexpr(EXPR_END)), "loc_1FE")
Jump("loc_23F")
label("loc_1FE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 1)), scpexpr(EXPR_END)), "loc_23F")
ClearChrFlags(0x8, 0x80)
SetChrPos(0x8, 64129, 0, 99150, 167)
OP_43(0x8, 0x0, 0x0, 0x2)
ClearChrFlags(0x9, 0x80)
SetChrPos(0x9, 70630, 0, 98590, 48)
OP_43(0x9, 0x0, 0x0, 0x2)
label("loc_23F")
Return()
# Function_0_10A end
def Function_1_240(): pass
label("Function_1_240")
OP_4F(0x2B, (scpexpr(EXPR_PUSH_LONG, 0xFF), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Return()
# Function_1_240 end
def Function_2_24A(): pass
label("Function_2_24A")
RunExpression(0x0, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_26F")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_3B1")
label("loc_26F")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_288")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_3B1")
label("loc_288")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2A1")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_3B1")
label("loc_2A1")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2BA")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_3B1")
label("loc_2BA")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2D3")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_3B1")
label("loc_2D3")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_2EC")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_3B1")
label("loc_2EC")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_305")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_3B1")
label("loc_305")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_31E")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_3B1")
label("loc_31E")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_337")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_3B1")
label("loc_337")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_350")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_3B1")
label("loc_350")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_369")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_3B1")
label("loc_369")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_382")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_3B1")
label("loc_382")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_39B")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_3B1")
label("loc_39B")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_3B1")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_3B1")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_3C6")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_3B1")
label("loc_3C6")
Return()
# Function_2_24A end
def Function_3_3C7(): pass
label("Function_3_3C7")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 7)), scpexpr(EXPR_END)), "loc_3D4")
Jump("loc_630")
label("loc_3D4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_3DE")
Jump("loc_630")
label("loc_3DE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 6)), scpexpr(EXPR_END)), "loc_3E8")
Jump("loc_630")
label("loc_3E8")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 2)), scpexpr(EXPR_END)), "loc_471")
ChrTalk(
0xFE,
(
"约修亚先生的肌肤\x01",
"和女性一样细腻呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"只要化妆得当,\x01",
"就会变得相当漂亮哦。\x02",
)
)
CloseMessageWindow()
Jump("loc_630")
label("loc_471")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 0)), scpexpr(EXPR_END)), "loc_47B")
Jump("loc_630")
label("loc_47B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 1)), scpexpr(EXPR_END)), "loc_630")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_517")
ChrTalk(
0xFE,
(
"距晚宴开始\x01",
"还有一段时间。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"请慢慢在城里参观。\x02",
)
CloseMessageWindow()
Jump("loc_630")
label("loc_517")
OP_A2(0x1)
ChrTalk(
0xFE,
"啊……\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"怎么了?\x01",
"是不是有什么事情呢?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F嗯,没什么,\x01",
"我们正在城里参观呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"是这样啊。\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"距晚宴开始\x01",
"还有一段时间。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"请慢慢在城里参观。\x02",
)
CloseMessageWindow()
label("loc_630")
TalkEnd(0xFE)
Return()
# Function_3_3C7 end
def Function_4_634(): pass
label("Function_4_634")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 7)), scpexpr(EXPR_END)), "loc_72E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_6AE")
ChrTalk(
0x8,
(
"#710F在诞辰庆典上玩累了吗?\x01",
" \x02\x03",
"如果有什么难处,\x01",
"尽管告诉我就可以了。\x02",
)
)
CloseMessageWindow()
Jump("loc_72B")
label("loc_6AE")
OP_A2(0x0)
ChrTalk(
0x8,
(
"#710F艾丝蒂尔。\x02\x03",
"在诞辰庆典上玩累了吗?\x01",
" \x02\x03",
"如果有什么难处,\x01",
"尽管告诉我就可以了。\x02",
)
)
CloseMessageWindow()
label("loc_72B")
Jump("loc_953")
label("loc_72E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_83F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_7AD")
ChrTalk(
0x8,
(
"#710F因为诞辰庆典,\x01",
"现在街上变得很热闹。\x02\x03",
"你们就去好好玩玩吧,\x01",
"要注意安全哦。\x02",
)
)
CloseMessageWindow()
Jump("loc_83C")
label("loc_7AD")
OP_A2(0x0)
ChrTalk(
0x8,
(
"#711F啊,\x01",
"你们两个打算出去吗?\x02\x03",
"因为诞辰庆典,\x01",
"现在王都变得很热闹。\x02\x03",
"你们就去好好玩玩吧,\x01",
"要注意安全哦。\x02",
)
)
CloseMessageWindow()
label("loc_83C")
Jump("loc_953")
label("loc_83F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 6)), scpexpr(EXPR_END)), "loc_849")
Jump("loc_953")
label("loc_849")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 2)), scpexpr(EXPR_END)), "loc_853")
Jump("loc_953")
label("loc_853")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC8, 0)), scpexpr(EXPR_END)), "loc_85D")
Jump("loc_953")
label("loc_85D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 1)), scpexpr(EXPR_END)), "loc_953")
ChrTalk(
0x8,
(
"#710F是问晚宴吗……\x02\x03",
"因为料理还在准备,\x01",
"请再稍等片刻。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F料理准备完毕之后,\x01",
"晚宴就会立刻开始。\x02\x03",
"你们就请先回房间休息一下吧。\x01",
" \x02",
)
)
CloseMessageWindow()
OP_28(0x49, 0x1, 0x800)
label("loc_953")
TalkEnd(0xFE)
Return()
# Function_4_634 end
def Function_5_957(): pass
label("Function_5_957")
EventBegin(0x0)
OP_6D(62970, 640, 71000, 0)
OP_67(0, 8000, -10000, 0)
OP_6B(2800, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
ClearChrFlags(0x8, 0x80)
SetChrPos(0x8, 64390, 0, 71030, 270)
SetChrPos(0x101, 61580, 0, 71540, 90)
SetChrPos(0x102, 61580, 0, 70620, 90)
ChrTalk(
0x8,
(
"#710F……你们要说的我明白了。\x02\x03",
"想要把拉赛尔博士的传话\x01",
"直接的告诉女王陛下……\x02\x03",
"就是这件事对吧?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F对……就是这样的。\x02\x03",
"如果女王陛下真的是身体不适,\x01",
"我们就重新再考虑一下。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F那并不是主要的问题……\x02\x03",
"女王宫正处于刚才那些特务兵\x01",
"的24小时监控状态。\x02\x03",
"能够进去的只有公爵大人和上校,\x01",
"以及在女王身边照料她的\x01",
"我和侍女们。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F这么说来,想要去见女王\x01",
"果真是非常困难的了……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F怎么办,艾丝蒂尔?\x02\x03",
"只有把博士的传话让\x01",
"希尔丹夫人转达这个办法了……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F唔~嗯,可是还是\x01",
"直接去和女王谈谈更好……\x02\x03",
"杜南公爵的目的\x01",
"和理查德上校的企图……\x02\x03",
"不清楚的事情还有很多呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F……艾丝蒂尔、约修亚。\x02\x03",
"我已经有些打算了。\x02\x03",
"晚宴结束之后\x01",
"你们再来这里一趟可以吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F咦,这么说来……\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F我们和女王见面的\x01",
"方法已经有了吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F这样认为也是可以的。\x02\x03",
"虽然可能比较困难……\x01",
"但还是有试一试的价值。\x02\x03",
"因为还要做一些准备的缘故,\x01",
"请等到晚宴结束,可以吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F好~的,太幸运了!\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F明白了。\x01",
"晚宴一结束就来向您请教。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#710F我会等候你们的到来的。\x02",
)
CloseMessageWindow()
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_OR), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_OR), scpexpr(EXPR_END)), "loc_F29")
ChrTalk(
0x8,
(
"#710F啊,说到晚宴的事情……\x02\x03",
"因为料理还在准备,\x01",
"请再稍等片刻。\x02",
)
)
CloseMessageWindow()
Jump("loc_FAB")
label("loc_F29")
ChrTalk(
0x8,
(
"#710F料理准备完毕之后,\x01",
"晚宴就会立刻开始。\x02\x03",
"先回房间休息一下\x01",
"也许是个不错的选择。\x02",
)
)
CloseMessageWindow()
OP_28(0x49, 0x1, 0x800)
label("loc_FAB")
Sleep(300)
Fade(1000)
SetChrPos(0x101, 62550, 0, 68550, 45)
SetChrPos(0x102, 62550, 0, 68550, 45)
EventEnd(0x0)
Return()
# Function_5_957 end
def Function_6_FDA(): pass
label("Function_6_FDA")
EventBegin(0x0)
OP_6D(67590, 0, 65319, 0)
ClearChrFlags(0x8, 0x80)
SetChrPos(0x8, 70120, 0, 69770, 225)
SetChrPos(0x101, 66580, 0, 64769, 45)
SetChrPos(0x102, 67630, 0, 64590, 45)
def lambda_102B():
OP_6D(69520, 0, 68800, 2000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_102B)
def lambda_1043():
OP_8E(0xFE, 0x10B6C, 0x0, 0x10BE4, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x101, 2, lambda_1043)
def lambda_105E():
OP_8E(0xFE, 0x1113E, 0x0, 0x1095A, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x102, 2, lambda_105E)
WaitChrThread(0x102, 0x2)
TurnDirection(0x102, 0x8, 400)
WaitChrThread(0x101, 0x3)
ChrTalk(
0x8,
(
"料理还在准备中,\x01",
"请稍等片刻。\x02\x03",
"不觉得迟到了很久吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F这个……对不起。\x02\x03",
"不巧被理查德上校\x01",
"抓住了……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
"#710F上校……吗?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F只是谈了谈关于我们\x01",
"父亲过去的事情。\x02\x03",
"与这边的行动无关,\x01",
"请不用在意。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F是这样啊……\x02\x03",
"根据介绍信来看,\x01",
"你们两位是卡西乌斯先生\x01",
"的孩子吧。\x02\x03",
"理查德上校\x01",
"会有一些感慨也是理所当然的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F请问,希尔丹夫人也\x01",
"知道父亲的事吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F曾经作为摩尔根将军副官\x01",
"的他经常到王城这里来。\x02\x03",
"是去世的王子……陛下的儿子\x01",
"以前的学友。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F去世的王子……\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F就是科洛蒂亚公主\x01",
"的父亲大人。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F嗯,因为15年前的海难事故\x01",
"而不幸身亡。\x02\x03",
"倘若王子还活着的话,\x01",
"现在这样的局面是\x01",
"不会发生的……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F哎……?\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F……对于已经发生的事情,\x01",
"后悔是没有用处的。\x02\x03",
"夜色已晚,\x01",
"这就准备出发吧。\x02\x03",
"茜亚,过来吧。\x02",
)
)
CloseMessageWindow()
ClearChrFlags(0x9, 0x80)
SetChrPos(0x9, 69050, 0, 75720, 180)
def lambda_14BF():
TurnDirection(0xFE, 0x9, 400)
ExitThread()
QueueWorkItem(0x8, 1, lambda_14BF)
def lambda_14CD():
TurnDirection(0xFE, 0x9, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_14CD)
def lambda_14DB():
TurnDirection(0xFE, 0x9, 400)
ExitThread()
QueueWorkItem(0x102, 1, lambda_14DB)
Sleep(300)
def lambda_14EE():
label("loc_14EE")
TurnDirection(0xFE, 0x102, 0)
OP_48()
Jump("loc_14EE")
QueueWorkItem2(0x9, 1, lambda_14EE)
def lambda_14FF():
OP_8E(0xFE, 0x10CCA, 0x0, 0x112B0, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x9, 2, lambda_14FF)
def lambda_151A():
label("loc_151A")
TurnDirection(0xFE, 0x9, 0)
OP_48()
Jump("loc_151A")
QueueWorkItem2(0x8, 1, lambda_151A)
def lambda_152B():
label("loc_152B")
TurnDirection(0xFE, 0x9, 0)
OP_48()
Jump("loc_152B")
QueueWorkItem2(0x101, 1, lambda_152B)
def lambda_153C():
label("loc_153C")
TurnDirection(0xFE, 0x9, 0)
OP_48()
Jump("loc_153C")
QueueWorkItem2(0x102, 1, lambda_153C)
OP_6D(70030, 0, 70300, 2000)
ChrTalk(
0x101,
"#000F咦,你不是……?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F您是茜亚小姐\x01",
"对吧?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"你、你们好……\x01",
"艾丝蒂尔小姐,约修亚先生,\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"事情我已经知道了。\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F这个孩子\x01",
"你们完全可以相信她。\x02\x03",
"公主殿下在城里的时候,\x01",
"就是由这位侍女照顾的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F公主殿下……\x01",
"就是科洛蒂亚公主吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F这样的话就没问题了。\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
"谢、谢谢……\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"那么这就把准备好的制服\x01",
" \x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"丝带呀头饰呀那些\x01",
"细小的方面我都已经\x01",
"准备完毕了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F哎……\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F这么说……难道?\x02",
)
CloseMessageWindow()
OP_44(0x101, 0xFF)
OP_44(0x102, 0xFF)
OP_44(0x9, 0xFF)
OP_44(0x8, 0xFF)
TurnDirection(0x8, 0x102, 400)
ChrTalk(
0x8,
(
"#710F是啊,艾丝蒂尔如果\x01",
"装扮成侍女的样子\x01",
"就可以进入女王宫了。\x02\x03",
"在把头发的样式改变一下,\x01",
"看守也就觉察不出来了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#000F原~来如此……\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F的确,制服可以很好\x01",
"将个人特点隐藏起来。\x02\x03",
"用于潜入\x01",
"就再好不过了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F啊~侍女的服饰啊。\x02\x03",
"看到过莉拉小姐的着装,\x01",
"觉得很不错呢。\x02\x03",
"既飘逸而又很可爱,\x01",
"行动起来也很方便的样子。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"嘻嘻,如果行动不方便\x01",
"那扫除的时候就麻烦了……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F啊,果然是这样吗?\x02\x03",
"那就立刻穿上吧!\x02",
)
)
CloseMessageWindow()
TurnDirection(0x102, 0x101, 400)
ChrTalk(
0x102,
(
"#010F很开心嘛……\x02\x03",
"蹦蹦跳跳的虽然是可以,\x01",
"但不要在陛下面前失礼哦。\x02\x03",
"这次我是\x01",
"不能和你一起了。\x02",
)
)
CloseMessageWindow()
TurnDirection(0x101, 0x102, 400)
ChrTalk(
0x101,
(
"#000F哎?为什么?\x02\x03",
"约修亚也换装不就行了吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F…………………………\x02\x03",
"……咦。\x02",
)
)
CloseMessageWindow()
def lambda_1B0A():
TurnDirection(0xFE, 0x101, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_1B0A)
TurnDirection(0x8, 0x101, 400)
ChrTalk(
0x8,
"#710F你说什么?\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F约修亚你在学院祭的舞台剧\x01",
"中扮演的公主不是很合适的吗?\x02\x03",
"礼服和侍女装不是差不多吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F这、这可不是在演戏。\x02\x03",
"和女王陛下见面时\x01",
"却穿的女装,这有点……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F没关系,没关系。\x01",
"一点都不难看!\x02\x03",
"当时约修亚装扮的公主\x01",
"可是非常美丽哟!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F又、又来了……别开玩笑了。\x02\x03",
"希尔丹夫人你们\x01",
"怎么说我就怎么做吧。\x02",
)
)
CloseMessageWindow()
def lambda_1D1B():
TurnDirection(0xFE, 0x102, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_1D1B)
TurnDirection(0x8, 0x102, 400)
ChrTalk(
0x8,
"#710F………………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x9,
"………………………………\x02",
)
CloseMessageWindow()
OP_62(0x102, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0x102,
"#010F我、我说……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F原来如此……\x01",
"好像的确没有什么问题。\x02\x03",
"茜亚,为公主殿下准备的\x01",
"假发还在吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"是、是的……\x01",
"一次都没有使用过呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"如果是长长的黑发,\x01",
"和约修亚公子是很配的哦……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F我、我说……\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F就这样,3比1多数取胜,\x01",
"最终的结果出现⊙\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"那就请到这边来。\x01",
"更衣室已经准备好了……\x02",
)
)
CloseMessageWindow()
def lambda_1F06():
label("loc_1F06")
TurnDirection(0xFE, 0x102, 0)
OP_48()
Jump("loc_1F06")
QueueWorkItem2(0x8, 1, lambda_1F06)
SetChrFlags(0x101, 0x4)
SetChrFlags(0x102, 0x4)
SetChrFlags(0x9, 0x4)
SetChrFlags(0x9, 0x40)
def lambda_1F2B():
OP_8E(0xFE, 0x10C48, 0x0, 0x12D4A, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_1F2B)
Sleep(300)
OP_8E(0x101, 0x10FF4, 0x0, 0x10AEA, 0xBB8, 0x0)
ChrTalk(
0x102,
(
"#010F请等一下!\x01",
"我换衣服这件事怎么一句话就……\x02",
)
)
CloseMessageWindow()
OP_8E(0x102, 0x10FEA, 0x0, 0x1090A, 0x7D0, 0x0)
OP_8C(0x102, 180, 400)
def lambda_1FC1():
OP_6D(69970, 0, 72360, 3000)
ExitThread()
QueueWorkItem(0x101, 3, lambda_1FC1)
def lambda_1FD9():
OP_8E(0xFE, 0x10C48, 0x0, 0x12D4A, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1FD9)
def lambda_1FF4():
OP_8F(0xFE, 0x10C48, 0x0, 0x12D4A, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x102, 1, lambda_1FF4)
OP_62(0x8, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
Sleep(3000)
ChrTalk(
0x102,
(
"#010F我知道,我知道的啊……\x01",
"衣服什么的由我自己来脱……\x02\x03",
"啊……茜亚小姐……\x01",
"还要化妆的啊……!?\x02",
)
)
CloseMessageWindow()
OP_63(0x8)
ChrTalk(
0x8,
(
"#710F呼……\x01",
"现在的年轻人啊……\x02",
)
)
CloseMessageWindow()
FadeToDark(2000, 0, -1)
OP_0D()
OP_6D(69200, 0, 72370, 0)
SetChrPos(0x8, 68890, 0, 69520, 0)
SetChrFlags(0x101, 0x1000)
SetChrFlags(0x102, 0x1000)
SetChrChipByIndex(0x101, 2)
SetChrChipByIndex(0x102, 3)
FadeToBright(2000, 0)
def lambda_2126():
OP_8E(0xFE, 0x11062, 0x0, 0x116F2, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_2126)
Sleep(600)
def lambda_2146():
OP_8E(0xFE, 0x10E00, 0x0, 0x11D78, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_2146)
WaitChrThread(0x9, 0x1)
def lambda_2166():
OP_8E(0xFE, 0x1090A, 0x0, 0x11E18, 0x7D0, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_2166)
WaitChrThread(0x9, 0x1)
def lambda_2186():
OP_8C(0xFE, 180, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_2186)
ChrTalk(
0x8,
"#710F啊……\x02",
)
CloseMessageWindow()
OP_8C(0x101, 317, 400)
OP_8C(0x101, 75, 400)
OP_8C(0x101, 180, 400)
ChrTalk(
0x101,
(
"#000F您~好。\x02\x03",
"嗯,怎么样-呢?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"嘿嘿嘿……\x01",
"非常合适呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F刚到城里不久,\x01",
"活泼开郎的实习侍女……\x01",
"这种说法十分有说服力啊。\x02\x03",
"头发也批下来之后,\x01",
"就更不容易被注意到了。\x02\x03",
"不如就到我们这个\x01",
"格兰赛尔城来工作如何?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F游、游击士那边还有任务,\x01",
"所以这个就……\x02\x03",
"啊,对了。\x02",
)
)
CloseMessageWindow()
TurnDirection(0x101, 0x102, 400)
ChrTalk(
0x101,
(
"#000F喂喂,约修亚。\x01",
"快点出来吧~\x02",
)
)
CloseMessageWindow()
def lambda_23A2():
TurnDirection(0xFE, 0x102, 400)
ExitThread()
QueueWorkItem(0x8, 1, lambda_23A2)
def lambda_23B0():
TurnDirection(0xFE, 0x102, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_23B0)
OP_6D(69080, 0, 73680, 1000)
ChrTalk(
0x102,
(
"#010F啊……\x02\x03",
"不出来不行吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F不-行。\x02\x03",
"再喋喋不休的话\x01",
"我就去把你拖出来了哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F我明白了……\x02\x03",
"唉,没办法了……\x02",
)
)
CloseMessageWindow()
def lambda_2479():
label("loc_2479")
TurnDirection(0xFE, 0x102, 0)
OP_48()
Jump("loc_2479")
QueueWorkItem2(0x9, 1, lambda_2479)
def lambda_248A():
label("loc_248A")
TurnDirection(0xFE, 0x102, 0)
OP_48()
Jump("loc_248A")
QueueWorkItem2(0x8, 1, lambda_248A)
def lambda_249B():
label("loc_249B")
TurnDirection(0xFE, 0x102, 0)
OP_48()
Jump("loc_249B")
QueueWorkItem2(0x101, 1, lambda_249B)
OP_8E(0x102, 0x10DBA, 0x0, 0x11DC8, 0x3E8, 0x0)
ChrTalk(
0x102,
"#010F………………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F这竟然会……\x01",
"相称的到了可怕的程度。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F怎么样,我说过的吧!?\x02\x03",
"真是的,竟然比身为\x01",
"女子的我还要有形,\x01",
"这到底是怎-么回事嘛。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"嘿嘿嘿……\x01",
"我还为他好好的化了妆的哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F好了……\x01",
"请不要再说了……\x02",
)
)
CloseMessageWindow()
OP_6D(68990, 0, 71660, 1000)
def lambda_25F1():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_25F1)
def lambda_25FF():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_25FF)
ChrTalk(
0x8,
(
"#710F嗯……\x01",
"准备完毕了。\x02\x03",
"那么我现在就\x01",
"带领你们去女王宫吧。\x02\x03",
"彻底的把自己当成\x01",
"实习侍女,这一点要记住。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F啊,好的,明白了。\x02\x03",
"唔……\x01",
"终于要见到女王了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F嗯……到了关键时刻了。\x02\x03",
"集中精力,\x01",
"无论如何也要进入女王宫。\x02",
)
)
CloseMessageWindow()
TurnDirection(0x101, 0x102, 400)
ChrTalk(
0x101,
(
"#000F噗哧,你这身打扮配合这样\x01",
"严肃的话真是天衣无缝啊……\x02",
)
)
CloseMessageWindow()
TurnDirection(0x102, 0x101, 800)
ChrTalk(
0x102,
(
"#010F太、太坏了!\x01",
"什么天衣无缝!\x02\x03",
"我都打扮成这副\x01",
"模样了,你还取笑我……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F对不起对不起,\x01",
"不要那么倔犟嘛。\x02\x03",
"下次我请你吃冰淇淋\x01",
"消消气哈~\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F哼,我又不像你,\x01",
"用吃的是不能收买我的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F我、我什么时候\x01",
"被吃的给收买过?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"嘿嘿嘿……\x01",
"真是一对好伙伴呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F时间快来不及了……\x01",
"立刻前往女王宫吧。\x02",
)
)
CloseMessageWindow()
OP_28(0x4A, 0x1, 0x20)
OP_28(0x4A, 0x1, 0x40)
SetChrFlags(0x8, 0x40)
def lambda_295C():
OP_92(0xFE, 0x0, 0x0, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_295C)
EventEnd(0x0)
AddParty(0x37, 0xFF)
SetChrChipByIndex(0x0, 0)
SetChrChipByIndex(0x1, 2)
SetChrChipByIndex(0x138, 3)
SetChrFlags(0x0, 0x1000)
SetChrFlags(0x1, 0x1000)
SetChrFlags(0x138, 0x1000)
SetChrFlags(0x8, 0x80)
Return()
# Function_6_FDA end
def Function_7_2994(): pass
label("Function_7_2994")
EventBegin(0x0)
FadeToBright(2000, 0)
OP_6D(68370, 0, 69650, 0)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
SetChrPos(0x8, 68920, 0, 70070, 180)
SetChrPos(0x9, 67750, 0, 70350, 180)
RemoveParty(0x37, 0xFF)
SetChrPos(0x101, 67080, 0, 68350, 0)
SetChrPos(0x102, 68360, 0, 68190, 0)
OP_0D()
ChrTalk(
0x101,
(
"#000F希尔丹夫人,茜亚小姐,\x01",
"真是太感谢你们了!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F帮了我们大忙啊。\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#710F哪里,这是为陛下服务\x01",
"的人理所当然的义务。\x02\x03",
"陛下委托的任务\x01",
"无论如何拜托了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"那、那个……\x01",
"我也要拜托你们……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"请一定替我们……\x01",
"把公主殿下救出来啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F啊,茜亚小姐\x01",
"服侍过公主殿下的吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"是、是的……\x01",
"虽然能够照顾她的\x01",
"机会并不多,很遗憾……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"但是她把我这种下人\x01",
"当作朋友一样对待,\x01",
"是一个平易近人而又温柔的人呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
(
"当听说她被\x01",
"囚禁了的时候,\x01",
"我每天都担心的睡不着觉……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#000F是吗……\x01",
"我们一定会把她救出来的!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F那我们就告辞了。\x02",
)
CloseMessageWindow()
EventEnd(0x0)
Return()
# Function_7_2994 end
SaveToFile()
Try(main)
|
from sys import platform as os_name

from abstract_os import AbstractOS


def singleton(cls):
    """Class decorator that caches a single instance of cls.

    Note: it returns inner() rather than inner, so the decorated name
    is bound directly to the singleton instance, not to a factory.
    """
    _instance = {}

    def inner():
        if cls not in _instance:
            _instance[cls] = cls()
        return _instance[cls]

    return inner()


@singleton
class NativeOS(AbstractOS):
    def __init__(self):
        # The platform-specific backend is created lazily by instance().
        self._instance = None

    def instance(self) -> AbstractOS:
        if self._instance is not None:
            return self._instance
        # Import only the backend for the current platform so that
        # unsupported platforms never touch modules they don't have.
        if os_name == 'win32':
            from native.windows_os import WindowsNative
            self._instance = WindowsNative()
        elif os_name == 'linux':
            from native.linux_os import LinuxNative
            self._instance = LinuxNative()
        elif os_name == 'darwin':
            from native.mac_os import MacNative
            self._instance = MacNative()
        else:
            raise NotImplementedError(os_name)
        return self._instance
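

# Usage sketch (an illustrative addition, not part of the original module):
# because @singleton returns inner(), NativeOS above is already the single
# instance, so callers invoke .instance() on it directly. The platform
# backends imported inside instance() are assumed to exist as shown above.
if __name__ == '__main__':
    backend = NativeOS.instance()           # builds the backend on first call
    assert backend is NativeOS.instance()   # later calls return the cached object
    print(type(backend).__name__)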
|