blob_id stringlengths 40-40 | language stringclasses 1 value | repo_name stringlengths 5-133 | path stringlengths 2-333 | src_encoding stringclasses 30 values | length_bytes int64 18-5.47M | score float64 2.52-5.81 | int_score int64 3-5 | detected_licenses listlengths 0-67 | license_type stringclasses 2 values | text stringlengths 12-5.47M | download_success bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|
072988ddd38519237e86f4c699c9f59b0325b3f5 | Python | avoevodin/lab5 | /levy_curve.py | UTF-8 | 638 | 3.78125 | 4 | [] | no_license |
"""Draw Levy curve fractal.
"""
import turtle
turtle.speed('fastest')
def draw_levy_curve(edge_width: int, rec_deep: int):
"""Draw Levy curve fractal.
Keyword args:
    edge_width -- current edge width (int)
    rec_deep -- remaining recursion depth (int)
"""
if rec_deep == 0:
turtle.forward(edge_width)
return
    new_edge_width = edge_width * 2 ** 0.5 / 2  # each sub-edge has length edge_width / sqrt(2)
turtle.left(45)
draw_levy_curve(new_edge_width, rec_deep - 1)
turtle.right(90)
draw_levy_curve(new_edge_width, rec_deep - 1)
turtle.left(45)
if __name__ == "__main__":
    draw_levy_curve(3000000, 9)
    turtle.done()  # keep the window open after the drawing finishes
| true |
863a802ac3d73b40e6d2644d48b366a6e288bd78 | Python | asanchez78/max7219 | /matrix_scroll.py | UTF-8 | 642 | 3.21875 | 3 | [] | no_license |
#!/usr/bin/python3
import max7219.led as led
import argparse
device = led.matrix(cascaded = 3)
parser = argparse.ArgumentParser(description='Scrolls message on LED matrix')
parser.add_argument('-m','--message',help='The message to scroll on the LED matrix',required=True)
parser.add_argument('-r','--repeat',help='The number of times to repeat the message. 0 will scroll forever.',required=True,type=int)
arguments = parser.parse_args()
message = arguments.message
scrolls = arguments.repeat
if scrolls == 0:
    while True:  # 0 means scroll forever
        device.show_message(message)
else:
while scrolls > 0 :
device.show_message(message)
scrolls -= 1
| true |
b47de52c4bb3bf7c8ed72ac716ad80d43e4418a0 | Python | GudUgne/Block_3 | /Bit_1.py | UTF-8 | 1,244 | 3.046875 | 3 | [] | no_license |
# `pc_transaction.py` example
from bitcoin.rpc import RawProxy
p = RawProxy()  # connection to the local bitcoind RPC
# Example ID: "4410c8d14ff9f87ceeed1d65cb58e7c7b2422b2d7529afc675208ce2ce09ed7d"
txid = input("Enter the transaction ID\n")
# First, retrieve the raw transaction in hex (full transaction info, from the example)
raw_tx = p.getrawtransaction(txid)
# Decode the transaction hex into a JSON object (from the example)
decoded_tx = p.decoderawtransaction(raw_tx)
i_sum = []  # storage for the output values
what_got = 0
# Retrieve each of the outputs from the transaction
for output in decoded_tx['vout']:
    i_sum.append(output['value'])  # save the value
print("What the transaction recipient received: ")
for ou in i_sum:
    print(ou)
    what_got += ou  # accumulate what was received
whole_sum = 0  # total sum of the inputs
# Calculating the whole sum
for tx_in in decoded_tx['vin']:  # sum all the transfers spent as inputs
    out_index = tx_in['vout']
    call_tx = p.getrawtransaction(tx_in['txid'])
    decoded_call_tx = p.decoderawtransaction(call_tx)
    whole_sum += decoded_call_tx['vout'][out_index]['value']
print("Total sum: ")
print(whole_sum)
tran_fee = whole_sum - what_got
print("Transaction fee received by the miner: ")
print(tran_fee)
| true |
ced559d875588c05039e1845b3dc10a43e38a30e | Python | Ruk288/Project-01 | /Chapter#05.py | UTF-8 | 4,059 | 3.9375 | 4 | [] | no_license |
# simple if statement
cars=['audi','bmw','suzuki','toyota']
for car in cars:
if car=='bmw':
print(car.upper())
else:
print(car.title())
car='Audi'
car.lower() =='audi'
# checking for inequality
requested_topping='pepperoni'
if requested_topping != 'mushrooms':
    print("hold the mushrooms!")
# Numerical Comparisions
answer=17
if answer!=42:
print("that is not the correct number")
#checking user is not in the list
banned_users=['andrew','carolina','david']
user='marie'
if user not in banned_users:
    print(user.title() + ", you can respond")
# TRY IT YOURSELF
#5-1
car='subaru'
print("Is car == 'subaru' I predict true")
print(car=='subaru')
#5-2
for car in cars:
if car=='Audi':
print(car.lower()=='audi')
if car!='audi':
print(car=='suzuki')
numbers=[1,2,3,5,6,7]
for num in numbers:
    if (num==4 or num==6):
        print(str(num) + " is not in the list")
    if(num!=4):
        print(str(num) + " is in the list")
#Simple IF statement
age=12
if age >= 18:
    print("you can vote")
else:
    print("you can not vote")
# if else statement
age=17
if age>=18:
print("you are eligible for the test")
else:
print("sorry you are not elligiable")
#if-elif-else
age=12
if age<4:
price=0
elif age < 18:
price=5
else:
price=10
print("you admission cost is "+str(price))
#MULTIPLE ELIF STATEMENTS
age=12
if age<4:
price=0
elif age < 18:
price=5
elif age<65:
price=10
elif age>=65:
price=5
print("you admission cost is "+str(price))
#multiple if without else of elif
requested_toppings=['mushrooms','extra cheese']
if 'mushrooms' in requested_toppings:
    print("adding mushrooms")
if 'pepperoni' in requested_toppings:
    print("Add pepperoni")
if 'extra cheese' in requested_toppings:
    print("add extra cheese")
##################### TRY YOURSELF #######################
# AlienColors
#5-4
alien_color=['green','yellow','red']
if 'green' in alien_color:
print("the player earned 5 points")
else:
print("th eplayer just earned 10 points")
#5-5
alien_color=['green','yellow','red']
if 'green' in alien_color:
print("the player earned 5 points")
elif 'yellow' in alien_color:
print("the player just earned 10 points")
else:
print("The player earned 15 points")
#5-6
age=14
if age<2:
    print("the person is a baby")
elif age<4:
    print("the person is a toddler")
elif age<13:
    print("the person is a kid")
elif age<65:
    print("the person is an adult")
else:
    print("the person is an elder")
#5-7
fav_fruits=['apple','mango','banana','pineapple','melon']
if 'apple' in fav_fruits:
print("I really like apple")
if 'mango' in fav_fruits:
print("I really like mango")
if 'banana' in fav_fruits:
print("I really like banana")
if 'pineapple' in fav_fruits:
    print("I really like pineapple")
if 'melon' in fav_fruits:
print("I really like melon")
# Checking for special items
requested_toppings=['mushroom','green peppers','extra cheese']
for requested_topping in requested_toppings:
if requested_topping=='green peppers':
print("sorry we are out of green peppers right now")
else:
print("Adding " + requested_topping)
print("finished making piza")
requested_toppings=[]
if requested_toppings:
for requested_topping in requested_toppings:
print("Adding "+ requested_topping)
print("Finish making your pizza")
else:
print("Are you sure you want a plan pizza")
available_toppings=['mushrooms','olives','green peppers','pepperoni','pineapple','extra cheese']
requested_toppings=['mushrooms','french fries','extra cheese']
for requested_topping in requested_toppings:
if requested_topping in available_toppings:
print("Adding "+ requested_topping)
else:
print("sorry we dont have " + requested_topping)
print("Finished making pizza")
# TRY YOURSELF
#5-8
usernames=['ali','sara','admin','zara','saira']
for user in usernames:
    if user=='admin':
        print("hi "+ user + ", welcome here")
    else:
        print("hi " + user + ", welcome")
| true |
cb28b141fcfec1c03f55ea3f9300f19a911b2b2c | Python | manelmengibar/Python_Excel | /Pandas/Basic/Create.py | UTF-8 | 485 | 3.09375 | 3 | [] | no_license |
import pandas as pd
# DataFrame with Empresa and Anys columns
df = pd.DataFrame({'Empresa': ['Draexlmaier', 'Seat', 'Fujikura', 'Synergie'],
'Anys': [5, 20, 30, 10]})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('demo.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='LListat', index=False)
# Close the Pandas Excel writer and output the Excel file.
writer.close()  # writer.save() was deprecated and later removed from pandas; close() writes the file
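# Optional sanity check (a sketch, not part of the original script; assumes
# openpyxl is installed so pandas can read .xlsx back): reload and print the sheet.
print(pd.read_excel('demo.xlsx', sheet_name='LListat'))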
| true |
ee17411d4662f9c226544cc5d94ced0693e8d994 | Python | Instrumedley/hypothesis | /exceptions.py | UTF-8 | 371 | 2.984375 | 3 | [] | no_license |
class Error(Exception):
"""Base class for other exceptions"""
pass
class AddTransactionError(Error):
"""Raised when you can't create a transaction for Person"""
pass
class InvalidNumberError(Error):
"""Raised when input is not an int or float"""
pass
class InvalidDateError(Error):
"""Raised when date string is not a valid date"""
pass
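# A minimal usage sketch (the validate helper below is hypothetical, not part
# of the original module): raise and catch one of the custom exceptions.
if __name__ == '__main__':
    def validate_number(value):
        if not isinstance(value, (int, float)):
            raise InvalidNumberError('{!r} is not an int or float'.format(value))
        return value

    try:
        validate_number('ten')
    except InvalidNumberError as err:
        print('Rejected input:', err)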
| true |
dbf3b203ee329e31f718be93018b26afe9840068 | Python | beitay/SpamRepo | /Spammer.py | UTF-8 | 348 | 3.125 | 3 | [] | no_license |
from pynput.keyboard import Key, Controller
import time
times_to_spam = int(input("Enter number of times to spam> "))
time.sleep(5)
keyboard = Controller()  # create the controller once, outside the loop
i = 0
while i < times_to_spam:
    # any letter
    keyboard.press('A')
    keyboard.release('A')
    keyboard.press(Key.enter)
    keyboard.release(Key.enter)
    i += 1
| true |
d5b191508f587b8cdde93f7003ba5e08f0858269 | Python | arkakrak/Jezyki_Skryptowe | /Zadanie9.py | UTF-8 | 714 | 3.609375 | 4 | [] | no_license |
input_file = open("input_file.txt", "w")
input_file.writelines("I chose to write this line 1\n")
input_file.writelines("I chose to write this line 2\n")
input_file.writelines("I chose to write this line 3\n")
input_file.writelines("I chose to write this line 4\n")
input_file.writelines("I chose to write this line 5\n")
input_file.close()
try:
with open("input_file.txt", "r") as input_file:
with open("output_file.txt", "w") as output_file:
for row in input_file:
output_file.write(row)
except (IOError, ZeroDivisionError):
print("I/O Error or dividing by zero")
output_file_read = open("input_file.txt", "r")
print(output_file_read.read())
output_file_read.close()
| true |
ad5ad7c50e54a80248f3008634983c2ddfc28675 | Python | mapoferri/Bioinformatics-projects | /GOR-SVM/extract_matrix.py | UTF-8 | 2,532 | 2.765625 | 3 | [] | no_license |
import os
import numpy as np
import sys
#run with python3!
def extract_matrix(pssm_file):
with open(pssm_file, "r") as pssm:
a = 0
matrix=[[] for line in pssm]
with open(pssm_file, "r") as porcod:
for line in porcod:
line = line.split()
#print (line) #iterating for line
if a == 0: #first line (residues)
a += 1
                continue  # pass --> see how this behaves and replace if needed
elif a >= 1 : #cutting first line, MATRIX
#print (line)
sequence_p = (line[22:42])
print (sequence_p)
for value in sequence_p:
freq = np.true_divide(int(value),100)
matrix[a].append(freq) #normalized
a += 1
#matrix_file = "matrix_"+pssm_file
#with open(matrix_file, "w+") as m:
matrix = matrix[1:len(matrix)]
#for line in matrix:
#m.write(str(line)+ '\n')
return matrix
################################################################################
# code to insert padding in every matrix and save to another open #
# extracted matrix will be used for SVM too, so we need different matrices #
# as GOR input (solving indexing problems) #
################################################################################
#def padding(matrix):
#padding = np.zeros((8,20), dtype = float) #empty arrays of eight rows
#print (padding)
#matrix = np.loadtxt(matrix, delimiter=',')
#print (matrix)
#gor_matrix = "gor_input_"+matrix_file
#with open(matrix) as matrix:
#GORMatrix = np.concatenate((padding[:,None], matrix, padding[:,None]), axis = 0)
#GORMatrix = padding + matrix + padding #adding padding to gor input matrices
#print (GORMatrix)
#with open(gor_matrix, "a+") as gor_matrix:
#gor_matrix.write(str(padding))
#gor_matrix.write(str(matrix))
#gor_matrix.write(str(padding))
if __name__ == '__main__':
    path = sys.argv[1]  # giving as input the directory with all the files
    files_list = [f for f in os.listdir(path)]
    for files in files_list:  # iterate over the files in the directory
        if files.endswith('.pssm'):
            pssm_file = os.path.join(path, files)  # full path, so files outside the cwd open correctly
            matrix = extract_matrix(pssm_file)
#gor_matrix = padding(matrix)
#if files.startswith('matrix'):
#matrix_file = files
#gor_matrix = padding(matrix_file)
#matrix_file = extract_matrix(pssm_file)
            np.save('matrix_prova_{}.npy'.format(files), matrix)
#print (type(matrix_file))
#gor_matrix = padding(matrix)
#print (gor_matrix)
| true |
92b34099ca3cac42e8368a57b77c7200a24bf80e | Python | wuqingtao-GitHub/Speech-Transformer-tf2.0 | /test/test_input_mask.py | UTF-8 | 5,323 | 2.6875 | 3 | [] | no_license |
import tensorflow as tf
import numpy as np
#####################################################
# NOTE:
# This mask hides the padded positions of the attention
# output (N, seq_q, seq_k) along the seq_k axis
# (k is the key, i.e. the sentence being queried).
#####################################################
def create_padding_mask(seq):
'''
:param seq: [batch_size * seq_len_k] # k means key in MultiheadAttention
:return: [batch_size, 1, seq_len_k]
'''
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions so that we can add the padding
# to the attention logits.
return seq[:, tf.newaxis, :] # (batch_size, 1, seq_len)
def create_padding_mask2(seq,seq_lengths):
'''
padding position is set to 1.
padding_mask can be broadcast on attention_logits (batch_size * seq_q* seq_k)
:param seq: [batch_size * seq_len * feature_dim]
:param seq_lengths: [batch_size] (== seq_k in MultiheadAttention)
:return: padding_mask: batch_size * 1 * seq_len
'''
# seq = tf.math.equal(seq[:,:,0],0)
# seq = tf.math.equal(seq,False)
# seq = tf.cast(seq, tf.float32)
seq_lengths = tf.squeeze(seq_lengths).numpy()
# print('seq_lengths shape: ' + str(seq_lengths.shape.as_list()))
seq_shape = seq.shape.as_list()
padding_mask = np.zeros(seq_shape[:-1],dtype=seq.dtype.as_numpy_dtype) # batch_size * seq_len
for i in range(seq_shape[0]):
        padding_mask[i,int(seq_lengths[i]):] = 1  # eager mode doesn't support item assignment, so use numpy instead
# add extra dimensions so that we can add the padding to the attention logits.
return tf.convert_to_tensor(padding_mask[:,np.newaxis,:])
#####################################################
# NOTE:
# This mask hides the positions after the current time
# step i in the attention output (N, seq_q, seq_k),
# along the seq_k axis (k is the key, i.e. the
# sentence being queried).
#####################################################
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len) AKA (seq_q, seq_k)
#####################################################
# NOTE:
# The encoder self-attention and the decoder's 2nd attention
# block only need to mask the padded positions.
# The decoder's 1st (self-attention) block must mask both the
# padded positions and the future time steps.
#####################################################
def create_masks(inp, tar):
'''
:param inp: [batch_size * seq_len_k_of_encoder ]
:param tar: [batch_size * seq_len_k_of_decoder_block2 ]
:return:
'''
# Encoder padding mask
enc_padding_mask = create_padding_mask(inp)
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
    # encoder outputs are [batch_size * seq_len * d_model]; the middle (seq_len) dimension is unchanged from the original encoder input, so the mask is computed from inp
dec_padding_mask = create_padding_mask(inp)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by the decoder.
look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
def create_masks2(inp, tar, inp_len, tar_len):
'''
:param inp: [batch_size * seq_len * feature_dim]
:param tar: [batch_size * seq_len * feature_dim]
:param inp_len: [batch_size]
:param tar_len: [batch_size]
:return:
'''
# Encoder padding mask
enc_padding_mask = create_padding_mask2(inp,inp_len)
# Used in the 2nd attention block in the decoder.
# This padding mask is used to mask the encoder outputs.
    # encoder outputs are [batch_size * seq_len * d_model]; the middle (seq_len) dimension is unchanged from the original encoder input, so the mask is computed from inp
dec_padding_mask = create_padding_mask2(inp,inp_len)
# Used in the 1st attention block in the decoder.
# It is used to pad and mask future tokens in the input received by the decoder.
look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = create_padding_mask2(tar,tar_len)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return enc_padding_mask, combined_mask, dec_padding_mask
if __name__=='__main__':
x = np.array([[[7, 6, 1, 1, 1], [1, 2, 3, 1, 1], [1, 1, 1, 1, 1]],
[[7., 6, 6, 1, 1], [0.0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]])
length = [3,1]
length2 = [3,3]
print(create_padding_mask(x[:,:,0]))
x2 = np.random.randn(2,3,5)
print(create_padding_mask(x2[:,:,0]))
temp = create_look_ahead_mask(x2.shape[1])
print(temp)
temp = create_masks(x2[:,:,0],x[:,:,0])
print(temp)
x = tf.constant([[[7, 6, 1, 1, 1], [1, 2, 3, 1, 1], [1, 1, 1, 1, 1]],
[[7., 6, 6, 1, 1], [0.0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]])
length = tf.constant([3,1])
length2 = tf.constant([3,3])
print(create_padding_mask2(x,length))
x2 = tf.random.uniform((2,3, 5))
print(create_padding_mask2(x2,length2))
temp = create_look_ahead_mask(x2.shape[1])
print(temp)
temp = create_masks2(x2, x, length2,length)
print(temp)
| true |
efaa3bf10aee5ffe79b7dee0b6f85adb9385b2d2 | Python | zilongwang1993/MergeSortedFiles | /mergeFile.py | UTF-8 | 2,756 | 3.34375 | 3 | [] | no_license |
import heapq
import fnmatch
import os
import sys
# Name: hw.py
# Author: Zilong Wang
# Goal: merge any number of sorted text files with one data per line into a single file.
# Requirements:
# 1. The program must compile and run without errors on the sample input files.
# 2. The program should be self-documenting. Running the program with no input arguments
# should produce instructions. The program should require no input from the user except
# arguments passed on the command line when executing the program. Do not prompt the user
# for input.
# 3. The program should be able to handle input files too large to fit entirely in memory.
# 4. The program should be able to handle merging a file with itself any number of times.
# 5. The program must be robust. It should not crash or show low-level generic exceptions or
# stack traces if given unexpected or invalid input.
def main():
try:
file_names=[]
args=sys.argv
#If no input argument for file names are found, the default is to
#check for all the .txt files in the current directory and use them as input files.
if len(args) == 1:
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*.txt'):
file_names.append(file)
#If input file names are provided as command line arguments,
#use them as input files.
elif len(args)>1:
file_names=args[1:]
merge(file_names)
    except Exception:
        print("Unexpected error in main():", sys.exc_info()[0])
def merge(file_names):
try:
#create a priority queue for maintaining the current smallest strings from each file.
pq=[]
opened_files = [ open(f) for f in file_names]
output_file_name = "output.txt"
if os.path.isfile(output_file_name):
f= open(output_file_name,'w+')
f.close()
# print "file exists"
# os.remove(output_file_name)
for cur_file in opened_files:
first = cur_file.readline().rstrip()
#detect empty file
            if len(first) == 0:
continue
heapq.heappush(pq,(first,cur_file))
output = open(output_file_name,'w+')
#keep popping from the priority queue until it is empty.
while(pq):
            cur = heapq.heappop(pq)
            output.write(cur[0]+'\n')
            print(cur[0])
            nxt = cur[1].readline()
            if len(nxt) > 0:
                nxt = nxt.rstrip()
                # check if the input file is sorted properly.
                # if the file is not sorted, raise exception.
                if nxt < cur[0]:
                    raise ValueError('The input file is not sorted properly for ' + cur[1].name + ". Please fix it and try again.")
                #put the next smallest item in the queue
                heapq.heappush(pq,(nxt,cur[1]))
        for f in opened_files:
            f.close()
        output.close()
    except ValueError as err:
        print(err.args)
    except Exception:
        print("Unexpected error in merge():", sys.exc_info()[0])
if __name__ == "__main__":
main()
| true |
434c72767437a9fdfcd869aa94932a817d004552 | Python | svf55/get_image | /client/client.py | UTF-8 | 2,725 | 2.75 | 3 | [] | no_license |
#!/usr/bin/env python
import io
from PIL import Image
import logging
from websocket import create_connection, ABNF
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'proto'))
import get_image_pb2
class WebSocketClient(object):
"""
Getting an image through a Websocket
"""
def __init__(self, host_ws, port_ws):
self.host_ws = host_ws
self.port_ws = port_ws
self._logger = logging.getLogger(__name__)
def get_image(self, image_generator, image_max_width, image_max_height, image_gray):
ws = create_connection('ws://' + self.host_ws + ':' + str(self.port_ws))
client_request = get_image_pb2.ClientRequest()
client_request.image_generator = image_generator
if image_max_width:
client_request.image_max_width = image_max_width
if image_max_height:
client_request.image_max_height = image_max_height
client_request.image_gray = image_gray
message = client_request.SerializeToString()
ws.send(message, opcode=ABNF.OPCODE_BINARY)
self._logger.info('Sent')
self._logger.info('Receive...')
data = ws.recv()
server_response = get_image_pb2.ServerResponse()
server_response.ParseFromString(data)
bio = io.BytesIO(server_response.image_byte)
bio.seek(0)
image = Image.open(bio)
self._logger.info('Save image %s', server_response.image_file_name)
image.save(server_response.image_file_name)
ws.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Websocket client')
parser.add_argument('--host_ws', help='Websocket host, example: localhost', required=True)
parser.add_argument('--port_ws', type=int, default=8888, help='Websocket port, example: 9500')
parser.add_argument('--image_generator', default='PIL',
choices=['file', 'PIL'], help='Image generator', required=True)
parser.add_argument('--image_max_width', type=int, help='Image max width (integer)')
parser.add_argument('--image_max_height', type=int, help='Image max height (integer)')
parser.add_argument('--image_gray', action='store_true', default=False, help='Image is gray')
args = parser.parse_args()
# Setup logging
FORMAT = '%(asctime)s %(process)d %(levelname)s %(name)s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
# Getting an image through a Websocket
WebSocketClient(args.host_ws, args.port_ws).get_image(args.image_generator, args.image_max_width,
args.image_max_height, args.image_gray)
| true |
bc0183f0b653f7114426f94fd340cc72fb234550 | Python | AbubakarSaad/NN-Assignment2 | /functions.py | UTF-8 | 561 | 2.921875 | 3 | [] | no_license |
import math
import numpy as np
class Functions():
def neighbourhood(self, radius, numIteration, timeConstant):
return radius * np.exp(-(numIteration / timeConstant))
def guassin(self, radius, dist):
return np.exp(-(dist**2)/(2*(radius**2)))
# updating the learning rate
def updateLR(self, learningRate, numIteration, timeConstant):
return learningRate * np.exp(-(numIteration / timeConstant))
def mexicanhat(self, radius, dist):
return (1-(dist**2/radius**2)) * np.exp(-(dist**2)/(2*(radius**2)))
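# A minimal usage sketch (hypothetical SOM hyper-parameters, not from the
# original assignment): watch the neighbourhood radius and learning rate decay
# as the iteration count grows.
if __name__ == '__main__':
    f = Functions()
    radius0, lr0, time_constant = 5.0, 0.1, 100.0
    for iteration in (0, 100, 200):
        r = f.neighbourhood(radius0, iteration, time_constant)
        lr = f.updateLR(lr0, iteration, time_constant)
        print('iteration', iteration, '-> radius', round(r, 3), ', learning rate', round(lr, 4))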
| true |
66f39aa0f7da929f6b74eb0678719c9f598de6e0 | Python | dineshkumarkummara/my-basic-programs-in-java-and-python | /folders/python/instagram/90class.py | UTF-8 | 327 | 4.4375 | 4 | [] | no_license |
#Creating a class in Python. In the example, the class has a single method called "talk",
# which prints a default greeting. Then, two objects (or instances) of that class are created,
# and "talk" is called on each of them.
class animal:
def talk(self):
print("i am an animal")
animal1=animal()
animal1.talk()
animal2=animal()
animal2.talk()
| true |
cf5cfb7497d9646be831c6f8d2f58598566dc853 | Python | SRH-BDBA/movie-data | /aggregation.py | UTF-8 | 538 | 2.5625 | 3 | [] | no_license |
import pymongo
import config
conn = config.MONGO_URL
client = pymongo.MongoClient(conn)
db = client["movies"]
collection1 = db.movies_collection
collection2 = db.budget_collection
collection3 = db.aggregated_collection
data = list(collection1.aggregate( [
{ "$lookup" : {
"from" : "budget_collection",
"localField" : "original_title",
"foreignField" : "title",
"as": "aggregate"
}
},
]))
collection3.insert_many(data)
print(f'{len(data)} movies were inserted into the aggregated_collection')
| true |
b280a01ec97b2770213f75e29d67faf276d4e857 | Python | mauromatsudo/brazilian-stocks-analyzer | /B3Analyzer/data/B3_list.py | UTF-8 | 4,196 | 3.0625 | 3 | ["Apache-2.0"] | permissive |
'''
Author: Mauro Matsudo
This script uses the official B3 web site to get the list of all the firms traded on the Brazilian stock market
'''
import openpyxl
import requests
from pandas import DataFrame
from zipfile import ZipFile
from io import BytesIO
from sys import exit
from os.path import exists
class Plan:
def __init__(self):
self._url = 'http://www.b3.com.br/lumis/portal/file/fileDownload.jsp?fileId=8AA8D0975A2D7918015A3C81693D4CA4'
def download_plan(self):
try:
request = requests.get(self._url)
        except requests.exceptions.ConnectionError:
            print('There is a problem with your connection to http://www.b3.com.br.')
            exit(1)
        if not request.ok:
            print(
                'An error occurred. If everything is alright with your internet and proxy, then the worksheet is no longer available at '
                'http://www.b3.com.br/pt_br/produtos-e-servicos/negociacao/renda-variavel/acoes/consultas/classificacao-setorial/')
            exit(request.status_code)
file = ZipFile(BytesIO(request.content))
name = file.infolist()[0].filename
        # The file name contains the release date, so if the worksheet was previously downloaded with the same date
        # it isn't necessary to refresh our data
if exists(name):
print("The excel file from B3 is the last release. There is no reason to download new version.\n"
"If there is any problem with your plan, please delete it and run the scrip again!")
else:
file.extractall()
return openpyxl.load_workbook(name)
def organize_plan(self, download_new=False):
if download_new == True:
plan = self.download_plan()
else:
plan = openpyxl.load_workbook("Setorial B3 03-03-2020 (português).xlsx")
        # It'll select the first sheet no matter its name, so if B3 changes it, there'll be no effect
sheet = plan[plan.sheetnames[0]]
        # Normally column D stores the ticker, so it will be our reference
max_row_d = max((d.row for d in sheet['D'] if d.value is not None)) # Get the number of companies trade in B3
tickers = {}
        for row in sheet.iter_rows(min_row=1, max_row= max_row_d, min_col=4 ,max_col=4): # iterating over the rows containing the ticker codes
current_row = row[0]
ticker = current_row.value
# Note every classification is taken based on the way they are organized at the plan
            industry_cell = sheet.cell(row=current_row.row, column=1) # the industry sector is stored in the first column
if industry_cell.value == 'SETOR ECONÔMICO':
                # the general industry is defined below the 'SETOR ECONÔMICO' header; however, that header is merged and occupies
                # 2 rows, which is why we add 2. Note that, until the next header, all the firms belong to the same industry
industry = sheet.cell(row=(industry_cell.row+2), column=1).value
            sub_industry_cell = sheet.cell(row = current_row.row, column=2) # the sub-industry sector is stored in the second column
if current_row.row > 6 and sub_industry_cell.value is not None and sub_industry_cell.value != 'SUBSETOR':
sub_industry = sub_industry_cell.value
segment_cell = sheet.cell(row=current_row.row, column=3)
if segment_cell.row > 6 and segment_cell.value is not None and ticker is None:
segment = segment_cell.value
if ticker != None and (len(ticker) == 4):
row_addr = current_row.row
tickers[row_addr] = {'Ticker': ticker,
'Trade Name': sheet.cell(row=row_addr, column=3).value.strip(),
'Industry': industry.strip(),
'Sub-Industry': sub_industry.strip(),
'Segment': segment}
b3_df = DataFrame.from_dict(tickers, orient='index')
b3_df.to_excel("B3_list.xlsx", index=False)
if __name__ == "__main__":
plan = Plan()
plan.organize_plan()
| true |
ee4281029b847a8422580fe79cf4f0a31d92432b | Python | viniciusarruda/genetic-algorithm | /src/Image/lena_polygon.py | UTF-8 | 3,629 | 2.796875 | 3 | [] | no_license |
import time
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import polygon, set_color
from skimage.io import imread
from skimage.measure import compare_ssim
from skimage import img_as_float
from random import randint, random, uniform
# polygon vertices and color
# [xs, ys, r, g, b]
alpha = None
original = None
mx = None
my = None
img = None
def print_pop(population):
for p in population:
print p
print '\n\n'
def individual():
n = randint(3, 5)
return [np.array([uniform(0, mx) for _ in xrange(n)]), np.array([uniform(0, my) for _ in xrange(n)]), random(), random(), random()]
def fitness(individual):
tmp = img.copy()
rr, cc = polygon(individual[0], individual[1], original.shape)
set_color(tmp, (rr, cc), (individual[2], individual[3], individual[4]), alpha)
return (1.0 - compare_ssim(original, tmp, multichannel=True))
def mutate_x(value):
value *= uniform(0.9, 1.1)
return mx if value > mx else value
def mutate_y(value):
    value *= uniform(0.9, 1.1)
    return my if value > my else value
def mutate_grow(x, y):
idx = randint(0, len(x))
return np.insert(x, idx, uniform(0, mx)), np.insert(y, idx, uniform(0, my))
def mutate_color(r, g, b):
r *= uniform(0.9, 1.1)
g *= uniform(0.9, 1.1)
b *= uniform(0.9, 1.1)
r = 1.0 if r > 1.0 else r
g = 1.0 if g > 1.0 else g
b = 1.0 if b > 1.0 else b
return r, g, b
def crossover(male, female):
return [[male[0].copy()] + [male[1].copy()] + female[2:]] + [[female[0].copy()] + [female[1].copy()] + male[2:]]
def genetic_algorithm(n_individuals=500, figs=100, epochs=50, selection_rate=0.05, crossover_rate=0.6, mutation_rate=0.01):
retain = int(n_individuals * (1.0 - crossover_rate))
for _ in xrange(figs):
population = map(lambda _: individual(), xrange(n_individuals))
for _ in xrange(epochs):
population = list(zip(*sorted(zip(map(fitness, population), population), key=lambda t: t[0]))[1])
parents = population[:retain] + [i for i in population[retain:] if selection_rate > random()]
n_parents = len(parents)
n_children = n_individuals - n_parents
children = []
while len(children) < n_children:
male, female = randint(0, n_parents-1), randint(0, n_parents-1)
if male != female:
children.extend(crossover(parents[male], parents[female]))
population = parents + children
for i in population:
for g in xrange(len(i[0])):
if mutation_rate > random():
i[0][g] = mutate_x(i[0][g])
for i in population:
for g in xrange(len(i[1])):
if mutation_rate > random():
i[1][g] = mutate_y(i[1][g])
for i in population:
if mutation_rate > random():
i[0], i[1] = mutate_grow(i[0], i[1])
for i in population:
if mutation_rate > random():
i[2], i[3], i[4] = mutate_color(i[2], i[3], i[4])
best = min(zip(map(fitness, population), population), key=lambda t: t[0])
print "Fitness of adding circle: ", best[0]
rr, cc = polygon(best[1][0], best[1][1], original.shape)
set_color(img, (rr, cc), (best[1][2], best[1][3], best[1][4]), alpha)
def main():
global alpha, original, mx, my, img
alpha = 0.3
original = img_as_float(imread('lena.png'))
mx, my, _ = original.shape
img = np.zeros(original.shape, dtype=np.double)
start = time.clock()
genetic_algorithm()
print "Time elapsed: ", time.clock() - start
print "Final fitness: ", (1.0 - compare_ssim(original, img, multichannel=True))
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(6, 3))
ax1.imshow(original)
ax1.set_title('Original')
ax2.imshow(img)
ax2.set_title('Generated')
plt.show()
if __name__ == "__main__":
main()
| true |
f09773d08e6a70777646e0c7218780c701ecaed7 | Python | bundgus/py_curate_json | /xml_to_csv_pipeline.py | UTF-8 | 2,837 | 2.703125 | 3 | ["MIT"] | permissive |
import xml.etree.ElementTree as ET
from xmljson import yahoo as jencoder
from py_curate_json import curate_json_core as cjc
from py_curate_json.flatten_denorm_json import flatten_denorm_json
import json
import csv
def fixup_element_prefixes(elem, uri_map, memo):
def fixup(name):
try:
return memo[name]
except KeyError:
if name[0] != "{":
return
uri, tag = name[1:].split("}")
if uri in uri_map:
new_name = uri_map[uri] + ":" + tag
memo[name] = new_name
return new_name
# fix element name
name = fixup(elem.tag)
if name:
elem.tag = name
# fix attribute names
for key, value in elem.items():
name = fixup(key)
if name:
elem.set(name, value)
del elem.attrib[key]
def set_prefixes(elem, prefix_map):
# check if this is a tree wrapper
if not ET.iselement(elem):
elem = elem.getroot()
# build uri map and add to root element
uri_map = {}
for prefix, uri in prefix_map.items():
uri_map[uri] = prefix
elem.set("xmlns:" + prefix, uri)
# fixup all elements in the tree
memo = {}
    for elem in elem.iter():  # getiterator() was deprecated and removed in Python 3.9; iter() is the replacement
fixup_element_prefixes(elem, uri_map, memo)
def xml_to_json(xml_string):
#xml_data = ET.parse('sample_json/EUOZJB.xml').getroot()
root = ET.fromstring(xml_string)
ns = {'asds4_0': 'http://services.sabre.com/res/asds/v4_0',
'stl15': 'http://webservices.sabre.com/pnrbuilder/v1_15',
'ns18': 'http://services.sabre.com/res/or/v1_8'}
set_prefixes(root, ns)
# Convert to JSON
return jencoder.data(root)
# xml file with one complete xml record per line
input_xml_file_name = 'sample_json/EUOZJB.xml'
# Get Flattened Keys From All Records
cj = cjc.CurateJson()
with open(input_xml_file_name, encoding='utf-8-sig') as xml_file:
for xml_row_string in xml_file:
# convert xml to json
json_row = xml_to_json(xml_row_string)
# curate json (get flattened keys)
cj.curate_json(json.dumps(json_row))
# collect flattened keys
flattened_keys = cj.get_master_dict()
# Flatten and Denormalize All Records to CSV
with open(r'output/EUOZJB_pipeline.csv', 'w') as csv_file:
w = csv.DictWriter(csv_file, sorted(flattened_keys.keys()), lineterminator='\n', extrasaction='ignore')
w.writeheader()
with open(input_xml_file_name, encoding='utf-8-sig') as xml_file:
for xml_row_string in xml_file:
# convert xml to json
json_row = xml_to_json(xml_row_string)
# denormalize and flatten
denormrows = flatten_denorm_json(json.dumps(json_row), flattened_keys)
if denormrows is not None:
w.writerows(denormrows)
| true |
b6b961e924bc7a85a81716bedfadfa9068c596d4 | Python | JamesG-Projects/Misc_Projects | /Python/lab/lab11.py | UTF-8 | 2,114 | 3.296875 | 3 | [] | no_license |
"""Lab11.py: Coroutines"""
__author__ = "James Garrett"
__credits__ = [""]
__email__ = "garretjb@mail.uc.edu"
#####################
# Lab11 Co-Routines #
#####################
def supplier(ingredients, chef):
for ingredient in ingredients:
try:
chef.send(ingredient)
except StopIteration as e:
print(e)
raise
chef.close()
def customer():
served = False
while True:
try:
dish = yield
print('Yum! Customer got a {}!'.format(dish))
served = True
except GeneratorExit:
if not served:
print('Customer never got served.')
raise
def chef(customers, dishes):
"""
>>> cust = customer()
>>> next(cust)
>>> c = chef({cust: 'hotdog'}, {'hotdog': ['bun', 'hotdog']})
>>> next(c)
>>> supplier(['bun', 'hotdog'], c)
Yum! Customer got a hotdog!
Chef went home.
>>> cust = customer()
>>> next(cust)
>>> c = chef({cust: 'hotdog'}, {'hotdog': ['bun', 'hotdog']})
>>> next(c)
>>> supplier(['bun'], c)
Chef went home.
Customer never got served.
>>> cust = customer()
>>> next(cust)
>>> c = chef({cust: 'hotdog'}, {'hotdog': ['bun', 'hotdog']})
>>> next(c)
>>> supplier(['bun', 'hotdog', 'mustard'], c)
Yum! Customer got a hotdog!
No one left to serve!
"""
remaining_customers = dict(customers)
ingredients = set()
while True:
try:
ingredient = yield
except GeneratorExit:
print('Chef went home.')
for customer in customers:
customer.close()
raise
ingredients.add(ingredient)
if not remaining_customers:
raise StopIteration('No one left to serve!')
for customer, dish_name in dict(remaining_customers).items():
if not set(dishes[dish_name]) - ingredients:
customer.send(dish_name)
del remaining_customers[customer]
#Run
def _test():
import doctest
doctest.testmod(verbose=True)
if __name__ == '__main__':
_test()
| true |
ba062c5e5a753908383a0c2497ea1656ceb94f53 | Python | reikoreinup/AdventOfCode2020 | /Day12/Ex2.py | UTF-8 | 1,237 | 3.640625 | 4 | [] | no_license |
ship_pos, wp_pos = (0, 0), (1, 10)
def move(command, amount, current_pos):
if command == 'N':
return current_pos[0] + amount, current_pos[1]
elif command == 'E':
return current_pos[0], current_pos[1] + amount
elif command == 'S':
return current_pos[0] - amount, current_pos[1]
elif command == 'W':
return current_pos[0], current_pos[1] - amount
for textInput in open('Input.txt', 'r').readlines():
command = textInput[0]
amount = int(textInput[1:])
    if command == 'L' or command == 'R':
        turns = amount // 90
        if command == 'L' and turns == 1 or command == 'R' and turns == 3:
            wp_pos = wp_pos[1], -wp_pos[0]  # rotate the (north, east) waypoint 90 degrees counter-clockwise
        elif command == 'L' and turns == 3 or command == 'R' and turns == 1:
            wp_pos = -wp_pos[1], wp_pos[0]  # rotate 90 degrees clockwise
        elif turns == 2:
            wp_pos = -wp_pos[0], -wp_pos[1]  # rotate 180 degrees
elif command == 'F':
ship_pos = move("E" if wp_pos[1] >= 0 else "W", abs(wp_pos[1] * amount), ship_pos)
ship_pos = move("N" if wp_pos[0] >= 0 else "S", abs(wp_pos[0] * amount), ship_pos)
else:
wp_pos = move(command, amount, wp_pos)
print(f'Manhattan value for position {ship_pos}: {abs(ship_pos[0]) + abs(ship_pos[1])}')
| true |
6d1418eeee22c94150d8d773fdd3b95033ff37ce | Python | rkapdi/SENG265 | /Assignments/assign3/.svn/text-base/s265fmt2.py.svn-base | UTF-8 | 2,746 | 2.859375 | 3 | [] | no_license |
#!/usr/bin/python
import os
import sys
import optparse
import re
from formatting import seng265_formatter
def main():
s = """?pgwdth 50
?mrgn 15
Call me Ishmael. Some years ago--never mind how long precisely--having
little or no money in my purse, and nothing particular to interest me on
?mrgn +5
shore, I thought I would sail about a little and see the watery part of
the world. It is a way I have of driving off the spleen and regulating
the circulation. Whenever I find myself growing grim about the mouth;
?mrgn +5
whenever it is a damp, drizzly November in my soul; whenever I find
myself involuntarily pausing before coffin warehouses, and bringing up
?mrgn +5
the rear of every funeral I meet; and especially whenever my hypos get
such an upper hand of me, that it requires a strong moral principle to
?mrgn +5
prevent me from deliberately stepping into the street, and methodically
knocking people's hats off--then, I account it high time to get to sea
as soon as I can. This is my substitute for pistol and ball. With a
?mrgn +5
philosophical flourish Cato throws himself upon his sword; I quietly
?mrgn +5
take to the ship. There is nothing surprising in this. If they but knew
it, almost all men in their degree, some time or other, cherish very
nearly the same feelings towards the ocean with me.
There now is your insular city of the Manhattoes, belted round by
?mrgn +5
wharves as Indian isles by coral reefs--commerce surrounds it with
her surf. Right and left, the streets take you waterward. Its extreme
downtown is the battery, where that noble mole is washed by waves, and
?mrgn +5
cooled by breezes, which a few hours previous were out of sight of land.
Look at the crowds of water-gazers there."""
fp_exist = 0 #variable for checking if tempwrite.txt exists
fp = ""
if(len(sys.argv)>1):
fp = sys.argv[1]
    if(re.search(r"\.txt$",fp)):  # use search, not match: match() only anchors at the start of the string
pass
else:
process_stdin()
fp_exist = 1
fp = "tempwrite.txt"
if(check_if_empty(fp)):
fp = s.splitlines()
fp_exist = 0
f = seng265_formatter(fp)
f = f.get_lines()
for x in f:
print x,
if(fp_exist!=0):
os.remove("tempwrite.txt")
fp_exist=0
def process_stdin():
"""No file name was input, so have to accept text from console"""
fpout = open("tempwrite.txt",'w')
x = raw_input()
while(x != "-1"):
fpout.write(x+"\n")
x = raw_input()
fpout.close()
return None
def check_if_empty(fp):
filename = open(fp,'r')
filename.seek(0)
first_char = filename.read(1)
if not first_char:
return True
else:
return False
if __name__ == "__main__":
main()
| true |
857479fbf7bc61d5abb25e2bcc1a93bf2a32521a | Python | frankurcrazy/SimpleFileTransfer | /SimpleFileTransfer/base.py | UTF-8 | 1,443 | 2.546875 | 3 | [] | no_license |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import pickle
import asyncio
import struct
from .message import *
class SimpleFileTransferBase(asyncio.Protocol):
def __init__(self):
self.rcvbuf = bytearray()
self.pause = False
def message_received(self, msg):
raise NotImplementedError
def pause_writing(self):
self.pause = True
def resume_writing(self):
self.pause = False
    def decode_msgs(self):
        # Each frame is a 4-byte big-endian length prefix followed by a pickled payload.
        while len(self.rcvbuf) > 4:
            view = memoryview(self.rcvbuf)
            msg_len, = struct.unpack("!I", view[:4])
            if len(view[4:]) >= msg_len:
                msg = pickle.loads(view[4:4+msg_len])
                self.message_received(msg)
                del view  # release the memoryview before resizing the bytearray
                del self.rcvbuf[:4+msg_len]  # always consume the frame, even if the payload is falsy
            else:
                del view
                break
def data_received(self, data):
self.rcvbuf += data
self.decode_msgs()
def send_message(self, msg):
raw = pickle.dumps(msg)
raw = struct.pack("!I", len(raw)) + raw
self.transport.write(raw)
def send_error(self, error_msg):
err = {
SimpleFileTransferMessageField.ACTION: \
SimpleFileTransferActionType.ERROR,
SimpleFileTransferMessageField.MSG: \
error_msg,
}
self.send_message(err)
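# A minimal framing demo (a sketch, not part of the original module; it assumes
# the relative import of .message above resolved, i.e. it runs inside the
# package). It feeds one pickled, length-prefixed message into data_received in
# two chunks to show decode_msgs reassembling the frame.
if __name__ == '__main__':
    class _EchoProtocol(SimpleFileTransferBase):
        def message_received(self, msg):
            print('decoded:', msg)

    proto = _EchoProtocol()
    raw = pickle.dumps({'action': 'ping'})
    framed = struct.pack("!I", len(raw)) + raw
    proto.data_received(framed[:3])  # incomplete header: nothing is decoded yet
    proto.data_received(framed[3:])  # the rest arrives: message_received fires once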
| true |
6d8f47465ee0320241d7df2bd94d935484b5e990 | Python | caranuial/ud036_StarterCode | /media.py | UTF-8 | 651 | 3.109375 | 3 | [] | no_license |
import webbrowser
# Movie Class that supports required functionality
class Movie(object):
# This is the Constructor that initializes the object in memory
def __init__(self,
movie_title,
story_line,
poster_image_url,
trailer_youtube_id):
        # Instance variables
self.title = movie_title
self.story_line = story_line
self.poster_image_url = poster_image_url
self.trailer_youtube_url = trailer_youtube_id
    def show_trailer(self):
        # Function to open the trailer in a web browser
        webbrowser.open(self.trailer_youtube_url)
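# A minimal usage sketch (hypothetical movie data and placeholder URLs, not
# part of the original module):
if __name__ == '__main__':
    toy_story = Movie('Toy Story',
                      'A story of a boy and his toys that come to life',
                      'https://example.com/toy_story_poster.jpg',
                      'https://www.youtube.com/watch?v=placeholder')
    toy_story.show_trailer()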
| true |
ed29c2579dabe47de5065cea960d50a6d86a2302 | Python | MiaZhang0/Learning | /QuestionTypes/demo05.py | UTF-8 | 341 | 4.1875 | 4 | [] | no_license |
# Count the occurrences of True, False, 0, 1, 2 in the list [True,False,0,1,2]. What do you notice?
lst = [True,False,0,1,2]
a = lst.count(True)
b = lst.count(False)
c = lst.count(0)
d = lst.count(1)
e = lst.count(2)
print(a,b,c,d,e)
# Result: 2,2,2,2,1
# count() does not distinguish True from 1 or False from 0, but None and '' are not treated as False
| true |
a64481033a16a2b95fbd761b61fdb46e8ef25fb5 | Python | KVS-CODE/area_module | /unsolved q no_04.py | UTF-8 | 225 | 3.453125 | 3 | [] | no_license |
#unsolved q no: 04
def star(n):
    if n == 0:
        return ""
    return "*" * n + "\n" + star(n - 1)
# main inputs
a = int(input('enter a positive integer'))
if a == 0:
    print("please enter any natural number")
else:
    print(star(a))
| true |
8adacf078fed3137eeaed70412c51dab4aac59d1 | Python | JiguangLi/quasar_variability | /z_luminosity_plot.py | UTF-8 | 3,657 | 2.515625 | 3 | [] | no_license |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 8 14:11:49 2017
@author: jiguangli
"""
from astropy.io import fits
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from astropy.cosmology import FlatLambdaCDM
def compute_luminosity(sdss_name,sdss_magz_dict,cosmo):
z=sdss_magz_dict[sdss_name][1]
radio_l_watt=sdss_magz_dict[sdss_name][0]*(10**(-29))
l_distance=cosmo.luminosity_distance(z)
l_distance_meters=l_distance.value*3.085677758*(10**22)
l=4*np.pi*radio_l_watt*(l_distance_meters**2)
return l
#Store SDSS names of two populations into two lists
loud_stats=pd.read_csv('better_rl_stats.csv')
#quiet_stats=pd.read_csv('normalized_large_quiet_stats.csv')
#upper_limit=np.percentile(quiet_stats['Chi-square'],90)
#quiet_stats=quiet_stats[quiet_stats['Chi-square']<upper_limit]
#==============================================================================
# loud_id=loud_stats['Unnamed: 0'].tolist()
# loud_id=[x[:-4] for x in loud_id]
# quiet_id=quiet_stats['Unnamed: 0'].tolist()
# quiet_id=[y[:-4] for y in quiet_id]
#
# look_up=pd.read_csv('lightcurve_lookup.csv')
# #loud_table=look_up[look_up['id'].isin(loud_id)]
# #loud_sdss_names=loud_table['SDSS_NAME'].tolist()
# ids=look_up['id'].tolist()
# sdss_names=look_up['SDSS_NAME'].tolist()
# id_sdss_dict=dict(zip(ids,sdss_names))
# loud_sdss_names=[id_sdss_dict[int(hehe)] for hehe in loud_id]
# loud_stats['SDSS_NAME']=pd.Series(loud_sdss_names)
# quiet_sdss_names=[id_sdss_dict[int(hinhin)] for hinhin in quiet_id]
# quiet_stats['SDSS_NAME']=pd.Series(quiet_sdss_names)
#
# quiet_table=look_up[look_up['id'].isin(quiet_id)]
# quiet_sdss_names=quiet_table['SDSS_NAME'].tolist()
#
#
# #build a dictionary:
# #key: SDSS_name Values: (I magnitude,Z)
# input_file = fits.open('DR12Q(type1).fits')
# tbdata = input_file[1].data
# mag_arrays=tbdata.field('FIRST_FLUX')
# SDSS_names=tbdata.field('SDSS_NAME')
# red_shifts=tbdata.field('Z_VI')
# mag_z_tuple=zip(mag_arrays,red_shifts)
# sdss_magz_dict=dict(zip(SDSS_names,mag_z_tuple))
#
# #compute luminosity
# cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
#==============================================================================
#==============================================================================
# quiet_l_values=[compute_luminosity(x,sdss_magz_dict,cosmo)
# for x in quiet_sdss_names]
#==============================================================================
# #==============================================================================
# loud_l_values=[compute_luminosity(y,sdss_magz_dict,cosmo)
# for y in loud_sdss_names]
# loud_stats['Luminosity']=pd.Series(loud_l_values)
# #quiet_z_values=np.array([sdss_magz_dict[zq][1] for zq in quiet_sdss_names ])
# loud_z_values=np.array([sdss_magz_dict[zl][1] for zl in loud_sdss_names ])
# loud_stats['Z']=pd.Series(loud_z_values)
#==============================================================================
#==============================================================================
# quiet_z_values=np.array([sdss_magz_dict[zl][1] for zl in quiet_sdss_names ])
# quiet_stats['Z']=pd.Series(quiet_z_values)
#==============================================================================
loud_z_values=loud_stats['Z'].tolist()
loud_l_values=loud_stats['Luminosity'].tolist()
#plot Z-l
fig=plt.figure()
plt.xlabel('Z')
plt.ylabel('Luminosity (W)')
#plt.scatter(quiet_z_values,quiet_l_values,s=50, c='blue',label='radio-quiet')
plt.scatter(loud_z_values,loud_l_values,s=21, c='red',marker='o',label='radio-loud')
#ax=fig.gca()
#ax.set_ylim(10**4, 10**8)
plt.yscale('log')
plt.legend()
| true |
cd0e7a6e182def35292f2fdf0a1063989e461229 | Python | andreaslundin47/Advent-Of-Code-2020 | /day22/day22.py | UTF-8 | 2,162 | 3.765625 | 4 | [] | no_license |
from queue import deque
with open('input', 'r') as f:
p1, p2 = f.read().strip().split('\n\n')
deck_one = [int(v) for v in p1.split('\n')[1:]]
deck_two = [int(v) for v in p2.split('\n')[1:]]
def combat(deck_1, deck_2):
mine_hand = deque(deck_1)
crab_hand = deque(deck_2)
turns = 0
while mine_hand and crab_hand:
turns += 1
my_card, crab_card = mine_hand.popleft(), crab_hand.popleft()
if my_card > crab_card:
mine_hand.append(my_card)
mine_hand.append(crab_card)
elif crab_card > my_card:
crab_hand.append(crab_card)
crab_hand.append(my_card)
if mine_hand:
return 1, list(mine_hand)
else:
return 2, list(crab_hand)
def recursive_combat(deck1, deck2):
# Make queues from the decks of cards
p1_deck = deque(deck1)
p2_deck = deque(deck2)
deck_record = set()
while p1_deck and p2_deck:
# Check record
record = ( tuple(p1_deck), tuple(p2_deck) )
if record in deck_record:
return 1, []
else:
deck_record.add( record )
# Pop off top!
top_1, top_2 = p1_deck.popleft(), p2_deck.popleft()
# Determine the winner!
if top_1 <= len(p1_deck) and top_2 <= len(p2_deck):
winner, _ = recursive_combat(list(p1_deck)[:top_1], list(p2_deck)[:top_2])
else:
winner = 1 if top_1 > top_2 else 2
# Add the two card to the bottom of the winner's deck!
if winner == 1:
p1_deck.append(top_1)
p1_deck.append(top_2)
else:
p2_deck.append(top_2)
p2_deck.append(top_1)
# One deck is now empty
if p1_deck:
return 1, list(p1_deck)
else:
return 2, list(p2_deck)
def deck_score(deck):
return sum(idx * card for idx, card in enumerate(reversed(deck), start=1))
# Part 1
winner, deck = combat(deck_one, deck_two)
score = deck_score(deck)
print(f"Part 1. Winner's Score: {score}")
# Part 2
winner, deck = recursive_combat(deck_one, deck_two)
score = deck_score(deck)
print(f"Part 2. Winner's Score: {score}")
| true |
2eb1bc151b59ae8a66511ca2bf78231492e2a1fe | Python | onitonitonito/py_police | /py_police.py | UTF-8 | 3,071 | 3.046875 | 3 | [] | no_license |
"""-------------------------
# 경찰차 애니매이션 - 총 8장의 스프라이트
# 오브젝트 딕트와 리스트가 막 뒤죽박죽 됬는데 .. 일단은 놔두고
# 천천히 리펙토링을 해야겠다~ 지금은 여기서 끝!
#
#\n\n\n"""
print(__doc__)
import sys
import time
from asset.config import *  # load the variables stored in a separate module
from asset.main import *  # load the variables stored in a separate module
player = set_obj('player', DESTIN_DIR + 'car_top.png', rotate=90)
enemy = set_obj('enemy', DESTIN_DIR + 'kr_police_0.png', rotate=0)
""" # 애니메이션을 불러온다.. 이게 좋은방법은 아니지만; 일단 한다. """
# 좋은 방법은 나중에 생각한다.. 리팩터링은 나중에 한가할 때, 인터넷 검색!
p_rotate = 45
pcars = {
'pcar_0': set_obj('enemy', DESTIN_DIR + 'kr_police_0.png', p_rotate),
'pcar_1': set_obj('enemy', DESTIN_DIR + 'kr_police_1.png', p_rotate),
'pcar_2': set_obj('enemy', DESTIN_DIR + 'kr_police_2.png', p_rotate),
'pcar_3': set_obj('enemy', DESTIN_DIR + 'kr_police_3.png', p_rotate),
'pcar_4': set_obj('enemy', DESTIN_DIR + 'kr_police_4.png', p_rotate),
'pcar_5': set_obj('enemy', DESTIN_DIR + 'kr_police_5.png', p_rotate),
'pcar_6': set_obj('enemy', DESTIN_DIR + 'kr_police_6.png', p_rotate),
'pcar_7': set_obj('enemy', DESTIN_DIR + 'kr_police_7.png', p_rotate),
}
e_rotate = 0
ecars = {
'ecar_0': set_obj('enemy', DESTIN_DIR + 'jp_police_0.png', e_rotate),
'ecar_1': set_obj('enemy', DESTIN_DIR + 'jp_police_1.png', e_rotate),
'ecar_2': set_obj('enemy', DESTIN_DIR + 'jp_police_2.png', e_rotate),
'ecar_3': set_obj('enemy', DESTIN_DIR + 'jp_police_3.png', e_rotate),
'ecar_4': set_obj('enemy', DESTIN_DIR + 'jp_police_4.png', e_rotate),
'ecar_5': set_obj('enemy', DESTIN_DIR + 'jp_police_5.png', e_rotate),
'ecar_6': set_obj('enemy', DESTIN_DIR + 'jp_police_6.png', e_rotate),
'ecar_7': set_obj('enemy', DESTIN_DIR + 'jp_police_7.png', e_rotate),
}
if __name__ == '__main__':
    ongame = True  # flag for breaking out of the loop
    anim = 0  # counter for cycling through the 8 animation sprites
while ongame:
SCREEN.fill(BLACK)
draw_socre(anim)
draw_game_over()
draw_object(player, x, y)
        # advance the animation frame counter
# if anim < 7:
# anim += 1
# else:
# anim = 0
anim += 1 if anim < 7 else -7
        # while inside the pad width, keep decrementing; otherwise reset to the right edge
if ENEMY_WIDTH < EPOS_X < PAD_WIDTH - ENEMY_WIDTH:
EPOS_X -= EPOS_MOV
else:
EPOS_X = PAD_WIDTH - (1.5 * ENEMY_WIDTH)
        # police car 1, 2, 3 animation
draw_object(ecars['ecar_' + str(anim)], EPOS_X, EPOS_Y)
        for xi in range(100, 350, 35):
            draw_object(pcars['pcar_' + str(anim)], xi, 400)
pygame.display.update()
FPS_CLK.tick(FPS)
time.sleep(0.1)
| true |
37c936ae1e22170c8ae7ed44314bfee04d6671f1 | Python | wnagy/pymframe | /WEB-INF/mvc/domain/lovdomain.py | UTF-8 | 2,664 | 2.640625 | 3 | ["Apache-2.0"] | permissive |
# -*- coding: iso-8859-15 -*-
from dbaccess.core import *
class LovDomain(Domain) :
lovID = None
lovClass = None
lovKey = None
lovValue = None
lovFlag1 = None
lovFlag2 = None
lovFlag3 = None
lovFlag4 = None
lovRemark = None
meta = {
'tablename':'lov',
'primarykey':'lovID',
'fields':{
'lovID' : {'dbfield':'lovID', 'type':'Integer'},
'lovClass' : {'dbfield':'lovClass', 'type':'String'},
'lovKey' : {'dbfield':'lovKey', 'type':'String'},
'lovValue' : {'dbfield':'lovValue', 'type':'String'},
'lovFlag1' : {'dbfield':'lovFlag1', 'type':'String'},
'lovFlag2' : {'dbfield':'lovFlag2', 'type':'String'},
'lovFlag3' : {'dbfield':'lovFlag3', 'type':'String'},
'lovFlag4' : {'dbfield':'lovFlag4', 'type':'String'},
'lovRemark' : {'dbfield':'lovRemark', 'type':'String'}
}
}
def getDatasourceClass(self,addempty=None):
retval = list()
where = "lovClass='CLASS'"
if addempty != None:
retval.append(['0',addempty])
for lov in self.eachDomain(where=where,orderby='lovKey'):
retval.append([lov.lovKey,lov.lovKey])
return retval
def truncate(self,value,size):
if len(value) > size :
value = value[:size]
while ord(value[-1]) > 127: value = value[:-1]
value += '…'
return value
def getDatasource(self,theClass,addempty=None,orderby='lovKey',truncate=None):
retval = list()
where = "lovClass='{0}'".format(theClass)
if addempty != None:
retval.append(['',addempty])
for lov in self.eachDomain(where=where,orderby=orderby):
title = None
if truncate is not None:
text = self.truncate(lov.lovValue,truncate)
title = lov.lovValue
else:
text = lov.lovValue
option = {
'value':lov.lovKey,
'text':text,
'title':title
}
retval.append(option)
return retval
    def getLovValue(self,lovClass,lovKey):
        """
        Returns the value for a given class and key.
        @param lovClass class
        @param lovKey key
        @return value, or None if not found.
        """
lov = LovDomain(self.db)
where = "lovClass='{0}' and lovKey='{1}'".format(lovClass,lovKey)
lov.get(where=where)
if lov.isOk:
return lov.lovValue
else:
return None
| true |
51496e34b0fccd109f0316e734be2a7845f9d35e | Python | bsk17/PYTHONTRAINING1 | /GUI/guidemo2.py | UTF-8 | 907 | 3.328125 | 3 | [] | no_license |
from tkinter import *
from tkinter import messagebox as mb
def register():
name = e1.get()
password = e2.get()
print("NAME = ", name)
print("PASSSWORD = ", password)
mb.showinfo("DATA", "Welcome "+name+" Your Password is"+password)
window = Tk()
window.geometry("300x400")
window.title("First Page")
# creating our widgets
l1 = Label(window, text="Welcome to Python Project", bg="#FF512F")
l2 = Label(window, text="Enter Name = ", bg="#EA384D")
l3 = Label(window, text="Enter Pass = ", bg="#EA384D")
e1 = Entry(window)
e2 = Entry(window, show="*") # to hide the password
b1 = Button(window, text="Register", bg="#DA22FF", command=register)
b2 = Button(window, text="Cancel", bg="#DA22FF")
# placing our widgets
l1.place(x=60, y=0)
l2.place(x=10, y=30)
e1.place(x=110, y=30)
l3.place(x=10, y=60)
e2.place(x=110, y=60)
b1.place(x=40, y= 90)
b2.place(x=200, y=90)
window.mainloop()
| true |
25f34b1c0777b9123c28adeb0ad2a41f319192ad | Python | abbyssoul/mood-prob | /emotions/emotion.py | UTF-8 | 374 | 2.734375 | 3 | [] | no_license |
import json
class Emotion(object):
""" Representation of a single emotion
"""
def __init__(self, desc, dim, id=-1):
self.description = desc
self.dim = dim
self.id = id
def __str__(self):
return self.description
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
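# A minimal usage sketch (hypothetical values; the meaning of `dim` is not
# documented above, so the value here is purely illustrative):
if __name__ == '__main__':
    joy = Emotion('joy', dim=3, id=1)
    print(joy)            # prints the description: joy
    print(joy.to_JSON())  # serializes the instance __dict__ as indented JSON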
| true |
d588701393ee593a9805d6e8c2a6ae608d192853 | Python | nuria/study | /advent/2021/advent5.py | UTF-8 | 2,608 | 3.15625 | 3 | [] | no_license |
#!/usr/bin/env python
import sys
lines = list(open(sys.argv[1]))
lines_i = []
x_max =0
y_max = 0
# print matrix concisely for debugging
def print_matrix(G):
txt = ''
for i in range(0, len(G[0])):
for j in range(0, len(G[i])):
if G[i][j] == 0:
txt = txt + '.'
else:
txt = txt + str(G[i][j])
txt = txt +"\n"
return txt
###### READ DATA ########
for l in lines:
p1, separator, p2 = l.split()
x1, y1 = p1.split(',')
x2, y2 = p2.split(',')
x1 = int(x1)
x2 = int(x2)
y1 = int(y1)
y2 = int(y2)
points = []
if x1 == x2 or y1 == y2:
# we need max x max y to see dimensions of grid
tmp_max = max(x1, x2)
if tmp_max> x_max:
x_max = tmp_max
tmp_max = max(y1,y2)
if tmp_max > y_max:
y_max = tmp_max
# calculate points in this line
if x1 == x2:
# only increment y
delta = abs(y2-y1)
# same point
if delta == 0:
#lines_i.append([(x1, y1)])
continue
if y2 > y1:
for i in range(y1, y2+1):
points.append((x1,i))
elif y2 < y1:
# y1 > y2
for i in range(y2, y1 +1):
points.append((x1, i))
lines_i.append(points)
elif y1 == y2:
# only increment x
delta = abs(x2-x1)
# same point
if delta == 0:
#lines_i.append([(x1, y1)])
continue
if x2 > x1:
for i in range(x1, x2+1):
points.append((i,y1))
elif x2 < x1:
# x1 > x2
for i in range(x2, x1 +1):
points.append((i, y1))
lines_i.append(points)
# this array now holds every point for every line as a element
#print lines_i
# GRID
row = [0] * (x_max +1)
G = []
for k in range(0, y_max+1):
G.append(row[:])
print "x_max:{0}, y_max:{1}".format(x_max, y_max)
print print_matrix(G)
# now loop through array and build grid
overlap = 0
for line in lines_i:
for p in line:
x = p[0]
y = p[1]
#print"{0}, {1}".format (x,y)
G[y][x] = G[y][x] + 1
for i in range(0, len(G[0])):
for j in range(0, len(G[i])):
if G[i][j] >=2:
overlap = overlap +1
print(print_matrix(G))
print("overlap")
print(overlap)
| true |
0b9ca43606893b5ba3b170f3f412051da3f4dac1 | Python | lizardnoises/daily-coding-problem | /033_running_median/running_median.py | UTF-8 | 2,438 | 4.40625 | 4 | [] | no_license |
__author__ = "Sean Moore"
"""
Problem:
Compute the running median of a sequence of numbers. That is, given a stream of
numbers, print out the median of the list so far on each new element.
Recall that the median of an even-numbered list is the average of the two
middle numbers.
For example, given the sequence [2, 1, 5, 7, 2, 0, 5], your algorithm should
print out:
2
1.5
2
3.5
2
2
2
"""
"""
To calculate the median of numbers seen so far, those numbers need to be
collected. That means we need to assume that we have enough space to store the
entire stream in the most general case. This solution makes that assumption.
If we maintain a sorted aggregation on the stream, then the median can be
calculated just by using the middle one or two numbers like this:
median = middle value if odd
average of middle values if even.
So ideally, we need to be able to access the two middle elements efficently
each time an element is added to the collection, and also keep the collection
sorted. Using insertion sort on an array could work, but the runtime complexity
would be very poor with O(n^2). A more efficient approach could use two heaps,
one max heap of values smaller than the median and one min heap of values
equal to or larger than the median. In the odd case, pick the top element of
the min heap. In the even case, average the top elements of both heaps. Keep
the heaps balanced by popping and pushing to ensure the middle elements stay
on top. Insertion on a heap is O(log n).
"""
import heapq
def running_median(number_stream):
left = [] # numbers less than the running median
right = [] # numbers greater or equal to the running median
for x in number_stream:
# add the number
if len(right) == 0 or x >= right[0]:
heapq.heappush(right, x)
else:
heapq.heappush(left, -x)
# balance the heaps
if len(left) > len(right):
heapq.heappush(right, -heapq.heappop(left))
elif len(right) > len(left) + 1:
heapq.heappush(left, -heapq.heappop(right))
# calculate the median
if (len(left) + len(right)) % 2 == 0:
median = (-left[0] + right[0]) / 2.0
else:
median = right[0]
yield median
def list_medians(numbers):
return [median for median in running_median(numbers)]
def print_medians(numbers):
for median in running_median(numbers):
print(median)
| true |
167237e4708d99f3344968f035e2c418ba4ab060
|
Python
|
ccas08/prueba
|
/list.py
|
UTF-8
| 734 | 3.734375 | 4 |
[] |
no_license
|
"""def run():
squares = []
for i in range(1, 101):
if i % 3 != 0:
squares.append(i ** 2)
print(squares)""" # funcion normal
def eleva_al_2(i):
return i ** 2
def run():
squares = [i ** 2 for i in range(1, 101) if i % 3 != 0]
ones = [1 for i in range(5)]
# [1, 1, 1, 1, 1]
squares2 = [eleva_al_2(i) for i in range(5)]
# [0, 1, 4, 9, 16]
print(squares, " ", ones, " ", squares2)
if __name__ == "__main__":
run()
"""reto
def run():
my_dict = {i: round(i ** 0.5, 2) for i in range(1, 1001)}
print(my_dict)
if __name__ == "__main__":
run()
"""
| true |
ee05d48c995291cbb25cabc54df0ea4f67e624b1
|
Python
|
tjuxiaoyi/qqZoneModeSpider
|
/qzoneMoodSpider.py
|
UTF-8
| 6,278 | 2.640625 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 13:55:57 2019
@author: xy
"""
# Required libraries
from selenium.webdriver.support.ui import WebDriverWait as WebWait
from selenium.webdriver.chrome import options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains as AC
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
from bs4 import BeautifulSoup
class moodSpider():
    # QQ account and password
user = '670127565'
passwd = '*********'
def __init__(self):
        # Initialize: open and maximize the browser; the next two commented lines enable headless mode
#options = options.Options()
#options.add_argument('--headless')
self.driver = webdriver.Chrome(executable_path='C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe')#,chrome_options=options)
self.driver.maximize_window()
def get_to_mood_page(self):
        # Log in to Qzone and navigate to the mood (shuoshuo) page
driver = self.driver
        # Visit Qzone
driver.get('https://i.qq.com')
#print('geted')
        # The login inputs live in a non-default frame; switch to it, otherwise the elements cannot be found
driver.switch_to.frame('login_frame')
#print('switched')
        # Explicitly wait for the 'switcher_plogin' button, which switches to account/password login
switch = WebWait(driver,5).until(EC.element_to_be_clickable((By.ID,'switcher_plogin')))
switch.click()
#print('clicked')
        # Find the account and password inputs and fill them in
driver.find_element_by_id('u').send_keys(moodSpider.user)
driver.find_element_by_id('p').send_keys(moodSpider.passwd)
login = WebWait(driver,5).until(EC.element_to_be_clickable((By.ID,'login_button')))
login.click()
#登录后跳转至说说界面
time.sleep(2)
driver.get('http://user.qzone.qq.com/'+moodSpider.user+'/311')
def load_all_resoure(self):
driver = self.driver
        # Scroll to the bottom of the page so every post gets loaded
driver.execute_script('window.scrollBy(0,10000)')
time.sleep(2)
driver.execute_script('window.scrollBy(0,10000)')
time.sleep(2)
driver.execute_script('window.scrollBy(0,10000)')
time.sleep(2)
        # Switch to the frame containing the posts, otherwise their elements cannot be found
driver.switch_to.frame('app_canvas_frame')
time.sleep(2)
def view_full_content(self):
driver = self.driver
        # Find and click every "expand full text" button so each post is fully loaded
all_extended = False
while not all_extended:
try:
button = driver.find_element_by_link_text('展开查看全文')
try:
                    '''
                    This logic took a while to get right: to click "expand
                    full text" the button must be inside the viewport,
                    otherwise an unknown error is raised. The error also
                    scrolls the page near the button, so the exception handler
                    below nudges the page up, and the button can be clicked on
                    the next pass.
                    '''
button.click()
time.sleep(2)
#actions.click(button)
#actions.perform()
except Exception as e:
print('fuck!')
driver.switch_to.parent_frame()
driver.execute_script('window.scrollBy(0,-200)')
time.sleep(1)
driver.switch_to.frame('app_canvas_frame')
            # If no "expand full text" button is left, end the loop
except NoSuchElementException as e:
print(e)
all_extended = True
def process_content(self):
        # Save the text of every post
self.soup = BeautifulSoup(self.driver.page_source,'xml')
mood_content = self.soup.find_all('pre',{'class':'content'})
filename = 'mood.txt'
        # Some characters cannot be written as GBK, so save the file as UTF-8
with open (filename ,'a',encoding='utf-8') as f:
for c in mood_content:
content_text = c.get_text('pre')
            # The next two lines strip some emoji placeholder characters
content_text = content_text.replace('pre',' ')
content_text = content_text.replace('\ue412',' ')
f.write(content_text)
def to_next_page(self):
driver = self.driver
        # Jump to the next page
        # The "next page" element id changes, so look it up in the page source
soup = self.soup
next_page_id = soup.find('a',{'title':'下一页'})['id']
to_next = WebWait(driver,5).until(EC.element_to_be_clickable((By.ID,next_page_id)))
to_next.click()
driver.switch_to.parent_frame()
        # After paging, scroll back to the top so all posts get loaded
driver.execute_script('window.scrollBy(0,-10000)')
time.sleep(1)
driver.execute_script('window.scrollBy(0,-10000)')
time.sleep(1)
driver.execute_script('window.scrollBy(0,-10000)')
time.sleep(1)
def download_mood(self):
self.get_to_mood_page()
finished = False
        # When the next-page button cannot be clicked, to_next_page raises,
        # which ends the program
while not finished:
self.load_all_resoure()
self.view_full_content()
self.process_content()
try:
self.to_next_page()
except NoSuchElementException as e:
print(e)
finished = True
except:
print('fuck')
print('done')
spider = moodSpider()
spider.download_mood()
| true |
965daabaf3188d98f66dcfad1425fbc1c289c7c3
|
Python
|
sevenian3/ChromaStarPy
|
/Planck.py
|
UTF-8
| 4,446 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import math
import Useful
def planck(temp, lambda2):
""" /**
    * Inputs: lambda2: a single scalar wavelength in cm; temp: a single scalar
    * temperature in K. Returns the log of the Planck function, logBBlam - the
    * B_lambda distribution in pure cgs units: ergs/s/cm^2/ster/cm
*/"""
#//int numLams = (int) (( lamSetup[1] - lamSetup[0] ) / lamSetup[2]) + 1;
#double logBBlam; //, BBlam;
#//double c = Useful.c; //linear
logC = Useful.logC() # //log
#//double k = Useful.k; //linear
logK = Useful.logK() #//log
#//double h = Useful.h; //linear
logH = Useful.logH() #//log
logPreFac = math.log(2.0) + logH + 2.0 * logC #//log
logExpFac = logH + logC - logK #//log
#//double preFac = 2.0 * h * ( c * c ); //linear
#//double expFac = ( h / k ) * c; //linear
#//System.out.println("logC " + logC + " logK " + logK + " logH " + logH);
#//System.out.println("logPreFac " + logPreFac + " logExpFac " + logExpFac);
#//Declare scratch variables:
#double logLam, logPreLamFac, logExpLamFac, expon, logExpon, eTerm, denom, logDenom; //log
#//double preLamFac, expLamFac, expon, denom; //linear
#//for (int il = 0; il < numLams; il++){
#//lambda = lambda[il] * 1.0E-7; // convert nm to cm
#//lambda = lambda * 1.0E-7; // convert nm to cm
logLam = math.log(lambda2) #// Do the call to log for lambda once //log
#//System.out.println("lambda " + lambda + " logLam " + logLam);
logPreLamFac = logPreFac - 5.0 * logLam #//log
logExpLamFac = logExpFac - logLam #//log
#//System.out.println("logPreLamFac " + logPreLamFac + " logExpLamFac " + logExpLamFac);
#// Be VERY careful about how we divide by lambda^5:
#//preLamFac = preFac / ( lambda * lambda ); //linear
#//preLamFac = preLamFac / ( lambda * lambda ); //linear
#//preLamFac = preLamFac / lambda; //linear
#//expLamFac = expFac / lambda;
#//for (int id = 0; id < numDeps; id++){
#//logExpon = logExpLamFac - temp[1][id];
#//This is very subtle and dangerous!
logExpon = logExpLamFac - math.log(temp) #// log of hc/kTlambda
#//System.out.println("temp " + temp + " logTemp " + Math.log(temp));
expon = math.exp(logExpon) #// hc/kTlambda
#//System.out.println("logExpon " + logExpon + " expon " + expon + " denom " + denom);
#// expon = expLamFac / temp; //linear
eTerm = math.exp(expon) #// e^hc/ktlambda
denom = eTerm - 1.0 #// e^hc/ktlambda - 1
logDenom = math.log(denom) #// log(e^hc/ktlambda - 1)
#//BBlam[1][id][il] = logPreLamFac - logDenom;
#//BBlam[0][id][il] = Math.exp(BBlam[1][id][il]);
logBBlam = logPreLamFac - logDenom #//log
#// Not needed? BBlam = math.exp(logBBlam) #//log
#//BBlam = preLamFac / denom; //linear
#// } //id loop - depths
#// } //il loop - lambdas
    return logBBlam
#} //end method planck()
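# For reference, planck() above evaluates (in log space) the Planck law
#   B_lambda(T) = (2 h c^2 / lambda^5) * 1 / (exp(h c / (lambda k T)) - 1)
# in the cgs units stated in the docstring.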
def dBdT(temp, lambda2):
"""// Computes the first partial derivative of B(T) wrt T, dB/dT:"""
#double logdBdTlam;
#//double c = Useful.c; //linear
logC = Useful.logC() #//log
#//double k = Useful.k #//linear
logK = Useful.logK() #//log
#//double h = Useful.h #//linear
logH = Useful.logH() #//log
logPreFac = math.log(2.0) + logH + 2.0 * logC #//log
logExpFac = logH + logC - logK #//log
#//Declare scratch variables:
#double logLam, logTemp, logPreLamFac, logExpLamFac, expon, logExpon, eTerm, denom, logDenom; //log
#//lambda = lambda * 1.0E-7; // convert nm to cm
logLam = math.log(lambda2) #// Do the call to log for lambda once //log
logTemp = math.log(temp)
logPreLamFac = logPreFac + logExpFac - 6.0 * logLam - 2.0 * logTemp #//log
logExpLamFac = logExpFac - logLam #//log
#//This is very subtle and dangerous!
logExpon = logExpLamFac - logTemp #// log of hc/kTlambda
expon = math.exp(logExpon) #// hc/kTlambda
eTerm = math.exp(expon) #// e^hc/ktlambda
denom = eTerm - 1.0 #// e^hc/ktlambda - 1
logDenom = math.log(denom) #// log(e^hc/ktlambda - 1)
logdBdTlam = logPreLamFac + expon - 2.0 * logDenom #//log
    return logdBdTlam
#} //end method dBdT
| true |
993ca8aaa805e6f982d63a0857749fd8a11d460f
|
Python
|
JohnSmitoff/blog_rest
|
/forum/models.py
|
UTF-8
| 945 | 2.5625 | 3 |
[] |
no_license
|
from django.db import models
from datetime import datetime
from django.utils import timezone
# Create your models here.
class Question(models.Model):
author = models.CharField(default="Anonymous", max_length=200)
question = models.TextField()
question_time = models.DateTimeField(default=timezone.now)
def __str__(self):
return f"{self.author} asked | {self.question}"
class Answer(models.Model):
author = models.CharField(max_length=200, default="Anonymous")
content = models.TextField()
likes = models.PositiveIntegerField(default=0)
dislikes = models.PositiveIntegerField(default=0)
answer_time = models.DateTimeField(default=timezone.now)
question = models.ForeignKey(
Question, on_delete=models.CASCADE, related_name="answers"
)
def __str__(self):
return f"{self.author} answered - {self.content} | {self.question} | likes:{self.likes} dislikes:{self.dislikes}"
| true |
dd7b5a401130437f064413ff4c24280d4353e393
|
Python
|
jancijen/oas-hepb
|
/bin/model_performance.py
|
UTF-8
| 2,237 | 2.8125 | 3 |
[] |
no_license
|
from bin.prediction import predict_in_batches
def model_performance(model_tuple, data, metric_fns, sample_weights, verbose, batches_cnt=5):
model_name, model = model_tuple
X_train, X_valid, y_train, y_valid = data
try:
if verbose:
print(f'Training {model_name}...')
# Fit the model
model.fit(X_train, y_train)
if verbose:
print(f'Predicting using {model_name}...')
# Validation performance
y_pred = predict_in_batches(model, X_valid.values, batches_cnt=batches_cnt)
validation_weights = sample_weights.loc[y_valid.index] if sample_weights is not None else None
metric_vals = [(metric_name, metric_fn(y_valid, y_pred, sample_weight=validation_weights)) for metric_name, metric_fn in metric_fns]
non_weighted_metric_vals = [(f'Non-weighted {metric_name}', metric_fn(y_valid, y_pred)) for metric_name, metric_fn in metric_fns]
metric_vals.extend(non_weighted_metric_vals)
if verbose:
metric_vals_str = ', '.join(['{}: {:.3f}'.format(metric_name, metric_val) for metric_name, metric_val in metric_vals])
print(f'{model_name} - {metric_vals_str}\n')
except ValueError as error:
print(f'{model_name}: {error}\n')
return None
return model_tuple, metric_vals, y_pred
def model_selection(models, data, metric_fns, sample_weights=None, verbose=True):
if verbose:
print(f'Metric values:\n')
model_performances = {}
trained_models = {}
    for model in models:
        result = model_performance(model, data, metric_fns, sample_weights, verbose)
        if result is None:
            # training or prediction failed for this model; skip it
            continue
        model_tuple, metric_vals, _ = result
        model_performances[model_tuple[0]] = metric_vals[0]
        trained_models[model_tuple[0]] = model_tuple[1]
# Get information about the best performing model on the data
best_perf = max(model_performances.items(), key=lambda x: x[1][1])
print('-' * 30)
print('-' * 30)
print(f'Best performing model is {best_perf[0]} with metric value ({best_perf[1][0]}) = {"{:.3f}".format(best_perf[1][1])}')
return best_perf, model_performances, trained_models
| true |
35879ae589c85720d0cc08b479c3ebffb2ee4475
|
Python
|
cwyz-dev/deal-scraper
|
/utils.py
|
UTF-8
| 503 | 3.40625 | 3 |
[] |
no_license
|
def convert_price_to_number(price):
    # Strip the leading "$".
    price = price.split("$")[1]
    try:
        price = price.replace("\n", ".")
    except AttributeError:
        pass
    try:
        # Drop a single thousands separator, e.g. "1,299" -> "1299".
        price = price.split(",")[0] + price.split(",")[1]
    except IndexError:
        pass
    return float(price)
def my_range(start, end, step, forwards):
if (forwards):
while start <= end:
yield start
start += step
else:
while start >= end:
yield start
start -= step
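# Quick illustration of the helpers above (added, not in the original):
if __name__ == "__main__":
    print(convert_price_to_number("$1,299\n99"))     # -> 1299.99
    print(list(my_range(0, 10, 2, forwards=True)))   # -> [0, 2, 4, 6, 8, 10]
    print(list(my_range(10, 0, 2, forwards=False)))  # -> [10, 8, 6, 4, 2, 0]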
| true |
0f2780e64709ed227bae713fd46d168379362c4a
|
Python
|
ksercs/lksh
|
/2014/Работа в ЛКШ/Python/day11/A/turtle.py
|
UTF-8
| 600 | 3.234375 | 3 |
[] |
no_license
|
fin = open("turtle.in", "r")
fout = open("turtle.out", "w")
row, col = [int(x) for x in fin.readline().split()]
acid = []
for i in range(row):
acid.append(list(map(int, fin.readline().split())))
table = [[0] * col for i in range(row)]
ans = 0
for i in range(row):
ans += acid[i][0]
table[i][0] = ans
ans = 0
for j in range(col):
ans += acid[0][j]
table[0][j] = ans
for i in range(1, row):
for j in range(1, col):
table[i][j] = acid[i][j] + min(table[i - 1][j], table[i][j - 1])
print(table[row - 1][col - 1], file=fout)
fout.close()
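# Worked 2x2 example (illustrative, not the task input): for the acid grid
#   1 3
#   2 1
# the DP above fills table = [[1, 4], [3, 4]], so the cheapest
# right/down path to the bottom-right corner costs 4.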
| true |
75820f1251acc6b343c1c4d1479338a8f086b967
|
Python
|
bar2104y/FunnyVKBots
|
/AvatarSecurity/avatar.py
|
UTF-8
| 2,593 | 2.78125 | 3 |
[] |
no_license
|
# Load the required modules
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api import VkUpload
from vk_api.utils import get_random_id
# Workaround flag to avoid recursing on our own avatar update
IsMyUpdate = False
def main():
    # Settings
vk_token="" # Токен пользователя
app_id = "" # ID приложения
vk_client_secret = "" # Ключ приложения
chat_id = # id отслеживаемого чата (Обязательно число)
mesAnswer = True # Подкрепляется ли восстановление втарки сообщением
mesTxt = "Технологии на страже революции" # Текст сообщения
dialog_id = 2000000000 + chat_id # СОздание peer_id
    # Authorization
vk_session = vk_api.VkApi(token=vk_token, app_id=app_id, client_secret=vk_client_secret)
vk = vk_session.get_api()
    global IsMyUpdate  # declare the workaround flag as global
    # Hook up the longpoll and upload helpers
longpoll = VkLongPoll(vk_session)
upload = VkUpload(vk_session)
    # Iterate over incoming events
for event in longpoll.listen():
if event.type == VkEventType.CHAT_UPDATE:
            # Check that this is the watched chat
if event.chat_id == chat_id:
                # If it was not us who changed the avatar
if not IsMyUpdate:
try:
                        # Upload the replacement avatar
r = upload.photo_chat(photo='gerb.png', chat_id=chat_id)
print(r)
print("Капитализм не пройдет!")
                        # Mark that this change is ours, so the condition above skips it next time
IsMyUpdate = True
except Exception as e:
                        print(e)  # print errors, if any
if mesAnswer:
try:
                            # Send the message
vk.messages.send(
peer_id=dialog_id,
random_id=get_random_id(),
message=mesTxt
)
except Exception as e:
                            print(e)  # print errors, if any
else:
                    IsMyUpdate = False  # reset the workaround flag
elif event.type == VkEventType.MESSAGE_NEW:
print(event.__dict__)
else:
print(event.type)
# Infinite loop with error handling
while True:
try:
main()
except Exception as e:
print(e.__class__)
| true |
6af9eeb6390544042d23a7f2befc9d140d8de257
|
Python
|
praneethreddypanyam/DataStructures-Algorithms
|
/LinkedList/insertionInSortedList.py
|
UTF-8
| 1,264 | 4 | 4 |
[] |
no_license
|
class Node:
def __init__(self,data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def insertion(self,data):
if self.head == None:
self.head = Node(data)
else:
current = self.head
while current.next != None:
current = current.next
current.next = Node(data)
def display(self):
current = self.head
while current.next != None:
print(current.data,end="->")
current = current.next
print(current.data)
def sortedInsertion(self,data):
node = Node(data)
current = self.head
previous = None
inserted = False
while current != None and not inserted:
if current.data > data:
inserted = True
else:
previous = current
current = current.next
if previous == None:
node.next = self.head
self.head = node
else:
node.next = previous.next
previous.next = node
l = LinkedList()
l.insertion(1)
l.insertion(2)
l.insertion(4)
l.display()
l.sortedInsertion(3)
l.sortedInsertion(5)
l.display()
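# Added edge-case check: inserting into an empty list exercises the
# previous == None branch of sortedInsertion.
empty = LinkedList()
empty.sortedInsertion(0)
empty.display()  # -> 0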
| true |
162b89ff997292cd1ece3e2aacd2def9a1d9c6b9
|
Python
|
jangmyounhoon/python
|
/day3/turtle_run2.py
|
UTF-8
| 429 | 4.1875 | 4 |
[] |
no_license
|
import turtle as t
a = t.Turtle()  # player
b = t.Turtle()  # enemy
c = t.Turtle()  # food
a.shape("turtle")
b.shape("turtle")
c.shape("circle")
a.color("blue") # 주인공 파란색
b.color("red") # 악당 빨간색
c.color("green") # 먹이 초록색
a.speed(0)
b.speed(0)
c.speed(0)
b.up()
b.goto(0, 200)  # move 200 upward
c.up()
c.goto(0, -200)  # move 200 downward
| true |
7b61bd05acb0bf4622058a1fb64f481cd0f0a43b
|
Python
|
rufusmitchellheggs/neuro_analysis
|
/preprocessing/lr_alignment_functions.py
|
UTF-8
| 23,303 | 2.859375 | 3 |
[] |
no_license
|
#All imports
import pandas as pd
import numpy as np
from numpy import *
import scipy.signal
import cv2
import os
from scipy import stats
from scipy.spatial import distance
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from math import floor
#Functions
def events_pivot_correction(events, traces):
"""Changes format of timestamped events file to time series format
as with raw data traces
INPUT:
------
events = events timestamp csv location (table format below)
---------------------------
|Time (s)|Cell Name|Value|
---------------------------
| | | |
trace = raw traces csv location
OUTPUT:
------
save_as = corrected event traces over input events csv
and outputs the file location
"""
#create new csv
save_as = events
#Read in all sessions for event traces
event = pd.read_csv(events)
#Table must be less than 10 columns
if event.shape[1] < 10:
#Read in all sessions for df/f raw trace
trace = pd.read_csv(traces)
#Pivot event traces so that cell identities are column headers
event = event.pivot(index='Time (s)', columns=' Cell Name', values=' Value')
event.fillna(value=0, inplace=True, axis=1)
event.index = event.index.map(float)
event = event.sort_index(axis=0)
#Prepare events frame for merging
event['Time (s)'] = event.index
del event.index.name
event.astype(float)
event['Time (s)']=event['Time (s)'].astype(float)
#Isolate time column from traces
trace = trace[1:]
trace.rename(columns={trace.columns[0]: "Time (s)" }, inplace = True)
trace = trace.drop(trace.columns[1:], axis=1)
trace['Time (s)']=trace['Time (s)'].astype(float)
#Merge events with traces time column, any time gaps are filled with 0s
event = pd.merge(trace,event, on="Time (s)", how="left")
event.fillna(value=0, inplace=True, axis=1)
event = event.astype(float)
event['Time (s)'] = trace['Time (s)'].values
#Overwrite csv
event.to_csv(save_as, index=False)
        print('File', save_as[-25:], 'has been corrected and overwritten')
return save_as
def lr_data_correction(lr_traces_or_events, timestamps):
"""Labels and corrects timings for longitudinally registered csv file of multiple sessions/stages
INPUT
-----
lr_traces_or_events = .csv file location for longitudinally registered events or traces
timestamps = .csv file for timestamps of manually identified stage start and endings
timestamps table format:
---------------------------
|session|pre |sam | cho |
---------------------------
| N01 |12701|21496|30611|
---------------------------
    OUTPUT:
-----
    corrected_data = A dataframe containing labelled sessions and stages with corrected timings
"""
input_file = lr_traces_or_events[-7:-4]
#Read in lr_trace file location and make minor corrections
lr_traces_or_events = pd.read_csv(lr_traces_or_events)
if input_file == 'TRA':
lr_traces_or_events = lr_traces_or_events.drop(lr_traces_or_events.index[0])
lr_traces_or_events = lr_traces_or_events.reset_index(drop=True)
lr_traces_or_events = lr_traces_or_events.rename(columns={" ": "Time (s)"})
#Read in timestamp info
timestamps = pd.read_csv(timestamps)
sessions = list(timestamps['session'])
stages = list(timestamps['stage'])
#Identify start and end frames for all sessions
all_data = list(lr_traces_or_events["Time (s)"].astype(float))
stage_starts = [0]
stage_ends = []
for i in range(len(all_data)):
if i + 1 < len(all_data):
if abs(all_data[i+1] - all_data[i]) > 1 :
stage_starts.append(i+1)
stage_ends.append(i)
stage_ends.append(len(lr_traces_or_events))
indiv_stages = []
for sesh, stage, start, end in zip(sessions, stages, stage_starts, stage_ends):
indiv_stage = lr_traces_or_events[start:end]
indiv_stage = indiv_stage.reset_index(drop=True)
#Correct timings and add column showing stage
stage_timings = np.arange(0, len(indiv_stage), 1)*0.05006
indiv_stage.insert(loc=0, column='stage', value=list((stage,) * len(indiv_stage)))
indiv_stage.insert(loc=0, column='Session', value=list((sesh,) * len(indiv_stage)))
indiv_stage["Time (s)"] = stage_timings
indiv_stages.append(indiv_stage)
#Concatenate all sessions into single table
corrected_data = pd.concat(indiv_stages)
return corrected_data
def led_time(behavioural_video):
"""Find frame that an LED is switched on
INPUT:
- behavioural_video = the video being analysed
OUTPUT:
- Frame of LED turing on"""
#Read in video
cap = cv2.VideoCapture(behavioural_video)
#Start Frame number
frame = 1
while True:
pixels = cap.read() # Read in Pixel values
        # Define the approximate LED region and take its maximum pixel value
        light_region = pixels[1][400:-50][:,50:300] # LED region (y range 400:-50, x range 50:300)
light_frame = max(np.array(light_region).flatten()) #Maximum pixel value for region
#If max value exceeds 250 then LED is switched on
if light_frame > 250:
start_frame_vid = frame
start_time_vid = start_frame_vid*(0.04)
break
else:
frame +=1
try:
start_frame_vid
except NameError:
start_frame_vid = 'unknown'
start_time_vid = 'unknown'
print('START Time =', start_time_vid)
else:
print('START Time =', start_time_vid, 's')
return start_frame_vid
def door_time(behavioural_video, y_correction = 0):
"""Obtains the frame that the event arena door opens
INPUT:
- behavioural_video = video being analysed
- y_correction = adapt for strange start areas
OUTPUT:
- Frame of door opening
- Top edge of the startbox"""
#Read in video
cap = cv2.VideoCapture(behavioural_video)
#Start Frame number
frame = 1
delta_door_frame = []
while frame < 12000:
pixels = cap.read() # Read in Pixel values
if frame == 1:
box_region = np.mean(pixels[1][350:520][:,300:500], axis=2) # Start box region
#box region correction (if pixel shift required)
box_region_correction = 0
if np.mean(box_region) > 120:
box_region_correction = 50
box_region = np.mean(pixels[1][350:520][:,300+box_region_correction:500+box_region_correction], axis=2)
#Pixel coordinate inside box
inside_box = np.argmin(box_region)
y = int((inside_box/200))+y_correction
x = int(round(200*((inside_box/200)-int(inside_box/200))))
#right edge detection
pix = 1
pix_val = box_region[y][x]
while pix_val < 25:
pix_val = box_region[y][x+pix]
right_edge = x+pix
pix +=1
#top edge detection
pix = 1
pix_val = box_region[y][right_edge]
while pix_val < 60:
pix_val = box_region[y-pix][right_edge]
top_edge = y-pix
pix +=1
# print('right edge =',right_edge)
# print('top edge =', top_edge)
# print('y', 350+top_edge-42,350+top_edge, 'x', 300+box_region_correction+right_edge-100,300+box_region_correction+right_edge-90)
#Door region obtained from top right corner (consistent for all videos)
door_region = pixels[1][350+top_edge-42:350+top_edge][:,300+box_region_correction+right_edge-100:300+box_region_correction+right_edge-90]
#Monitor door opening by tracking slope of pixel values
        door_frame = mean(np.array(door_region).flatten()) # mean pixel value for the region
delta_door_frame.append(door_frame)
if len(delta_door_frame) > 10:
slope, intercept, r_value, p_value, std_err = stats.linregress(np.arange(0,10),delta_door_frame[-10:])
if abs(slope) > 3.2:
door_frame_vid = frame
door_time_vid = door_frame_vid*(0.04)
break
else:
frame+=1
else:
frame +=1
#If no door opening found
try:
door_frame_vid
except NameError:
door_frame_vid = 'unknown'
door_time_vid = 'unknown'
print('Door Opening Time =','corrupted - trying again')
else:
print('Door Opening Time =', door_time_vid, 's')
return door_frame_vid, 350+top_edge
def sandwell_loc(behavioural_video):
"""Indentify Sandwell locations and radii from first frame
INPUT
- Behavioural video
OUTPUT
- Sandwell locations for: sw1, sw2, sw3"""
# Read in first video frame
cap = cv2.VideoCapture(behavioural_video)
correct_sandwell = 'n'
frame=1
while correct_sandwell != 'y':
img = cap.read()[1] # Read in Pixel values
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_blurred = cv2.blur(gray, (3, 3))
# Circle detection algorithm
sandwells = cv2.HoughCircles(gray_blurred,
cv2.HOUGH_GRADIENT, 1, 100, param1 = 50,
param2 = 30, minRadius = 10, maxRadius = 20)
if sandwells is not None:
# Convert the circle parameters a, b and r to integers.
sandwells = np.uint16(np.around(sandwells))
# Manually check that sandwells are correct
if frame == 1 or frame % 50 == 0:
for pt in sandwells[0, :]:
x, y, r = pt[0], pt[1], pt[2]
cv2.circle(img, (x, y), r+20, (0, 255, 0), 2)
avg_dis = np.mean(distance.cdist(sandwells[0][:,:2], sandwells[0][:,:2], 'euclidean'))
if len(sandwells[0])==3 and avg_dis < 130:
print(len(sandwells[0]),'Wells detected')
plt.imshow(img)
plt.show()
correct_sandwell = input("Are all sandwells correct - y/n?")
frame+=1
else:
correct_sandwell = 'n'
frame+=1
else:
frame+=1
# Classify which sandwell is sw1, sw2, sw3
for pt in sandwells[0, :]:
x, y = pt[0], pt[1]
if y == min(np.array(sandwells).transpose()[1]):
sw1 = [x,y]
elif x == max(np.array(sandwells).transpose()[0]):
sw2 = [x,y]
else:
sw3 = [x,y]
print(len(sandwells[0]),'Wells correctly detected')
return sw1,sw2,sw3
def speed(xy, pix_cm = 65/20, framerate=25):
""" Calculate the speed in cm/s and label as movin
INPUT:
-----
xy = array of x and y coordinates
pix_cm = how many pixels there are in a cm (default 65/20)
framerate = the number of frames in a second (default (25))
OUPUT:
-----
v = array with speed for each frame cm/s
moving = array of movement status for each frame
(>= 2cm/s is considered moving)"""
#Calculate speed
c = np.array([]); moving = np.array([])
window = framerate
for pos in range(len(xy)):
        if len(xy)-window <= framerate+1:
            speed = 0
            c = np.append(c,speed)
            moving = np.append(moving, 'stationary')  # keep c and moving the same length
else:
speed = distance.euclidean(xy[pos],
xy[window])/pix_cm
c = np.append(c, speed)
#Add labels
if speed >= 2:
moving = np.append(moving, 'moving')
else:
moving = np.append(moving, 'stationary')
window+=1
#smooth speed with std 1
c = gaussian_filter(c, sigma=1)
return c, moving
def resample_Dataframe(df,nsamp):
# df is dataframe - nsamp is number of samples to resample to
resampFact = nsamp/max(df.index)
reSampIdx = np.arange(nsamp).astype(int) # obtain list of new samples
totalPos = (np.round(reSampIdx / resampFact)).astype(int)
df_ = df.loc[totalPos] # obtain resamp duplicates
resamp_df = df_.reset_index(drop=True).reindex(reSampIdx) # re-index df w resamp
return resamp_df
def trace_behav_sync(directory_name, output_directory, file_dictionary, lr_traces, lr_events, animal, session, stage):
"""Synchronise raw calcium traces with rat XY coordinates (Deeplabcut).
INPUT:
------
- directory_name containing all files for one animal
- output_directory is the directory you want to save it to, default = directory with all files
- file_dictionary is a dictionary containing all individual sessions
- lr_traces = longitudinally registered traces table
- lr_events = longitudinally registered events table (corrected)
- animal = amimal ID
- session = recording session
- stage = recording context
Required directory contents:
- GPIO file (start and end time for calcium) .csv
- Behavioural video .flv
- Raw Calcium Trace .csv
- Events trace .csv
- Deep lab cut x,y coordinate .csv
OUTPUT:
------
trace_dlc = data frame and saved csv with calcium traces (as below)
event_dlc = data frame and saved csv with event traces (as below)
door_frame_vid = Frame of start box door opening
sandwells = List of sandwell locations
-------------------------------------------------------------
|Time (s)|well|position|x|y|likelihood|Session|stage|C000|Cn|
-------------------------------------------------------------
| | | | | | | | | |
"""
#--------------------------------------------------------------------------------------------------------------
# Save DLC, Behavioural vids and GPIO files locations as local variables
files = file_dictionary[animal][session][stage]
for file in files:
if file.endswith("DLC.csv"):
input_dlc = os.path.join(directory_name, file)
elif file.endswith("BEH.flv"):
input_behavioural_video = os.path.join(directory_name, file)
elif file.endswith('LED.csv'):
input_gpio = os.path.join(directory_name, file)
#See if there is an events file to process also
for file in file_dictionary[animal]['ALL']['ALL']:
if file.endswith('EVE.csv'):
input_events = os.path.join(directory_name, file)
events_file = True
#--------------------------------------------------------------------------------------------------------------
# Read in GPIO pulse .csv file and extract LED section
gpio = pd.read_csv(input_gpio)
gpio = gpio[gpio[' Channel Name']==' GPIO-1'].convert_objects(convert_numeric=True)
# Define start/end time and duration
gpio_start = np.round(gpio[gpio['Time (s)'] < 100][gpio[gpio['Time (s)'] < 100][' Value'] > 1000].iloc[0][0] / 0.05) * 0.05
gpio_end = np.round(gpio[gpio['Time (s)'] > 100][gpio[gpio['Time (s)'] > 100][' Value'] > 1000].iloc[0][0] / 0.05) * 0.05
duration = np.round((gpio_end - gpio_start)/ 0.05) * 0.05
#--------------------------------------------------------------------------------------------------------------
#Clean up DLC dataframe
dlc = pd.read_csv(input_dlc)
# dlc = dlc.drop([0,1]).convert_objects(convert_numeric=True)
dlc = dlc.convert_objects(convert_numeric=True)
dlc = dlc.rename(columns={dlc.columns[0]:'Time (s)',
dlc.columns[1]:'x',
dlc.columns[2]:'y'})
dlc['Time (s)'] = dlc['Time (s)']*0.04
# LED light turning on identification
led_start = led_time(input_behavioural_video)*0.04
# Trim DLC file before LED and after
dlc = dlc[dlc['Time (s)'] >= led_start]
dlc['Time (s)'] = dlc['Time (s)']-dlc['Time (s)'].iloc[0]
dlc = dlc[dlc['Time (s)'] <= duration]
dlc = dlc.reset_index(drop=True)
#--------------------------------------------------------------------------------------------------------------
#Isolate individual sessions and stages to align with gpio file
trace = lr_traces[lr_traces['Session']==session][lr_traces[lr_traces['Session']==session]['stage']==stage]
trace = trace.reset_index(drop=True)
# Trim traces using GPIO start/end times and reset to 0 start
trace_trimmed = trace[trace['Time (s)'] >= gpio_start][trace[trace['Time (s)'] >= gpio_start]['Time (s)'] <= gpio_end]
trace_trimmed['Time (s)'] = trace_trimmed['Time (s)']-trace_trimmed['Time (s)'].iloc[0]
trace_trimmed = trace_trimmed.reset_index(drop=True)
#Check that behaviour end is the same length as calcium recording and update it if not
if np.array(trace_trimmed['Time (s)'])[-1]-np.array(dlc['Time (s)'])[-1] > 0.2:
        print("Calcium traces are longer than DLC - I've corrected this for you")
gpio_end = gpio_start+(ceil(np.array(dlc['Time (s)'])[-1]*100)/100)+0.01
elif np.array(trace_trimmed['Time (s)'])[-1]-np.array(dlc['Time (s)'])[-1] < -0.2:
        print("DLC is longer than the calcium traces - I've corrected this for you")
duration = (ceil(np.array(trace_trimmed['Time (s)'])[-1]*100)/100)+0.01
dlc = dlc[dlc['Time (s)'] <= duration]
dlc = dlc.reset_index(drop=True)
# Trim traces using GPIO start/end times and reset to 0 start
trace_trimmed = trace[trace['Time (s)'] >= gpio_start][trace[trace['Time (s)'] >= gpio_start]['Time (s)'] <= gpio_end]
trace_trimmed['Time (s)'] = trace_trimmed['Time (s)']-trace_trimmed['Time (s)'].iloc[0]
trace_trimmed = trace_trimmed.reset_index(drop=True)
#Trim trace and events to correct start time and end time
try:
input_events
except NameError:
events_file = False
event_dlc = 0
else:
#Isolate individual sessions and stages to align with gpio file
event = lr_events[lr_events['Session']==session][lr_events[lr_events['Session']==session]['stage']==stage]
event = event.reset_index(drop=True)
# Trim traces using GPIO start/end times and reset to 0 start
event_trimmed = event[event['Time (s)'] >= gpio_start][event[event['Time (s)'] >= gpio_start]['Time (s)'] <= gpio_end]
event_trimmed['Time (s)'] = event['Time (s)']-event['Time (s)'].iloc[0]
event_trimmed = event_trimmed.reset_index(drop=True)
#--------------------------------------------------------------------------------------------------------------
# Door opening and tone frame
door_frame_vid, start_box = door_time(input_behavioural_video)
if door_frame_vid == 'unknown' or door_frame_vid < 1:
door_frame_vid, start_box = door_time(input_behavioural_video, y_correction=10)
tone_frame = door_frame_vid - (125)
#Assign all points that in the vicinity of the start box to the start box
dlc['y'][dlc['y'] > start_box] = np.mean(np.array(dlc[dlc['y'] > start_box]['y']))
dlc['x'][dlc['y'] > start_box] = np.mean(np.array(dlc[dlc['y'] > start_box]['x']))
dlc['y'][dlc['Time (s)'] < door_frame_vid*0.04] = np.mean(np.array(dlc[dlc['y'] > start_box]['y']))
dlc['x'][dlc['Time (s)'] < door_frame_vid*0.04] = np.mean(np.array(dlc[dlc['y'] > start_box]['x']))
#--------------------------------------------------------------------------------------------------------------
#DLC resampling from 25hz to 20hz (different methods avialable)
# dlc = scipy.signal.resample(dlc, len(trace_trimmed['Time (s)'])) # Interpolation method
# dlc = resample_Dataframe(dlc, len(trace_trimmed['Time (s)'])) # Interpolation method
# dlc = dlc.iloc[:,1:]
# dlc = np.array(dlc.values)
dlc['time'] = pd.date_range('1/1/2000',periods=len(dlc),freq='40ms')
dlc = dlc.iloc[:,1:]
dlc = dlc.resample('50.06ms', on='time').mean()
dlc.index = np.arange(0,len(dlc),1)
dlc = np.array(dlc.values)
if len(trace_trimmed)-len(dlc.transpose()[0])>0:
print('dropping the last frame')
trace_trimmed = trace_trimmed.drop([len(trace_trimmed)-1])
#--------------------------------------------------------------------------------------------------------------
#Add Additional columns
c, c_label = speed(dlc, pix_cm = 65/20, framerate=25)
well = [input_dlc[-15:-12]]*len(trace_trimmed) # Well that pellet is hidden in (SW1,2,3)
print(input_behavioural_video)
sandwells = sandwell_loc(input_behavioural_video) # Sandwell center coordinates and radius
    sw_radius = 32  # sandwell radius (pixels)
position = []
for i in range(len(trace_trimmed)):
if i < door_frame_vid:
position.append(12)
elif dlc.transpose()[1][i] > start_box:
position.append(12)
elif distance.euclidean(sandwells[0], [dlc.transpose()[0][i], dlc.transpose()[1][i]]) < sw_radius:
position.append(1)
elif distance.euclidean(sandwells[1], [dlc.transpose()[0][i], dlc.transpose()[1][i]]) < sw_radius:
position.append(2)
elif distance.euclidean(sandwells[2], [dlc.transpose()[0][i], dlc.transpose()[1][i]]) < sw_radius:
position.append(3)
else:
position.append(0)
    #Create output dataframe
dlc = pd.DataFrame({'Time (s)':list(trace_trimmed['Time (s)']),
'well':well,
'position':position,
'x':dlc.transpose()[0],
'y':dlc.transpose()[1],
'Speed (cm/s)':c,
'Movement status':c_label,
'door_frame':[door_frame_vid]*len(trace_trimmed),
'tone_frame':[tone_frame]*len(trace_trimmed),
'SW_locs':[sandwells]*len(trace_trimmed)})
trace_trimmed = trace_trimmed.drop(['Time (s)'], axis=1)
trace_dlc = pd.merge(dlc,trace_trimmed,left_index=True, right_index=True)
#--------------------------------------------------------------------------------------------------------------
#Saves file to CSV
trace_dlc.to_csv(output_directory+input_behavioural_video[-25:-7]+'trace_dlc.csv', index=False)
print(input_behavioural_video[-25:-7]+'trace_dlc.csv', 'saved to:', output_directory)
if events_file:
event_trimmed = event_trimmed.drop(['Time (s)'], axis=1)
event_dlc = pd.merge(dlc,event_trimmed,left_index=True, right_index=True)
#Saves file to CSV
event_dlc.to_csv(output_directory+input_behavioural_video[-25:-7]+'events_dlc.csv', index=False)
print(input_behavioural_video[-25:-7]+'events_dlc.csv', 'saved to:', output_directory)
else:
event_dlc = 'EMPTY'
#--------------------------------------------------------------------------------------------------------------
return trace_dlc, event_dlc
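# Hedged self-check for speed() (synthetic coordinates, illustrative only;
# not part of the original module):
if __name__ == '__main__':
    xy_demo = np.array([[0, 0], [10, 0], [20, 0], [30, 0]])
    v_demo, status_demo = speed(xy_demo, pix_cm=1.0, framerate=1)
    print(v_demo, status_demo)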
| true |
922ccfd763646816d1fdaa5e7f862edc13477579
|
Python
|
betteroutthanin/BrewComputer
|
/Dev/Zobjects/States/Recovery.py
|
UTF-8
| 4,421 | 2.578125 | 3 |
[] |
no_license
|
import Config
from Zobjects.States.State import State
from StateManager import StateManager
class Recovery(State):
##############################################################
def __init__(self):
super(Recovery, self).__init__()
self.loggingPrefix = "State.Recovery"
# Normally these details are loaded in from the json file
# Force them to emulate the loading
self.id = 0
self.type = "Zobjects.States.Recovery.Recovery"
self.nameShort = "REC"
self.nameLong = "Recovery"
self.oldSM = False
self.newSM = False
self.LogMe("Booted")
##############################################################
def OnEntry(self):
super(Recovery, self).OnEntry()
self.EnableQuiteMode()
self.oldSM = StateManager()
if self.oldSM.LoadStateMachineFromDisk(Config.currentStateMachine) == False:
self.oldSM = False
self.newSM = StateManager()
if self.newSM.LoadStateMachineFromDisk(Config.newStateMachine) == False:
self.newSM = False
self.DisableQuiteMode()
return True
##############################################################
def Process(self):
# must call the parent process - see comments on State.Process
super(Recovery, self).Process()
sm = self.bb.Get("sm")
timeSec = sm.Get("timeSec")
# if both the new and old are invalid then shit is really broken
if (self.oldSM == False) and (self.newSM == False):
return False
# If there is nothing to recover then just punch out and exit recovery mode completely
if self.oldSM == False:
self.LogMe("Process: No previous Statemachine found - recovery mode not need")
sm.Set("recoverOldStateMachine", False)
return True
# Pressing the button implies that the user wants to dump the old stateMachine and start fresh
buttonPressed = self.bb.Get("dm").GetDevice("KeyBoard").ButtonWasPressed("proceed")
if buttonPressed == True:
sm.Set("recoverOldStateMachine", False)
self.LogMe("Process: User pressed the proceed button - recoverOldStateMachine=False")
return True
        # Keep checking to see if the timer expires - if so, recover the old stateMachine
thresholdTimeSec = Config.recoveryTimeOutSec + self.startTimeSec
if timeSec > thresholdTimeSec:
sm.Set("recoverOldStateMachine", True)
self.LogMe("Process: Timeout - recoverOldStateMachine=True")
return True
# Else - keep on ticking
return False
##############################################################
def RenderWeb(self):
oldTitle = False
newTitle = False
# We want to show the titles of the old and new statemachines
if self.oldSM:
oldTitle = self.oldSM.Get('name')
if self.newSM:
newTitle = self.newSM.Get('name')
timeLeftSec = (self.startTimeSec + Config.recoveryTimeOutSec) - self.bb.Get("sm").Get("timeSec")
buffer = ""
buffer = buffer + "<div class='title'>Recover Mode</div>"
# if both the new and old are invalid then shit is really broken
if (self.oldSM == False) and (self.newSM == False):
buffer = buffer + "<div class='proceed'>Can't proceed</div>"
buffer = buffer + "<div class='proceed'>Old and New are both missing</div>"
buffer = buffer + "<div class='proceed'>What have you done!</div>"
return buffer
buffer = buffer + "<div class='proceed'>Time Left: " + str(int(timeLeftSec)) + "</div>"
buffer = buffer + "<div class='message'>Press PROCEED to dump the OLD run and load the NEW run<br><br>or<br><br>Wait and the OLD run will recover</div>"
if oldTitle:
buffer = buffer + "<div class='proceed'>OLD = " + str(oldTitle) + "</div>"
if newTitle:
buffer = buffer + "<div class='proceed'>NEW = " + str(newTitle) + "</div>"
return buffer
| true |
3f4b1253c2a2e182ae6ef676f16c82ae804fac51
|
Python
|
AK-1121/code_extraction
|
/python/python_27232.py
|
UTF-8
| 212 | 3.390625 | 3 |
[] |
no_license
|
# How can you tell if numbers in a list are bigger than 126? If it is bigger the program needs to add 94 to it
addOffset = [100, 127, 200]  # sample data added for illustration; the original snippet assumes addOffset already exists
for i, element in enumerate(addOffset):
    if element > 126:
        addOffset[i] = element + 94
print(addOffset)  # -> [100, 221, 294]
| true |
b6f44dc86674462620799c0311643c9b400d8f7e
|
Python
|
danielabud/Data_Science_Projects
|
/Transforming data with Python/count.py
|
UTF-8
| 484 | 3.453125 | 3 |
[] |
no_license
|
import read
import collections
df = read.load_data()
headlines = df['headline']
#join all headlines together
string = ""
for i in headlines:
string = string + " " + str.lower(str(i))
print("Successfully joined headlines into one string")
#split strings
headline_words = string.split()
print("Successfully split string into individual words")
#count occurrences
c = collections.Counter(headline_words)
print(c.most_common(100))
print("Successfully print most common 100 words")
| true |
d421e32fc72dc715edadc2595e1489a9cc5fac90
|
Python
|
happyhappyhappyhappy/pythoncode
|
/atcoder/mizuiro_h20/unionfind/ABC157D_FriendSuggestions/used/sample2.py
|
UTF-8
| 3,026 | 2.78125 | 3 |
[] |
no_license
|
# ライブラリのインポート
import sys
# import heapq,copy
import pprint as pp
from collections import defaultdict
# For pypy3
# import pypyjit
# Lift the recursion limit
# pypyjit.set_param('max_unroll_recursion=-1')
# sys.setrecursionlimit(10**6)
from logging import getLogger, StreamHandler, DEBUG
# Input macros
def II(): return int(sys.stdin.readline())
def MI(): return map(int, sys.stdin.readline().split())
def LI(): return list(map(int, sys.stdin.readline().split()))
def LLI(rows_number): return [LI() for _ in range(rows_number)]
# Set up debug output
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
# One-word aliases for class methods
xdebug=logger.debug
ppp=pp.pprint
# Const
MAXSIZE = ( 1 << 59 ) -1
MINSIZE = -( 1 << 59) + 1
class UnionFind():
def __init__(self,n):
self.n=n
self.parents=[-1]*n
def find(self,x):
if self.parents[x]<0:
return x
else:
self.parents[x]=self.find(self.parents[x])
return self.parents[x]
def union(self,x,y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.parents[y]<self.parents[x]:
x,y = y,x
self.parents[x]=self.parents[x]+self.parents[y]
self.parents[y]=x
def size(self,x):
res = (-1)*self.parents[self.find(x)]
return res
def same(self,x,y):
ok = (self.find(x)==self.find(y))
return ok
def members(self,x):
root = self.find(x)
res = [j for j in range(0,self.n) if self.find(j) == root ]
return res
def roots(self):
res = [j for j , x in enumerate(self.parents) if x < 0]
return res
def group_count(self):
return len(self.roots())
def all_group_members(self):
group_members=defaultdict(list)
for m in range(0,self.n):
group_members[self.find(m)].append(m)
return group_members
def __str__(self):
res = "\n".join(f"{r}: {m}" for r,m in self.all_group_members().items())
return res
N,M,K=MI()
F=[0]*N # friendship counts
B=[0]*N # block counts (same component only)
uf = UnionFind(N)
for _ in range(0,M):
AR,BR = MI()
a = AR-1
b = BR-1
F[a]=F[a]+1
F[b]=F[b]+1
uf.union(a,b)
for _ in range(0,K):
CR,DR = MI()
c = CR-1
d = DR-1
if uf.same(c,d):
B[c]=B[c]+1
B[d]=B[d]+1
ANS_L = []
for j in range(0,N):
xdebug(f"人 {j+1} について")
xdebug(f"この人の所属するグループの数は {uf.size(j)} 人 ")
xdebug(f"友好関係にある人は {F[j]} 人")
xdebug(f"グループは同じだがブロックしている人は {B[j]} 人")
ans = uf.size(j)-1-F[j]-B[j]
ANS_L.append(ans)
# print(ans)
ANS_L_STR=" ".join(list(map(str,ANS_L)))
print(ANS_L_STR)
| true |
3a4cb9aa61cab0eac7c3dc41ac1818ac5d289b30
|
Python
|
nishiyamayo/atcoder-practice
|
/src/main/scala/abc150/F.py
|
UTF-8
| 1,514 | 2.890625 | 3 |
[] |
no_license
|
N = int(input())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
C = [0] * (2 * N - 1)
D = [0] * N
for i in range(2 * N - 1):
C[i] = A[i % N] ^ A[(i + 1) % N]
D[i % N] = B[i % N] ^ B[(i + 1) % N]
class KMP:
    """Knuth-Morris-Pratt matching over arbitrary sequences."""
    def __init__(self, W):
        self.W = W  # the pattern
        self.L = len(W)
        self.T = self._build(W)  # failure (partial-match) table
    def _build(self, W):
        # T[i] = length of the longest proper border of W[:i]
T = [0] * self.L
T[0] = -1
T[1] = 0
i = 2
j = 0
while i < self.L:
if W[i - 1] == W[j]:
T[i] = j + 1
i += 1
j += 1
elif j > 0:
j = T[j]
else:
T[i] = 0
i += 1
return T
    def search(self, S):
        # Yield every start index at which the pattern occurs in S
m = 0
i = 0
L = len(S)
while m + i < L:
# print(m, i)
if self.W[i] == S[m + i]:
i += 1
if i == self.L:
i -= 1
ret = m
m = m + i - self.T[i]
i = self.T[i]
yield ret
else:
m = m + i - self.T[i]
if i > 0:
i = self.T[i]
# def ok(x, i):
# for j in range(N):
# if A[(j + i) % N] ^ B[j] != x:
# return False
# return True
#
#
# for i in range(N):
# x = A[i] ^ B[0]
# if ok(x, i):
# print(i, x)
kmp = KMP(D)
for x in kmp.search(C):
xor = A[x] ^ B[0]
print(x, xor)
| true |
afedd1171099a36870e9c586b13c58be59105ac7
|
Python
|
Linxi-brave/HogwartsStudy
|
/testing_my_selenium_PO/base/seleniumAction.py
|
UTF-8
| 6,190 | 2.796875 | 3 |
[] |
no_license
|
import os
import time
from selenium.webdriver import ActionChains, TouchActions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from util.handle_time import timenow
parent_dir = os.path.abspath(os.path.join(os.getcwd(),'../..'))
class SeleniumAction:
def __init__(self,driver:WebDriver):
self.driver = driver
def save_screenshot(self):
'''截图'''
time = str(timenow())
filename = parent_dir + '/screenshot/'+ time +'.png'
self.driver.save_screenshot(filename)
def click_ele(self,ele):
ele.click()
print("true")
def click_ele_wait(self,locator,ele):
        '''Wait for the element to be clickable, then click it'''
WebDriverWait(self.driver,10,0.5).until(
expected_conditions.element_to_be_clickable(locator)
)
ele.click()
print("true")
def sendkeys_ele(self,ele,value):
ele.send_keys(value)
def click_ele_try(self,loctor,time_out=10):
        '''Click wrapper: the element may not be clickable yet, so keep retrying'''
# loctor = (By.XPATH,'')
def wait_for_next(x:WebDriver):
try:
x.find_element(*loctor).click()
except:
return False
WebDriverWait(self.driver,timeout= time_out).until(wait_for_next)
def actoinchain_click(self,ele):
'''
        Single-click an element using ActionChains
        :param ele: the element
'''
action = ActionChains(self.driver)
action.click(ele)
action.perform()
def actionchain_contextclick(self,ele):
'''
        Right-click an element using ActionChains
        :param ele: the element
'''
action = ActionChains(self.driver)
action.context_click(ele)
action.perform()
def actionchain_doubleclick(self,ele):
'''
        Double-click an element using ActionChains
        :param ele: the element
'''
action = ActionChains(self.driver)
action.double_click(ele)
action.perform()
def actionchain_movetoelement(self,ele):
'''
        Move the cursor onto an element using ActionChains
        :param ele: the element
'''
action = ActionChains(self.driver)
action.move_to_element(ele)
action.perform()
def actionchain_dragdrop(self,ele1,ele2):
'''
        Drag element 1 onto element 2 using ActionChains
        :param ele1: source element
        :param ele2: target element
'''
action = ActionChains(self.driver)
action.drag_and_drop(ele1,ele2)
action.perform()
def actionchain_keys(self,ele,key,second):
'''
        Simulate keyboard input using ActionChains
        :param ele: the input element
        :param key: the key to send
Keys.BACKSPACE = '\ue003'
Keys.BACK_SPACE = BACKSPACE
'''
ele.click()
action = ActionChains(self.driver)
        action.send_keys(key).pause(second)  # pause briefly to make the effect visible
action.perform()
def touchaction_scrollbottom(self,ele):
'''
        Scroll the page to the bottom using TouchActions
:param ele:
'''
action = TouchActions(self.driver)
action.scroll_from_element(ele,0,10000)
action.perform()
def switch_windows(self,n):
'''
        Switch the window under operation
        :param n: index of the target window
'''
        print(self.driver.current_window_handle)  # print the current window handle
        print(self.driver.window_handles)  # print all window handles
windows = self.driver.window_handles
self.driver.switch_to_window(windows[n])
def switch_frame(self,frameId):
'''
        Switch to a frame before operating inside it
'''
self.driver.switch_to_frame(frameId)
def alert_accept(self):
'''
        Click the accept button of an alert dialog
'''
self.driver.switch_to_alert().accept()
def alert_dismiss(self):
'''
        Click the dismiss button of an alert dialog
'''
self.driver.switch_to_alert.dismiss()
def execute_jsscript(self,jsscript):
'''
        Execute a JavaScript statement
'''
self.driver.execute_script(jsscript)
def get_title(self):
'''
        Get the page title
'''
title = self.driver.title
return title
def get_elementtext(self,ele):
'''
        Return the element's text
'''
elementtext = ele.text
return elementtext
def get_elementattribute(self,ele,attribute):
'''
        Get the value of an element attribute
        :param ele: the element
        :param attribute: the attribute name
'''
# elementattribute = ele.get_attribute("textContent")
elementattribute = ele.get_attribute(attribute)
return elementattribute
def inputfile(self,inputele,filepath):
'''
        Upload a file
'''
inputele.send_keys(filepath)
    # Get the window width and height
def getsize(self):
size = self.driver.get_window_size()
width = size["width"]
height = size["height"]
return width, height
    # Swipe left
def swipeleft(self):
x1 = self.getsize()[0] /10
x2 = self.getsize()[0] /10 * 9
y1 = self.getsize()[1] /2
self.driver.swipe(x1,y1,x2,y1,2000)
    # Swipe right
def swiperight(self):
x1 = self.getsize()[0] / 10
x2 = self.getsize()[0] / 10 * 9
y1 = self.getsize()[1] / 2
self.driver.swipe(x2, y1, x1, y1, 2000)
    # Swipe up (scrolls to the bottom of the page)
def swipeup(self):
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # Swipe down
def swipedown(self):
y1 = self.getsize()[1] / 10
y2 = self.getsize()[1] / 10 * 9
x1 = self.getsize()[0] / 2
self.driver.swipe(x1, y2, x1, y1, 2000)
    # Browser back to the previous page
def driverback(self):
self.driver.back()
time.sleep(2)
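# Minimal usage sketch (added; assumes a chromedriver on PATH and uses an
# example URL, neither of which comes from the original module):
if __name__ == '__main__':
    from selenium import webdriver
    demo_driver = webdriver.Chrome()
    demo_driver.get('https://example.com')
    actions = SeleniumAction(demo_driver)
    print(actions.get_title())
    demo_driver.quit()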
| true |
efc21d91b3513136fa29c353e7594a4ab7d767e6
|
Python
|
perdikeas/python
|
/programs/constantine_space_invaders.py
|
UTF-8
| 5,355 | 3.234375 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
import random
import turtle
import math
import time
#global variables
tick=0
aliens=[]
missiles=[]
aliens_escaped=0
kills=0
player_health=15
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
#Setting up the Screen
screen=turtle.Screen()
screen.bgcolor('black')
screen.title("Space invaders")
spaceship=turtle.Turtle()
spaceship.speed(0)
spaceship.setheading(90)
spaceship.penup()
spaceship.shapesize(1,0.75,0.75)
spaceship.shape("triangle")
spaceship.color('turquoise')
spaceship.goto(0,-260)
spaceship_speed=10
#Setting up the border
border_pen=turtle.Turtle()
border_pen.color('white')
border_pen.penup()
border_pen.speed(0)
x=300
y=270
border_pen.setposition(-x,-y)
border_pen.pendown()
border_pen.hideturtle()
border_pen.color('green')
for i in range(4):
border_pen.forward(x+y)
border_pen.left(90)
border_pen.color('white')
colors=['red','orange','chartreuse','dark green']
#defining missile class
class Missile():
global spaceship
def __init__(self,speed):
self.speed=speed
self.avatar=turtle.Turtle()
self.avatar.hideturtle()
self.avatar.setheading(90)
self.avatar.penup()
self.avatar.color('yellow')
self.avatar.speed(0)
self.avatar.goto(spaceship.xcor(),spaceship.ycor()+15)
self.avatar.shapesize(0.5,1.2,0.5)
self.avatar.shape('triangle')
def live(self):
self.avatar.showturtle()
self.avatar.forward(self.speed)
#defining alien class
class Alien():
global colors
#initialization of alien instance
def __init__(self,speed):
self.speed=speed
self.clone=turtle.Turtle()
self.clone.setheading(270)
self.clone.penup()
self.clone.health=4
self.clone.speed(0)
self.clone.goto(random.randint(-250,250),270)
self.clone.shapesize(1.75,1.75,1.75)
self.clone.shape('circle')
self.tick=0
#method for Alien instances
def live(self):
self.tick+=1
if (self.tick%5==0):
if (random.randint(1,2)==1):
self.clone.left(random.randint(5,10))
else:
self.clone.right(random.randint(5,10))
self.clone.forward(self.speed)
self.clone.color(colors[self.clone.health-1])
#functions
def sqrt(a):
return math.sqrt(a)
def distance_between(a,b):
return sqrt( math.pow(a.xcor()-b.xcor(),2)+ math.pow(a.ycor()-b.ycor(),2))
def border_check_x(a):
limit=265
b=a.clone
if ((b.xcor()>=limit) and (b.heading()>270 or b.heading()<90)):
b.setheading(230)
elif((b.xcor()<=-1*limit) and (b.heading()>90 and b.heading()<270)):
b.setheading(300)
def border_check_y(a):
limit=260
global aliens,aliens_escaped,player_health
b=a.clone
if ((b.ycor()>=limit) and (b.heading()<180 and b.heading()>0)):
b.setheading(b.heading()*-1)
elif (b.ycor()<limit*-1):
b.hideturtle()
aliens.remove(a)
aliens_escaped+=1
player_health-=1
def right():
global spaceship,spaceship_speed
x=spaceship.xcor()
x+=spaceship_speed
spaceship.setx(x)
def left():
global spaceship,spaceship_speed
x=spaceship.xcor()
x-=spaceship_speed
spaceship.setx(x)
def add_missile():
global missiles
missiles.append(Missile(30))
#main loop
while aliens_escaped<10 and kills<10 and player_health>0:  # >0: health decrements can skip past exactly 0
#counter
tick+=1
print('you now have {} kills'.format(kills))
#key binding and user input
turtle.listen()
turtle.onkey(left,'Left')
turtle.onkey(right,'Right')
turtle.onkey(add_missile,'space')
    #collision checking
    for missile in missiles[:]:  # iterate over a copy; missiles are removed mid-loop
        for alien in aliens:
            if distance_between(missile.avatar,alien.clone)<20:
                alien.clone.health-=1
                alien.speed-=1
                missiles.remove(missile)
                missile.avatar.hideturtle()
                break  # this missile is spent
for alien in aliens:
if distance_between(alien.clone,spaceship)<20:
player_health-=0.75
#thorough boundaries check for all turtles
    for alien in aliens[:]:  # copy: border_check_y and the kill check may remove aliens
border_check_x(alien)
border_check_y(alien)
if alien.clone.health==0:
aliens.remove(alien)
alien.clone.hideturtle()
kills+=1
if spaceship.xcor()<-270:
spaceship.setx(-270)
elif spaceship.xcor()>265:
spaceship.setx(265)
    #generating new aliens
if (tick%100==0):
if len(aliens)<10:
aliens.append(Alien(5))
#alien movement
for alien in aliens:
alien.live()
#missile movement
    for missile in missiles[:]:  # copy: off-screen missiles are removed
missile.live()
if missile.avatar.ycor()>270:
missiles.remove(missile)
missile.avatar.hideturtle()
turtle.clearscreen()
screen.bgcolor('white')
turtle.color('red')
style=('Courier',15,'bold')
if kills >= 10:
    turtle.write('Congratulations, you won and killed 10 aliens',font=style,align='center')
else:
    spaceship.hideturtle()
    turtle.write('You fucked up, too many aliens escaped or damaged your spaceship',font=style,align='right')
turtle.done()
| true |
5c35bd554ab749fbe42de071819377389c91c853
|
Python
|
schrismartin/434_proj2
|
/PA1/AuthenticateHere.py
|
UTF-8
| 1,214 | 2.765625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
import socket
#set variables for server connection
TCP_IP = '127.0.0.1'
TCP_PORT = 2017
BUFFER = 1024
#create socket for server and listen for Alice
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
#connect to Alice
conn, addr = s.accept()
print 'Connection address: ', addr
#set variables for client connection
HOST = 'taranis.eecs.utk.edu'
TCP_PORT = 15153
#connect to Bob
t = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
t.connect((HOST, TCP_PORT))
#get challenge string from Bob
challenge = t.recv(BUFFER)
print 'Received challenge string: ', challenge
#send challenge string to Alice
conn.send(challenge)
#get authentication string from Alice
authentication = conn.recv(BUFFER)
print 'Authentication message is: ', authentication
#close connection with Alice. We don't need her anymore. MUAHAHA
conn.close()
#send Bob the proper authentication string for his challenge
t.send(authentication)
#get secret from bob and print
secret = t.recv(BUFFER)
print 'THE SECRET IS: ', secret
#close connection to Bob. All has gone according to plan. *insert ultimate evil laugh*
t.close()
| true |
7c762d0699a24e73b976e211960e170bb3661fbf
|
Python
|
ufbmi/olass-server
|
/app/olass/routes/oauth.py
|
UTF-8
| 5,698 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
"""
Goal: Implement routes specific to OAuth2 provider
@authors:
Andrei Sura <sura.andrei@gmail.com>
Client Credentials Grant: http://tools.ietf.org/html/rfc6749#section-4.4
Note: client credentials grant type MUST only be used by confidential clients.
--- Confidential Clients ---
Clients capable of maintaining the confidentiality of their
credentials (e.g., client implemented on a secure server with
restricted access to the client credentials), or capable of secure
client authentication using other means.
+---------+ +---------------+
| | | |
| |>--(A)- Client Authentication --->| Authorization |
| Client | | Server |
| |<--(B)---- Access Token ---------<| |
| | | |
+---------+ +---------------+
Client Credentials Flow
The flow illustrated above includes the following steps:
(A) The client authenticates with the authorization server and
requests an access token from the token endpoint.
(B) The authorization server authenticates the client, and if valid,
issues an access token.
-------------------------------------------------------------------------------
According to the rfc6749, client authentication is required in the
following cases:
- Resource Owner Password Credentials Grant: see `Section 4.3.2`.
- Authorization Code Grant: see `Section 4.1.3`.
- Refresh Token Grant: see `Section 6`.
"""
# TODO: read http://flask-oauthlib.readthedocs.io/en/latest/client.html
from datetime import datetime, timedelta
from flask_oauthlib.provider import OAuth2Provider
from flask import request
from olass import utils
from olass.main import app
from olass.models.oauth_client_entity import OauthClientEntity
from olass.models.oauth_access_token_entity import OauthAccessTokenEntity
TOKEN_TYPE_BEARER = 'Bearer'
# TODO: read this options from config file
TOKEN_EXPIRES_SECONDS = 3600 # one hour
TOKEN_LENGTH = 40 # max 255
log = app.logger
oauth = OAuth2Provider(app)
@oauth.usergetter
def load_user():
log.info("==> load_user()")
return None
@oauth.clientgetter
def load_client(client_id):
"""
This method is used by provider->authenticate_client()
"""
return OauthClientEntity.query.filter_by(client_id=client_id).one()
@oauth.tokengetter
def load_token(access_token=None, refresh_token=None):
tok = None
if access_token:
tok = OauthAccessTokenEntity.query.filter_by(
access_token=access_token).one_or_none()
elif refresh_token:
tok = OauthAccessTokenEntity.query.filter_by(
refresh_token=refresh_token).one_or_none()
if tok:
log.debug('Loaded token [{}] for user [{}]'.format(tok.id, tok.client))
return tok
@oauth.tokensetter
def save_token(token_props, req, *args, **kwargs):
"""
Saves token to the database
"""
result_token = None
token_id = token_props.get('id')
token = OauthAccessTokenEntity.get_by_id(token_id)
# log.debug("From {} got {}".format(token_id, token))
if token and not token.is_expired():
# log.debug("Reuse access token: {} expiring on {} ({} seconds left)"
# .format(token.id, token.expires, token.expires_in))
result_token = token
else:
access_token = utils.generate_token()
# access_token = utils.generate_token_urandom(TOKEN_LENGTH)
expires = datetime.utcnow() + timedelta(seconds=TOKEN_EXPIRES_SECONDS)
added_at = utils.get_db_friendly_date_time()
if token:
result_token = OauthAccessTokenEntity.update(
token,
access_token=access_token,
expires=expires,
added_at=added_at)
else:
result_token = OauthAccessTokenEntity.create(
access_token=access_token,
token_type=TOKEN_TYPE_BEARER,
_scopes='',
expires=expires,
client_id=req.client.client_id,
added_at=added_at
)
# log.info("return from save_token: {}".format(result_token))
return result_token
@app.route('/oauth/token', methods=['POST', 'GET'])
@oauth.token_handler
def handle_request_auth_token():
"""
The dictionary returned by this method is passed to the meth:`save_token`
in order to be saved
"""
if request.method == 'POST':
client_id = request.form.get('client_id')
client_secret = request.form.get('client_secret')
else:
client_id = request.args.get('client_id')
client_secret = request.args.get('client_secret')
if client_id is None:
raise Exception("Error: Missing client_id")
if client_secret is None:
raise Exception("Error: Missing client_secret")
client = OauthClientEntity.query.filter_by(
client_id=client_id).one_or_none()
if client is None:
raise Exception("Error: invalid client_id")
if client.client_secret != client_secret:
raise Exception("Error: invalid client_secret")
token = OauthAccessTokenEntity.query.filter_by(
client_id=client_id,
token_type=TOKEN_TYPE_BEARER).one_or_none()
# log.info("return from handle_request_auth_token(): {}".format(token))
return token.serialize() if token else {}
@oauth.grantgetter
def load_grant(client_id, code):
log.debug("==> load_grant()")
return None
@oauth.grantsetter
def save_grant(client_id, code, req):
log.debug("==> save_grant()")
return None
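# --- hedged usage sketch: a minimal client-credentials token request against
# the /oauth/token endpoint above. The client_id/client_secret values are
# hypothetical and must exist in the oauth_client table; the URL assumes the
# default Flask dev server.
#
# import requests
# resp = requests.post('http://localhost:5000/oauth/token',
#                      data={'client_id': 'client-123',
#                            'client_secret': 'secret-abc'})
# print(resp.json())  # the serialized access token, if one exists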
| true |
e5dfe0b34e123408cc50aaaf6e908df60d14c4dd
|
Python
|
Jaydeep-07/Python-Assignments
|
/Assignment 1/Assignment1_2.py
|
UTF-8
| 171 | 3.53125 | 4 |
[] |
no_license
|
def main(no):
if(no%2==0):
print("Even Number")
else:
print("Odd Number")
num = int(input("Enter The Number: "))
if __name__=='__main__':
main(num)
| true |
7f46b692ae0b4bcbaa6000519271e429f82221e6
|
Python
|
Kennnnnnji/MapReduce
|
/problem3/python/frdMapper.py
|
UTF-8
| 476 | 3.0625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import json
friendsof_a = {}
# input comes from STDIN (standard input)
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# parse the line with json method
record = json.loads(line)
a = record[0]
b = record[1]
friendsof_a.setdefault(a, set([]))
if b not in friendsof_a[a]:
friendsof_a[a].add(b)
print("%s\t%s" % (a, 1))
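# --- hedged companion sketch: a reducer for this mapper's "<user>\t1" output.
# It sums counts per user, relying on Hadoop streaming's sorted keys; the
# file name frdReducer.py is hypothetical.
#
# import sys
# current, total = None, 0
# for line in sys.stdin:
#     key, _ = line.strip().split('\t')
#     if key != current:
#         if current is not None:
#             print("%s\t%d" % (current, total))
#         current, total = key, 0
#     total += 1
# if current is not None:
#     print("%s\t%d" % (current, total))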
| true |
ad27fb722814a0d2a568bc4f8b1ba6e3a6740be2
|
Python
|
jnshwu/py3gemast
|
/ANSWERS/file_count.py
|
UTF-8
| 427 | 2.75 | 3 |
[] |
no_license
|
#!/usr/bin/env python
"""
@author: jstrick
Created on Thu Mar 21 00:26:40 2013
"""
import sys
import logging
import os
logging.basicConfig(
filename='file_count.log',
level=logging.INFO,
filemode='w', # create new log each time program is run
)
start_dir = sys.argv[1]
for curr_dir, dir_list, file_list in os.walk(start_dir):
message = '{0}: {1}'.format(curr_dir,len(file_list))
logging.info(message)
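# --- hedged usage: count files per directory under a tree; the path below is
# a placeholder. Per-directory counts land in file_count.log.
#
#   python file_count.py /path/to/dir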
| true |
0aa043e3563307132af47edf045f329462223816
|
Python
|
baigarkalpana/Python_Numbers
|
/Problems_on_Numbers/starpattrn1.py
|
UTF-8
| 404 | 3.953125 | 4 |
[] |
no_license
|
'''
program which accepts one number and displays a square star (*) pattern
*****
*****
*****
*****
*****
'''
#accepting number from user
num=int(input("enter number"))
#function definition for displaying star pattern
def stardisplay(star):
for x in range(star):
for y in range(star):
print("*",end=" ")
print()
#function call
stardisplay(num)
| true |
c196a87f68c52d2ca2e42998949c275029bb12ae
|
Python
|
jerryhanhuan/LearnPython
|
/re/do_re.py
|
UTF-8
| 589 | 3.5625 | 4 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#File Name:do_re.py
#Created Time:2019-07-20 08:49:03
import re
# Use an r-prefixed (raw) string so backslashes need no extra escaping
match_re = r'^\d{3}\-\d{3,8}$'
def main():
str = input('Phone Num:')
if re.match(match_re,str):
print('match')
else:
print('Not match')
    # Grouping: each pair of parentheses marks a group to extract
pattern = r'^(\d{3})-(\d{3,8})$'
m = re.match(pattern,str)
    # group(0) is the entire matched string
print(m.group(0))
print(m.group(1))
print(m.group(2))
if __name__ == '__main__':
main()
| true |
acc34034a686217ab82ee0c6a411da87d3b56288
|
Python
|
sgiardl/LeafClassification
|
/classifiers/Ridge.py
|
UTF-8
| 865 | 3.03125 | 3 |
[] |
no_license
|
from sklearn.linear_model import RidgeClassifier
from classifiers.Classifier import Classifier
class Ridge(Classifier):
"""
CLASS NAME:
Ridge
DESCRIPTION:
Child class for the Ridge classifier,
inherits from the Classifier parent class.
"""
def __init__(self):
"""
PARAMETERS:
None.
RETURNS:
None.
DESCRIPTION:
Initializes the class with the range of parameters
to test during hyperparameter search in the
self.param_grid attribute. The sklearn classifier
class is specified in the self.classifier attribute.
"""
super(Ridge, self).__init__()
self.classifier = RidgeClassifier()
self.param_grid = {'alpha': [1e-8, 1e-7, 1e-6, 1e-5, 1e-4]}
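# --- hedged usage sketch: the parent Classifier class is not shown here, so
# this assumes self.classifier / self.param_grid are meant to feed a
# scikit-learn grid search (X_train / y_train are placeholders):
#
# from sklearn.model_selection import GridSearchCV
# ridge = Ridge()
# search = GridSearchCV(ridge.classifier, ridge.param_grid, cv=5)
# search.fit(X_train, y_train)
# print(search.best_params_)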
| true |
e9cb4c9858b0f392e46dece2c11f3f6af2ef1dd6
|
Python
|
Leberwurscht/eartrainer
|
/guitar.py
|
UTF-8
| 3,007 | 2.859375 | 3 |
[
"WTFPL"
] |
permissive
|
#!/usr/bin/python
import gtk, gobject
from playnote import play_note
import random
strings = "ebgdaE"
current_fret = 0
current_note = 0
buttons = {}
status = gtk.Label()
status.set_markup('<span size="x-large"> </span>')
right = 0
total = 0
def play(*args):
global current_note
play_note(current_note)
def new_question():
global current_note
current_note = random.randint(-29, -5+12)
play()
def note_name(note):
  note_names = [" a ","ais"," b "," c ","cis"," d ","dis"," e "," f ","fis"," g ","gis"]
while note<0: note += 12
return note_names[note % 12]
def note(string, fret):
tunings = [-5, -10, -14, -19, -24, -29]
return tunings[string] + fret
def select(widget, data):
global buttons, current_note, right, total
string, fret = data
try:
buttons[string, fret].grab_focus()
except KeyError: pass
guess = note(string, fret)
play_note(guess)
total += 1
if guess==current_note:
right += 1
status.set_markup('<span foreground="dark green" size="x-large">RIGHT</span> %d%%' % int(100.0*right/total))
gobject.timeout_add(1500, new_question)
else:
status.set_markup('<span foreground="red" size="x-large">FALSE</span> %d%%' % int(100.0*right/total))
gobject.timeout_add(400, play)
def key_callback(window, event):
global current_fret, strings
key = gtk.gdk.keyval_name(event.keyval)
if key=="Escape":
current_fret = 0
play()
if key.isdigit():
current_fret *= 10
current_fret += int(key)
if key in strings:
string = strings.index(key)
fret = current_fret
current_fret = 0
select(None, (string, fret))
w = gtk.Window()
table = gtk.Table(7, 15, False)
for string in xrange(6):
label = gtk.Label(" %s " % strings[string])
table.attach(label, 0, 1, string+1, string+2)
circle = "\xe2\x97\x8f"
for fret in [5,7,9,12]:
label = gtk.Label(circle)
table.attach(label, fret+2, fret+3, 0, 1)
for string in xrange(6):
#button = gtk.Button("%d/%d" % (string+1, 0))
button = gtk.Button(note_name(note(string, 0)))
button.connect("clicked", select, (string, 0))
table.attach(button, 1, 2, string+1, string+2)
buttons[string, 0] = button
label = gtk.Label("|")
table.attach(label, 2, 3, string+1, string+2)
for fret in xrange(12):
#button = gtk.Button("%d/%d" % (string+1, fret+1))
button = gtk.Button(note_name(note(string, fret+1)))
button.connect("clicked", select, (string, fret+1))
table.attach(button, fret+3, fret+4, string+1, string+2)
buttons[string, fret+1] = button
#status.connect("clicked", play)
vbox = gtk.VBox()
w.add(vbox)
vbox.add(status)
vbox.add(table)
w.set_events(gtk.gdk.KEY_PRESS_MASK)
w.connect("key_press_event", key_callback)
w.show_all()
w.connect("destroy", lambda *args: gtk.main_quit())
w.connect("delete_event", lambda *args: gtk.main_quit())
new_question()
gtk.main()
| true |
08c7d456f9ddf0e9da23beea4477409a55c090a4
|
Python
|
IEEE-NITK/NLQ_to_SQL
|
/chandana/ml/_rnn1.py
|
UTF-8
| 1,133 | 3.203125 | 3 |
[] |
no_license
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import numpy as np
class SingleRNN(nn.Module):
def __init__(self, n_inputs, n_neurons):
super(SingleRNN, self).__init__()
self.Wx = torch.randn(n_inputs, n_neurons) # 4 X 1
self.Wy = torch.randn(n_neurons, n_neurons) # 1 X 1
        self.b = torch.zeros(1, n_neurons) # 1 X 1
def forward(self, X0, X1):
self.Y0 = torch.tanh(torch.mm(X0, self.Wx) + self.b) # 4 X 1
self.Y1 = torch.tanh(torch.mm(self.Y0, self.Wy) +
torch.mm(X1, self.Wx) + self.b) # 4 X 1
return self.Y0, self.Y1
N_INPUT = 4
N_NEURONS = 1
X0_batch = torch.tensor([[0,1,2,0], [3,4,5,0],
[6,7,8,0], [9,0,1,0]],
dtype = torch.float) #t=0 => 4 X 4
X1_batch = torch.tensor([[9,8,7,0], [0,0,0,0],
[6,5,4,0], [3,2,1,0]],
dtype = torch.float) #t=1 => 4 X 4
model = SingleRNN(N_INPUT, N_NEURONS)
Y0_val, Y1_val = model(X0_batch, X1_batch)
print(Y0_val)
print(Y1_val)
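# Hedged cross-check: the same two-step recurrence via torch.nn.RNNCell
# (tanh nonlinearity). Its weights are initialized independently, so the
# numbers will differ from SingleRNN above -- this only mirrors the API.
cell = nn.RNNCell(N_INPUT, N_NEURONS, nonlinearity='tanh')
h0 = cell(X0_batch)       # hidden state at t=0 (zero initial state)
h1 = cell(X1_batch, h0)   # hidden state at t=1
print(h0)
print(h1)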
| true |
be90a24bc3ced1411b2a889c776bde96b8f5ae76
|
Python
|
tehwentzel/cd_map
|
/backend/Stats.py
|
UTF-8
| 379 | 2.765625 | 3 |
[] |
no_license
|
import numpy as np
import pandas as pd
def records_to_array(record_list, keys=None):
    #should take a list of dicts from json stuff
    #[{x0: 1, x1: 1...},{x0: 0...}...] -> np.array([[x0,x1,x2...],[ x0...]...]
    if keys is None:
        keys = record_list[0].keys()
    # index by a fixed key order so columns line up across records
    keys = list(keys)
    records = [[entry[k] for k in keys] for entry in record_list]
    return np.array(records)
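# --- hedged usage: made-up records; passing an explicit key order keeps the
# columns aligned across records.
#
# rows = [{'x0': 1, 'x1': 2}, {'x0': 3, 'x1': 4}]
# arr = records_to_array(rows, keys=['x0', 'x1'])
# print(arr.shape)  # (2, 2)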
| true |
2784a2db1d55796b62c258762aab60e7acdaa746
|
Python
|
atulanandnitt/arun
|
/uploadFileToAWS.py
|
UTF-8
| 843 | 2.53125 | 3 |
[] |
no_license
|
import boto3
bucket_name = 'bucket_name' # having public access
def upload_text_file():
content = open('local_file.txt', 'rb')
s3 = boto3.client('s3')
s3.put_object(
Bucket=bucket_name,
Key='remote-file.txt',
Body=content
)
upload_text_file()
def upload_media_file(f1):
content = open(f1, 'rb')
s3 = boto3.client('s3')
s3.put_object(
Bucket=bucket_name,
Key='remote-file-2.png',
Body=content
)
# f1 = 'python_java.mp4'
# f1 = 'local_file.txt'
f1 = 'bitmoji.png'
upload_media_file(f1)
# renamed so it no longer shadows the single-argument upload_media_file above
def upload_media_file_with_key(f1, key1):
content = open(f1, 'rb')
s3 = boto3.client('s3')
s3.put_object(
Bucket=bucket_name,
Key=key1,
Body=content
)
f1 = 'python_java.mp4'
key1 = 'remote-file-3.mp4'
upload_media_file_with_key(f1, key1)
print("done")
| true |
514a140cdfbcf4bc1c2da6fc2e8f3dc2c9857b81
|
Python
|
atavares75/MQP-URL_Classifier
|
/src/Metrics/AlgorithmPerformance.py
|
UTF-8
| 6,182 | 3.140625 | 3 |
[] |
no_license
|
from itertools import cycle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, auc
from sklearn.preprocessing import label_binarize
class AlgorithmPerformance:
def __init__(self, test_urls, test_output, prediction, algorithm, autoGenerateMetrics=True):
"""
Initializes parameters to generate algorithm performance metrics
:param test_urls: the labeled input the model was tested with
:param test_output: the labeled output model was tested with
:param prediction: the predicted output of model
:param algorithm: algorithm used by model (default is empty string)
:param autoGenerateMetrics: boolean value indicating if all metrics should be automatically generated
"""
self.data_labels = np.unique(test_output)
self.test_urls = test_urls
self.test_output = test_output
self.prediction = prediction
self.algorithm = algorithm
if autoGenerateMetrics is True:
self.cmtx = self.createConfusionMatrix()
self.FP = self.cmtx.sum(axis=0) - np.diag(self.cmtx)
self.FN = self.cmtx.sum(axis=1) - np.diag(self.cmtx)
self.TP = np.diag(self.cmtx)
self.TN = self.cmtx.values.sum() - (self.FP.values.sum() + self.FN.values.sum() + self.TP.sum())
else:
self.cmtx = None
self.FP = None
self.FN = None
self.TP = None
self.TN = None
def createConfusionMatrix(self):
"""
Creates a confusion matrix from the predicted and actual output
:return: a data frame with the confusion matrix and labeled rows and column
"""
        c_matrix = confusion_matrix(self.test_output, self.prediction, labels=self.data_labels)
idx = list()
c = list()
for label in self.data_labels:
idx.append('true: ' + label)
c.append('pred: ' + label)
self.cmtx = pd.DataFrame(c_matrix, index=idx, columns=c)
self.FP = self.cmtx.sum(axis=0) - np.diag(self.cmtx)
self.FN = self.cmtx.sum(axis=1) - np.diag(self.cmtx)
self.TP = np.diag(self.cmtx)
self.TN = self.cmtx.values.sum() - (self.FP.values.sum() + self.FN.values.sum() + self.TP.sum())
return self.cmtx
def createClassificationReport(self):
"""
Wrapper function for sklearn.metrics classification_report function
:return: returns a dictionary containing classification report
"""
return classification_report(self.test_output, self.prediction)
def calculateAccuracy(self):
"""
Wrapper function for sklearn.metrics accuracy_score function
:return: float
"""
return accuracy_score(self.test_output, self.prediction)
def generateROC(self):
"""
Generates an ROC curve and ROC area for each class
:return: plot with ROC curve
"""
fpr = dict()
tpr = dict()
n_classes = len(self.data_labels)
roc_auc = dict()
y_test = label_binarize(self.test_output, classes=self.data_labels)
y_score = label_binarize(self.prediction, classes=self.data_labels)
        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
lw = 2
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'skyblue', 'red'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve for {0} URLs (area = {1:0.2f})'
''.format(self.data_labels[i], roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title(self.algorithm + ' - Multi-class ROC Curve Plot')
plt.legend(loc="lower right")
return plt.gcf()
    def calculateFalsePositiveRate(self):
        """
        :return: the false positive rate
        """
        return self.FP / (self.FP + self.TN)
    def calculateFalseNegativeRate(self):
        """
        :return: the false negative rate
        """
        return self.FN / (self.TP + self.FN)
    def get_results(self, metric):
        """
        Returns the metric requested by the user.
        :param metric: the name of the wanted metric
        :return: the value of the wanted metric
        """
        if metric == "accuracy":
            return self.calculateAccuracy()
        elif metric == "false_positive":
            return self.calculateFalsePositiveRate()
        elif metric == "false_negative":
            return self.calculateFalseNegativeRate()
def set_prediction(self, new):
"""
Sets new prediction. Used for tagging method.
:param new: new prediction
:return: None
"""
self.prediction = pd.Series(new)
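# --- hedged usage sketch: made-up labels; the URL column is stored but not
# used by the metrics themselves.
#
# import pandas as pd
# truth = pd.Series(['benign', 'phish', 'benign', 'phish'])
# pred = pd.Series(['benign', 'benign', 'benign', 'phish'])
# perf = AlgorithmPerformance(['u1', 'u2', 'u3', 'u4'], truth, pred, 'Demo')
# print(perf.calculateAccuracy())       # 0.75
# print(perf.createConfusionMatrix())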
| true |
122c69caa2d9159acc6c47e46adbe825539ded96
|
Python
|
nimadorostkar/img-size
|
/a.py
|
UTF-8
| 478 | 3.171875 | 3 |
[] |
no_license
|
import cv2
# read image
imgA = cv2.imread('p1.jpg', cv2.IMREAD_UNCHANGED)
imgB = cv2.imread('p2.jpg', cv2.IMREAD_UNCHANGED)
# height, width
heightA = imgA.shape[0]
widthA = imgA.shape[1]
heightB = imgB.shape[0]
widthB = imgB.shape[1]
print('Image A :')
print(' Height : ',heightA)
print(' Width : ',widthA)
print(' -------------------------------- ')
print('Image B :')
print(' Height : ',heightB)
print(' Width : ',widthB)
| true |
f478d87a40506ae09e10affcb01210685ef6dac9
|
Python
|
Samuel-Maddock/Zilean
|
/league_api/graphs/games_per_month.py
|
UTF-8
| 2,662 | 2.953125 | 3 |
[] |
no_license
|
from riotwatcher import RiotWatcher
import datetime
import matplotlib.pyplot as plt
from .base_graph import Graph
class GamesPerMonthGraph(Graph):
def __init__(self, api_watcher, region):
super(GamesPerMonthGraph, self).__init__(api_watcher, region)
def retrieve_matchlist(self, summoner):
canBeLoaded = True
beginIndex = -100
total = 0
gameDateList = []
# Retrieve a list of all game dates
while canBeLoaded:
beginIndex += 100
history = self.api_watcher.match.matchlist_by_account(self.region, summoner["accountId"], begin_index=beginIndex)
if len(history["matches"]) < 100:
canBeLoaded = False
for match in history["matches"]:
gameDate = datetime.datetime.fromtimestamp(match["timestamp"]/1000).strftime('%Y-%m-%d %H:%M:%S.%f')
gameDateList.append(gameDate)
total += len(history["matches"])
print("All Game Dates have been loaded for: " + summoner["name"])
return gameDateList
def render(self, summoner_name="SamuelTheRandom", filepath="gpm-summoner.png"):
api_watcher = self.api_watcher
summoner = api_watcher.summoner.by_name(self.region, summoner_name)
gameDateList = self.retrieve_matchlist(summoner)
# Format data into a dictionary of games played per month
dateData = dict()
yearSet = set()
        for gameDate in gameDateList:
            year = gameDate[0:4]
            month = gameDate[5:7]
            key = year + "-" + month
yearSet.add(year)
if key not in dateData:
dateData[key] = 1
else:
dateData[key] += 1
# Formatting the Graph
        months = ["01", "02", "03", "04","05","06","07","08","09","10","11","12"]
        month_labels = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
plt.clf()
for year in sorted(yearSet):
gamesPlayed = list()
for month in months:
key = year + "-" + month
if key in dateData.keys():
gamesPlayed.append(dateData[key])
else:
gamesPlayed.append(0)
            plt.plot(month_labels, gamesPlayed, "o-", label=year)
plt.title("League of Legends Games Played Per Month for: " + summoner["name"])
plt.xlabel("Months of the Year")
plt.ylabel("Number of Games Played")
plt.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)
plt.savefig(filepath, bbox_inches='tight')
| true |
888d8c467c55d5279dfdfda38dfdf92f989084a7
|
Python
|
ankurtaly/Integrated-Gradients
|
/IntegratedGradients/integrated_gradients.py
|
UTF-8
| 4,902 | 3.375 | 3 |
[] |
no_license
|
import numpy as np
def integrated_gradients(
inp,
target_label_index,
predictions_and_gradients,
baseline,
steps=50):
"""Computes integrated gradients for a given network and prediction label.
Integrated gradients is a technique for attributing a deep network's
prediction to its input features. It was introduced by:
https://arxiv.org/abs/1703.01365
In addition to the integrated gradients tensor, the method also
returns some additional debugging information for sanity checking
the computation. See sanity_check_integrated_gradients for how this
information is used.
This method only applies to classification networks, i.e., networks
that predict a probability distribution across two or more class labels.
Access to the specific network is provided to the method via a
'predictions_and_gradients' function provided as argument to this method.
The function takes a batch of inputs and a label, and returns the
predicted probabilities of the label for the provided inputs, along with
gradients of the prediction with respect to the input. Such a function
should be easy to create in most deep learning frameworks.
Args:
inp: The specific input for which integrated gradients must be computed.
target_label_index: Index of the target class for which integrated gradients
must be computed.
predictions_and_gradients: This is a function that provides access to the
network's predictions and gradients. It takes the following
arguments:
- inputs: A batch of tensors of the same same shape as 'inp'. The first
dimension is the batch dimension, and rest of the dimensions coincide
with that of 'inp'.
- target_label_index: The index of the target class for which gradients
must be obtained.
and returns:
- predictions: Predicted probability distribution across all classes
for each input. It has shape <batch, num_classes> where 'batch' is the
number of inputs and num_classes is the number of classes for the model.
- gradients: Gradients of the prediction for the target class (denoted by
target_label_index) with respect to the inputs. It has the same shape
as 'inputs'.
baseline: [optional] The baseline input used in the integrated
gradients computation. If None (default), the all zero tensor with
the same shape as the input (i.e., 0*input) is used as the baseline.
The provided baseline and input must have the same shape.
steps: [optional] Number of intepolation steps between the baseline
and the input used in the integrated gradients computation. These
steps along determine the integral approximation error. By default,
steps is set to 50.
Returns:
integrated_gradients: The integrated_gradients of the prediction for the
provided prediction label to the input. It has the same shape as that of
the input.
The following output is meant to provide debug information for sanity
checking the integrated gradients computation.
See also: sanity_check_integrated_gradients
prediction_trend: The predicted probability distribution across all classes
for the various (scaled) inputs considered in computing integrated gradients.
It has shape <steps, num_classes> where 'steps' is the number of integrated
gradient steps and 'num_classes' is the number of target classes for the
model.
"""
if baseline is None:
baseline = 0*inp
assert(baseline.shape == inp.shape)
# Scale input and compute gradients.
scaled_inputs = [baseline + (float(i)/steps)*(inp-baseline) for i in range(0, steps+1)]
  predictions, grads = predictions_and_gradients(scaled_inputs, target_label_index)  # shapes: <steps+1, num_classes>, <steps+1> + inp.shape
# Use trapezoidal rule to approximate the integral.
# See Section 4 of the following paper for an accuracy comparison between
# left, right, and trapezoidal IG approximations:
# "Computing Linear Restrictions of Neural Networks", Matthew Sotoudeh, Aditya V. Thakur
# https://arxiv.org/abs/1908.06214
grads = (grads[:-1] + grads[1:]) / 2.0
avg_grads = np.average(grads, axis=0)
integrated_gradients = (inp-baseline)*avg_grads # shape: <inp.shape>
return integrated_gradients, predictions
def random_baseline_integrated_gradients(
inp,
target_label_index,
predictions_and_gradients,
steps=50,
num_random_trials=10):
all_intgrads = []
for i in range(num_random_trials):
intgrads, prediction_trend = integrated_gradients(
inp,
target_label_index=target_label_index,
predictions_and_gradients=predictions_and_gradients,
baseline=255.0*np.random.random([224,224,3]),
steps=steps)
all_intgrads.append(intgrads)
avg_intgrads = np.average(np.array(all_intgrads), axis=0)
return avg_intgrads
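# --- hedged example: one way (among many) to build the
# `predictions_and_gradients` callable, here with PyTorch; `model` is an
# assumed classifier returning class logits for a batch of inputs.
#
# import torch
#
# def make_predictions_and_gradients(model):
#     def f(inputs, target_label_index):
#         x = torch.tensor(np.array(inputs), dtype=torch.float32,
#                          requires_grad=True)
#         probs = torch.softmax(model(x), dim=-1)
#         probs[:, target_label_index].sum().backward()
#         return probs.detach().numpy(), x.grad.numpy()
#     return f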
| true |
fe49fe70acacca4c739a28860c6900d166d5616a
|
Python
|
sparshagarwal16/Assignment
|
/Assignment18.py
|
UTF-8
| 1,477 | 3.171875 | 3 |
[] |
no_license
|
from tkinter import *
import tkinter as tk
#Question 1
print("Question 1")
dict={}
for i in range(2):
name=input("Enter the name: ")
mob=int(input("Enter mobile number: "))
dict[name]=mob
r= Tk()
z=Label(r,text="DATA",width=15,bg="blue")
z.pack()
scrollbar = Scrollbar(r)
scrollbar.pack( side = RIGHT, fill = Y )
mylist = Listbox(r, yscrollcommand = scrollbar.set )
for line in dict:
mylist.insert(END, str(line))
mylist.pack( side = LEFT, fill = BOTH )
scrollbar.config( command = mylist.yview )
all_items=mylist.get(0,END)
def poll():
z.after(200, poll)
sel = mylist.curselection()
print(sel)
if(len(sel)>0):
t=all_items[sel[0]]
z.config(text=dict[t])
poll()
print(all_items)
#Question 2
print("Question 2")
dict2={}
name1=""
name2=""
mob1=0
mob2=0
def add():
name1=input("Enter the name: ")
mob1=int(input("Enter mobile number: "))
name2=input("Enter the name: ")
mob2=int(input("Enter mobile number: "))
dict2[name1]=mob1
dict2[name2]=mob2
print(dict2)
def insert():
for line2 in dict2: #entry added to GUI
mylist.insert(END, str(line2))
mylist.pack(side=LEFT, fill=BOTH)
button=tk.Button(r, text='ADD',width=15,activebackground='#1CDDD8',activeforeground="black",bg="#1CDDD8",command=add)
button.pack()
button2=tk.Button(r, text='Insert',width=15,activebackground='#1CDDD8',activeforeground="black",bg="#1CDDD8",command=insert)
button2.pack()
mainloop()
| true |
ce2eb2a5232159594b23316331ee21f2c39e721c
|
Python
|
zhuyanxi/CarnoFinance
|
/pythonVer/main.py
|
UTF-8
| 903 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
import xlrd
CSI_300_GROWTH_INDEX_EXCEL = "csi_300_growth_index.xls"
CSI_300_VALUE_INDEX_EXCEL = "csi_300_value_index.xls"
print("hello py")
bookGrowth = xlrd.open_workbook(CSI_300_GROWTH_INDEX_EXCEL)
sheetGrowth = bookGrowth.sheet_by_index(0)
growthList = sheetGrowth.col_values(5)[1:]
# print(growthList)
bookValue = xlrd.open_workbook(CSI_300_VALUE_INDEX_EXCEL)
sheetValue = bookValue.sheet_by_index(0)
valueList = sheetValue.col_values(5)[1:]
# print(valueList)
# stocks present in both the growth and the value index
niubiStock = [x for x in growthList if x in valueList]
print(niubiStock, len(niubiStock))
print("--------------------------------")
# stocks only in the growth index
niubiStock1 = [x for x in growthList if x not in valueList]
print(niubiStock1, len(niubiStock1))
print("--------------------------------")
# stocks only in the value index
niubiStock2 = [x for x in valueList if x not in growthList]
print(niubiStock2, len(niubiStock2))
| true |
04d15225faec3a4a70e7f3db4ac3ddabf78f5cde
|
Python
|
RuolinZheng08/phonetic-acoustic-word-embeddings
|
/lib/data/batch_samplers.py
|
UTF-8
| 2,611 | 2.703125 | 3 |
[] |
no_license
|
import logging as log
import random
import numpy as np
class _StatefulBatchSampler:
def __len__(self):
return len(self.batches)
def __iter__(self):
while self.iter < len(self):
batch = self.batches[self.iter]
self.iter += 1
yield batch
self.init_iter()
def state_dict(self, itr):
return {
"iter": self.iter - (itr._send_idx - itr._rcvd_idx),
"batches": np.array(self.batches)
}
def load_state_dict(self, state_dict):
self.iter = state_dict["iter"]
self.batches = state_dict["batches"].tolist()
class BatchSampler(_StatefulBatchSampler):
def __init__(self, examples, batch_size,
shuffle=False):
log.info(f" >> # examples= {len(examples)}")
log.info(f" >> batch_size= {batch_size}")
log.info(f" >> shuffle= {shuffle}")
self.examples = list(examples)
self.batch_size = batch_size
self.shuffle = shuffle
self.init_iter()
def init_iter(self):
if self.shuffle:
random.shuffle(self.examples)
self.iter = 0
self.batches = []
batch = []
for example in self.examples:
if len(batch) < self.batch_size:
batch.append(example)
else:
self.batches.append(batch)
batch = [example]
if len(batch) > 0:
self.batches.append(batch)
class PackedBatchSampler(_StatefulBatchSampler):
def __init__(self, examples, batch_size, sort_by,
variable=False, shuffle=False):
log.info(f" >> # examples= {len(examples)}")
log.info(f" >> batch_size= {batch_size}")
log.info(f" >> shuffle= {shuffle}")
log.info(f" >> sort by {sort_by}")
log.info(f" >> variable= {variable}")
self.examples = examples
self.batch_size = batch_size
def get_size(k):
return self.examples[k][sort_by]
self.get_size = get_size
self.variable = variable
self.shuffle = shuffle
self.init_iter()
def init_iter(self):
self.iter = 0
batches = []
batch = []
batch_size = 0
examples = sorted(self.examples, key=self.get_size, reverse=True)
example_size = self.get_size(examples[0]) if self.variable else 1
for example in examples:
if batch_size + example_size <= self.batch_size:
batch.append(example)
batch_size += example_size
else:
batches.append(batch)
batch = [example]
example_size = self.get_size(example) if self.variable else 1
batch_size = example_size
if len(batch) > 0:
batches.append(batch)
self.batches = batches[::-1]
if self.shuffle:
random.shuffle(self.batches)
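# --- hedged usage sketch: these samplers yield lists of example keys, so they
# can be plugged into a torch DataLoader via batch_sampler (dataset and
# collate_fn are assumed to exist elsewhere):
#
# from torch.utils.data import DataLoader
# sampler = BatchSampler(range(1000), batch_size=32, shuffle=True)
# loader = DataLoader(dataset, batch_sampler=sampler, collate_fn=collate_fn)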
| true |
483ea429b898a70e49dda119ca8d460329ca22e0
|
Python
|
blackglowen/BookManager
|
/books/management/commands/seed.py
|
UTF-8
| 2,056 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from faker import Faker
from books import models
from data import genres, authors
def create_authors():
for author in authors.DEFAULT_AUTHORS:
auth = models.Author(name=author['name'], country=author['country'])
auth.save()
def create_genres():
for genre in genres.DEFAULT_CATEGORIES:
cat = models.Category(name=genre)
cat.save()
def create_user():
faker = Faker()
first_name = faker.first_name()
last_name = faker.last_name()
email = '{}.{}@example.com'.format(first_name.lower(), last_name.lower())
password = 'Pwd987654321@'
username = first_name.lower() + '.' + last_name.lower()
user = User.objects.create_user(username, email, password, first_name=first_name, last_name=last_name)
user.save()
class Command(BaseCommand):
help = 'Populates the database with records'
def add_arguments(self, parser):
self.parser = parser
parser.add_argument('-u', '--users', metavar='N', type=int, help='The number of fake users to create.')
parser.add_argument('-g', '--genres', action='store_true', help='Flag to create default book genres.')
parser.add_argument('-a', '--authors', action='store_true', help='Flag to create default authors.')
def handle(self, *args, **options):
if options['users']:
for _ in range(options['users']):
create_user()
self.stdout.write(self.style.SUCCESS('Successfully created %s users' % options['users']))
if options['genres']:
create_genres()
self.stdout.write(self.style.SUCCESS('Successfully created book genres'))
if options['authors']:
create_authors()
self.stdout.write(self.style.SUCCESS('Successfully created book authors'))
if not (options['users'] or options['genres'] or options['authors']):
self.parser.print_help()
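# --- hedged usage: typical invocations of this management command, using the
# flags declared above.
#
#   python manage.py seed --users 10
#   python manage.py seed --genres --authors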
| true |
92b7f82c0b66b874f9839be4bdd804b226557e97
|
Python
|
shonenada/crawler
|
/tests/test_link_item.py
|
UTF-8
| 834 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
#-*- coding: utf-8 -*-
import unittest
from crawler.link import Link
from crawler.item import Item
class LinkItemTestCase(unittest.TestCase):
def setUp(self):
self.item = Item('img', r'(?P<img><img [^>]+?>)')
self.link = Link('movie.douban', 'http://movie.douban.com/', [self.item])
def test_register_funcs(self):
def cf(one):
print one
return one
self.link.register_funcs([cf])
self.assertTrue(cf in self.item.clean_funcs)
def test_fetch(self):
results = self.link.fetch()
douban_logo = '<img style="top: -5px; position: relative;" src="http://img3.douban.com/pics/site/icon_site_beta.gif"/>'
self.assertIn('img', results)
movie = results['img']
self.assertIn(douban_logo, [m['img'] for m in movie])
| true |
bad3b25b62280567cf51c2caff704cd4ad0a6a12
|
Python
|
mdauthentic/ETLProject-Batch
|
/src/config.py
|
UTF-8
| 400 | 2.71875 | 3 |
[] |
no_license
|
from os import path, getcwd
import json
class Config:
def __init__(self) -> None:
pass
def __get_path_from_rel(self, rel_path: str):
return path.join(getcwd(), rel_path)
def load_config(self):
config_path = self.__get_path_from_rel("config.json")
with open(config_path, 'r') as f:
config_data = json.load(f)
return config_data
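# --- hedged usage: loads config.json from the current working directory.
#
# cfg = Config()
# settings = cfg.load_config()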
| true |
e6664a43bde9dde96c1e42eab1927c6a70aaaa5d
|
Python
|
shhuan/algorithms
|
/py/codeforces/321D.py
|
UTF-8
| 2,470 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
"""
created by huash06 at 2015-07-15
"""
__author__ = 'huash06'
import os
import sys
import functools
import collections
import itertools
import math
h, q = [int(x) for x in input().split()]
def rightIndex(index, height):
res = index
for _ in range(height):
res = res * 2 + 1
return res
def cross4(l1, r1, l2, r2):
return l2 <= r1 <= r2 or l1 <= l2 <= r1
def cross2(range1, range2):
l1 = range1[0]
r1 = range1[1]
l2 = range2[0]
r2 = range2[1]
return cross4(l1, r1, l2, r2)
def merge4(l1, r1, l2, r2):
    return min(l1, l2), max(r1, r2)
def merge2(range1, range2):
l1 = range1[0]
r1 = range1[1]
l2 = range2[0]
r2 = range2[1]
return merge4(l1, r1, l2, r2)
def intersection(range1, range2):
l1 = range1[0]
r1 = range1[1]
l2 = range2[0]
r2 = range2[1]
return max(l1, l2), min(r1, r2)
def difference(range1, range2):
if cross2(range1, range2):
l1 = range1[0]
r1 = range1[1]
l2 = range2[0]
r2 = range2[1]
if l1 < l2:
return l1, l2 - 1
elif r1 > r2:
return r2 + 1, r1
else:
return None
else:
return range1
def addRange(rangeList, newRange):
ar = newRange
for i, v in enumerate(rangeList):
r = difference(newRange, v)
if ar:
ar = difference(ar, v)
if not r:
return False
else:
rangeList[i] = r
if ar:
rangeList.append(ar)
return True
possible = []
impossible = []
res = None
for qi in range(q):
i, l, r, ans = [int(x) for x in input().split()]
if res:
continue
newRange = (int(math.pow(2, h - i) * l), rightIndex(r, h - i))
if ans == 0:
if not addRange(impossible, newRange):
res = "Game cheated!"
break
else:
if not addRange(possible, newRange):
res = "Game cheated!"
break
if not possible:
possible.append((int(pow(2, h - 1)), int(pow(2, h)) - 1))
# debug output; leaving these enabled would corrupt the judge's expected output
# print(possible)
# print(impossible)
if res:
print(res)
else:
res = []
for p in possible:
v = p
for ip in impossible:
if not v:
break
v = difference(v, ip)
if v:
addRange(res, v)
count = 0
for v in res:
count += v[1] - v[0] + 1
if count == 1:
print(res[0][0])
else:
print("Data not sufficient!")
| true |
23483e674d518c890e571643ac48c6ddd3ca0f70
|
Python
|
ZwEin27/wedc-one-class-classification
|
/wedc/common/str.py
|
UTF-8
| 1,459 | 3.109375 | 3 |
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
# @Author: ZwEin
# @Date: 2016-08-09 13:52:35
# @Last Modified by: ZwEin
# @Last Modified time: 2016-08-09 13:55:11
import re
import string
def hasNumbers(inputString):
# return any(char.isdigit() for char in inputString)
return bool(re.search(r'\d', inputString))
def hasUnicode(s):
if isinstance(s, unicode):
return True
return False
def hasSpecial(s):
# unicode_char = u'aa\u2764\ufe0f\u0455\u03c9\u0454\u0454\u0442\u0454\u0455\u0442'
# unicode_char = u'ss'
encoded = s.encode('utf-8')
reg = re.compile("^[A-Za-z"+string.punctuation+"]+$")
if reg.search(encoded):
return False
return True
def hasPunctuation(s):
reg = re.compile("["+string.punctuation+"]+")
if reg.search(s):
return True
return False
def hasHTMLTag(s):
reg = re.compile("<\w+>")
if reg.search(s):
return True
return False
def whatisthis(s):
if isinstance(s, str):
print "ordinary string"
elif isinstance(s, unicode):
print "unicode string"
else:
print "not a string"
# print hasUnicode(u'\u2113')
# print string.punctuation
# s = '<p>Source: <a rel="nofollow" target="_blank" href="http://www.jobs2careers.com/click.php?id=1834227799.96&job_loc=Santa+Maria%2CCA">http://www.jobs2careers.com/click.php?id=1834227799.96&job_loc=Santa+Maria%2CCA</a></p>'
# print hasHTMLTag(s)
# print hasNumbers('live')
| true |
89ea8c5fd4225ed700d2926bfb75f3e21b8b2cf6
|
Python
|
jmstudyacc/python_practice
|
/POP1-Exam_Revision/repl_problems/session_4/matrix_max_index.py
|
UTF-8
| 1,063 | 3.96875 | 4 |
[] |
no_license
|
# M = matrix of numbers, list of lists
# m = number of rows in M
# n = number of columns in M
def matrix_max_index(M, m, n):
    # init with the first element so matrices of all-negative numbers work too
    ele_max = M[0][0]
    idx = 0
# iterate over the matrix
for i in range(0, m):
# if the value of ele_max is less than the max value of the current iteration (a list, i, in the matrix
if ele_max < max(M[i]):
# set ele_max to equal this value
ele_max = max(M[i])
# record the index it happened at
idx = i
# calculate the position IN the list, i, to return
idx_pos = M[idx].index(ele_max)
# return a tuple containing the index and the list index of the max value of the matrix
return idx, idx_pos
M = [[0, 3, 2, 4], [2, 3, 5, 5], [5, 1, 2, 3]]
print(matrix_max_index(M, 3, 4))
M2 = [[1]]
print(matrix_max_index(M2, 1, 1))
M3 = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]]
print(matrix_max_index(M3, 3, 5))
M4 = [[1], [2], [3], [2], [1], [2]]
print(matrix_max_index(M4, 6, 1))
| true |
bc6df96d75dd8988f9ba02619e85b6bd99254a17
|
Python
|
Panda3D-public-projects-archive/sfsu-multiplayer-game-dev-2011
|
/branches/johanbranch/clientTeam/src/net/ServerResponseTable.py
|
UTF-8
| 1,143 | 2.640625 | 3 |
[] |
no_license
|
from common.Constants import Constants
from net.response.ResponseLogin import ResponseLogin
from net.response.ResponseRegister import ResponseRegister
class ServerResponseTable:
responseTable = {}
@staticmethod
def init():
"""Initialize the response table."""
ServerResponseTable.add(Constants.SMSG_AUTH, 'ResponseLogin')
ServerResponseTable.add(Constants.SMSG_REGISTER, 'ResponseRegister')
@staticmethod
def add(constant, name):
"""Map a numeric response code with the name of an existing response module."""
if name in globals():
ServerResponseTable.responseTable[constant] = name
else:
print 'Add Response Error: No module named ' + str(name)
@staticmethod
def get(responseCode):
"""Retrieve an instance of the corresponding response."""
serverResponse = None
if responseCode in ServerResponseTable.responseTable:
serverResponse = globals()[ServerResponseTable.responseTable[responseCode]]()
else:
print 'Bad Response Code: ' + str(responseCode)
return serverResponse
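# --- hedged usage sketch: build the table once at startup, then map incoming
# response codes to handler instances.
#
# ServerResponseTable.init()
# response = ServerResponseTable.get(Constants.SMSG_AUTH)
# if response:
#     response.execute()  # execute() is a hypothetical handler method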
| true |
b2072e435d9974fe95f9482d2aa04f09dd563208
|
Python
|
Breast-Cancer-Team/Final-Project
|
/Final-Project/logistic_regression.py
|
UTF-8
| 1,526 | 3.328125 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Import cleaning and splitting functions
from clean_split_data import clean_data
from clean_split_data import split_data
# Import pandas and plotting libraries
import pandas as pd
# Import Scikit-Learn library for the regression models and confusion matrix
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# ### Data
data = pd.read_csv('data/data.csv')
data = clean_data(data)
X_train, X_test, y_train, y_test = split_data(data)
# ### Classifier
clf = LogisticRegression(solver="saga", max_iter=5000)
clf.fit(X_train, y_train)
# ### Optimized Logistic Regression Predictor
def feature_names():
'''
Returns array of input features of
best performing backwards stepwise selection test.
'''
return ['radius_mean', 'texture_mean', 'perimeter_mean',
'area_mean', 'smoothness_mean',
'compactness_mean', 'concavity_mean',
'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean']
# User input to predict diagnosis
def predict(test_data):
'''
Takes test data and uses classifier to predict boolean output.
'''
X = data[feature_names()]
y = data.diagnosis
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
logistic_reg = LogisticRegression(solver="saga", max_iter=5000)
logistic_reg.fit(X_train, y_train)
y_pred = logistic_reg.predict(test_data)
return y_pred
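# --- hedged smoke test: `predict` expects rows with exactly the columns from
# feature_names(); this assumes split_data keeps the original feature columns.
#
# sample = X_test[feature_names()].head()
# print(predict(sample))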
| true |
6fada24729ca97253cf05cdd0aa3c34279a81ad7
|
Python
|
yerlantemir/HandwrittenLetterRecognition
|
/script.py
|
UTF-8
| 3,334 | 2.578125 | 3 |
[] |
no_license
|
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
train_path = r'./train_set/'
test_path = r'./test_set/'
train_batches = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True).flow_from_directory(train_path,target_size=(28,28),
classes='ә,і,ң,ғ,ү,ұ,қ,ө,һ'.split(','))
test_batches = ImageDataGenerator(rescale=1./255).flow_from_directory(test_path,target_size=(28,28),
classes='ә,і,ң,ғ,ү,ұ,қ,ө,һ'.split(','))
def rgb2gray(rgb):
return np.dot(rgb[...,:], [0.2989, 0.5870, 0.1140])
data_list_x = []
data_list_y = []
batch_index = 0
data_list_x_1 = []
data_list_y_1 = []
while batch_index <= test_batches.batch_index:
x,y = test_batches.next()
for i in range(len(x)):
img_data = rgb2gray(x[i])
data_list_x_1.append(img_data)
for k in range(y.shape[1]):
if y[i][k] == 1:
data_list_y_1.append(k)
batch_index = batch_index + 1
batch_index = 0
while batch_index <= train_batches.batch_index:
x,y = train_batches.next()
for i in range(len(x)):
img_data = rgb2gray(x[i])
data_list_x.append(img_data)
for k in range(y.shape[1]):
if y[i][k] == 1:
data_list_y.append(k)
batch_index = batch_index + 1
train_x = np.array(data_list_x).reshape(len(data_list_x),28,28,1).astype('float32')
train_y = np.array(data_list_y).astype('int64')
test_x = np.array(data_list_x_1).reshape(len(data_list_x_1),28,28,1).astype('float32')
test_y = np.array(data_list_y_1).astype('int64')
model = Sequential()
input_shape = (28, 28,1)
model.add(Conv2D(32, kernel_size=(5,5), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(4,4)))
model.add(Flatten()) # Flattening the 2D arrays for fully connected layers
model.add(Dense(128, activation = tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(9,activation=tf.nn.softmax))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x=train_x,y=train_y, epochs=10)
scores = model.evaluate(test_x, test_y)
print(scores)
print(model.metrics_names)
#model.save('model.h5')
#convolutional
#model_json = model.to_json()
#with open ('model2.json','w') as json_file:
# json_file.write(model_json)
#model.save_weights('model2.h5')
#print('saved model to disk')
#
#def plots(ims,figsize=(8,6),rows = 1,interp = False , titles=None):
# if type(ims[0]) is np.ndarray:
# ims = np.array(ims).astype(np.uint8)
# if(ims.shape[-1] != 3):
# ims = ims.transpose(0,2,3,1)
# f = plt.figure(figsize=figsize)
# cols = len(ims)//rows if len(ims) % 2 == 0 else len(ims)//rows+1
# for i in range(len(ims)):
# sp = f.add_subplot(rows,cols,i+1)
# sp.axis('Off')
# if titles is not None:
# sp.set_title(titles[i],fontsize=16)
# plt.imshow(ims[i],interpolation=None if interp else 'none')
| true |
c0258edd2eb883b3d849c2942862c7d80536412a
|
Python
|
1oglop1/rst2text
|
/src/rst2text/elements.py
|
UTF-8
| 11,927 | 3.265625 | 3 |
[
"MIT"
] |
permissive
|
"""
Extracted from sphinx.writers.text
"""
import math
import re
import textwrap
from itertools import chain, groupby
from typing import cast
from docutils import writers
from docutils.utils import column_width
from rst2text import MAXWIDTH
class Cell:
"""Represents a cell in a table.
It can span on multiple columns or on multiple lines.
"""
def __init__(self, text="", rowspan=1, colspan=1):
self.text = text
self.wrapped = [] # type: List[str]
self.rowspan = rowspan
self.colspan = colspan
self.col = None
self.row = None
def __repr__(self):
return "<Cell {!r} {}v{}/{}>{}>".format(
self.text, self.row, self.rowspan, self.col, self.colspan
)
def __hash__(self):
return hash((self.col, self.row))
def wrap(self, width):
self.wrapped = my_wrap(self.text, width)
class Table:
"""Represents a table, handling cells that can span on multiple lines
or rows, like::
+-----------+-----+
| AAA | BBB |
+-----+-----+ |
| | XXX | |
| +-----+-----+
| DDD | CCC |
+-----+-----------+
This class can be used in two ways:
    - Either with absolute positions: call ``table[line, col] = Cell(...)``,
      which overwrites an existing cell if any.
    - Or with relative positions: call ``add_row()`` and
      ``add_cell(Cell(...))`` as needed.
    Cells spanning multiple rows or columns (having a colspan or rowspan
    greater than one) are automatically referenced by all the table cells
    they cover. This is a useful representation, as we can simply check
    ``if self[x, y] is self[x, y+1]`` to recognize a span.
    Colwidth is not automatically computed; it has to be given, either
    at construction time or during the table construction.
Example usage::
table = Table([6, 6])
table.add_cell(Cell("foo"))
table.add_cell(Cell("bar"))
table.set_separator()
table.add_row()
table.add_cell(Cell("FOO"))
table.add_cell(Cell("BAR"))
print(table)
+--------+--------+
| foo | bar |
|========|========|
| FOO | BAR |
+--------+--------+
"""
def __init__(self, colwidth=None):
self.lines = [] # type: List[List[Cell]]
self.separator = 0
self.colwidth = colwidth if colwidth is not None else [] # type: List[int]
self.current_line = 0
self.current_col = 0
    def add_row(self):
        """Add a row to the table, to use with ``add_cell()``. It is not
        necessary to call ``add_row()`` before the first ``add_cell()``.
        """
self.current_line += 1
self.current_col = 0
def set_separator(self):
"""Sets the separator below the current line.
"""
self.separator = len(self.lines)
def add_cell(self, cell):
"""Add a cell to the current line, to use with ``add_row()``. To add
a cell spanning on multiple lines or rows, simply set the
``cell.colspan`` or ``cell.rowspan`` BEFORE inserting it to
the table.
"""
while self[self.current_line, self.current_col]:
self.current_col += 1
self[self.current_line, self.current_col] = cell
self.current_col += cell.colspan
def __getitem__(self, pos):
line, col = pos
self._ensure_has_line(line + 1)
self._ensure_has_column(col + 1)
return self.lines[line][col]
def __setitem__(self, pos, cell):
line, col = pos
self._ensure_has_line(line + cell.rowspan)
self._ensure_has_column(col + cell.colspan)
for dline in range(cell.rowspan):
for dcol in range(cell.colspan):
self.lines[line + dline][col + dcol] = cell
cell.row = line
cell.col = col
def _ensure_has_line(self, line):
while len(self.lines) < line:
self.lines.append([])
def _ensure_has_column(self, col):
for line in self.lines:
while len(line) < col:
line.append(None)
def __repr__(self):
return "\n".join(repr(line) for line in self.lines)
    def cell_width(self, cell, source):
        """Give the cell width, according to the given source (either
        ``self.colwidth`` or ``self.measured_widths``).
        This takes into account cells spanning multiple columns.
        """
width = 0
for i in range(self[cell.row, cell.col].colspan):
width += source[cell.col + i]
return width + (cell.colspan - 1) * 3
@property
def cells(self):
seen = set() # type: Set[Cell]
for lineno, line in enumerate(self.lines):
for colno, cell in enumerate(line):
if cell and cell not in seen:
yield cell
seen.add(cell)
def rewrap(self):
"""Call ``cell.wrap()`` on all cells, and measure each column width
after wrapping (result written in ``self.measured_widths``).
"""
self.measured_widths = self.colwidth[:]
for cell in self.cells:
cell.wrap(width=self.cell_width(cell, self.colwidth))
if not cell.wrapped:
continue
width = math.ceil(max(column_width(x) for x in cell.wrapped) / cell.colspan)
for col in range(cell.col, cell.col + cell.colspan):
self.measured_widths[col] = max(self.measured_widths[col], width)
def physical_lines_for_line(self, line):
"""From a given line, compute the number of physical lines it spans
due to text wrapping.
"""
physical_lines = 1
for cell in line:
physical_lines = max(physical_lines, len(cell.wrapped))
return physical_lines
def __str__(self):
out = []
self.rewrap()
def writesep(char="-", lineno=None):
# type: (str, Optional[int]) -> str
"""Called on the line *before* lineno.
Called with no *lineno* for the last sep.
"""
out = [] # type: List[str]
for colno, width in enumerate(self.measured_widths):
if (
lineno is not None
and lineno > 0
and self[lineno, colno] is self[lineno - 1, colno]
):
out.append(" " * (width + 2))
else:
out.append(char * (width + 2))
head = "+" if out[0][0] == "-" else "|"
tail = "+" if out[-1][0] == "-" else "|"
glue = [
"+" if left[0] == "-" or right[0] == "-" else "|"
for left, right in zip(out, out[1:])
]
glue.append(tail)
return head + "".join(chain(*zip(out, glue)))
for lineno, line in enumerate(self.lines):
if self.separator and lineno == self.separator:
out.append(writesep("=", lineno))
else:
out.append(writesep("-", lineno))
for physical_line in range(self.physical_lines_for_line(line)):
linestr = ["|"]
for colno, cell in enumerate(line):
if cell.col != colno:
continue
if lineno != cell.row:
physical_text = ""
elif physical_line >= len(cell.wrapped):
physical_text = ""
else:
physical_text = cell.wrapped[physical_line]
adjust_len = len(physical_text) - column_width(physical_text)
linestr.append(
" "
+ physical_text.ljust(
self.cell_width(cell, self.measured_widths) + 1 + adjust_len
)
+ "|"
)
out.append("".join(linestr))
out.append(writesep("-"))
return "\n".join(out)
class TextWrapper(textwrap.TextWrapper):
"""Custom subclass that uses a different word separator regex."""
wordsep_re = re.compile(
r"(\s+|" # any whitespace
r"(?<=\s)(?::[a-z-]+:)?`\S+|" # interpreted text start
r"[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|" # hyphenated words
r"(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))"
) # em-dash
def _wrap_chunks(self, chunks):
# type: (List[str]) -> List[str]
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
lines = [] # type: List[str]
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - column_width(indent)
if self.drop_whitespace and chunks[-1].strip() == "" and lines:
del chunks[-1]
while chunks:
l = column_width(chunks[-1])
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
else:
break
if chunks and column_width(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
if self.drop_whitespace and cur_line and cur_line[-1].strip() == "":
del cur_line[-1]
if cur_line:
lines.append(indent + "".join(cur_line))
return lines
def _break_word(self, word, space_left):
# type: (str, int) -> Tuple[str, str]
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
"""
total = 0
for i, c in enumerate(word):
total += column_width(c)
if total > space_left:
return word[: i - 1], word[i - 1 :]
return word, ""
def _split(self, text):
# type: (str) -> List[str]
"""_split(text : string) -> [string]
Override original method that only split by 'wordsep_re'.
This '_split' split wide-characters into chunk by one character.
"""
def split(t):
# type: (str) -> List[str]
return super(TextWrapper, self)._split(t)
chunks = [] # type: List[str]
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
chunks.extend(split("".join(g)))
else:
chunks.extend(list(g))
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
# type: (List[str], List[str], int, int) -> None
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Override original method for using self._break_word() instead of slice.
"""
space_left = max(width - cur_len, 1)
if self.break_long_words:
l, r = self._break_word(reversed_chunks[-1], space_left)
cur_line.append(l)
reversed_chunks[-1] = r
elif not cur_line:
cur_line.append(reversed_chunks.pop())
def my_wrap(text, width=MAXWIDTH, **kwargs):
# type: (str, int, Any) -> List[str]
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
| true |
89fe4bb55154aa2fc662cbf574ae67cabda9bd42
|
Python
|
Noxy3301/AtCoder
|
/OtherContest/dp/dp_a.py
|
UTF-8
| 235 | 2.96875 | 3 |
[] |
no_license
|
n = int(input())
h = tuple(map(int, input().split()))
# dp[i] = min cost to reach stone i with jumps of 1 or 2 at cost |h[i] - h[j]|
dp = [0]
for i in range(1,n):
if i == 1:
dp.append(abs(h[i]-h[i-1]))
else:
dp.append(min(dp[-1]+abs(h[i]-h[i-1]), dp[-2]+abs(h[i]-h[i-2])))
print(dp[-1])
| true |
7b58820f730f011f4cb285dfb5c219b5982dcc69
|
Python
|
snaress/studio
|
/lib/system/seqList.py
|
UTF-8
| 2,791 | 2.96875 | 3 |
[] |
no_license
|
import os
class SeqLs(object):
""" List given directory with a sequence compact view
ex: ima_1.[001:005:1].txt ([start:stop:step])
:param dir: Directory to list
:type dir: str """
def __init__(self, dir):
if not os.path.exists(dir):
raise IOError, "!!! ERROR: Directory not found: %s !!!" % dir
self.dir = dir
self.seqList = []
self.seqDict = {}
self.dirList = os.listdir(self.dir)
self._exec()
def _exec(self):
""" Launch listing commands """
self.parseDir()
self.printResult()
def parseDir(self):
""" Parse given directory """
        for item in self.dirList:
            itemPath = os.path.join(self.dir, item)
            #-- Item is folder --#
            if os.path.isdir(itemPath):
                self.seqList.append(item.upper())
            #-- Item is file --#
            elif os.path.isfile(itemPath):
#-- Seq type : name.index.ext --#
if len(item.split('.')) == 3 and item.split('.')[1].isdigit():
name = item.split('.')[0]
index = item.split('.')[1]
ext = item.split('.')[2]
label = '%s/%s' % (name, ext)
if not label in self.seqDict.keys():
self.seqDict[label] = [index]
else:
self.seqDict[label].append(index)
#-- Seq type : other --#
else:
self.seqList.append(item)
def printResult(self):
""" Print sequence listing """
lines = []
for k in sorted(self.seqDict.keys()):
first = self.seqDict[k][0]
last = self.seqDict[k][-1]
if len(self.seqDict[k]) == ((int(last) - int(first)) + 1):
step = 1
else:
sec = self.seqDict[k][1]
step = (int(sec) - int(first))
for n, ind in enumerate(self.seqDict[k]):
if n > 0:
prevInd = self.seqDict[k][n-1]
if not (int(ind) - int(prevInd)) == step:
step = None
break
if step is None:
lines.append('%s.[%s...%s].%s' % (k.split('/')[0], self.seqDict[k][0],
self.seqDict[k][-1], k.split('/')[-1]))
else:
lines.append('%s.[%s:%s:%s].%s' % (k.split('/')[0], self.seqDict[k][0], self.seqDict[k][-1],
step, k.split('/')[-1]))
self.seqList.extend(lines)
for l in sorted(lines):
print l
if __name__ == '__main__':
currentDir = os.getcwd()
sls = SeqLs(currentDir)
| true |
09dca82f0d4b5de3d2ef41cc5384c37993d0e016
|
Python
|
rcburnet/PHYS-437A
|
/Assignment_6/Ryans_list/315_primaries_compare_to_274.py
|
UTF-8
| 1,834 | 3.21875 | 3 |
[] |
no_license
|
#This script will read my list of 315 primaries and compare it to Ryan's list
# to check whether all of Ryan's primaries are in my list (the comparison
# below shows 13 of them are not).
# Identical to script in Assignment_3.
#read files
file_315_primary = open('CasJobs_315_primaries_in_SDSS.txt')
list_315_primary = file_315_primary.readlines()
file_315_primary.close()
file_274_primary = open('Ryans_list_of_274_primaries.txt')
list_274_primary = file_274_primary.readlines()
file_274_primary.close()
#organize lists
for i in range(len(list_315_primary)):
    list_315_primary[i] = list_315_primary[i].split(',')
for i in range(len(list_274_primary)):
list_274_primary[i] = list_274_primary[i].split('\t')
#find extras (find every entry in my list that isn't in Ryan's list)
length = 0
for i in range(len(list_284_primary)):
count = 0
for j in range(len(list_274_primary)):
if list_284_primary[i][0] == list_274_primary[j][0]:
count += 1
if count == 0:
print list_284_primary[i]
length += 1
print length
#I get back 23 primaries that were in my list but not Ryan's. 23 extra, not 10.
#Compare Ryan's list to mine. Which primaries are in Ryan's list, but not my list.
for i in range(len(list_274_primary)):
count = 0
for j in range(len(list_284_primary)):
if list_274_primary[i][0] == list_284_primary[j][0]:
count += 1
#if count == 0:
# print list_274_primary[i]
#I get back 13 primaries that were in Ryan's list but not mine. 13 primaries
# I cut that Ryan did not.
#This means I cut 13 that I shouldn't have cut, leaving me with 297 primaries,
# not 284 primaries after applying the second cut, and 23 primaries extra which
# are in badly masked regions or regions of incomplete coverage (297 - 23 = 274)
| true |
4b52c8cc5f884e0e931437bb55886b4edfedf835
|
Python
|
elimoss/broad_malaria
|
/snp_call_mods/util_cmd.py
|
UTF-8
| 6,149 | 2.703125 | 3 |
[] |
no_license
|
# util_cmd.py - This gives a main() function that serves as a nice wrapper
# around other commands and presents the ability to serve up multiple
# command-line functions from a single python script.
#
# requires python >= 2.5
#
# dpark@broadinstitute.org
# $Id: util_cmd.py 7351 2013-01-22 22:53:06Z dpark $
import os, tempfile, sys, shutil, optparse, logging
log = logging.getLogger()
tmpDir = None
def setup_logger(log_level):
loglevel = getattr(logging, log_level.upper(), None)
assert loglevel, "unrecognized log level: %s" % log_level
log.setLevel(loglevel)
h = logging.StreamHandler()
h.setFormatter(logging.Formatter("%(asctime)s - %(module)s:%(lineno)d:%(funcName)s - %(levelname)s - %(message)s"))
log.addHandler(h)
def script_name():
return sys.argv[0].split('/')[-1].rsplit('.',1)[0]
def common_opts(parser, optlist=['tmpDir', 'loglevel']):
for opt in optlist:
if opt=='loglevel':
parser.add_option("--loglevel", dest="loglevel", type='choice',
help="Verboseness of output. [default: %default]",
default='DEBUG',
choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL','EXCEPTION'])
elif opt=='tmpDir':
parser.add_option("--tmpDir", dest="tmpDir", type='string',
help="Directory for temp files. [default: %default]",
default=tmpDir)
elif opt=='tmpDirKeep':
parser.add_option("--tmpDirKeep",
action="store_true", dest="tmpDirKeep",
help="If set, do not delete the tmpDir if an exception occurs while running.",
default=False)
else:
raise Exception("unrecognized option %s" % opt)
return parser
def main(commands, version, tool_paths, description=''):
''' commands: a list of 4-tuples containing the following:
1. name of command (string, no whitespace)
2. method to call that takes two arguments, args (a list of required
arguments) and options (an optparse construct), and returns
the desired exit code
3. method to call that returns an optparse parser. we provide
the name of the command and a version string for convenience.
4. the number of required arguments for this command. If None,
we allow any number.
If commands contains exactly one member and the name of the
only command is None, then we get rid of the whole multi-command
thing and just present the options for that one function.
version: the version string to provide to the parser methods of each command
tool_paths: a dict. we will set the 'tmpDir' value so that your
commands will have access to a suggested temp directory
description: a long string to present as a description of your script
as a whole if the script is run with no arguments
log_level: the logging log level to set the log instance to
'''
tool_paths['tmpDir'] = find_tmpDir()
tmpDir = tool_paths['tmpDir']
cmdlist = [x[0] for x in commands]
commands = dict([(x[0],x[1:]) for x in commands])
if len(cmdlist)==1 and cmdlist[0]==None:
# only one (nameless) command in this script, simplify
command = None
parser = commands[command][1]('', version)
(options, args) = parser.parse_args()
else:
# multiple commands available
if len(sys.argv) <= 1:
print "Usage: python %s commandname options" % sys.argv[0]
if description.strip():
print description
print "\ncommands:"
for cmd in cmdlist:
print "\t%s" % cmd
print "\nRun a command with no options for help on that command."
return
command = sys.argv[1]
assert command in commands, "command '%s' not recognized" % command
parser = commands[command][1](command, version)
(options, args) = parser.parse_args(sys.argv[2:])
if len(args)==0:
parser.print_help()
return
assert commands[command][2]==None or len(args)==commands[command][2], "%d required arguments, got %d." % (commands[command][2], len(args))
setup_logger(not hasattr(options, 'loglevel') and 'DEBUG' or options.loglevel)
log.info("version: " + parser.get_version())
log.debug("command line parameters (including implicit defaults): %s %s" % (
' '.join(args),
' '.join(
["%s=%s" % (o.get_opt_string(), vars(options)[o.dest])
for o in parser.option_list if o.dest!=None])))
## this is so that ajb073 can run R and see emma (assuming we're not on local)
#RLibs = '/home/unix/dpark/.RLibs' -- eh, we don't run this much from calhoun anymore anyway
#if os.access(RLibs, os.F_OK):
# os.environ['R_LIBS'] = RLibs
if hasattr(options, 'tmpDir'):
''' If this command has a tmpDir option, use that as a base directory
and create a subdirectory within it which we will then destroy at
the end of execution.
'''
proposed_dir = 'tmp-%s-%s' % (script_name(),command!=None and command or '')
if 'LSB_JOBID' in os.environ:
proposed_dir = 'tmp-%s-%s-%s-%s' % (script_name(),command,os.environ['LSB_JOBID'],os.environ['LSB_JOBINDEX'])
tempfile.tempdir = tempfile.mkdtemp(prefix='%s-'%proposed_dir, dir=options.tmpDir)
log.debug("using tempDir: %s" % tempfile.tempdir)
os.environ['TMPDIR'] = tempfile.tempdir # this is for running R
try:
ret = commands[command][0](args, options)
except:
if hasattr(options, 'tmpDirKeep') and options.tmpDirKeep and not (tempfile.tempdir.startswith('/tmp') or tempfile.tempdir.startswith('/local')):
log.exception("Exception occurred while running %s, saving tmpDir at %s" % (command, tempfile.tempdir))
else:
shutil.rmtree(tempfile.tempdir)
raise
else:
shutil.rmtree(tempfile.tempdir)
return ret
else:
# otherwise just run the command
return commands[command][0](args, options)
def find_tmpDir():
''' This provides a suggested base directory for a temp dir for use in your
optparse-based tmpDir option.
'''
tmpdir = '/tmp'
if os.access('/local/scratch', os.X_OK | os.W_OK | os.R_OK):
tmpdir = '/local/scratch'
if 'LSB_JOBID' in os.environ:
# this directory often exists for LSF jobs, but not always.
# for example, if the job is part of a job array, this directory is called
# something unpredictable and unfindable, so just use /local/scratch
proposed_dir = '/local/scratch/%s.tmpdir' % os.environ['LSB_JOBID']
if os.access(proposed_dir, os.X_OK | os.W_OK | os.R_OK):
tmpdir = proposed_dir
return tmpdir
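# Hypothetical usage sketch (the names below are illustrative, not part of
# this module): a caller script registers its commands roughly like this.
#
#   def do_count(args, options):
#       print len(args)
#       return 0
#
#   def parser_count(cmd, version):
#       parser = optparse.OptionParser(usage='%prog ' + cmd, version=version)
#       return common_opts(parser)
#
#   if __name__ == '__main__':
#       main([('count', do_count, parser_count, None)],
#            version='1.0', tool_paths={}, description='example wrapper')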
| true |
a028b2814142246d733143a223d8f9770a76dfcc
|
Python
|
secreter/QA
|
/offline/answerTheQuestion.py
|
UTF-8
| 986 | 2.515625 | 3 |
[] |
no_license
|
# Perform graph matching to find the answer
from getRelFromN import getTriple
import json
import requests
# Look up pattern ids by the predicate phrase
fRelT=open('./txt/dist/my/relT.txt','r',encoding='utf-8')
relT=json.load(fRelT)
# Look up predicate paths by pattern id
fPaths=open('./txt/dist/my/paths_tf-idf.txt','r',encoding='utf-8')
paths=json.load(fPaths)
# sents="where does Aaron Kemps come from?"
sents="what is Hurricane Joe?"
tu=getTriple(sents)
if tu is None:
print("tu is none!")
exit()
sub,rel,obj=tu
print(rel)
print(relT[rel])
# Merge the arrays for all pattern ids
pathArr=[]
for pid in relT[rel]:
if pid in paths:
pathArr+=paths[pid]
if len(pathArr)>3:
    # Sort and take the top 3
pathArr=sorted(pathArr,key=(lambda x:x[1]), reverse=True)[:3]
else:
pathArr=sorted(pathArr,key=(lambda x:x[1]), reverse=True)
print(pathArr)
url='http://localhost:5003/rdf?v='+sub+'&e='+pathArr[0][0]
req = requests.get(url)
data=json.loads(req.text)
answer=data['pointTo'].replace('resource/','')
print(sents)
print(answer)
| true |
49118b0ce721fb878cb7e853391710aaccf21e72
|
Python
|
abdibogor/Thenewboston
|
/03_Software Engineering/004_Python Reverse Shell/011_Selecting a Target/server.py
|
UTF-8
| 2,894 | 2.90625 | 3 |
[] |
no_license
|
import socket
import threading
import sys
import time
from queue import Queue
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
all_connections = []
all_addresses = []
# create socket (allows two computers to connect)
def socket_create():
try:
global host
global port
global s
host = ''
port = 9999
s = socket.socket()
except socket.error as msg:
print("Socket creation error: " + str(msg))
#2_Binding the socket and listening for Connections
# Bind socket to port and wait for connection from client
def socket_bind():
try:
global host
global port
global s
print("Binding socket to port: " + str(port))
s.bind((host, port))
s.listen(5)
except socket.error as msg:
print("Socket binding error: " + str(msg))
time.sleep(5)
socket_bind()
# Accept connect from multiple clients and save to list
def accept_connections():
for c in all_connections:
c.close()
del all_connections[:]
del all_addresses[:]
while 1:
try:
conn, address = s.accept()
conn.setblocking(1)
all_connections.append(conn)
all_addresses.append(address)
print("\nConnection has been esthablished: " + address[0])
except:
print("Error accepting connections")
#9_Creating a custom Interactive Shell
# Interactive prompt for sending commands remotely
def start_turtle():
while True:
        cmd = input('turtle> ')
        if cmd == 'list':
list_connections()
elif 'select' in cmd:
conn = get_target(cmd)
if conn is not None:
send_target_commands(conn)
else:
print("Command not recognized")
#10_Displaying All Current Connections
# Displays all current connections
def list_connections():
    results = ''
    for i, conn in enumerate(all_connections):
        try:
            # Probe the connection so dead clients get pruned from the lists.
            conn.send(str.encode(' '))
            conn.recv(20480)
        except:
            del all_connections[i]
            del all_addresses[i]
            continue
        results += str(i) + '   ' + str(all_addresses[i][0]) + '   ' + str(
            all_addresses[i][1]) + '\n'
    print('----- Clients -----' + '\n' + results)
    return
#11_Selecting a Target
# Select a target client
def get_target(cmd):
    """ Select target client
    :param cmd: the 'select <index>' command string
    """
    target = cmd.split(' ')[-1]
    try:
        target = int(target)
    except ValueError:
        print('Client index should be an integer')
        return None
    try:
        conn = all_connections[target]
    except IndexError:
        print('Not a valid selection')
        return None
    return conn
| true |
d9d0ddca41cb7fda6786175790dadd6534c6a57f
|
Python
|
okomeshino/python-practice
|
/applications/class_method.py
|
UTF-8
| 848 | 3.765625 | 4 |
[] |
no_license
|
# encoding:utf-8
import datetime
class TestClass:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
    # Class method (usable as an alternative constructor)
@classmethod
def sample_class_method(cls, date_diff=0):
today = datetime.date.today()
d = today + datetime.timedelta(date_diff)
return cls(d.year, d.month, d.day)
# Call without instantiating the class
test_class_1 = TestClass.sample_class_method()
print(test_class_1.year, test_class_1.month, test_class_1.day)
# Call without instantiating the class
test_class_2 = TestClass.sample_class_method(-10)
print(test_class_2.year, test_class_2.month, test_class_2.day)
# A regular instance
test_class_3 = TestClass(2000, 1, 1)
print(test_class_3.year, test_class_3.month, test_class_3.day)
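# Illustrative note (not in the original file): because the factory receives
# the class itself as `cls`, it also works for subclasses, e.g.:
#
#   class ChildClass(TestClass):
#       pass
#
#   child = ChildClass.sample_class_method()  # returns a ChildClass instance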
| true |
5da744fc1c2da4557980f9b55dc650b9208390ec
|
Python
|
N2BBrasil/text-processing
|
/text_processing/pos_processing/test_correct_text.py
|
UTF-8
| 322 | 2.75 | 3 |
[
"MIT"
] |
permissive
|
from .correct_text import CorrectText
def test_texts(texts):
for incorrect, correct in texts:
print(incorrect, correct)
        assert CorrectText().transform(incorrect) == correct
def test_sent_tokenizer():
assert 'Olá Como Vai' == CorrectText().captalize(
'olá como vai', lambda _: _.split(' ')
)
| true |
42b86c19ac74b3f8557b248a721031bb5a5d5783
|
Python
|
JahouNyan/learningpython
|
/wikipediaextract.py
|
UTF-8
| 933 | 3.71875 | 4 |
[] |
no_license
|
#Import the requests and json libraries
import requests
import json
#Ask the user for an article and strip it and replaces the spaces with underscores (_)
article = input("What wikipedia article do you want? ")
article = article.strip().replace(" ", "_")
#Format the API endpoint with the article
url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{article}"
print(article)
#Use requests.get() to get the data
r = requests.get(url)
print(r)
#Check if we got a 200 status code, otherwise abort the program
if r.status_code == 200:
print("ok")
else:
print ("error")
exit()
#Display the title of the article, description and extract
lines = r.text
lines = json.loads(lines)
title = lines["title"]
print(title)
if "description" not in lines:
print ("No Description")
else:
print(f"The description on wikipedia is: {'description'}")
extract = lines["extract"]
print(f"The extract is: {extract}")
| true |
b4814c270ee40d26f08ed7824c142ff402bd13ce
|
Python
|
mikecasey93/CSSI-Lucky7
|
/main.py
|
UTF-8
| 5,937 | 2.671875 | 3 |
[] |
no_license
|
import webapp2
import os
import random
import jinja2
import datetime
from database import seed_data
from app_models import Lottery
jinja_current_dir = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class DisplayHandler(webapp2.RequestHandler):
def get(self):
start_template = jinja_current_dir.get_template("templates/WelcomePage.html")
self.response.write(start_template.render())
#Verifies the user's age
def post(self):
user_age = self.request.get('Users-age')
if user_age < 18:
self.response.write("Sorry you cannot use our site")
else:
self.response.write("Welcome to our site we hope you enjoy")
class NumberInputHandler(webapp2.RequestHandler):
def get(self):
start_template = jinja_current_dir.get_template("templates/select.html")
self.response.write(start_template.render())
def post(self):
number_template = jinja_current_dir.get_template("templates/displaynumbers.html")
n1 = self.request.get('n1')
n2 = self.request.get('n2')
n3 = self.request.get('n3')
n4 = self.request.get('n4')
n5 = self.request.get('n5')
n6 = self.request.get('n6')
numDict = {"n1":n1, "n2":n2, "n3":n3, "n4":n4, "n5":n5, "n6":n6}
userList = []
userList.append(n1)
userList.append(n2)
userList.append(n3)
userList.append(n4)
userList.append(n5)
userList.append(n6)
self.response.write(number_template.render(numDict))
class OptionHandler(webapp2.RequestHandler):
def get(self):
start_template = jinja_current_dir.get_template("templates/option.html")
self.response.write(start_template.render())
class LoadPage(webapp2.RequestHandler):
def get(self):
seed_data()
#t = the_jinja_env.get_template('/templates/loader.html')
self.response.write("done")
#def post(self):
class RandomHandler(webapp2.RequestHandler):
def get(self):
start_template = jinja_current_dir.get_template("templates/random.html")
self.response.write(start_template.render())
def post(self):
winningNumber = []
newList = []
userList = []
for i in range(1,60):
winningNumber.append(i)
for j in range(1,7):
index = random.randint(0,len(winningNumber)-1)
newList.append(winningNumber[index])
winningNumber.pop(index)
winNumDict = {"wn1":newList[0], "wn2":newList[1], "wn3":newList[2], "wn4":newList[3], "wn5":newList[4], "wn6":newList[5]}
start_template = jinja_current_dir.get_template("templates/randomdisplay.html")
n1 = self.request.get('n1')
n2 = self.request.get('n2')
n3 = self.request.get('n3')
n4 = self.request.get('n4')
n5 = self.request.get('n5')
n6 = self.request.get('n6')
numDict = {"n1":n1, "n2":n2, "n3":n3, "n4":n4, "n5":n5, "n6":n6}
userList.append(int(n1))
userList.append(int(n2))
userList.append(int(n3))
userList.append(int(n4))
userList.append(int(n5))
userList.append(int(n6))
for l in range(len(userList)):
print userList[l], " ",userList[l] in winNumDict.values(),winNumDict.values()
print type(userList[l]), type(winNumDict.values()[0])
if userList[l] in winNumDict.values():
userList[l] = (int(userList[l]),"cl","match")
else:
userList[l] = (int(userList[l]),"cl","NoMatch")
self.response.write(start_template.render(winNumDict, userList=userList))
class ChooseDateHandler(webapp2.RequestHandler):
def get(self):
start_template = jinja_current_dir.get_template("templates/chooseDate.html")
self.response.write(start_template.render())
def post(self):
date = self.request.get('Date')
wn = Lottery.query().filter(Lottery.date >= date).get()
#wn = Lottery.query(Lottery.date >= date).order(Lottery.date).get()
win={"n1":wn.n1, "n2":wn.n2, "n3":wn.n3, "n4":wn.n4, "n5":wn.n5, "n6":wn.n6,"date": wn.date}
start_template = jinja_current_dir.get_template("templates/winningnumber.html")
n1 = int(self.request.get('n1'))
n2 = int(self.request.get('n2'))
n3 = int(self.request.get('n3'))
n4 = int(self.request.get('n4'))
n5 = int(self.request.get('n5'))
n6 = int(self.request.get('n6'))
numDict = {"n1":n1, "n2":n2, "n3":n3, "n4":n4, "n5":n5, "n6":n6}
for i in numDict:
if numDict[i] in win.values():
numDict[i] = (numDict[i],"cl","match")
else:
numDict[i] = (numDict[i],"cl","NoMatch")
d = {"win":win, "numDict":numDict}
if n1 != "" and\
n2 != "" and\
n3 != "" and\
n4 != "" and\
n5 != "" and\
n6 != "":
wm = Lottery(n1 = int(n1), n2 = int(n2), n3 = int(n3), n4 = int(n4), n5 = int(n5), n6 = int(n6), date = date)
self.response.write(start_template.render(d))
#for key in numDict:
#self.response.write(numDict[key])
#start_template = jinja_current_dir.get_template("templates/error.html")
#self.response.write(start_template.render())
app = webapp2.WSGIApplication([
('/', DisplayHandler), # age
('/load',LoadPage),
('/numberInput', NumberInputHandler), # manual entry for 6 numbers and a date
('/random', RandomHandler), # radom gate with manual entry for 6 num
('/chooseDate', ChooseDateHandler),
('/option', OptionHandler)
], debug=True)
| true |
2f5259450d59c94e82cbcf6712a8b04d3409576f
|
Python
|
ipinak/naftis
|
/test/tools.py
|
UTF-8
| 1,244 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/env python
# -*- coding:utf-8 -*-
import sys
import os
import HTMLTestRunner
import time
from unittest import makeSuite, TestSuite
__author__ = 'ipinak'
def run_tests(test_cases, location='', title=None, description=None):
suite = TestSuite()
    for tc in test_cases:
        suite.addTest(makeSuite(tc))
timestamp = time.strftime('%Y_%m_%d__%H_%M_%S')
filepath = os.getcwd() + "/" + location
if not os.path.exists(filepath):
os.mkdir(filepath)
buffer = file(filepath + '/TestReport_' + timestamp + '.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=buffer,
title=title,
description=description)
runner.run(suite)
def include_path(*directories):
"""
Include in the python path one or more directories
:param directories
"""
for dir in directories:
print("> including in python path: " + dir + "\n")
sys.path.append(dir)
def exclude_path(*directories):
"""
Exclude from the python path one or more directories
:param directories
"""
for dir in directories:
print("> excluding from python path: " + dir + "\n")
sys.path.remove(dir)
| true |
c489568e2e0ba9741b7fbb6d050438b192730631
|
Python
|
sidparasnis/client-server
|
/UDP_Client.py
|
UTF-8
| 894 | 2.84375 | 3 |
[] |
no_license
|
# UDP_Client
from socket import *
serverName = '127.0.0.1'
serverPort = 50069
clientSocket = socket(AF_INET, SOCK_DGRAM)
again = "Y"
while True:
message = input("\nInput int,int,operation or 'quit' to quit: ")
if message == 'quit':
break
print ("\n ")
print ("-->> Sending: " + message)
    d = 0.1
    # Retransmit with exponential backoff: double the timeout after each
    # failed attempt, giving up once it exceeds 2 seconds.
    while d < 2:
        try:
            clientSocket.settimeout(d)
            clientSocket.sendto(message.encode(), (serverName, serverPort))
            modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
            print ("<<-- At Client message received: " + modifiedMessage.decode() + "\n")
            clientSocket.settimeout(None)
            break
        except:
            print ('Resending...')
            d *= 2
print (" ")
print ("++++ Client Program Ends ++++")
print (" ")
clientSocket.close()
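# Hypothetical counterpart for local testing (an assumption -- the real server
# is not part of this file): a minimal UDP echo server on port 50069 is enough
# to exercise the timeout/retransmit loop above.
#
#   from socket import *
#   serverSocket = socket(AF_INET, SOCK_DGRAM)
#   serverSocket.bind(('127.0.0.1', 50069))
#   while True:
#       message, clientAddress = serverSocket.recvfrom(2048)
#       serverSocket.sendto(message.upper(), clientAddress)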
| true |
ed0ff468239cdb5edd9a7afea962c6b310c7e09d
|
Python
|
DaHuO/Supergraph
|
/codes/CodeJamCrawler/CJ/16_1_2_ManojPammi_2.py
|
UTF-8
| 508 | 2.609375 | 3 |
[] |
no_license
|
f=open("B-large.in",'r')
g=int(f.readline())
for d in range(g):
a=int(f.readline()[:-1])
g={}
for i in range(2*a-1):
m=f.readline()[:-1]
t=m.split()
for l in t:
if l in g:
g[l]=g[l]+1
else:
g[l]=1
c=[]
for h in g:
if (g[h]%2)!=0:
c.append(int(h))
rt=sorted(c)
y=[]
for v in rt:
y.append(str(v))
print "Case #"+str(d+1)+": "+" ".join(y)
| true |
5379cb328d150bda28397e4356438732db738082
|
Python
|
nxexox/python-rest-framework
|
/tests/test_fields.py
|
UTF-8
| 54,382 | 2.5625 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""
Fields testing
"""
import datetime
from unittest import TestCase
import six
from rest_framework.exceptions import SkipError
from rest_framework.serializers.exceptions import ValidationError
from rest_framework.serializers.fields import (
Field, CharField, IntegerField, FloatField, BooleanField, BooleanNullField, ListField,
TimeField, DateField, DateTimeField,
JsonField, DictField,
SerializerMethodField,
get_attribute
)
from rest_framework.serializers.validators import (
RequiredValidator, MaxValueValidator, MinValueValidator, MinLengthValidator, MaxLengthValidator
)
from tests.serializers_for_tests import SerializerMethodFieldDefault, SerializerMethodFieldSingle
class BaseFieldTestCase(TestCase):
"""
Testing base field class.
"""
field_class = Field
abstract_methods = {
'to_internal_value': {'data': None},
'to_representation': {'value': None},
} # Custom abstract methods.
requirement_arguments_for_field = {} # Required arguments for creating a field.
to_representation_cases = (
# data - arguments, return - return, params - arguments __init__, exceptions - expected errors
# {'data': {}, 'return': None, 'params': {}, 'exceptions': []}
{},
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
# data - arguments, return - return, params - arguments __init__, exceptions - expected errors
# {'data': {}, 'return': None, 'params': {}, 'exceptions': []}
{},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
# data - arguments, return - return, params - arguments __init__, exceptions - expected errors
# {'data': {}, 'return': None, 'params': {}, 'exceptions': []}
{},
) # Cases, to test the performance of `.run_validation()`.
field_error_messages = {} # Custom field error key list.
_fields_vals = {
'required': True, 'default': None, 'label': None, 'validators': [],
'error_messages': {}, 'default_error_messages': {}, 'default_validators': []
}
__error_messages = {'required': None, 'null': None} # Default list of errors.
# Class for testing for empty.
class Empty:
pass
@classmethod
def setUpClass(cls):
"""
We supplement the data for each field separately.
"""
cls.__error_messages.update(cls.field_error_messages)
def assert_base_fields(self, field, **additional_fields):
"""
Checks on all base fields and extras.
:param rest_framework.serializers.fields.Field field: Field object.
:param additional_fields: Dict additional attributes to check.
"""
copy_fields = self._fields_vals.copy()
copy_fields.update(additional_fields)
msg = 'Invalid value in %s for field: {}. Expected: {}, Reality: {}.' % field.__class__.__name__
for key, val in six.iteritems(copy_fields):
field_val = getattr(field, key, self.Empty())
# We try to check in three ways, depending on the type.
if isinstance(val, (bool, type(None))): # First single types.
assert val is field_val, msg.format(key, val, field_val)
elif isinstance(val, (six.string_types + six.integer_types + (float,))): # Now primitives.
assert val == field_val, msg.format(key, val, field_val)
else: # If the object is complex.
assert isinstance(val, type(field_val)), msg.format(key, val, type(field_val))
def create_params(self, **params):
"""
Creating parameters to create a field object.
:return: Parameters for creating a field.
:rtype: dict
"""
r_params = self.requirement_arguments_for_field.copy()
r_params.update(params)
return r_params
def assert_bind(self, field, field_name=None, parent=None, label=None):
"""
Checks the effects of the bind method.
:param rest_framework.serializers.fields.Field field: Object for check.
:param str field_name: Field name.
:param object parent: Parent field.
:param str label: Label field.
"""
assert field.label == label, '`.label` expected {}, reality {}.'.format(label, field.label)
assert field.parent == parent, '`.parent` expected {}, reality {}.'.format(parent, field.parent)
assert field.field_name == field_name, \
'`.field_name` expected {}, reality {}.'.format(field_name, field.field_name)
def create_method_for_get_attribute(self, field_name=None, call_bind=True,
default=None, required=None, attr=None, set_self=True, **kwargs):
"""
Creating an attribute on an object to test `field.get_attribute ()`
:param str field_name: Field name.
:param bool call_bind: Do I need to call the bind method in a field?
:param object default: Default value.
:param bool required: Is required field?
:param object attr: The attribute itself that we put.
:param bool set_self: Do I need to set the link to the class parent?.
:return: Created and ready for testing Field.
:rtype: rest_framework.serializers.fields.Field
"""
field = self.field_class(**self.create_params(required=required, default=default, **kwargs))
if call_bind:
field.bind(field_name, self)
if set_self:
setattr(self, field_name, attr)
return field
def __test_method_cases(self, method_name):
"""
Testing methods by cases.
:param str method_name: Method name for testing by cases.
"""
# Check all cases
for case in getattr(self, '%s_cases' % method_name, []):
# Skip case.
if not case:
continue
try:
# Get the data.
data, result = case.get('data', {}), case.get('return', None)
params, exceptions = case.get('params', {}), case.get('exceptions', {})
data = data or {} # Transform None into dict.
# Building a field and looking for a method to test..
field = self.field_class(**self.create_params(**params))
method = getattr(field, method_name, self.Empty())
if isinstance(method, self.Empty):
self.fail('Testing by cases failed. Method not found `{}` have class `{}`.'.format(
method_name, field.__class__.__name__
))
# If errors are expected.
if exceptions:
try:
res = method(**data)
self.fail('In method `{}.{}()` case `{}` not raise error. Method return: `{}`.'.format(
field.__class__.__name__, method_name, case, res
))
except tuple(exceptions):
pass
else:
# If no errors are expected.
res = method(**data)
assert res == result, \
'In method `{}.{}()` case {} return incorrect result `{}`'.format(
field.__class__.__name__, method_name, case, res
)
except Exception as e:
self.fail('During the inspection of the case `{}` for method `{}.{}` an unexpected error occurred: `{}: {}`'.format(
case, self.field_class.__class__.__name__, method_name, e.__class__.__name__, e
))
def test_default_create(self):
"""
Testing creation with default settings.
"""
self.assert_base_fields(self.field_class(**self.create_params())) # First make default.
# Now create with settings.
params = self.create_params(required=False)
self.assert_base_fields(self.field_class(**params), **params)
# See how default affects required..
params = self.create_params(default='')
self.assert_base_fields(self.field_class(**params), required=False, **params)
# See how required affects validators..
field = self.field_class(**self.create_params(required=True))
assert isinstance(field.validators, list), \
'`.validators` must be list, reality {}'.format(type(field.validators))
assert len(field.validators) == 1, \
'In `.validators` must be 1 validator, reality {}'.format(len(field.validators))
assert isinstance(field.validators[0], RequiredValidator), \
'In `.validators` must be `RequiredValidator`. Reality: `{}`'.format(type(field.validators[0]))
# Now we check that there is no validator.
field = self.field_class(**self.create_params(required=False))
assert isinstance(field.validators, list), \
'`.validators` must be list, reality {}'.format(type(field.validators))
assert len(field.validators) == 0, \
'In `.validators` there should be no validators, reality `{}`'.format(len(field.validators))
# Check for error messages.
field, messages_keys = self.field_class(**self.create_params()), self.__error_messages
for key in field.error_messages:
assert key in messages_keys, 'In `.error_messages` must be key `{}`.'.format(key)
# We update the dictionary of errors, and we try with a custom error.
new_error_message = self.__error_messages.copy()
new_error_message['test'] = None
field = self.field_class(**self.create_params(error_messages={'test': 'test'}))
messages_keys = new_error_message
for key in field.error_messages:
assert key in messages_keys, 'In `.error_messages` must be key `{}`.'.format(key)
def test_bind(self):
"""
Testing bind method.
"""
# First default.
field = self.field_class(**self.create_params())
field.bind('test_label', self)
self.assert_bind(field, 'test_label', self, 'Test label')
# Now change label.
field = self.field_class(**self.create_params(label='test_label'))
field.bind('test_label', self)
self.assert_bind(field, 'test_label', self, 'test_label')
def test_fail_field_validation(self):
"""
Testing fail_field_validation method.
"""
# We test without our errors.
field = self.field_class(**self.create_params())
try:
field.fail_field_validation('required')
self.fail('`.fail_field_validation()` must throw as exception `ValidationError`.')
except ValidationError:
pass
try:
field.fail_field_validation('test')
self.fail('`.fail_field_validation()` must throw as exception `AssertionError`.')
except AssertionError:
pass
# Now add custom error message
field = self.field_class(**self.create_params(error_messages={'test': '{test}-test'}))
try:
field.fail_field_validation('test', test='test')
self.fail('`.fail_field_validation()` must throw as exception `ValidationError`.')
except ValidationError as e:
assert e.detail == 'test-test', 'The error message should be `{}`, reality `{}`.'.format(
'test-test', e.detail
)
def test_fail_validate(self):
"""
        Testing fail_validate method.
"""
field = self.field_class(**self.create_params())
detail = {'error': 'test'}
try:
field.fail_validate(detail=detail)
            self.fail('`.fail_validate()` must throw as exception `ValidationError`.')
except ValidationError as e:
self.assertEqual(
e.detail, detail,
'ValidationError.detail not equal source error. '
'Should be: `{}`, reality: `{}`.'.format(detail, e.detail)
)
self.assertEqual(
e.status, 400,
'ValidationError.status = `{}`. This is not 400.'.format(e.status)
)
status = 404
try:
field.fail_validate(detail=detail, status=status)
            self.fail('`.fail_validate()` must throw as exception `ValidationError`.')
except ValidationError as e:
self.assertEqual(
e.status, status,
'ValidationError.status not equal source status. '
'Should be: `{}`, reality: `{}`.'.format(status, e.status)
)
def test_abstract_methods(self):
"""
Testing abstract methods.
"""
field = self.field_class(**self.create_params())
for method_name, method_params in six.iteritems(self.abstract_methods):
try:
getattr(field, method_name, lambda: None)(**method_params)
self.fail('Method `.{}` must throw as exception `NotImplementedError`.'.format(method_name))
except NotImplementedError:
pass
def test_to_internal_value(self):
"""
A test for converting data to a valid python object.
"""
if 'to_internal_value' not in self.abstract_methods:
self.__test_method_cases('to_internal_value')
def test_to_representation(self):
"""
Test data conversion to a valid JSON object.
"""
if 'to_representation' not in self.abstract_methods:
self.__test_method_cases('to_representation')
def test_get_default(self):
"""
Testing the get_default method.
"""
field = self.field_class(**self.create_params())
res = field.get_default()
assert res is None, '`.get_default()` must return None, reality: {}.'.format(res)
field = self.field_class(**self.create_params(default=1))
res = field.get_default()
assert res == 1, '`.get_default()` must return 1, reality: {}.'.format(res)
field = self.field_class(**self.create_params(default=lambda: 100))
res = field.get_default()
assert res == 100, '`.get_default()` must return 100, reality: {}.'.format(res)
def test_get_attribute(self):
"""
Testing get_attribute method.
"""
params = dict(
field_name='test_get_attribute_field', call_bind=True, required=False, default=None,
attr=self.test_get_attribute, set_self=True
)
# First normal work.
res = self.create_method_for_get_attribute(**params).get_attribute(self)
assert res == self.test_get_attribute, \
'`.get_attribute()` must return {}, reality {}.'.format(self.test_get_attribute, res)
# Now we try non-existent to look for and return default.
params.update(default=100, call_bind=False, attr=None, set_self=False)
res = self.create_method_for_get_attribute(**params).get_attribute(self)
assert res == 100, '`.get_attribute()` must return 100, reality {}.'.format(res)
# We see that if the field is mandatory and there is no default, it throws the exception `SkipError`.
params.update(required=True, default=None)
try:
self.create_method_for_get_attribute(**params).get_attribute(self)
self.fail('`.get_attribute()` must throw as exception `SkipError`.')
except SkipError:
pass
# Now we try to get the original exception.
params.update(field_name=None, required=False, call_bind=True)
try:
res = self.create_method_for_get_attribute(label='test', **params).get_attribute(self)
self.fail('`.get_attribute()` must throw as exception `TypeError`, return `{}`.'.format(res))
except TypeError:
pass
except Exception as e:
self.fail('`.get_attribute()` must throw as exception `TypeError`, reality {}.'.format(type(e)))
def test_validate_empty_values(self):
"""
Testing validation on an empty type.
"""
# First default settings.
field = self.field_class(**self.create_params(required=False))
is_empty, data = field.validate_empty_values(None)
assert is_empty is True, '`.validate_empty_values()` must return True.'
assert data is None, '`.validate_empty_values()` must return None.'
# Now we check the response to the binding.
field = self.field_class(**self.create_params(required=True))
try:
field.validate_empty_values(None)
self.fail('`.validate_empty_values()` must throw as exception `ValidationError`.')
except ValidationError:
pass
# Now we check for normal data
field = self.field_class(**self.create_params(required=True))
is_empty, data = field.validate_empty_values(123)
assert is_empty is False, '`.validate_empty_values()` must return False.'
assert data == 123, '`.validate_empty_values()` must return 123.'
def test_run_validation(self):
"""
Testing run_validation method
"""
self.__test_method_cases('run_validation')
def test_run_validation_base_field(self):
"""
Testing start validation for base field.
"""
to_internal_value = lambda x: x # We do a mock for internal function.
# Check on default settings.
field = self.field_class(**self.create_params())
setattr(field, 'to_internal_value', to_internal_value)
res = field.run_validation(123)
assert res == 123, '`.run_validation()` must return 123.'
# Check when the field is required.
field = self.field_class(**self.create_params(required=True))
setattr(field, 'to_internal_value', to_internal_value)
res = field.run_validation(123)
assert res == 123, '`.run_validation()` must return 123.'
# Now we try to make validator work.
try:
field.run_validation(None)
self.fail('`.run_validation()` must throw as exception `ValidationError`.')
except ValidationError:
pass
def test_run_validators(self):
"""
Testing work validators
"""
# Check without validators.
field = self.field_class(**self.create_params(required=False, validators=[]))
field.run_validators(123)
# Check with default validators.
field = self.field_class(**self.create_params(required=True, validators=[]))
field.run_validators(123)
try:
field.run_validators(None)
self.fail('`.run_validators()` must throw as exception `ValidationError`.')
except ValidationError:
pass
# Check with custom validators.
def test_validator(value):
if value == 1:
raise ValidationError(1)
field = self.field_class(**self.create_params(required=True, validators=[test_validator]))
field.run_validators(10)
try:
field.run_validators(1)
self.fail('`.run_validators()` must throw as exception `ValidationError`.')
except ValidationError:
pass
class CharFieldTest(BaseFieldTestCase):
"""
Testing CharField.
"""
field_class = CharField
abstract_methods = {} # Custom abstract methods.
field_error_messages = {
'invalid': None,
'blank': None,
'min_length': None,
'max_length': None
} # Custom errors list.
to_representation_cases = (
{'data': {'value': '123'}, 'return': '123'},
{'data': {'value': 123}, 'return': '123'},
{'data': {'value': 'qwe'}, 'return': 'qwe'},
{'data': {'value': None}, 'return': None},
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': '123'}, 'return': '123'},
{'data': {'data': True}, 'exceptions': (ValidationError,)},
{'data': {'data': BaseFieldTestCase.Empty()}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)}
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': '123'}, 'return': '123'},
{'data': {'data': 123}, 'return': '123'},
{'data': {'data': 'qwe'}, 'return': 'qwe'},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'params': {'required': False}, 'return': None},
{'data': {'data': ''}, 'params': {'allow_blank': False}, 'exceptions': (ValidationError,)},
{'data': {'data': ''}, 'params': {'allow_blank': True}, 'return': ''},
{'data': {'data': ' '}, 'params': {'allow_blank': True, 'trim_whitespace': True}, 'return': ''},
{'data': {'data': ' '}, 'params': {'allow_blank': False, 'trim_whitespace': True}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.run_validation()`.
def test_init(self):
"""
Testing create.
"""
params = dict(max_length=10, min_length=20, trim_whitespace=False, allow_blank=True, required=True)
field = self.field_class(**params)
# Look at validators.
assert len(field.validators) == 3, '`.validators` must have length 3, reality {}'.format(len(field.validators))
for v in field.validators:
assert isinstance(v, (RequiredValidator, MaxLengthValidator, MinLengthValidator)), \
'Validator must be `RequiredValidator, MaxLengthValidator, MinLengthValidator`, reality `{}`'.format(
type(v)
)
# Look that without them too it is possible.
params.update(max_length=None, min_length=None)
field = self.field_class(**params)
assert len(field.validators) == 1, '`.validators` must have length 1, reality {}'.format(len(field.validators))
for v in field.validators:
assert isinstance(v, RequiredValidator), 'Validator must be `RequiredValidator`, reality `{}`'.format(type(v))
class TestIntegerField(BaseFieldTestCase):
"""
Testing IntegerField.
"""
field_class = IntegerField
abstract_methods = {} # Custom abstract methods.
field_error_messages = {
'invalid': None,
'min_value': None,
'max_value': None,
'max_string_length': None
} # Custom errors list.
to_representation_cases = (
{'data': {'value': 123}, 'return': 123},
{'data': {'value': '123'}, 'return': 123},
{'data': {'value': 'qwe'}, 'exceptions': (ValueError,)},
{'data': {'value': None}, 'return': None},
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': 123}, 'return': 123},
{'data': {'data': '123'}, 'return': 123},
{'data': {'data': '123.0'}, 'return': 123},
{'data': {'data': '123.1'}, 'exceptions': (ValidationError,)},
{'data': {'data': 'qwe'}, 'exceptions': (ValidationError,)},
{'data': {'data': False}, 'exceptions': (ValidationError,)},
{'data': {'data': '11' * IntegerField.MAX_STRING_LENGTH}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': 123}, 'return': 123},
{'data': {'data': '123'}, 'return': 123},
{'data': {'data': '123.0'}, 'return': 123},
{'data': {'data': '123.1'}, 'exceptions': (ValidationError,)},
{'data': {'data': 'qwe'}, 'exceptions': (ValidationError,)},
{'data': {'data': False}, 'exceptions': (ValidationError,)},
{'data': {'data': '11' * IntegerField.MAX_STRING_LENGTH}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': 10}, 'params': {'min_value': 5}, 'return': 10},
{'data': {'data': 10}, 'params': {'min_value': 10}, 'return': 10},
{'data': {'data': 10}, 'params': {'min_value': 11}, 'exceptions': (ValidationError,)},
{'data': {'data': 10}, 'params': {'max_value': 11}, 'return': 10},
{'data': {'data': 10}, 'params': {'max_value': 10}, 'return': 10},
{'data': {'data': 10}, 'params': {'max_value': 5}, 'exceptions': (ValidationError,)},
{'data': {'data': 10}, 'params': {'max_value': 11, 'min_value': 5}, 'return': 10},
{'data': {'data': 10}, 'params': {'max_value': 10, 'min_value': 10}, 'return': 10},
{'data': {'data': 10}, 'params': {'max_value': 5, 'min_value': 5}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.run_validation()`.
class TestFloatField(BaseFieldTestCase):
"""
Testing FloatField.
"""
field_class = FloatField
abstract_methods = {} # Custom abstract methods.
field_error_messages = {
'invalid': None,
'min_value': None,
'max_value': None,
'max_string_length': None
} # Custom errors list.
to_representation_cases = (
{'data': {'value': 123}, 'return': 123.0},
{'data': {'value': '123'}, 'return': 123.0},
{'data': {'value': 'qwe'}, 'exceptions': (ValueError,)},
{'data': {'value': None}, 'return': None},
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': 123}, 'return': 123.0},
{'data': {'data': '123'}, 'return': 123.0},
{'data': {'data': '123.0'}, 'return': 123.0},
{'data': {'data': '123.1'}, 'return': 123.1},
{'data': {'data': 'qwe'}, 'exceptions': (ValidationError,)},
{'data': {'data': False}, 'return': 0.0},
{'data': {'data': '11' * IntegerField.MAX_STRING_LENGTH}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': 123}, 'return': 123.0},
{'data': {'data': '123'}, 'return': 123.0},
{'data': {'data': '123.0'}, 'return': 123.0},
{'data': {'data': '123.1'}, 'return': 123.1},
{'data': {'data': 'qwe'}, 'exceptions': (ValidationError,)},
{'data': {'data': False}, 'return': 0.0},
{'data': {'data': '11' * IntegerField.MAX_STRING_LENGTH}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': 10}, 'params': {'min_value': 5}, 'return': 10.0},
{'data': {'data': 10}, 'params': {'min_value': 10}, 'return': 10.0},
{'data': {'data': 10}, 'params': {'min_value': 11}, 'exceptions': (ValidationError,)},
{'data': {'data': 10}, 'params': {'max_value': 11}, 'return': 10.0},
{'data': {'data': 10}, 'params': {'max_value': 10}, 'return': 10.0},
{'data': {'data': 10}, 'params': {'max_value': 5}, 'exceptions': (ValidationError,)},
{'data': {'data': 10}, 'params': {'max_value': 11, 'min_value': 5}, 'return': 10.0},
{'data': {'data': 10}, 'params': {'max_value': 10, 'min_value': 10}, 'return': 10.0},
{'data': {'data': 10}, 'params': {'max_value': 5, 'min_value': 5}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.run_validation()`.
class TestBooleanField(BaseFieldTestCase):
"""
Testing BooleanField.
"""
field_class = BooleanField
abstract_methods = {} # Custom abstract methods.
field_error_messages = {'invalid': None}
to_representation_cases = (
{'data': {'value': True}, 'return': True},
{'data': {'value': False}, 'return': False},
{'data': {'value': None}, 'return': False},
{'data': {'value': 'Yes'}, 'return': True},
{'data': {'value': 1}, 'return': True},
{'data': {'value': 'No'}, 'return': False},
{'data': {'value': 0}, 'return': False},
{'data': {'value': 'null'}, 'return': True},
{'data': {'value': ''}, 'return': False},
{'data': {'value': '100'}, 'return': True}
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': True}, 'return': True},
{'data': {'data': False}, 'return': False},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': 'Yes'}, 'return': True},
{'data': {'data': 1}, 'return': True},
{'data': {'data': 'No'}, 'return': False},
{'data': {'data': 0}, 'return': False},
{'data': {'data': 'null'}, 'exceptions': (ValidationError,)},
{'data': {'data': ''}, 'exceptions': (ValidationError,)},
{'data': {'data': '100'}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': True}, 'return': True},
{'data': {'data': False}, 'return': False},
{'data': {'data': None}, 'params': {'required': False}, 'return': None},
{'data': {'data': 'Yes'}, 'return': True},
{'data': {'data': 1}, 'return': True},
{'data': {'data': 'No'}, 'return': False},
{'data': {'data': 0}, 'return': False},
{'data': {'data': 'null'}, 'params': {'required': False}, 'exceptions': (ValidationError,)},
{'data': {'data': ''}, 'params': {'required': False}, 'exceptions': (ValidationError,)},
{'data': {'data': '100'}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.run_validation()`.
class TestBooleanNullField(BaseFieldTestCase):
"""
Testing BooleanNullField.
"""
field_class = BooleanNullField
abstract_methods = {} # Custom abstract methods.
field_error_messages = {'invalid': None}
to_representation_cases = (
{'data': {'value': True}, 'return': True},
{'data': {'value': False}, 'return': False},
{'data': {'value': None}, 'return': None},
{'data': {'value': 'Yes'}, 'return': True},
{'data': {'value': 1}, 'return': True},
{'data': {'value': 'No'}, 'return': False},
{'data': {'value': 0}, 'return': False},
{'data': {'value': 'null'}, 'return': None},
{'data': {'value': ''}, 'return': None},
{'data': {'value': '100'}, 'return': True}
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': True}, 'return': True},
{'data': {'data': False}, 'return': False},
{'data': {'data': None}, 'return': None},
{'data': {'data': 'Yes'}, 'return': True},
{'data': {'data': 1}, 'return': True},
{'data': {'data': 'No'}, 'return': False},
{'data': {'data': 0}, 'return': False},
{'data': {'data': 'null'}, 'return': None},
{'data': {'data': ''}, 'return': None},
{'data': {'data': '100'}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': True}, 'return': True},
{'data': {'data': False}, 'return': False},
{'data': {'data': None}, 'params': {'required': False}, 'return': None},
{'data': {'data': 'Yes'}, 'return': True},
{'data': {'data': 1}, 'return': True},
{'data': {'data': 'No'}, 'return': False},
{'data': {'data': 0}, 'return': False},
{'data': {'data': 'null'}, 'params': {'required': False}, 'return': None},
{'data': {'data': ''}, 'params': {'required': False}, 'return': None},
{'data': {'data': '100'}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.run_validation()`.
class TestListField(BaseFieldTestCase):
"""
Testing ListField.
"""
field_class = ListField
abstract_methods = {} # Custom abstract methods.
requirement_arguments_for_field = {
'child': CharField(required=False)
} # Required arguments for creating a field.
field_error_messages = {
'not_a_list': None,
'empty': None,
'min_length': None,
'max_length': None
}
_fields_vals = {
'required': True, 'default': None, 'label': None, 'validators': [],
'error_messages': {}, 'default_error_messages': {}, 'default_validators': [],
'child': CharField()
}
to_representation_cases = (
{'data': {'value': ['123', '123', '123']}, 'return': ['123', '123', '123']},
{'data': {'value': [123, 123, 123]}, 'return': ['123', '123', '123']},
{'data': {'value': [True, True, True]}, 'return': ['True', 'True', 'True']},
{'data': {'value': ['123', 123, True, None]}, 'return': ['123', '123', 'True', None]},
{'data': {'value': None}, 'return': []},
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': ''}, 'exceptions': (ValidationError,)},
{'data': {'data': {}}, 'exceptions': (ValidationError,)},
{'data': {'data': BaseFieldTestCase.Empty()}, 'exceptions': (ValidationError,)},
{'data': {'data': []}, 'params': {'allow_empty': True}, 'return': []},
{'data': {'data': []}, 'params': {'allow_empty': False}, 'exceptions': (ValidationError,)},
{'data': {'data': ['123', '123', '123']}, 'return': ['123', '123', '123']},
{'data': {'data': [123, 123, 123]}, 'return': ['123', '123', '123']},
# Errors will be here, because CharField does not get True False No as a string.
{'data': {'data': [True, True, True]}, 'exceptions': (ValidationError,)},
{'data': {'data': ['123', 123, True, None]}, 'exceptions': (ValidationError,)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': ''}, 'exceptions': (ValidationError,)},
{'data': {'data': {}}, 'exceptions': (ValidationError,)},
{'data': {'data': BaseFieldTestCase.Empty()}, 'exceptions': (ValidationError,)},
{'data': {'data': []}, 'params': {'allow_empty': True}, 'return': []},
{'data': {'data': []}, 'params': {'allow_empty': False}, 'exceptions': (ValidationError,)},
{'data': {'data': ['123', '123', '123']}, 'return': ['123', '123', '123']},
{'data': {'data': [123, 123, 123]}, 'return': ['123', '123', '123']},
# Errors will be here, because CharField does not get True False No as a string.
{'data': {'data': [True, True, True]}, 'exceptions': (ValidationError,)},
{'data': {'data': ['123', 123, True, None]}, 'exceptions': (ValidationError,)},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 2, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 3, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 5}, 'exceptions': (ValidationError,)},
{'data': {'data': [1, 1, 1]}, 'params': {'max_length': 5, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'max_length': 3, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'max_length': 2}, 'exceptions': (ValidationError,)},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 5, 'max_length': 10}, 'exceptions': (ValidationError,)},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 3, 'max_length': 5, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 1, 'max_length': 5, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 1, 'max_length': 3, 'child': IntegerField()}, 'return': [1, 1, 1]},
{'data': {'data': [1, 1, 1]}, 'params': {'min_length': 1, 'max_length': 2}, 'exceptions': (ValidationError,)},
{'data': {'data': [1, True, '1']}, 'params': {'child': None}, 'return': [1, True, '1']} # Check empty child field.
) # Cases, to test the performance of `.run_validation()`.
class TestTimeField(BaseFieldTestCase):
"""
Testing TimeField.
"""
field_class = TimeField
abstract_methods = {} # Custom abstract methods.
requirement_arguments_for_field = {} # Required arguments for creating a field.
field_error_messages = {
'invalid': None,
'time': None
}
to_representation_cases = (
{'data': {'value': datetime.time()}, 'return': '00:00:00'},
{'data': {'value': datetime.time(10, 10)}, 'return': '10:10:00'},
{'data': {'value': '10:10:10'}, 'return': '10:10:10'},
{'data': {'value': 'test'}, 'return': 'test'}, # TODO: fix
{'data': {'value': None}, 'return': None},
{"data": {'value': type('object', (object,), {})}, 'exceptions': (AttributeError,)}, # Not valid object.
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': '00:00:00'}, 'return': datetime.time(0, 0, 0)},
{'data': {'data': '10:10:10'}, 'return': datetime.time(10, 10, 10)},
{'data': {'data': '10:10'}, 'exceptions': (ValidationError,)},
{'data': {'data': datetime.time()}, 'return': datetime.time()},
{'data': {'data': datetime.time(10, 10)}, 'return': datetime.time(10, 10)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': '00:00:00'}, 'return': datetime.time(0, 0, 0)},
{'data': {'data': '10:10:10'}, 'return': datetime.time(10, 10, 10)},
{'data': {'data': '10:10'}, 'exceptions': (ValidationError,)},
{'data': {'data': datetime.time()}, 'return': datetime.time()},
{'data': {'data': datetime.time(10, 10)}, 'return': datetime.time(10, 10)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'params': {'required': False}, 'return': None},
{'data': {'data': None}, 'params': {'default': datetime.time()}, 'return': datetime.time()},
) # Cases, to test the performance of `.run_validation()`.
class TestDateField(BaseFieldTestCase):
"""
Testing DateField.
"""
field_class = DateField
abstract_methods = {} # Custom abstract methods.
requirement_arguments_for_field = {} # Required arguments for creating a field.
field_error_messages = {
'invalid': None,
'datetime': None
}
to_representation_cases = (
{'data': {'value': datetime.date(2018, 1, 1)}, 'return': '2018-01-01'},
{'data': {'value': datetime.date(2018, 10, 10)}, 'return': '2018-10-10'},
{'data': {'value': datetime.date(2018, 10, 10)}, 'params': {'format': '%d.%m.%Y'}, 'return': '10.10.2018'},
{'data': {'value': datetime.datetime.now()}, 'exceptions': (AssertionError,)},
{'data': {'value': 'test'}, 'return': 'test'}, # TODO: fix
{'data': {'value': None}, 'return': None},
{"data": {'value': type('object', (object,), {})}, 'exceptions': (AttributeError,)}, # Not valid object.
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': '2018-01-01'}, 'return': datetime.date(2018, 1, 1)},
{'data': {'data': datetime.date(2018, 1, 1)}, 'return': datetime.date(2018, 1, 1)},
{'data': {'data': '2018-10'}, 'exceptions': (ValidationError,)},
{'data': {'data': '1.1.2018'}, 'params': {'input_format': '%d.%m.%Y'}, 'return': datetime.date(2018, 1, 1)},
{'data': {'data': datetime.datetime.now()}, 'exceptions': (ValidationError,)},
{'data': {'data': '2018-10'}, 'params': {'input_format': '%Y-%m'}, 'return': datetime.date(2018, 10, 1)},
) # Cases, to test the performance of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': '2018-01-01'}, 'return': datetime.date(2018, 1, 1)},
{'data': {'data': datetime.date(2018, 1, 1)}, 'return': datetime.date(2018, 1, 1)},
{'data': {'data': '2018-10'}, 'exceptions': (ValidationError,)},
{'data': {'data': '1.1.2018'}, 'params': {'input_format': '%d.%m.%Y'}, 'return': datetime.date(2018, 1, 1)},
{'data': {'data': datetime.datetime.now()}, 'exceptions': (ValidationError,)},
{'data': {'data': '2018-10'}, 'params': {'input_format': '%Y-%m'}, 'return': datetime.date(2018, 10, 1)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'params': {'required': False}, 'return': None},
{'data': {'data': None}, 'params': {'default': datetime.date(2018, 1, 1)}, 'return': datetime.date(2018, 1, 1)},
) # Cases, to test the performance of `.run_validation()`.
class TestDateTimeField(BaseFieldTestCase):
"""
Testing DateTimeField.
"""
field_class = DateTimeField
abstract_methods = {} # Custom abstract methods.
requirement_arguments_for_field = {} # Required arguments for creating a field.
field_error_messages = {
'invalid': None,
'date': None,
}
__now = datetime.datetime.now()
__now_for_test = datetime.datetime(__now.year, __now.month, __now.day, __now.hour, __now.minute, __now.second)
to_representation_cases = (
{'data': {'value': datetime.datetime(2018, 1, 1)}, 'return': '2018-01-01 00:00:00'},
{'data': {'value': datetime.datetime(2018, 10, 10)}, 'return': '2018-10-10 00:00:00'},
{'data': {'value': datetime.datetime(2018, 10, 10)}, 'params': {'format': '%d.%m.%Y'}, 'return': '10.10.2018'},
{'data': {'value': datetime.datetime(2018, 1, 1, 1, 1, 1)}, 'return': '2018-01-01 01:01:01'},
{'data': {'value': datetime.datetime(2018, 1, 1, 1, 1, 1)}, 'params': {'format': '%d.%m.%Y %H-%M-%S'}, 'return': '01.01.2018 01-01-01'},
{'data': {'value': 'test'}, 'return': 'test'}, # TODO: fix
{'data': {'value': None}, 'return': None},
{"data": {'value': type('object', (object,), {})}, 'exceptions': (AttributeError,)}, # Not valid object.
) # Cases, to test the performance of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': '2018-01-01 00:00:00'}, 'return': datetime.datetime(2018, 1, 1)},
{'data': {'data': datetime.datetime(2018, 1, 1)}, 'return': datetime.datetime(2018, 1, 1)},
{'data': {'data': '2018-10'}, 'exceptions': (ValidationError,)},
{'data': {'data': '1.1.2018'}, 'params': {'input_format': '%d.%m.%Y'}, 'return': datetime.datetime(2018, 1, 1)},
{'data': {'data': __now_for_test.strftime(DateTimeField.input_format)}, 'return': __now_for_test},
{'data': {'data': '2018-10'}, 'params': {'input_format': '%Y-%m'}, 'return': datetime.datetime(2018, 10, 1)},
    ) # Cases used to test the behaviour of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': '2018-01-01 00:00:00'}, 'return': datetime.datetime(2018, 1, 1)},
{'data': {'data': datetime.datetime(2018, 1, 1)}, 'return': datetime.datetime(2018, 1, 1)},
{'data': {'data': '2018-10'}, 'exceptions': (ValidationError,)},
{'data': {'data': '1.1.2018'}, 'params': {'input_format': '%d.%m.%Y'}, 'return': datetime.datetime(2018, 1, 1)},
{'data': {'data': __now_for_test.strftime(DateTimeField.input_format)}, 'return': __now_for_test},
{'data': {'data': '2018-10'}, 'params': {'input_format': '%Y-%m'}, 'return': datetime.datetime(2018, 10, 1)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'params': {'required': False}, 'return': None},
{'data': {'data': None}, 'params': {'default': datetime.datetime(2018, 1, 1)}, 'return': datetime.datetime(2018, 1, 1)},
    ) # Cases used to test the behaviour of `.run_validation()`.
class TestJsonField(BaseFieldTestCase):
"""
Testing JsonField.
"""
field_class = JsonField
abstract_methods = {} # Custom abstract methods.
requirement_arguments_for_field = {} # Required arguments for creating a field.
field_error_messages = {
'invalid': None,
}
to_representation_cases = (
{'data': {'value': '123'}, 'return': '"123"'},
{'data': {'value': 123}, 'return': '123'},
{'data': {'value': {}}, 'return': '{}'},
{'data': {'value': []}, 'return': '[]'},
{'data': {'value': {'123': 123}}, 'return': '{"123": 123}'},
{'data': {'value': {'123': [123, '123']}}, 'return': '{"123": [123, "123"]}'},
{'data': {'value': lambda: None}, 'exceptions': (ValidationError,)},
{'data': {'value': {123: 123}}, 'return': '{"123": 123}'},
{'data': {'value': None}, 'return': 'null'},
    ) # Cases used to test the behaviour of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': {}}, 'return': {}},
{'data': {'data': []}, 'return': []},
{'data': {'data': {123: 123}}, 'return': {123: 123}},
{'data': {'data': [123]}, 'return': [123]},
{'data': {'data': {123: [123]}}, 'return': {123: [123]}},
{'data': {'data': [{123: 123}]}, 'return': [{123: 123}]},
{'data': {'data': '123'}, 'return': 123},
{'data': {'data': 'asd'}, 'exceptions': (ValidationError,)},
{'data': {'data': 123}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)}
    ) # Cases used to test the behaviour of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': {}}, 'return': {}},
{'data': {'data': []}, 'return': []},
{'data': {'data': {123: 123}}, 'return': {123: 123}},
{'data': {'data': [123]}, 'return': [123]},
{'data': {'data': {123: [123]}}, 'return': {123: [123]}},
{'data': {'data': [{123: 123}]}, 'return': [{123: 123}]},
{'data': {'data': '123'}, 'return': 123},
{'data': {'data': 'asd'}, 'exceptions': (ValidationError,)},
{'data': {'data': 123}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'params': {'required': False}, 'return': None}, # TODO: FIXME
    ) # Cases used to test the behaviour of `.run_validation()`.
class TestDictField(BaseFieldTestCase):
"""
Testing DictField.
"""
field_class = DictField
abstract_methods = {} # Custom abstract methods.
requirement_arguments_for_field = {} # Required arguments for creating a field.
field_error_messages = {
'not_a_dict': None,
}
to_representation_cases = (
{'data': {'value': {}}, 'return': {}},
{'data': {'value': {'123': 123}}, 'return': {'123': 123}},
{'data': {'value': {'123': [123, '123']}}, 'return': {'123': [123, '123']}},
{'data': {'value': '123'}, 'exceptions': (AttributeError,)},
{'data': {'value': 123}, 'exceptions': (AttributeError,)},
{'data': {'value': lambda: None}, 'exceptions': (AttributeError,)},
{'data': {'value': {123: 123}}, 'return': {'123': 123}},
{'data': {'value': None}, 'return': None},
{'data': {'value': {123: [123]}}, 'params': {'child': IntegerField()}, 'exceptions': (TypeError,)}
    ) # Cases used to test the behaviour of `.to_representation()`.
to_internal_value_cases = (
{'data': {'data': {}}, 'return': {}},
{'data': {'data': {123: 123}}, 'return': {'123': 123}},
{'data': {'data': {123: [123]}}, 'return': {'123': [123]}},
{'data': {'data': '123'}, 'exceptions': (ValidationError,)},
{'data': {'data': 'asd'}, 'exceptions': (ValidationError,)},
{'data': {'data': 123}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': {123: [123]}}, 'params': {'child': IntegerField()}, 'exceptions': (ValidationError,)}
    ) # Cases used to test the behaviour of `.to_internal_value()`.
run_validation_cases = (
{'data': {'data': {}}, 'return': {}},
{'data': {'data': {123: 123}}, 'return': {'123': 123}},
{'data': {'data': {123: [123]}}, 'return': {'123': [123]}},
{'data': {'data': '123'}, 'exceptions': (ValidationError,)},
{'data': {'data': 'asd'}, 'exceptions': (ValidationError,)},
{'data': {'data': 123}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'exceptions': (ValidationError,)},
{'data': {'data': None}, 'params': {'required': False}, 'return': None}, # TODO: FIXME
{'data': {'data': {123: [123]}}, 'params': {'child': IntegerField()}, 'exceptions': (ValidationError,)}
    ) # Cases used to test the behaviour of `.run_validation()`.
class TestSerializerMethodField(TestCase):
"""
Testing SerializerMethodField.
"""
def test_default_validation(self):
"""
Testing default methods.
"""
ser = SerializerMethodFieldDefault(data={'test': 'test'})
ser.is_valid()
assert ser.validated_data['test'] == 'test', 'Expected `test`. Reality: `{}`.'.format(ser.validated_data['test'])
ser = SerializerMethodFieldDefault(data={'test': 'test'})
setattr(ser, 'pop_test', lambda *args: None)
ser.is_valid()
assert ser.validated_data['test'] is None, 'Expected `None`. Reality: `{}`.'.format(ser.validated_data['test'])
ser = SerializerMethodFieldDefault(data={'test': 'test'})
setattr(ser, 'pop_test', lambda *args: 123)
ser.is_valid()
assert ser.validated_data['test'] == 123, 'Expected `123`. Reality: `{}`.'.format(ser.validated_data['test'])
def test_default_serializing(self):
"""
Testing serializing object.
"""
# Standard value.
obj = type('Object', (object,), {'test': 'test'})
ser = SerializerMethodFieldDefault(instance=obj)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
        assert ser.data['test'] == obj, 'Expected the instance itself. Reality: `{}`.'.format(ser.data['test'])
ser = SerializerMethodFieldDefault(instance=obj)
setattr(ser, 'get_test', lambda *args: None)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
assert ser.data['test'] is None, 'Expected value `None`. Reality: `{}`.'.format(ser.data['test'])
ser = SerializerMethodFieldDefault(instance=obj)
setattr(ser, 'get_test', lambda *args: 123)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
assert ser.data['test'] == 123, 'Expected value `123`. Reality: `{}`.'.format(ser.data['test'])
def test_single_method_validation(self):
"""
Testing single method.
"""
ser = SerializerMethodFieldSingle(data={'test': 'test'})
ser.is_valid()
assert ser.validated_data['test'] == 'test', 'Expected `test`. Reality: `{}`.'.format(ser.validated_data['test'])
ser = SerializerMethodFieldSingle(data={'test': 'test'})
setattr(ser, 'test_test', lambda *args: None)
ser.is_valid()
assert ser.validated_data['test'] is None, 'Expected `None`. Reality: `{}`.'.format(ser.validated_data['test'])
ser = SerializerMethodFieldSingle(data={'test': 'test'})
setattr(ser, 'test_test', lambda *args: 123)
ser.is_valid()
assert ser.validated_data['test'] == 123, 'Expected `123`. Reality: `{}`.'.format(ser.validated_data['test'])
ser = SerializerMethodFieldSingle(data={'test': 'test'})
setattr(ser, 'pop_test', lambda *args: 123)
ser.is_valid()
assert ser.validated_data['test'] == 'test', 'Expected `test`. Reality: `{}`.'.format(ser.validated_data['test'])
def test_single_method_serializing(self):
"""
Testing serializing object.
"""
# Standard value.
obj = type('Object', (object,), {'test': 'test'})
ser = SerializerMethodFieldSingle(instance=obj)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
        assert ser.data['test'] == obj, 'Expected the instance itself. Reality: `{}`.'.format(ser.data['test'])
ser = SerializerMethodFieldSingle(instance=obj)
setattr(ser, 'test_test', lambda *args: None)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
assert ser.data['test'] is None, 'Expected value `None`. Reality: `{}`.'.format(ser.data['test'])
ser = SerializerMethodFieldSingle(instance=obj)
setattr(ser, 'test_test', lambda *args: 123)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
assert ser.data['test'] == 123, 'Expected value `123`. Reality: `{}`.'.format(ser.data['test'])
ser = SerializerMethodFieldSingle(instance=obj)
setattr(ser, 'get_test', lambda *args: 123)
assert isinstance(ser.data, dict), 'Expected type: `dict`. Reality: `{}`.'.format(type(ser.data))
assert len(ser.data) == 1, 'Expected single value in data. Reality: `{}`.'.format(ser.data)
        assert ser.data['test'] == obj, 'Expected the instance itself. Reality: `{}`.'.format(ser.data['test'])
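# --- Added sketch (not part of the original suite) -----------------------
# A minimal illustration of how the `*_cases` tuples above could be
# consumed by a generic runner. The real suite presumably does something
# equivalent inside `BaseFieldTestCase`; the parameter names (`value`,
# `data`) are assumed to match the keys used in each case's `data` dict.
def _run_cases(field_class, method_name, cases):
    for case in cases:
        field = field_class(**case.get('params', {}))
        method = getattr(field, method_name)
        if 'exceptions' in case:
            try:
                method(**case['data'])
            except case['exceptions']:
                continue  # The expected exception was raised.
            raise AssertionError('Expected one of {}.'.format(case['exceptions']))
        result = method(**case['data'])
        assert result == case['return'], 'Expected `{}`. Reality: `{}`.'.format(case['return'], result)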
| true |
603ba9dda00da362b46b5d4f37471b84f918c8c4
|
Python
|
luispuentesvega/util-scripts-py
|
/get_directory_size.py
|
UTF-8
| 300 | 2.96875 | 3 |
[] |
no_license
|
import os
total_size = 0
start_path = r'This PC\Luis Puentes (Galaxy A5)\Card'  # Root directory to measure. Note: this looks like an MTP device path, which os.walk cannot traverse; use a real filesystem path such as '.' for the current directory.
for path, dirs, files in os.walk(start_path):
for f in files:
fp = os.path.join(path, f)
total_size += os.path.getsize(fp)
print("Directory size: " + str(total_size))
| true |
d2a5c5093d93c845ca4e5a8b2445ff524c29dbde
|
Python
|
anishpdm/SNIT-IEDC-PYTHON-PGM
|
/add.py
|
UTF-8
| 36 | 2.96875 | 3 |
[] |
no_license
|
a=10
b=33
c=a+b
print("Result is",c)
| true |
41d6026f11457df61e4592a92e6e35bbeb31b1a9
|
Python
|
ColdMatter/PhotonBEC
|
/learning/daq-board-fast-read/daq-read-individual.py
|
UTF-8
| 763 | 2.75 | 3 |
[
"MIT"
] |
permissive
|
# Read data from the DAQ board with many individual calls.
# Written around 11/4/2017.
import sys
sys.path.append("D:\\Control\\PythonPackages\\")
import time, datetime
import SingleChannelAI
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
reading_count = 200
Npts = 1000
rate = 1e4
interval = 0.05
points = []
for i in range(reading_count):
print('[' + str(datetime.datetime.now()) + '] reading')
data = SingleChannelAI.SingleChannelAI(Npts=Npts, rate=rate, device="Dev1", channel="ai0", minval=0, maxval=5)
points.append(np.mean(data))
time.sleep(interval)
fig = plt.figure(1)
plt.clf()
plt.plot(points, '-x', markersize=2)
plt.grid()
plt.ylabel('Volts / V')
plt.xlabel('Point')
plt.title('Data from DAQ board')
plt.show()
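# --- Added sketch (not part of the original script) ----------------------
# Persisting the mean readings lets a run be re-plotted later without
# re-acquiring data; the filename and column names are assumptions.
import csv
with open('daq_readings.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['point', 'mean_volts'])
    for idx, volts in enumerate(points):
        writer.writerow([idx, volts])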
| true |
e588cb7284af979df1d9e6b2814ae4b8552f86a1
|
Python
|
yassinhc/Building_digital
|
/test/CoridorTest.py
|
UTF-8
| 1,406 | 2.546875 | 3 |
[] |
no_license
|
import unittest
import sys
sys.path.append('..')
import src.coridor as Corridor
import src.Wall as Wall
import src.coordinate as Coordinate
from test.areaTest import AreaTest
class Test_Coridor(AreaTest, unittest.TestCase):
global List_Walls
    def createArea(self):
        global List_Walls
        c1 = Coordinate.Coordinate(0, 0)
        c2 = Coordinate.Coordinate(10, 0)
        c3 = Coordinate.Coordinate(10, 5)
        c4 = Coordinate.Coordinate(0, 5)
        w1 = Wall.Wall((c1, c2))
        w2 = Wall.Wall((c3, c4))
        List_Walls = (w1, w2)
        area = Corridor.Corridor((w1, w2))
        return area
    def test_getSurface(self):
        # The generic surface test inherited from AreaTest is skipped here;
        # the corridor-specific surface tests below cover it instead.
        pass
def test_surface_square(self):
surface = self.area.getSurface()
self.assertEqual(surface, 50)
def test_surface_parallelogram(self):
        c1 = Coordinate.Coordinate(1, 0)
        c2 = Coordinate.Coordinate(1, 8)
        c3 = Coordinate.Coordinate(5, 0)
        c4 = Coordinate.Coordinate(5, 8)
        w1 = Wall.Wall((c1, c2))
        w2 = Wall.Wall((c3, c4))
        area = Corridor.Corridor((w1, w2))
surface = area.getSurface()
self.assertEqual(surface,32)
def test_getListWalls(self):
        self.assertEqual(self.area.getListWalls(), List_Walls)
if __name__ == '__main__':
unittest.main()
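# --- Added note (not part of the original tests) --------------------------
# Worked check of the expected surfaces: the rectangular corridor's walls
# have length 10 and are separated by 5, so 10 * 5 = 50; the second case
# has walls of length 8 separated by 5 - 1 = 4, so 8 * 4 = 32.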
| true |
a65fed5c478757d78926a9958b962f0b47106bd6
|
Python
|
vikulovm5/Homeworks
|
/Урок 2. Практическое задание/task_4.py
|
UTF-8
| 1,114 | 4.09375 | 4 |
[] |
no_license
|
"""
4. Найти сумму n элементов следующего ряда чисел: 1 -0.5 0.25 -0.125 ...
Количество элементов (n) вводится с клавиатуры.
Пример:
Введите количество элементов: 3
Количество элементов: 3, их сумма: 0.75
Подсказка:
Каждый очередной элемент в 2 раза меньше предыдущего и имеет противоположный знак
Решите через рекурсию. Решение через цикл не принимается.
Для оценки Отлично в этом блоке необходимо выполнить 5 заданий из 7
"""
def rec(a, num, count, sum):
if a == count:
print(f'Элементов: {count}, Сумма: {sum}')
elif a < count:
return rec(a + 1, num / 2 * -1, count, sum+num)
try:
n = int(input('Количество элементов: '))
rec(0, 1, n, 0)
except ValueError:
print('Введенные данные некорректны.')
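# --- Added cross-check (not part of the original task) --------------------
# The series is geometric with first term 1 and ratio -1/2, so the partial
# sum has the closed form S(n) = (1 - (-1/2)**n) / (1 + 1/2).
def closed_form(n):
    return (1 - (-0.5) ** n) / 1.5
# e.g. closed_form(3) == 0.75, matching the recursive result above.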
| true |
76d511ada20317db568944c7820f62db9fa63778
|
Python
|
janmarkuslanger/clean-flask
|
/app/user/models.py
|
UTF-8
| 587 | 2.640625 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from passlib.apps import custom_app_context as pwd_context
from app import db
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
username = db.Column(db.String, unique=True, nullable=False)
password_hash = db.Column(db.String, nullable=False)
def hash_password(self, password):
        # Note: passlib >= 1.7 prefers pwd_context.hash(); encrypt() remains as a deprecated alias.
        self.password_hash = pwd_context.encrypt(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
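# --- Added usage sketch (not part of the original module) -----------------
# A typical signup flow, assuming the usual Flask-SQLAlchemy session
# pattern; `create_user` is a hypothetical helper, not part of the model.
def create_user(name, username, password):
    user = User(name=name, username=username)
    user.hash_password(password)
    db.session.add(user)
    db.session.commit()
    return user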
| true |