blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
24d0d7c81e53034eaf712af1ad116f35dfd05f36
|
Python
|
goldworm/python-test
|
/asyncio-test/client.py
|
UTF-8
| 1,008 | 3 | 3 |
[] |
no_license
|
import asyncio
import sys
async def on_recv(reader, writer, name, loop):
    """Echo loop: read from the server, log, and reply until b'stop'.

    Each message is answered with "<name>-<i>-<data>" after a 1 second
    delay; on b'stop' the writer is closed and the event loop stopped,
    which ends run_forever() in main().
    """
    i = 0
    while True:
        data = await reader.read(100)
        if data == b'stop':
            break
        data = data.decode()
        print(f'{name}-{i}: {data}')
        await asyncio.sleep(1.0)
        resp = f'{name}-{i}-{data}'
        writer.write(resp.encode())
        await writer.drain()
        i += 1
    writer.close()
    # Stopping the loop here terminates main()'s run_forever().
    loop.stop()
async def on_connect(name: str, loop):
    """Open the unix-domain socket and start the receive loop as a task."""
    path = "/tmp/iiss.sock"
    # NOTE(review): the explicit loop= argument is deprecated and removed in
    # Python 3.10 -- confirm the target interpreter version.
    reader, writer = await asyncio.open_unix_connection(path, loop=loop)
    loop.create_task(on_recv(reader, writer, name, loop=loop))
def main():
    """Parse the client name from argv and run the event loop until stopped."""
    # sys.argv always contains at least the script name, so the original
    # `len(sys.argv) == 0` check never fired and a missing <name> argument
    # raised IndexError below.  Require at least one real argument.
    if len(sys.argv) < 2:
        return print(f'Usage: {sys.argv[0]} <name>')
    name = sys.argv[1]
    loop = asyncio.get_event_loop()
    try:
        loop.create_task(on_connect(name, loop))
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.close()
if __name__ == '__main__':
    main()
| true |
b92d6b52150af63d5a6fa19213d98817535cb683
|
Python
|
swati2728/function_questions
|
/more_eexercise.py
|
UTF-8
| 2,722 | 3.65625 | 4 |
[] |
no_license
|
# Question 1
# i=1
# while i<=1000:
# if i%2==0:
# print("nav")
# if i%7==0:
# print("gurukul")
# if i%21==0:
# print("navgurukul")
# i=i+1
# Question 2
# Number_of_students=input("enter a name:")
# Ek_student_ka_kharcha=int(input("enter a spending amount:"))
# if Ek_student_ka_kharcha<=50000:
# print("Hum budget ke andar hain")
# else:
# print(" hum budget ke bahar hain")
# # Question 3
# Question 3
password = input("enter your sakht password:")
# A strong password must: be 6-16 characters long, contain "$", contain the
# digit "2" or "8", and contain "A" or "Z".  The original chained ifs were
# broken: `len()>6 or len()>16` accepted any long string, and both
# `if 2 or 8 in password` and `if "A" or "Z" in password` were always truthy
# (a bare non-zero int / non-empty string is True regardless of `in`).
if (6 <= len(password) <= 16
        and "$" in password
        and ("2" in password or "8" in password)
        and ("A" in password or "Z" in password)):
    print("it is Strong password")
else:
    print("it is Weak password")
# Question 4
# num1=int(input("enter a first numbers:"))
# num2=int(input("enter a second numbers:"))
# num3=int(input("enter a third numbers:"))
# if num1>num2:
# print(num1,"is highst")
# elif num2>num1:
# print(num2,"is highst")
# elif num3>num1:
# print(num3,"is highst")
# elif num3>num2:
# print(num3,"is highst")
# elif num2>num3:
# print(num2,"is highst")
# elif num1>num3:
# print(num1,"is highst")
# else:
# ("nothing")
# Question 5
# Question 6
# string_list = ["Rishabh", "Rishabh", "Abhishek", "Rishabh", "Divyashish", "Divyashish"]
# new_list=[ ]
# index=0
# while index<len(string_list):
# if string_list[index] not in new_list:
# new_list.append(string_list[index])
# index=index+1
# print(new_list)
# Question 7
# list1 = [1, 342, 75, 23, 98]
# list2 = [75, 23, 98, 12, 78, 10, 1]
# new_list=[ ]
# index=0
# while index<len(list1):
# if list1[index] in list2:
# new_list.append(list1[index])
# index=index+1
# print(new_list)
# Question 8
# list1 = [1, 5, 10, 12, 16, 20]
# list2 = [1, 2, 10, 13, 16]
# list3=[ ]
# index=0
# while index<len(list1):
# list2.append(list1[index])
# index=index+1
# print(list2)
# j=0
# while j<len(list2):
# if list2[j] not in list3:
# list3.append(list2[j])
# j=j+1
# print(list3)
# # Question 9
# # def is_harshad_number(num):
# # numbers=(input("enter a number:"))
# # x=numbers.split()
# print(x)
# Question 10
# numbers= [[45, 21, 42, 63], [12, 42, 42, 53], [42, 90, 78, 13], [94, 89, 78, 76], [87, 55, 98, 99]]
# index=0
# while index<len(numbers):
# print(max(numbers[index]))
# index=index+1
#
# numbers= [[45, 21, 42, 63], [12, 42, 42, 53], [42, 90, 78, 13], [94, 89, 78, 76], [87, 55, 98, 99]]
# index=0
# maxi=[]
# while index<len(numbers):
# j=0
# maximum=0
# while j<len(numbers[index]):
# if numbers[index][j]>maximum:
# maximum = numbers[index][j]
# j=j+1
# maxi.append(maximum)
# index=index+1
# print(maxi)
# Question 11
# words = "navgurukul is great"
# counter = 0
# while counter < len(words):
# print (words[counter])
# counter=counter+1
| true |
41e45c7d8296f6a35c327b3d3bc69bdebc43036c
|
Python
|
jiuzhibuguniao/Test
|
/Test/time_test.py
|
UTF-8
| 464 | 3.125 | 3 |
[] |
no_license
|
# ----------------------------------------------------------------------
# time_test -- demo of the time module's conversion functions
# Author: @Rooobins    Date: 2019-01-03
# ----------------------------------------------------------------------
import time

# Fixed timestamp string used for the parsing examples below.
a = "Sat Mar 28 22:24:24 2016"

# Current time in its various representations.
print(time.time())
print(time.localtime(time.time()))
print(time.asctime(time.localtime(time.time())))

# Formatting the current local time with two different patterns.
for fmt in ("%Y-%m-%d %H:%M:%S", "%a %b %d %H:%M:%S"):
    print(time.strftime(fmt, time.localtime()))

# Parsing the fixed string back into a struct_time and an epoch value.
parsed = time.strptime(a, "%a %b %d %H:%M:%S %Y")
print(parsed)
print(time.mktime(parsed))
| true |
9d4ff64326613d3eadcb17a2c2f5a5deebd295fb
|
Python
|
dianluyuanli-wp/xiaohuli-leetCode-python-newcoder-js
|
/sort/balance tree or not and tree ergodic.py
|
UTF-8
| 979 | 3.0625 | 3 |
[] |
no_license
|
class TreeNode(object):
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
def com_bv(node):
    """Compute the height ("balance value") of the subtree rooted at node.

    Stores the height on each visited node as node.bv (a leaf gets 0) and
    returns the root's height.  A missing child -- or a None argument,
    which the original crashed on -- counts as height -1, collapsing the
    original four-way case split into one expression.
    """
    if node is None:
        return -1
    node.bv = 1 + max(com_bv(node.left), com_bv(node.right))
    return node.bv
# Build a sample binary tree rooted at p5 (values 1-9, p9 unbalances it).
p1=TreeNode(1)
p2=TreeNode(2)
p3=TreeNode(3)
p4=TreeNode(4)
p5=TreeNode(5)
p6=TreeNode(6)
p7=TreeNode(7)
p8=TreeNode(8)
p9=TreeNode(9)
p5.left=p2
p5.right=p7
p2.left=p1
p2.right=p4
p4.left=p3
p7.left=p6
p7.right=p9
p9.left=p8
# Iterative pre-order traversal: right children are parked on an explicit
# stack while the walk follows left links; each visit prints "<val> <bv>"
# (bv was filled in on every node by com_bv above).
# NOTE(review): the print *statement* below is Python 2 syntax -- this
# chunk does not run under Python 3.
stack=[]
r_node=p5
a=com_bv(p5)
while r_node is not None or stack:
    print str(r_node.val)+' '+str(r_node.bv)
    if r_node.right is not None:
        stack.append(r_node.right)
    r_node=r_node.left
    if r_node is None and stack:
        r_node=stack.pop()
a=com_bv(p5)
| true |
3902cbf7da24b71cb39d37454d23c33fb5d59af0
|
Python
|
AngrySaltyFish24/PollutionBikeMonitor
|
/server.py
|
UTF-8
| 4,749 | 2.609375 | 3 |
[] |
no_license
|
from http.server import *
import socketserver
import ssl
import json
import sqlite3
class Handler(BaseHTTPRequestHandler):
    """HTTP handler serving the dashboard, static assets and DB contents.

    Relies on the module-level ``database`` (a CreateDatabase instance)
    created in the ``__main__`` block.
    """
    def do_GET(self):
        # Routes: /datafromDB -> raw DB dump; any other non-root path is
        # served as a static file; "/" falls through to the homepage.
        try:
            if self.path == "/datafromDB":
                # NOTE(review): no status line or headers are sent before
                # the body on this branch -- confirm clients tolerate that.
                self.wfile.write(database.getValues())
            elif len(self.path) > 1:
                # NOTE(review): opens whatever relative path the client
                # requested with no sanitisation -- path traversal risk.
                with open(self.path[1:], "rb") as file:
                    self.send_response(200)
                    if "js" in self.path:
                        self.send_header("Content-type", "text/javascript")
                    else:
                        self.send_header("Content-type", "text/css")
                    self.end_headers()
                    self.wfile.write(file.read())
            else:
                self.homepage()
        except Exception as e:
            # Best-effort logging; the request is silently dropped on error.
            print(self.path)
            print(e)
    def do_POST(self):
        # Store the posted sensor payload, then re-serve the homepage.
        self.send_response(200)
        length = int(self.headers["content-length"])
        data = self.rfile.read(length)
        database.insertValues(data)
        self.homepage()
    def homepage(self):
        # Serve index.html from the working directory as the default page.
        with open("index.html", "rb") as file:
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(file.read())
class CreateDatabase():
    """SQLite store for bike sensor readings (one row per GPS sample).

    Table ``data`` holds lat/lng, three sensor values and a routeID that
    groups samples uploaded in the same POST.
    """
    def __init__(self):
        self.dbinit()
    def dbinit(self):
        # Open (or create) database.db and ensure the data table exists.
        self.conn = sqlite3.connect("database.db")
        self.c = self.conn.cursor()
        self.c.execute("""
        CREATE TABLE IF NOT EXISTS data (
            lat int(4),
            lng int(4),
            sound int(1),
            CO int(1),
            airquality int(1),
            routeID int(1)
            )
        """)
        data = self.c.execute('select * from data')
        # Cache the column names for later use.
        self.collums = list(map(lambda x: x[0], data.description))
    def generateID(self):
        # Next routeID = last stored routeID + 1 (1 for an empty table).
        self.c.execute("SELECT * FROM data")
        try:
            lastID = self.c.fetchall()[-1][-1]
            print(lastID)
        except:
            # NOTE(review): bare except also swallows real DB errors; an
            # explicit IndexError catch would be narrower.
            lastID = 0
        return lastID+1
    def insertValues(self, rawData):
        # Parse the posted payload (a JSON-ish byte string that arrives
        # without its closing bracket), insert each sample, then merge
        # duplicate (lat, lng) rows into a single averaged row.
        try:
            # NOTE(review): `str` and `id` shadow builtins inside this method.
            str = rawData.decode()
            # Close the JSON array after the last object before parsing.
            str = str[:str.rfind('}')+1] + ']' + str[str.rfind('}')+1:]
            data = json.loads(str[str.find("["):str.rfind("]") + 1])
            id = self.generateID()
            for item in data:
                values = []
                for key, value in item.items() :
                    if isinstance( value, (frozenset, list, set, tuple,) ):
                        # Flatten coordinate pairs into separate columns.
                        for coord in value:
                            values.append(coord)
                    else:
                        values.append(value)
                # Reverse to match the table's column order, then tag with
                # this upload's routeID.
                values = list(reversed(values))
                values.append(id)
                self.c.execute("""
                INSERT INTO data VALUES (
                ?,
                ?,
                ?,
                ?,
                ?,
                ?
                )
                """, (values))
                print(values)
            self.conn.commit()
            self.c.execute("SELECT * FROM data")
            data = self.c.fetchall()
            # Deduplicate: average all rows sharing the same (lat, lng).
            for dataItem in range(len(data)):
                # NOTE(review): %-formatting values into SQL instead of using
                # ? placeholders -- injection-prone pattern; values here come
                # from the DB itself but parameterising is still safer.
                self.c.execute("SELECT * FROM data WHERE lat=%s AND lng=%s" % (data[dataItem][0], data[dataItem][1]))
                duplicates = self.c.fetchall()
                if len(duplicates) > 0:
                    # Column-wise mean of the duplicate rows.
                    average = [sum(x) for x in zip(*duplicates)]
                    for item in range(len(average)):
                        average[item] /= len(duplicates)
                    for duplicate in duplicates:
                        self.c.execute("""DELETE FROM data WHERE
                        lat=%s AND lng=%s
                        """ % (data[dataItem][0], data[dataItem][1]))
                    # Keep the first duplicate's routeID rather than a mean.
                    average[-1] = duplicates[0][-1]
                    self.c.execute("""
                    INSERT INTO data VALUES (
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?
                    )
                    """, (average))
            self.conn.commit()
        except Exception as e:
            # Best-effort: log and continue on any parse/DB failure.
            print(e)
    def getValues(self):
        # Return every row as a repr'd, utf-8 encoded byte string.
        self.c.execute("SELECT * FROM data")
        return str(self.c.fetchall()).encode()
if __name__ == "__main__":
    # Serve HTTPS on port 80 (requires root and cert.pem/key.pem beside
    # the script); Ctrl-C shuts the server down.
    database = CreateDatabase()
    server = socketserver.TCPServer(("", 80), Handler)
    # NOTE(review): ssl.wrap_socket is deprecated in favour of
    # SSLContext.wrap_socket -- confirm the target Python version.
    server.socket = ssl.wrap_socket(
        server.socket,
        certfile="cert.pem",
        keyfile="key.pem",
        server_side=True)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print(" Recieved Shutting Down")
| true |
3cc5e86d6016f7e162ef98bd91139c0d11ea1aa1
|
Python
|
kevinyuQ/SideProjs
|
/old/gnolac/gnolac.py
|
UTF-8
| 2,805 | 2.65625 | 3 |
[] |
no_license
|
import code
import functools
import inspect
import re
import signal
import sys
def main(fn):
    """Call fn with command line arguments. Used as a decorator.

    The main decorator marks the function that starts a program. For example,

        @main
        def my_run_function():
            # function body

    Use this instead of the typical __name__ == "__main__" predicate.
    """
    # Inspect the *caller's* frame: fn runs only when the decorated module
    # is executed as a script, not when it is imported.
    if inspect.stack()[1][0].f_locals['__name__'] == '__main__':
        args = sys.argv[1:] # Discard the script name from command line
        fn(*args) # Call the main function
    return fn
def read_eval_print_loop(next_line, env, interactive=False, quiet=False,
                         startup=False, load_files=()):
    """Read and evaluate input until an end of file or keyboard interrupt.

    next_line  -- zero-argument callable returning a buffered input source.
    env        -- evaluation environment handed to scheme_eval.
    NOTE(review): scheme_load, scheme_read, scheme_eval and SchemeError are
    not defined or imported in the visible portion of this file -- they are
    presumably provided elsewhere; confirm before running standalone.
    """
    if startup:
        # Load files requested on the command line before the first prompt.
        for filename in load_files:
            scheme_load(filename, True, env)
    while True:
        try:
            src = next_line()
            while src.more_on_line:
                expression = scheme_read(src)
                result = scheme_eval(expression, env)
                if not quiet and result is not None:
                    print(result)
        except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
            # Only a recursion-depth RuntimeError is reported nicely; every
            # other RuntimeError is re-raised.
            if (isinstance(err, RuntimeError) and
                'maximum recursion depth exceeded' not in getattr(err, 'args')[0]):
                raise
            elif isinstance(err, RuntimeError):
                print('Error: maximum recursion depth exceeded')
            else:
                print('Error:', err)
        except KeyboardInterrupt: # <Control>-C
            # During startup file loading, Ctrl-C aborts entirely.
            if not startup:
                raise
            print()
            print('KeyboardInterrupt')
            if not interactive:
                return
        except EOFError: # <Control>-D, etc.
            print()
            return
@main
def run(*argv):
    """Entry point: parse command-line arguments and start the REPL."""
    import argparse
    parser = argparse.ArgumentParser(description='Gnolac Language Interpreter')
    parser.add_argument('-load', '-i', action='store_true',
                        help='run file interactively')
    parser.add_argument('file', nargs='?',
                        type=argparse.FileType('r'), default=None,
                        help='Scheme file to run')
    args = parser.parse_args()
    next_line = buffer_input
    interactive = True
    load_files = []
    if args.file is not None:
        if args.load:
            load_files.append(getattr(args.file, 'name'))
        else:
            # Non-interactive: feed the file's lines straight to the REPL.
            lines = args.file.readlines()
            def next_line():
                return buffer_lines(lines)
            interactive = False
    # NOTE(review): buffer_input, buffer_lines, create_global_frame and
    # tscheme_exitonclick are not defined in the visible file portion.
    read_eval_print_loop(next_line, create_global_frame(), startup=True,
                         interactive=interactive, load_files=load_files)
    tscheme_exitonclick()
| true |
183880eb25bf8f5e44685f53820c5776a20d4946
|
Python
|
yurifarias/CursoEmVideoPython
|
/ex034.py
|
UTF-8
| 204 | 4.15625 | 4 |
[] |
no_license
|
# Read the salary and apply a raise: 15% up to R$1250, 10% above that.
salário = float(input('Digite o valor do salário: R$'))
percentual = 1.15 if salário <= 1250 else 1.10
salário *= percentual
print('O salário inicial será aumentado para R${:.2f}'.format(salário))
| true |
f284c4d3b0a02f9b6c25aeaa2fc81141d545d17e
|
Python
|
fg0x0/Python-Lesson
|
/Auto-File-Move-And-Rename.py
|
UTF-8
| 1,138 | 2.65625 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import os
import json
import time
class MyHandler(FileSystemEventHandler):
    """Moves every file from folder_to_track into folder_destination,
    renaming each one to new_file_<n>.txt using the first free n.
    """
    # Counter for candidate file names; advances past names that already
    # exist in the destination folder.
    i = 1

    def on_modified(self, event):
        # BUG FIX: watchdog dispatches to `on_modified`; the original method
        # was named `on_modif`, so the observer never invoked it.
        new_name = "new_file_" + str(self.i) + ".txt"
        for filename in os.listdir(folder_to_track):
            # Find the first destination name that is not taken yet.
            file_exists = os.path.isfile(folder_destination + "/" + new_name)
            while file_exists:
                self.i += 1
                new_name = "new_file_" + str(self.i) + ".txt"
                file_exists = os.path.isfile(folder_destination + "/" + new_name)
            src = folder_to_track + "/" + filename
            new_destination = folder_destination + "/" + new_name
            os.rename(src, new_destination)
folder_to_track = "/home/fg0d/Desktop/myFolder"
folder_destination = "/home/fg0d/Desktop/newFolder"

# BUG FIX: the class is named `MyHandler`; `Myhandler()` raised NameError.
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()

# Idle until Ctrl-C, then shut the observer down cleanly.
try:
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
| true |
4bb46a59e4c5df5cd1771765ff852e474d926e61
|
Python
|
huaxiawudi/yz1806
|
/7/day07/app/views.py
|
UTF-8
| 308 | 2.578125 | 3 |
[] |
no_license
|
from random import randint
from django.shortcuts import render
# Create your views here.
def add_student(request):
    """Create a Student with random attributes and report its new id."""
    # BUG FIX: the original referenced Student and HttpResponse without
    # importing them, raising NameError on the first request.  Imported
    # locally to keep this chunk self-contained.
    from django.http import HttpResponse
    from .models import Student  # TODO(review): confirm the models module path

    stu = Student()
    stu.sname = '小明' + str(randint(1, 1000))
    stu.ssex = randint(0, 1)
    stu.sage = randint(15, 30)
    stu.save()
    return HttpResponse("添加学生%d" % stu.id)
| true |
3511054dbca5c31cf98f294da32fc333dcd094ea
|
Python
|
rheehot/Algorithm_problem
|
/leetcode_3Sum.py
|
UTF-8
| 1,165 | 3.125 | 3 |
[] |
no_license
|
class Solution(object):
    """LeetCode 15 (3Sum): all unique triplets that sum to zero."""

    def threeSum(self, nums):
        """Return a list of unique [a, b, c] triplets with a + b + c == 0.

        Sorts nums (in place), fixes the smallest element, then closes in
        with two pointers; duplicates are skipped at both the anchor and
        the pointers so each triplet appears exactly once.  O(n^2) time.
        """
        result = []
        nums.sort()  # sorting enables duplicate skipping and pointer moves
        for i in range(len(nums) - 2):
            # Skip duplicate anchor values.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            left, right = i + 1, len(nums) - 1
            while left < right:
                # Renamed from `sum`, which shadowed the builtin.
                total = nums[i] + nums[left] + nums[right]
                if total < 0:
                    left += 1
                elif total > 0:
                    right -= 1
                else:
                    result.append([nums[i], nums[left], nums[right]])
                    # Skip values equal to the ones just used.
                    while left < right and nums[left] == nums[left + 1]:
                        left += 1
                    while left < right and nums[right] == nums[right - 1]:
                        right -= 1
                    # Move both pointers past the matched pair.
                    left += 1
                    right -= 1
        return result
| true |
7c72f37ccb94d4b9250e63bfd8170362e98e0b6a
|
Python
|
RunningIkkyu/ProxyPool
|
/proxypool/getter.py
|
UTF-8
| 1,284 | 2.53125 | 3 |
[] |
no_license
|
from proxypool.tester import Tester
from proxypool.db import RedisClient
from proxypool.crawler import Crawler
from proxypool.setting import POOL_UPPER_THRESHOLD, PRIVATE_PROXY_ENABLE
from proxypool.private_proxy import PrivateProxy
import sys
class Getter():
    """Fills the Redis-backed proxy pool from crawlers or a private provider."""

    def __init__(self):
        self.redis = RedisClient()
        self.crawler = Crawler()

    def is_over_threshold(self):
        """Check if the amount of proxies is over threshold."""
        # Return the comparison directly instead of if/else True/False.
        return self.redis.count() >= POOL_UPPER_THRESHOLD

    def run(self):
        """Add proxies to the pool: private ones if enabled, else crawled."""
        print('Getter started.')
        if PRIVATE_PROXY_ENABLE:
            proxies = PrivateProxy().get_proxies()
            for proxy in proxies:
                print('Add private proxy {}'.format(proxy))
                self.redis.add(proxy)
        else:
            # Only crawl when the pool has room.
            if not self.is_over_threshold():
                for callback_label in range(self.crawler.__CrawlFuncCount__):
                    callback = self.crawler.__CrawlFunc__[callback_label]
                    # Fetch proxies from this crawler callback.
                    proxies = self.crawler.get_proxies(callback)
                    sys.stdout.flush()
                    for proxy in proxies:
                        self.redis.add(proxy)
| true |
486ef6398fb532fb5c73aaa6d74c09b77dcba1cd
|
Python
|
nithish1199/Color-detection
|
/main.py
|
UTF-8
| 868 | 2.59375 | 3 |
[] |
no_license
|
import cv2
import numpy as np
import pyautogui
# Track a red object in the webcam feed and press Up/Down according to the
# object's vertical movement between frames (a simple gesture controller).
cap = cv2.VideoCapture(0)
# HSV bounds for "red" (low-hue end only; the high-hue red wrap is ignored).
lower_red = np.array([0, 50, 50]) # example value
upper_red = np.array([10, 255, 255])
# Vertical position of the tracked object in the previous frame.
prev_y=0
while True:
    ret, frame = cap.read()
    # Threshold the frame in HSV space to isolate red regions.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_red, upper_red)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        area=cv2.contourArea(c)
        # Ignore small blobs (noise).
        if area>300:
            x,y,w,h=cv2.boundingRect(c)
            cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
            # Smaller y means the object moved up on screen.
            if y<prev_y:
                pyautogui.press('down')
            else:
                pyautogui.press('up')
            prev_y=y
    cv2.imshow('frame', frame)
    # Quit on 'q'.
    if cv2.waitKey(10) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| true |
50fe8efb534d572eb3082448ad7a39aa4f089091
|
Python
|
ankurgupta7/fast-rcnn-attributes
|
/data/attributes/Images/people_labels_parser_fcnn.py
|
UTF-8
| 977 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
# Convert labels.txt (filename, 4 bbox coords, 0/1 attribute flags per line)
# into one annotation file per image plus a train-set index file.
# Modernised from Python 2 (print statement) to Python 3.
LABELS = ['is_male', 'has_long_hair', 'has_glasses', 'has_hat', 'has_t-shirt',
          'has_long_sleeves', 'has_shorts', 'has_jeans', 'has_long_pants']

f = open('labels.txt')
ft = open('../ImageSets/train.txt', 'w')
j = 0
while True:
    l = f.readline()
    if len(l) == 0:
        break
    words = l.split()
    output_str = ''
    # Bounding-box coordinates occupy fields 1-4.
    for word in words[1:5]:
        output_str += word + ' '
    # BUG FIX: attribute flags start at field 5.  The original scanned ALL
    # fields (including the filename and bbox coords), so a coordinate equal
    # to '1' appended LABELS[i - 5] with a negative index -- a wrong label.
    for i in range(5, len(words)):
        if words[i] == '1':
            output_str += LABELS[i - 5] + ' '
    output_str = output_str[0:-1]
    # Records with no bbox get a small dummy box.
    output_str = output_str.replace('NaN NaN NaN NaN', '0.0 0.0 20.0 20.0')
    if len(output_str.split()) < 5:
        print('ignoring file' + str(j))
        j += 1
        continue
    output_str = output_str + '\n'
    with open('../Annotations/' + words[0].replace('jpg', 'txt'), 'w') as f1:
        f1.write(output_str)
    ft.write('{:05d}\n'.format(int(words[0].replace('.jpg', '\n'))))
    j += 1
    # raw_input('Proceed?')
ft.close()
f.close()
| true |
da52ab252aafc16cb83748639bb0268f3503b03f
|
Python
|
shivekkhurana/learning
|
/python/generator_paradigm/bytes_transfered.py
|
UTF-8
| 315 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
import re

# Sum the response sizes recorded in an Apache-style access log using a lazy
# generator pipeline (nothing is materialised in memory).
byte_regex = re.compile("""^.*]\s".*"\s[0-9]{3}\s([0-9]*)\s.*$""")

# `with` closes the file deterministically (the original relied on GC).
with open('access.log') as log:
    matches = (byte_regex.search(line) for line in log)
    # `is not None` instead of `!= None`; renamed from `bytes`, which
    # shadowed the builtin type.
    sizes = (int(match.group(1)) for match in matches if match is not None)
    print("Total bytes transfered : %d" % sum(sizes))
| true |
bc01c1bcbb4994926df81453b617c69e7df9931e
|
Python
|
hendrikvgl/RoboCup-Spielererkennung
|
/code-master/lib/bitbots/modules/behaviour/body/actions/throw.py
|
UTF-8
| 2,002 | 2.890625 | 3 |
[] |
no_license
|
# -*- coding:utf-8 -*-
"""
Throw
^^^^^
Handling throwing of the goalie.
History:
''''''''
* ??.??.??: Created (Unknown)
"""
from bitbots.modules.abstract.abstract_action_module import AbstractActionModule
import time
from bitbots.util import get_config
LEFT = "LEFT"
MIDDLE = "MIDDLE"
RIGHT = "RIGHT"
BOTH_ARMS_HIGH = "BOTH"
class Throw(AbstractActionModule):
    """Basic version: throw the goalie in one direction and remember which
    way we threw ourselves.

    The direction comes in via ``args`` (LEFT, MIDDLE or RIGHT); the
    matching animation names are read from the goalie config section.
    """
    def __init__(self, args):
        super(Throw, self).__init__()
        config = get_config()
        # "richtung" is German for "direction".
        self.richtung = args
        self.initializationTime = time.time()
        self.played = False
        self.left_animation = config["animations"]["goalie"]["throw_left"]
        self.middle_animation = config["animations"]["goalie"]["throw_middle"]
        self.right_animation = config["animations"]["goalie"]["throw_right"]
    def perform(self, connector, reevaluate=False):
        # Stop ball tracking before committing to the throw animation.
        connector.blackboard_capsule().cancel_ball_tracking()
        if self.richtung == LEFT:
            # Returns true if can be performed
            if connector.animation_capsule().play_animation(self.left_animation):
                connector.blackboard_capsule().set_thrown(LEFT)
                # NOTE(review): unlike RIGHT/MIDDLE, the LEFT branch does not
                # freeze the blackboard for 4s -- confirm that is intentional.
                return self.interrupt()
        elif self.richtung == RIGHT:
            if connector.animation_capsule().play_animation(self.right_animation):
                connector.blackboard_capsule().set_thrown(RIGHT)
                connector.blackboard_capsule().freeze_till(time.time() + 4)
                return self.interrupt()
        elif self.richtung == MIDDLE: # MIDDLE
            if connector.animation_capsule().play_animation(self.middle_animation):
                connector.blackboard_capsule().set_thrown(MIDDLE)
                connector.blackboard_capsule().freeze_till(time.time() + 4)
                return self.interrupt()
        else:
            raise ValueError('%s is not a possible throw direction.' % self.richtung)
| true |
46a80ff46b4690a5802b464aec76434bf74a916d
|
Python
|
Kalen-Ssl/shilu
|
/7.1.py
|
UTF-8
| 392 | 2.546875 | 3 |
[] |
no_license
|
# Write a four-line poem, then fix line 2 in place and print the result.
# BUG FIXES: the original never closed its file handles before reopening the
# file (so buffered writes could be lost), and the final read used the
# Windows-only path "D:\Py7-1.txt" instead of the file it just wrote.
with open('./Py7-1.txt', "w", encoding='utf-8') as f:
    f.writelines("秦时明月汉时关,\n")
    # The draft repeats line 1; it is replaced with the real second line below.
    f.writelines("秦时明月汉时关。\n")
    f.writelines("但使龙城飞将在,\n")
    f.writelines("不教胡马度阴山。\n")

with open('./Py7-1.txt', 'r+', encoding='utf-8') as f:
    flist = f.readlines()
    flist[1] = '万里长征人未还。\n'
    # Rewrite the whole file from the start with the corrected line.
    f.seek(0)
    f.writelines(flist)

with open('./Py7-1.txt', 'r', encoding='utf-8') as f:
    print(f.read())
| true |
636cddf9f90621cd3f9340fb63f32dae743d95b0
|
Python
|
alancucki/recursive-convolutional-autoencoder
|
/logger.py
|
UTF-8
| 10,645 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
# and https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/04-utils/tensorboard/logger.py
import datetime
import glob
import os
import shutil
import sys
import time
from collections import defaultdict
import torch
import numpy as np
import tensorflow as tf
def to_np(x):
    """Return a torch tensor's data as a NumPy array (moved to CPU first)."""
    return x.data.cpu().numpy()
class Writer(object):
    """Thin wrapper around a TensorFlow 1.x ``tf.summary.FileWriter``.

    NOTE(review): this uses the TF1 summary API (FileWriter, Summary,
    HistogramProto); TF2 replaced it with tf.summary.create_file_writer --
    confirm the pinned TensorFlow version.
    """
    def __init__(self, logdir):
        """Create a summary writer logging to log_dir."""
        self.__writer = tf.summary.FileWriter(logdir)
    def scalar_summary(self, tag, value, step):
        """Log a scalar variable."""
        summary = tf.Summary(
            value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.__writer.add_summary(summary, step)
    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of the tensor of values."""
        # Create a histogram using numpy
        counts, bin_edges = np.histogram(values, bins=bins)
        # Fill the fields of the histogram proto
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum(values**2))
        # Drop the start of the first bin so edges align one-to-one with counts
        bin_edges = bin_edges[1:]
        # Add bin edges and counts
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        # Create and write Summary
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.__writer.add_summary(summary, step)
    def flush(self):
        # Force buffered summaries to disk.
        self.__writer.flush()
class Logger(object):
    """Training logger: console progress lines plus TensorBoard summaries.

    Keeps running losses and timings per epoch, saves/restores model and
    optimizer state, and mirrors scalars/histograms to the train/valid/test
    Writer instances under ``logdir``.
    """

    def __init__(self, initial_lr, log_interval, num_batches, logdir=None,
                 log_weights=False, log_grads=False):
        """Create a training logger with summary writers logging to logdir."""
        self.timestamp = datetime.datetime.now().isoformat().replace(':', '.')
        if not logdir:
            logdir = self.timestamp
        self.logdir = logdir.rstrip('/') + '/'
        os.makedirs(self.logdir)
        # Snapshot all source files next to the logs for reproducibility.
        for py_file in glob.glob(r"*.py"):
            shutil.copy(py_file, self.logdir)
        self.model_path = self.logdir + "model.pt"
        self.current_model_path = self.logdir + "current_model.pt"
        self.writers = {name: Writer(self.logdir + name)
                        for name in ['train', 'valid', 'test']}
        self.total_losses = None
        self.epoch = 0
        self.lr = initial_lr
        self.log_interval = log_interval
        self.num_batches = num_batches
        self.log_weights = log_weights
        self.log_grads = log_grads
        self.minibatch_start_time = None
        self.epoch_start_time = None
        self.training_start_time = None

    def mark_epoch_start(self, epoch):
        """Reset timers and the loss accumulator at the start of an epoch."""
        self.epoch = epoch
        self.minibatch_start_time = self.epoch_start_time = time.time()
        self.total_losses = 0

    def save_model_state_dict(self, model_state_dict, current=False):
        """Save a state dict to model.pt (best) or current_model.pt."""
        path = self.model_path if not current else self.current_model_path
        with open(path, 'wb') as f:
            torch.save(model_state_dict, f)

    def load_model_state_dict(self, current=False):
        """Load the state dict saved by save_model_state_dict()."""
        path = self.model_path if not current else self.current_model_path
        with open(path, 'rb') as f:
            return torch.load(f)

    def save_training_state(self, optimizer, args):
        """Persist RNG, optimizer and logger state for later resumption."""
        th = torch.cuda if args.cuda else torch
        # XXX Writers cannot be pickled -- detach them while serializing.
        _writers = self.writers
        self.writers = None
        state = {'random': th.get_rng_state(),
                 'optimizer': optimizer.state_dict(),
                 'args': args,
                 'logger': self,
                 }
        torch.save(state, self.training_state_path(self.logdir))
        self.writers = _writers

    @staticmethod
    def training_state_path(logdir):
        """Return the canonical path of the pickled training state."""
        return os.path.join(logdir, 'training_state.pkl')

    @staticmethod
    def load_training_state(resume_path):
        """Load a pickled training state and recreate its writers."""
        state_path = Logger.training_state_path(resume_path)
        state = torch.load(open(state_path, 'rb'))
        # XXX Writers cannot be pickled -- recreate them after loading.
        state['logger'].writers = {
            name: Writer(state['logger'].logdir + name) \
            for name in ['train', 'valid', 'test']}
        return state

    def set_training_state(self, state, optimizer):
        """Restore RNG and optimizer state; swap the live optimizer into state."""
        th = torch.cuda if state['args'].cuda else torch
        th.set_rng_state(state['random'])
        del state['random']
        optimizer.load_state_dict(state['optimizer'])
        # https://discuss.pytorch.org/t/saving-and-loading-sgd-optimizer/2536
        optimizer.state = defaultdict(dict, optimizer.state)
        state['optimizer'] = optimizer
        return state

    def save_model_info(self, classes_with_kwargs):
        """Write model class names and their kwargs to logdir/model.info."""
        kwargs_to_str = lambda kwargs: ','.join(
            ["%s=%s" % (key, str(kw) if type(kw) != str else '\\"%s\\"' % kw) \
             for key, kw in kwargs.items()])
        info = ""
        for field, (name, kwargs) in classes_with_kwargs.items():
            info += "%s_class=%s\n" % (field, name)
            if kwargs:
                info += "%s_kwargs=%s\n" % (field, kwargs_to_str(kwargs))
        with open(self.logdir + "model.info", 'w') as f:
            f.write(info.strip())

    def train_log(self, batch, batch_losses, named_params):
        """Accumulate batch losses; print and TB-log every log_interval batches."""
        if not self.total_losses:
            self.total_losses = dict(batch_losses)
        else:
            # BUG FIX: dict.iteritems() is Python 2 only; items() on Python 3.
            for k, v in batch_losses.items():
                self.total_losses[k] += v
        if batch % self.log_interval == 0 and batch > 0:
            elapsed = (time.time() - self.minibatch_start_time
                       ) * 1000 / self.log_interval
            cur_loss = {k: v / self.log_interval
                        for k, v in self.total_losses.items()}
            loss_str = ' | '.join(
                [' {} {:5.2f}'.format(k, cur_loss[k]) \
                 for k in sorted(cur_loss.keys())])
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | '
                  'ms/batch {:5.2f} | {}'.format(
                      self.epoch, batch, self.num_batches, self.lr,
                      elapsed, loss_str))
            cur_loss["ms/batch"] = elapsed
            cur_loss["learning_rate"] = self.lr
            step = (self.epoch - 1) * self.num_batches + batch
            self.tb_log(mode="train", info=cur_loss, step=step,
                        named_params=named_params)
            # Restart the accumulation window.
            self.total_losses = None
            self.minibatch_start_time = time.time()

    def valid_log(self, val_loss, batch=0):
        """Print an end-of-epoch banner and TB-log the validation losses."""
        elapsed = time.time() - self.epoch_start_time
        losses = dict(val_loss)
        loss_str = ' : '.join(
            [' {} {:5.2f}'.format(k, v) for k, v in losses.items()])
        loss_str = ('| end of epoch {:3d} | time: {:5.2f}s | '
                    'valid {}'.format(self.epoch, elapsed, loss_str))
        print('-' * len(loss_str))
        print(loss_str)
        print('-' * len(loss_str))
        losses['s/epoch'] = elapsed
        losses['learning_rate'] = self.lr
        step = self.epoch * self.num_batches + batch
        self.tb_log(mode="valid", info=losses,
                    step=step, named_params=lambda: [])

    def mem_log(self, mode, named_params, batch):
        """Histogram every named parameter for weight inspection."""
        step = self.epoch * self.num_batches + batch
        for tag, value in named_params:
            self.writers[mode].histo_summary(tag, to_np(value), step, bins=20)

    def final_log(self, results, result_file="results/log_file.md"):
        """Print the final test losses and append a markdown row to result_file."""
        if not os.path.exists(os.path.dirname(result_file)):
            os.makedirs(os.path.dirname(result_file))
        log_line = ('| End of training | test losses {} |'
                    ''.format(results['test']))
        print('=' * len(log_line))
        print(log_line)
        print('=' * len(log_line))
        header = "|timestamp|args|train acc|valid acc|test acc|other|\n"
        header += "|---------|----|---------|---------|--------|-----|\n"
        # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
        if 'train' not in results or 'valid' not in results:
            log_line = "| %s | %s | not_evald | not_evald | %.2f | %s |\n" % (
                self.timestamp, '<br>'.join(sys.argv[1:]),
                results['test']['acc'], results)
        else:
            log_line = "| %s | %s | %.2f | %.2f | %.2f | %s |\n" % (
                self.timestamp, '<br>'.join(sys.argv[1:]),
                results['train']['acc'], results['valid']['pplx'],
                results['test']['acc'], results)
        with open(self.logdir + "results.md", 'w') as f:
            f.write(header + log_line)
        # Start the shared log file with a header the first time around.
        if not os.path.isfile(result_file):
            with open(result_file, 'a') as f:
                f.write(header)
        with open(result_file, 'a') as f:
            f.write(log_line)
        step = self.epoch * self.num_batches
        for mode in results.keys():
            self.tb_log(mode=mode, info=results[mode], step=step,
                        named_params=lambda: [])

    def tb_log(self, mode, info, step, named_params):
        """Send scalars (and optionally weight/grad histograms) to TensorBoard."""
        # Log scalar values
        for tag, value in info.items():
            self.writers[mode].scalar_summary(tag, value, step)
        # Log values and gradients of the parameters (histogram)
        if self.log_weights:
            for tag, value in named_params():
                tag = tag.replace('.', '/')
                self.writers[mode].histo_summary(tag, to_np(value), step)
        if self.log_grads:
            for tag, value in named_params():
                tag = tag.replace('.', '/')
                self.writers[mode].histo_summary(tag + '/grad',
                                                 to_np(value.grad), step)
        self.writers[mode].flush()
| true |
6da5f9280ffd802899f2a5f99ce6a64ee2a6a359
|
Python
|
sabrikrdnz/LYK-17-Python-Examples
|
/machine_learning/1_intro.py
|
UTF-8
| 645 | 3.375 | 3 |
[] |
no_license
|
from sklearn import datasets, neighbors
import numpy as np

# Fit a k-NN classifier on the iris data set and inspect its training accuracy.
iris = datasets.load_iris()

# Feature matrix and class labels.
features = iris.data
labels = iris.target

# Train a KNeighborsClassifier on the full data set.
model = neighbors.KNeighborsClassifier()
model.fit(features, labels)

# Predict classes for every observation and score against the true labels.
predictions = model.predict(features)
accuracy = model.score(features, labels)
print("Predicted model accuracy: " + str(accuracy))

# Stack actual and predicted labels for easy side-by-side comparison.
comparison = np.vstack([labels, predictions])
print(comparison)
| true |
bb8e08fd8648cfaf17aefe37a1acb710fcd95f42
|
Python
|
theemadnes/rPi_camera_indexer
|
/lambda_functions/rPi_index_photo/lambda_function.py
|
UTF-8
| 1,394 | 2.609375 | 3 |
[] |
no_license
|
# import necessary modules
from __future__ import print_function
import json
import boto3
import urllib
dynamodb_table = 'mattsona-rpi-camera_metadata' # replace with your DDB table
# connect to dynamodb & s3
dynamodb = boto3.resource('dynamodb')
s3 = boto3.client('s3')
region_name = s3.meta.region_name # need the region name for the object URL
print('Loading function')
def lambda_handler(event, context):
    """Index a newly uploaded S3 image's metadata into DynamoDB.

    Triggered by an S3 put event: reads the object's user metadata
    (rpi_id, timestamps) and writes one item to the table named by the
    module-level ``dynamodb_table``.  Returns "Done" on success and
    re-raises any failure so Lambda records the error.
    """
    # get details of s3 object & bucket
    bucket = event['Records'][0]['s3']['bucket']['name']
    # NOTE(review): urllib.unquote_plus and str.decode('utf8') are Python 2
    # only; Python 3 needs urllib.parse.unquote_plus -- confirm the Lambda
    # runtime version.
    object_key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')
    # connect to the dynamoDB table
    table = dynamodb.Table(dynamodb_table)
    try:
        # grab the object
        print('Grabbing S3 object data')
        response = s3.get_object(Bucket=bucket, Key=object_key)
        # index metadata to dynamoDB
        print('Putting item to DynamoDB')
        table.put_item(
            Item = {
                'device_id': response['Metadata']['rpi_id'],
                'epoch_time': int(response['Metadata']['epoch_time_stamp']),
                'image_time_stamp': response['Metadata']['image_time_stamp'],
                's3_object_url': ('https://s3-' + region_name + '.amazonaws.com/' + bucket + '/' + object_key)
            }
        )
    except Exception as e:
        print(e)
        raise e
    return "Done"
| true |
d6d0f9fee6bce8e6caf9c7184b8b017dee9fbe8d
|
Python
|
bballamudi/DataGristle
|
/scripts/gristle_slicer
|
UTF-8
| 13,920 | 2.765625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env python
"""
Extracts subsets of input files based on user-specified columns and rows.
The input csv file can be piped into the program through stdin or identified
via a command line option. The output will default to stdout, or redirected
to a filename via a command line option.
The columns and rows are specified using python list slicing syntax -
so individual columns or rows can be listed as can ranges. Inclusion
or exclusion logic can be used - and even combined.
Supported slicing specification:
'NumberN, StartOffset:StopOffset' - The specification is a comma-
delimited list of individual offsets or ranges.
Offsets are based on zero, and if negative are
measured from the end of the record or file with
-1 being the final item. There can be N number
of individual offsets.
Ranges are a pair of offsets separated by a colon.
The first number indicates the starting offset,
and the second number indicates the stop offset +1.
Arguments:
--long-help Print verbose help and exit.
-i, --inputs=<files> One or more input files. Alternatively, could be
piped in via stdin.
-o, --output=<file> Specify the output file. The default is stdout.
-c, --columns=<spec> Provide the column inclusion specification,
Default is ':' which includes all columns.
-C, --excolumns=<spec> Provide the column exclusion specification.
Default is None which excludes nothing.
-r, --records=<spec> Provide the record inclusion specification.
Default is ':' which includes all records.
-R, --exrecords=<spec> Provide the record exclusion specification.
Default is None which excludes nothing.
-d, --delimiter=<del> Provide a quoted single-character field delimiter.
Typically useful if automatic csv dialect
detection fails to correctly interpret file. Also
required for STDIN input. If provided then quoting
should also be provided.
-q, --quoting=<qt> Specify quoting behavior. Typically used when
automatic csv dialect detection fails or
when processing data via stdin. Values:
- quote_all - all fields are quoted
- quote_none - no fields are quoted. This is the
default used if the delimiter is overridden.
- quote_minimal - only quoting of fields with
special characters.
- quote_nonnumeric - only quotes text fields.
-h, --help Print help and exit.
Examples:
$ gristle_slicer -i sample.csv
Prints all rows and columns
$ gristle_slicer -i sample.csv -c":5, 10:15" -C 13
Prints columns 0-4 and 10,11,12,14 for all records
$ gristle_slicer -i sample.csv -C:-1
Prints all columns except for the last for all
records
$ gristle_slicer -i sample.csv -c:5 -r-100:
Prints columns 0-4 for the last 100 records
$ gristle_slicer -i sample.csv -c:5 -r-100 -d'|' --quoting=quote_all:
Prints columns 0-4 for the last 100 records, csv
dialect info (delimiter, quoting) provided manually)
$ cat sample.csv | gristle_slicer -c:5 -r-100 -d'|' --quoting=quote_all:
Prints columns 0-4 for the last 100 records, csv
dialect info (delimiter, quoting) provided manually)
This source code is protected by the BSD license. See the file "LICENSE"
in the source code root directory for the full language or refer to it here:
http://opensource.org/licenses/BSD-3-Clause
Copyright 2011,2012,2013,2017 Ken Farmer
"""
import sys
import csv
import fileinput
from os.path import basename
from pprint import pprint as pp
from signal import signal, SIGPIPE, SIG_DFL
from typing import List, Tuple, Dict, Any, Optional, IO
import datagristle.location_slicer as slicer
import datagristle.configulator as configulator
import datagristle.file_io as file_io
#Ignore SIG_PIPE and don't throw exceptions on it... (http://docs.python.org/library/signal.html)
signal(SIGPIPE, SIG_DFL)
NAME = basename(__file__)
SHORT_HELP = 'Extract column and row subsets out of files using python string slicing notation\n'
def main() -> int:
    """Entry point: read csv input, apply record/column slicing, write output.

    Returns 0 on success (including the empty-input case).
    """
    config = get_args()
    try:
        # InputHandler sniffs or is told the csv dialect; raises EOFError
        # when the input is empty.
        input_handler = file_io.InputHandler(config['infiles'],
                                             config['delimiter'],
                                             config['quoting'],
                                             config['quotechar'],
                                             config['has_header'])
    except EOFError:
        print('Warning: empty file')
        return 0
    # Build the four inclusion/exclusion slicers for records and columns.
    (incl_rec_slicer,
     excl_rec_slicer,
     incl_col_slicer,
     excl_col_slicer) = setup_slicers(config['infiles'],
                                      input_handler.dialect,
                                      config['records'],
                                      config['exrecords'],
                                      config['columns'],
                                      config['excolumns'])
    # Output reuses the input's dialect so rows round-trip unchanged.
    output_handler = file_io.OutputHandler(config['outfile'], input_handler.dialect)
    for rec in input_handler:
        # rec_cnt has already been advanced past this record, hence the -1.
        new_rec = process_rec(input_handler.rec_cnt - 1,
                              incl_rec_slicer,
                              excl_rec_slicer,
                              rec,
                              incl_col_slicer,
                              excl_col_slicer)
        if new_rec:
            output_handler.write_rec(new_rec)
    input_handler.close()
    output_handler.close()
    return 0
def setup_slicers(infiles: List[str],
                  dialect: csv.Dialect,
                  config_records: str,
                  config_exrecords: str,
                  config_columns: str,
                  config_excolumns: str) -> Tuple[slicer.SpecProcessor,
                                                  slicer.SpecProcessor,
                                                  slicer.SpecProcessor,
                                                  slicer.SpecProcessor]:
    """ Sets up the 4 slicer objects: inclusion & exclusion for
        rec and column.

        Then counts records and columns if negative slice references
        exist and calls the spec adjuster.

        Raises ValueError when negative slicing is combined with stdin
        input ('-'), since stdin cannot be read twice for counting.

        BUG FIX: the stdin guard previously compared the infiles *list*
        to the string '-' (always False), so it could never trigger;
        it now tests membership.
    """
    # Split each comma-separated spec string into a list of spec items.
    columns = config_columns.split(',')
    excolumns = config_excolumns.split(',') if config_excolumns else []
    records = config_records.split(',')
    exrecords = config_exrecords.split(',') if config_exrecords else []
    incl_rec_slicer = slicer.SpecProcessor(records, 'incl_rec_spec')
    excl_rec_slicer = slicer.SpecProcessor(exrecords, 'excl_rec_spec')
    incl_col_slicer = slicer.SpecProcessor(columns, 'incl_col_spec')
    excl_col_slicer = slicer.SpecProcessor(excolumns, 'excl_col_spec')
    # Negative record offsets need the total record count to resolve.
    if incl_rec_slicer.has_negatives or excl_rec_slicer.has_negatives:
        if '-' in infiles:
            raise ValueError('ERROR: negative record slicing with stdin')
        rec_cnt = get_rec_count(infiles, dialect)
        incl_rec_slicer.spec_adjuster(loc_max=rec_cnt)
        excl_rec_slicer.spec_adjuster(loc_max=rec_cnt)
    # Negative column offsets need the column count of the first record.
    if incl_col_slicer.has_negatives or excl_col_slicer.has_negatives:
        if '-' in infiles:
            raise ValueError('negative column slicing with stdin')
        col_cnt = get_col_count(infiles, dialect)
        incl_col_slicer.spec_adjuster(loc_max=col_cnt)
        excl_col_slicer.spec_adjuster(loc_max=col_cnt)
    return incl_rec_slicer, excl_rec_slicer, incl_col_slicer, excl_col_slicer
def get_rec_count(files: List[str],
                  dialect: csv.Dialect) -> int:
    """ Gets record counts for input files.

        Returns the 0-offset index of the last record across all files:
        a one-record input returns 0 and an empty input returns -1.

        FIX: the return annotation previously claimed
        Tuple[Optional[int], int] although a bare int was returned.
    """
    rec_cnt = -1
    try:
        for _ in csv.reader(fileinput.input(files), dialect):
            rec_cnt += 1
    finally:
        # fileinput keeps global state; release it even if csv parsing fails
        fileinput.close()
    return rec_cnt
def get_col_count(files: List[str],
                  dialect: csv.Dialect) -> int:
    """ Gets column counts for input files.

        Returns the 0-offset index of the last column of the first
        record of the first file: a 3-column record returns 2.

        FIX: an empty first file previously raised UnboundLocalError
        because field_cnt was never assigned; it now returns -1.
    """
    field_cnt = -1
    try:
        # only the first record of the first file determines the count
        for record in csv.reader(fileinput.input(files[0]), dialect):
            field_cnt = len(record) - 1
            break
    finally:
        fileinput.close()
    return field_cnt
def process_rec(rec_number: int,
                incl_rec_slicer: 'slicer.SpecProcessor',
                excl_rec_slicer: 'slicer.SpecProcessor',
                rec: List[str],
                incl_col_slicer: 'slicer.SpecProcessor',
                excl_col_slicer: 'slicer.SpecProcessor') -> Optional[List[str]]:
    """ Evaluates all the specifications against a single record
        from the input file.  First it applies inclusion & exclusion
        specifications against the record, then it applies inclusion
        & exclusion specifications against each column.

        Input:
            - rec_number:      used for rec specs (must be >= 0)
            - incl_rec_slicer: which records to include
            - excl_rec_slicer: which records to exclude
            - rec:             a list of all columns from the record
            - incl_col_slicer: which columns to include
            - excl_col_slicer: which columns to exclude
        Output:
            - None when the record is rejected or no column qualifies
            - otherwise a list of the qualifying columns

        (Slicer annotations are forward references so this function has
        no import-time dependency on the slicer module.)
    """
    # minimal validation
    assert int(rec_number) >= 0

    # reject record if it isn't in the inclusion spec
    if not incl_rec_slicer.spec_evaluator(rec_number):
        return None
    # reject record if it is in the exclusion spec
    if excl_rec_slicer.spec_evaluator(rec_number):
        return None

    # keep only the columns that are included and not excluded
    output_rec = [field
                  for col_number, field in enumerate(rec)
                  if incl_col_slicer.spec_evaluator(col_number)
                  and not excl_col_slicer.spec_evaluator(col_number)]

    # don't return an empty list
    return output_rec or None
class SlicerConfigulator(configulator.Config):
    """Project Config subclass adding slicer-specific validation."""

    def validate_custom_config(self, config: configulator.CONFIG_TYPE):
        # stdin input gives the csv sniffer nothing to sample, so the user
        # must state the dialect explicitly.
        # NOTE(review): config['infiles'] is typed List[str] elsewhere in
        # this module -- comparing it to the string '-' may never match;
        # confirm whether this should be "'-' in config['infiles']".
        if not config['delimiter'] and config['infiles'] == '-':
            self.parser.error('Provide delimiter and quoting when piping data into program via stdin')
def get_args() -> Dict[str, Any]:
    """Build and parse the command-line configuration.

    Registers the standard csv-handling options plus the four slicing
    specs (-c/-C/-r/-R), then returns the resolved config dict.
    """
    config_mgr = SlicerConfigulator(NAME, SHORT_HELP, __doc__)
    # standard csv/file options shared across gristle tools
    config_mgr.add_standard_config('infiles')
    config_mgr.add_standard_config('outfile')
    config_mgr.add_standard_config('delimiter')
    config_mgr.add_standard_config('quoting')
    config_mgr.add_standard_config('quotechar')
    config_mgr.add_standard_config('escapechar')
    config_mgr.add_standard_config('has_header')
    config_mgr.add_standard_config('has_no_header')
    # slicing specs: python-slice-like strings, e.g. ":5, 10:15"
    config_mgr.add_custom_config(name='columns',
                                 short_name='c',
                                 arg_type='option',
                                 default=':',
                                 config_type=str,
                                 help_msg='Specify the columns to include via a comma-separated '
                                          'list of columns and colon-separated pairs of column '
                                          'start & stop ranges. Default is to include all '
                                          'columns (":"). ')
    config_mgr.add_custom_config(name='excolumns',
                                 short_name='C',
                                 arg_type='option',
                                 default='',
                                 config_type=str,
                                 help_msg='Specify the columns to exclude via a comma-separated '
                                          'list of columns and colon-separated pairs of column '
                                          'start & stop ranges. Default is to exclude nothing.')
    config_mgr.add_custom_config(name='records',
                                 short_name='r',
                                 arg_type='option',
                                 default=':',
                                 config_type=str,
                                 help_msg='Specify the records to include via a comma-separated '
                                          'list of record numbers and colon-separated pairs of '
                                          'record start & stop ranges. Default is to include all '
                                          'records (":").')
    config_mgr.add_custom_config(name='exrecords',
                                 short_name='R',
                                 arg_type='option',
                                 default='',
                                 config_type=str,
                                 help_msg='Specify the records to exclude via a comma-separated list '
                                          'of record numbers and colon-separated pairs of record '
                                          'start & stop ranges. Default is to exclude nothing.')
    config_mgr.process_configs()
    return config_mgr.config
if __name__ == '__main__':
sys.exit(main())
| true |
ecfe1755b7f3f4f22762ba1b720e2e9e9a8cd885
|
Python
|
tm-hsgw/AtCoderRepos
|
/atcoder.jp/past201912-open/past201912_f/Main.py
|
UTF-8
| 413 | 3.359375 | 3 |
[] |
no_license
|
# Extract "words" from the input string: each word starts and ends with an
# uppercase letter (uppercase letters toggle the start/end state; lowercase
# letters in between are part of the word).
s = input()
dic = []
i0 = 0          # index of the word's opening uppercase letter
st = False      # True while we are inside a word (one uppercase seen)
for i in range(len(s)):
    if s[i].islower():
        continue
    if st:
        # second uppercase letter closes the word
        dic.append(s[i0 : i + 1])
        st = False
    else:
        # first uppercase letter opens a new word
        i0 = i
        st = True
# sort case-insensitively by lowercasing everything first
dic = [w.lower() for w in dic]
dic.sort()
# re-capitalize the first and last letter of each word and print them joined
for w in dic:
    w_ls = list(w)
    w_ls[0] = w_ls[0].upper()
    w_ls[len(w_ls) - 1] = w_ls[len(w_ls) - 1].upper()
    print("".join(w_ls), end="")
print()
| true |
055444edb77a8cf4e79c6e1a57f7c9d4d00ba699
|
Python
|
Javacream/org.javacream.training.python
|
/org.javacream.training.python/src/3-Einfache Anwendungen/types_control_loop.py
|
UTF-8
| 351 | 3.890625 | 4 |
[] |
no_license
|
# Demo: loop over 1..9 and report whether each number is even or odd.
even_message = "an even number: "
odd_message = "an odd number: "
numbers = range(1, 10)
finished = False    # stays False during the whole loop
for i in numbers:
    print ("processing number " , i, ", finished: ", finished)
    if i % 2 == 0:
        print (even_message, i)
    else:
        print (odd_message, i)
# only flipped after the loop has completed
finished = True
print ("all numbers processed, finished: ", finished)
| true |
ab3b91e9cbde7ffc08a4f2f086ca43ffbe991617
|
Python
|
Nadi0998/RIP
|
/Lab3/ex_2.py
|
UTF-8
| 647 | 3.078125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# Exercise: demonstrate the custom Unique iterator over several inputs,
# including generated random data and a case-insensitivity option.
from librip.gens import gen_random
from librip.iterators import Unique
data1 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
data2 = gen_random(1, 3, 10)
data3 = gen_random(1,9,15)
data = ['a', 'A', 'b', 'B']
# Implementation of task 2
for i in Unique(data1):
    print(i, end=', ')
print()
#for i in data2:
#    print(i, end=', ')
#print()
for i in Unique(data2):
    #print('a')
    print(i, end = ', ')
print()
for i in Unique(data3):
    #print('a')
    print(i, end = ', ')
print()
for i in Unique(data):
    print(i, end=', ')
print()
# with ignore_case=True, 'a'/'A' count as the same element
for i in Unique(data, ignore_case=True):
    print(i, end=', ')
| true |
1c7675bb81bceeab5f4befdb3b08b259de7c7931
|
Python
|
freirezinho/lp2-python-ac04
|
/physician.py
|
UTF-8
| 1,463 | 3.03125 | 3 |
[] |
no_license
|
from person import Pessoa
class Medico(Pessoa):
    """A physician: a Pessoa with a CRM registration number, a salary
    and a list of medical specialties (the first one is the main one).
    """
    # Bare annotations only.  FIX: the original declared class-level
    # defaults, including the *mutable* default ``__especialidades = []``
    # which would be shared by the whole class; ``[str]`` is also not a
    # valid type annotation (the intent was a list of str).
    __crm: str
    __salario: int
    __especialidades: "list[str]"

    def __init__(
        self,
        nome: str,
        rg: str,
        cpf: str,
        crm: str,
        telefone: str,
        salario: int,
        especialidades: "list[str]"
    ):
        super().__init__(nome, rg, cpf, telefone)
        self.__crm = crm
        self.__salario = salario
        self.__especialidades = especialidades

    def get_crm(self) -> str:
        return self.__crm

    def get_especialidade_principal(self) -> str:
        """Return the first specialty, or '' when there is none."""
        if self.__especialidades:
            return self.__especialidades[0]
        return ''

    def get_nome(self) -> str:
        # redundant override kept for interface compatibility
        return super().get_nome()

    def get_salario(self) -> int:
        return self.__salario

    def get_todas_especialidades(self) -> "list[str]":
        return self.__especialidades

    def set_nova_especialidade(self, especialidade: str):
        self.__especialidades.append(especialidade)

    def set_salario(self, valor: int) -> None:
        self.__salario = valor

    def edit_crm(self, crm: str) -> None:
        self.__crm = crm

    def substituir_especialidades(self, especialidades: "list[str]"):
        self.__especialidades = especialidades

    def remover_especialidade(self, especialidade: str) -> None:
        # membership check avoids ValueError from list.remove
        if especialidade in self.__especialidades:
            self.__especialidades.remove(especialidade)
| true |
ce9fab8bcb99e6e477b50f7b95a36780f59b1505
|
Python
|
bajjurisupraja/phytonprogram
|
/85.py
|
UTF-8
| 143 | 3.078125 | 3 |
[] |
no_license
|
# Read a line and print the characters at even 0-based positions, a space,
# then the characters at odd positions.
# FIX: the original used Python 2's raw_input(), which is a NameError on
# Python 3; input() is the equivalent (reads one line as a string).
v = input().rstrip()
evenB = oddB = ''
for idx, ch in enumerate(v):
    if idx % 2 == 0:
        evenB += ch
    else:
        oddB += ch
print(evenB + " " + oddB)
| true |
c06e66fb1e1684feb960ef71b3d55bde6e1b645c
|
Python
|
BinhMinhs10/learning_pyspark
|
/graphanalytics.py
|
UTF-8
| 2,101 | 2.8125 | 3 |
[] |
no_license
|
# $SPARK_HOME/bin/spark-submit --packages graphframes:graphframes:0.7.0-spark2.4-s_2.11 graphanalytics.py
# Bike-share graph analytics: stations are vertices, trips are edges.
from pyspark.sql import SparkSession
import pyspark
from graphframes import GraphFrame
from pyspark.sql.functions import desc
spark = SparkSession.builder.appName("Python Spark SQL basic example").getOrCreate()
spark.sparkContext.setLogLevel("WARN")
# checkpoint dir is required by connectedComponents() below
spark.sparkContext.setCheckpointDir("~/tmp/checkpoints")
bikeStations = spark.read.option("header", "true").csv(
    "data/bike-data/201508_station_data.csv"
)
# print(bikeStations.collect())
tripData = spark.read.option("header", "true").csv(
    "data/bike-data/201508_trip_data.csv"
)
tripData.show(5)
# GraphFrame expects vertex ids in a column named "id" and edges with
# "src"/"dst" columns, hence the renames.
stationVertices = bikeStations.withColumnRenamed("name", "id").distinct()
tripEdges = tripData.withColumnRenamed("Start Station", "src").withColumnRenamed(
    "End Station", "dst"
)
stationGraph = GraphFrame(stationVertices, tripEdges)
stationGraph.cache()
print("\nTotal Number of Station: " + str(stationGraph.vertices.count()))
print("\nTotal Number of Trips in Graph: " + str(stationGraph.edges.count()))
print("\nTotal Number of Trips in Original Data: " + str(tripData.count()))
# Query in graph
# stationGraph.edges.groupBy("src", "dst").count()\
#    .orderBy(desc("count")).show()
# trips in or out of one station, grouped by endpoint pair
stationGraph.edges.where("src = 'Townsend at 7th' OR dst = 'Townsend at 7th'").groupBy(
    "src", "dst"
).count().show(10)
# # sub graph
# townAnd7thEdges = stationGraph.edges.where("src = 'Townsend at 7th' OR dst = 'Townsend at 7th'")
# subgraph = GraphFrame(stationGraph.vertices, townAnd7thEdges)
# # motif finding "triangle" pattern
# print("\nfind motif----------------")
# motifs = stationGraph.find("(a)-[ab]->(b); (b)-[bc]->(c); (c)-[ca]->(a)")
# motifs.show()
# In-Degree and Out-Degree Metrics
inDeg = stationGraph.inDegrees
inDeg.orderBy(desc("inDegree")).show(5, False)
# Connected component
print("\nCalculate connected component---------")
# sample 10% of edges to keep the computation small
minGraph = GraphFrame(stationVertices, tripEdges.sample(False, 0.1))
cc = minGraph.connectedComponents()
cc.where("component != 0").show()
print("\nDONE======================\n")
| true |
22ab301a72223254e9fe581d608a5a2dd7cd1d84
|
Python
|
Sushmita00/class-5
|
/hw5.py
|
UTF-8
| 1,903 | 4.46875 | 4 |
[] |
no_license
|
# Exercise 1: print the multiplication table of the number entered by the user.
number=int(input("enter the number"))
i=1
while i<=10:
    print(i*number)
    i=i+1
# Exercise 2: ask the user to enter 10 numbers and add them to a list.
# NOTE(review): the name ``list`` shadows the builtin throughout this file,
# and the values are stored as strings (input() is never converted).
list=[]
for name in range(10):
    number=input("enter the number:")
    list.append(number)
print(list)
else:
    print("that's all")
# Exercise 3: from a list of numbers make a new list containing only the even numbers.
x=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19]
print("the value of x",x)
list=[]
for i in x:
    if i%2==0:
        list.append(i)
print("even numbers are:",list)
# Exercise 4: separate the integer, string and float elements into three lists.
list=[1,"egg",2.50,5,"fish",3.9,6,"hen",6.9]
print("list:",list)
integer=[]
string_list=[]
float_value=[]
for i in list:
    if type(i)==str:
        string_list.append(i)
    if type(i)==int:
        integer.append(i)
    if type(i)==float:
        float_value.append(i)
print(integer)
print(string_list)
print(float_value)
# Exercise 5: ask which number to remove from the list, then print the list.
list=[1,2,3,4,5,6,7,8,9,10]
print("list is",list)
n=int(input("enter the number want to remove from list"))
list.remove(n)
print("our new list",list)
# make a calculator
a=int(input("enter the number"))
b=int(input("enter the number"))
ch=input("a-Addition\n s-subtract\n m-multiplication\n d-division\n Q-quit")
# BUG FIX: the menu advertises 'Q' to quit, but the loop previously only
# exited on lowercase 'q' (pressing 'Q' printed "quit" and kept looping).
# Accepting either case makes the program actually exit.
while ch.lower()!='q':
    if ch=='a':
        print("the sum of two number is",a+b)
    elif ch=='s':
        print("the subtract of two number is",a-b)
    elif ch=='m':
        print("the multiplication of two number is",a*b)
    elif ch=='d':
        # guard against ZeroDivisionError instead of crashing
        if b==0:
            print("cannot divide by zero")
        else:
            print("the division of two number is",a/b)
    ch=input("enter choice again\n a-Addition\n s-subtract\n m-multiplay\n d-division\n Q-quit")
print("quit")
| true |
ec80618ad626a8e70ae574509b4c81a0bc4f3cfd
|
Python
|
uladkasach/Academic
|
/CSCI/gamedev/camera.py
|
UTF-8
| 1,233 | 2.90625 | 3 |
[] |
no_license
|
# Blender Game Engine script: place a third-person camera behind the
# player object 'Snek' and aim it at the player.
import mathutils;
import math;
import bge;
# leftover debug output listing all scene objects
print(dir(bge.logic.getCurrentScene().objects));
player = bge.logic.getCurrentScene().objects['Snek']; # get a reference to player object
playerRotationZ = player.localOrientation.to_euler()[2]; # get player direction as X,Y,Z (in radians not degrees)
inverseRotationZ = playerRotationZ + math.pi; # to position camera: calculate opposite direction
cameraDistance = 7.5; # camera will be 7.5 blender units on the XY plane away from player
camera = bge.logic.getCurrentController().owner; # get a reference to camera object
cameraX = player.position[0] + (math.cos(inverseRotationZ)*cameraDistance); # x position of camera
cameraY = player.position[1] + (math.sin(inverseRotationZ)*cameraDistance); # y position of camera
cameraZ = player.position[2] + 1.5; # z position of camera (1.5 units above player)
cameraRotateX = math.pi/2-math.radians(15); # tip camera to look 15 degrees downward
cameraRotateY = 0; # no sideways tilt
cameraRotateZ = playerRotationZ+(math.pi/2*(-1)); # look same way in horizontal plane as player
camera.position = (cameraX,cameraY,cameraZ); # set camera position
camera.localOrientation = (cameraRotateX,cameraRotateY,cameraRotateZ); # set camera orientation
| true |
c660b66bf9a50960a02d7e38c5a1f9f69800ba90
|
Python
|
sdpython/botadi
|
/_unittests/ut_mokadi/test_grammar_mokadi.py
|
UTF-8
| 3,146 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=5s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
class TestGrammarMokadi(unittest.TestCase):
    """Tests for the MOKADI French grammar: low-level parsing, the
    interpret() helper, and error handling on unknown input."""

    def test_mokadi_grammar(self):
        """Parse sample commands and check the (token, tag) pairs."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        from botadi.mokadi.mokadi_parser import get_tree_string, parse_mokadi
        from botadi.mokadi.grammars import MokadiGrammar_frParser, MokadiGrammar_frLexer, MokadiGrammar_frListener
        codes = ["MOKADI a", "MOKADI lire mail"]
        # expected (token, tag) sequences for each code above
        expec = [[('MOKADI', ':MOKADI:'), ('a', ':word:'), ('<EOF>', ':P:')],
                 [('MOKADI', ':MOKADI:'), ('lire', ':verb_voir:'),
                  ('mail', ':mails:'), ('<EOF>', ':P:')]
                 ]
        for i, code in enumerate(codes):
            fLOG("{0}/{1}: {2}".format(i + 1, len(codes), code))
            parser = parse_mokadi(
                code, MokadiGrammar_frParser, MokadiGrammar_frLexer)
            tree = parser.parse()
            res, simple = get_tree_string(
                MokadiGrammar_frListener, tree, parser, code)
            if "error" in res:
                raise Exception("unable to parse '{0}'".format(code))
            fLOG("SIMPLE", simple)
            fLOG("OUTPUT")

            def display(li, level=0):
                # recursively log the nested parse structure, indenting
                # one space per nesting level
                if isinstance(li, list):
                    for el in li:
                        display(el, level + 1)
                else:
                    fLOG(" " * level, li)
            display(res)
            self.assertEqual(simple, expec[i])

    def test_mokadi_interpret(self):
        """interpret() should return the same pairs as the raw parse."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        from botadi.mokadi import interpret
        from botadi.mokadi.grammars import MokadiGrammar_frParser, MokadiGrammar_frLexer, MokadiGrammar_frListener
        codes = ["MOKADI a", "MOKADI lire mail"]
        expec = [[('MOKADI', ':MOKADI:'), ('a', ':word:'), ('<EOF>', ':P:')],
                 [('MOKADI', ':MOKADI:'), ('lire', ':verb_voir:'),
                  ('mail', ':mails:'), ('<EOF>', ':P:')]
                 ]
        for i, code in enumerate(codes):
            fLOG("{0}/{1}: {2}".format(i + 1, len(codes), code))
            simple = interpret(code, MokadiGrammar_frParser,
                               MokadiGrammar_frLexer, MokadiGrammar_frListener)
            self.assertEqual(simple, expec[i])

    def test_mokadi_interpret_exception(self):
        """A sentence without the MOKADI keyword must raise SyntaxError."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        from botadi.mokadi import interpret
        from botadi.mokadi.grammars import MokadiGrammar_frParser, MokadiGrammar_frLexer, MokadiGrammar_frListener
        try:
            interpret("ROOCADI", MokadiGrammar_frParser,
                      MokadiGrammar_frLexer, MokadiGrammar_frListener)
            raise AssertionError("should fail")
        except SyntaxError as e:
            fLOG(e)
            self.assertTrue("missing" in str(e))
| true |
9eacc24bd9010cf4d28417feb8a792ef7dc82e66
|
Python
|
wai030/Python-project
|
/sudoku_general.py
|
UTF-8
| 3,229 | 3.75 | 4 |
[] |
no_license
|
###################
## Variable Domains
###################
# values a cell may take on the 4x4 board
domain = (1,2,3,4)
######################################################
## Definition of a state (i.e., what the variables are
######################################################
# NOTE(review): this example state marks empty cells with None, but the
# search code below (is_goal / find_children) treats 0 as the empty
# marker, and main() builds its own 0-based state -- so start_state is
# effectively unused sample data.
start_state = [ None,1   ,None,3   ,
                4   ,None,None,2   ,
                None,2   ,None,None,
                3   ,None,2   ,1   ]
def display_solution(state):
    """Pretty-print a 16-element board as a bracketed 4x4 grid."""
    for row in range(4):
        lead = "[" if row == 0 else " "
        tail = "]" if row == 3 else ","
        cells = ",".join(str(cell) for cell in state[4 * row:4 * row + 4])
        print(lead + cells + tail)
##########################################
## Functions related to the problem domain
##########################################
def is_goal(state):
    """A goal state has no empty cells (0) and violates no constraint."""
    return 0 not in state and satisfy_constraints(state)
##########################################
## Constraints
##########################################
# If either is None (unassigned), or the two are not the same
def check_constraint_1 (x, y):
if x!=y:
return True
else:
return False
def satisfy_constraints(state):
    """Return True when no filled cell (non-zero) duplicates a value in
    its row or column of the 4x4 board.  Empty cells are 0 and ignored.
    Box constraints are not checked (matching the original solver).

    FIX (clarity): the original used no-op ``z+=1``/``g+=1`` statements
    to "skip" the cell itself; the self-cell is now excluded with an
    explicit index comparison.
    """
    for idx, value in enumerate(state):
        if value == 0:
            continue
        row, col = divmod(idx, 4)
        for k in range(4):
            row_idx = 4 * row + k       # k-th cell in the same row
            if row_idx != idx and state[row_idx] == value:
                return False
            col_idx = 4 * k + col       # k-th cell in the same column
            if col_idx != idx and state[col_idx] == value:
                return False
    return True
##################
## Search Function
##################
def find_children(state):
    """Expand the first empty cell (0) into one child state per domain value.

    Returns a list of child states; an empty list when the board is full.
    BUG FIX: the original returned the int 0 for a full board, which made
    the caller's ``for child in find_children(...)`` loop raise TypeError.
    """
    if 0 not in state:
        return []
    none_idx = state.index(0)
    children = []
    for value in domain:
        child = state.copy()
        child[none_idx] = value
        children.append(child)
    return children
def csp_search(start_state):
    """Depth-first search over partial assignments.

    Expands the first empty cell of the current state, keeps only the
    children that satisfy the constraints, and pops the next state from
    a stack until a goal is found or the stack empties.
    """
    to_visit = []               # stack of states still to explore (DFS)
    next_state = start_state
    end = False
    while not end:
        if is_goal(next_state):
            display_solution (next_state)
            print("Solution Found!")
            end = True
        else:
            # push only consistent children onto the stack
            for child_state in find_children(next_state):
                if satisfy_constraints(child_state):
                    to_visit.append(child_state)
            if to_visit:
                next_state=to_visit.pop()
            else:
                print("Failed to find a goal state")
                end=True
##################
## Main
##################
# Read the 4 rows of the puzzle (space-separated, 0 marks an empty cell),
# flatten them into one 16-element int list and run the search.
x=[]
for i in range(1, 5):
    a = input("Enter {} row (enter 0 for space): ".format(i))
    x += a.split( )
x= list(map(int, x))
csp_search(x)
# keep the console window open until the user presses Enter
input()
| true |
e99e24102d582fe1f39c9295734e6651f391f8c7
|
Python
|
DJoseph11/pyjects
|
/coinflip.py
|
UTF-8
| 1,229 | 4.65625 | 5 |
[] |
no_license
|
# Simulate 1000 fair coin flips and report how many came up heads,
# with progress reports at 100, 500 and 900 flips.
import random
print(" I will flip a coin 1000 times. Guess how many times it will comp up heads. (Press Enter to begin")
input()
flips = 0 # number of flips performed so far
heads = 0 # number of those flips that came up heads
while flips < 1000: # keep flipping until 1000 tosses have been made
    if random.randint(0,1) == 1: # 0 means tails, 1 means heads
        heads += 1 # count the head
    flips += 1 # every toss counts toward the total, heads or tails
    if flips == 900: # progress report at 900 tosses
        print(" 900 flips and there have been {h} heads.".format(h = heads))
    if flips == 100:
        print(" 100 flips and there have been {h} heads".format(h = heads))
    if flips == 500:
        print(" 500 flips your halfway there and there have been {h} heads".format(h = heads))
print()
print("Out of 1000 tosses, heads came up {h} heads times.".format(h = heads))
print("Where you close?")
| true |
303501c4de4c8b61f4770376be368a8e8e6d717d
|
Python
|
dszokolics/reinforcement-learning
|
/Navigation.py
|
UTF-8
| 5,106 | 3.71875 | 4 |
[] |
no_license
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.0
# kernelspec:
# display_name: drlnd
# language: python
# name: drlnd
# ---
# # Navigation
#
# ---
#
# In this notebook, you will learn how to use the Unity ML-Agents environment for the first project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893).
#
# ### 1. Start the Environment
#
# We begin by importing some necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).
from unityagents import UnityEnvironment
import numpy as np
import sys
# Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.
#
# - **Mac**: `"path/to/Banana.app"`
# - **Windows** (x86): `"path/to/Banana_Windows_x86/Banana.exe"`
# - **Windows** (x86_64): `"path/to/Banana_Windows_x86_64/Banana.exe"`
# - **Linux** (x86): `"path/to/Banana_Linux/Banana.x86"`
# - **Linux** (x86_64): `"path/to/Banana_Linux/Banana.x86_64"`
# - **Linux** (x86, headless): `"path/to/Banana_Linux_NoVis/Banana.x86"`
# - **Linux** (x86_64, headless): `"path/to/Banana_Linux_NoVis/Banana.x86_64"`
#
# For instance, if you are using a Mac, then you downloaded `Banana.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:
# ```
# env = UnityEnvironment(file_name="Banana.app")
# ```
env = UnityEnvironment(file_name="Banana.app")
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ### 2. Examine the State and Action Spaces
#
# The simulation contains a single agent that navigates a large environment. At each time step, it has four actions at its disposal:
# - `0` - walk forward
# - `1` - walk backward
# - `2` - turn left
# - `3` - turn right
#
# The state space has `37` dimensions and contains the agent's velocity, along with ray-based perception of objects around agent's forward direction. A reward of `+1` is provided for collecting a yellow banana, and a reward of `-1` is provided for collecting a blue banana.
#
# Run the code cell below to print some information about the environment.
# +
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents in the environment
print('Number of agents:', len(env_info.agents))
# number of actions
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
# examine the state space
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
# -
# ### 4. It's Your Turn!
#
# Now it's your turn to train your own agent to solve the environment! When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```
from src.dqn_agent import Agent
agent = Agent(state_size, action_size, 1)
def monitor(env, agent, n_episodes=1000):
    """Run the agent in the Unity environment and monitor its performance.

    For each episode: reset the environment, act with an epsilon of
    1/(episode+1), feed each transition to agent.step(), and accumulate
    the episode score.  Stops early once the mean of the last 100 scores
    exceeds 13 (the "solved" threshold).  Returns the list of scores.

    NOTE(review): relies on the notebook-global ``brain_name`` defined
    above -- confirm when extracting this into a module.
    """
    scores = []
    best_score = -np.inf
    for episode in range(n_episodes):
        env_info = env.reset(train_mode=True)[brain_name]
        state = env_info.vector_observations[0]
        score = 0
        while True:
            # epsilon decays as 1/(episode+1)
            action = agent.act(state, 1/(episode+1))
            env_info = env.step(action)[brain_name]
            next_state = env_info.vector_observations[0]
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            agent.step(state, action, reward, next_state, done)
            score += reward
            state = next_state
            if done:
                scores.append(score)
                break
        # track the best trailing-100-episode average seen so far
        if np.mean(scores[-100:]) > best_score:
            best_score = np.mean(scores[-100:])
        print("\rEpisode {} || Best average reward {}".format(episode, best_score), end="")
        sys.stdout.flush()
        if (episode+1) % 50 == 0:
            print("\rEpisode: {}	Mean score: {} ".format(episode+1, np.mean(scores[-50:])))
        if np.mean(scores[-100:]) > 13:
            print("\rEnvironment solved in {} episodes! ".format(episode+1))
            break
    return scores
agent.load("checkpoints/")
agent.set_mode("eval")
score = monitor(env, agent, 1000)
import seaborn as sns
sns.lineplot(x=range(len(score)), y=np.array(score))
| true |
04a32402e7a7ea238076844b5c0fc31a304ac092
|
Python
|
mohamedalani/Other
|
/Challenge Sounds/python_code.py
|
UTF-8
| 1,249 | 3.4375 | 3 |
[] |
no_license
|
import sys
# Recursive generator yielding every way to break the word list into lines;
# adjacent words placed on the same line are joined by a single space.
def get_all_splits(array):
    if len(array) <= 1:
        yield array
        return
    head, tail = array[0], array[1:]
    for split in get_all_splits(tail):
        # either merge the head word onto the split's first line...
        yield [head + " " + split[0]] + split[1:]
        # ...or give the head word its own line
        yield [head] + split

def get_max_font(wide, height, words):
    """Return the largest integer font size at which some line-split of
    *words* fits a wide x height grid (1 character cell per font unit).
    Returns at least 1 even when nothing fits (matching the original).

    FIX: the original shadowed the *words* parameter with its loop
    variable and probed font sizes with an incremental while loop; the
    best fitting size is now computed directly with floor division.
    """
    best = 1
    for lines in get_all_splits(words):
        longest = max(len(line) for line in lines)
        # size j fits iff longest*j <= wide and len(lines)*j <= height
        fit = min(wide // longest, height // len(lines))
        if fit > best:
            best = fit
    return best
def process_file(filename):
    """Read the challenge input file and write "Case #i: size" lines to
    output.txt (and echo them to stdout).

    The first input line is the case count; every following line is
    "W H word word ...".  FIXES: file handles are now closed via context
    managers (the input file was never closed and ``file`` shadowed a
    name), get_max_font is computed once per case instead of twice, and
    blank trailing lines are skipped instead of crashing on int().
    """
    with open(filename, "r") as input_file, open("output.txt", "w") as out:
        lines = input_file.read().split("\n")
        for i, line in enumerate(lines[1:]):
            if not line.strip():
                continue    # tolerate a trailing newline in the input
            size = [int(item) for item in line.split()[:2]]
            sentence = " ".join(line.split()[2:])
            result = "Case #{}: {}".format(i, get_max_font(size[0], size[1], sentence.split()))
            print(result)
            out.write(result + "\n")
process_file(sys.argv[1])
| true |
24eb0b9e106d0da9c07f92a852a0d78e119be5f7
|
Python
|
AwuorMarvin/Day-Three-Bootcamp
|
/WordCount/words_count.py
|
UTF-8
| 578 | 3.34375 | 3 |
[] |
no_license
|
def words(sentence):
    """Count word occurrences in *sentence*.

    Returns a dict mapping each distinct word to its count; words made
    only of digits are keyed by their int value (e.g. "2" -> key 2),
    matching the original behavior.

    FIX: the original used O(n^2) nested loops that re-counted every
    word once per word; this is a single O(n) pass.
    """
    counts = {}
    for word in sentence.split():
        key = int(word) if word.isdigit() else word
        counts[key] = counts.get(key, 0) + 1
    return counts
words('This is a test')
| true |
a757eb4617b82b72da2672f043c78d17e643c9cd
|
Python
|
pavelsimo/ProgrammingContest
|
/spoj/328_BISHOPS/P328.py
|
UTF-8
| 264 | 3.046875 | 3 |
[] |
no_license
|
#!/usr/bin/python
# @BEGIN_OF_SOURCE_CODE
# @SPOJ BISHOPS C++ "Simple Math"
# Maximum number of non-attacking bishops on an n x n board:
# 1 when n == 1, otherwise 2n - 2.
# FIX: converted the Python 2 print statements to print() calls, which
# behave identically on Python 2 (single argument) and run on Python 3.
from sys import stdin
while True:
    line = stdin.readline()
    if line == '':
        break
    n = int(line)
    if n == 1:
        print(1)
    else:
        print(2 * n - 2)
# @END_OF_SOURCE_CODE
| true |
0480e74d00b77b7e62beebbcc016372a65214b9c
|
Python
|
JordanMcK5/Week_2_Classes_weekend_hw
|
/tests/guest_test.py
|
UTF-8
| 564 | 3.046875 | 3 |
[] |
no_license
|
import unittest
from src.guest import Guest
from src.room import Room
from src.song import Song
class TestGuest(unittest.TestCase):
    """Unit tests for Guest: constructor attributes and entry-fee payment."""

    def setUp(self):
        # fresh guest before every test: name "Katy" with 20 cash
        self.guest = Guest("Katy", 20)

    def test_guest_has_name(self):
        self.assertEqual("Katy", self.guest.guest_name)

    def test_guest_has_cash(self):
        self.assertEqual(20, self.guest.cash)

    def test_guest_pays_entry_fee(self):
        # NOTE(review): Room's last positional arg appears to be the entry
        # fee (cash drops 20 -> 15); confirm against the Room definition.
        entry_fee = Room("lounge", 8, 100, 5)
        self.guest.pay_entry_fee(entry_fee)
        self.assertEqual(15, self.guest.cash)
| true |
aa8d379e321e6b62ba7991ec98408b87b41e5e21
|
Python
|
pombredanne/wespe
|
/wespe/batch_uploaders/facebook/facebook_batch.py
|
UTF-8
| 7,420 | 2.671875 | 3 |
[
"Apache-2.0"
] |
permissive
|
import logging
from functools import partial
from typing import List, Union
from facebook_business.api import FacebookAdsApi, FacebookRequest, FacebookResponse
from tenacity import retry, retry_if_result, stop_after_attempt, wait_exponential
from wespe.exceptions import (
NoFacebookRequestProvidedError,
TooManyRequestsPerBatchError,
)
from .facebook_batch_request_error import FacebookBatchRequestError
from .facebook_batch_response import FacebookBatchResponse
from .retries import should_retry_facebook_batch
logger = logging.getLogger(__name__)
class FacebookBatch:
# Batch retrying parameters for transient errors
MAX_ATTEMPTS = 5 # The max number of attempts a batch execution should retry
WAIT_EXPONENTIAL_MULTIPLIER = 1 # The exponential multiplier
WAIT_EXPONENTIAL_MIN_IN_SECONDS = (
1 # The minimum waiting time derived from the exponential multiplier
)
WAIT_EXPONENTIAL_MAX_IN_SECONDS = (
10 # The maximum waiting time derived from the exponential multiplier
)
    def __init__(self, requests: List[FacebookRequest], api: FacebookAdsApi):
        """Initialize a batch of at most 50 Facebook requests.

        :param requests: the FacebookRequest instances to execute.
        :param api: the FacebookAdsApi used to create and run the batch.
        :raises NoFacebookRequestProvidedError: when *requests* is empty.
        :raises TooManyRequestsPerBatchError: when more than 50 requests
            are given (the Graph API batch limit).
        """
        if not requests:
            raise NoFacebookRequestProvidedError(
                "At least one facebook request must be provided"
            )

        num_requests = len(requests)
        if num_requests > 50:
            # See more on https://developers.facebook.com/docs/graph-api/making-multiple-requests
            raise TooManyRequestsPerBatchError(
                "A maximum of 50 requests per batch is supported"
            )

        self._api = api
        self._batch = None  # created lazily by execute()
        self._requests = requests
        # responses/errors are kept parallel to self._requests (same index)
        self._responses = [None] * num_requests
        self._errors = [None] * num_requests
@property
def requests(self) -> List[FacebookRequest]:
"""
Returns all FacebookRequest instances.
:return: a list of FacebookRequest instances.
"""
return self._requests
@property
def responses(self) -> List[Union[None, FacebookBatchResponse]]:
"""
Returns the responses for the executed FacebookRequest instances. The amount and order of elements will
be the same as of FacebookRequest instances. FacebookRequest with errors will have None as their value
in the respective index.
:return: a list of FacebookBatchResponse instance and/or None.
"""
return self._responses
@property
def errors(self) -> List[Union[None, FacebookBatchRequestError]]:
"""
Returns the errors for the executed FacebookRequest instances. The amount and order of elements will
be the same as of FacebookRequest instances. FacebookRequest without errors will have None as their value
in the respective index.
:return: a list of FacebookBatchRequestError instances and/or None.
"""
return self._errors
@retry()
def execute(self) -> "FacebookBatch":
"""
Execute all requests. This method will be retried for any failed transient errors. For such we employ an
exponential backoff approach.
The exponential back off defaults to wait 2^x * 1 seconds between each retry, up to 10 seconds, then 10
seconds afterwards for a maximum of 5 attempts. Those values can be tweaked by playing with MAX_ATTEMPTS,
WAIT_EXPONENTIAL_MULTIPLIER, WAIT_EXPONENTIAL_MIN_IN_SECONDS, and WAIT_EXPONENTIAL_MAX_IN_SECONDS static
variables.
:raises: RetryError: when retries failed for MAX_ATTEMPTS.
:return: self.
"""
# We set the retry settings during runtime, so don't worry about this method never finishing
self._initialize_execution_retrial_conditions()
self._batch = self._api.new_batch()
for request_index, request in enumerate(self._requests):
response = self.responses[request_index]
error = self.errors[request_index]
if (response is None and error is None) or (error and error.is_transient):
self._batch.add_request(
request,
success=partial(
self._default_success_callback, request_index=request_index
),
failure=partial(
self._default_failure_callback, request_index=request_index
),
)
self._batch.execute()
return self
def _initialize_execution_retrial_conditions(self):
"""
This will re-initialize, during runtime, the arguments of the retry decorator wrapping the execute method
with class-level retry settings. Those are MAX_ATTEMPTS, WAIT_EXPONENTIAL_MULTIPLIER,
WAIT_EXPONENTIAL_MIN_IN_SECONDS, and WAIT_EXPONENTIAL_MAX_IN_SECONDS.
:return:
"""
retry_settings = self.execute.retry
retry_settings.retry = retry_if_result(should_retry_facebook_batch)
retry_settings.stop = stop_after_attempt(self.MAX_ATTEMPTS)
retry_settings.wait = wait_exponential(
multiplier=self.WAIT_EXPONENTIAL_MULTIPLIER,
min=self.WAIT_EXPONENTIAL_MIN_IN_SECONDS,
max=self.WAIT_EXPONENTIAL_MAX_IN_SECONDS,
)
def _default_failure_callback(
self, response: FacebookResponse, request_index: int, object_id: int = None
):
"""
A method that can be used to raise exceptions when the batch object is used for bulk operations.
This callback raises a custom FacebookAPIError.
This is intended to be used as a default callback to fallback to in case a user does not provide one.
:param response: Facebook response object.
:param request_index: The index of the request in the whole batch.
:param object_id: (Optional) The ID of the object being updated.
"""
request = self._requests[request_index]
batch__error = FacebookBatchRequestError(
request=request, request_error=response.error()
)
self._responses[request_index] = None
self._errors[request_index] = batch__error
error_msg = ["#{} -".format(request_index)]
if object_id:
error_msg.append("Error updating object with id [{}].".format(object_id))
error_msg.append(str(batch__error))
logger.error(" ".join(error_msg))
def _default_success_callback(
self, response: FacebookResponse, request_index: int, object_id: int = None
):
"""
A method that can be used to log when the batch object has completed successfully.
This is intended to be used as a default callback to fallback to in case a user does not provide one.
:param response: Facebook response object.
:param request_index: The index of the request in the whole batch.
:param object_id: The ID of the object being updated.
"""
request = self._requests[request_index]
batch_response = FacebookBatchResponse(request=request, response=response)
self._responses[request_index] = batch_response
self._errors[request_index] = None
if object_id is None:
object_id = response.json().get("id")
logger.debug(
"Request #{}: Object with id [{}] updated successfully!".format(
request_index, str(object_id)
)
)
| true |
41c2d0575b769b055c5fb64ca6609d06f0ea0294
|
Python
|
Zhaoty96/lisatools
|
/MLDCpipelines2/bin/playXML.py
|
UTF-8
| 2,936 | 2.671875 | 3 |
[] |
no_license
|
#!/usr/bin/env python
__version__='$Id: $'
import lisaxml
import numpy
import math
import sys
import re
import wave
from optparse import OptionParser
def triang(windowsize,symmetric = 1):
    """Return a triangular window.

    The window rises linearly over `windowsize` samples, peaks at 1.0, and
    falls symmetrically, for a total length of 2*windowsize + 1.  When
    `symmetric` is falsy the final sample is dropped (length 2*windowsize).
    """
    rising = numpy.arange(windowsize) / float(windowsize)
    window = numpy.concatenate((rising, [1.0], rising[::-1]))
    if not symmetric:
        window = window[:-1]
    return window
def normalize(sound,cadence,timewindow = 0.1):
    """Locally normalize `sound` so its envelope is roughly constant.

    Works on overlapping windows of `timewindow` seconds at sample rate
    `cadence`, dividing each window by its mean absolute level and blending
    windows with a triangular kernel.  Output is clipped to [-1, 1] and
    rescaled to peak at 1.  The result is truncated to a multiple of the
    window size, so it can be shorter than the input.
    """
    length = len(sound)
    windowsize = int(timewindow * cadence)
    # chop to a multiple of windowsize
    newlength = windowsize * int(length/windowsize)
    newsound = numpy.zeros(newlength,'d')
    normlength = int(length/windowsize)
    # NOTE(review): `norms` is initialized but never used below — looks like
    # leftover from an earlier implementation.
    norms = numpy.zeros(normlength+1,'d')
    norms[0] = norms[-1] = 0.0
    kernel = triang(windowsize,0)
    # normalize
    for i in range(1,normlength):
        ir = (i-1)*windowsize
        er = (i+1)*windowsize
        # Mean absolute level over the two-window span (factor 4 boosts gain).
        norm = 4.0 * numpy.sum(numpy.abs(sound[ir:er])) / (2 * windowsize)
        if norm != 0.0:
            newsound[ir:er] += kernel * sound[ir:er] / norm
    # clip
    newsound[newsound > 1.0] = 1.0
    newsound[newsound < -1.0] = -1.0
    # renormalize
    # NOTE(review): divides by the global peak; an all-zero input would divide
    # by zero here — confirm inputs are always non-silent.
    newsound[:] /= numpy.max(numpy.abs(newsound[:]))
    return newsound
# begin parsing arguments
# NOTE: this script is Python 2 (print statements, string-to-wave API usage).
parser = OptionParser(usage="usage: %prog [options] TDIFILE.xml WAVEFILE.wav ...",
                      version="$Id: $")
(options, args) = parser.parse_args()
if len(args) < 1 or len(args) > 2:
    parser.error("You must specify one input file (XML), or an input file (XML) and an output file (WAV)!")
inputfile = args[0]
if len(args) == 2:
    outputfile = args[1]
else:
    # Default output name: same as input with .xml replaced by .wav.
    outputfile = re.sub('\.xml','.wav',inputfile)
inputxml = lisaxml.readXML(inputfile)
tdi = inputxml.getTDIObservables()[0]
inputxml.close()
DataType = tdi.DataType
Length = tdi.TimeSeries.Length
Cadence = tdi.TimeSeries.Cadence
# Combine the X/Y/Z TDI channels into the noise-orthogonal A and E channels.
if tdi.DataType == 'FractionalFrequency':
    A = (2.0*tdi.Xf - tdi.Yf - tdi.Zf) / 3.0
    E = (tdi.Zf - tdi.Yf) / math.sqrt(3.0)
elif tdi.DataType == 'Strain':
    A = (2.0*tdi.Xs - tdi.Ys - tdi.Zs) / 3.0
    E = (tdi.Zs - tdi.Ys) / math.sqrt(3.0)
else:
    # NOTE(review): bare `raise` outside an except block is a RuntimeError in
    # practice — an explicit ValueError with a message would be clearer.
    raise
# standard frame rates: 44100, 22050, or 11025
# 2**21 samples (1 year @ 15 s) @ 44.1 kHz maps 1 year to 47 s
# 1e-4 Hz -> 66 Hz, 1e-2 -> 6615 Hz (probably OK)
wavefreq = 44100
print "Mapping 1e-4 -> %s, 1e-1 -> %s Hz" % (wavefreq*1.0e-4*Cadence,wavefreq*1.0e-1*Cadence)
windowsize = int(0.1 * wavefreq)
# normalize both channels independently
A = normalize(A,wavefreq)
E = normalize(E,wavefreq)
# create integer buffer and fill it (stereo interleave: A = left, E = right)
intbuffer = numpy.empty(2*len(A),numpy.short)
# NOTE(review): samples at exactly +/-1.0 scale to +/-32768, which overflows
# int16 (max 32767) — consider scaling by 32767 instead.
intbuffer[::2] = 32768.0 * A[:]
intbuffer[1::2] = 32768.0 * E[:]
outputwave = wave.open(outputfile,'w')
outputwave.setnchannels(2)
outputwave.setsampwidth(2)
# NOTE(review): `Length` is the original series length, but normalize() may
# have truncated A/E — writeframes() below fixes up the header anyway.
outputwave.setnframes(Length)
outputwave.setframerate(wavefreq)
outputwave.writeframes(intbuffer.tostring())
outputwave.close()
sys.exit(0)
| true |
9198ab706ae4d21e52c1e292da4dba2e8e0b210f
|
Python
|
windniw/just-for-fun
|
/leetcode/373.py
|
UTF-8
| 1,276 | 3.703125 | 4 |
[
"Apache-2.0"
] |
permissive
|
"""
link: https://leetcode.com/problems/find-k-pairs-with-smallest-sums
problem: 用 nums1, nums2 中元素组成元素对 (u,v), 其中 u ∈ nums1,v ∈ nums2,求所有元素对中和最小的前 k 对
solution: 小根堆。因为 (nums[i], nums[j]) < (nums[i], nums[j+1]),可以肯定前者未出堆时后者入堆也没有意义,在前者出堆再将后者入堆
保持堆大小为 n,而不需要 mn,时间复杂度(klogn)
"""
class Solution:
    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:
        """Return the k pairs (u, v), u from nums1 and v from nums2, with the
        smallest sums.

        A min-heap holds one candidate per nums1 element; when a pair
        (nums1[i], nums2[j]) is popped, its successor (nums1[i], nums2[j+1])
        is pushed, keeping the heap at most len(nums1) entries.
        """
        class Entry:
            """Heap entry ordered by the pair sum only."""
            def __init__(self, left: int, right: int, pos: int):
                self.left = left    # element taken from nums1
                self.right = right  # element taken from nums2
                self.pos = pos      # index of `right` within nums2
            def __lt__(self, other):
                return self.left + self.right < other.left + other.right
        if not nums1 or not nums2:
            return []
        heap = []
        for value in nums1:
            heapq.heappush(heap, Entry(value, nums2[0], 0))
        result = []
        while heap and len(result) < k:
            smallest = heapq.heappop(heap)
            result.append([smallest.left, smallest.right])
            successor = smallest.pos + 1
            if successor < len(nums2):
                heapq.heappush(heap, Entry(smallest.left, nums2[successor], successor))
        return result
| true |
6f9cab72c2f2b80fa79604062f14489f4c16ae43
|
Python
|
xiangzhi/hw-fall2015
|
/Math/projects/code/knnTest.py
|
UTF-8
| 372 | 2.859375 | 3 |
[] |
no_license
|
#!/usr/bin/env python
from sklearn.neighbors import NearestNeighbors
import numpy as np
# Sample points in 2-D feature space.
X = np.array([[-1, -1], [0.2, 0.5], [-2, -1], [-3, -2], [0.5, 0.5], [1, 1], [2, 1], [3, 2]])
# Fit a 2-nearest-neighbour index backed by a ball tree.
nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(X)
test = np.array([0, 0])
test = test.reshape(1, -1)  # kneighbors expects a 2-D (n_queries, n_features) array
distances, indices = nbrs.kneighbors(test)
# print() call: the original used the Python 2 print statement, which is a
# syntax error on the Python 3 interpreters modern scikit-learn requires.
print(distances, indices)
| true |
0fdedf346c424199f989cedebcfc20b697f1efb6
|
Python
|
fagan2888/arviz
|
/arviz/plots/mcseplot.py
|
UTF-8
| 8,683 | 2.625 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""Plot quantile MC standard error."""
import numpy as np
import xarray as xr
from scipy.stats import rankdata
from ..data import convert_to_dataset
from ..stats import mcse
from ..stats.stats_utils import quantile as _quantile
from .plot_utils import (
xarray_var_iter,
_scale_fig_size,
make_label,
default_grid,
_create_axes_grid,
get_coords,
filter_plotters_list,
)
from ..utils import _var_names
def plot_mcse(
    idata,
    var_names=None,
    coords=None,
    errorbar=False,
    figsize=None,
    textsize=None,
    extra_methods=False,
    rug=False,
    rug_kind="diverging",
    n_points=20,
    ax=None,
    rug_kwargs=None,
    extra_kwargs=None,
    text_kwargs=None,
    **kwargs
):
    """Plot quantile or local Monte Carlo Standard Error.
    Parameters
    ----------
    idata : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
    var_names : list of variable names, optional
        Variables to be plotted.
    coords : dict, optional
        Coordinates of var_names to be plotted. Passed to `Dataset.sel`
    errorbar : bool, optional
        Plot quantile value +/- mcse instead of plotting mcse.
    figsize : tuple, optional
        Figure size. If None it will be defined automatically.
    textsize: float, optional
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
        on figsize.
    extra_methods : bool, optional
        Plot mean and sd MCSE as horizontal lines. Only taken into account when
        ``errorbar=False``.
    rug : bool
        Plot rug plot of values diverging or that reached the max tree depth.
    rug_kind : bool
        Variable in sample stats to use as rug mask. Must be a boolean variable.
    n_points : int
        Number of points for which to plot their quantile/local ess or number of subsets
        in the evolution plot.
    ax : axes, optional
        Matplotlib axes. Defaults to None.
    rug_kwargs : dict
        kwargs passed to rug plot.
    extra_kwargs : dict, optional
        kwargs passed to ax.plot for extra methods lines.
    text_kwargs : dict, optional
        kwargs passed to ax.annotate for extra methods lines labels. It accepts the additional
        key ``x`` to set ``xy=(text_kwargs["x"], mcse)``
    **kwargs
        Passed as-is to plt.hist() or plt.plot() function depending on the value of `kind`.
    Returns
    -------
    ax : matplotlib axes
    References
    ----------
    * Vehtari et al. (2019) see https://arxiv.org/abs/1903.08008
    Examples
    --------
    Plot quantile Monte Carlo Standard Error.
    .. plot::
        :context: close-figs
        >>> import arviz as az
        >>> idata = az.load_arviz_data("centered_eight")
        >>> coords = {"school": ["Deerfield", "Lawrenceville"]}
        >>> az.plot_mcse(
        ...     idata, var_names=["mu", "theta"], coords=coords
        ... )
    """
    if coords is None:
        coords = {}
    if "chain" in coords or "draw" in coords:
        raise ValueError("chain and draw are invalid coordinates for this kind of plot")
    data = get_coords(convert_to_dataset(idata, group="posterior"), coords)
    var_names = _var_names(var_names, data)
    # Quantile grid: n_points evenly spaced probabilities, excluding both tails.
    probs = np.linspace(1 / n_points, 1 - 1 / n_points, n_points)
    # One mcse() evaluation per quantile, stacked along a new "mcse_dim".
    mcse_dataset = xr.concat(
        [mcse(data, var_names=var_names, method="quantile", prob=p) for p in probs], dim="mcse_dim"
    )
    plotters = filter_plotters_list(
        list(xarray_var_iter(mcse_dataset, var_names=var_names, skip_dims={"mcse_dim"})),
        "plot_mcse",
    )
    length_plotters = len(plotters)
    rows, cols = default_grid(length_plotters)
    (figsize, ax_labelsize, titlesize, xt_labelsize, _linewidth, _markersize) = _scale_fig_size(
        figsize, textsize, rows, cols
    )
    # Defaults for the main markers; short aliases (ls/lw/ms) are honored.
    kwargs.setdefault("linestyle", kwargs.pop("ls", "none"))
    kwargs.setdefault("linewidth", kwargs.pop("lw", _linewidth))
    kwargs.setdefault("markersize", kwargs.pop("ms", _markersize))
    kwargs.setdefault("marker", "_" if errorbar else "o")
    kwargs.setdefault("zorder", 3)
    if extra_kwargs is None:
        extra_kwargs = {}
    extra_kwargs.setdefault("linestyle", extra_kwargs.pop("ls", "-"))
    extra_kwargs.setdefault("linewidth", extra_kwargs.pop("lw", _linewidth / 2))
    extra_kwargs.setdefault("color", "k")
    extra_kwargs.setdefault("alpha", 0.5)
    if extra_methods:
        mean_mcse = mcse(data, var_names=var_names, method="mean")
        sd_mcse = mcse(data, var_names=var_names, method="sd")
        if text_kwargs is None:
            text_kwargs = {}
        text_x = text_kwargs.pop("x", 1)
        text_kwargs.setdefault("fontsize", text_kwargs.pop("size", xt_labelsize * 0.7))
        text_kwargs.setdefault("alpha", extra_kwargs["alpha"])
        text_kwargs.setdefault("color", extra_kwargs["color"])
        text_kwargs.setdefault("horizontalalignment", text_kwargs.pop("ha", "right"))
        text_va = text_kwargs.pop("verticalalignment", text_kwargs.pop("va", None))
    if ax is None:
        _, ax = _create_axes_grid(
            length_plotters, rows, cols, figsize=figsize, squeeze=False, constrained_layout=True
        )
    # One subplot per (variable, coordinate selection) pair.
    for (var_name, selection, x), ax_ in zip(plotters, np.ravel(ax)):
        if errorbar or rug:
            values = data[var_name].sel(**selection).values.flatten()
        if errorbar:
            quantile_values = _quantile(values, probs)
            ax_.errorbar(probs, quantile_values, yerr=x, **kwargs)
        else:
            ax_.plot(probs, x, label="quantile", **kwargs)
            if extra_methods:
                mean_mcse_i = mean_mcse[var_name].sel(**selection).values.item()
                sd_mcse_i = sd_mcse[var_name].sel(**selection).values.item()
                ax_.axhline(mean_mcse_i, **extra_kwargs)
                # Place each label above/below its line so the two don't overlap.
                ax_.annotate(
                    "mean",
                    (text_x, mean_mcse_i),
                    va=text_va
                    if text_va is not None
                    else "bottom"
                    if mean_mcse_i > sd_mcse_i
                    else "top",
                    **text_kwargs,
                )
                ax_.axhline(sd_mcse_i, **extra_kwargs)
                ax_.annotate(
                    "sd",
                    (text_x, sd_mcse_i),
                    va=text_va
                    if text_va is not None
                    else "bottom"
                    if sd_mcse_i >= mean_mcse_i
                    else "top",
                    **text_kwargs,
                )
        if rug:
            if rug_kwargs is None:
                rug_kwargs = {}
            if not hasattr(idata, "sample_stats"):
                raise ValueError("InferenceData object must contain sample_stats for rug plot")
            if not hasattr(idata.sample_stats, rug_kind):
                raise ValueError("InferenceData does not contain {} data".format(rug_kind))
            rug_kwargs.setdefault("marker", "|")
            rug_kwargs.setdefault("linestyle", rug_kwargs.pop("ls", "None"))
            rug_kwargs.setdefault("color", rug_kwargs.pop("c", kwargs.get("color", "C0")))
            rug_kwargs.setdefault("space", 0.1)
            rug_kwargs.setdefault("markersize", rug_kwargs.pop("ms", 2 * _markersize))
            mask = idata.sample_stats[rug_kind].values.flatten()
            # `values` was set in the errorbar/rug branch above (flattened draws).
            values = rankdata(values)[mask]
            y_min, y_max = ax_.get_ylim()
            y_min = y_min if errorbar else 0
            # NOTE(review): "space" is popped here on every axes iteration, so a
            # user-supplied value only survives for the first axes (setdefault
            # restores 0.1 afterwards) — confirm this is intended.
            rug_space = (y_max - y_min) * rug_kwargs.pop("space")
            rug_x, rug_y = values / (len(mask) - 1), np.full_like(values, y_min) - rug_space
            ax_.plot(rug_x, rug_y, **rug_kwargs)
            ax_.axhline(y_min, color="k", linewidth=_linewidth, alpha=0.7)
        ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)
        ax_.tick_params(labelsize=xt_labelsize)
        ax_.set_xlabel("Quantile", fontsize=ax_labelsize, wrap=True)
        ax_.set_ylabel(
            r"Value $\pm$ MCSE for quantiles" if errorbar else "MCSE for quantiles",
            fontsize=ax_labelsize,
            wrap=True,
        )
        ax_.set_xlim(0, 1)
        if rug:
            # Keep tick labels inside the data range so the rug strip stays clear.
            ax_.yaxis.get_major_locator().set_params(nbins="auto", steps=[1, 2, 5, 10])
            y_min, y_max = ax_.get_ylim()
            yticks = ax_.get_yticks()
            yticks = yticks[(yticks >= y_min) & (yticks < y_max)]
            ax_.set_yticks(yticks)
            ax_.set_yticklabels(["{:.3g}".format(ytick) for ytick in yticks])
        elif not errorbar:
            ax_.set_ylim(bottom=0)
    return ax
| true |
2cfc1fa6b15e261a4804441fafe1c40b109b8488
|
Python
|
Redwoods/Py
|
/py2020/Code/ch2/ch2_01_numeric.py
|
UTF-8
| 421 | 3.703125 | 4 |
[] |
no_license
|
# ch2_01_numeric.py
#
# Tutorial script demonstrating Python numeric types and operators.
# NOTE: the bare expressions below (e.g. `a`, `a + b`) are no-ops when run as
# a script; they only echo their value in an interactive REPL session.
print("숫자형: 정수")
a = 123       # positive integer
a = -178      # negative integer
a = 0
print("숫자형: 실수")
a = 1.2       # float literals
a = -3.48
a = 4.24e10   # scientific notation
a = 4.24e-10
print("숫자형: 8진수와 16진수")
a = 0o177     # octal literal
a
a = 0x8FF     # hexadecimal literals
a
a = 0xABC
a
print("숫자형: 연산자와 연산")
a = 3
b = 4
a + b         # addition
a * b         # multiplication
a / b         # true division (float result)
a ** b        # exponentiation
a % b         # modulo
a // b        # floor division
14 // 3
14 % 3
"""
Author: redwoods
파이썬 코드: ch2_01_numeric.py
"""
| true |
c72e411a4bdee2ce6c8fa77434c24b8c732c22f9
|
Python
|
filyovaaleksandravl/geek-python
|
/lesson01_DZ/04_max_var.py
|
UTF-8
| 239 | 3.234375 | 3 |
[] |
no_license
|
# Read an integer and print its largest decimal digit.
var_1 = int(input("Введите число: "))
# Extract the digits from the decimal string of the absolute value.  abs()
# fixes the original's behavior for negative input, where `% 10` / `// 10`
# produced wrong "digits" and len(str(n)) counted the '-' sign.
lists1 = [int(digit) for digit in str(abs(var_1))]
print(max(lists1))
| true |
153dc578231109388ab7ebbbdea11db6f184f65d
|
Python
|
Pedro-H-Castoldi/descobrindo_Python
|
/pphppe/sessao17/heranca_multipla.py
|
UTF-8
| 2,653 | 4.5 | 4 |
[] |
no_license
|
"""
POO - Herança Múltipla
É a possibilidade de uma classe herdar de múltiplas classes. Desse modo,
a classe filha herda todos os atributos e métodos das super classes.
OBS: A Herança Múltipla pode ser feita de duas maneiras:
- Multiderivação Direta;
- Multiderivação Indireta.
# Exemplo de Multiderivação Direta
class Base1():
pass
class Base2():
pass
class Base3():
pass
class Multipladerivacao(Base1, Base2, Base3): # Note q a herança é dada diretamente na classe Multipladerivacao
pass
# Exemplo de Multipladerivação Indireta
class Base1():
pass
class Base2(Base1):
pass
class Base3(Base2):
pass
class Multipladerivacao(Base3): # Note q a classe Multipladerivacao herda de modo indiretamente as classe Base2 e Base 1
pass
# OBS: N importa se a classe herdar diretamente ou n outra classe, a mesma herdará todos os atributos e métodos das super classes.
"""
# EX de Herança Múltipla
class Animal:
    """Base class: an animal identified by a read-only name."""
    def __init__(self, nome):
        self.__nome = nome  # name-mangled private attribute
    @property
    def nome(self):
        """The animal's name (read-only)."""
        return self.__nome
    def cumprimentar(self):
        """Return the animal's greeting."""
        return 'Olá. Meu nome é ' + self.nome + '.'
class Terrestre(Animal):
    """A land animal: greets as being from the land and can walk."""
    def __init__(self, nome):
        super().__init__(nome)
    def cumprimentar(self):
        """Override: greeting identifies the animal as terrestrial."""
        return 'Olá. Meu nome é ' + self.nome + ' da Terra.'
    def andar(self):
        """Return a message saying the animal is walking."""
        return self.nome + ' está andando.'
class Aquatico(Animal):
    """A sea animal: greets as being from the sea and can swim."""
    def __init__(self, nome):
        super().__init__(nome)
    def cumprimentar(self):
        """Override: greeting identifies the animal as aquatic."""
        return 'Olá. Meu nome é ' + self.nome + ' do mar.'
    def nadar(self):
        """Return a message saying the animal is swimming."""
        return self.nome + ' está nadando.'
class TerestreAquatico(Aquatico, Terrestre):
    """Amphibious animal (multiple inheritance).

    The MRO resolves Aquatico before Terrestre, so cumprimentar() answers
    "Olá. Meu nome é ... do mar." — the first base class listed wins.
    """
    def __init__(self, nome):
        super().__init__(nome)
# Demonstration: each subclass greets with its own override.
tatu = Terrestre('Tatu')
print(tatu.cumprimentar())
print(tatu.andar())
print()
tubarao = Aquatico('Tubarão')
print(tubarao.cumprimentar())
print(tubarao.nadar())
print()
pinguim = TerestreAquatico('Pinguim')
print(pinguim.cumprimentar())  # Prints "do mar", not "da Terra": the leftmost base class (Aquatico) wins in the MRO.
print(pinguim.andar())
print(pinguim.nadar())
print()
# Checking whether an object is an instance of a class
print(f'Pinguim é instância de Terrestre? : {isinstance(pinguim, Terrestre)}')  # True
print(f'Tatu é instância de Aquatico? : {isinstance(tatu, Aquatico)}')  # False
print(f'Tubarão é instância de objeto? : {isinstance(tubarao, object)}')  # True (every class derives from object).
| true |
5cde147d2956a83e241f066bb0e76f03c6042e21
|
Python
|
314H/competitive-programming
|
/marathon-codes/Algoritmos Complexos/aho-corasick.py
|
UTF-8
| 2,194 | 3.546875 | 4 |
[] |
no_license
|
class AhoNode:
    """Trie node for the Aho-Corasick automaton."""
    def __init__(self):
        self.goto = {}    # char -> child AhoNode (trie transitions)
        self.out = []     # patterns that end at this node (plus inherited ones)
        self.fail = None  # failure link, filled in by aho_create_statemachine
def aho_create_forest(patterns):
    """Build the plain trie over *patterns*; each terminal node records the
    pattern ending there in node.out.  Returns the root AhoNode."""
    root = AhoNode()
    for path in patterns:
        node = root
        for symbol in path:
            # Reuse an existing child or create a fresh node for this symbol.
            node = node.goto.setdefault(symbol, AhoNode())
        node.out.append(path)
    return root
def aho_create_statemachine(patterns):
    """Build the full Aho-Corasick automaton: trie plus failure links.

    Failure links are computed breadth-first; each node also inherits the
    output list of its failure target, so all overlapping matches are
    reported during the scan.  Returns the root node.
    """
    root = aho_create_forest(patterns)
    queue = []
    # Depth-1 nodes always fail back to the root.
    for node in root.goto.values():
        queue.append(node)
        node.fail = root
    while len(queue) > 0:
        rnode = queue.pop(0)  # NOTE: list.pop(0) is O(n); collections.deque would make this O(1)
        for key, unode in rnode.goto.items():
            queue.append(unode)
            # Walk failure links until a node with a `key` transition is found
            # (or we fall off the root).
            fnode = rnode.fail
            while fnode != None and not (key in fnode.goto):
                fnode = fnode.fail
            unode.fail = fnode.goto[key] if fnode else root
            unode.out += unode.fail.out
    return root
def aho_find_all(s, root, callback):
    """Scan string *s* with the automaton rooted at *root*, calling
    callback(start_index, pattern) for every pattern occurrence."""
    node = root
    for i in range(len(s)):
        # Follow failure links until the current character can be consumed.
        while node != None and not (s[i] in node.goto):
            node = node.fail
        if node == None:
            # No transition anywhere along the failure chain: restart at root.
            node = root
            continue
        node = node.goto[s[i]]
        # Every pattern stored at this node ends at position i.
        for pattern in node.out:
            callback(i - len(pattern) + 1, pattern)
############################
# Demonstration of work
def on_occurence(pos, patterns):
    """Demo callback: print one match (start position and pattern)."""
    print("At pos: " + str(pos) + " found pattern: " + str(patterns) )
# patterns = ['a', 'ab', 'abc', 'bc', 'c', 'cba']
patterns = ['an', 'ant', 'cant', 'deca', 'decant', 'plant']
s = "ant"
print("Input:", s)
root = aho_create_statemachine(patterns)
aho_find_all(s, root, on_occurence)
# TESTE: ['an', 'ant', 'cant', 'deca', 'decant', 'plant']
# [ 1 , 2 , 3, , - , 4 , - ]
# https://en.wikipedia.org/wiki/Trie
# https://www.geeksforgeeks.org/aho-corasick-algorithm-pattern-searching/
# Para aplicar:
##### O aho-corasick deve ser aplicado sobre o proprio dicionário
# Input: decant
# At pos: 0 found pattern: deca
# At pos: 3 found pattern: an
# At pos: 0 found pattern: decant
# At pos: 2 found pattern: cant
# At pos: 3 found pattern: ant
# Se você ordenar pelo patters, se cada elemento perte
| true |
be93690c12ca7443cd6fe6381f2cd2bb14a1864e
|
Python
|
chuta2323/Pythonista
|
/Mr Fujiwara.py
|
UTF-8
| 1,320 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
import appex
import unicodedata
import clipboard
# Dakuten kana -> plain kana lookup table, built once at import time instead
# of on every call (the original rebuilt the dict inside the function).
_DAKUTEN_MAP = {
    'が': 'か', 'ぎ': 'き', 'ぐ': 'く', 'げ': 'け', 'ご': 'こ',
    'ざ': 'さ', 'じ': 'し', 'ず': 'す', 'ぜ': 'せ', 'ぞ': 'そ',
    'だ': 'た', 'ぢ': 'ち', 'づ': 'つ', 'で': 'て', 'ど': 'と',
    'ば': 'は', 'び': 'ひ', 'ぶ': 'ふ', 'べ': 'へ', 'ぼ': 'ほ',
    'ガ': 'カ', 'ギ': 'キ', 'グ': 'ク', 'ゲ': 'ケ', 'ゴ': 'コ',
    'ザ': 'サ', 'ジ': 'シ', 'ズ': 'ス', 'ゼ': 'セ', 'ゾ': 'ソ',
    'ダ': 'タ', 'ヂ': 'チ', 'ヅ': 'ツ', 'デ': 'テ', 'ド': 'ト',
    'バ': 'ハ', 'ビ': 'ヒ', 'ブ': 'フ', 'ベ': 'ヘ', 'ボ': 'ホ',
}
def erase_dakuten(char: str) -> str:
    """Return *char* with its dakuten (voicing mark) removed, or *char*
    unchanged when it has none.

    The annotation is corrected from ``chr`` (the builtin function, not a
    type) to ``str``; dict.get replaces the ``in myDict.keys()`` lookup.
    """
    return _DAKUTEN_MAP.get(char, char)
def cnv_mr_fujiwara(text: str) -> str:
    """Convert *text* to "Mr. Fujiwara" style.

    Each character has its dakuten stripped, then a dakuten mark '゛' is
    appended — except after punctuation, handakuten kana, and small tsu.
    The parameter is renamed from ``str`` (which shadowed the builtin and
    made the annotation ``str: str`` nonsensical); positional callers are
    unaffected.
    """
    # Characters that do not receive the trailing '゛'.
    skip = ('、', '。', '!', '?', '!', '?', 'ぱ', 'ぴ', 'ぷ', 'ぺ', 'ぽ', 'っ',
            'パ', 'ピ', 'プ', 'ペ', 'ポ', 'ッ')
    cnvStr = ''
    for char in text:
        cnvStr += erase_dakuten(char)
        if char not in skip:
            cnvStr += '゛'
    return cnvStr
if __name__ == "__main__":
    # appex/clipboard are Pythonista (iOS) modules: read the shared text,
    # convert it, print it, and put the result on the clipboard.
    input = appex.get_text()  # NOTE(review): shadows the builtin input()
    chars = list(input)
    output = cnv_mr_fujiwara(chars)  # passing a list works since the converter only iterates its argument
    print(output)
    clipboard.set(output)
| true |
469f502ba2b4798ff1e5417f628d72ac0f9d9a91
|
Python
|
sharky564/Codeforces
|
/CodeForces Problems 0101-0200/CodeForces Problem 0116A.py
|
UTF-8
| 210 | 3.078125 | 3 |
[] |
no_license
|
# Tram (Codeforces 116A): track occupancy after each stop; the answer is the
# peak number of passengers on board.  Each input line is "exited entered".
stops = int(input())
lines = [input() for _ in range(stops)]
onboard = 0
peak = 0
for line in lines:
    counts = line.split()
    onboard += int(counts[1]) - int(counts[0])
    peak = max(peak, onboard)
print(peak)
| true |
ef51f3e2131b3511165395e0bc816ade50685965
|
Python
|
Richardilemon/ORGANISE-FILES
|
/ORGANIZE FILES.py
|
UTF-8
| 1,472 | 3.4375 | 3 |
[] |
no_license
|
#This python script automatically arranges the files in your folder
#The files will be grouped according to their file type
#The file type that can be grouped are audios, videos, images, and documents
import os
from pathlib import Path
# Destination folder name -> file extensions that belong in it.
FILETYPE = {
    "AUDIO": ['.m4a', '.m4b', '.mp3', '.wav', '.flac', '.cda'],
    "DOCUMENTS": ['.pdf', '.rtf', '.txt', '.odt', '.ppt'],
    "VIDEOS": ['.mov', '.avi', '.mp4', '.wmv', '.flv', '.ogv', '.mkv', '.m4v', '.3gp'],
    "IMAGES": ['.jpg', '.jpeg', '.png']
}
def SELECTFILETYPE(value):
    """Return the category folder name for the file extension *value*.

    Falls back to 'MISC' for extensions not listed in FILETYPE.
    """
    for file_format, extensions in FILETYPE.items():
        # Membership test replaces the original inner loop over extensions.
        if value in extensions:
            return file_format
    return 'MISC'
def ORGANIZEFILE():
    """Move every file in the current working directory into a folder named
    after its category (AUDIO, DOCUMENTS, VIDEOS, IMAGES, or MISC).

    Directories are left untouched; category folders are created on demand.
    """
    for item in os.scandir():  # entries of the current directory
        if item.is_dir():  # never relocate directories (including category folders)
            continue
        filePath = Path(item)
        directory = SELECTFILETYPE(filePath.suffix.lower())  # classify by lower-cased extension
        directoryPath = Path(directory)
        # mkdir(exist_ok=True) replaces the original `is_dir() != True`
        # check-then-create, which was both unidiomatic and racy.
        directoryPath.mkdir(exist_ok=True)
        filePath.rename(directoryPath.joinpath(filePath))

ORGANIZEFILE()
| true |
b0d93b6e3278ad49433b177ce5d3f61b132b7eac
|
Python
|
cessorg/Hacktoberfest-Data-Structure-and-Algorithms
|
/Python/Stack/stack.py
|
UTF-8
| 420 | 3.796875 | 4 |
[] |
no_license
|
class Stack():
    """A simple LIFO stack backed by a Python list (top = end of list)."""
    def __init__(self, items=None):
        """Create a stack, optionally seeded with *items* (bottom-first).

        The original used a mutable default argument (``items=[]``) and then
        silently discarded the parameter; the provided items are now copied in.
        """
        self.items = list(items) if items is not None else []
    def push(self, data):
        """Push *data* on top of the stack (returns None, like list.append)."""
        i = self.items.append(data)
        return i
    def pop(self):
        """Remove and return the top item; raises IndexError when empty."""
        i = self.items.pop()
        return i
    def is_empty(self):
        """Return True when the stack holds no items."""
        return len(self.items) == 0
    def peek(self):
        """Return the top item without removing it, or None when empty."""
        if not self.is_empty():
            return self.items[-1]
    def get_stack(self):
        """Return the backing list (bottom-first)."""
        return self.items
| true |
aa537e1b9d06847d4593564f87b36fa8bc8a27a9
|
Python
|
fobbytommy/Python-Assignments
|
/01_Basic/03_averageList/averageList.py
|
UTF-8
| 192 | 3.796875 | 4 |
[] |
no_license
|
# Create a program that prints the average of the values in the list:
# a = [1, 2, 5, 10, 255, 3]
a = [1, 2, 5, 10, 255, 3]
# Built-in sum() replaces the manual accumulation loop (which also shadowed
# the builtin name `sum`).
avg = sum(a) / len(a)
# print() call: the original used the Python 2 print statement, a syntax
# error on Python 3.
print(avg)
| true |
92bc2756dfab7d11ef9b9be8f8a2d5909a01a4b6
|
Python
|
bongtrop/DIPhomwork
|
/cal.py
|
UTF-8
| 1,749 | 3.125 | 3 |
[] |
no_license
|
import numpy as np
import math
# Gaussian Elimination with Partial Pivoting.
# Solves Ax = b; A and b are modified in place during factorization.
def GEPP(A, b):
    """Solve the linear system A x = b by Gaussian elimination with
    partial pivoting.

    :param A: (n, n) numpy array; overwritten (multipliers stored below the
        diagonal, reduced rows above).
    :param b: (n,) numpy array; overwritten with the reduced right-hand side.
    :return: the solution vector x as a numpy array.
    :raises ValueError: for incompatible sizes or a singular matrix.

    Fix: the original used Python-2-only ``xrange``, which raises NameError
    on Python 3; replaced with ``range``.
    """
    n = len(A)
    if b.size != n:
        raise ValueError("Invalid argument: incompatible sizes between A & b.", b.size, n)
    for k in range(n-1):
        # Pivot: bring the largest |A[i, k]| for i >= k onto the diagonal.
        maxindex = abs(A[k:,k]).argmax() + k
        if A[maxindex, k] == 0:
            raise ValueError("Matrix is singular.")
        if maxindex != k:
            A[[k,maxindex]] = A[[maxindex, k]]
            b[[k,maxindex]] = b[[maxindex, k]]
        # Eliminate column k below the diagonal, storing the multipliers in A.
        for row in range(k+1, n):
            multiplier = A[row][k]/A[k][k]
            A[row][k] = multiplier
            for col in range(k + 1, n):
                A[row][col] = A[row][col] - multiplier*A[k][col]
            b[row] = b[row] - multiplier*b[k]
    # Back substitution, from the last row upwards.
    x = np.zeros(n)
    k = n-1
    x[k] = b[k]/A[k,k]
    while k >= 0:
        x[k] = (b[k] - np.dot(A[k,k+1:],x[k+1:]))/A[k,k]
        k = k-1
    return x
# Bilinear interpolation at fractional pixel coordinates.
# Input bilinear(Matrix Image, Position y, Position x)
def bilinear(mat, posy, posx):
    """Bilinearly interpolate the image `mat` at (posy, posx).

    Coordinates beyond the last row/column fall back to the nearest
    (floored) pixel value instead of interpolating.
    """
    y0, x0 = math.floor(posy), math.floor(posx)
    if posx > mat.shape[1] - 1 or posy > mat.shape[0] - 1:
        return mat[y0][x0]
    y1, x1 = math.ceil(posy), math.ceil(posx)
    f00 = mat[y0, x0]
    f01 = mat[y0, x1]
    f10 = mat[y1, x0]
    f11 = mat[y1, x1]
    dx = posx - x0  # fractional offsets within the cell
    dy = posy - y0
    # f(dx, dy) = f00 + (f01-f00)dx + (f10-f00)dy + (f11+f00-f01-f10)dxdy
    return (f01 - f00) * dx + (f10 - f00) * dy + (f11 + f00 - f01 - f10) * dx * dy + f00
def rms(F, G):
    """Root-mean-square difference between two equally-shaped images.

    Vectorized: one numpy pass replaces the original O(h*w) Python double
    loop, with identical results for same-shape inputs.
    """
    diff = np.asarray(F, dtype=float) - np.asarray(G, dtype=float)
    return math.sqrt(np.mean(diff ** 2))
| true |
94732ba40ad5b1cf6989c29be2410fb084908dd4
|
Python
|
CloudKing-GrowLearning/Int-Python-Resources
|
/Resources/T7 - DataFrame in Pandas/guided_activity/world_bank_df_part2_SOLUTION.py
|
UTF-8
| 559 | 2.640625 | 3 |
[] |
no_license
|
import pandas as pd
import os
import Lib as cm
# Load the World Bank CSV from the working directory (semicolon-separated).
file_dir = os.getcwd()
file_name = file_dir + '/world_data_bank.csv'
data = pd.read_csv(file_name, sep=';')
df = pd.DataFrame()
# NOTE(review): DataFrame.append is deprecated in modern pandas (use pd.concat);
# `df` is also never used after this line.
df = df.append(data.iloc[9:11])
# Rows 9 and 10 — presumably the two employment-indicator rows being compared;
# verify against the CSV layout.
employment_data = data.iloc[[9, 10]]
# Drop the metadata columns, keeping only the per-year values.
del employment_data['Country Name']
del employment_data['Country Code']
del employment_data['Indicator Name']
del employment_data['Indicator Code']
# cm.compare comes from the project-local Lib module — semantics unknown here.
print(cm.compare(employment_data.iloc[0]['2015'], employment_data.iloc[1]['2015']))
print(employment_data.iloc[0]['2015'])
print(employment_data.iloc[1]['2015'])
| true |
ea9699d5f3d6c616dde028372822a56b6227f598
|
Python
|
conejo1995/pythonLabs
|
/room_nav/room_nav.py
|
UTF-8
| 1,951 | 3.90625 | 4 |
[] |
no_license
|
from room import Room
from character import Character
def battle(player, npc):
    """Run an interactive fight loop between *player* and *npc* until the
    npc's health drops to zero.

    Only the literal command 'fight' is recognized; any other input simply
    re-prompts.
    """
    fighting = True
    print("You are now battling " + npc.name)
    while fighting:
        user_input = input("What would you like to do?")
        if user_input == 'fight':
            npc.take_damage(player.attack)
            print(npc.name + " took " + str(player.attack) + ' damage')
            if npc.health <= 0:
                print("You have defeated " + npc.name)
                fighting = False
def players_room(player, player_room):
    """Interact with the bedroom until the player goes downstairs.

    Returns 2, the room code the main loop uses for the living room.
    """
    input_strings = []
    player_room.things_in_room()
    while True:
        user_input = input("What would you like to do? ")
        user_input = user_input.lower()
        input_strings = user_input.split()
        # Accepts "down ... stairs" (both words present) or "downstairs";
        # note `and` binds tighter than `or`, which matches the intent here.
        if 'down' in input_strings and 'stairs' in input_strings or 'downstairs' in input_strings:
            return 2
def players_home(player, player_home):
    """Interact with the living room: the player can talk to or fight the
    resident character.

    Returns 1 (the room code for the bedroom) after the fight, mirroring
    players_room which returns 2 for downstairs.
    """
    input_strings = []
    player_home.things_in_room()
    while True:
        user_input = input()
        user_input = user_input.lower()
        input_strings = user_input.split()
        if 'talk' in input_strings and player_home.characters[0].name.lower() in input_strings:
            player_home.characters[0].speak()
        if 'fight' in input_strings and player_home.characters[0].name.lower() in input_strings:
            battle(player, player_home.characters[0])
            # Fix: the original assigned `user_input = 1` to a dead local and
            # never returned, so the caller's room-switching loop hung here.
            return 1
user_input = 1  # current room code: 1 = bedroom, 2 = living room
turns = 0  # NOTE(review): never updated or read afterwards — appears unused
stranger = Character('Stranger')
player = Character()
player_room = Room("This is your room, you recognize all the things in it as being your own. There is a staircase in this room leading downstairs.")
player_home = Room("This is your living room, there is a door leading outside and a staircase leading up stairs to your room.")
player_home.add_character(stranger)
# Main loop: each room handler returns the code of the next room to enter.
while True:
    if user_input ==1:
        user_input = players_room(player, player_room)
    elif user_input ==2:
        user_input = players_home(player, player_home)
| true |
9c076a465b966c53e72c7b9e1d5030d02bf91e8a
|
Python
|
Ukelili/testxiaoshu
|
/testCode.py
|
GB18030
| 4,223 | 2.875 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: gb2312 -*-
import os
import urllib
from PIL import Image
from pytesser import *
from pytesseract import *
# Download CAPTCHA image(s) from the site (Python 2: print statement,
# the removed file() builtin, urllib.urlopen).
for i in range(1):
    url = 'http://test.xiaoshushidai.com/verify.php'  # CAPTCHA endpoint
    print "download", i
    file("./pic/%04d.gif" % i, "wb").write(urllib.urlopen(url).read())
'''一'''
# Binarize every downloaded image: dark-ish pixels -> black, the rest -> white.
dir="./pic/"
path = "./font/"
for f in os.listdir(dir):
    if f.endswith(".gif"):
        img = Image.open(dir+f)  # open the CAPTCHA image
        img = img.convert("RGBA")
        pixdata = img.load()
        # Thresholding, channel by channel (R < 90, G < 136 -> black;
        # any remaining blue -> white).
        for y in xrange(img.size[1]):
            for x in xrange(img.size[0]):
                if pixdata[x, y][0] < 90:
                    pixdata[x, y] = (0, 0, 0, 255)
        for y in xrange(img.size[1]):
            for x in xrange(img.size[0]):
                if pixdata[x, y][1] < 136:
                    pixdata[x, y] = (0, 0, 0, 255)
        for y in xrange(img.size[1]):
            for x in xrange(img.size[0]):
                if pixdata[x, y][2] > 0:
                    pixdata[x, y] = (255, 255, 255, 255)
        img.save(path+f, "GIF")
# Slice each binarized CAPTCHA into 4 glyph tiles for the template library.
j = 0
dir="./font/"
for f in os.listdir(dir):
    if f.endswith(".gif"):
        img = Image.open(dir+f)
        for i in range(4):
            x = 4 + i*10  # horizontal offset of glyph i (tune per CAPTCHA layout)
            y = 6  # top margin of the glyph area
            img.crop((x, y, x+13, y+17)).save("fonts/%d.gif" % j)  # 13x17 glyph tile
            print "j=",j
            j += 1
# ͼĶֵ
# Binarize one CAPTCHA image file; returns the thresholded PIL image.
def binary(f):
    print f
    img = Image.open(f)
    # img = img.convert('1')
    img = img.convert("RGBA")  # RGBA avoids mode errors during pixel writes
    pixdata = img.load()
    # Same channel thresholds as the batch pass above: R < 90 and G < 136
    # become black, any remaining blue becomes white.
    for y in xrange(img.size[1]):
        for x in xrange(img.size[0]):
            if pixdata[x, y][0] < 90:
                pixdata[x, y] = (0, 0, 0, 255)
    for y in xrange(img.size[1]):
        for x in xrange(img.size[0]):
            if pixdata[x, y][1] < 136:
                pixdata[x, y] = (0, 0, 0, 255)
    for y in xrange(img.size[1]):
        for x in xrange(img.size[0]):
            if pixdata[x, y][2] > 0:
                pixdata[x, y] = (255, 255, 255, 255)
    return img
nume = 0  # global counter used to name the debug tiles saved in ./temp/
# Split a binarized CAPTCHA into its 4 character tiles.
def division(img):
    global nume
    font=[]
    for i in range(4):
        x=4+i*10  # horizontal offset of character i (layout-specific)
        y=5
        temp = img.crop((x,y,x+13,y+17))
        temp.save("./temp/%d.gif" % nume)  # save a copy for debugging
        nume=nume+1
        font.append(temp)
    return font
# Recognize the CAPTCHA by comparing each character tile against the
# pre-built glyph templates and picking the closest match.
def recognize(img):
    fontMods = []
    # Load the template glyphs; each entry is (label, template image).
    # NOTE(review): range(1) loads only template "0" — presumably a stub;
    # confirm the intended number of templates.
    for i in range(1):
        fontMods.append((str(i), Image.open("./fonts/%d.gif" % i)))
    result = ""
    font = division(img)
    for i in font:
        target=i
        points = []
        for mod in fontMods:
            # Count mismatching pixels over a 7x10 window, treating any
            # pixel containing a zero channel as black.
            diffs = 0
            for yi in range(10):
                for xi in range(7):
                    if 0 in target.getpixel((xi, yi)):
                        compare = 0
                    else:
                        compare = 255
                    if mod[1].getpixel((xi, yi)) != compare:
                        diffs += 1
            print "diffs" + str(diffs)
            points.append((diffs, mod[0]))
        # Smallest pixel difference wins; append its label.
        points.sort()
        result += points[0][1]
    return result
if __name__ == '__main__':
    # Binarize and recognize every downloaded CAPTCHA, saving each result
    # image under a filename containing the recognized text.
    codedir="./pic/"
    for imgfile in os.listdir(codedir):
        if imgfile.endswith(".gif"):
            dir="./result/"  # NOTE(review): shadows the dir() builtin
            img=binary(codedir+imgfile)
            num=recognize(img)
            dir += (num+".gif")
            print "save to", dir
            img.save(dir)
    # Cross-check one result with Tesseract OCR.
    images = Image.open("./result/0000.gif")
    text = image_to_string(images)
    print "text:" + text
#¶ΪԼģοеֵԱȴ
#print "mod[1].getpixel((xi, yi)):"+str(mod[1].getpixel((xi, yi)))
#print "target.getpixel((xi, yi)):"+str(target.getpixel((xi, yi)))
| true |
d276deaadca11085de13ec883bca57fbb5b6cbfa
|
Python
|
A-ZGJ/Hilbert
|
/hilbert/tests/test_wavelet.py
|
UTF-8
| 1,516 | 3.375 | 3 |
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
"""
Testing for Hilbert transform methods that use wavelets at their core
Using the math relation a^2 / (a^2 + x^2) (Lorentz/Cauchy) has an
analytical Hilbert transform: x^2 / (a^2 + x^2)
"""
import numpy as np
from numpy.testing import assert_array_almost_equal
from hilbert.wavelet import hilbert_haar, _haar_matrix
import pytest
def test_haar():
    """hilbert_haar of a Lorentzian a/(a^2+x^2) should approximate x/(a^2+x^2)."""
    # Not power-of-2 length
    n = np.linspace(-100, 100, 1000)
    x = 2/(2**2 + n**2)
    hilb_x = hilbert_haar(x)
    hilb_x_analytical = n/(2**2 + n**2)
    # decimal=1: the Haar-based transform is a coarse approximation
    assert_array_almost_equal(hilb_x_analytical, hilb_x, decimal=1)
    # Power-of-2 length
    n = np.linspace(-100, 100, 1024)
    x = 2/(2**2 + n**2)
    hilb_x = hilbert_haar(x)
    hilb_x_analytical = n/(2**2 + n**2)
    assert_array_almost_equal(hilb_x_analytical, hilb_x, decimal=1)
    # 2D version: transform should act row-wise and scale linearly with amplitude
    x2 = np.vstack((x, x/2))
    hilb_x = hilbert_haar(x2)
    hilb_x_analytical = np.vstack((n/(2**2 + n**2), 0.5*n/(2**2 + n**2)))
    assert_array_almost_equal(hilb_x_analytical, hilb_x, decimal=1)
def test_haar_errors():
    """hilbert_haar rejects unsupported axes and arrays with more than 2 dims."""
    grid = np.linspace(-100, 100, 1000)
    lorentzian = 2 / (2**2 + grid**2)

    # Only the default (last) axis is implemented
    with pytest.raises(NotImplementedError):
        _ = hilbert_haar(lorentzian, axis=0)

    # Inputs beyond 2 dimensions are invalid
    with pytest.raises(ValueError):
        _ = hilbert_haar(np.random.randn(3, 3, 3))
def test_haar_matrix():
    """_haar_matrix returns a square transform pair of the requested size."""
    haar, haar_hilbert = _haar_matrix(4)
    assert haar.shape == (4, 4)
    assert haar_hilbert.shape == (4, 4)

    # Sizes that are not a power of two are rejected
    with pytest.raises(ValueError):
        _haar_matrix(3)
| true |
f52f5ed35bc4d7f7763cb69127ae3339df27c939
|
Python
|
dimelik/army
|
/weapon/gunshot_weapon/pistol.py
|
UTF-8
| 287 | 2.90625 | 3 |
[] |
no_license
|
from gunshot_weapon import GunshotWeapon
class Pistol(GunshotWeapon):
    """A handgun; extends GunshotWeapon with an armor-piercing rating."""

    # Name-mangled class-level default; the setter creates a per-instance value.
    __armorPiercing = None

    @property
    def armor_piercing(self):
        # Armor-piercing rating (int), or None if never set.
        return self.__armorPiercing

    @armor_piercing.setter
    def armor_piercing(self, value: int):
        self.__armorPiercing = value
| true |
48b163e22353c5503f0d77044502f4cacb37d4b9
|
Python
|
HugoHF/dark_matter
|
/paper_graphs.py
|
UTF-8
| 2,037 | 2.796875 | 3 |
[] |
no_license
|
import numpy as np
from hff import get_hff
from get_freqs import get_freqs
from create_data import create_data
from chisquared_stuff import get_significance
from autocorrelation import autocorrelation
import matplotlib.pyplot as plt
from scipy.fft import fftfreq
test_stds = [0.001, 0.03, 0.1]
test_freqs = np.arange(2, 100, 1)
time_interval = 0.001
fig_num = 1
detected_1 = []
detected_2 = []
detected_3 = []
detected = [detected_1, detected_2, detected_3]
significance_1 = []
significance_2 = []
significance_3 = []
significance = [significance_1, significance_2, significance_3]
for idx, test_freq in enumerate(test_freqs):
for idx2, std in enumerate(test_stds):
sig = create_data(m_phi=test_freq * np.pi, deviation=std, time_interval=time_interval)
sig = autocorrelation(sig, i=1) # uncomment this to do autocorrelation plus HFF
idx, freq = get_hff(sig)
freqs_domain = fftfreq(len(sig[1]), time_interval)[:len(sig[1])//2]
idx = np.where(np.isclose(freqs_domain, freq / 2, atol=0.5))
fhat, _, _ = get_freqs(sig, 0.5)
prob = get_significance(fhat, idx[0][0])
detected[idx2].append(freq)
significance[idx2].append(prob)
fig, ax = plt.subplots(2, 3)
fig.tight_layout(h_pad=2) # figure spacing
label = ['a', 'b', 'c']
for j in range(3):
ax[0, j].plot(test_freqs, detected[j])
ax[0, j].set_title(fr"Fig {fig_num} ({label[j]}). Detected frequencies with $\sigma={test_stds[j]}$")
ax[0, j].set_xlabel('Frequency of clean signal')
ax[0, j].set_ylabel('Frequency detected with HFF')
ax[0, j].set_ylim([0, 501])
label = ['d', 'e', 'f']
for j in range(3):
ax[1, j].plot(test_freqs, significance[j])
ax[1, j].set_title(fr"Fig {fig_num} ({label[j]}). Confidence of det. frequencies with $\sigma={test_stds[j]}$")
ax[1, j].set_xlabel('Frequency of clean signal')
ax[1, j].set_ylabel('Confidence of signal detected with HFF')
ax[1, j].set_ylim([-0.1, 1.1])
plt.show()
| true |
b2416baa2737a4367fac2a334b5b29da773a8e25
|
Python
|
sakella1/Traffic-Sign-Recognition-Deep-Learning-on-Edge
|
/car_class.py
|
UTF-8
| 3,131 | 3.046875 | 3 |
[] |
no_license
|
import paho.mqtt.client as mqtt
import sys
import json
local_broker = "broker" # broker is host name
local_port = 1883 # standard mqtt port
local_topic = "signs" # topic is image
with open("keys.json") as json_file:
keys = json.load(json_file)
# dictionary of signs
signs = {
"30": {
"label": "30_kph",
"speed":30
},
"50": {
"label": "50_kph",
"speed":50
},
"60": {
"label": "60_kph",
"speed": 60
},
"70": {
"label": "70_kph",
"speed": 70
},
"80": {
"label": "80_kph",
"speed": 80
},
"100": {
"label": "100_kph",
"speed": 100
},
"120": {
"label": "120_kph",
"speed": 120
},
"Deer": {
"label": "Deer",
"speed": 0
},
"Stop": {
"label": "Stop",
"speed": 0
},
"Yield": {
"label": "Yield",
"speed": 0
}
}
car_status = {
1: "staying the same speed",
2: "speeding up",
3: "slowing down",
4: "stopping"
}
class Car:
    """Simulated self-driving car that reacts to recognized road signs.

    ``status`` indexes the module-level ``car_status`` table and ``speed``
    is the speed limit currently being obeyed.
    """

    def __init__(self):
        """Start stationary (speed 0) and 'staying the same speed' (status 1)."""
        self.speed = 0
        self.status = 1

    def new_status(self, input):
        """Update speed/status from a sign id (a key of the ``signs`` table).

        :param input: sign identifier string delivered via MQTT
        :return: None
        """
        print("The sign seen is ", signs[input]["label"])
        target_speed = signs[input]["speed"]
        # NOTE(review): because target_speed < self.speed is tested first,
        # the "stopping the car" branch is unreachable while the car is
        # moving (0 < speed matches the first branch) -- confirm intent.
        if target_speed < self.speed:
            print("slowing down")
            self.status = 3
        elif target_speed == self.speed:
            print("staying the same speed")
            self.status = 1
        elif target_speed == 0:
            print("stopping the car")
            self.status = 4
        else:
            print("speeding up")
            self.status = 2
        self.speed = target_speed
def on_connect_local(client, userdata, flags, rc):
    """paho-mqtt connect callback: log the result code and (re)subscribe.

    Subscribing here (rather than once at startup) means the subscription
    is restored automatically after a reconnect.
    """
    print("connected to local broker with rc: " + str(rc))
    client.subscribe("signs")  # subscribe to local topic
    return None  # end function
def on_message(client, userdata, msg):
    """MQTT message callback: decode the payload, map it to a sign id, update the car."""
    try:
        print("message received!")
        msg = msg.payload.decode("utf-8")  # raw bytes payload -> string key
        #print(msg) # confirm message receipt, turn off for production
        #print("The corresponding key is ")
        new_status = keys[msg]  # translate payload via the keys.json mapping
        #print(new_status)
        car.new_status(new_status)  # change car status
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt; narrowing to Exception would be safer.
        print("Unexpected error:", sys.exc_info()[0])
car = Car()
local_mqttclient = mqtt.Client() # instantiate local client
local_mqttclient.on_connect = on_connect_local # connect using function call
local_mqttclient.connect("broker", local_port, 240) # connect with inputs
local_mqttclient.on_message = on_message # execute when message is received
# go into a loop
local_mqttclient.loop_forever()
| true |
8efe234477e2cd1452b6d065f2446c3fb6280730
|
Python
|
zhaolixiang/my-all
|
/网络爬虫实战/35、 Ajax数据爬取.py
|
UTF-8
| 1,425 | 2.609375 | 3 |
[] |
no_license
|
# 爬取女朋友的微博
import json
import requests
from urllib.parse import urlencode
from pyquery import PyQuery
base_url = 'https://m.weibo.cn/api/container/getIndex?'
uid='3098454065'
headers = {
'Host': 'm.weibo.cn',
'Referer': 'https://m.weibo.cn/u/'+uid,
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
def get_page(page):
    """Fetch one timeline page from the m.weibo.cn container API.

    :param page: 1-based page index
    :return: parsed JSON dict on HTTP 200; otherwise returns None implicitly
    """
    params = {
        'type': 'uid',
        'value': uid,
        'containerid': '107603' + uid,  # presumably the "posts" container prefix -- TODO confirm
        'page': page
    }
    url = base_url + urlencode(params)
    print(url)
    result = requests.get(url, headers=headers)
    if result.status_code == 200:
        return result.json()
def parse_json(json):
    """Yield {'text', 'created_at'} dicts for every card in one API response.

    NOTE(review): the parameter shadows the module-level ``json`` import;
    harmless inside this generator but confusing to readers.
    """
    if json:
        cards = json.get('data').get('cards')
        print(cards)
        for card in cards:
            mblog = card.get('mblog')
            weibo = {}
            weibo['text'] = PyQuery(mblog.get('text')).text()  # strip embedded HTML markup
            weibo['created_at'] = mblog.get('created_at')
            yield weibo
def write_2_text(weibo):
    """Append one post as pretty-printed JSON (non-ASCII preserved) to face.txt."""
    with open('face.txt', 'a') as file:
        file.write(json.dumps(weibo, indent=2, ensure_ascii=False))
        file.write('\n')
if __name__ == '__main__':
for index in range(12):
result = get_page(index)
print(result)
for x in parse_json(result):
write_2_text(x)
| true |
afe050ee8f9fe342d3f413e9453aec9a720f08e5
|
Python
|
wbing520/COVID-away
|
/COVID-away_dataset_visualization/visualize_data_patterns_in_COVID-away.py
|
UTF-8
| 2,347 | 2.65625 | 3 |
[] |
no_license
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.rcParams.update({'xtick.bottom' : False, 'axes.titlepad':5})
from scipy import signal
import matplotlib.pyplot as plt
fig, axes = plt.subplots(4,1, figsize=(4, 10), sharex=True, dpi=120)
b, a = signal.butter(3, 0.05)
pattern = input("Enter the pattern number (0-2071) of hand-to-face motion data you want to visualize from the COVID-away dataset: ")
Acc_file ='./COVID-away_dataset/Pattern_'+ pattern + '/Accelerometer.csv'
Gyro_file = './COVID-away_dataset/Pattern_'+ pattern + '/Gyroscope.csv'
Pressure_file = './COVID-away_dataset/Pattern_'+ pattern + '/Pressure.csv'
RVect_file ='./COVID-away_dataset/Pattern_'+ pattern + '/RotationVector.csv'
df_orig = pd.read_csv(Acc_file, index_col= 1)
df_orig2 = pd.read_csv(Gyro_file, index_col= 1)
df_orig3 = pd.read_csv(Pressure_file, index_col= 1)
df_orig4 = pd.read_csv(RVect_file, index_col= 1)
x = signal.filtfilt(b, a, df_orig['X'])
y = signal.filtfilt(b, a, df_orig['Y'])
z = signal.filtfilt(b, a, df_orig['Z'])
x = pd.DataFrame(x)
y = pd.DataFrame(y)
z = pd.DataFrame(z)
x.plot(ax=axes[0], color = 'C0', legend = None)
y.plot(ax=axes[0], color = 'C1', legend = None)
z.plot(ax=axes[0], color = 'C2', legend = None, title='Accelerometer.csv')
x = signal.filtfilt(b, a, df_orig2['X'])
y = signal.filtfilt(b, a, df_orig2['Y'])
z = signal.filtfilt(b, a, df_orig2['Z'])
x = pd.DataFrame(x)
y = pd.DataFrame(y)
z = pd.DataFrame(z)
x.plot(ax=axes[1], color = 'C7', legend = None)
y.plot(ax=axes[1], color = 'C8', legend = None)
z.plot(ax=axes[1], title='Gyroscope.csv', color = 'C9', legend = None)
x = pd.DataFrame(x)
y = pd.DataFrame(y)
z = pd.DataFrame(z)
x = signal.filtfilt(b, a, df_orig3['Millibars'])
x = pd.DataFrame(x)
x.plot(ax=axes[2], title='Pressure.csv', color = 'C6', legend = None)
x = signal.filtfilt(b, a, df_orig4['X'])
y = signal.filtfilt(b, a, df_orig4['Y'])
z = signal.filtfilt(b, a, df_orig4['Z'])
c = signal.filtfilt(b, a, df_orig4['cos'])
x = pd.DataFrame(x)
y = pd.DataFrame(y)
z = pd.DataFrame(z)
c = pd.DataFrame(c)
x.plot(ax=axes[3], color = 'C7', legend = None)
y.plot(ax=axes[3], color = 'C8', legend = None)
z.plot(ax=axes[3], color = 'C9', legend = None)
c.plot(ax=axes[3], title='RotationVector.csv', color = 'C3', legend = None)
plt.show()
| true |
9140e54a047d8b1f6f1c2de01377f0d972b21ceb
|
Python
|
pytutorial/py2103
|
/Day23/vd3.py
|
UTF-8
| 959 | 3.09375 | 3 |
[] |
no_license
|
from flask import Flask
from flask.globals import request
app = Flask(__name__)
productList = [
{'id': 1, 'name': 'IPhone X', 'price': 10500000},
{'id': 2, 'name': 'IPhone 11', 'price': 11500000},
{'id': 3, 'name': 'IPhone 12', 'price': 12500000},
]
@app.route('/')
def index():
    """Render a plain <ul> of links, one per product, to the detail page."""
    html = '<ul>'
    for p in productList:
        pid = p['id']
        html += f'<li><a href="/view-product-detail?id={pid}"> {p["name"]} </a></li>'
    html += '</ul>'
    return html
#http://127.0.0.1:5000/view-product-detail?id=1
@app.route('/view-product-detail')
def viewProduct():
    """Render a small detail page for the product selected via ``?id=<n>``.

    Returns the string 'Not found' when no product carries that id.
    """
    productId = int(request.args.get('id', -1))
    # Look the product up by its 'id' field instead of assuming the id equals
    # its list position + 1; this stays correct if the list is reordered or
    # ids become sparse.
    p = next((prod for prod in productList if prod['id'] == productId), None)
    if p is None:
        return 'Not found'
    return f'''
    <div>
        <p>Sản phẩm: <b> {p['name']} </b> </p>
        <p>Đơn giá : <b> {p['price']} đ </b> </p>
    </div>
    '''
app.run(debug=True) # hot reload
| true |
ff76481145c0c6d00c6c8a1f35f5a7d9b2d71a27
|
Python
|
rainmayecho/applemunchers
|
/47.py
|
UTF-8
| 945 | 3.171875 | 3 |
[] |
no_license
|
def get_primes(n):
    """Return every prime <= n, in ascending order.

    The original popped elements from a ``set`` and relied on CPython's
    small-int hashing to pop the minimum each time -- an implementation
    detail, not a guarantee. A boolean sieve of Eratosthenes is
    deterministic, ordered, and faster.

    :param n: inclusive upper bound
    :return: list of primes in ascending order ([] when n < 2)
    """
    if n < 2:
        return []
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were struck by smaller primes.
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i in range(2, n + 1) if is_prime[i]]
def pfactorize(a, primes, b, pfactors=None):
    """Recursively accumulate the prime factorization (with multiplicity) of b.

    ``a`` is the remaining cofactor to factor, ``b`` the original number,
    and ``pfactors`` the factors found so far.

    Fixes the mutable-default-argument bug: ``pfactors=[]`` was evaluated
    once and shared, so results leaked between top-level calls. Also uses
    floor division so the recursive cofactor stays an int under Python 3.

    :param a: remaining value to factor
    :param primes: ascending list of candidate primes
    :param b: the original number being factored
    :param pfactors: accumulator list (created fresh when omitted)
    :return: list of prime factors of b, smallest first, with multiplicity
    """
    if pfactors is None:
        pfactors = []
    if a == 1:
        return pfactors
    if a == 2:
        pfactors.append(2)
        return pfactors
    if a == 3:
        pfactors.append(3)
        return pfactors
    for p in primes:
        if a % p == 0:
            pfactors.append(p)
            prod = 1
            for q in pfactors:
                prod *= q
            if prod != b:
                # Factor the remaining cofactor; prod always divides b,
                # so // is exact.
                pfactorize(b // prod, primes, b, pfactors)
            return pfactors
# Project Euler 47: find the first of four consecutive integers that each
# have exactly four distinct prime factors. (Python 2 script.)
primes = get_primes(1000000)
count = 0  # length of the current streak of 4-distinct-factor numbers
fourfact = []  # NOTE(review): never used
for x in range(133000, 1000000):
    # distinct prime factors of x (set() drops multiplicity)
    numfactors = len(list(set(pfactorize(x, primes, x, []))))
    # NOTE(review): this membership test scans a ~78k-element *list* and runs
    # after the expensive factorization; also, 'continue' here preserves the
    # streak counter across a prime instead of resetting it -- confirm intent.
    if x in primes:
        continue
    if numfactors == 4:
        count += 1
    else:
        count = 0  # streak broken
    if count == 4:
        print x-3
        break
| true |
dd7347215cef3d966585e2ad8b278eae86b09375
|
Python
|
jorgegarba/CodiGo9
|
/BackEnd/Semana4/Dia3/12-codigos-extra-impresion-escape-codes.py
|
UTF-8
| 270 | 4.3125 | 4 |
[] |
no_license
|
# \r sirve para eliminar lo anterior escrito
print("Hola \r mundo")
# \n sirve para generar un salto de linea
print("Hola \n mundo")
# \t sirve para una tabulacion
print("\tHola mundo")
# \\ si queremos usar el caracter \ o algun caracter especial
print("Hola \\ mundo")
| true |
6513ab2907da9fc02158a9e2de33fe2b4b1307e4
|
Python
|
beluga13/ChoreBoard
|
/participants_list_module.py
|
UTF-8
| 6,889 | 3.71875 | 4 |
[] |
no_license
|
class Participants():
## Constants used for validation
MINIMUM_NAME_LENGTH = 3 # Used to validate team member's name
MAXIMUM_NAME_LENGTH = 10
MINIMUM_HOUSEHOLD_SIZE = 2
MAXIMUM_HOUSEHOLD_SIZE = 5
## Constructor for the set containing participant's names.
# @param the_participants a set containing the names
#
def __init__(self, the_participants) :
self.participants = the_participants
## Return the participants' list.
#
@property
def participants(self):
return self._participants
## Sets the participants' list attribute.
# @param name the participants set
# @exception ValueError raised if:
# - any of the names in the list are invalid
# - the set is too long or too short
@participants.setter
def participants(self, the_participants) :
try :
self.valid_participants(the_participants)
self._participants = the_participants
except (ValueError,TypeError) as err :
raise
def __str__(self):
participant_length = len(self.participants)
i = 1
participant_string = ""
for participant in self.participants :
participant_string = participant_string + str(participant)
if i < participant_length :
participant_string = participant_string + ", "
i = i + 1
return participant_string
## Check the set of participants.
# Verifies that the set of partcipants is a valid length.
# Verifies that each participants is valid.
#
# @param the_participants the set of participants to be validated
# @return True if the set conforms to the validation conditions
# and raise exception if it does not.
#
@staticmethod
def valid_participants(the_participants) :
# check that the the_participants is a set
if not isinstance(the_participants, set) :
raise TypeError("List of particpants is not a set.")
try :
Participants.is_valid_length(the_participants)
Participants.is_valid_name_set(the_participants)
except (ValueError,TypeError) as err :
raise
return True
## Check the name contains only alphanumeric characters and check that it is the right length.
#
# @param name the string to be validated
# @return True if the string conforms to the validation conditions and
# generate an exception if not.
#
@staticmethod
def is_valid_name_set(name) :
for i in name:
if len(i) < Participants.MINIMUM_NAME_LENGTH \
or len(i) > Participants.MAXIMUM_NAME_LENGTH :
raise ValueError(("Participant Name: {}, is not valid. It should be " +
"more than {} characters long " +
"and less than {} characters long.")
.format(i, Participants.MINIMUM_NAME_LENGTH, Participants.MAXIMUM_NAME_LENGTH))
if not i.isalnum():
raise ValueError(("{}, is not valid. Names in the ParticipantList should be " +
"alphanumeric.").format(i))
return True
@staticmethod
def is_valid_name_indiv(name) :
if len(name) < Participants.MINIMUM_NAME_LENGTH \
or len(name) > Participants.MAXIMUM_NAME_LENGTH :
raise ValueError(("Participant Name: {}, is not valid. It should be " +
"more than {} characters long " +
"and less than {} characters long.")
.format(name, Participants.MINIMUM_NAME_LENGTH, Participants.MAXIMUM_NAME_LENGTH))
for i in name:
if not i.isalnum():
raise ValueError(("{}, is not valid. Names in the ParticipantList should be " +
"alphanumeric.").format(name))
return True
## Check the number of participants in the set is the right length.
#
# @return True if valid and generate an exception if not.
#
@staticmethod
def is_valid_length(the_participants) :
if len(the_participants) < Participants.MINIMUM_HOUSEHOLD_SIZE or \
len(the_participants) > Participants.MAXIMUM_HOUSEHOLD_SIZE :
raise ValueError (("\n\t\tThe number of participants in the household must be" +
" be more than {} and less than {}.")
.format(Participants.MINIMUM_HOUSEHOLD_SIZE - 1, Participants.MAXIMUM_HOUSEHOLD_SIZE + 1))
# If we reached this point then the checks passed
return True
@staticmethod
def is_valid_participant(the_participants) :
for participant in the_participants :
if not isinstance(participant, Participants) :
raise TypeError ("The ParticipantList does not contain objects which are Participants.")
# If we reached this point, the participants are valid.
return True
## main method
#
# Contains some simple tests
#
def main():
print("Test 1: Create a valid participants list")
try:
names = set(["personA","personB","personC"])
p1 = Participants(names)
print("\n\tVALID: ", p1)
except Exception as err:
print("\tERROR: ", err)
print("\nTest 2: Create a set of participants with the wrong data type: list")
try:
names = ["personA","personB","personC"]
p2 = Participants(names)
print("\tVALID: ", p2)
except Exception as err:
print("\tERROR: ", err)
print("\nTest 3: Create a set of participants which is too short")
try:
names = set(["personA"])
p2 = Participants(names)
print("\tVALID: ", p2)
except Exception as err:
print("\tERROR: ", err)
print("\nTest 4: Create a set of participants which is too long")
try:
names = set(["personA","personB","personC", "personD", "personE", "personF"])
p = Participants(names)
print("\tVALID: ", p)
except Exception as err:
print("\tERROR: ", err)
print("\nTest 5: Create a set of participants with invalid name, punctuation character")
try:
names = set(["***","personB","personC"])
p = Participants(names)
print("\tVALID: ", p)
except Exception as err:
print("\tERROR: ", err)
print("\nTest 6: Create a set of participants with name too long")
try:
names = set(["tooooooolongggggg","personB","personC"])
p = Participants(names)
print("\tVALID: ", p)
except Exception as err:
print("\tERROR: ", err)
if __name__ == "__main__":
main()
| true |
6c406bd72947bbdcb6001c46f0f5a106d1ba3a48
|
Python
|
KaloObr/Python-Fundamentals
|
/p5_list_advanced/lab_1_trains.py
|
UTF-8
| 609 | 3.640625 | 4 |
[] |
no_license
|
# Simulate passengers boarding/leaving a train of wagons_n wagons until "End".
wagons_n = int(input())
train = [0] * wagons_n  # passenger count per wagon
while True:
    command = input()
    if command == 'End':
        break
    tokens = command.split(" ")
    instructions = tokens[0]
    if instructions == 'add':
        # "add <count>": passengers always board the last wagon
        count = int(tokens[1])
        train[-1] += count
    elif instructions == 'insert':
        # "insert <index> <count>": passengers board a specific wagon
        index = int(tokens[1])
        count = int(tokens[2])
        train[index] += count
    elif instructions == 'leave':
        index = int(tokens[1])
        count = int(tokens[2])
        # Sanity check: silently ignore a 'leave' that would go negative
        if count <= train[index]:
            train[index] -= count
print(train)
| true |
131b9c4eb96f84ab818923afdfff562d89edb283
|
Python
|
Tornike-Skhulukhia/IBSU_Masters_Files
|
/code_files/__PYTHON__/lecture_1/basic data structures/stack.py
|
UTF-8
| 844 | 4.375 | 4 |
[] |
no_license
|
'''
We do not need it in Python, but...
'''
class Stack:
    """A minimal LIFO stack (illustrative -- a Python list already is one).

    Fixes two issues in the original: ``pop`` rebuilt the entire backing
    list (O(n) per pop, via ``self._data = self._data[:-1]``), and a
    separate ``_top`` counter duplicated what ``len`` of the backing list
    already provides.
    """

    def __init__(self):
        self._data = []

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return f"<Stack: [{', '.join(str(i) for i in self._data)}]>"

    def push(self, item):
        """Place *item* on top of the stack."""
        self._data.append(item)

    def pop(self):
        """Remove and return the top item; raise if the stack is empty."""
        if not self._data:
            raise Exception('Stack has 0 elements')
        return self._data.pop()
# # test
stack = Stack()
print("1)", stack)
assert len(stack) == 0
stack.push(89)
stack.push(False)
stack.push("I am in stack :)")
stack.push(120)
assert len(stack) == 4
print("2)", stack)
stack.pop()
stack.pop()
assert len(stack) == 2
print("3)", stack)
| true |
9e5bd945cfc8adf1358a22f53e16e1a7c4f99934
|
Python
|
akieight5/python-experiments
|
/3.7projects/images.py
|
UTF-8
| 1,682 | 3.34375 | 3 |
[] |
no_license
|
'''
Created on 25 Nov 2020
@author: aki
'''
"""
import tkinter as tk
root = tk.Tk()
root.title("image demonstration")
#iconbitmap only works with black and white images
# root.iconbitmap("/home/aki/Downloads/icons/antenna.xbm")
img = tk.PhotoImage(file='/home/aki/Downloads/icons/antenna.png')
root.tk.call('wm', 'iconphoto', root._w, img)
"""
"""
This is an old question, and there is lots of stuff written about it on the web,
but all of it is either incorrect or incomplete, so having gotten it to work I
thought it would be good to record my actual working code here.
First, you'll need to create an icon and save it in two formats: Windows "ico"
and Unix "xbm". 64 x 64 is a good size. XBM is a 1-bit format--pixels just on or
off, so no colors, no grays. Linux implementations of tkinter only accept XBM
even though every Linux desktop supports real icons, so you're just out of luck
there. Also, the XBM spec is ambiguous about whether "on" bits represent black or
white, so you may have to invert the XBM for some desktops. Gimp is good for creating these.
Then to put the icon in your titlebar, use this code (Python 3):
"""
#"""
import os, sys
from tkinter import *
from tkinter.ttk import *
root = Tk()
root.title("My Application")
if "nt" == os.name:
root.wm_iconbitmap(bitmap = "myicon.ico")
else:
root.wm_iconbitmap(bitmap = "/home/aki/Downloads/icons/antenna.xbm")
root.mainloop()
#"""
"""
#something else to try
import sys, os
from tkinter import *
root = Tk()
root.title("My Application")
program_directory="/home/aki/Downloads/icons"
root.iconphoto(True, PhotoImage(file=os.path.join(program_directory, "antenna.png")))
"""
root.mainloop()
| true |
93a2551b2a5a0bd79bbac0a5e011c65ce04dd686
|
Python
|
wilsonwong2014/MyDL
|
/Tensorflow/demo/opencv/demo_image_segmentation.py
|
UTF-8
| 1,342 | 2.890625 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' 图像分割
------------------------------------------------
'''
#K-means方法进行分割
# 以灰色导入图像
img = cv2.imread('messi5.jpg',0)#image read be 'gray'
plt.subplot(221),plt.imshow(img,'gray'),plt.title('original')
plt.xticks([]),plt.yticks([])
# 改变图像的维度
img1 = img.reshape((img.shape[0]*img.shape[1],1))
img1 = np.float32(img1)
# 设定一个criteria,
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,10,1.0)
# 设定一个初始类中心flags
flags = cv2.KMEANS_RANDOM_CENTERS
# 应用K-means
compactness,labels,centers = cv2.kmeans(img1,2,None,criteria,5,flags)
compactness_1,labels_1,centers_1 = cv2.kmeans(img1,2,None,criteria,10,flags)
compactness_2,labels_2,centers_2 = cv2.kmeans(img1,2,None,criteria,15,flags)
img2 = labels.reshape((img.shape[0],img.shape[1]))
img3 = labels_1.reshape((img.shape[0],img.shape[1]))
img4 = labels_2.reshape((img.shape[0],img.shape[1]))
plt.subplot(222),plt.imshow(img2,'gray'),plt.title('kmeans_attempts_5')
plt.xticks([]),plt.yticks([])
plt.subplot(223),plt.imshow(img3,'gray'),plt.title('kmeans_attempts_10')
plt.xticks([]),plt.yticks([])
plt.subplot(224),plt.imshow(img4,'gray'),plt.title('kmeans_attempts_15')
plt.xticks([]),plt.yticks([])
plt.savefig("kmeans_attempts.png")
plt.show()
| true |
ce657f14292763873c441e37b3589f8acd9d38c4
|
Python
|
snip-animesh/Dynamic-Programming
|
/freecodecamp.org/fibTabulation.py
|
UTF-8
| 305 | 3.40625 | 3 |
[] |
no_license
|
def fibTab(n):
    """Return the n-th Fibonacci number via bottom-up tabulation.

    fib(0) = 0, fib(1) = 1, fib(2) = 1, ...

    Fixes an IndexError in the original for n == 0: the table then had a
    single slot but ``table[1] = 1`` ran unconditionally. Time and space
    complexity remain O(n).

    :param n: non-negative index (values < 1 return 0)
    :return: the n-th Fibonacci number
    """
    if n < 1:
        return 0
    table = [0] * (n + 1)
    table[1] = 1
    # Each entry is the sum of the two preceding ones.
    for k in range(2, n + 1):
        table[k] = table[k - 1] + table[k - 2]
    return table[n]
print(fibTab(50))
# Time and space Complexity O(n)
| true |
8bb3365b7ef14775682b5bc92d6b78fd7a7fbe04
|
Python
|
AYWG/RedditBots
|
/askreddit_tracker/askreddit_tracker.py
|
UTF-8
| 1,184 | 2.640625 | 3 |
[] |
no_license
|
# reddit bot that messages me every time there is a post on r/askreddit/hot that has 10000+ comments
import praw
import OAuth2Util
import time
user_agent = "AskReddit tracker 1.0 by /u/TheMaou"
r = praw.Reddit(user_agent)
o = OAuth2Util.OAuth2Util(r) # connect via OAuth2
o.refresh(force=True) # automatically refresh token when necessary
already_received = []
subreddit = r.get_subreddit('askreddit')
popularity_threshold = 10000
while True:
try:
for submission in subreddit.get_hot(limit=25): # look at most recent 25 posts in r/askreddit/hot
if (submission.id not in already_received and
submission.num_comments >= popularity_threshold):
msg = '[AskReddit Thread](%s)' % submission.short_link
r.send_message('TheMaou', 'Popular AskReddit Thread!', msg)
already_received.append(submission.id)
time.sleep(1800) # execute every 30 minutes
except KeyboardInterrupt:
print ("Shutting down.")
break
except praw.errors.HTTPException as e:
exc = e._raw
print ("Something bad happened! HTTPError", exc.status_code)
except Exception as e:
print("Something bad happened!", e)
traceback.print_exc()
| true |
1de328d0c8a0529c1f1388fcbe1256433030f215
|
Python
|
mmore500/dishtiny
|
/postprocessing/tabulate_and_stitch_stint_thread_profiles.py
|
UTF-8
| 5,221 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""Combines all available collated data on s3 for a single stint.
Collated data is tabulated (reformatted and processed) and stitched into a single dataframe.
Uploads output to a programatically-generated s3 url.
Usage:
./tabulate_and_stitch_stint_thread_profiles.py [bucket] [endeavor] [stint]
"""
import boto3
from functools import reduce
from iterdub import iterdub as ib
from iterpop import iterpop as ip
from keyname import keyname as kn
import os
import pandas as pd
import sys
import tempfile
from dishpylib.pyassemblers import \
assemble_config_records, \
assemble_evolve_dpp_metrics
################################################################################
print( )
print( 'running tabulate_and_stitch_stint_thread_profiles.py' )
print( '---------------------------------------------------------------------' )
################################################################################
try:
bucket = sys.argv[1]
endeavor, stint = map(int, sys.argv[2:])
except:
print(__doc__)
sys.exit(1)
print(f'bucket {bucket}')
print(f'endeavor {endeavor}')
print(f'stint {stint}')
################################################################################
print( )
print( 'running data assemblers' )
print( '---------------------------------------------------------------------' )
################################################################################
assemblers = [
assemble_evolve_dpp_metrics,
]
dataframes = []
sources = []
for assembler in assemblers:
res = assembler(
bucket=bucket,
endeavor=endeavor,
stint=stint,
)
if res is not None:
res_df, res_sources = res
dataframes.append( res_df )
sources += res_sources
################################################################################
print( )
print( 'stitching data' )
print( '---------------------------------------------------------------------' )
################################################################################
print(f'{len(dataframes)} dataframes to merge')
print(f'dataframes have {[len(df.index) for df in dataframes]} rows')
# Outer-merge all assembled frames on their shared columns.
df_stitched = reduce(
    lambda left, right: pd.merge(
        left,
        right,
        # Index.intersection() replaces the deprecated set-op `&` on Index.
        on=list(left.columns.intersection(right.columns)),
        how='outer',
    ),
    dataframes,
)

print(f'merged dataframe has {len(df_stitched.index)} rows')

# there should only be one entry for each series/stint/thread/proc
# BUG FIX: this previously asserted on `res_df` (the last assembler's frame,
# a leftover loop variable) rather than the stitched dataframe the comment
# describes.
assert len(df_stitched.groupby([
    'Series',
    'Stint',
    'thread',
    'proc',
])) == len(df_stitched)
################################################################################
print( )
print( 'calculating upload path' )
print( '---------------------------------------------------------------------' )
################################################################################
# common_keys = set.intersection(*[
# set( kn.unpack(source).keys() )
# for source in sources
# ])
out_filename = kn.pack({
# **{
# key : ib.dub(
# kn.unpack(source)[key]
# for source in sources
# )
# for key in common_keys
# },
# **{
'a' : 'thread_profiles',
'stint' : stint,
'ext' : '.csv.xz',
# },
})
out_prefix = f'endeavor={endeavor}/thread-profiles/stage=6+what=tabulated_and_stitched/stint={stint}/'
out_path = out_prefix + out_filename
print(f'upload path will be s3://{bucket}/{out_path}')
################################################################################
print( )
print( 'dumping and uploading' )
print( '---------------------------------------------------------------------' )
################################################################################
# have to work with filename or pandas compression doesn't work
with tempfile.TemporaryDirectory() as tmp:
temp_path = os.path.join(tmp, 'data.xz')
print(f'temp path is {temp_path}')
df_stitched.to_csv(
temp_path,
index=False,
compression='xz',
)
with open(temp_path, 'rb') as f:
client = boto3.client('s3', region_name='us-west-2',)
client.upload_fileobj(
f,
bucket,
out_path,
)
################################################################################
print( )
print( 'tabulation and stitching complete' )
print( '---------------------------------------------------------------------' )
################################################################################
| true |
22be274d4b540791097785dd857ddb68f65ce998
|
Python
|
cmput401-fall2018/web-app-ci-cd-with-travis-ci-forgeno
|
/Selenium_test.py
|
UTF-8
| 762 | 2.8125 | 3 |
[
"MIT"
] |
permissive
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_home():
selenium = webdriver.Chrome()
selenium.get('http://162.246.157.221:8000')
name = selenium.find_element_by_id('name')
about = selenium.find_element_by_id('about')
education = selenium.find_element_by_id('education')
skills = selenium.find_element_by_id('skills')
work = selenium.find_element_by_id('work')
contact = selenium.find_element_by_id('contact')
assert name.text == "Ivan Ma"
assert about.text == "4th year Computer Science Student"
assert education.text == "University of Alberta 2014-2019"
assert skills.text == "Java, Python, C"
assert work.text == "Unemployed"
assert contact.text == "ima@ualberta.ca"
| true |
d53447be0a558a362459b283d53cc1e7938fe6e9
|
Python
|
Modulus/AmmoFinder
|
/ammo_finder/core/category.py
|
UTF-8
| 700 | 2.890625 | 3 |
[] |
no_license
|
# Python standard library imports
from enum import Enum
class Category(Enum):
    """Ammunition categories recognized in shop URLs."""

    RIFLE = 1
    HANDGUN = 2
    RIMFIRE = 3
    SHOTGUN = 4
    AIR = 5

    @staticmethod
    def extract(url):
        """Classify *url* by the first matching keyword group.

        Groups are tried in a fixed priority order, so e.g. a URL
        containing both "air" and "rifle" classifies as RIFLE.

        :raises ValueError: when no keyword matches
        """
        keyword_groups = (
            (Category.RIFLE, ("rifle",)),
            (Category.SHOTGUN, ("hagle",)),
            (Category.HANDGUN, ("haandvaapen", "handvapen", "pistol")),
            (Category.RIMFIRE, ("rimfire", "22lr", "salong")),
            (Category.AIR, ("air", "luft", "luftvapen")),
        )
        for category, needles in keyword_groups:
            if any(needle in url for needle in needles):
                return category
        raise ValueError("Category incorrect!")
| true |
6a935e64bf9e98ec2a449856cb78418b11ca551b
|
Python
|
bluestone029/dxf-ruler-generator
|
/dxf_ruler_generator.py
|
UTF-8
| 2,397 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
"""DXF Ruler Generator.
This module generates DXF files for laser cutting and engraving custom sized
rulers, which can be easily manufactured at the nearest FabLab.
Example
-------
Generate a 7cm ruler:
$ python -m dxf_ruler_generator 7
This will create a 'ruler_7cm.dxf' on the current working directory.
"""
import os.path
from argparse import ArgumentParser
import ezdxf
parser = ArgumentParser(description="Generate rulers for digital fabrication.")
parser.add_argument("length", metavar="L", type=int,
help="an integer for the ruler's length, in centimeters.")
parser.add_argument("width", metavar="W", type=int, nargs="?", default=30,
help="an integer for the ruler's width, in milimeters.")
parser.add_argument("tick_width", metavar="TW", type=float,
nargs="?", default=.25,
help="a float for the tick's width, in milimeters.")
args = parser.parse_args()
def run():
    """Draw the ruler and save it as 'ruler_<L>cm.dxf' in the CWD.

    Geometry goes to two layers: 'CUT' holds the outer outline (for laser
    cutting) and 'SCAN' holds tick marks and numerals (for engraving).
    Prints the absolute path of the written file, without a newline.
    """
    dwg = ezdxf.new('R2010')
    dwg.layers.new(name='CUT', dxfattribs={'color': 7})
    dwg.layers.new(name='SCAN', dxfattribs={'color': 5})
    msp = dwg.modelspace()
    # Outer rectangle: one extra centimetre of length for margins.
    ruler_outline = [(0, 0),
                     (10*(args.length+1), 0),
                     (10*(args.length+1), args.width),
                     (0, args.width),
                     (0, 0)]
    msp.add_lwpolyline(ruler_outline, dxfattribs={'layer': 'CUT'})
    # One tick per millimetre; taller ticks every 5 mm and 10 mm.
    for mm in range(10*args.length+1):
        # Tick x position, shifted 5 mm in and centred on the tick width.
        x = mm + 5 - args.tick_width / 2
        if mm == 0 or mm % 10 == 0:  # NOTE(review): mm == 0 is redundant (0 % 10 == 0).
            tick_height = args.width / 3
            # Centimetre ticks also get a rotated numeral label.
            msp.add_text(
                str(mm//10),
                dxfattribs={'rotation': 90,
                            'height': 2,
                            'layer': 'SCAN'}
            ).set_pos((x-1, args.width-tick_height))
        elif mm % 5 == 0:
            tick_height = args.width / 6
        else:
            tick_height = args.width / 12
        # NOTE(review): the tick rectangle is hard-coded 0.25 wide here, while
        # x above uses args.tick_width — presumably .25 should be
        # args.tick_width; confirm before changing.
        ruler_tick = [(x, args.width),
                      (x, args.width-tick_height),
                      (x+.25, args.width-tick_height),
                      (x+.25, args.width),
                      (x, args.width)]
        msp.add_lwpolyline(ruler_tick, dxfattribs={'layer': 'SCAN'})
    filename = f'ruler_{args.length}cm.dxf'
    dwg.saveas(filename)
    print(os.path.abspath(filename), end='')
if __name__ == "__main__":
    run()
| true |
0a591936785a9d2ebbda5fa77d60361d42917258
|
Python
|
fsevero/python-oo
|
/003 - Inheritance/inheritance.py
|
UTF-8
| 717 | 3.625 | 4 |
[] |
no_license
|
class MyClass (object): # class MyClass and, by default, inherits from object
    """Empty example class showing explicit inheritance from object."""
    pass
class Person:
    """A person identified by a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __repr__(self):
        # Same text the original produced with str.format.
        return 'Person: %s - Age: %s' % (self.name, self.age)
class PF(Person):
    """Natural person ("pessoa física"): a Person plus a CPF number."""

    def __init__(self, cpf, name, age):
        # super() is preferred over the original explicit Person.__init__
        # call: it follows the MRO and keeps working if the base changes.
        super().__init__(name, age)
        self.cpf = cpf
class PJ(Person):
    """Legal entity ("pessoa jurídica"): a Person plus a CNPJ number."""

    def __init__(self, cnpj, name, age):
        super().__init__(name, age)
        self.cnpj = cnpj
# Demo: a natural person prints its inherited attributes plus the CPF.
pf = PF('00000000000', 'Severo', 29)
print(pf.name)
print(pf.age)
print(pf.cpf)
print(pf)
# Demo: a legal entity prints its inherited attributes plus the CNPJ.
pj = PJ('0000000000000', 'Severo Ltda', 1)
print(pj.name)
print(pj.age)
print(pj.cnpj)
print(pj)
| true |
d40b8d853d04c530c388dbda40d984b74ffb61ee
|
Python
|
Stomach-ache/Codeforces
|
/round_236/A.py
|
UTF-8
| 192 | 2.96875 | 3 |
[] |
no_license
|
# Python 2 solution for Codeforces Round 236, problem A.
# k: max sections per box, a: nuts, b: divisors, v: capacity per section.
k, a, b, v = map(int, raw_input().split())
cnt = 0
while a > 0:
    # Each box holds one section by default; divisors add more, up to k.
    section = 1
    if b >= k-1:
        # Enough divisors to fully partition this box into k sections.
        section = k
        b -= (k - 1)
    else:
        # Use whatever divisors are left.
        section = b + 1
        b = 0
    # Store up to v nuts per section, then count the box.
    a -= (v * section)
    cnt += 1
print cnt
| true |
8010703a48c8a63d85f23daba8772419add576e6
|
Python
|
davparra/Kinases
|
/old/kinase-hc.py
|
UTF-8
| 702 | 2.890625 | 3 |
[] |
no_license
|
#%% [markdown]
# ## Libraries
import pandas as pd
import scipy.spatial.distance as ssd
from scipy.cluster.hierarchy import linkage, dendrogram
from matplotlib import pyplot as plt
import plotly.plotly as py
import plotly.offline
#%% [markdown]
# ## preparing data to be processed
# Load the pairwise protein distance matrix (first column is the index).
data_path = 'data/human_protein_distance_matrix.csv'
df = pd.read_csv(data_path, delimiter=',', index_col=0)
print(df.info())
print(df.shape)
print(df.head(8))
# converting redundant distance matrix to condensed matrix
condensed_matrix = ssd.squareform(df)
#%% [markdown]
# ## Hierarchical clustering of the condensed matrix
# Ward linkage on the condensed distances, then plot the dendrogram.
H = linkage(condensed_matrix, 'ward')
fig = plt.figure(figsize=(25, 10))
dn = dendrogram(H)
| true |
2fbe905e555b43c6cacbc64ed47e135e5c3c2caf
|
Python
|
cash2one/xai
|
/xai/brain/wordbase/adjectives/_accountable.py
|
UTF-8
| 493 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
#calss header
class _ACCOUNTABLE():
    """Auto-generated concept node for the adjective 'ACCOUNTABLE'.

    Attribute names (including the 'childen' typo) are preserved because
    the surrounding word-base machinery accesses them by name.
    """
    def __init__(self,):
        self.name = "ACCOUNTABLE"
        self.definitions = [u'Someone who is accountable is completely responsible for what they do and must be able to give a satisfactory reason for it: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'adjectives'
    def run(self, obj1, obj2):
        """Record this word (lower-cased) under obj2 and return the mapping."""
        self.jsondata[obj2] = {'properties': self.name.lower()}
        return self.jsondata
| true |
2ce85cbca86621f2ca308d7b9f33285b7679b953
|
Python
|
jaciyu/MouseTrackGAN
|
/dataprocsess.py
|
UTF-8
| 1,990 | 2.53125 | 3 |
[] |
no_license
|
import numpy as np
# Load the raw mouse-track records (one whitespace-separated record per line).
with open('E:\Workspace\GANproject\data_origin.txt', 'r') as f:
    data = f.readlines()
# fdata0: up to 300 (x, y, t) points per track; fdata1: start point + target;
# fdata_mark: human/bot label per track.
fdata0 = np.zeros([3000, 300, 3], dtype=float)
fdata1 = np.zeros([3000, 4], dtype=float)
fdata_mark = np.zeros([3000, ], dtype=float)
# Feature extraction (translated from: 提取特征)
for i in range(len(data)):
    data[i] = data[i].split()
    # Field 1 holds ';'-separated "x,y,t" triples; drop the trailing empty item.
    data0 = data[i][1].split(';')[0:-1]
    for j in range(len(data0)):
        data0[j] = data0[j].split(',')
    data0 = np.array(data0).astype(int)
    # Zero-pad every track to exactly 300 points.
    data0 = np.pad(data0, [[0, 300-len(data0)], [0, 0]], 'constant')
    fdata0[i]=data0
    fdata1[i][0:2]=data0[0,0:2]
    fdata1[i][2:4] = data[i][2].split(',')
    fdata_mark[i] = data[i][3]
# Data augmentation by rotation (translated from: 数据扩充)
times=12
angle=2*np.pi/times
edata0=np.zeros([3000*times,300,3])
edata1=np.zeros([3000*times,4])
for i in range(times):
    # 2-D rotation of the (x, y) coordinates by i*angle; t is unchanged.
    edata0[3000*i:3000*(i+1),:,0]=np.cos(angle*i)*fdata0[:,:,0]-np.sin(angle*i)*fdata0[:,:,1]
    edata0[3000*i:3000*(i+1),:,1]=np.cos(angle*i)*fdata0[:,:,1]+np.sin(angle*i)*fdata0[:,:,0]
    edata0[3000*i:3000*(i+1),:,2]=fdata0[:,:,2]
    edata1[3000*i:3000*(i+1),[0,2]]=np.cos(angle*i)*fdata1[:,[0,2]]-np.sin(angle*i)*fdata1[:,[1,3]]
    edata1[3000*i:3000*(i+1),[1,3]]=np.cos(angle*i)*fdata1[:,[1,3]]+np.sin(angle*i)*fdata1[:,[0,2]]
edata_mark=np.tile(fdata_mark,times)
# Generate negative samples with random numbers (translated from: 用随机数生成负样本)
negNumber = 2000
rand_data0 = np.cumsum(np.random.normal(10, 50, [negNumber, 300,3]),axis=1)
rand_data1 = np.random.uniform(-2000, 2000, [negNumber, 4])
edata0=np.concatenate((edata0,rand_data0),axis=0)
edata1=np.concatenate((edata1,rand_data1),axis=0)
edata_mark=np.concatenate((edata_mark,np.zeros([negNumber,])))
# Save features (translated from: 保存特征)
x_fdata=np.concatenate((fdata0.reshape([3000,300*3],order='F'),fdata1),axis=1)
x_edata=np.concatenate((edata0.reshape([3000*times+negNumber,300*3],order='F'),edata1),axis=1)
#np.save('x_fdata.npy',x_fdata)
#np.save('y_fdata.npy',fdata_mark)
np.save('x_edata.npy',x_edata)
np.save('y_edata.npy',edata_mark)
| true |
2cfea324cf9d82ab554fa9ea27f74001ba18a6c1
|
Python
|
robertbsnook/booking_quote
|
/src/booking/Package_booking.py
|
UTF-8
| 8,727 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import datetime
import pandas as pd
from tabulate import tabulate
class TravelRoute:
    """Base class for a shipping option.

    Subclasses implement charge(), returning the price as a number
    (0 meaning the option is not available for this package).
    """
    def __init__(self, destination, dangerous, urgency, dimension, weight):
        self.destination = destination      # 'in-country' or 'overseas'
        self.dangerous = dangerous          # 'Safe' or 'unsafe'
        self.urgency = urgency              # 'urgent' or 'not urgent'
        self.dimension = float(dimension)   # package volume
        self.weight = float(weight)         # package weight in kg
    def __str__(self):
        # Bug fix: the original did str(self.charge), which stringified the
        # bound method object ("<bound method ...>") instead of the price.
        return str(self.charge())
class Air(TravelRoute):
    """Air freight: refuses dangerous goods; bills the larger of the
    weight rate (10/kg) and the volume rate (20/unit volume).

    The explicit pass-through __init__ of the original is dropped —
    TravelRoute's constructor is inherited unchanged.
    """
    def charge(self):
        """Return the air charge, or 0 when the package cannot fly."""
        if self.dangerous == "unsafe":
            return 0
        # Both candidates are floats, so this equals the original if/else.
        return max(self.weight * 10, self.dimension * 20)
class Truck(TravelRoute):
    """Ground freight: in-country only; urgent deliveries cost more.

    The explicit pass-through __init__ of the original is dropped —
    TravelRoute's constructor is inherited unchanged.
    """
    def charge(self):
        """Return the truck charge, or 0 for overseas destinations."""
        if self.destination == 'overseas':
            return 0
        return 45 if self.urgency == 'urgent' else 25
class Boat(TravelRoute):
    """Sea freight: overseas, non-urgent deliveries only.

    The explicit pass-through __init__ of the original is dropped —
    TravelRoute's constructor is inherited unchanged.
    """
    def charge(self):
        """Return the boat charge, or 0 when sea freight does not apply."""
        if self.destination == 'in-country' or self.urgency == 'urgent':
            return 0
        return 30
def urgent_check(delivery_date):
    """Classify a delivery date: 'urgent' when it falls within the next
    three days (relative to now), 'not urgent' otherwise."""
    cutoff = datetime.datetime.today() + datetime.timedelta(days=3)
    return "not urgent" if delivery_date > cutoff else "urgent"
def destination_check():
    """Prompt until the user answers (c)ountry or (o)verseas.

    Returns 'in-country' or 'overseas'.
    """
    while True:
        dest = input("Is this package remaining in (c)ountry, or (o)verseas: ").lower()
        if dest == 'c':
            return 'in-country'
        elif dest == 'o':
            return 'overseas'
        else:
            print("Use 'c' or 'o'.")
def danger_check():
    """Prompt until the user answers y/n about dangerous contents.

    Returns 'Safe' or 'unsafe' (note the asymmetric capitalisation —
    callers compare only against 'unsafe').
    """
    while True:
        danger = input("Does the package contain anything dangerous (y/n): ").lower()
        if danger == 'n':
            return 'Safe'
        elif danger == 'y':
            return 'unsafe'
        else:
            print("Is it safe or unsafe? (y/n)")
def next_customer():
    """Ask once whether another customer is waiting; any answer other
    than 'y' (case-insensitive) means no."""
    next_c = input("Is there another customer: (y/n)").lower()
    if next_c == 'y':
        return True
    else:
        return False
def delivery_options(destination, dangerous, urgency, dimension, weight):
    """Show the viable shipping options and let the user pick one.

    Builds Air/Truck/Boat quotes, keeps those with a positive charge,
    prints them as a table, then loops until the user enters a valid
    row index. Returns (option_name, cost).
    """
    options = {}
    air_option = Air(destination, dangerous, urgency, dimension, weight)
    truck_option = Truck(destination, dangerous, urgency, dimension, weight)
    boat_option = Boat(destination, dangerous, urgency, dimension, weight)
    # A charge of 0 means "not available" — such options are omitted.
    if air_option.charge() > 0.0:
        options['Air'] = air_option.charge()
    if truck_option.charge() > 0.0:
        options['Truck'] = truck_option.charge()
    if boat_option.charge() > 0.0:
        options['Boat'] = boat_option.charge()
    df2 = pd.DataFrame(list(options.items()), columns=['Option', 'Cost'])
    print(tabulate(df2, tablefmt='psql'))
    selection = 0
    while selection == 0:
        try:
            delivery_choice = input("Choose the delivery method:")
            delivery_choice = int(delivery_choice)
            # Valid choices are 0..last row index of the options table.
            if delivery_choice < 0 or delivery_choice > df2.last_valid_index():
                print("Please select a valid method of transport")
            else:
                selection = 1
        except ValueError:
            # Non-numeric input from int() lands here.
            print('Please enter a valid shipping option')
    df2_option = df2.at[delivery_choice, 'Option']
    df2_cost = df2.at[delivery_choice, 'Cost']
    return df2_option, df2_cost
def print_customer(df):
    """Print the most recent booking (last DataFrame row) as a table."""
    row = df.tail(1).transpose()
    print("Order ID:", df.last_valid_index())
    print(tabulate(row, tablefmt='psql'))
def get_name():
    """Prompt until a non-empty customer name is entered and return it.

    The blank-input case raises a ValueError that is immediately caught
    and printed — exception-as-control-flow, kept as-is for behaviour.
    """
    while True:
        try:
            name = input("Please enter customer name: ")
            if not name:
                raise ValueError("Please enter a valid name. Cannot be blank")
            else:
                break
        except ValueError as e:
            print(e)
    return name
def get_description():
    """Prompt until a non-empty package description is entered."""
    while True:
        try:
            description = input("General description of package: ")
            if not description:
                raise ValueError("Please enter a description. Cannot be blank")
            else:
                break
        except ValueError as e:
            print(e)
    return description
def get_delivery_date():
    """Prompt until a valid future delivery date is entered.

    NOTE(review): the prompt says "yyyy/dd/mm" but the parse format is
    '%Y/%m/%d' (year/month/day) — the prompt text appears to be wrong;
    confirm before changing the user-facing string.
    """
    day = 0
    while day == 0:
        d_date = input("When do they want the package to arrive: yyyy/dd/mm ")
        try:
            d_date = datetime.datetime.strptime(d_date, '%Y/%m/%d')
            # Must be strictly later than "now".
            if d_date <= datetime.datetime.today():
                print("Please enter a delivery date at least one day in advance.")
            else:
                day = 1
        except ValueError:
            print("Incorrect date format, should be YYYY/MM/DD.")
    return d_date
def get_dimensions():
    """Prompt for length, width and height and return the billed volume.

    Each dimension must be at least 0.1; the returned volume is floored
    at 0.5, the minimum billable size. The three identical prompt loops
    of the original are factored into _read_dimension.
    """
    print("Minimum dimension size is 0.1 meter.\n "
          "Anything smaller should be rounded up to 0.1.\n"
          "Minimum overall size is 0.5m")
    length = _read_dimension("L: ", "Please enter a length.")
    width = _read_dimension("W: ", "Please enter a width.")
    height = _read_dimension("H: ", "Please enter a height.")
    volume = length * width * height
    # Bill at least half a cubic metre.
    return max(volume, 0.5)
def _read_dimension(prompt, empty_msg):
    """Keep prompting until a valid dimension (>= 0.1) is entered.

    prompt     -- text shown to the user ("L: ", "W: ", "H: ").
    empty_msg  -- message printed when the value is exactly 0 (preserves
                  the original quirk of reporting 0 as a missing value).
    Non-numeric input triggers float()'s ValueError, which is printed.
    """
    while True:
        try:
            value = float(input(prompt))
            if not value:
                raise ValueError(empty_msg)
            elif value < 0.1:
                print("Please enter a dimension greater than 0.0999.")
            else:
                return value
        except ValueError as e:
            print(e)
def size_check(dimension):
    """Return True when the volume fits the 5x5x5 shipping limit,
    otherwise print an apology and return False."""
    if dimension <= 124.999:
        return True
    print("Sorry, but this package is too large to be shipped by our methods. Please reduce the size to less "
          "than 5x5x5")
    return False
def get_weight():
    """Prompt until a positive weight (kg) is entered and return it.

    A value of exactly 0 trips the `not weight` branch and is reported
    as blank; negative values print a separate message. Non-numeric
    input triggers float()'s ValueError, which is printed.
    """
    while True:
        try:
            weight = float(input("How many kilograms does it weigh: "))
            if not weight:
                raise ValueError("Please enter a weight. Cannot be blank")
            elif weight <= 0:
                print("Please enter a positive weight.")
            else:
                break
        except ValueError as e:
            print(e)
    return weight
def weight_check(weight):
    """Return True when the weight is under the 10 kg limit,
    otherwise print an apology and return False."""
    if weight <= 9.999:
        return True
    print("Sorry, but this package weighs too much. Please reduce the weight to under 10kg")
    return False
def main():
    """Interactive booking loop: collect package details, quote shipping
    options, record the chosen one in booking_quotes.csv, repeat while
    there are more customers.

    NOTE(review): weight_check's return value is ignored here and
    size_check is never called, so over-limit packages are still booked.
    """
    customer = True
    while customer:
        customer_name = get_name()
        destination = destination_check()
        package_desc = get_description()
        dangerous = danger_check()
        delivery_date = get_delivery_date()
        urgency = urgent_check(delivery_date)
        weight = get_weight()
        weight_check(weight)
        dimension = get_dimensions()
        # Reload the ledger, normalise its index, append the new booking.
        df = pd.read_csv('booking_quotes.csv', index_col=0)
        df.index.name = 'ID'
        df = df.reset_index(drop=True)
        new_row = {'Customer_Name': customer_name.title(),
                   'Destination': destination,
                   'Package_desc': package_desc,
                   'Dangerous': dangerous,
                   'Delivery_date': delivery_date.date(),
                   'Urgency': urgency,
                   'Weight': weight,
                   'Size': round(dimension,2),
                   'Shipping_option': '',
                   'Cost': ''}
        df = df.append(new_row, ignore_index=True)
        print_customer(df)
        # Let the user pick a shipping method, then persist it.
        d_option, d_cost = delivery_options(destination, dangerous, urgency, dimension, weight)
        df.at[df.last_valid_index(), 'Shipping_option'] = d_option
        df.at[df.last_valid_index(), 'Cost'] = d_cost
        df.to_csv('booking_quotes.csv', index=True)
        print_customer(df)
        customer = next_customer()
if __name__ == "__main__":
    main()
| true |
a523dfb92df7c9364165cecc81c4a48e4f58e944
|
Python
|
MiracleOfFate/WebUITest
|
/action_chains/drag_and_drop_by_offset_demo.py
|
UTF-8
| 1,351 | 2.875 | 3 |
[] |
no_license
|
from time import sleep
from selenium import webdriver
# Open the browser and load the registration page (translated from: 打开浏览器,并加载项目地址)
driver = webdriver.Chrome()
driver.get("https://passport.ctrip.com/user/reg/home")
sleep(2)
# Click "agree and continue" (translated from: 点击同意并继续)
element_agree = driver.find_element_by_css_selector("div.pop_footer>a.reg_btn.reg_agree")
element_agree.click()
sleep(3) # important!!!
from selenium.webdriver.common.action_chains import ActionChains
# Locate the slider handle (translated from: 定位滑块的位置)
element_hk = driver.find_element_by_css_selector('div.cpt-drop-box>div.cpt-drop-btn')
# print(element_hk.size) # {'height': 40, 'width': 40}
# print(element_hk.size['height'], element_hk.size['width'])
# Locate the slider track (translated from: 定义滑块条的位置)
element_hkt = driver.find_element_by_css_selector('div.cpt-drop-box>div.cpt-bg-bar')
# print(element_hkt.size) # {'height': 40, 'width': 268}
# print(element_hkt.size['height'], element_hkt.size['width'])
# Perform the slide (translated from: 实现滑块操作)
# drag_and_drop_by_offset(source element, x offset to drag to, y offset to drag to)
x_location = element_hk.size['width'] + element_hkt.size['width']
y_location = element_hkt.size['height']
# print(x_location, y_location) # 308 40
ActionChains(driver).drag_and_drop_by_offset(element_hk, x_location, y_location).perform()
sleep(2)
driver.quit()
| true |
bcfc119db38fd6c471f0c248dd04dd23b01e5c70
|
Python
|
loganmeetsworld/advent-of-code
|
/2021/5/solution.py
|
UTF-8
| 1,847 | 3.109375 | 3 |
[] |
no_license
|
import re
from collections import Counter
from aoc_utils import aoc_utils
from tests import cases
def find_all_coordinates(x1, y1, x2, y2, part_one=False):
    """Return every integer grid point on the segment (x1,y1)-(x2,y2),
    each encoded as str([x, y]).

    Horizontal/vertical points are listed in ascending order; diagonal
    points run from (x1, y1) toward (x2, y2). With part_one=True the
    diagonal case is skipped (AoC 2021 day 5, part 1).
    """
    points = []
    dx = x1 - x2
    dy = y1 - y2
    if dx == 0:  # vertical segment
        points.extend(str([x1, y]) for y in range(min(y1, y2), max(y1, y2) + 1))
    if dy == 0:  # horizontal segment
        points.extend(str([x, y1]) for x in range(min(x1, x2), max(x1, x2) + 1))
    if part_one:
        return points
    if dx != 0 and dy != 0:
        # 45-degree diagonal: step one unit per axis toward (x2, y2).
        step_x = 1 if x2 > x1 else -1
        step_y = 1 if y2 > y1 else -1
        points.extend(str([x1 + i * step_x, y1 + i * step_y])
                      for i in range(abs(dx) + 1))
    return points
def horrizontal_points(x1, y1, x2, y2):
    """Grid points (as str([x, y])) on a purely horizontal or vertical
    segment; empty for anything else.

    Name kept as-is — typo included — since external code may call it.
    """
    points = []
    if x1 == x2:  # vertical
        points = [str([x1, y]) for y in range(min(y1, y2), max(y1, y2) + 1)]
    if y1 == y2:  # horizontal
        points = points + [str([x, y1]) for x in range(min(x1, x2), max(x1, x2) + 1)]
    return points
return points
def answer(problem_input, level, test=False):
    """Count grid points covered by at least two vent lines.

    Each input line holds four integers (x1, y1, x2, y2); level 1
    ignores diagonal segments, level 2 includes them.
    """
    covered = []
    for line in problem_input.splitlines():
        coords = [int(token) for token in re.findall(r'(\d+)', line)]
        covered += find_all_coordinates(*coords, part_one=(level == 1))
    overlap_counts = Counter(covered)
    return sum(1 for count in overlap_counts.values() if count > 1)
aoc_utils.run(answer, cases)
| true |
e68d7d561bd15a5cff8065ce63e25d4e8dcf5ebe
|
Python
|
akki8087/HackerRank
|
/Binary Numbers.py
|
UTF-8
| 528 | 2.640625 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 17:53:03 2018
@author: NP
"""
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    import sys
    # Read n and strip the '0b' prefix from its binary representation.
    n = int(input().strip())
    tst = str(bin(n))
    tst = tst[2:]
    # Track the current and longest run of consecutive '1' bits.
    ones = 0
    maxOnes = 0
    for i in tst:
        if i == '1':
            ones += 1
            if maxOnes < ones:
                maxOnes = ones
        else:
            # A '0' breaks the current run.
            ones = 0
    print(maxOnes)
| true |
22824cec8886ddd756c96fc252fc65541d80de6b
|
Python
|
sinx-wang/dealCourtDocs
|
/deal.py
|
UTF-8
| 2,301 | 3.203125 | 3 |
[] |
no_license
|
"""Convert downloaded documents format to .docx and select
"""
#-*-coding:utf-8-*-
import os
from docx import Document
import win32com.client as wc
class DealDocuments:
    """
    Convert downloaded court documents to .docx so they can later be
    filtered by keyword. (Translated from the original Chinese docstring.)
    """
    def __init__(self, text, docx_text):
        # text: directory of downloaded .doc files; docx_text: output directory.
        self.text = text
        self.docx_text = docx_text
    # python-dox can not deal with .doc, must convert to .docx
    @staticmethod
    def convert_doc_to_docx(filename: str):
        """
        Convert one file from .doc to .docx via the Word COM interface.
        (Translated from the original Chinese docstring.)
        """
        word = wc.Dispatch('Word.Application')
        # Relative paths are unreliable with the Word COM API, so
        # absolute paths are used. (Translated from: 用相对路径会有问题)
        absolute_read_path = 'D:\\Documents\\dealDocs\\text\\' + filename
        absolute_write_path = 'D:\\Documents\\dealDocs\\docx_text\\' + filename + 'x'
        doc = word.Documents.Open(absolute_read_path, False, False, True)
        # 12 is the wdFormatXMLDocument (.docx) save format code.
        doc.SaveAs(absolute_write_path, 12)
        doc.Close()
        word.Quit()
    def convert(self):
        """
        Batch-convert every file in the source directory.
        (Translated from the original Chinese docstring.)
        """
        for file in os.listdir(self.text):
            self.convert_doc_to_docx(file)
    def deal_content(self, filename: str):
        """
        Check whether the document mentions both "贵阳银行" (Guiyang Bank)
        and "被告" (defendant); delete the file when it does not.
        (Translated from the original Chinese docstring.)
        """
        delete_flag = 1
        file_path = self.docx_text + filename
        doc1 = Document(file_path)
        pl = [paragraph.text for paragraph in doc1.paragraphs]
        for sentence in pl:
            # Keep the file if a sentence contains a colon (but does not end
            # with one) and mentions both the bank and the defendant; a
            # sentence ending in a colon stops the scan.
            # (Translated from the original Chinese comment.)
            if sentence.endswith(":"):
                break
            else:
                if (":" in sentence) and ("贵阳银行"
                                          in sentence) and ("被告" in sentence):
                    delete_flag = 0
        if delete_flag:
            os.remove(file_path)
    def deal(self):
        """
        Batch-check every converted document.
        (Translated from the original Chinese docstring.)
        """
        for file in os.listdir(self.docx_text):
            self.deal_content(file)
# FILENAME_LIST = os.listdir('docx_text')
# for file in FILENAME_LIST:
# deal_content(file)
if __name__ == '__main__':
    # Convert everything in text\ to docx_text\, then filter the results.
    DEAL_DOCUMENT = DealDocuments('text\\', 'docx_text\\')
    DEAL_DOCUMENT.convert()
    DEAL_DOCUMENT.deal()
| true |
feb4b8b6128ed7aa44f774516380dc3a8803f2d1
|
Python
|
kanta-ishii/math_in_python
|
/線形代数/コサイン類似度.py
|
UTF-8
| 383 | 3.671875 | 4 |
[] |
no_license
|
'''ベクトル同士の向きの近さを表す'''
import numpy as np
def cos_sim(vec_1, vec_2):
    """Cosine similarity of two vectors: 1 when parallel, -1 when
    opposite, 0 when orthogonal."""
    dot = np.dot(vec_1, vec_2)
    norm_product = np.linalg.norm(vec_1) * np.linalg.norm(vec_2)
    return dot / norm_product
# a and b point the same way (similarity 1); c is opposite to a (similarity -1).
a = np.array([2,2,2,2])
b = np.array([1,1,1,1])
c = np.array([-1,-1,-1,-1])
print(cos_sim(a, b))
print(cos_sim(a, c))
# (The string below says: "this serves as a measure of how aligned the
# vector directions are".)
'''↑ベクトルの向きがどれだけ揃っているかの指標になる'''
| true |
c670004e5215c5e2fa4e539393d9c8c21b32cbb7
|
Python
|
anuvarsh/CMSI282
|
/hw2/9-lcm.py
|
UTF-8
| 179 | 3.375 | 3 |
[] |
no_license
|
def lcm(x, y):
    """Return the least common multiple of two positive integers.

    Uses Euclid's algorithm for the gcd and the identity
    lcm(x, y) = x * y // gcd(x, y), replacing the original linear search
    which was O(x*y/gcd) in the worst case.
    """
    a, b = x, y
    while b:
        a, b = b, a % b
    # a is now gcd(x, y).
    return x * y // a
| true |
82b68b6f0cf0e349650e61423730cac74b6db352
|
Python
|
CySpiegel/CS5402-Data-Mining
|
/Part 1/DictCounter.py
|
UTF-8
| 704 | 3.046875 | 3 |
[] |
no_license
|
import os
import glob
from collections import OrderedDict
import csv
# Counts the distinct values in one CSV column and prints them by frequency.
ValuesDictionary = {}
fields = []
rows = []
csvLocation = ("originalDataset.csv")
with open(csvLocation, mode='r') as csvfile:
    csvreader = csv.reader(csvfile)
    # First row is the header; remaining rows are data.
    fields = next(csvreader)
    for row in csvreader:
        rows.append(row)
Correct = True
# Only column index 31 (i+1 with i == 30) is examined by this range.
for i in range(30,31):
    values = []        # NOTE(review): never used
    valueCounts = []   # NOTE(review): never used
    print(fields[i+1])
    # NOTE(review): hard-coded row count (14996) — presumably the known
    # dataset size; verify against the actual file length.
    for j in range(14996):
        ValuesDictionary[rows[j][i+1]] = ValuesDictionary.get( rows[j][i+1], 0) + 1
    # Sort value -> count pairs by descending count.
    ValuesDictionarySorted = OrderedDict(sorted(ValuesDictionary.items(), key=lambda x: x[1], reverse=True))
    for k, v in ValuesDictionarySorted.items():
        print("Value: ",k, "\t Appears: ", v)
print(Correct)
| true |
7e925d1317f8ffaaefe237993e8cec2511626126
|
Python
|
Top1Miami/ITMO_FS
|
/ITMO_FS/utils/information_theory.py
|
UTF-8
| 1,449 | 2.9375 | 3 |
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
from math import log
import numpy as np
def conditional_entropy(x_j, y):
    """Conditional entropy H(y | x_j), in nats, of two parallel
    sequences of discrete observations.

    The elog helper of the original is inlined: p*log(p) with values
    outside (0, 1) contributing 0.
    """
    count_x = {x: 0 for x in x_j}      # value of x -> number of samples
    y_given_x = {x: {} for x in x_j}   # value of x -> {value of y: count}
    for i in range(len(y)):
        xv = x_j[i]
        yv = y[i]
        count_x[xv] += 1
        bucket = y_given_x[xv]
        bucket[yv] = bucket.get(yv, 0) + 1
    total = len(y)
    acc = 0.0
    for xv in count_x:
        n_x = count_x[xv]
        # Sum of p*log(p) over the conditional distribution of y given xv.
        inner = 0.0
        for c in y_given_x[xv].values():
            p = c / n_x
            if 0. < p < 1.:
                inner += p * log(p)
        acc += n_x / total * inner
    return -acc
def matrix_mutual_information(x, y):
    """Mutual information I(column; y) for every column of matrix x."""
    return np.apply_along_axis(mutual_information, 0, x, y)
def mutual_information(x, y):
    """I(x; y) = H(y) - H(y | x), in nats."""
    return entropy(y) - conditional_entropy(x, y)
def conditional_mutual_information(x, y, z):
    """I(x; y | z), computed from joint entropies of zipped tuples."""
    return entropy(list(zip(x, z))) + entropy(list(zip(y, z))) - entropy(list(zip(x, y, z))) - entropy(z)
def joint_mutual_information(x, y, z):
    """I(x, y; z) via the chain rule: I(x; z) + I(y; z | x)."""
    return mutual_information(x, z) + conditional_mutual_information(y, z, x)
def interaction_information(x, y, z):
    """Interaction information: I(x; z) + I(y; z) - I(x, y; z)."""
    return mutual_information(x, z) + mutual_information(y, z) - joint_mutual_information(x, y, z)
def elog(x):
    """x * log(x), with the convention that values outside the open
    interval (0, 1) map to 0 (entropy summand helper)."""
    return x * log(x) if 0. < x < 1. else 0
def entropy(x):
    """Shannon entropy, in nats, of the empirical distribution of x.

    The elog helper of the original is inlined: p*log(p) with values
    outside (0, 1) contributing 0.
    """
    freq = {}
    for item in x:
        freq[item] = freq.get(item, 0) + 1
    total = len(x)
    acc = 0.0
    for count in freq.values():
        p = float(count) / total
        if 0. < p < 1.:
            acc += p * log(p)
    return -acc
| true |
a1901b55c34bccf27ff675f32d8149cf64370862
|
Python
|
Arnukk/TDS
|
/main_assignment1.py
|
UTF-8
| 8,251 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
__author__ = 'akarapetyan'
import matplotlib.pyplot as plt
from wnaffect import WNAffect
from emotion import Emotion
from nltk.corpus import wordnet as wn
from cursor_spinning import SpinCursor
import time
import sys
import numpy as np
import PorterStemmer as ps
from scipy.interpolate import interp1d
#CONSTANTS
# Array of inappropriate words to be excluded from the emotion term lists
# (WordNet lemmas that are technically related but semantically off-topic).
toexclude = ["world", "blue", "weight", "self", "hero", "identification", "will", "sympathy", "empathy", "preference", "compatibility", "softheartedness", "puppy"]
def fancy_output(msg, command, starting_time, *args):
    """
    Run *command(*args)* while showing a spinning-cursor progress message,
    then print the elapsed time since *starting_time*.
    @param msg, command, starting_time, args: message to display, callable
           to execute, reference time, positional arguments for the call
    @return output: the result of the command
    """
    spin = SpinCursor(msg=msg, minspin=5, speed=5)
    spin.start()
    output = command(*args)
    # NOTE(review): the spinner is only stopped when the result is truthy —
    # a falsy return value leaves the spinner thread running; confirm intent.
    if output: spin.stop()
    sys.stdout.write("Elapsed time - %3.6f seconds" % (time.time()-starting_time))
    print '\n'
    return output
def preprocess_database(year_range):
    """
    Filter the database of 1-grams according to the year range chosen.
    Reads the ten Google Books 1-gram CSV shards and keeps only the raw
    lines whose year column falls inside *year_range*.
    @param year_range: iterable of years to keep
    @return filtered_db: dict mapping year -> list of raw tab-separated lines
    """
    path_pattern = "data\googlebooks-eng-1M-1gram-20090715-"
    filtered_db = {}
    # Shards are numbered 0..9; each line is "word\tyear\toccurred\tpages\tbooks".
    for source in [path_pattern + '%d.csv' % i for i in range(10)]:
        #df = pd.read_csv(source, names=['word', 'year', 'occurred', 'pages', 'books'], sep='\t', error_bad_lines=False)
        #if len(df[(df['word'] == word) & (df['year'] == year)].index.tolist()) > 0:
        #occur_count = df.loc[[df[(df['word'] == word) & (df['year'] == year)].index.tolist()[0]]].iloc[0]['occurred']
        #return occur_count
        with open(source) as f:
            for line in f:
                data = line.split('\t')
                if int(data[1]) in year_range:
                    # Group the raw lines by year, creating buckets lazily.
                    if int(data[1]) in filtered_db:
                        filtered_db[int(data[1])].append(line)
                    else:
                        filtered_db[int(data[1])] = []
                        filtered_db[int(data[1])].append(line)
    return filtered_db
def get_mood_score(mood, year, filtered_db):
    """Mood score for one year: total occurrences of the mood's terms,
    normalised by the occurrences of 'the', then scaled by the number of
    terms (matching the original expression exactly).

    :param mood: list of mood terms
    :param year: year to score (key into filtered_db)
    :param filtered_db: year -> list of raw tab-separated 1-gram lines
    :return: the mood score as a float
    """
    mood_total = 0
    the_total = 0
    for entry in filtered_db[year]:
        fields = entry.split('\t')
        token = fields[0]
        if token in mood or token.lower() in mood:
            mood_total += int(fields[2])
        if token == "the" or token.lower() == "the":
            the_total += int(fields[2])
    return 1.0 * mood_total / the_total * len(mood)
def get_emotion_terms(emotion):
    """
    Given the emotion, the function returns all the terms related to that emotion.
    Walks the WordNet-Affect children of *emotion*, splits hyphenated terms,
    filters the blacklist in `toexclude`, and expands every kept term with
    its single-word WordNet synonyms. Duplicates (case-insensitive) are skipped.
    @param emotion: name of the emotion - string
    @return terms_array
    """
    terms_array = [emotion]
    for term in Emotion.emotions[emotion].get_children([]):
        # First pass: add the term itself (hyphenated terms are split).
        if "-" in term:
            for t in term.split("-"):
                if t not in toexclude:
                    terms_array.append(t.lower()) if t not in terms_array and t.lower() not in terms_array else None
        else:
            terms_array.append(term) if term not in terms_array and term.lower() not in terms_array else None
        # Second pass: add single-word WordNet lemmas for each (sub-)term.
        if "-" in term:
            for t in term.split("-"):
                if t not in toexclude:
                    for synset in wn.synsets(t):
                        for lemma in synset.lemmas():
                            if "_" not in str(lemma.name()):
                                terms_array.append(str(lemma.name()).lower()) if str(lemma.name()) not in terms_array and str(lemma.name()).lower() not in terms_array else None
        else:
            for synset in wn.synsets(term):
                for lemma in synset.lemmas():
                    if "_" not in str(lemma.name()):
                        terms_array.append(str(lemma.name()).lower()) if str(lemma.name()) not in terms_array and str(lemma.name()).lower() not in terms_array else None
    return terms_array
def get_stems():
    """
    Returns the array of the filtered stems according to the conditions
    mentioned in the paper: Porter-stemmed lowercase tokens longer than
    two characters, de-duplicated.

    Bug fix: `word` is now initialised before the character loop — it was
    previously only ever assigned inside the flush branch, so the first
    alphabetic character raised NameError.
    @return: stemarray
    """
    stemarray = []
    p = ps.PorterStemmer()
    infile = open("./part-of-speech.txt", 'r')
    word = ''
    while 1:
        output = ''
        line = infile.readline()
        # Keep only the token before the first tab; EOF yields ''.
        line = line.split('\t')[0]
        if line == '':
            break
        for c in line:
            if c.isalpha():
                word += c.lower()
            else:
                # Non-letter: stem and flush the accumulated word, then
                # copy the separator through.
                if word:
                    output += p.stem(word, 0, len(word)-1)
                    word = ''
                output += c.lower()
        stemarray.append(output) if (len(output) > 2 and output not in stemarray) else None
    infile.close()
    return stemarray
if __name__ == "__main__":
    starting_time = time.time()
    print "\n+++++++++++++++++++++++++++++++++++"
    print "TDS - Assignment 1"
    print "+++++++++++++++++++++++++++++++++++\n"
    """
    Inittializing Wordnet-Affect
    @DEPENDENCIES: NLTK 3.1 or higher, WordNet 1.6 (unix-like version is utilised), WordNet-Domains 3.2
    """
    # One sample year every 4 years, 1907..1999 inclusive.
    YEAR_RANGE = range(1907, 2001, 4)
    wna = fancy_output("Initializing Wordnet", WNAffect, starting_time, './wordnet-1.6/', './wn-domains-3.2/')
    # JOY combines the 'joy', 'liking' and 'love' WordNet-Affect categories.
    joy_terms = fancy_output("Getting the terms for the mood category JOY", get_emotion_terms, starting_time, 'joy')
    joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'liking') if term not in joy_terms])
    joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'love') if term not in joy_terms])
    #joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'levity') if term not in joy_terms])
    #joy_terms.extend([term for term in fancy_output("", get_emotion_terms, starting_time, 'gratitude') if term not in joy_terms])
    sadness_terms = fancy_output("Getting the terms for the mood category SADNESS", get_emotion_terms, starting_time, 'sadness')
    filtered_dataset = fancy_output("Preprocessing the dataset", preprocess_database, starting_time, YEAR_RANGE)
    spin = SpinCursor(msg="Computing the mood scores", minspin=5, speed=5)
    spin.start()
    # Per-year scores for each mood category.
    joy_mood_scores = {}
    sadness_mood_scores = {}
    for year in YEAR_RANGE:
        joy_mood_scores[year] = get_mood_score(joy_terms, year, filtered_dataset)
        sadness_mood_scores[year] = get_mood_score(sadness_terms, year, filtered_dataset)
    if len(joy_mood_scores) == len(YEAR_RANGE): spin.stop()
    sys.stdout.write("Elapsed time - %3.6f seconds" % (time.time()-starting_time))
    print '\n'
    # Z-score normalisation of each mood series against its own mean/std.
    joy_mood_scores_mean = np.mean(joy_mood_scores.values())
    joy_mood_scores_std = np.std(joy_mood_scores.values())
    sadness_mood_scores_mean = np.mean(sadness_mood_scores.values())
    sadness_mood_scores_std = np.std(sadness_mood_scores.values())
    normalize = lambda mood_val: (mood_val - joy_mood_scores_mean)/(1.0 * joy_mood_scores_std)
    joy_normalized = {}
    for key in joy_mood_scores.keys():
        joy_normalized[key] = normalize(joy_mood_scores[key])
    # The lambda is rebound for the sadness series before reuse.
    normalize = lambda mood_val: (mood_val - sadness_mood_scores_mean)/(1.0 * sadness_mood_scores_std)
    sadness_normalized = {}
    for key in sadness_mood_scores.keys():
        sadness_normalized[key] = normalize(sadness_mood_scores[key])
    # Plot the joy-minus-sadness z-score difference, smoothed with a cubic spline.
    x = [year for year in YEAR_RANGE]
    y = [joy_normalized[key] - sadness_normalized[key] for key in YEAR_RANGE]
    f2 = interp1d(x, y, kind='cubic')
    xnew = range(1907, 2001, 2)
    plt.plot(xnew, f2(xnew))
    markerline, stemlines, baseline = plt.stem(x, y, '-.')
    plt.grid()
    axes = plt.gca()
    axes.set_xlim([1897, 2003])
    plt.title('Historical periods of positive and negative moods')
    plt.xlabel('Year')
    plt.ylabel('Joy - Sadness (Z scores)')
    plt.setp(markerline, 'markerfacecolor', 'b')
    plt.setp(baseline, 'color', 'r', 'linewidth', 2)
    plt.setp(stemlines, linewidth=1, color=[0.08,0.4,1])
    plt.grid()
    print "====== Simulation finished in ", time.time() - starting_time, " seconds =========\n"
    plt.show()
| true |
1b18ad35deb475d40a14519becc1a7287439db7e
|
Python
|
15831944/skiaming
|
/SkiaCode/tools/test_pictures.py
|
UTF-8
| 6,084 | 2.546875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
'''
Compares the rendererings of serialized SkPictures to expected images.
Launch with --help to see more information.
Copyright 2012 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
# common Python modules
import os
import optparse
import sys
import shutil
import tempfile
# Command-line help text; USAGE_STRING is %-formatted with the program name.
USAGE_STRING = 'Usage: %s input... expectedDir'
HELP_STRING = '''
Compares the renderings of serialized SkPicture files and directories specified
by input with the images in expectedDir. Note, files in directoriers are
expected to end with .skp.
'''
def RunCommand(command):
    """Run a command through the system shell, echoing it first.

    @param command the command as a single string
    NOTE(review): os.system passes the string to the shell unquoted —
    paths containing spaces or shell metacharacters will break or be
    interpreted; the exit status is also ignored.
    """
    print 'running command [%s]...' % command
    os.system(command)
def FindPathToProgram(program):
    """Return path to an existing program binary, or raise an exception if we
    cannot find one.

    Searches out/Release and out/Debug under the trunk, first without and
    then with a '.exe' suffix — the same order as the original list.
    @param program the name of the program that is being looked for
    """
    trunk_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                              os.pardir))
    candidates = [os.path.join(trunk_path, 'out', build, program + suffix)
                  for suffix in ('', '.exe')
                  for build in ('Release', 'Debug')]
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise Exception('cannot find %s in paths %s; maybe you need to '
                    'build %s?' % (program, candidates, program))
def RenderImages(inputs, render_dir, options):
    """Renders the serialized SkPictures.
    Uses the render_pictures program to do the rendering, forwarding the
    optional --mode (list of values) and --device flags when present.
    @param inputs the location(s) to read the serialized SkPictures
    @param render_dir the location to write out the rendered images
    @param options parsed command-line options carrying .mode and .device
    """
    renderer_path = FindPathToProgram('render_pictures')
    inputs_as_string = " ".join(inputs)
    command = '%s %s %s' % (renderer_path, inputs_as_string, render_dir)
    if (options.mode is not None):
        # options.mode is a list built by ModeParse (e.g. ['tile', w, h]).
        command += ' --mode %s' % ' '.join(options.mode)
    if (options.device is not None):
        command += ' --device %s' % options.device
    RunCommand(command)
def DiffImages(expected_dir, comparison_dir, diff_dir):
    """Diffs the rendered SkPicture images with the baseline images.
    Uses the skdiff program to do the diffing.
    @param expected_dir the location of the baseline images.
    @param comparison_dir the location of the images to comapre with the
           baseline
    @param diff_dir the location to write out the diff results
    """
    skdiff_path = FindPathToProgram('skdiff')
    RunCommand('%s %s %s %s %s' %
               (skdiff_path, expected_dir, comparison_dir, diff_dir,
                '--noprintdirs'))
def Cleanup(options, render_dir, diff_dir):
    """Delete the render and diff directories when they were created as
    temporaries (i.e. the corresponding option was not supplied).

    @param options parsed options; truthy .render_dir / .diff_dir mean the
           user supplied the directory, so it is preserved
    @param render_dir the directory where the rendered images were written
    @param diff_dir the directory where the diff results were written
    """
    if not options.render_dir and os.path.isdir(render_dir):
        shutil.rmtree(render_dir)
    if not options.diff_dir and os.path.isdir(diff_dir):
        shutil.rmtree(diff_dir)
def ModeParse(option, opt_str, value, parser):
    """Parses the --mode option of the commandline.
    The --mode option will either take in three parameters (if tile or
    pow2tile) or a single parameter (otherwise). The extra parameters are
    consumed from parser.rargs and stored together with the mode name as
    a list on the destination attribute.
    """
    result = [value]
    if value == "tile":
        # 'tile' needs width and height as the next two arguments.
        if (len(parser.rargs) < 2):
            raise optparse.OptionValueError(("--mode tile mising width"
                                             " and/or height parameters"))
        result.extend(parser.rargs[:2])
        del parser.rargs[:2]
    elif value == "pow2tile":
        # 'pow2tile' needs minWidth and height as the next two arguments.
        if (len(parser.rargs) < 2):
            raise optparse.OptionValueError(("--mode pow2tile mising minWidth"
                                             " and/or height parameters"))
        result.extend(parser.rargs[:2])
        del parser.rargs[:2]
    setattr(parser.values, option.dest, result)
def Main(args):
    """Allow other scripts to call this script with fake command-line args.

    @param args the command-line argument list
    """
    parser = optparse.OptionParser(USAGE_STRING % '%prog' + HELP_STRING)
    parser.add_option('--render_dir', dest='render_dir',
                      help=("specify the location to output the rendered files."
                            " Default is a temp directory."))
    parser.add_option('--diff_dir', dest='diff_dir',
                      help=("specify the location to output the diff files."
                            " Default is a temp directory."))
    parser.add_option('--mode', dest='mode', type='string',
                      action="callback", callback=ModeParse,
                      help=("specify how rendering is to be done."))
    parser.add_option('--device', dest='device',
                      help=("specify the device to render to."))
    options, arguments = parser.parse_args(args)
    if (len(arguments) < 3):
        # Need at least <program> <input>... <expected_dir>.
        # BUG FIX: the original message misspelled "output" as "ouput".
        print("Expected at least one input and one output folder.")
        parser.print_help()
        sys.exit(-1)
    inputs = arguments[1:-1]
    expected_dir = arguments[-1]
    # Fall back to throw-away temp directories when the user did not request
    # explicit output locations; Cleanup() deletes those afterwards.
    render_dir = options.render_dir or tempfile.mkdtemp()
    diff_dir = options.diff_dir or tempfile.mkdtemp()
    try:
        RenderImages(inputs, render_dir, options)
        DiffImages(expected_dir, render_dir, diff_dir)
    finally:
        Cleanup(options, render_dir, diff_dir)
# Script entry point: forward the raw argv (program name included) to Main.
if __name__ == '__main__':
    Main(sys.argv)
| true |
8daae6df89ec054ae3b94a39f2e7eac9d791f15c
|
Python
|
zhangmingcheng28/pomdp-py
|
/pomdp_py/utils/templates.py
|
UTF-8
| 4,150 | 3.109375 | 3 |
[
"MIT"
] |
permissive
|
"""
Some particular implementations of the interface for convenience
"""
import pomdp_py
import random
class SimpleState(pomdp_py.State):
    """A state wrapping a single hashable payload.

    Two SimpleState objects are equal exactly when their payloads are
    equal; hashing delegates to the payload as well.
    """

    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return hash(self.data)

    def __eq__(self, other):
        return isinstance(other, SimpleState) and self.data == other.data

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return "SimpleState({})".format(self.data)
class SimpleAction(pomdp_py.Action):
    """An action identified purely by its string name."""

    def __init__(self, name):
        self.name = name

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        return isinstance(other, SimpleAction) and self.name == other.name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "SimpleAction({})".format(self.name)
class SimpleObservation(pomdp_py.Observation):
    """An observation wrapping a single hashable payload.

    Equality and hashing both delegate to the stored payload.
    """

    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return hash(self.data)

    def __eq__(self, other):
        return isinstance(other, SimpleObservation) and self.data == other.data

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return "SimpleObservation({})".format(self.data)
class DetTransitionModel(pomdp_py.TransitionModel):
    """A deterministic transition model.

    The subclass's sample() defines the single successor of a
    (state, action) pair; probability() assigns that successor a mass of
    1 - epsilon and every other state a mass of epsilon.
    """

    def __init__(self, epsilon=1e-12):
        self.epsilon = epsilon

    def probability(self, next_state, state, action):
        """Return 1 - epsilon if next_state is the deterministic successor
        of (state, action), otherwise epsilon."""
        deterministic_next = self.sample(state, action)
        if deterministic_next == next_state:
            return 1.0 - self.epsilon
        return self.epsilon

    def sample(self, state, action):
        raise NotImplementedError
class DetObservationModel(pomdp_py.ObservationModel):
    """A deterministic observation model.

    The subclass's sample() defines the single observation emitted after
    (next_state, action); probability() gives it a mass of 1 - epsilon and
    every other observation a mass of epsilon.
    """

    def __init__(self, epsilon=1e-12):
        self.epsilon = epsilon

    def probability(self, observation, next_state, action):
        """Return 1 - epsilon for the deterministic observation, else epsilon."""
        expected = self.sample(next_state, action)
        return 1.0 - self.epsilon if expected == observation else self.epsilon

    def sample(self, next_state, action):
        raise NotImplementedError
class DetRewardModel(pomdp_py.RewardModel):
    """A deterministic reward model (the most typical kind).

    Subclasses implement reward_func(); sample() and argmax() both simply
    evaluate it, since there is no randomness to integrate over.
    """

    def reward_func(self, state, action, next_state):
        raise NotImplementedError

    def sample(self, state, action, next_state):
        # Deterministic: "sampling" is just evaluating the reward function.
        return self.reward_func(state, action, next_state)

    def argmax(self, state, action, next_state):
        return self.sample(state, action, next_state)
class UniformPolicyModel(pomdp_py.RolloutPolicy):
    """Rollout policy that draws uniformly from a fixed action set."""

    def __init__(self, actions):
        self.actions = actions

    def _random_action(self):
        # random.sample works on sets as well as sequences, so the action
        # collection may be either.
        return random.sample(self.actions, 1)[0]

    def sample(self, state, **kwargs):
        return self._random_action()

    def get_all_actions(self, state=None, history=None):
        return self.actions

    def rollout(self, state, history=None):
        return self._random_action()
| true |
4caa42ead0b819cf666a5ecd8a8af28e0360963a
|
Python
|
vijoin/AtosPythonCourseExamples
|
/ex09-Dictionary.py
|
UTF-8
| 1,861 | 4.03125 | 4 |
[] |
no_license
|
def print_dictionaries():
    """Demonstrate dict operations: create, update, access, get(), delete.

    Prints each intermediate result to stdout; returns None.
    """
    # Add a country
    countries = {'Uruguay':
                     {'official_name': 'República Oriental del Uruguay',
                      'capital': 'Montevideo',
                      'population': 3_457_000,
                      }
                 }
    print(countries)
    # Add a second key
    countries['Venezuela'] = {'official_name': 'República Bolivariana de Venezuela',
                              'capital': 'Caracas',
                              'population': 31_980_000,
                              }
    print(countries)
    # Access a specific key
    print(countries['Venezuela'])
    print(countries['Uruguay'])
    # Access a multilevel key
    print(countries['Uruguay']['population'])
    print(countries['Venezuela']['capital'])
    country = 'Venezuela'
    print("The Capital of {} is {}".format(country, countries[country]['capital']))
    # Asking for a non-existing key with get() avoids a KeyError.
    country = 'Uruguay'
    language = countries[country].get('language', 'Not Specified')
    # BUG FIX: the message used to say "Population" while printing the language.
    print(f'The Language of {country} is {language}')
    population = countries[country].get('population', 'NULL')
    print(f'The Population of {country} is {population}')
    # Access the keys
    countries_keys = countries.keys()
    print(countries_keys)
    # Access the values
    countries_values = countries.values()
    print(countries_values)
    # Access both keys and values
    countries_items = countries['Uruguay'].items()
    print(countries_items)
    # Delete a nested key
    del countries['Venezuela']['population']
    print(countries['Venezuela'])
    # BUG FIX: 'population' was just deleted, so the original
    # format(**countries['Venezuela']) raised KeyError; supply a fallback.
    venezuela = countries['Venezuela']
    print("Los datos de Venezuela son {official_name}, {capital}, {population}".format(
        population=venezuela.get('population', 'Not Specified'), **venezuela))
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    print_dictionaries()
| true |
eda2ba7b3d8592a8ab1569f8766f320003a5c9ee
|
Python
|
DXV-HUST-SoICT/data_mining_mini_projects
|
/neural network/emnist-tensorflow/recognize.py
|
UTF-8
| 2,915 | 2.546875 | 3 |
[] |
no_license
|
from __future__ import absolute_import
from tensorflow.keras.models import load_model
from tensorflow.keras.models import model_from_json
from matplotlib import pyplot as plt
import cv2
import numpy as np
import sys, os
WORKING_DIR = './'
characters = ['0','1','2','3','4','5','6','7','8','9',
'A','B','C','D','E','F','G','H','I','J',
'K','L','M','N','O','P','Q','R','S','T',
'U','V','W','X','Y','Z']
def load_model():
    """Rebuild the recognizer network from its saved architecture and weights.

    Reads the JSON architecture file and the HDF5 weight file from the
    results/ directory under WORKING_DIR and returns the Keras model.

    NOTE(review): this function shadows the `load_model` imported from
    tensorflow.keras.models at the top of the file.
    """
    arch_path = os.path.join(WORKING_DIR, 'results/model_0.0.1.json')
    with open(arch_path, 'r') as json_file:
        loaded_model = model_from_json(json_file.read())
    loaded_model.load_weights(os.path.join(WORKING_DIR, 'results/model_0.0.1.h5'))
    print('Model successfully loaded')
    return loaded_model
def recognize(filepath):
    """Segment the characters in the image at *filepath* and classify each
    one with the saved CNN.

    Side effects: writes 1_preprocess.png, 2_contours.png and
    3_recognized.png to the current directory, prints each predicted
    character, and shows the annotated image with matplotlib.
    """
    model = load_model()
    image = cv2.imread(filepath)
    height, width, depth = image.shape
    #resizing the image to find spaces better
    image = cv2.resize(image, dsize=(width*5,height*4), interpolation=cv2.INTER_CUBIC)
    #grayscale
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    #binary (inverted: strokes become white on black)
    ret, thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV)
    #dilation
    kernel = np.ones((5,5), np.uint8)
    img_dilation = cv2.dilate(thresh, kernel, iterations=1)
    #adding GaussianBlur
    gsblur=cv2.GaussianBlur(img_dilation,(5,5),0)
    cv2.imwrite('1_preprocess.png', gsblur)
    #find external contours (one per character blob)
    ctrs, hier = cv2.findContours(gsblur.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    m = list()  # NOTE(review): ROIs are appended here but never read afterwards
    #sort contours left-to-right by bounding-box x coordinate
    sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
    dp = image.copy()
    # First pass: draw padded bounding boxes for the debug image only.
    for i, ctr in enumerate(sorted_ctrs):
        # Get bounding box
        x, y, w, h = cv2.boundingRect(ctr)
        height_pad = int(0.2*h)
        weight_pad = int(0.2*w)
        cv2.rectangle(dp,(x-weight_pad,y-height_pad),( x + w + weight_pad, y + h + height_pad ),(36,255,12), 9)
    cv2.imwrite('2_contours.png', dp)
    # Second pass: crop each padded box, normalize it to the 64x64 network
    # input, and overlay the predicted character on the debug image.
    for i, ctr in enumerate(sorted_ctrs):
        # Get bounding box
        x, y, w, h = cv2.boundingRect(ctr)
        # Getting ROI with 20% padding on each side
        height_pad = int(0.2 * h)
        weight_pad = int(0.2 * w)
        roi = image[y-height_pad:y+h+height_pad, x-weight_pad:x+w+weight_pad]
        try:
            roi = cv2.resize(roi, dsize=(64,64), interpolation=cv2.INTER_CUBIC)
        except:
            # Skip degenerate crops (e.g. empty slices near the image border).
            continue
        roi = cv2.cvtColor(roi,cv2.COLOR_BGR2GRAY)
        roi = np.array(roi)
        t = np.copy(roi)
        t = t / 255.0  # scale intensities to [0, 1]
        t = 1-t        # invert intensities (1 - t)
        t = t.reshape(1,64,64,1)
        m.append(roi)
        # NOTE(review): Sequential.predict_classes was removed in TF 2.6+;
        # newer code would use np.argmax(model.predict(t), axis=-1).
        pred = model.predict_classes(t)
        # cv2.rectangle(dp,(x-weight_pad,y-height_pad),( x + w + weight_pad, y + h + height_pad ),(36,255,12), 9)
        cv2.putText(dp, characters[pred[0]] , (x, y-height_pad) , cv2.FONT_HERSHEY_SIMPLEX, 3, (90,0,255),9)
        print(characters[pred[0]])
    cv2.imwrite('3_recognized.png', dp)
    print("recognized")
    plt.imshow(dp)
    plt.show()
# Script entry point: expects the image path as the first CLI argument.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        recognize(file_location)
    else:
        print('you have to pass a path of image as a argument')
| true |
b2752ac06aab4e0f283d0c9c6cf23dc2f77df68d
|
Python
|
mir-sam-ali/SEProject2021
|
/PyStackBot/AnswerSearch/preprocessor/preprocessor.py
|
UTF-8
| 10,026 | 2.609375 | 3 |
[] |
no_license
|
import os
import re
from textblob import TextBlob
import software_tokenizer as tokenizer
import nltk
from nltk.stem import WordNetLemmatizer # used for stemming
from remove_stopwords import remove_stopwords
nltk.download('punkt',quiet=True)
nltk.download('wordnet',quiet=True)
class PreprocessPostContent(object):
    """
    Contains relevant methods for Preprocessing Phase
    """
    # Pre-compiled patterns for the HTML / text fragments handled below.
    code_snippet = re.compile(r"<pre>.*?</pre>")
    code_insider = re.compile(r"<code>.*?</code>")
    html_tag = re.compile(r"<.*?>")
    comment_bracket = re.compile(r"\(.*?\)")
    quotation = re.compile(r"(\'\')|(\")|(\`\`)")
    href_resource = re.compile(r"<a.*?>.*?</a>")
    paragraph = re.compile(r"<p>.*?</p>")
    equation1 = re.compile(r"\$.*?\$")     # inline $...$ equations
    equation2 = re.compile(r"\$\$.*?\$\$")  # display $$...$$ equations
    integers = re.compile(r"^-?[1-9]\d*$")
    floats = re.compile(r"^-?([1-9]\d*\.\d*|0\.\d*[1-9]\d*|0?\.0+|0)$")
    operators = re.compile(r"[><\+\-\*/=]")
    email = re.compile(r"\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*")
    # NOTE(review): the range [a-zA-z] also matches the characters between
    # 'Z' and 'a' in ASCII ([\]^_`); probably intended as [a-zA-Z].
    web_url = re.compile(r"[a-zA-z]+://[^\s]*")
    punctuation = re.compile("[,!?:#$%^&*\']")

    @staticmethod
    def filterNILStr(s):
        """Collapse whitespace runs to single spaces and strip both ends."""
        def filterFunc(s): return s and s.strip()
        s = ' '.join(filter(filterFunc, s.split())).strip()
        return s

    def __init__(self):
        # Thresholds tuned for the filters below.
        self.max_quote_rate = 1.5    # max ratio of words lost to brackets
        self.max_quote_diff = 5      # max absolute word count lost to brackets
        self.min_words_sent = 5      # shortest sentence kept
        self.min_words_paragraph = 10  # shortest paragraph kept
        self.max_number_pragraph = 5   # max numeric tokens per sentence
        self.num_token = "[NUM]"     # placeholder substituted for numbers
        self.code_token = "[CODE]"
        self.max_code = 5
        # lemmatize
        self.lemmatizer = WordNetLemmatizer()

    def remove_bracket(self, txt):
        """Drop (...) asides; skip sentences that were mostly brackets."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s = sent.string
            s_c = re.sub(self.comment_bracket, "", s)
            s_c_words = TextBlob(s_c).words
            # Drop the sentence entirely when removing brackets changed it
            # too much (relative rate or absolute word-count difference).
            if len(s_c_words) == 0 or len(sent.words) / len(s_c_words) > self.max_quote_rate or \
               len(sent.words) - len(s_c_words) > self.max_quote_diff:
                continue
            cleaned.append(s_c)
        return " ".join(cleaned)

    def remove_quotation(self, txt):
        """Strip '' `` and " quotation marks from every sentence."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s_c = re.sub(self.quotation, "", sent.string)
            cleaned.append(s_c)
        return " ".join(cleaned)

    def remove_href(self, txt):
        """Drop sentences containing <a> links; keep link-free ones as-is."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s = sent.string
            s_c = re.sub(self.href_resource, "", s)
            # If removing anchors changed the word list, the sentence had a
            # link — skip it rather than keep a mutilated sentence.
            if sent.words != TextBlob(s_c).words:
                continue
            cleaned.append(s)
        return " ".join(cleaned)

    def remove_code(self, txt):
        """Strip <pre>...</pre> code snippets from the raw HTML."""
        cleaned = re.sub(self.code_snippet, "", txt)
        return cleaned

    def remove_equation(self, txt):
        """Drop sentences containing $...$ or $$...$$ equations."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s = sent.string
            s_c = re.sub(self.equation2, "", s)
            s_c = re.sub(self.equation1, "", s_c)
            if sent.words != TextBlob(s_c).words:
                continue
            cleaned.append(s)
        return " ".join(cleaned)

    def remove_numbers(self, txt):
        """Replace numeric tokens with [NUM]; drop number-heavy sentences."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            # First pass: count how many tokens are numeric by deleting them.
            s_tokens = sent.string.split()
            s_tokens = list(
                map(lambda t: re.sub(self.floats, "", t), s_tokens))
            s_tokens = map(lambda t: re.sub(self.integers, "", t), s_tokens)
            s_c = " ".join(s_tokens)
            if len(sent.words) - len(TextBlob(s_c).words) > self.max_number_pragraph:
                continue
            # Second pass: substitute the placeholder token for each number.
            s_tokens = sent.string.split()
            s_tokens = map(lambda t: re.sub(self.floats, " %s " %
                                            self.num_token, t), s_tokens)
            s_tokens = map(lambda t: re.sub(self.integers, " %s " %
                                            self.num_token, t), s_tokens)
            s_c = " ".join(s_tokens)
            cleaned.append(s_c)
        cleaned = " ".join(cleaned)
        return cleaned

    def remove_operators(self, txt):
        """Drop sentences containing more than 3 operator characters."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s = sent.string
            s_c = re.findall(self.operators, s)
            if len(s_c) > 3:
                continue
            cleaned.append(s)
        return " ".join(cleaned)

    def remove_hmtltag(self, txt):
        """Strip every remaining <...> HTML tag."""
        cleaned = re.sub(self.html_tag, "", txt)
        return cleaned

    def remove_email(self, txt):
        """Drop sentences that contain an e-mail address."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s = sent.string
            s_c = re.sub(self.email, "", s)
            if sent.words != TextBlob(s_c).words:
                continue
            cleaned.append(s)
        return " ".join(cleaned)

    def remove_url(self, txt):
        """Drop sentences that contain a URL."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            s = sent.string
            s_c = re.sub(self.web_url, "", s)
            if sent.words != TextBlob(s_c).words:
                continue
            cleaned.append(s)
        return " ".join(cleaned)

    def remove_useless(self, txt):
        """Drop very short sentences and short fragments lacking end marks."""
        cleaned = []
        for sent in TextBlob(txt).sentences:
            if len(sent.words) < self.min_words_sent:
                continue
            # Unterminated fragments need twice the minimum length to be kept.
            if sent[-1] not in ('.', '?', '!') and len(sent.words) < 2 * self.min_words_sent:
                continue
            cleaned.append(sent.string)
        return " ".join(cleaned)

    def remove_punctuation(self, txt):
        """Strip the punctuation characters listed in `punctuation`."""
        cleaned = re.sub(self.punctuation, "", txt)
        return cleaned

    def __process(self, txt):
        # Full cleaning pipeline; the order matters (e.g. links and e-mails
        # must be handled before the generic HTML-tag stripper runs).
        txt = self.remove_href(txt)
        txt = self.remove_email(txt)
        txt = self.remove_url(txt)
        txt = self.remove_hmtltag(txt)
        txt = self.remove_equation(txt)
        txt = self.remove_bracket(txt)
        txt = self.remove_numbers(txt)
        txt = self.remove_operators(txt)
        txt = self.remove_quotation(txt)
        return txt

    def getParagraphs(self, raw_txt):
        """Extract <p>...</p> paragraphs with at least min_words_paragraph words."""
        raw_txt = self.filterNILStr(raw_txt)
        paragraphs_candidates = re.findall(self.paragraph, raw_txt)
        # paragraphs_candidates = [p[3:-4]
        #                          for p in paragraphs_candidates if len(p[3:-4]) > 0]
        paragraphs = []
        for p in paragraphs_candidates:
            if len(TextBlob(p).words) < self.min_words_paragraph:
                continue
            paragraphs.append(p)
        return paragraphs

    def filter_wordlist(self, wordlist):
        """Keep tokens longer than one character (plus the pronoun 'I')."""
        def condition(t): return len(t) > 1 or t.upper() == 'I'
        filter_list = list(filter(condition, wordlist))
        return filter_list

    def lemmatize(self, text):
        """Lemmatize each token with WordNet."""
        text = [self.lemmatizer.lemmatize(token) for token in text]
        return text

    # [ [word1, word2, ...], [word1, word2, ...], [word1, word2, ...], ... ]
    def get_mul_para_wordlist_list(self, raw_txt):
        # return a list of paragraphs of plain text(word list)
        # NOTE(review): despite the name/comment, this returns a single
        # space-joined string of all cleaned paragraphs.
        raw_txt = raw_txt.lower()
        txt = self.remove_code(raw_txt)
        paragraphs = self.getParagraphs(txt)
        wordlist_list = []
        for p in paragraphs:
            cleaned = self.__process(p)
            if len(cleaned.split()) == 0:
                continue
            wordlist = self.filterNILStr(cleaned)
            wordlist = tokenizer.tokenize(wordlist)
            wordlist = self.filter_wordlist(wordlist)
            wordlist = remove_stopwords(wordlist)
            wordlist = self.lemmatize(wordlist)
            wordlist_list.append(' '.join(wordlist))
        return ' '.join(wordlist_list)

    # [word1, word2, ...]
    def get_single_para_word_list(self, raw_txt):
        # return a list of plain text(word list)
        # filter code
        # NOTE(review): unlike the multi-paragraph variant, this does not
        # call remove_code first, and it also returns a joined string.
        raw_txt = raw_txt.lower()
        cleaned = self.__process(raw_txt)
        text = self.filterNILStr(cleaned)
        word_list = tokenizer.tokenize(text)
        word_list = self.filter_wordlist(word_list)
        word_list = remove_stopwords(word_list)
        word_list = self.lemmatize(word_list)
        return ' '.join(word_list)
if __name__ == '__main__':
    # Smoke test: run the multi-paragraph pipeline on a sample HTML answer.
    ans = '''
<p>It is saied that Whenever<code>code</code> a problem becomes solvable by a computer, people start arguing that it does not require intelligence . </p>
<p>[CLS] "Whenever a problem becomes solvable by a computer , people start arguing that it does not require intelligence . [SEP] John McCarthy is often quoted : `` As soon as it works , no one calls it AI anymore '' ( Referenced in CACM )[SEP] ."</p>
<p>"One of my teachers in <code>jet.listen</code>college said that in the 1950 's , a professor was asked what he thought was intelligent for a machine . The professor reputedly answered that if a vending machine gave him the right change , that would be intelligent ."</p>
<p>"Later , playing chess was considered intelligent . However , computers can now defeat grandmasters at chess , and people are no longer saying that it is a form of intelligence ."</p>
<p>"Now we have OCR . It 's already stated in another answer that our methods do not have the recognition facilities of a 5 year old . As soon as this is achieved , people will say `` meh , that 's not intelligence , a 5 year old can do that ! ''"</p>
<p>"A psychological bias , a need to state that we are somehow superior to machines , is at the basis of this ."</p>
'''
    answer = PreprocessPostContent().get_mul_para_wordlist_list(ans)
| true |
f68ab18c169f189297a72ee16fcd6971b922075c
|
Python
|
rafaelperazzo/programacao-web
|
/moodledata/vpl_data/3/usersdata/2/797/submittedfiles/ex1.py
|
UTF-8
| 237 | 3.703125 | 4 |
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import division
# Read the three values as numbers and print the largest with two decimals.
# BUG FIX: on Python 3, input() returns str, so the >= comparisons would be
# lexicographic (e.g. '9' > '10'); convert explicitly. (The original relied
# on Python 2's input() evaluating the typed expression.)
a = float(input('Digite a: '))
b = float(input('Digite b: '))
c = float(input('Digite c: '))
if a >= b and a >= c:
    print('%.2f' % a)
elif b >= a and b >= c:
    print('%.2f' % b)
else:
    print('%.2f' % c)
| true |
76b2f0b221c3bac3e1e9848d7972b1e0a0282193
|
Python
|
kosta324/algo_and_structures_python
|
/Lesson_2/6.py
|
UTF-8
| 1,499 | 4.1875 | 4 |
[] |
no_license
|
"""
6. В программе генерируется случайное целое число от 0 до 100.
Пользователь должен его отгадать не более чем за 10 попыток. После каждой
неудачной попытки должно сообщаться больше или меньше введенное пользователем
число, чем то, что загадано. Если за 10 попыток число не отгадано,
то вывести загаданное число.
"""
from random import random
# Inclusive range from which the secret number is drawn.
min_number = 0
max_number = 100
def random_number(min, max):
    """Return a uniformly distributed integer in the inclusive range [min, max].

    NOTE: the parameter names shadow the builtins min/max; kept unchanged
    for backward compatibility with existing callers.
    """
    # BUG FIX: the original returned int(random() * (max - min + 1)), which
    # ignores the lower bound and always yields a value in [0, max - min].
    return min + int(random() * (max - min + 1))
def extrasens(counter=1, secret_number=None):
    """Run the guessing game: up to 10 attempts to find the secret number.

    Prints a higher/lower hint after each miss; announces the secret number
    when all attempts are spent. Iterative rewrite of the recursive version.
    """
    if secret_number is None:
        # Mirror the original recursive bootstrap: draw the secret number
        # and restart counting from the first attempt.
        secret_number = random_number(min_number, max_number)
        counter = 1
    while counter != 11:
        user_answer = int(input(f'Попытка №{counter}\nВведите число: '))
        if user_answer == secret_number:
            return print(f'Поздравляем! Вы отгадали число {secret_number} с {counter} попыток')
        if user_answer > secret_number:
            print('Загаданное число меньше')
        else:
            print('Загаданное число больше')
        counter += 1
    return print(f'К сожалению, вы проиграли. Загаданное число: {secret_number}')
extrasens()
| true |
a4fdec93922ec50a48a7bc99c4cbe7c2647dbecd
|
Python
|
cristinasewell/mongo_db
|
/about_pymongo.py
|
UTF-8
| 1,386 | 2.90625 | 3 |
[] |
no_license
|
import pymongo
import datetime
# create a connection
conn = "mongodb://localhost:27017"
client = pymongo.MongoClient(conn)
# define the travel_db database in Mongo
db = client.travel_db
# quering all destinations
dest = db.destinations.find()
for d in dest:
print(d)
# inserting a document into the destination collection
db.destinations.insert_one(
{
"continent": "Europe",
"country": "Moldova",
"major_cities": ["Chisinau", "Stefan-Voda", "Causeni"]
}
)
# updating a document - adding an item to a document array
# db.destinations.update_one(
# {
# "country": "Moldova"
# },
# {"$push":
# {"major_cities": "Antonesti"}
# }
# )
# # deleting a field from document
# db.destinations.update_one(
# {"country": "Moldova"},
# {"$unset":
# {"major_cities": ""}
# }
# )
# deleting a document from a collection
# db.destinations.delete_one(
# {
# "country": "Moldova"
# }
# )
# A dictionary that represents the document to be inserted
post = {
"continent": "Europe",
"country": "Romania",
"major_cities": ["Bucuresti", "Cluj-Napoca", 'Iasi', 'Timisoara'],
"date": datetime.datetime.utcnow()
}
# Insert the document into the database
# The database and collection, if they don't already exist, will be created at this point.
db.destinations.insert_one(post)
| true |
42b91b8ab0d88bf62259f495d0ded3ae32f2931e
|
Python
|
JoaoGabrielDamasceno/Estudo_Python
|
/Lambdas/lambdas.py
|
UTF-8
| 445 | 4.15625 | 4 |
[] |
no_license
|
"""
Funções Lambdas são funções sem nome, ou seja, funções anônimas
FORMATO
lambda entrada: retorno
"""
exemplo = lambda x: x*3
y = input()
print(y)
print(exemplo(y))
nome_completo = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title()
print(nome_completo('Joao ', ' GABRIEL '))
autores = ['Joao Gabriel', 'Joao Damasceno', 'Ana Silva', 'Carlos Botelho']
autores.sort(key= lambda sobrenome: sobrenome.split()[-1])
print(autores)
| true |
68a075c81d629a6cc46a162ce2182bbbf059a39e
|
Python
|
YasirHabib/data_science_deep_learning_in_python
|
/Section5/ann_train_6.py
|
UTF-8
| 2,175 | 3.09375 | 3 |
[] |
no_license
|
# Section 5, Lecture 37
# This is extension of logistic_softmax_train_5.py
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from process_2 import get_data
def target2indicator(Target, K):
    """One-hot encode a vector of integer class labels.

    @param Target: length-N sequence of integer labels in [0, K)
    @param K: number of classes (width of the indicator matrix)
    @return: (N, K) float array with T[i, Target[i]] == 1
    """
    N = len(Target)
    T = np.zeros((N, K))
    # Vectorized one-hot assignment instead of a Python-level loop.
    T[np.arange(N), Target] = 1
    return T
# Load and shuffle the dataset (get_data comes from the local process_2 module).
X, Target = get_data()
X, Target = shuffle(X, Target)

D = len(X[0]) # dimensionality of input
M = 5 # hidden layer size
K = len(set(Target)) # number of unique values in Y or number of classes

# Hold out the last 100 samples as the test split; one-hot encode targets.
X_train = X[:-100]
Target_train = Target[:-100]
Target_train_ind = target2indicator(Target_train, K)
X_test = X[-100:]
Target_test = Target[-100:]
Target_test_ind = target2indicator(Target_test, K)

# randomly initialize weights
w1 = np.random.randn(D, M)
b1 = np.zeros(M)
w2 = np.random.randn(M, K)
b2 = np.zeros(K)
def softmax(a):
    """Row-wise softmax of a 2-D array of activations.

    BUG FIX: subtracts the per-row maximum before exponentiating so large
    activations do not overflow to inf/nan; mathematically the result is
    unchanged.
    """
    shifted = a - a.max(axis=1, keepdims=True)
    expA = np.exp(shifted)
    return expA / expA.sum(axis=1, keepdims=True)

def forward(X, w, b, v, c):
    """Forward pass of the one-hidden-layer tanh network.

    @param X: (N, D) input batch
    @param w, b: hidden-layer weights (D, M) and biases (M,)
    @param v, c: output-layer weights (M, K) and biases (K,)
    @return: (P(Y|X) of shape (N, K), hidden activations Z of shape (N, M))
    """
    z = np.tanh(np.dot(X, w) + b)
    A = np.dot(z, v) + c
    return softmax(A), z
def predict(P_Y_given_X):
    """Return the most likely class index for each row of probabilities."""
    best_class = np.argmax(P_Y_given_X, axis=1)
    return best_class

def classification_rate(Y, P):
    """Fraction of predictions P that match the targets Y."""
    return np.mean(Y == P)

def cost(T, pY):
    """Mean cross-entropy between indicator targets T and predictions pY."""
    log_likelihood = T * np.log(pY)
    return -np.mean(log_likelihood)
learning_rate = 1e-3
epochs = 10000
train_costs = []
test_costs = []
# Full-batch gradient descent with backprop through the single tanh layer.
for t in range(epochs):
    pY_train, Z_train = forward(X_train, w1, b1, w2, b2)
    pY_test, Z_test = forward(X_test, w1, b1, w2, b2)
    c_train = cost(Target_train_ind, pY_train)
    c_test = cost(Target_test_ind, pY_test)
    train_costs.append(c_train)
    test_costs.append(c_test)
    # Gradient of cross-entropy w.r.t. the softmax input is (p - t).
    w2 -= learning_rate * Z_train.T.dot(pY_train - Target_train_ind)
    b2 -= learning_rate * (pY_train - Target_train_ind).sum(axis = 0)
    # Backprop through tanh: d/dz tanh(z) = 1 - tanh(z)^2.
    # NOTE(review): dZ is computed with the already-updated w2; textbook
    # backprop would use the pre-update weights — confirm this is intended.
    dZ = (pY_train - Target_train_ind).dot(w2.T) * (1 - Z_train*Z_train)
    w1 -= learning_rate * X_train.T.dot(dZ)
    b1 -= learning_rate * dZ.sum(axis = 0)
    if t % 1000 == 0:
        print(t, c_train, c_test)

print("Final train classification_rate:", classification_rate(Target_train, predict(pY_train)))
print("Final test classification_rate:", classification_rate(Target_test, predict(pY_test)))

plt.plot(train_costs, label='train cost')
plt.plot(test_costs, label='test cost')
plt.legend()
plt.show()
| true |
4ee3e4047e91f030f039d91123fb135764fd5f5a
|
Python
|
mobeets/FOMOBot
|
/bin/next_location.py
|
UTF-8
| 967 | 2.921875 | 3 |
[] |
no_license
|
from collections import namedtuple
from random import choice
import csv
INFILE = 'locs.csv'
BIG_CITIES_FILE = 'loc_pops.csv'
RNG = 0.025  # half-width, in degrees, of the bounding box around a location
HEADER = 'locId,country,region,city,postalCode,latitude,longitude,metroCode,areaCode'
Location = namedtuple('Location', HEADER)

def location(infile=INFILE, bigfile=BIG_CITIES_FILE):
    """Pick a random big city and return its Location record.

    Returns None when no row in *infile* matches the chosen city/state.

    @param infile: CSV of location rows matching HEADER
    @param bigfile: CSV of (city, state) rows to choose from
    """
    # BUG FIX: the files were opened in 'rb' mode, which only works with the
    # csv module on Python 2. Python 3 requires text mode with newline=''.
    # `with` also guarantees the handles are closed.
    with open(bigfile, newline='') as csvfile:
        rows = list(csv.reader(csvfile))
    city, state = choice(rows)
    with open(infile, newline='') as csvfile:
        rows = list(csv.reader(csvfile))
    for row in rows:
        loc = Location(*row)
        if not loc.latitude:
            continue  # skip rows without coordinates
        if loc.city.lower() == city.lower() and loc.region == state:
            return loc

def bounding_box(loc, rng=RNG):
    """Return [west, south, east, north] bounds `rng` degrees around *loc*."""
    lat, lon = float(loc.latitude), float(loc.longitude)
    return [lon - rng, lat - rng, lon + rng, lat + rng]
| true |