blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success
---|---|---|---|---|---|---|---|---|---|---|---|
9a9d08616301c9dd4158052c7c106351d644b4da | Python | jackadam1981/LearnPython | /02-数据组合/09.datatime.py | UTF-8 | 1,975 | 3.796875 | 4 | [] | no_license |
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2017/12/13 15:23
# @Author : Jackadam
# @Email :
# @File : 07.date.py.py
# @Software: PyCharm
# Timestamps are universal.
import time,datetime
# Current time:
print(datetime.datetime.now())  # 2017-12-13 15:25:28.892519 - the current time with fractional seconds
print(type(datetime.datetime.now()))  # the type is datetime.datetime; it cannot be written to a database or file directly, convert it with str() first
now = str(datetime.datetime.now())
print(now)
print(now[:10])
print(type(now))
print('--------')
# Convert a timestamp to a datetime
t=time.time()  # get the current timestamp
print(type(t))
print(t)
t=datetime.datetime.fromtimestamp(t)  # convert the timestamp into a datetime
print(type(t))
print(t)
print(type(t.date()))  # the date part of the datetime
# Convert a datetime into a timestamp
dtime = datetime.datetime.now()
ans_time = time.mktime(dtime.timetuple())
print(ans_time)
print('--------')
# Build a custom datetime: at least 3 and at most 7 arguments, in the order year, month, day, hour, minute, second, microsecond
d1 = datetime.datetime(1970,1,1,0,0,0,1)
print(d1)
print(type(d1))
print('--------')
# Time arithmetic
print(datetime.datetime.now()) # returns e.g. 2016-08-19 12:47:03.941925
print(datetime.date.fromtimestamp(time.time())) # a timestamp converted directly to a date, e.g. 2016-08-19
print(datetime.datetime.now())
print(datetime.datetime.now() + datetime.timedelta(3)) # current time + 3 days
print(datetime.datetime.now() + datetime.timedelta(-3)) # current time - 3 days
print(datetime.datetime.now() + datetime.timedelta(hours=3)) # current time + 3 hours
print(datetime.datetime.now() + datetime.timedelta(minutes=30)) # current time + 30 minutes
print(datetime.datetime.now() + datetime.timedelta(seconds=3)) # current time + 3 seconds
c_time = datetime.datetime.now()
print(c_time.replace(minute=3, hour=2)) # replace fields of a datetime
# Comparing datetimes:
d1 = datetime.datetime(1970,1,1)
d2 = datetime.datetime(1981,2,16)
print( d1+datetime.timedelta(1000) > d2)
print(d1 <d2)
print(d1!=d2)
d3 = d1
print(d1 > d2)
| true |
1c3a64d045ffd4738dae4bbe9f697e2a7f022250 | Python | hoyeoness9837/Algorithm-Sources | /problems/greedy/백준문제/15_보석도둑.py | UTF-8 | 2,332 | 3.421875 | 3 | [] | no_license |
# The world-class thief Sangdeok has decided to rob a jewelry store.
# The store holds N jewels in total. Each jewel has a weight Mi and a price Vi. Sangdeok has K bags, and each bag can hold a maximum weight Ci. A bag can carry at most one jewel.
# Write a program that computes the maximum total price of the jewels Sangdeok can steal.
# The first line contains N and K. (1 <= N, K <= 300,000)
# The next N lines give each jewel's Mi and Vi. (0 <= Mi, Vi <= 1,000,000)
# The next K lines give each bag's maximum weight Ci. (1 <= Ci <= 100,000,000)
# On the first line, print the maximum sum of jewel prices Sangdeok can steal.
# example) 3 2 | 1 65 | 5 23 | 2 99 | 10 | 2 --> 164
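# Worked example for the input above: the capacity-2 bag takes the weight-2 jewel worth 99
# and the capacity-10 bag takes the weight-1 jewel worth 65, so the answer is 99 + 65 = 164.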
# my answer: times out.
N, K = map(int, input().split())
gem = []
for i in range(N):
gem.append(list(map(int, input().split())))
gem = sorted(gem, key = lambda x: x[1], reverse = True)
limit = []
for i in range(K):
limit.append(list(map(int, input().split())))
count = 0
value = 0
for i in range(N):
for bag in limit:
if count == K:
break
elif gem[i][0] > bag[0]:  # the jewel's weight exceeds this bag's capacity
continue
else:
value += gem[i][1]
count += 1
print(value)
###ANSWER###
import heapq
import sys
input = lambda: sys.stdin.readline().strip()
N, K = map(int, input().split())
gem = []
for i in range(N):
M, V = list(map(int, input().split()))
heapq.heappush(gem, [M, V]) # the heap is ordered by smallest weight M first
bags = [int(input()) for i in range(K)]
bags.sort() # sort the bag capacities in ascending order
value = 0
p = []
for i in range(K):
capacity = bags[i]
while gem and capacity >= gem[0][0]: # for every jewel light enough to fit in this bag
[M, V] = heapq.heappop(gem) # pop it from the weight-ordered heap
heapq.heappush(p, -V) # push -V, so p yields the largest value V first
if p: # if there is at least one candidate jewel for this bag
value -= heapq.heappop(p) # popped as -V, so subtracting adds V
elif not gem: # no jewels left that could fit in any bag
break # done
print(value)
| true |
5b0574e8e2c82eeb4d53b8636b89e86891bd8b80 | Python | chonpsk/38cmSK_C-34 | /proxyPool/proxyPool/db.py | UTF-8 | 2,151 | 3.0625 | 3 | ["Apache-2.0"] | permissive |
"""
-------------------------------------------------
File Name: db.py
Description: 数据库操作模块,负责对象与底层数据库
的交互。
Author: Liu
Edited by: chonps
-------------------------------------------------
"""
import redis
import random
from .error import PoolEmptyError
from .setting import HOST, PORT
import datetime
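# Returns the current day of the month in UTC+9; it is appended to each proxy key below so
# that the de-duplication set used in put() effectively rolls over once a day.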
def get_date():
return (datetime.datetime.utcnow() + datetime.timedelta(hours=9)).day
class RedisClient(object):
"""
Redis database access class.
"""
def __init__(self, host=HOST, port=PORT):
self.__db = redis.Redis(host, port)
def get(self, count=1):
"""从Pool中获取一定量数据。"""
proxies = self.__db.lrange("proxies", 0, count - 1)
self.__db.ltrim("proxies", count, -1)
return proxies
def remove(self, proxy):
# self.__db.srem("proxy_set", proxy)
self.__db.lrem("proxies", 0, proxy)
def put(self, proxy):
"""将代理压入Pool中。
用Redis的set容器来负责去重,如果proxy能被压入proxy_set,
就将其放入proxy pool中,否则不压入。
"""
if self.__db.sadd("proxy_set", proxy + '_' + str(get_date())):
self.__db.rpush("proxies", proxy)
else:
pass
def put_many(self, proxies):
"""将一定量的代理压入Pool。
"""
for proxy in proxies:
self.put(proxy)
def choice(self):
if self.__db.llen("proxies") == 0:
return ''
else:
return self.__db.lindex("proxies", random.randint(0, self.__db.llen("proxies") - 1))
def pop(self):
"""弹出一个可用代理。
"""
try:
return self.__db.blpop("proxies", 30)[0].decode('utf-8')
except:
raise PoolEmptyError
@property
def queue_len(self):
"""获取proxy pool的大小。
"""
return self.__db.llen("proxies")
def flush(self):
"""刷新Redis中的全部内容,测试用。
"""
self.__db.flushall()
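# A minimal usage sketch (illustration only, not part of the original module; it assumes a
# reachable Redis instance at the HOST/PORT configured in .setting):
#   client = RedisClient()
#   client.put_many(["1.2.3.4:8080", "5.6.7.8:3128"])
#   print(client.queue_len)
#   print(client.pop())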
| true |
e1843eb40501357f8e37205d2f6c200a2d16b45e | Python | caioraveli/Python2021 | /02-Intermediate/34-decoradores02.py | UTF-8 | 623 | 4.03125 | 4 | [] | no_license |
# Decorators can be used to add extra functionality to the function being decorated
# For example, to measure how long the decorated function takes to run
from time import time,sleep
def velocidade(funcao):
def interna(*args,**kwargs):
start_time = time()
resultado = funcao(*args,**kwargs)
end_time = time()
tempo = (end_time - start_time) * 1000
print(f'A função {funcao.__name__} levou {tempo:.2f}ms para ser executada')
return resultado  # propagate the wrapped function's return value
return interna
@velocidade
def demora():
for i in range(10000):
print(i,end='')
print()
demora()
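# Note: the @velocidade decoration above is equivalent to writing demora = velocidade(demora),
# so calling demora() actually runs interna(), which times the wrapped function.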
| true |
bf3e0f0137f928f1d324d02de07953303a286240 | Python | gyrospectre/securitybot | /securitybot/tasker.py | UTF-8 | 4,551 | 2.96875 | 3 | ["LicenseRef-scancode-generic-cla", "Apache-2.0"] | permissive |
'''
A system for retrieving and assigning tasks for the bot as well as updating
their statuses once acted upon. This file contains two abstract classes,
Tasker and Task, which define a class to manage tasks and a task class
respectively.
'''
__author__ = 'Alex Bertsch, Antoine Cardon'
__email__ = 'abertsch@dropbox.com, antoine.cardon@algolia.com'
import pytz
import logging
from enum import Enum, unique
from typing import List
@unique
class StatusLevel(Enum):
# Task status levels
OPEN = 0 # New task
INPROGRESS = 1 # User has been told
VERIFICATION = 2 # User has said it was them, we're verifying
DONE = 3 # User has said they didn't do it, they didn't
# respond at all, or they said yes and we verified
class Task(object):
def __init__(self, hsh, title, username, reason, description,
url, event_time, performed, comment, authenticated,
status, dbclient):
# type: (str, str, str, str, str, bool, str, bool, int) -> None
'''
Creates a new Task for an alert that should go to `username` and is
currently set to `status`.
Args:
title (str): The title of this task.
username (str): The user who should be alerted from the Task.
reason (str): The reason that the alert was fired.
description (str): A description of the alert in question.
url (str): A URL in which more information can be found about the
alert itself, not the Task.
performed (bool): Whether or not the user performed the action that
caused this alert.
comment (str): The user's comment on why the action occurred.
authenticated (bool): Whether 2FA has succeeded.
status (enum): See `StatusLevel` above.
'''
self.title = title
self.username = username
self.reason = reason
self.description = description
self.url = url
self.event_time = event_time.astimezone(pytz.UTC)
self.performed = performed
self.comment = comment
self.authenticated = authenticated
self.status = status
self._dbclient = dbclient
self.hash = hsh
def _set_status(self, status):
# type: (int) -> None
'''
Sets the status of a task in the DB.
Args:
status (int): The new status to use.
'''
self._dbclient.execute('set_status', (status, self.hash))
def _set_response(self):
# type: () -> None
'''
Updates the user response for this task.
'''
self._dbclient.execute(
'set_response',
(
self.comment,
self.performed,
self.authenticated,
self.hash
)
)
def set_open(self):
self._set_status(StatusLevel.OPEN.value)
def set_in_progress(self):
self._set_status(StatusLevel.INPROGRESS.value)
def set_verifying(self):
self._set_status(StatusLevel.VERIFICATION.value)
self._set_response()
def finalise(self):
self._set_status(StatusLevel.DONE.value)
logging.debug('Deleting task {} from database.'.format(
self.hash)
)
self._dbclient.execute(
'delete_alert', (self.hash, )
)
self._dbclient.execute(
'delete_alert_status', (self.hash, )
)
self._dbclient.execute(
'delete_user_response', (self.hash, )
)
class Tasker(object):
'''
A simple class to retrieve tasks on which the bot should act upon.
'''
def __init__(self, dbclient):
self._dbclient = dbclient
def _get_tasks(self, level) -> List[Task]:
# type: (int) -> List[Task]
'''
Gets all tasks of a certain level.
Args:
level (int): One of StatusLevel
Returns:
List of SQLTasks.
'''
alerts = self._dbclient.execute('get_alerts', (level,))
return [Task(*alert, dbclient=self._dbclient) for alert in alerts]
def get_new_tasks(self):
# type: () -> List[Task]
return self._get_tasks(StatusLevel.OPEN.value)
def get_active_tasks(self):
# type: () -> List[Task]
return self._get_tasks(StatusLevel.INPROGRESS.value)
def get_pending_tasks(self):
# type: () -> List[Task]
return self._get_tasks(StatusLevel.VERIFICATION.value)
| true |
dbe14a7073f6bac1774a83af13c26bb65819cb05 | Python | Akumatic/ViPLab-Backend-C-CPP-Module | /example_run.py | UTF-8 | 807 | 2.546875 | 3 | ["MIT"] | permissive |
import os, sys, c, dataObjects
# load exercise and solution data in json format
cur_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
exercise_data_path = os.path.join(cur_dir, "examples", "exercise.json")
solution_data_path = os.path.join(cur_dir, "examples", "solution.json")
exercise_data = dataObjects.readJson(exercise_data_path)
solution_data = dataObjects.readJson(solution_data_path)
# create exercise and solution objects
exercise = dataObjects.Exercise(exercise_data)
solution = dataObjects.Solution(solution_data, exercise)
# optional configuration, e.g. for timeout during running
cfg = {"timelimitInSeconds": 15}
# create C module object, process data and store result data in json format
module = c.C(solution, cfg)
module.processData()
result = module.result.createJson()
print(result)
| true |
c777e00bef9aea52df6b0ceaf9549496cc9bd5c7 | Python | viing937/leetcode | /algorithms/python/449.py | UTF-8 | 1,232 | 3.625 | 4 | ["MIT"] | permissive |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from bisect import bisect
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
data = []
if not root: return ''
def dfs(node):
data.append(node.val)
if node.left: dfs(node.left)
if node.right: dfs(node.right)
dfs(root)
return ','.join(map(str, data))
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if not data: return None
def helper(data):
if not data: return None
root = TreeNode(data[0])
bound = bisect(data, data[0], lo=1)
root.left = helper(data[1:bound])
root.right = helper(data[bound:])
return root
data = list(map(int, data.split(',')))
return helper(data)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
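# Worked example (hypothetical input): for the BST with root 2, left child 1 and right child 3,
# serialize() visits the nodes in preorder and returns "2,1,3"; deserialize() takes 2 as the root,
# uses bisect to find the first value greater than 2, and rebuilds left=[1] and right=[3] recursively.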
| true |
4c12bddd4cf082aa4b3cb9291a02d223d1d57802 | Python | jorgesfj/Jorge-Soares | /P1ELP1/funcoes_recursivas/decimal_binario..py | UTF-8 | 107 | 3.4375 | 3 | [] | no_license |
def binario(n):
resto = n%2
n = int(n/2)
if n>0:
binario(n)
print(resto)
n = int(input())
binario(n)
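# Example: binario(13) prints 1, 1, 0 and 1 on separate lines (read top to bottom: 1101, i.e. 13 in
# binary), because each call prints its remainder only after the deeper recursive calls have returned.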
| true |
5eb14fd642f7c14762fb6ee2a15dc4af8d37eb2b | Python | liyong6351/python_sbs | /shell/10第十章file/json/1json.py | UTF-8 | 398 | 3.09375 | 3 | [] | no_license |
import json
numbers=[1,2,3,4,5,6,7,8]
number1=[]
try:
with open("data/json.txt","w") as js:
json.dump(numbers,js)
except FileNotFoundError as fn:
print(str(fn))
else:
print("Greate !")
print(number1)
try:
with open("data/json.txt") as js:
number1 = json.load(js)
except FileNotFoundError as fn:
print(str(fn))
else:
print("Greate !")
print(number1)
| true |
d60d96a95beed4f866e5b8a1968454bf003309ea | Python | sandhya563/codechef | /multi_list.py | UTF-8 | 124 | 2.796875 | 3 | [] | no_license |
# list=[2,3,4,5,6]
# i=0
# multi=1
# while i<len(list):
# a=list[i]
# multi=multi*list[i]
# i=i+1
# print(multi)
| true |
f5c3c89557b3eb14c50d10bfff8fd7f2d79457ae | Python | PSahithi2k/OMR-SCANNER-APPLICATION | /models/AnswersModel.py | UTF-8 | 519 | 2.5625 | 3 | [] | no_license |
from lib.db import *
class AnswersModel:
def __init__(self):
self.conn = connect('app.db')
def createAnswer(self, subject, typeOfExam, className, answerskey):
query = f"INSERT INTO keysheet (subject, typeOfExam, answerskey, className) VALUES ('{subject}','{typeOfExam}','{answerskey}', '{className}')"
try:
insert(self.conn,query)
print("done")
return 1
except:
print("Some database error")
return 0
| true |
09e9979ae87fdc888adb312f9046d5fa076455f3 | Python | antoinemaz/CARI | /controllers/listeProjets.py | UTF-8 | 2,857 | 2.546875 | 3 | ["LicenseRef-scancode-public-domain"] | permissive |
# coding: utf8
# page listing the projects
@auth.requires_login()
def projets():
# table containing all the projects
# note: the query (first argument of the grid method) lists all the projects ACCORDING TO THE ROLE
# OF THE LOGGED-IN USER: requester --> all the files they created, representative --> all the files of their entity, president --> all
# the files
gridProjets = SQLFORM.grid(projetService.getQueryOfDossier(session), fields=[db.dossier.id, db.dossier.intitule, db.dossier.porteur_id, db.dossier.etat_dossier_id, db.dossier.entite_id, db.dossier.date_dossier], orderby=[~db.dossier.date_dossier], searchable=False, csv=False, ui="jquery-ui",user_signature=False, links_in_grid=True, details=False, create=False, deletable=False, editable=False, links=[lambda row:A("Détail", _href=URL("projet", "addProjet", vars=dict(idDossier=row.id)))])
return locals()
# embedded in the project list page: search for a file by its id
@auth.requires_login()
def rechercheDossier():
recherche = False
idDossier = None
rowTrouve = False
intOk = True
# build a search form to look up a file by its id
formRecherche = FORM(DIV('Numéro de dossier : ',_class="center"), INPUT(_name='numDossier',_class="text"), BR(),INPUT(_type='submit', _value="Rechercher"),_class="center", _id="formRecherche")
# if a search was submitted
if formRecherche.accepts(request,session):
recherche = True
response.flash = 'form accepted'
# get the file id typed into the search field
try:
idDossier = int(formRecherche.vars.numDossier)
except ValueError:
# cannot be parsed as an int: the user did not type a number in the field!
intOk = False
# the file id is not null and it is an integer
if formRecherche.vars.numDossier != None and intOk == True:
# fetch the file by its id
row = projetService.getDossierById(formRecherche.vars.numDossier)
# if it was found (it really exists), rowTrouve makes the view show a message saying that a file
# was found, together with the detail URL for that file
if row != None:
rowTrouve = True
elif formRecherche.errors:
response.flash = 'form has errors'
else:
response.flash = 'please fill out the form'
return locals()
| true |
2037eb85f3c38962bab1ed2bc653ba7557076141 | Python | ihrigb/stagebuzzer | /usbdrive/_osfile.py | UTF-8 | 902 | 3.3125 | 3 | ["Apache-2.0"] | permissive |
from ._file import File
from os import listdir, sep
from os.path import isdir, exists, dirname
class OsFile(File):
def __init__(self, path: str):
self._path = path
def is_directory(self) -> bool:
return isdir(self._path)
def get_absolute_path(self):
return self._path
def get_name(self):
return self._path.rsplit(sep, 1)[-1]
def get_children(self, extension: str = None) -> list:
if not self.is_directory():
return list()
paths = listdir(self._path)
paths = map(lambda p: "{}{}{}".format(self._path, sep, p), paths)
if extension is not None:
paths = filter(lambda p: isdir(p) or p.endswith(extension), paths)
return [OsFile(path) for path in paths]
def exists(self) -> bool:
return exists(self._path)
def parent(self):
return OsFile(dirname(self._path))
| true |
a03844d3cec418d29ebe6ecb4edb3bf2e42fec45 | Python | ecccoyao/CNN | /make_sample_data.py | UTF-8 | 292 | 2.9375 | 3 | [] | no_license |
input_file = open('filter_data', 'r')
output_file1 = open('sample2.txt','w')
with input_file as f:
lines = f.readlines()
count = 0
for line in lines:
count += 1
if count % 2 == 0:
output_file1.write(line)
input_file.close()
output_file1.close()
| true |
7dd0c9dc79ea13be4de7fd952310521b0b772d8f | Python | XingyuLu2/Robotics_Navigation | /IMU_Sensor_Analysis/scripts/IMU_Driver.py | UTF-8 | 3,633 | 2.609375 | 3 | [] | no_license |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import serial
import utm
import numpy as np
from sensor_msgs.msg import Imu, MagneticField
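# Converts ZYX Euler angles (yaw, pitch, roll, in radians) into a quaternion [qx, qy, qz, qw].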
def cov_to_quat(yaw, pitch, roll):
qx = np.sin(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) - np.cos(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
qy = np.cos(roll/2) * np.sin(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.cos(pitch/2) * np.sin(yaw/2)
qz = np.cos(roll/2) * np.cos(pitch/2) * np.sin(yaw/2) - np.sin(roll/2) * np.sin(pitch/2) * np.cos(yaw/2)
qw = np.cos(roll/2) * np.cos(pitch/2) * np.cos(yaw/2) + np.sin(roll/2) * np.sin(pitch/2) * np.sin(yaw/2)
return [qx, qy, qz, qw]
if __name__ == '__main__':
#data_example = "$GPGGA,134658.00,5106.9792,N,11402.3003,W,2,09,1.0,1048.47,M,-16.27,M,08,AAAA*60"
SENSOR_NAME = "imu_sensor"
# configure the port parameters
rospy.init_node('data_imu_sensor')
serial_port = rospy.get_param('~port','/dev/ttyUSB0')
serial_baud = rospy.get_param('~baudrate',115200)
# initialize the port
port = serial.Serial(serial_port, serial_baud)
imu_pub = rospy.Publisher(SENSOR_NAME+'/imu', Imu, queue_size = 10)
mag_pub = rospy.Publisher(SENSOR_NAME+'/mag', MagneticField, queue_size = 10)
try:
while not rospy.is_shutdown():
imu_data = port.readline()
# get the $VNYMR format data message
if imu_data[0:6] == "$VNYMR":
# get the data we want by cutting off the header and checksum
# and check whether it is complete data
if len(imu_data) < 122:
print('No complete data collected !')
continue
print(imu_data)
imu_data = imu_data[7:117]
split_data = imu_data.split(",")
imu_msg = Imu()
mag_msg = MagneticField()
# magnetometer data: split_data[3:6]
MagX = float(split_data[3])
MagY = float(split_data[4])
MagZ = float(split_data[5])
Mag_vector = [MagX, MagY, MagZ]
# Convert the Yaw/Pitch/Roll data into a quaternion
yaw = float(split_data[0]) * 3.1415926 / 180
pitch = float(split_data[1]) * 3.1415926 / 180
roll = float(split_data[2]) * 3.1415926 / 180
quater = cov_to_quat(yaw, pitch, roll)
# accelerometer data: split_data[6:9]
AccX = float(split_data[6])
AccY = float(split_data[7])
AccZ = float(split_data[8])
# Gyro (angular rates) data: split_data[9:12]
GyroX = float(split_data[9])
GyroY = float(split_data[10])
GyroZ = float(split_data[11])
# Give data to Imu message
imu_msg.header.stamp = rospy.Time.now()
imu_msg.orientation.x = quater[0]
imu_msg.orientation.y = quater[1]
imu_msg.orientation.z = quater[2]
imu_msg.orientation.w = quater[3]
imu_msg.linear_acceleration.x = AccX
imu_msg.linear_acceleration.y = AccY
imu_msg.linear_acceleration.z = AccZ
imu_msg.angular_velocity.x = GyroX
imu_msg.angular_velocity.y = GyroY
imu_msg.angular_velocity.z = GyroZ
# Give data to MagneticField Messages
mag_msg.magnetic_field.x = MagX
mag_msg.magnetic_field.y = MagY
mag_msg.magnetic_field.z = MagZ
# Publish two messages
imu_pub.publish(imu_msg)
mag_pub.publish(mag_msg)
rospy.sleep(0.01)
except rospy.ROSInterruptException:
port.close()
except serial.serialutil.SerialException:
rospy.loginfo("Shutting down paro_depth node...")
| true |
d0cbd8c6594deeaa0ee2469abdaa684372406b72 | Python | shreyaslad/imgmanipulate | /common.py | UTF-8 | 1,696 | 2.984375 | 3 | [] | no_license |
'''
common.py
Common classes and functions for this project
'''
import enum
def hexToRGB(value):
    # str.strip("0x") would remove any leading/trailing '0'/'x' characters and corrupt
    # values such as "0x00FF00", so only a genuine "0x" prefix is dropped here.
    if value.startswith("0x"):
        value = value[2:]
    lv = len(value)
    return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
# this text object prevents the functions from becoming too repetitive
# TODO: implement text decoration
class Text(object):
def __init__(self, content, style, color, size, decoration):
self.content = content
self.style = style
self.color = color
self.size = size
self.decoration = decoration
class Position(enum.Enum):
TOP_LEFT = 0
TOP_RIGHT = 1
BOTTOM_LEFT = 2
BOTTOM_RIGHT = 3
CENTER = 4
TOP_CENTER = 5
BOTTOM_CENTER = 6
class Decoration(enum.Enum):
NONE = 0
UNDERLINE = 1
BOX = 2
class ImageSize(enum.Enum):
NORESIZE = 0
EXTRA_SMALL = 1
SMALL = 2
MEDIUM = 3
LARGE = 4
EXTRA_LARGE = 5
class TextSize(enum.Enum):
NORMAL = 20
HEADING2 = 45
HEADING1 = 50
SUBTITLE = 80
TITLE = 100
# we can get the value like so: Font.REGULAR.value
class Font(enum.Enum):
REGULAR = 0
ITALIC = 1
SEMIBOLD = 2
BOLD = 3
LIGHT = 4
MEDIUM = 5
THIN = 6
SEMIBOLD_ITALIC = 7
BOLD_ITALIC = 8
LIGHT_ITALIC = 9
MEDIUM_ITALIC = 10
THIN_ITALIC = 11
# this one is a bit crusty
# numbers will be converted to hex and then to rgb
class Color(enum.Enum):
# TODO: add transparency
TRANSPARENT = -1
# TODO: detect colors in the image and use those
PRIMARY = -2
SECONDARY = -3
TERTIARY = -4
# TODO: add analogous colors for primary, secondary, and tertiary
BLACK = 0
WHITE = 14540253
RED = 16326688
LIGHTRED = 16539206
LIGHTYELLOW = 16562502
ORANGE = 16740397
LIGHTBLUE = 7973329
LIGHTCYAN = 7983564
LIGHTPURPLE = 7962065
MAGENTA = 16533129
| true |
fbd06d3766e0ca4fbc3b1088d3446e07502f323f | Python | udayinbiswas/Classification-Analysis | /q2e.py | UTF-8 | 669 | 2.6875 | 3 | [] | no_license |
import numpy as np
import math
import re
import csv
import random
import sys
testlabel=[]
predictedlabel=[]
with open('mnist/test.csv',newline='') as csvfileX:
reader = csv.reader(csvfileX)
for row in reader:
testlabel.append(int(row[-1]))
file = open('A2SampleInputOutputFiles/q23output.txt', 'r')
for line in file:
predictedlabel.append(int(line))
accurate=0
count=0
for a,b in zip(testlabel,predictedlabel):
if a==b:
accurate+=1
count+=1
print('Confusion matrix')
counter=0
accurate=0
confusion_matrix = np.zeros((10,10))
for a,b in zip(testlabel,predictedlabel):
confusion_matrix[b][a]+=1
for i in range(0,10):
print(confusion_matrix[i])
| true |
e3b23843c6531f69939db92d0f0eaa164b21adae | Python | huangqiank/Algorithm | /laiclass/hashmap/char_remove.py | UTF-8 | 216 | 3.0625 | 3 | [] | no_license |
'''
Created on Oct 1, 2017
@author: qiankunhuang
'''
def remove(A):
lst=[]
for fast in A:
if fast not in ["u","n"]:
lst.append(fast)
return "".join(lst)
A="aunsdad"
print(remove(A))
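# Expected output: "asdad" (every 'u' and 'n' is filtered out).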
| true |
489cc139273f5effb2c39a13360d6555e66ab059 | Python | kwokmoonho/Machine-Learning-SP500 | /stock.py | UTF-8 | 7,218 | 3.234375 | 3 | [] | no_license |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 14:21:12 2020
@author: kwokmoonho
Stock prediction by using LSTM, KNN, and linear regression
- Using python and different machine learning algorithms to conduct predictions on the S & P 500 index. (LSTM, KNN, Linear Regression)
- Implementing the library stocker from python and compare the result.
Hypothesis:
My hypothesis is that the first and last days of the week could potentially affect the closing price of the stock more than the other days.
"""
#import library
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn import neighbors
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from fastai.tabular import add_datepart
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 20,10
scaler = MinMaxScaler(feature_range=(0, 1))
pd.options.mode.chained_assignment = None # default='warn'
#reading data
df = pd.read_csv('sp500.csv')
#overlook the data
df.head()
#setting index as date
df['Date'] = pd.to_datetime(df.Date,format='%Y-%m-%d')
df.index = df['Date']
#plot
plt.figure(figsize=(16,8))
plt.plot(df['Close'], label='Close Price history')
plt.xlabel("Years")
plt.title("Date overview")
plt.ylabel("S&P500 index")
plt.show()
"""
Linear Regression
"""
#sorting
data = df.sort_index(ascending=True, axis=0)
#creating a separate dataset
new_data = pd.DataFrame(index=range(0,len(df)),columns=['Date', 'Close'])
for i in range(0,len(data)):
new_data['Date'][i] = data['Date'][i]
new_data['Close'][i] = data['Close'][i]
#create features
add_datepart(new_data, 'Date')
new_data.drop('Elapsed', axis=1, inplace=True) #elapsed will be the time stamp
#split into train and validation
train = new_data[:7080]
valid = new_data[7080:]
x_train = train.drop('Close', axis=1)
y_train = train['Close']
x_valid = valid.drop('Close', axis=1)
y_valid = valid['Close']
#implement linear regression
model = LinearRegression()
model.fit(x_train,y_train)
#make predictions and find the rmse
preds = model.predict(x_valid)
rms=np.sqrt(np.mean(np.power((np.array(y_valid)-np.array(preds)),2)))
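# RMSE = sqrt(mean((y_true - y_pred)^2)) between the validation closes and the predictions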
print(rms)
print("It is not a good fit by using linear regression")
#plot
#add a predictions column
valid['Predictions'] = 0
valid['Predictions'] = preds
valid.index = df[7080:].index
train.index = df[:7080].index
plt.figure(figsize=(16,8))
plt.plot(train['Close'], label='Train Data')
plt.plot(valid['Close'], label='Test Data')
plt.plot(valid['Predictions'], label='Prediction')
plt.ylabel("S&P500 index")
plt.xlabel("Years")
plt.title("S&P500 Linear Regression")
plt.legend(title='Parameter where:')
plt.show()
"""
KNN
"""
x_train_scaled = scaler.fit_transform(x_train)
x_train = pd.DataFrame(x_train_scaled)
x_valid_scaled = scaler.fit_transform(x_valid)
x_valid = pd.DataFrame(x_valid_scaled)
#using gridsearch to find the best parameter
params = {'n_neighbors':[2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]}
knn = neighbors.KNeighborsRegressor()
model = GridSearchCV(knn, params, cv=5)
#fit the model and make predictions
model.fit(x_train,y_train)
preds = model.predict(x_valid)
rms=np.sqrt(np.mean(np.power((np.array(y_valid)-np.array(preds)),2)))
print(rms)
#plot
valid['Predictions'] = 0
valid['Predictions'] = preds
plt.figure(figsize=(16,8))
plt.plot(train['Close'], label='Train Data')
plt.plot(valid['Close'], label='Test Data')
plt.plot(valid['Predictions'], label='Prediction')
plt.ylabel("S&P500 index")
plt.xlabel("Years")
plt.title("S&P500 KNN")
plt.legend(title='Parameter where:')
plt.show()
"""
LSTM
"""
#creating dataframe
data = df.sort_index(ascending=True, axis=0)
new_data = pd.DataFrame(index=range(0,len(df)),columns=['Date', 'Close'])
for i in range(0,len(data)):
new_data['Date'][i] = data['Date'][i]
new_data['Close'][i] = data['Close'][i]
#setting index
new_data.index = new_data.Date
new_data.drop('Date', axis=1, inplace=True)
#creating train and test sets
dataset = new_data.values
train = dataset[0:7080,:]
valid = dataset[7080:,:]
#converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
x_train, y_train = [], []
for i in range(60,len(train)):
x_train.append(scaled_data[i-60:i,0])
y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))
#check for best units
myRMS = []
for p in range (40,60):
model = Sequential()
model.add(LSTM(units=p, return_sequences=True, input_shape=(x_train.shape[1],1)))
model.add(LSTM(units=p))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=2)
#predicting values, using past 60 from the train data
inputs = new_data[len(new_data) - len(valid) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(60,inputs.shape[0]):
X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
closing_price = model.predict(X_test)
closing_price = scaler.inverse_transform(closing_price)
rms=np.sqrt(np.mean(np.power((valid-closing_price),2)))
myRMS.append(rms)
print(rms)
print("Dimensionality of the output space for different units values:")
for i in range (len(myRMS)):
print("units = {} , rms = {}".format(40+i,myRMS[i]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(units=57, return_sequences=True, input_shape=(x_train.shape[1],1)))
model.add(LSTM(units=57))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=2)
#predicting values, using past 60 from the train data
inputs = new_data[len(new_data) - len(valid) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(60,inputs.shape[0]):
X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
closing_price = model.predict(X_test)
closing_price = scaler.inverse_transform(closing_price)
rms=np.sqrt(np.mean(np.power((valid-closing_price),2)))
myRMS.append(rms)
print(rms)
#plotting
train = new_data[:7080]
valid = new_data[7080:]
valid['Predictions'] = closing_price
plt.figure(figsize=(16,8))
plt.plot(train['Close'], label='Train Data')
plt.plot(valid['Close'], label='Test Data')
plt.plot(valid['Predictions'], label='Prediction')
plt.xlabel("Years")
plt.ylabel("S&P500 index")
plt.title("S&P500 LSTM")
plt.legend(title='Parameter where:')
plt.show()
#zoom in
plt.figure(figsize=(16,8))
plt.plot(valid['Close'], label='Test Data')
plt.plot(valid['Predictions'], label='Prediction')
plt.xlabel("Years")
plt.ylabel("S&P500 index")
plt.title("Zoom in the test result")
plt.legend(title='Parameter where:')
plt.show()
| true |
f16644124c9810718b988fa4fa815f37b988f6cb | Python | darsovit/AdventOfCode2018 | /Day07/Day07.5.py | UTF-8 | 2,437 | 3.25 | 3 | [] | no_license |
#!python
'''
Advent of Code 2018, Day 7, 2nd Solution (multiple workers)
https://adventofcode.com/2018/day/7
'''
def readInputGraph():
graph = {}
graph['edges'] = []
graph['nodes'] = set()
with open('input.txt') as f:
for line in f:
parts = line.split(' ')
startNode = parts[1]
endNode = parts[7]
graph['edges'] += [(startNode,endNode)]
if startNode not in graph['nodes']:
graph['nodes'].add(startNode)
if endNode not in graph['nodes']:
graph['nodes'].add(endNode)
return graph
def buildRunnableGraph( graph ):
graph['runnable'] = set()
for node in graph['nodes']:
graph['runnable'].add(node)
graph['dependencies'] = {}
graph['dependees'] = {}
for edge in graph['edges']:
if edge[1] in graph['runnable']:
graph['runnable'].remove(edge[1])
if edge[1] not in graph['dependencies']:
graph['dependencies'][edge[1]] = set()
graph['dependencies'][edge[1]].add(edge[0])
if edge[0] not in graph['dependees']:
graph['dependees'][edge[0]] = set()
graph['dependees'][edge[0]].add(edge[1])
return graph
def runGraph( numWorkers, graph ):
runOrder = []
ticks = 0
jobsProgressing = {}
while len( graph['runnable'] ) > 0 or len(jobsProgressing) > 0:
runnableList = list(graph['runnable'])
runnableList.sort()
while len(jobsProgressing) < numWorkers and len(runnableList) > 0:
nodeToRun = runnableList.pop(0)
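# each job takes 60 seconds plus its letter's position in the alphabet (A=1 ... Z=26), hence the +61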
jobsProgressing[nodeToRun] = ord(nodeToRun) - ord('A') + 61
graph['runnable'].remove(nodeToRun)
ticks += 1
jobsToRemove = []
for job in jobsProgressing:
jobsProgressing[job] -= 1
if jobsProgressing[job] == 0:
jobsToRemove += [job]
if job in graph['dependees']:
for nodeMaybeRunnable in graph['dependees'][job]:
graph['dependencies'][nodeMaybeRunnable].remove(job)
if len(graph['dependencies'][nodeMaybeRunnable]) == 0:
graph['runnable'].add( nodeMaybeRunnable )
for job in jobsToRemove:
del jobsProgressing[job]
return ticks
graph = readInputGraph()
graph = buildRunnableGraph( graph )
print( runGraph(5, graph) )
| true |
7db4153c3cc4e8988d83bf05e3761d75f7b79215 | Python | dafna972/Cracking-The-Coding-Interview | /Chapter-2/Node.py | UTF-8 | 379 | 3.9375 | 4 | [] | no_license |
class Node:
def __init__(self, next=None, data = None):
self.next = next
self.data = data
def __str__(self):
string = str(self.data)
linked_list = self
while (linked_list.next != None):
string += " -> "
string += str(linked_list.next.data)
linked_list = linked_list.next
return string
| true |
11b73877908496f60c4d112ef861006acd20bda2
|
Python
|
msecher/scripts_python_3_opentelemac_r14499
|
/scripts/python3/data_manip/computation/polyline_integrals.py
|
UTF-8
| 7,324 | 3.25 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
"""@author TELEMAC-MASCARET Consortium
@brief
Compute 2D integrals over time across polygonal chains
@details
Based on PyTelTools that was originally written by Luc Duron (CNR).
The extraction has to be done with the dedicated library.
The integration is performed here.
Can be used to compute:
- the area of the wet section along a polygonal chain
- the flowrate through a polygonal chain
- the sediment flowrate through a polygonal chain (total load, bedload
only or suspension load)
- the flux of any tracer through a polygonal chain
"""
# _____ ___________________________________________________
# ____/ Imports /__________________________________________________/
#
# ~~> dependencies towards other modules
from utils.geometry import get_norm2
from utils.exceptions import TelemacException
import numpy as np
# _____ ________________________________________________
# ____/ MAIN CALL /_______________________________________________/
#
def compute_segments_lengthes(polyline_coords):
"""
Compute the length of each segment of the polygonal chain,
and store it in the second point of each segment (the
first point of the polygonal chain gets a zero length).
@param polyline_coords: coordinates of the polygonal chain
"""
length = []
first_point = None
for x, y in polyline_coords:
if first_point is None: # the first point doesn't have a length
first_point = (x, y)
length.append(0)
else:
second_point = (x, y)
length.append(get_norm2(first_point, second_point))
first_point = second_point
return length
def compute_segments_normals(polyline_coords):
"""
Compute the normal to each segment of the polygonal chain
and store it in the second point of each segment (the
first point of the polygonal chain gets a zero normal).
@param polyline_coords: coordinates of the polygonal chain
"""
normals = []
prev_x, prev_y = None, None
for x, y in polyline_coords:
if prev_x is None: # the first point doesn't have a normal vector
prev_x, prev_y = x, y
normals.append([0, 0])
else:
normal_length = get_norm2((x, y), (prev_x, prev_y))
if normal_length < 10e-10:
raise TelemacException("The normal "\
"length is too small, check your mesh and polyline\n")
normals.append([y-prev_y, prev_x-x]/normal_length)
return normals
def compute_segments_tangents(polyline_coords):
"""
Compute the tangents to each segment of the polygonal chain
and store it in the second point of each segment (the
first point of the polygonal chain gets a zero normal).
@param polyline_coords: coordinates of the polygonal chain
"""
tangents = []
prev_x, prev_y = None, None
for x, y in polyline_coords:
if prev_x is None: # the first point doesn't have a normal vector
prev_x, prev_y = x, y
tangents.append([0, 0])
else:
normal_length = get_norm2((prev_y, x), (y, prev_x))
if normal_length < 10e-10:
raise TelemacException("The normal "\
"length is too small, check your mesh and polyline\n")
tangents.append([x-prev_x, y-prev_y]/normal_length)
return tangents
def wet_area_2d(polyline_coords, water_depth):
"""
Compute the wet section over a polygonal chain
@param polyline_coords: coordinates of the polygonal chain
@param water_depth: water depth along the polygonal chain
"""
wet_area = 0
# first, compute the length of each segment of the polygonal chain
# and store it in an array
lengthes = compute_segments_lengthes(polyline_coords)
# we only loop from 0 to len(lengthes)-1 because the lengthes array has the
# size of the number of points on the line. Here we loop on the number of
# segments, which is the number of points - 1
for i in range(len(lengthes)-1):
# the extracted values can be nan if the polyline
# point is not inside the mesh, thus we chose to
# consider nan values as zero. We make a mistake close to the
# edge of the domain: for example if water_depth[i] is nan and
# water_depth[i+1] is not, the segment is crossing the boundary
# of the domain, but we use the full segment length for the
# integral calculation while only part of it is actually in the domain
if np.isnan(water_depth[i][0]):
water_depth[i] = 0.
if np.isnan(water_depth[i+1][0]):
water_depth[i+1] = 0.
# compute the area of the wet section
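# trapezoidal rule: this segment contributes (water_depth[i] + water_depth[i+1]) / 2 * segment length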
wet_area += (water_depth[i+1] + water_depth[i])*lengthes[i+1]/2.
return wet_area
def flux_2d(polyline_coords, flux_x, flux_y, scalar=None):
"""
Compute the 2D flux over a polygonal chain
@param polyline_coords: coordinates of the polygonal chain
@param flux_x: value of the flux along x at each point of
the polygonal chain, it can be
HU or a solid discharge (QSX, QSBLX or QSSUSPX)
@param flux_y: value of the flux along y, it can be
HV or a solid discharge (QSY, QSBLY or QSSUSPY)
@param scalar: optional, value of a scalar for which
we want to compute the flux. If it is set this function
only returns the scalar flux (and not the flow rate or the
solid discharge)
"""
flux = 0
# first, compute the length of each segment of the polygonal chain
# and store it in an array
normals = compute_segments_normals(polyline_coords)
lengthes = compute_segments_lengthes(polyline_coords)
for i in range(len(lengthes)-1):
scal_i = 1.
scal_ip1 = 1.
# In case flux_2d is called with a scalar argument, fill scal_i
# and scal_ip1 values. Otherwise they are equal to 1 and do not affect
# the result
if scalar is not None:
scal_i = scalar[i]
scal_ip1 = scalar[i+1]
# the extracted values can be nan if the polyline
# point is not inside the computational mesh, thus we chose to
# consider nan values as zero. We make a mistake close to the
# edge of the domain: for example if flux_x[i] is nan and flux_x[i+1]
# is not, the segment is crossing the boundary of the domain, but we
# use the full segment length for the integral calculation while only
# part of it is actually in the domain
if np.isnan(flux_x[i][0]):
flux_x[i] = 0.
flux_y[i] = 0.
scal_i = 0.
if np.isnan(flux_x[i+1][0]):
flux_x[i+1] = 0.
flux_y[i+1] = 0.
scal_ip1 = 0.
# we do not make the check on the scalar and on flux_y, considering
# that if flux_x is not nan they should be correctly defined
# compute the fluxes
product_i = (flux_x[i]*normals[i+1][0]\
+ flux_y[i]*normals[i+1][1]) * scal_i
product_i_plus_1 = (flux_x[i+1]*normals[i+1][0]\
+ flux_y[i+1]*normals[i+1][1]) * scal_ip1
flux += (product_i + product_i_plus_1)*lengthes[i+1]/2.
return flux
| true |
42aa1f9f9d346f57982f1da0086c551801a1e31e | Python | sukeesh/Jarvis | /jarviscli/plugins/corona.py | UTF-8 | 2,836 | 3 | 3 | ["MIT"] | permissive |
import requests
from colorama import Fore
from plugin import plugin, require
from inspect import cleandoc
@require(network=True)
@plugin("corona")
class CoronaInfo:
"""
corona : Display total cases of the world
corona <Country name | country code> : Display cases for the specific country
corona help : Print this help
** Data provided by: https://api.covid19api.com/
"""
def __call__(self, jarvis, s):
if 'help' in s:
jarvis.say(cleandoc(self.__doc__), Fore.GREEN)
else:
corona_info = self.get_corona_info(s)
if corona_info == "URLError":
jarvis.say(f"Result was not available at the moment. Try again!!", Fore.RED)
elif corona_info is None:
jarvis.say(f"Cant find the country \"{s}\"", Fore.RED)
else:
location = corona_info["Country"]
jarvis.say(f"\t+++++++++++++++++++++++++++++++++++++++", Fore.CYAN)
jarvis.say(f"\tCorona status: \"{location}\"", Fore.CYAN)
jarvis.say(f"\t+++++++++++++++++++++++++++++++++++++++", Fore.CYAN)
new_confirmed = corona_info["NewConfirmed"]
jarvis.say(f"\tNew confirmed cases : {new_confirmed}", Fore.YELLOW)
total_confirmed = corona_info["TotalConfirmed"]
jarvis.say(f"\tTotal confirmed cases : {total_confirmed}", Fore.YELLOW)
new_deaths = corona_info["NewDeaths"]
jarvis.say(f"\tNew deaths : {new_deaths}", Fore.RED)
total_deaths = corona_info["TotalDeaths"]
jarvis.say(f"\tTotal deaths : {total_deaths}", Fore.RED)
new_recovered = corona_info["NewRecovered"]
jarvis.say(f"\tNew recovered : {new_recovered}", Fore.GREEN)
total_recovered = corona_info["TotalRecovered"]
jarvis.say(f"\tTotal recovered : {total_recovered}", Fore.GREEN)
def get_corona_info(self, country_name):
url = "https://api.covid19api.com/summary"
response = requests.get(url)
# Intermittently URL responds with a message - You have reached maximum request limit.
if response.text == "You have reached maximum request limit.":
return "URLError"
result = response.json()
if country_name:
for country in result["Countries"]:
if (
country_name == country["Country"].lower()
or country_name == country["CountryCode"].lower()
or country_name == country["Slug"].lower()
):
return country
return None
global_info = result["Global"]
global_info["Country"] = "Worldwide"
return result["Global"]
| true |
e085390e7c998fe115104e349aa34f3192f1e561 | Python | justindodson/PythonProjects | /CPR_Temps/resources/date_processor.py | UTF-8 | 316 | 3.640625 | 4 | [] | no_license |
"""
Simple date processor to take the datetime object and convert it into
a more usable date format that would be expected on the card.
"""
def process_date(date):
str_date = str(date)
year = str_date[:4]
month = str_date[5:7]
day = str_date[8:10]
return ("{}/{}/{}".format(month, day, year))
| true |
00ce54d54cc6e930fba8fe17911b12e9195e457c | Python | AlexCovizzi/critical-node-problem | /example.py | UTF-8 | 15,795 | 2.828125 | 3 | [] | no_license |
import time
from graphdraw import GraphDraw
from greedy import algo_greedy, max_degree_best, min_conn_best, min_conn_ratio_best, create_population
from graph import create_graph, calc_objective, create_graph_with_n_edges, calc_alt_objective
from asp import global_optimum
from minizinc import relaxed_optimum
from neighbor_search import k_swap, best_1_swap, first_improvement_2_swap, tabu_search
from genetic_algo import genetic_algo_binary, genetic_algo_removed, calc_dist
def calc_errors(opt, sol):
rel_error = ((opt - sol) / opt) * 100
abs_error = opt - sol
return rel_error, abs_error
def print_solution(removed, sol, alt_sol, opt, alt_opt, calc_time):
if opt:
rel_error, abs_error = calc_errors(opt, sol)
elif alt_opt:
rel_error, abs_error = calc_errors(alt_opt, alt_sol)
removed = removed[:]
removed.sort()
print("Tempo di calcolo (in sec): %.3f" % calc_time)
print("Soluzione trovata: {}".format(sol))
print("Soluzione alternativa trovata: {}".format(alt_sol))
print("Nodi rimossi: {}".format(removed))
if opt or alt_opt:
print("Errore relativo: %.1f %%" % rel_error)
print("Errore assoluto: {}".format(abs_error))
def create_dat(graph, k, out="cndp.dat"):
with open(out, "w+") as f:
f.write("N_NODES : {}\n".format(len(graph)))
f.write("K : {}\n".format(k))
f.write("ARCHI :\n[{}]".format("\n".join([" ".join([str(c) for c in r]) for r in graph])))
def count_edges(graph):
counter = 0
for i in range(len(graph)):
for j in range(i+1, len(graph)):
counter += graph[i][j]
return counter
if __name__ == '__main__':
# Problem data
dim = 20
k = 6
threshold = None
cconnected = True
n_edges = 45
ddraw = False
# Control flags
gglobal_optimum = True
rrelaxed_optimum = False
max_degree = True
min_connection = True
min_connection_ratio = True
random_k_swap = True
bbest_1_swap = True
fi_2_swap = True
tabu = True
variable_neighborhood_search = True
multistart_search = True
ggenetic_removed = True
genetic_binary = True
save = False
# Random K-Swap parameters
k_s = k // 2 if k // 2 > 0 else 1
n_iter = 100
# Tabu Search parameters
n_tabu = k // 2 if k // 2 > 0 else 1
n_stall = 100
# Variable Neighborhood Search parameters
moves = [best_1_swap, first_improvement_2_swap]
# Multistart Search parameters
mss_n_start = 10
mss_greedy_best = max_degree_best
mss_stoc_dim = 5
mss_move = best_1_swap
# Genetic algorithm parameters
pop_dim = 30
stoc_dim = 5
n_parents = 8
max_generations = 500
if threshold:
graph = create_graph(dim, threshold=threshold, connected=cconnected)
else:
graph = create_graph_with_n_edges(dim, edges=n_edges)
n_connected = calc_objective(graph, [])
n_edges = count_edges(graph)
print("Dimensione del grafo: {}".format(dim))
print("Numero di archi: {}".format(n_edges))
print("Numero di nodi da rimuovere: {}".format(k))
print("Componenti connesse nel grafo di partenza: {}".format(n_connected))
if ddraw:
draw = GraphDraw(graph)
draw.show()
# Global optimum
print("\n-------------------------------------\n")
if gglobal_optimum:
print("Ottimo globale")
start_time = time.time()
global_opt, opt_removed = global_optimum(graph, k)
global_alt_sol = calc_alt_objective(graph, opt_removed)
calc_time = time.time() - start_time
opt_removed.sort()
print("Ottimo globale: {}".format(global_opt))
print("Nodi rimossi: {}".format(opt_removed))
print("Tempo di calcolo (in sec): %.3f" % calc_time)
print("Soluzione alternativa: {}".format(global_alt_sol))
if ddraw:
draw.show(opt_removed)
print("\n-------------------------------------\n")
# Relaxed optimum
if rrelaxed_optimum:
print("Ottimo rilassato")
start_time = time.time()
sol_mzn, removed_mzn = relaxed_optimum(graph, k)
calc_time = time.time() - start_time
relaxed_opt = calc_objective(graph, removed_mzn)
print("Soluzione rilassata: {}".format(sol_mzn))
print("Ottimo rilassato: {}".format(relaxed_opt))
print("Nodi rimossi: {}".format(removed_mzn))
print("Tempo di calcolo (in sec): %.3f" % calc_time)
print("\n-------------------------------------\n")
if gglobal_optimum:
opt = global_opt
alt_opt = global_alt_sol
elif rrelaxed_optimum:
opt = None
alt_opt = relaxed_optimum
else:
opt = None
alt_opt = None
# Apply the greedy algorithms
if max_degree or random_k_swap or bbest_1_swap or fi_2_swap or tabu:
print("Max Degree Greedy")
start_time = time.time()
max_degree_removed = algo_greedy(graph, k, max_degree_best)
max_degree_sol = calc_objective(graph, max_degree_removed)
max_degree_alt_sol = calc_alt_objective(graph, max_degree_removed)
calc_time = time.time() - start_time
print_solution(max_degree_removed, max_degree_sol, max_degree_alt_sol, opt, alt_opt, calc_time)
if ddraw:
draw.show(max_degree_removed)
print("\n-------------------------------------\n")
if min_connection:
print("Min Connection Greedy")
start_time = time.time()
min_conn_removed = algo_greedy(graph, k, min_conn_best)
min_conn_sol = calc_objective(graph, min_conn_removed)
min_conn_alt_sol = calc_alt_objective(graph, min_conn_removed)
calc_time = time.time() - start_time
print_solution(min_conn_removed, min_conn_sol, min_conn_alt_sol, opt, alt_opt, calc_time)
if ddraw:
draw.show(min_conn_removed)
print("\n-------------------------------------\n")
if min_connection_ratio:
print("Min Connection Ratio Greedy")
start_time = time.time()
min_conn_ratio_removed = algo_greedy(graph, k, min_conn_ratio_best)
min_conn_ratio_sol = calc_objective(graph, min_conn_ratio_removed)
min_conn_ratio_alt_sol = calc_alt_objective(graph, min_conn_ratio_removed)
calc_time = time.time() - start_time
print_solution(min_conn_ratio_removed, min_conn_ratio_sol, min_conn_ratio_alt_sol, opt, alt_opt, calc_time)
if ddraw:
draw.show(min_conn_ratio_removed)
print("\n-------------------------------------\n")
# Apply the heuristics
if random_k_swap:
print("K-Swap Casuale")
print("Soluzione di partenza (Max Degree): {} ({})".format(max_degree_removed, max_degree_sol))
k_swap_sol = max_degree_sol
k_swap_removed = max_degree_removed
print("\nEseguiamo {} iterazioni, facendo uno swap casuale di {} elementi ad ogni iterazione\n".format(n_iter, k_s))
start_time = time.time()
for i in range(n_iter):
tmp_removed = k_swap(graph, k_swap_removed, k_s)
tmp_sol = calc_objective(graph, tmp_removed)
if tmp_sol >= k_swap_sol:
k_swap_sol = tmp_sol
k_swap_removed = tmp_removed
k_swap_alt_sol = calc_alt_objective(graph, k_swap_removed)
calc_time = time.time() - start_time
print_solution(k_swap_removed, k_swap_sol, k_swap_alt_sol, opt, alt_opt, calc_time)
improvement = k_swap_sol - max_degree_sol
print("Miglioramento: {}".format(improvement))
if ddraw:
draw.show(k_swap_removed)
print("\n-------------------------------------\n")
if bbest_1_swap:
print("Best 1-Swap")
print("Soluzione di partenza (Max Degree): {} ({})".format(max_degree_removed, max_degree_sol))
one_swap_sol = max_degree_sol
one_swap_removed = max_degree_removed
print("\nEseguiamo la migliore mossa di 1-swap, fino al suo ottimo locale\n")
start_time = time.time()
while True:
tmp_removed = best_1_swap(graph, one_swap_removed)
tmp_sol = calc_objective(graph, tmp_removed)
if tmp_sol > one_swap_sol:
one_swap_sol = tmp_sol
one_swap_removed = tmp_removed
else:
break
one_swap_alt_sol = calc_alt_objective(graph, one_swap_removed)
calc_time = time.time() - start_time
print_solution(one_swap_removed, one_swap_sol, one_swap_alt_sol, opt, alt_opt, calc_time)
improvement = one_swap_sol - max_degree_sol
print("Miglioramento: {}".format(improvement))
if ddraw:
draw.show(one_swap_removed)
print("\n-------------------------------------\n")
if fi_2_swap:
print("First Improvement 2-Swap")
print("Soluzione di partenza (Max Degree): {} ({})".format(max_degree_removed, max_degree_sol))
two_swap_sol = max_degree_sol
two_swap_removed = max_degree_removed
print("\nEseguiamo una mossa di 2-swap fino al primo miglioramento: iteriamo fino al suo ottimo locale\n")
start_time = time.time()
while True:
tmp_removed = first_improvement_2_swap(graph, two_swap_removed)
tmp_sol = calc_objective(graph, tmp_removed)
if tmp_sol > two_swap_sol:
two_swap_sol = tmp_sol
two_swap_removed = tmp_removed
else:
break
two_swap_alt_sol = calc_alt_objective(graph, two_swap_removed)
calc_time = time.time() - start_time
print_solution(two_swap_removed, two_swap_sol, two_swap_alt_sol, opt, alt_opt, calc_time)
improvement = two_swap_sol - max_degree_sol
print("Miglioramento: {}".format(improvement))
if ddraw:
draw.show(two_swap_removed)
print("\n-------------------------------------\n")
if tabu:
print("Tabu Search con mossa di 1-swap")
print("Soluzione di partenza (Max Degree): {} ({})".format(max_degree_removed, max_degree_sol))
tabu_removed = max_degree_removed
print("\nEseguiamo una tabu search, con una lista tabu lunga al massimo {}, con condizione di stop il non avere miglioramenti per {} passi\n".format(n_tabu, n_stall))
start_time = time.time()
tabu_removed = tabu_search(graph, tabu_removed, n_tabu=n_tabu, n_stall=n_stall)
tabu_sol = calc_objective(graph, tabu_removed)
tabu_alt_sol = calc_alt_objective(graph, tabu_removed)
calc_time = time.time() - start_time
print_solution(tabu_removed, tabu_sol, tabu_alt_sol, opt, alt_opt, calc_time)
improvement = tabu_sol - max_degree_sol
print("Miglioramento: {}".format(improvement))
if ddraw:
draw.show(tabu_removed)
print("\n-------------------------------------\n")
if variable_neighborhood_search:
print("Variable Neighborhood Search con Best-1-Swap e First Improvement 2-Swap")
print("Soluzione di partenza (Max Degree): {} ({})".format(max_degree_removed, max_degree_sol))
vns_removed = max_degree_removed
vns_sol = max_degree_sol
print("\nEseguiamo una VNS, con le mosse fornite: seguiremo un approccio di tipo descent\n")
start_time = time.time()
cont = 0
tmp_removed = vns_removed
while cont < len(moves):
tmp_removed = moves[cont](graph, tmp_removed)
tmp_sol = calc_objective(graph, tmp_removed)
if tmp_sol > vns_sol:
vns_removed = tmp_removed
vns_sol = tmp_sol
cont = 0
else:
cont += 1
vns_alt_sol = calc_alt_objective(graph, vns_removed)
calc_time = time.time() - start_time
print_solution(vns_removed, vns_sol, vns_alt_sol, opt, alt_opt, calc_time)
improvement = vns_sol - max_degree_sol
print("Miglioramento: {}".format(improvement))
if ddraw:
draw.show(vns_removed)
print("\n-------------------------------------\n")
if multistart_search:
print("Multistart Search")
print("\nEseguiamo una Multistart Search, con la mossa fornita, per un totale di {} diverse ricerche\n".format(mss_n_start))
mss_best_removed = []
mss_best_sol = 0
start_time = time.time()
for i in range(mss_n_start):
mss_removed = algo_greedy(graph, k, mss_greedy_best, mss_stoc_dim)
mss_sol = calc_objective(graph, mss_removed)
tmp_removed = mss_removed
while True:
tmp_removed = mss_move(graph, tmp_removed)
tmp_sol = calc_objective(graph, tmp_removed)
if tmp_sol > mss_sol:
mss_sol = tmp_sol
mss_removed = tmp_removed
else:
break
if mss_sol > mss_best_sol:
mss_best_sol = mss_sol
mss_best_removed = mss_removed
mss_alt_sol = calc_alt_objective(graph, mss_removed)
calc_time = time.time() - start_time
print_solution(mss_removed, mss_sol, mss_alt_sol, opt, alt_opt, calc_time)
if ddraw:
draw.show(mss_removed)
print("\n-------------------------------------\n")
if ggenetic_removed or genetic_binary:
print("Creiamo una popolazione di {} individui per gli algoritmi genetici".format(pop_dim))
population = create_population(graph, k, pop_dim, [max_degree_best, min_conn_best, min_conn_ratio_best], stoc_dim)
pop_dist = calc_dist(population)
print("La popolazione ha una distanza media di %.4f" % pop_dist)
print("Generiamo un totale di {} generazioni e ad ogni generazione verranno scelti {} genitori\n".format(max_generations, n_parents))
print("\n-------------------------------------\n")
if ggenetic_removed:
print("\nAlgoritmo genetico, codifica: vettore a {} interi".format(k))
start_time = time.time()
genetic_removed = genetic_algo_removed(graph, population, n_parents, max_generations)
genetic_sol = calc_objective(graph, genetic_removed)
genetic_alt_sol = calc_alt_objective(graph, genetic_removed)
calc_time = time.time() - start_time
print_solution(genetic_removed, genetic_sol, genetic_alt_sol, opt, alt_opt, calc_time)
if ddraw:
draw.show(genetic_removed)
print("\n-------------------------------------\n")
if genetic_binary:
print("\nAlgoritmo genetico, codifica: vettore binario di {} elementi".format(dim))
start_time = time.time()
genetic_bin_removed = genetic_algo_binary(graph, population, n_parents, max_generations)
genetic_bin_sol = calc_objective(graph, genetic_bin_removed)
genetic_bin_alt_sol = calc_alt_objective(graph, genetic_bin_removed)
calc_time = time.time() - start_time
print_solution(genetic_bin_removed, genetic_bin_sol, genetic_bin_alt_sol, opt, alt_opt, calc_time)
if ddraw:
draw.show(genetic_bin_removed)
print("\n-------------------------------------\n")
if save:
ris = ""
while ris != "y" and ris != "n":
ris = input("Salva grafo? [y/n]")
if ris == "y":
name = input("Mettice il nome, aooooo: ")
with open(name, "w+") as f:
f.write(str(graph))
| true |
a98acea386d9c4bb4e98d54d114d118c1443fe34 | Python | CodingDojoTulsaMay2018/Tom_Reese | /Python/python_OOP/intro_to_tdd/intro_to_tdd.py | UTF-8 | 2,041 | 3.640625 | 4 | [] | no_license |
def reverseList(list):
for i in range(len(list)//2):
list[i],list[len(list)-i-1] = list[len(list)-i-1],list[i]
return list
def palindrome(str):
for i in range(len(str)//2):
if str[i] == str[len(str)-i-1]: continue
else: return False
return True
def coins(change):
coin = [0,0,0,0]
if change >= 25:
coin[0] = change//25
change -= coin[0]*25
if change >= 10:
coin[1] = change//10
change -= coin[1]*10
if change >= 5:
coin[2] = change//5
change -= coin[2]*5
if change < 5:
coin[3] = change
change -= coin[3]
return coin
import unittest
class reverseListTest(unittest.TestCase):
def test1(self):
return self.assertEqual(reverseList([1,3,5]), [5,3,1])
def test2(self):
return self.assertEqual(reverseList([2,4,-3]), [-3,4,2])
def test3(self):
return self.assertEqual(reverseList([0,0,0,0,1,0,0]), [0,0,1,0,0,0,0])
def test4(self):
return self.assertEqual(reverseList(["a",4,"9"]), ["9",4,"a"])
class palindromeTest(unittest.TestCase):
def test1(self):
return self.assertEqual(palindrome("racecar"), True)
def test2(self):
return self.assertEqual(palindrome("rabbit"), False)
def test3(self):
return self.assertEqual(palindrome("tacocat"), True)
def test4(self):
return self.assertEqual(palindrome("920dkth6"), False)
def test5(self):
return self.assertEqual(palindrome("1I1I1I1"), True)
class coinsTest(unittest.TestCase):
def test1(self):
return self.assertEqual(coins(87), [3,1,0,2])
def test2(self):
return self.assertEqual(coins(93), [3,1,1,3])
def test3(self):
return self.assertEqual(coins(49), [1,2,0,4])
def test4(self):
return self.assertEqual(coins(24), [0,2,0,4])
def test5(self):
return self.assertEqual(coins(6), [0,0,1,1])
def test6(self):
return self.assertEqual(coins(199), [7,2,0,4])
if __name__ == "__main__":
unittest.main()
| true |
6456a302c4514054efcd58e9978a5f155abff955
|
Python
|
SHADIBRAHIM/Python
|
/CO2/3_sumoflist.py
|
UTF-8
| 58 | 3.0625 | 3 |
[] |
no_license
|
l=[1,2,3,4,5]
s=sum(l)
print("Sum of elements in list:",s)
| true |
96baeef88fdc07b0c02a8f32d54091cc07355c71
|
Python
|
devaun-mcfarlane/CST8279-Lab-3
|
/Exercise 10.py
|
UTF-8
| 143 | 3.5 | 4 |
[] |
no_license
|
m = int(input("Enter the miles driven:"))
g = int(input("Enter the gallons used:"))
mpg = (m/g)
print ("The mpg for the car is ", mpg ," .")
| true |
5ab5b8dd07f758ba6aead9633febac5a650938ec
|
Python
|
edforsberg/FFR120
|
/src/AvgSpeed.py
|
UTF-8
| 116 | 3.09375 | 3 |
[] |
no_license
|
import numpy as np
def get_average_speed(dx,v,t,angle):
avg_speed = dx*v*t*np.cos(angle)
return avg_speed
| true |
98ac3c5d0ab99ac54a4ec2eb966ba6fe136515b7
|
Python
|
santiagopemo/holbertonschool-plds_challenges
|
/The_Holbie_Champion/main.py
|
UTF-8
| 9,761 | 2.796875 | 3 |
[] |
no_license
|
#!/usr/bin/python3
from base_champion import BaseChampion
from fighter import Fighter
from cleric import Cleric
from mage import Mage
from ranger import Ranger
from paladin import Paladin
from rogue import Rogue
import fighter
from os import path
import random
import os
import time
import glob
champ_types = [Cleric, Fighter, Mage, Paladin, Ranger, Rogue]
def create_champion():
print("====== Create Champion ======")
print(" Select your champions type ")
print("1-Cleric 2-Figther 3-Mage ")
print("4-Paladin 5-Ranger 6-Rouge")
print("*Any other option to go back ")
try:
op2 = int(input(">> "))
if not 0 < op2 < 7:
raise Exception
except:
return None
while 1:
try:
os.system("clear")
print("====== Create Champion ======")
c_name = input("Enter champions name\n>> ")
os.system("clear")
print("====== Create Champion ======")
c_raze = input("Enter champions raze\n>> ")
os.system("clear")
print("====== Create Champion ======")
c_gender = input("Enter champions gender\n>> ")
except:
os.system("clear")
print("\nError, try again\n")
input("Press enter to continue")
continue
my_champ = champ_types[op2 - 1](c_name, c_raze, c_gender, 0, 5, 0, 0, 2)
os.system("clear")
print("Your champion:")
print(my_champ)
input("Press enter to continue")
return my_champ
def upgrade_champion(my_champ):
print("===== Upgrade Champion =====")
print("Stats Points: {}".format(my_champ.stats_points))
print("what do you want to upgrade?")
print("*Any other option to go back")
print(28 * "=")
for key, value in my_champ.stats.items():
print("{:<10} {:>16} ".format(key, value), end="")
print()
while 1:
try:
op_uc = input(">> ")
if my_champ.stats.get(op_uc, False) == 0:
raise Exception
print("how much?")
quantitie = int(input(">> "))
if quantitie < 0 or quantitie > my_champ.stats_points:
raise ValueError
else:
my_champ.stats[op_uc] += quantitie
my_champ.stats_points -= quantitie
os.system("clear")
print("\nUpgraded {} to {}\n".format(op_uc, my_champ.stats[op_uc]))
input("Press enter to continue")
return 1
except ValueError:
os.system("clear")
print("\nWrong quantitie\n")
input("Press enter to continue")
return 0
except:
# os.system("clear")
# print("Error, try again")
# input("Press enter to continue")
return 0
def load_champion():
# print("Enter Champions Type")
# ch_type = input(">> ")
# print("Enter Champions name")
# ch_name = input(">> ")
jsonfiles = []
for f in glob.glob("*.json"):
jsonfiles.append(f)
if jsonfiles == []:
print("\nNo champions saved\n")
input("Press enter to continue")
return None
print("===== Load Champion =====")
for i, n in enumerate(jsonfiles):
print("{}-{}".format(i + 1, n[:-5]))
try:
print("*Any other option to go back")
l_op = int(input(">> "))
l_op -= 1
if l_op < 0 or l_op >= len(jsonfiles):
raise Exception
except:
# os.system("clear")
# print("\nWrong option\n")
# input("Press enter to continue")
return None
champ_f = jsonfiles[l_op][:-5].split("_")
ch_type = champ_f[0]
ch_name = champ_f[1]
ch_dict = BaseChampion.load_character(ch_type, ch_name)
if ch_dict == {}:
print("no champion found")
return None
if ch_type == "Cleric":
my_champ = Cleric(ch_dict["name"], ch_dict["raze"], ch_dict["gender"], ch_dict["level"], ch_dict["nv_exp"], ch_dict["current_exp"], ch_dict["total_exp"], ch_dict["stat_points"])
if ch_type == "Paladin":
my_champ = Paladin(ch_dict["name"], ch_dict["raze"], ch_dict["gender"], ch_dict["level"], ch_dict["nv_exp"], ch_dict["current_exp"], ch_dict["total_exp"], ch_dict["stat_points"])
if ch_type == "Figther":
my_champ = Figther(ch_dict["name"], ch_dict["raze"], ch_dict["gender"], ch_dict["level"], ch_dict["nv_exp"], ch_dict["current_exp"], ch_dict["total_exp"], ch_dict["stat_points"])
if ch_type == "Mage":
my_champ = Mage(ch_dict["name"], ch_dict["raze"], ch_dict["gender"], ch_dict["level"], ch_dict["nv_exp"], ch_dict["current_exp"], ch_dict["total_exp"], ch_dict["stat_points"])
if ch_type == "Rogue":
my_champ = Rogue(ch_dict["name"], ch_dict["raze"], ch_dict["gender"], ch_dict["level"], ch_dict["nv_exp"], ch_dict["current_exp"], ch_dict["total_exp"], ch_dict["stat_points"])
if ch_type == "Ranger":
my_champ = Ranger(ch_dict["name"], ch_dict["raze"], ch_dict["gender"], ch_dict["level"], ch_dict["nv_exp"], ch_dict["current_exp"], ch_dict["total_exp"], ch_dict["stat_points"])
my_champ.stats = ch_dict["stats"]
os.system("clear")
print(my_champ)
input("Press enter to continue")
return my_champ
def print_fight(my_champ, enemy):
hc = int(enemy.stats["health"] / 10) * "-"
he = int(my_champ.stats["health"] / 10) * "-"
print("{} {} healt: {}".format(type(enemy).__name__, enemy.name, hc))
print()
print("{} {} healt: {}".format(type(my_champ).__name__, my_champ.name, he))
def fight(my_champ):
enemy = None
e_ty = random.randint(0, 5)
e_level = my_champ.level
e_c_exp = random.randint(0, 5)
e_nv_exp = 5 - e_c_exp
e_t_exp = e_level * 5 + e_c_exp
enemy = champ_types[e_ty]("Enemy", "Bug", "Bug", e_level, e_nv_exp, e_c_exp, e_t_exp, 0)
os.system("clear")
print("Your Enemy is")
print(enemy)
my_damage = 0
enemy_damage = 0
input("Press enter to continue")
while 1:
os.system("clear")
# print("{} {} healt: {}".format(type(enemy).__name__,enemy.name,enemy.stats["health"]))
# print("{} {} healt: {}".format(type(my_champ).__name__,my_champ.name,my_champ.stats["health"]))
print_fight(my_champ, enemy)
print("1-Attack 2-Use magic")
try:
f_op = int(input(">> "))
if f_op != 1 and f_op != 2:
raise Exception
except:
del enemy
break
if f_op == 1:
at = my_champ.attack()
if f_op == 2:
at = my_champ.use_magic()
enemy_damage = enemy.defend() - at
# print(enemy_damage)
if enemy_damage < 0:
enemy.stats["health"] += enemy_damage
if (enemy.stats["health"] <= 0):
os.system("clear")
print("\nYou win!!\n")
my_champ.stats["health"] = 1000
my_champ.gain_exp()
enemy.stats["health"] = 1000
del enemy
input("Press enter to continue")
break
os.system("clear")
print_fight(my_champ, enemy)
time.sleep(1)
if type(enemy) is Mage:
at_e = enemy.use_magic()
else:
at_e = enemy.attack()
my_damage = my_champ.defend() - at_e
if my_damage < 0:
my_champ.stats["health"] += my_damage
os.system("clear")
print_fight(my_champ, enemy)
time.sleep(1)
if (my_champ.stats["health"] <= 0):
os.system("clear")
print("\nYou loss!!\n")
my_champ.death()
my_champ.stats["health"] = 1000
enemy.stats["health"] = 1000
del enemy
input("Press enter to continue")
break
while 1:
os.system("clear")
print("===========================")
print(" THE HOLBIE CHAMPION ")
print("===========================")
print("1-Create a new champion")
print("2-Load a existing champion")
print("*Any other option to exit")
try:
op1 = int(input(">> "))
if op1 != 1 and op1 != 2:
raise Exception
except:
break
print()
if op1 == 1:
os.system("clear")
my_champ = create_champion()
if my_champ == None:
continue
if op1 == 2:
os.system("clear")
my_champ = load_champion()
if my_champ == None:
continue
while 1:
os.system("clear")
print("===================================")
print(" THE HOLBIE CHAMPION ")
print("===================================")
print("1-New fight 2-Upgrade Champion")
print("3-Save Champion 4-My Champion ")
print("*Any other option to go back")
try:
op4 =int(input(">> "))
if not 0 < op4 < 5:
raise Exception
except:
del my_champ
break
print()
if op4 == 1:
os.system("clear")
fight(my_champ)
if op4 == 2:
os.system("clear")
upgrade_champion(my_champ)
if op4 == 3:
os.system("clear")
my_champ.save_character()
print("\nChampion Saved\n")
input("Press enter to continue")
if op4 == 4:
os.system("clear")
print(my_champ)
input("Press enter to continue")
print()
os.system("clear")
print()
| true |
c5e29d0f31c936e3b271077cd4c781ff20008a0b
|
Python
|
t-suzuki-shl/0_start_ai_170928
|
/src/05_nn/35_nn_part3.py
|
UTF-8
| 2,406 | 3.359375 | 3 |
[] |
no_license
|
import numpy as np
class NeuralNetwork:
def __init__(self):
self.hw = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
self.hb = np.array([0.1, 0.2])
self.ow = np.array([[0.1, 0.2], [0.3, 0.4] ,[0.5, 0.6]])
self.ob = np.array([0.1, 0.2, 0.3])
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def softmax(self, x):
return np.exp(x) / np.sum(np.exp(x))
def neuron(self, w, b, x, activate):
return activate(w.dot(x) + b)
def cross_entropy(self, y, t):
delta = 1e-7
return -np.sum(t * np.log(y + delta))
def input(self, x, t):
hy = self.neuron(self.hw, self.hb, x, self.sigmoid)
y = self.neuron(self.ow, self.ob, hy, self.softmax)
loss = self.cross_entropy(y, t)
return loss
def grad(self, f, x):
h = 1e-4
y = np.zeros_like(x)
for i in range(x.size):
t = x[i]
x[i] = t + h
ya = f(x)
x[i] = t - h
yb = f(x)
y[i] = (ya - yb) / (2 * h)
x[i] = t
return y
def grad_2dim(self, f, x):
result = np.zeros_like(x)
for i, x in enumerate(x):
result[i] = self.grad(f, x)
return result
def train(self, x, t, lr=0.1):
loss = lambda n: self.input(x, t)
grads = {}
grads['hw'] = self.grad_2dim(loss, self.hw)
grads['hb'] = self.grad(loss, self.hb)
grads['ow'] = self.grad_2dim(loss, self.ow)
grads['ob'] = self.grad(loss, self.ob)
self.hw -= lr * grads['hw']
self.hb -= lr * grads['hb']
self.ow -= lr * grads['ow']
self.ob -= lr * grads['ob']
def test(self, x, t):
hy = self.neuron(self.hw, self.hb, x, self.sigmoid)
y = self.neuron(self.ow, self.ob, hy, self.softmax)
return (np.argmax(y) == np.argmax(t)).astype('int')
nn = NeuralNetwork()
x_train = np.array([[0.5, 0.6, 0.7], [0.6, 0.7, 0.8], [0.7, 0.8, 0.9]])
y_train = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
x_test = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]])
y_test = np.array([[0, 0, 1], [0, 0, 1], [1, 0, 0]])
for i in range(x_train.shape[0]):
nn.train(x_train[i], y_train[i])
correct = 0
for i in range(x_test.shape[0]):
correct += nn.test(x_test[i], y_test[i])
print("Accuracy: {}".format(correct / x_test.shape[0]))
| true |
26103744cbb9293dfea2697831952b21d346b969
|
Python
|
Edinaldobjr/Python
|
/Geek_University/4. Variáveis e Tipos de Dados em Python/7 - Fahrenheit para Celsius.py
|
UTF-8
| 174 | 3.828125 | 4 |
[] |
no_license
|
print('Conversor de temperatura (Fahrenheit para Celsius)\n')
F = float(input('Digite a temperatura em Fahrenheit: '))
C = 5 * (F -32) / 9
print('C =', round(C, 2), 'ºC')
| true |
b5b307d383284d020670fcf07400bc8d2ea6ccff
|
Python
|
ambarish710/python_concepts
|
/graph/directed_graph_comprehensive.py
|
UTF-8
| 3,011 | 3.9375 | 4 |
[] |
no_license
|
from queue import Queue
# Graph
class Graph:
def __init__(self, edges_list):
self.adjacency_list = {}
self.bfs_parent = {}
self.dfs_parent = {}
self.visited = {}
self.color = {}
self.bfs_traversal_output = []
self.dfs_traversal_output = []
self.bfs_queue = Queue()
for start_vertice, end_vertice in edges_list:
# Check 1
if start_vertice not in self.adjacency_list:
self.adjacency_list[start_vertice] = [end_vertice]
else:
self.adjacency_list[start_vertice].append(end_vertice)
# Check 2
if end_vertice not in self.adjacency_list:
self.adjacency_list[end_vertice] = []
for node in self.adjacency_list:
self.bfs_parent[node] = None
self.dfs_parent[node] = None
self.visited[node] = False
# If node not visited then color white
# If node visited then color grey
# If nodes all children visited then black
self.color[node] = "White"
print(self.adjacency_list)
print(self.bfs_parent)
print(self.dfs_parent)
print(self.visited)
print(self.color)
# Breadth First Search of Directed Graph
def bfs_traversal(self, node):
self.visited[node] = True
self.bfs_queue.put(node)
while not self.bfs_queue.empty():
vertice = self.bfs_queue.get()
self.bfs_traversal_output.append(vertice)
for connected_node in self.adjacency_list[vertice]:
if not self.visited[connected_node]:
self.bfs_queue.put(connected_node)
self.bfs_parent[connected_node] = vertice
self.visited[connected_node] = True
# Depth First Search of Directed Graph
def dfs_traversal(self, node):
self.color[node] = "Grey"
self.dfs_traversal_output.append(node)
for vertice in self.adjacency_list[node]:
if self.color[vertice] == "White":
self.dfs_traversal(vertice)
self.dfs_parent[vertice] = node
self.color[node] = "Black"
# Find Cycle in Directed Graph
def find_cycle(self, node):
pass
if __name__ == "__main__":
edges_list = [
("A", "B"),
("A", "D"),
("B", "C"),
("D", "E"),
("D", "F"),
("E", "F"),
("E", "G"),
("F", "H"),
("G", "H")
]
graph_obj = Graph(edges_list=edges_list)
print("BFS Graph Traversal Output --\n")
graph_obj.bfs_traversal(node="A")
print(graph_obj.bfs_traversal_output)
print(graph_obj.bfs_parent)
print("DFS Graph Traversal Output --\n")
graph_obj.dfs_traversal(node="A")
print(graph_obj.dfs_traversal_output)
print(graph_obj.dfs_parent)
# print("Cycle/Loop found in Graph Traversal Output -- {}".format(graph_obj.find_cycle(node="A")))
| true |
490c7ba35b3d87f250e29043842f43d548a5f80d
|
Python
|
sqiprasanna/coding-questions
|
/Greedy/page_faults_in_LRU.py
|
UTF-8
| 1,705 | 3.984375 | 4 |
[] |
no_license
|
"""
url : https://practice.geeksforgeeks.org/problems/page-faults-in-lru/0
In operating systems that use paging for memory management, page replacement algorithm are needed to decide which page needs to be replaced when the
new page comes in. Whenever a new page is referred and is not present in memory, the page fault occurs and Operating System replaces one of the
existing pages with a newly needed page.
Given a sequence of pages and memory capacity, your task is to find the number of page faults using Least Recently Used (LRU) Algorithm.
Input:
The first line of input contains an integer T denoting the number of test cases. Each test case contains n number of pages and next line contains
space seaprated sequence of pages. The following line consist of the capacity of the memory.
Note: Pages are referred in the order left to right from the array (i.e index 0 page is referred first then index 1 and so on). Memory is empty at
thestart .
Output:
Output the number of page faults.
Constraints:
1<=T<=100
1<=n<=1000
4<=capacity<=100
Example:
Input:
2
9
5 0 1 3 2 4 1 0 5
4
8
3 1 0 2 5 4 1 2
4
Output:
8
7
"""
def page_faults(arr,n,k):
pages = []
count = 0
for item in arr:
if item in pages:
pages.remove(item)
pages.append(item)
continue
else:
pages.append(item)
count = count + 1
if len(pages) > k:
pages.pop(0)
print(count)
def main():
t = int(input().strip())
for i in range(0,t):
n = int(input())
arr = [int(x) for x in input().strip().split(" ")]
k = int(input())
page_faults(arr,n,k)
if __name__ == '__main__':
main()
| true |
f2712a5f329cfd135638cc956284336067e26e11
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03681/s683423628.py
|
UTF-8
| 249 | 2.9375 | 3 |
[] |
no_license
|
import math
n, m = map(int, input().split())
R = 1000000007
if abs(n - m) >= 2:
ans = 0
else:
nf = math.factorial(n)
mf = math.factorial(m)
if n == m:
ans = (nf * mf * 2) % R
else:
ans = (nf * mf) % R
print(ans)
| true |
c706683629ced8b932bf84c8f87d3d20068c3a4c
|
Python
|
ruiwu1990/vwplatform
|
/app/visualization/util.py
|
UTF-8
| 5,663 | 3.40625 | 3 |
[] |
no_license
|
# this is used to force division to be floating point
# e.g. 4/5=0.8
from __future__ import division
import numpy as np
#This function is used to get the name of each item
#and remove the offset and write the results into temp file
#The users need to remove the temp file by themselves
def getItemName(downloadDir='', filename='', itemNameList=[], rowOffset=0, rowOffset2=0, columnOffset=0):
inputFile = open(downloadDir + filename,'r')
itemList = []
outputFile = open(downloadDir + 'temp' + filename, 'w+')
# outputFile = open(downloadDir + 'input.csv', 'w+')
#skip first rowOffset rows
for i in range(0, int(rowOffset)):
itemList = inputFile.readline()
#obtain item name from the list
#go back to the start of the file
itemNameLine = inputFile.readline()
itemList = itemNameLine.rstrip().split(',')
#remove the first columnOffset elements
for i in range(0, int(columnOffset)):
itemList.pop(0)
#does not work to do this: itemNameList = itemList
#should use the following method
for item in itemList:
itemNameList.append(str(item))
#skip second rowOffset rows
for i in range(0, int(rowOffset2)):
itemList = inputFile.readline()
#add the item name list into csv file
tempList = []
tempList = itemNameLine.split(',')
outputFile.write(','.join(tempList))
#skip first columnOffset columns
#this is used to get every line of this file
for line in inputFile:
tempList = line.split(',')
for i in range(0, int(columnOffset)):
tempList.pop(0)
outputFile.write(','.join(tempList))
tempList = []
#close file
inputFile.close()
outputFile.close()
# this function is used for csv interface
# create a file from the input data
# save it into tempData folder
def csv_interface_parse_save(data='', download_dir=''):
temp_list = data.split('//')
# get and remove filename from list
filename = temp_list.pop(0)
# file content
# file_content = "/n".join(temp_list)
# write file content into a file
# no meta data should be in this input data
output_file = open(download_dir + filename, 'w+')
for line in temp_list:
line = line + '\n'
output_file.write(line)
# close file
output_file.close()
# this function is used to grab all the labels of
# a csv file. The csv file should not have any metadata
def csv_interface_obtain_labels(filename='', download_dir=''):
input_file = open(download_dir + filename,'r')
csv_label_list = input_file.readline().replace('\n','').split(',')
input_file.close()
return csv_label_list
# this function is used to create a csv file string from
# two arrays, the first one is for y axis and the second one is for x axis
def create_csv_file_string(y_array=[], y_axis_name='', x_array=[], x_axis_name=''):
# first row is for label
csv_string = x_axis_name + "," + y_axis_name + "\\n"
# y_array and x_array must be the same dimension
for count in range(len(y_array)):
csv_string = csv_string + str(x_array[count]) + "," + str(y_array[count]) + "\\n"
# print csv_string
return csv_string
# this is from:
# http://stackoverflow.com/questions/3675318/how-to-replace-the-some-characters-from-the-end-of-a-string
# this function is used to replace one part of a string from the
# end. e.g. replace_last(str, '2', 'x'), this means replace
# the first 2 from the end of str into x for once
def replace_last(source_string, replace_what, replace_with):
head, sep, tail = source_string.rpartition(replace_what)
return head + replace_with + tail
# This function is used to check if the upload file type is csv
def allowed_file_csv(filename):
# define the allowed extension
allowed_extension_list = ['csv']
# get extension part
file_type = filename.rsplit(".",1)[1]
return file_type in allowed_extension_list
# This function is used to check if the upload file type is NetCDF
def allowed_file_netcdf(filename):
# define the allowed extension
allowed_extension_list = ['nc']
# get extension part
file_type = filename.rsplit(".",1)[1]
return file_type in allowed_extension_list
# This function is used to sort a numpy array and
# return a 1D list with m numbers, each number means
# the number of array elements in the section from min+x*(max-min)/section_number
# to min+(x+1)(max-min)/section_number, this function also returns the tab name list
def sort_section_number(numpy_input_array=[], section_number=1, section_results_number=[],tab_name_list=[]):
ordered_array = np.sort(numpy_input_array)
max_value = ordered_array[numpy_input_array.size-1]
min_value = ordered_array[0]
interval_value = (max_value-min_value)/section_number
# this is used to record elements section numbers
temp_results_number = [0]*section_number
for item in ordered_array:
for count in range(0,section_number):
# record the number of elements in each section
if item>=min_value+count*interval_value and item<=min_value+(count+1)*interval_value:
temp_results_number[count] = temp_results_number[count] + 1
# create tab name
for count in range(0,section_number):
# 2 digits precisions for float
bound_floor = "%.2f" % (min_value+count*interval_value)
bound_cell = "%.2f" % (min_value+(count+1)*interval_value)
tab_name_list.append(str(bound_floor)+' to '+str(bound_cell))
# push results into section_results_number
for item in temp_results_number:
section_results_number.append(item)
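
# Hedged usage sketch (not part of the original module) showing the output format
# of sort_section_number for a small array split into two equal-width sections:
#     counts, tabs = [], []
#     sort_section_number(np.array([1.0, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2, counts, tabs)
#     counts  ->  [5, 5]
#     tabs    ->  ['1.00 to 5.50', '5.50 to 10.00']
# Note that a value falling exactly on an interior boundary would be counted in
# both adjacent sections.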
| true |
d9dae35079f18ceae43f479f414f5e02033e0d52
|
Python
|
sayed07/sayed
|
/pro10.py
|
UTF-8
| 170 | 2.71875 | 3 |
[] |
no_license
|
bb1=int(input())
nn1=list(map(int,input().split()))
jj1=0
for i in range(0,bb1):
for r in range(0,i):
if nn1[r]<nn1[i]:
jj1=jj1+nn1[r]
print(jj1)
| true |
38426f86d56a78d8172c9eec8f28cc1e47f087bd
|
Python
|
medical-projects/value-of-medical-notes
|
/mimic3preprocess/scripts/get_validation.py
|
UTF-8
| 1,809 | 2.546875 | 3 |
[] |
no_license
|
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
import argparse
parser = argparse.ArgumentParser(description='get readmission data that have physician notes')
parser.add_argument('--path', type=str,
default="/data/joe/physician_notes/mimic-data/",help='path to data root dir')
parser.add_argument('--task', type=str,
default="mortality", choices=["readmission", "mortality"], help='task type')
parser.add_argument('--period', type=str
, choices=["24", "48", "retro"], help='task type')
args = parser.parse_args()
listfile_dir = os.path.join(args.path, args.task)
train = pd.read_csv(os.path.join(listfile_dir, f"train_{args.period}_listfile.csv"))
print("Training data before split", train)
train_list = os.listdir(os.path.join(args.path,'train'))
if args.task == 'mortality':
random_state = 20
else:
random_state = 191
train_list, val_list = train_test_split(train_list, test_size =0.2, random_state=random_state)
train_list, val_list = set(train_list), set(val_list)
trains = []
vals = []
for row in train.values:
if row[0].split("_")[0] in train_list:
trains.append(row)
else:
vals.append(row)
trains = np.vstack(trains)
vals = np.vstack(vals)
trains = pd.DataFrame(trains, columns=['stay', 'y_true'])
vals = pd.DataFrame(vals, columns=['stay', 'y_true'])
count_train, count_val = np.sum(trains['y_true']), np.sum(vals['y_true'])
print(f"Training date size: {len(trains)}, {count_train/len(trains)}")
print(f"Validation date size: {len(vals)}, {count_val/len(vals)}")
trains.to_csv(os.path.join(listfile_dir, f'train_{args.period}_listfile_tmp.csv'), index=False)
vals.to_csv(os.path.join(listfile_dir, f'valid_{args.period}_listfile.csv'), index=False)
| true |
b2c3bb022ccbb91e66fe0acefb70a972bbd44dfc
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03079/s159538546.py
|
UTF-8
| 260 | 3.015625 | 3 |
[] |
no_license
|
'''
Created on 2020/08/20
@author: harurun
'''
def main():
import sys
pin=sys.stdin.readline
pout=sys.stdout.write
perr=sys.stderr.write
A,B,C=map(int,pin().split())
if A==B and B==C:
print("Yes")
return
print("No")
return
main()
| true |
b1f687da9cbc6c510888fdcbc115ebd56b117855
|
Python
|
naveen12124/lab2.0
|
/LAB 1 LCM GCD of 3 numbers.py
|
UTF-8
| 396 | 4.1875 | 4 |
[] |
no_license
|
a = int(input("Enter the first number : "))
b = int(input("Enter the second number : "))
c = int(input("Enter the third number : "))
# Calculation of GCD
gcd1 = gcd(a,b)
gcd2 = gcd(gcd1,c) # GCD of 3 numbers
# Calculation of LCM
lcm1 = a*b//gcd1
lcm2 = c*lcm1//gcd(c,lcm1) # GCD of 3 numbers
# Printing the answer
print("The GCD of 3 numbers is " + str(gcd2) + " and the LCM is "+ str(lcm2))
| true |
26ba0b87b9a55c98ac7182be4db3e89e11fb03d1
|
Python
|
Nikkuniku/AtcoderProgramming
|
/JOI/第12回日本情報オリンピック 予選(過去問)/d.py
|
UTF-8
| 700 | 2.515625 | 3 |
[] |
no_license
|
d, n = map(int, input().split())
temp = [int(input()) for _ in range(d)]
cloth = [tuple(map(int, input().split())) for _ in range(n)]
dp = [[0]*n for _ in range(d+1)]
for i in range(d):
t = temp[i]
for j in range(n):
a, b, c = cloth[j]
if i == 0:
if a <= t <= b:
dp[i+1][j] = dp[i][j]
else:
dp[i+1][j] = -1
continue
if a <= t <= b:
for k in range(n):
_, _, x = cloth[k]
if dp[i][k] == -1:
continue
dp[i+1][j] = max(dp[i+1][j], dp[i][k]+abs(c-x))
else:
dp[i+1][j] = -1
ans = max(dp[d])
print(ans)
| true |
085bd32ebe6ed6de34e01fb104c38013a2dd5099
|
Python
|
ahorjia/longform
|
/ConnectDots/ProcessContent.py
|
UTF-8
| 9,092 | 3.109375 | 3 |
[] |
no_license
|
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import *
import pickle
import re
from datetime import datetime
import time
from ArticleEntry import ArticleEntry
from constants import *
_digits = re.compile('\d')
stemmer = PorterStemmer()
def built_article_dictionary(the_dictionary, article, texts):
tokenizer = RegexpTokenizer(r'\w+')
article.dict_0_1 = {}
article.dict_count = {}
article.dict_article_word_prob = {}
for w in texts:
word_list = [stemmer.stem(word.lower()) for word in tokenizer.tokenize(w)]
for word in word_list:
if word in the_dictionary:
article.dict_0_1[word] = 1
if article.dict_count.has_key(word):
article.dict_count[word] += 1
else:
article.dict_count[word] = 1
for word in article.dict_count:
article.dict_article_word_prob[word] = article.dict_count[word] * 1.0 / len(the_dictionary)
# print article.dict_article_word_prob
pass
def process_articles(file_name, output_file_name):
the_dictionary = load_dictionary();
articles = []
e = ET.parse(file_name).getroot()
id_counter = 0
for atype in e.findall('item'):
contents = atype.find('contents/value')
issue = atype.find('issue/value')
title = atype.find('title/value')
author = atype.find('author/value')
if author is None:
author = ""
else:
author = author.text
if contents is not None:
if issue is not None:
issue = try_parsing_date(issue.text)
id_counter += 1
soup = BeautifulSoup(contents.text)
texts = soup.findAll(text=True)
article = ArticleEntry(id_counter, title.text, author, issue, texts)
built_article_dictionary(the_dictionary, article, texts)
articles.append(article)
id_counter = 0
articles.sort(key=lambda x: x.publication)
for article in articles:
article.id = id_counter
id_counter += 1
print len(articles)
pickle.dump(articles, open(output_file_name, "wb"))
print "done"
def load_dictionary():
all_words = pickle.load(open(dictionary_file_name, "rb"))
print len(all_words)
return all_words
def test_articles(output_file_name):
articles = pickle.load(open(output_file_name, "rb"))
print len(articles) # 339
# article_id = 67
# print articles[article_id]
# print articles[article_id].id
# print articles[article_id].publication
# print articles[article_id].title
# print articles[article_id].writer
# # print articles[article_id].dict_0_1
# print articles[article_id].dict_0_1['hi'] # 1
# print articles[article_id].dict_count['hi'] # 23
# print articles[article_id].dict_article_word_prob['hi']
# print len(articles[article_id].dict_0_1)
# print len(articles[article_id].dict_count)
print "********************"
print articles[0]
# print articles[0].content
print (len(articles[0].content))
# print articles[295]
# print articles[327]
# print articles[332]
# print articles[338]
# print articles[27]
# print articles[100]
# for article in articles:
# print article
def build_bipartite_graph():
words = load_dictionary();
articles = pickle.load(open(articles_file_name, "rb"))
article_to_word = {}
for article in articles:
article_to_word[article.id] = []
word_to_article = {}
for word in words:
word_to_article[word] = []
for word in words:
for article in articles:
if article.dict_0_1.has_key(word) and article.dict_0_1[word] == 1: # The same thing!
article_to_word[article.id].append(word)
word_to_article[word].append(article.id)
pickle.dump(word_to_article, open(word_to_article_file_name, "wb"))
pickle.dump(article_to_word, open(article_to_word_file_name, "wb"))
print "Done Building Word To Article Map"
pass
def test_build_bipartite_graph():
word_to_article_map = pickle.load(open(word_to_article_file_name, "rb"))
article_to_word_map = pickle.load(open(article_to_word_file_name, "rb"))
word = 'hi'
print len(word_to_article_map[word])
print word_to_article_map[word]
print len(article_to_word_map[34])
pass
def find_article_intersection():
article1_index = 34
article2_index = 76
words = load_dictionary()
articles = pickle.load(open(articles_file_name, "rb"))
article1 = articles[article1_index]
article2 = articles[article2_index]
common_count = 0
for word in words:
if article1.dict_0_1.has_key(word) and article2.dict_0_1.has_key(word):
common_count += 1
print common_count
pass
def try_parsing_date(text):
text = text.replace('SEPT.', 'SEP.')
for fmt in ('%b. %d, %Y', '%B %d, %Y'):
try:
return datetime.strptime(text, fmt)
except ValueError:
pass
print text
def find_articles_with_no_date():
all_dates = []
articles = pickle.load(open(articles_file_name, "rb"))
print len(articles)
no_date_count = 0
for article in articles:
# print article
if article.publication is None or article.publication == "":
no_date_count += 1
# dt = article.publication.remove
all_dates.append(try_parsing_date(article.publication))
all_dates.sort()
print all_dates
# print no_date_count
def li_code():
word_to_article_map = pickle.load(open(word_to_article_file_name, "rb"))
articles_file = open(articles_file_name, "rb")
articles = pickle.load(articles_file)
articles_file.close()
article_word_count = dict()
articles_stat = articles
# count word in each article
for item in articles_stat:
article_word_count[item.id] = dict()
for w in item.dict_count:
article_word_count[item.id][w] = item.dict_count[w]
# calculate article -> word weight
article_word_prob = dict()
for item in article_word_count:
article_word_prob[item] = dict()
total = sum(article_word_count[item].values())
for w in article_word_count[item]:
article_word_prob[item][w] = float(article_word_count[item][w]) / float(total)
word_l = word_to_article_map.keys()
word_to_article_prob = dict()
# calculate word->article weight
for item in word_l:
word_to_article_prob[item] = dict()
for a in article_word_prob:
if item in article_word_prob[a]:
word_to_article_prob[item][a] = article_word_prob[a][item]
# normalize
word_to_article_prob_norm=dict()
for item in word_to_article_prob:
word_to_article_prob_norm[item]=dict()
total=sum(word_to_article_prob[item].values())
for p in word_to_article_prob[item]:
word_to_article_prob_norm[item][p]=float(word_to_article_prob[item][p])/float(total)
# generate original matrix
origin = []
for st in article_word_prob:
l = [0.0] * 339
for w in article_word_prob[st]:
for ed in word_to_article_prob_norm[w]:
l[ed] += article_word_prob[st][w] * word_to_article_prob_norm[w][ed]
origin.append(l)
start_time = time.clock()
build_mat_coll(word_to_article_map, word_to_article_prob_norm, article_word_prob)
print "Total time:" + str(time.clock() - start_time)
print "Done Processing"
def build_mat_coll(word_to_article_map, word_to_article_prob_norm, article_word_prob):
threshold = 20
mat_coll = dict()
print len(word_to_article_prob_norm)
process_counter = 0
for w in word_to_article_prob_norm:
if process_counter % 10 == 0:
print process_counter
process_counter += 1
if len(word_to_article_map[w]) <= threshold:
continue
sink_word = w # build matrix if w is a sink word
mat_coll[sink_word] = []
for st in article_word_prob:
l = [0.0] * 339
for w2 in article_word_prob[st]: # w2 as a middle between 2 document
if w2 != sink_word:
for ed in word_to_article_prob_norm[w2]:
l[ed] += article_word_prob[st][w2] * word_to_article_prob_norm[w2][ed]
mat_coll[sink_word].append(l) # append one row of the matrix
print len(mat_coll[sink_word])
pickle.dump(mat_coll, open(mat_coll_file_name, "wb"))
if __name__ == '__main__':
in1 = "pickles/TheNewYorker.xml"
out1 = thenewyorker_output
process_articles(in1, out1)
# process_articles("NewYorkTimes.xml", articles_file_name)
# process_articles()
test_articles(out1)
# build_bipartite_graph()
# test_build_bipartite_graph()
# find_article_intersection()
# find_articles_with_no_date()
# li_code()
pass
| true |
b6f66053ce30a44d6d9817b2578102662c9abe87
|
Python
|
amitramachandran/pythonprogramming.org
|
/list overlap comprehensin.py
|
UTF-8
| 455 | 3.4375 | 3 |
[] |
no_license
|
import random
values=range(1,100)
b=[]
a=[]
c=[]
e=[]
#two random list generation
for i in range(10):
b.append(int(random.choice(values)))
a.append(int(random.choice(values)))
print a,b
#this shows the logic for common elements without duplication
c=[num for num in a if num in b]
d=set(c)
print "common elements in both list is :",list(d)
print "the first and last element in list is :"
print [a[0], a[len(a)-1],b[0], b[len(b)-1]]
| true |
8e6681e9f6e968b479e6c5e796b352077828f150
|
Python
|
sangee9968/codekata6
|
/print_digits.py
|
UTF-8
| 91 | 2.96875 | 3 |
[] |
no_license
|
n=int(input())
s=""
for i in str(n):
s=s+str(i)+" "
#print result
print(s.strip())
| true |
ce175abb9bb4138b797a14dc5971b1d5d7214d66
|
Python
|
GB-CodeDreams/Statist
|
/crawler/crawler/bd_pages_parser.py
|
UTF-8
| 6,407 | 2.546875 | 3 |
[] |
no_license
|
#!/usr/bin/python3
from sqlalchemy import and_, func
from datetime import datetime
from string import punctuation
from bs4 import BeautifulSoup
from html2text import html2text
import models
from bd_pages_downloader import page_downloader
def make_word_indexes_dict(text):
text = text.lower()
word_list = text.replace('>', ' ').replace('<', ' ')\
.replace('(', ' ').replace(')', ' ').split()
for i in range(len(word_list)):
word_list[i] = word_list[i].strip(punctuation + " ")
index_dict = {word: [] for word in word_list}
for word in index_dict:
search_start = 0
for _n in range(word_list.count(word)):
# index = text.index(word, search_start)
# search_start = index + 1
list_index = word_list.index(word, search_start)
index = sum(map(lambda s: len(s), word_list[:list_index]), list_index)
search_start = list_index + 1
index_dict[word].append(index)
return index_dict
def count_page_person_rank(person, word_index_dict):
rank = 0
for kw in person.keywords:
word1 = kw.name.lower()
if not kw.name_2 or not kw.distance:
word1_ind_list = word_index_dict.get(word1, [])
return len(word1_ind_list)
word2 = kw.name_2.lower()
word1_ind_list = word_index_dict.get(word1, [])
word2_ind_list = word_index_dict.get(word2, [])
distance = kw.distance
for ind1 in word1_ind_list:
for ind2 in word2_ind_list:
i1, i2 = ind1, ind2
w1, w2 = word1, word2
if i1 > i2:
i1, i2 = i2, i1
w1, w2 = w2, w1
start_distance = i2 - i1
if start_distance == 0 or start_distance < len(w1):
continue
word_distance = start_distance - len(w1)
if word_distance <= distance:
rank += 1
return rank
def count_person_old_ranks_sum(session, page, person):
person_old_common_rank = 0
person_url_prev_ranks = session.query(models.PersonPageRanks).join(models.Pages, models.Pages.url==page.url).filter(models.PersonPageRanks.person_id==person.id).filter(models.Pages.id==models.PersonPageRanks.page_id)
for url_rank in person_url_prev_ranks.all():
person_old_common_rank += url_rank.rank
return person_old_common_rank
def parse_pages_from_db(session):
print("\nStart person page rank counting.")
n = 0
today_date = datetime.utcnow().date()
stmt = session.query(models.Pages.url, func.max(models.Pages.last_scan_date).label('lsd')).group_by(models.Pages.url).subquery()
last_scanned_pages = session.query(models.Pages).join(stmt, and_(models.Pages.last_scan_date == stmt.c.lsd, models.Pages.url == stmt.c.url)).filter(func.DATE(models.Pages.last_scan_date) != today_date)
never_scanned_pages = session.query(models.Pages).filter(models.Pages.last_scan_date == None)
for page in last_scanned_pages.union(never_scanned_pages).order_by(models.Pages.id):
new_url = None
if not page.last_scan_date:
page.last_scan_date = datetime.utcnow()
new_url = True
new_ranks = []
        # download the page and open it in the program
try:
page_html = page_downloader(page.url)
n += 1
except:
print("failed to download: ", page.url)
continue
        # strip everything unnecessary from the html
html_text = html2text(BeautifulSoup(page_html, "lxml").text)
        # convert the text into a dictionary where the keys are words
        # and the values are lists of their character indexes
word_indexes_dict = make_word_indexes_dict(html_text)
for _ in range(2):
try:
persons_list = session.query(models.Persons).all()
except:
session.rollback()
from main import DB, create_db_session
session = create_db_session(DB)
else:
break
for person in persons_list:
            # compute the person's rank on the page
person_new_common_rank = count_page_person_rank(person,
word_indexes_dict)
if new_url:
if person_new_common_rank:
new_ranks.append(models.PersonPageRanks(person.id,
page.id,
person_new_common_rank))
else:
                # compute the person's current total rank stored in the database
person_old_common_rank = count_person_old_ranks_sum(session,
page,
person)
                # if the rank has changed, create a new rank with the number of new
                # mentions and add it to the list of new ranks
if person_new_common_rank > person_old_common_rank:
new_mentions = person_new_common_rank - person_old_common_rank
new_ranks.append(models.PersonPageRanks(person.id,
None,
new_mentions))
        # if there are objects in the list of new ranks,
        # create a page for them with the current date and attach the ranks to it
if new_ranks:
if not new_url:
new_page = models.Pages(page.url, page.site_id,
page.found_date_time, datetime.utcnow())
session.add(new_page)
session.commit()
for rank in new_ranks:
rank.page_id = new_page.id
session.add_all(new_ranks)
session.commit()
if n >= 10 and not n % (10**(len(str(n)) - 1)):
print('%s pages scanned...' % n)
| true |
2909272853d7029ad75cef776bee3cee14947844
|
Python
|
metacoma/overlap
|
/overlap.py
|
UTF-8
| 371 | 3.09375 | 3 |
[] |
no_license
|
import sys, json
import itertools
def overlap(a):
s = sorted(a)
result = []
n = 0
for i in s:
for j in range(0, n):
e = s[j]
if (e[1] >= i[0] and e[1] <= i[1]):
result.append(e)
if (i[0] >= e[0] and e[1] >= i[0]):
result.append(i)
n = n + 1
return result
print(json.dumps(overlap(json.load(sys.stdin)), indent=4))
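
# Example invocation (assumed, not part of the original script):
#   echo '[[1, 5], [4, 9], [10, 12]]' | python overlap.py
# prints the overlapping pair of intervals [[1, 5], [4, 9]] (pretty-printed),
# since [10, 12] does not overlap either of the other two.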
| true |
5639c74974013512398b1a7b37eff7ad436aa8c8
|
Python
|
Ziggid/GetRichQuick
|
/GetRichQuick/Stock.py
|
UTF-8
| 798 | 3.296875 | 3 |
[] |
no_license
|
import requests
def getCurrentStockPrice(stockId: str):
response = requests.get(
"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=" + stockId +
"&interval=1min&outputsize=compact&apikey=HUTM7V18LBLQPIOP")
# Since we are retrieving stuff from a web service, it's a good idea to check for the return status code
# See: https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
if response.status_code != 200:
raise ValueError("Could not retrieve data, code:", response.status_code)
# The service sends JSON data, we parse that into a Python datastructure
raw_data = response.json()
return [max(raw_data['Time Series (1min)'].keys()),
raw_data['Time Series (1min)'][max(raw_data['Time Series (1min)'].keys())]['4. close']]
| true |
dc5e5e68921594eef5331b2d5a72131c5f1cde5d
|
Python
|
estherbs/iovana.github.io
|
/reddiment.py
|
UTF-8
| 4,574 | 2.796875 | 3 |
[] |
no_license
|
from flask import Flask
from flask import request, render_template
import requests, time, string, re, operator
from lxml import html
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
form_data = request.form['url']
word_list = check_url(form_data, 5)
print(word_list)
return render_template('main.html', slide="url is valid",
pos1=word_list[0], pos2=word_list[1], pos3=word_list[2], pos4=word_list[3],
pos5=word_list[4], pos6=word_list[5], pos7=word_list[6], pos8=word_list[7],
pos9=word_list[8], pos10=word_list[9],neg1=word_list[10], neg2=word_list[11],
neg3=word_list[12], neg4=word_list[13], neg5=word_list[14], neg6=word_list[15],
neg7=word_list[16], neg8=word_list[17], neg9=word_list[18], neg10=word_list[19],
percentage=word_list[20])
return render_template('main.html', slide="")
# Scrape comments with xpath, find element with data-type comment, then find child with class md and then scrape the
# text within the <p></p> elements
# Strip the comments of all punctuation, separate them by space, lower case all words and split into an array
#
def parse_page(name):
raw_html = html.fromstring(name.content)
comments = raw_html.xpath('//div[@data-type="comment"]//div[@class="md"]//text()')
comment_string = ""
for comment in comments:
comment_string += comment
comment_space_nodot = comment_string.replace(".", " ")
exclude = set(string.punctuation)
comments_no_punctuation = ''.join(ch for ch in comment_space_nodot if ch not in exclude)
comments_nolines = comments_no_punctuation.replace("\n", " ")
' '.join(comments_nolines.split())
string_arr = comments_nolines.split(" ")
for s in string_arr:
s.lower()
return hashmapfunction(string_arr)
def importcsv(filename):
with open(filename, 'rU') as f:
wordraw = f.readline()
words = wordraw.split(",")
new_dict = {}
for word in words:
new_dict[word] = 0
return new_dict
def hashmapfunction(stringarr):
positive_hashmap = importcsv('positive-words.csv')
negative_hashmap = importcsv('negative-words.csv')
for word in stringarr:
if word in positive_hashmap.keys():
positive_hashmap[word] += 1
elif word in negative_hashmap.keys():
negative_hashmap[word] += 1
sorted_positivehashmap = sorted(positive_hashmap.items(), key = operator.itemgetter(1), reverse=True)
sorted_negativehashmap = sorted(negative_hashmap.items(), key = operator.itemgetter(1), reverse=True)
total_positive = total(sorted_positivehashmap)
total_negative = total(sorted_negativehashmap)
total_all = total_positive + total_negative
toptenpos = topten(sorted_positivehashmap)
toptenneg = topten(sorted_negativehashmap)
sortedwords = toptenpos + toptenneg
    percentage = (-200 / total_all * total_positive) + 100
sortedwords.append(str(round(percentage, 2)))
return sortedwords
def total(sorted_list):
i = 0
total_val = 0
while i < 10:
temp_tuple = sorted_list[i]
total_val += temp_tuple[1]
i += 1
return total_val
def topten(arr):
i = 0
finalarr = []
while i < 10:
if len(arr) > i:
mypair = arr[i]
if mypair[1] < 10:
finalarr.append(" " + str(mypair[1]) + " - " + mypair[0].capitalize())
else:
finalarr.append(str(mypair[1]) + " - " + mypair[0].capitalize())
else:
finalarr.append("N/A")
i += 1
return finalarr
def check_url(url, timeout):
var = "(https://www.)?reddit.com\/r\/[a-zA-Z_]+\/(comments)\/.*"
if re.search(var, url):
page_name = requests.get(url)
print(page_name.status_code)
if page_name.status_code == requests.codes.ok:
print('website exists ' + url)
return parse_page(page_name)
else:
if timeout > 0:
print('no website here ' + url + ' trying again...')
time.sleep(3)
                timeout -= 1
                return check_url(url, timeout)
else:
print("could not connect")
else:
print("no")
@app.route('/submit', methods=['POST'])
def submit():
form_data = request.form['url']
check_url(form_data, 5)
app.run()
| true |
467aa064f3d6242424697f31088293a1638ee12b
|
Python
|
notro/tmp_CircuitPython_stdlib
|
/stripped/test/test_posixpath.py
|
UTF-8
| 13,319 | 2.75 | 3 |
[] |
no_license
|
import itertools
import os
import posixpath
import sys
import unittest
import warnings
from posixpath import realpath, abspath, dirname, basename
from test import support, test_genericpath
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
"""
On Windows, posixpath.abspath still returns paths with backslashes
instead of posix forward slashes. If this is the case, several tests
fail, so skip them.
"""
found_backslash = '\\' in ABSTFN
msg = "ABSTFN is not a posix path - tests fail"
return [test, unittest.skip(msg)(test)][found_backslash]
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class PosixPathTest(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
support.unlink(support.TESTFN + suffix)
safe_rmdir(support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"),
"/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"),
"/foo/bar/baz/")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"/bar", b"baz"),
b"/bar/baz")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"baz"),
b"/foo/bar/baz")
self.assertEqual(posixpath.join(b"/foo/", b"bar/", b"baz/"),
b"/foo/bar/baz/")
def test_join_errors(self):
# Check posixpath.join raises friendly TypeErrors.
errmsg = "Can't mix strings and bytes in path components"
with self.assertRaisesRegex(TypeError, errmsg):
posixpath.join('str', b'bytes')
# regression, see #15377
with self.assertRaises(TypeError) as cm:
posixpath.join(None, 'str')
self.assertNotEqual(cm.exception.args[0], errmsg)
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
self.assertEqual(posixpath.split(b"/foo/bar"), (b"/foo", b"bar"))
self.assertEqual(posixpath.split(b"/"), (b"/", b""))
self.assertEqual(posixpath.split(b"foo"), (b"", b"foo"))
self.assertEqual(posixpath.split(b"////foo"), (b"////", b"foo"))
self.assertEqual(posixpath.split(b"//foo//bar"), (b"//foo", b"bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path),
("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path),
("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path),
("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"),
(filename + ext + "/", ""))
path = bytes(path, "ASCII")
filename = bytes(filename, "ASCII")
ext = bytes(ext, "ASCII")
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext(b"/" + path),
(b"/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc/" + path),
(b"abc/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc.def/" + path),
(b"abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
(b"/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + b"/"),
(filename + ext + b"/", b""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
self.assertIs(posixpath.isabs(b""), False)
self.assertIs(posixpath.isabs(b"/"), True)
self.assertIs(posixpath.isabs(b"/foo"), True)
self.assertIs(posixpath.isabs(b"/foo/bar"), True)
self.assertIs(posixpath.isabs(b"foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
self.assertEqual(posixpath.basename(b"/foo/bar"), b"bar")
self.assertEqual(posixpath.basename(b"/"), b"")
self.assertEqual(posixpath.basename(b"foo"), b"foo")
self.assertEqual(posixpath.basename(b"////foo"), b"foo")
self.assertEqual(posixpath.basename(b"//foo//bar"), b"bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
self.assertEqual(posixpath.dirname(b"/foo/bar"), b"/foo")
self.assertEqual(posixpath.dirname(b"/"), b"/")
self.assertEqual(posixpath.dirname(b"foo"), b"")
self.assertEqual(posixpath.dirname(b"////foo"), b"////")
self.assertEqual(posixpath.dirname(b"//foo//bar"), b"//foo")
def test_islink(self):
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), False)
f = open(support.TESTFN + "1", "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
finally:
if not f.close():
f.close()
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
with warnings.catch_warnings():
self.assertIs(posixpath.ismount(b"/"), True)
def test_ismount_non_existent(self):
# Non-existent mountpoint.
self.assertIs(posixpath.ismount(ABSTFN), False)
try:
os.mkdir(ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
safe_rmdir(ABSTFN)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"),
"/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
self.assertEqual(posixpath.normpath(b""), b".")
self.assertEqual(posixpath.normpath(b"/"), b"/")
self.assertEqual(posixpath.normpath(b"//"), b"//")
self.assertEqual(posixpath.normpath(b"///"), b"/")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//"), b"/foo/bar")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//.//..//.//baz"),
b"/foo/baz")
self.assertEqual(posixpath.normpath(b"///..//./foo/.//bar"),
b"/foo/bar")
@skip_if_ABSTFN_contains_backslash
def test_realpath_curdir(self):
self.assertEqual(realpath('.'), os.getcwd())
self.assertEqual(realpath('./.'), os.getcwd())
self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
self.assertEqual(realpath(b'.'), os.getcwdb())
self.assertEqual(realpath(b'./.'), os.getcwdb())
self.assertEqual(realpath(b'/'.join([b'.'] * 100)), os.getcwdb())
@skip_if_ABSTFN_contains_backslash
def test_realpath_pardir(self):
self.assertEqual(realpath('..'), dirname(os.getcwd()))
self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
self.assertEqual(realpath(b'..'), dirname(os.getcwdb()))
self.assertEqual(realpath(b'../..'), dirname(dirname(os.getcwdb())))
self.assertEqual(realpath(b'/'.join([b'..'] * 100)), b'/')
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"),
"../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
def test_relpath_bytes(self):
(real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
try:
curdir = os.path.split(os.getcwdb())[-1]
self.assertRaises(ValueError, posixpath.relpath, b"")
self.assertEqual(posixpath.relpath(b"a"), b"a")
self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
self.assertEqual(posixpath.relpath(b"a", b"../b"),
b"../"+curdir+b"/a")
self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
b"../"+curdir+b"/a/b")
self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')
self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
finally:
os.getcwdb = real_getcwdb
class PosixCommonTest(test_genericpath.CommonTest, unittest.TestCase):
pathmodule = posixpath
attributes = ['relpath', 'samefile'] ###
| true |
5f60740d0f7f7dd92f35ad9bc4fc17ce962dc128
|
Python
|
Nyapy/TIL
|
/04_algorithm/02day/전기버스.py
|
UTF-8
| 1,321 | 2.6875 | 3 |
[] |
no_license
|
import sys
sys.stdin = open("전기버스_input.txt")
sys.stdout = open("전기버스_output.txt", "w")
T = int(input())
for tc in range(T):
move, stop, charger = map(int, input().split())
bus_stop = [0] * (stop+1)
charge_count = 0
stop_list = list(map(int, input().split()))
for i in range(1, stop):
if i in stop_list:
bus_stop[i] += 1
for j in range(len(bus_stop)):
if j == 0:
charge = move
charge -= 1
else:
if len(bus_stop) - j > move :
li = []
for k in range(1, charge + 1):
if bus_stop[j + k] == 1:
li += [1]
else:
li += [0]
if bus_stop[j] == 0:
charge -= 1
elif bus_stop[j] == 1 and 1 in li :
charge -= 1
elif bus_stop[j] == 1 and 1 not in li :
charge = move -1
charge_count += 1
else:
if bus_stop[j] == 1 and charge < len(bus_stop)-j:
charge = move -1
charge_count += 1
if charge < 0:
charge_count = 0
break
print("#{} {}" .format(tc+1, charge_count))
| true |
29b7dc55f1aad89eca679560fc79492484830c3a
|
Python
|
aoxy/Machine-Learning-Lab
|
/Lab1/LR.py
|
UTF-8
| 3,615 | 3.109375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def main():
    # load the data
x_train = np.load("./data/train_data.npy") # shape: (70, 3)
y_train = np.load("./data/train_target.npy") # shape: (70, )
x_test = np.load("./data/test_data.npy") # shape: (30, 3)
y_test = np.load("./data/test_target.npy") # shape: (30, )
    # reshape the targets to (n, 1)
    Y_train = y_train.reshape(y_train.shape[0], 1)
    Y_test = y_test.reshape(y_test.shape[0], 1)
    # solve for W
    W, W_save = gradient_descent(x_train, Y_train, 0.001, 100000)
    print("W = ", W)
    # training set accuracy
    p_train = predict(x_train, W)
    print(u'Training set accuracy: %f%%' % np.mean(np.float64(p_train == Y_train)*100))
    # test set accuracy
    p_test = predict(x_test, W)
    print(u'Test set accuracy: %f%%' % np.mean(np.float64(p_test == Y_test)*100))
# 动态展示梯度下降法优化LR模型的过程
plt.figure(num=0, figsize=(18, 14))
show_dynamic(x_train, y_train, W_save)
# 作训练集结果图(包括决策边界)
plt.figure(num=1, figsize=(18, 14))
show_result(x_train, y_train, W, "Train")
# 作测试集结果图(包括决策边界)
plt.figure(num=2, figsize=(18, 14))
show_result(x_test, y_test, W, "Test")
def gradient_descent(X, Y, alpha=0.001, max_iter=100000):
    """
    Return W found by gradient descent; X has shape (n, 3), Y has shape (n, 1).
    Each step applies the vectorized logistic-regression update
        W <- W - alpha * X^T (sigmoid(X W) - Y)
    """
    W = np.random.randn(X.shape[1], 1)  # randomly initialize W, shape (3, 1)
    W_save = []  # record W along the way, used to animate the iteration
    save_step = int(max_iter/100)  # keep 100 snapshots of W
    Xt = np.transpose(X)  # Xt has shape (3, 70)
    for i in range(max_iter):
        H = sigmoid(np.dot(X, W))  # H has shape (70, 1)
        dW = np.dot(Xt, H-Y)  # dW has shape (3, 1)
        W = W-alpha * dW  # update W
        if i % save_step == 0:
            W_save.append([W.copy(), i])
    return W, W_save
def sigmoid(z):
    h = np.zeros((len(z), 1))  # initialize with the same length as z
    h = 1.0/(1.0+np.exp(-z))
    return h
def predict(X, W):
    m = X.shape[0]  # m samples
    p = np.zeros((m, 1))
    p = sigmoid(np.dot(X, W))  # predicted probabilities
    # predict 1 when the probability exceeds 0.5, otherwise 0
    for i in range(m):
        p[i] = 1 if p[i] > 0.5 else 0
    return p
def show_result(X, y, W, title):
w0 = W[0][0]
w1 = W[1][0]
w2 = W[2][0]
x1_low = min(X[:, 1])
x1_high = max(X[:, 1])
plotx1 = np.arange(x1_low, x1_high, 0.01)
plotx2 = -w0/w2-w1/w2*plotx1
plt.plot(plotx1, plotx2, c='r', label='decision boundary')
plt.title(title)
plt.scatter(X[:, 1][y == 0], X[:, 2][y == 0], s=90, label='label = 0')
plt.scatter(X[:, 1][y == 1], X[:, 2][y == 1],
marker='s', s=90, label='label = 1')
plt.xlabel("X1")
plt.ylabel("X2")
plt.grid()
plt.legend()
plt.show()
def show_dynamic(X, y, W_save):
x1_low = min(X[:, 1])
x1_high = max(X[:, 1])
for w in W_save:
plt.clf()
w0 = w[0][0][0]
w1 = w[0][1][0]
w2 = w[0][2][0]
plotx1 = np.arange(x1_low, x1_high, 0.01)
plotx2 = -w0/w2-w1/w2*plotx1
plt.plot(plotx1, plotx2, c='r', label='decision boundary')
plt.scatter(X[:, 1][y == 0], X[:, 2][y == 0], s=90, label='label = 0')
plt.scatter(X[:, 1][y == 1], X[:, 2][y == 1],
marker='s', s=90, label='label = 1')
plt.grid()
plt.legend()
        plt.title('iter:%s' % str(w[1]))  # the built-in str; np.str is deprecated
plt.pause(0.001)
plt.show()
if __name__ == "__main__":
main()
| true |
e39b900dd27fb4780d17d2c85d33676863405899
|
Python
|
ashwinmhn/guvi
|
/loop/hcf.py
|
UTF-8
| 308 | 3.484375 | 3 |
[] |
no_license
|
# program to find HCF (GCD) of two numbers
print("Enter a")
a=int(input())
print("Enter b")
b=int(input())
small=0
hcf=0
if a > b:
small = b
else:
small = a
for i in range(1, small+1):
if((a % i == 0) and (b % i == 0)):
hcf = i
print(hcf)
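# Optional sanity check: the brute-force loop above should agree with the standard library, e.g.
#   import math
#   assert hcf == math.gcd(a, b)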
| true |
6626481a7494160f0daf8818167b820f48aa8792
|
Python
|
ozsoftcon/QDataSet
|
/qmldataset/system_layers/quantum_evolution.py
|
UTF-8
| 1,100 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
"""Quantum Evolution
"""
from tensorflow import (
complex64,
eye,
shape,
Tensor
)
from tensorflow.keras import layers
from .quantum_step_layer import QuantumCell
class QuantumEvolution(layers.RNN):
"""Custom layer that takes Hamiltonian as input, and
produces the time-ordered evolution unitary as output
:param time_step: time step for each propagator
"""
def __init__(self, time_step: float, **kwargs):
# use the custom-defined QuantumCell as base class for the nodes
cell = QuantumCell(time_step)
        # we must call this function for any custom TensorFlow layer
super().__init__(cell, **kwargs)
def call(self, inputs: Tensor): # pylint: disable=arguments-differ
"""Custom call method of the layer
"""
# define identity matrix with correct dimensions to be used as initial propagator
dimensions = shape(inputs)
identity = eye(
dimensions[-1], batch_shape=[dimensions[0], dimensions[2]], dtype=complex64)
return super().call(inputs, initial_state=[identity])
| true |
f86fbacf98d4837fde30209ba6650a72104e859a
|
Python
|
recantha/musicbox
|
/mcp.py
|
UTF-8
| 201 | 2.625 | 3 |
[] |
no_license
|
from gpiozero import MCP3008, LED
from time import sleep
pot = MCP3008(channel=3, device=0)
purple_led = LED(24)
purple_led.on()
while True:
pot_reading = pot.value
print(pot_reading)
sleep(0.1)
| true |
e2444526822efa526669cd5dea34e2431cc36b62
|
Python
|
NSLS-II-XPD/ipython_ophyd
|
/profile_collection/simulators/92-testdipfit.py
|
UTF-8
| 3,618 | 2.84375 | 3 |
[] |
no_license
|
#individually fit peaks
'''
The problem is that the peak is in too narrow of a range. If we're off in
our guess, then we're wrong. Instead, we should just fit to a peak and
return the center.
'''
uids = ('79b54d30-7fff-4a24-80c1-1d5cb3e71373',
'b0b7a12b-ec80-47a1-ac24-c86eb9ebf464',
'c1e766fa-dff6-4747-877a-5a26de278ca4',
'5de9a73c-367e-43a9-8377-80e945ad165f')
#from databroker import Broker
#db = Broker.named('xpd')
#peak1_c = db[uids[0]].table()
#peak1_f = db[uids[1]].table()
#peak2_c = db[uids[2]].table()
#peak2_f = db[uids[3]].table()
import pandas as pd
import numpy as np
# The fitting helpers below (Model, Parameter, Parameters, voigt) are used without imports in
# the original script; they are assumed to come from lmfit.
from lmfit import Model, Parameter, Parameters
from lmfit.lineshapes import voigt
peak1_c = pd.read_csv("peak1_c.csv")
peak1_f = pd.read_csv("peak1_f.csv")
peak2_c = pd.read_csv("peak2_c.csv")
peak2_f = pd.read_csv("peak2_f.csv")
peaks_x = list(peak1_f.th_cal)
peaks_y = list(peak1_f.sc_chan1)
peaks_x.extend(list(peak2_f.th_cal))
peaks_y.extend(list(peak2_f.sc_chan1))
xdata = np.array(peaks_x)
ydata = np.array(peaks_y)
import matplotlib.pyplot as plt
def peakfunc(x, amplitude, sigma, x0, slope, intercept):
# amplitude is *not* amplitude!!! needs to be rescaled by sigma!!!!
amplitude = amplitude*sigma*np.sqrt(2*np.pi)
result = voigt(x=x, amplitude=amplitude, center=x0, sigma=sigma)
result += slope*x + intercept
return result
def guess(x, y, sigma=None):
'''
sigma is often hard to guess, allow it to be externally guessed
'''
g_average = np.average(y)
# choose between a peak or dip
dip_amp = np.min(y) - g_average
peak_amp = np.max(y) - g_average
if np.abs(dip_amp) > peak_amp:
print("found a dip")
g_amp = dip_amp
else:
print("found a peak")
g_amp = peak_amp
    if sigma is None:
        # guess the fwhm from the supplied x and y
        w, = np.where(np.abs(y-g_average) < np.abs(g_amp/2.))
        g_fwhm = np.abs(x[w[-1]] - x[w[0]])
        # guess...
        g_sigma = g_fwhm/2.
    else:
        g_sigma = sigma
    x0 = x[np.argmax(np.abs(y-g_average))]
init_guess = {
'amplitude': Parameter('amplitude', value=g_amp, vary=True),
'sigma': Parameter('sigma', min=0, value=g_sigma, vary=True),
'x0' : Parameter('x0', value=x0, vary=True),
'intercept': Parameter('intercept', value=g_average,vary=True),
'slope': Parameter('slope', value=0, vary=True),
}
params = Parameters(init_guess)
return params
def ispeak(x, y, sdev=3):
'''
Decision making logic for if we have a peak or dip.
sdev : number of std dev's the peak must be above noise
'''
noise = np.std(y)
avg = np.average(y)
peak = np.max(np.abs(y - avg))
return peak > sdev*noise
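# Quick sanity sketch (hypothetical data): a flat trace with one large spike
#   y = np.zeros(100); y[50] = 10.0
#   ispeak(np.arange(100), y)  # -> True with the default sdev=3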
def guess_and_fit(xdata, ydata, sigma=None):
'''
Guess fit and return results.
'''
peakmodel = Model(peakfunc, independent_vars=['x'])
init_guess = guess(xdata, ydata, sigma=sigma)
return peakmodel.fit(data=ydata, x=xdata, params=init_guess)
# okay, works for all these
xdata = np.array(peak1_c.th_cal)
ydata = np.array(peak1_c.sc_chan1)
xdata = np.array(peak2_c.th_cal)
ydata = np.array(peak2_c.sc_chan1)
xdata = np.array(peak1_f.th_cal)
ydata = np.array(peak1_f.sc_chan1)
xdata = np.array(peak2_f.th_cal)
ydata = np.array(peak2_f.sc_chan1)
peakmodel = Model(peakfunc, independent_vars=['x'])
init_guess = guess(xdata, ydata, sigma=.001)
yguess = peakmodel.eval(params=init_guess, x=xdata)
result = guess_and_fit(xdata, ydata, sigma=.001)
from pylab import *
ion()
figure(10);clf();
plot(xdata, ydata)
plot(xdata, result.best_fit)
plot(xdata, yguess, color='r')
| true |
5800e458ca68bfc3da4b3cfc004917e4edeb17af
|
Python
|
shobhit-cstep/IDEX-1
|
/LSTM.py
|
UTF-8
| 1,116 | 2.921875 | 3 |
[] |
no_license
|
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from sklearn.metrics import mean_squared_error
class LSTMModel(object):
def __init__(self,):
self.model = None
def build(self, layer1_parameters=50, layer2_parameters=50, input_shape_n_steps=3, input_shape_n_features=1, loss='mse', metrics='mae'):
# define model
self.model = Sequential()
self.model.add(LSTM(layer1_parameters, activation='relu', return_sequences=True, input_shape=(input_shape_n_steps, input_shape_n_features)))
self.model.add(LSTM(layer2_parameters, activation='relu'))
self.model.add(Dense(1))
self.model.compile(optimizer='adam', loss=loss, metrics=[metrics])
def fit(self, x_train, y_train, epochs=200, verbose=1):
# fit model
self.model.fit(x_train, y_train, epochs=epochs, verbose=verbose)
def forecast(self, x_test, y_test, verbose=0):
y_hat = self.model.predict(x_test, verbose=verbose)
mse_error = mean_squared_error(y_test, y_hat, squared=False)
return y_hat, mse_error
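# Minimal usage sketch (hypothetical shapes; a window of 3 timesteps, 1 feature per step):
#   model = LSTMModel()
#   model.build(input_shape_n_steps=3, input_shape_n_features=1)
#   model.fit(x_train, y_train, epochs=50)          # x_train shape: (n, 3, 1)
#   y_hat, error = model.forecast(x_test, y_test)   # squared=False makes this an RMSE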
| true |
770621b3e4358facf608c6aa4a6cecd5db78b2f4
|
Python
|
adrian729/LP
|
/Python/PracticaPython/cerca.py
|
UTF-8
| 10,444 | 2.609375 | 3 |
[] |
no_license
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import unicodedata
import urllib.request
import xml.etree.ElementTree as ElementTree
from math import sin, cos, sqrt, atan2, radians
from ast import literal_eval
# Define valid call arguments
parser = argparse.ArgumentParser()
parser.add_argument("--lan", help="select consults language ('cat' for "
"Catalan, 'es' for Spanish, 'en' for "
"English, 'fr' for French). If "
"nothing is selected it will be "
"Catalan by default.")
parser.add_argument("--key", help="consult to do over the interest"
" points. Use tuples representing "
"conjunctions, lists for disjunctions"
" and strings for single consults. "
"Use dictionaries to associate values"
" with fields, where the valid fields"
" are name, content and location.")
args = parser.parse_args()
# Catch arguments and halt if invalid
# Catalan by default
url_pits = 'http://www.bcn.cat/tercerlloc/pits_opendata.xml'
if args.lan:
if args.lan == 'cat':
url_pits = 'http://www.bcn.cat/tercerlloc/pits_opendata.xml'
elif args.lan == 'es':
url_pits = 'http://www.bcn.cat/tercerlloc/pits_opendata_es.xml'
elif args.lan == 'en':
url_pits = 'http://www.bcn.cat/tercerlloc/pits_opendata_en.xml'
elif args.lan == 'fr':
url_pits = 'http://www.bcn.cat/tercerlloc/pits_opendata_fr.xml'
else:
# invalid lan param
print(
'ERROR: The value {} is not a valid lan value.'
''.format(args.lan)
)
exit(1)
# if no key specified, nothing to do.
if not args.key:
print('No key value.')
exit(0)
# key
key = literal_eval(args.key)
if not isinstance(key, (str, list, tuple, dict)):
print('Invalid key type {}.'.format(type(key)))
exit(1)
# Extract pits and stations info
url_stations = 'http://wservice.viabicing.cat/v1/getstations.php?v=1'
sock_stations = urllib.request.urlopen(url_stations)
xml_stations = sock_stations.read()
sock_stations.close()
sock_pits = urllib.request.urlopen(url_pits)
xml_pits = sock_pits.read()
sock_pits.close()
# globals
# xml trees
stations_root = ElementTree.fromstring(xml_stations)
pits_root = ElementTree.fromstring(xml_pits)
def get_elem_text(elem):
if elem is not None:
return elem.text
return ""
# data
# stations
stations_list = []
for item in stations_root.findall('station'):
status = item.find('status').text
if status == 'OPN':
item_data = {
'id': get_elem_text(item.find('id')),
'lat': get_elem_text(item.find('lat')),
'long': get_elem_text(item.find('long')),
'street': get_elem_text(item.find('street')),
'street_number': get_elem_text(item.find('streetNumber')),
'slots': get_elem_text(item.find('slots')),
'bikes': get_elem_text(item.find('bikes'))
}
stations_list.append(item_data)
stations_list = tuple(stations_list)
# pits
pits_list = []
for item in pits_root.findall('*/row'):
item_data = {
'name': item.find('name').text,
'address': get_elem_text(item.find('addresses/item/address')),
'district': get_elem_text(item.find('addresses/item/district')),
'barri': get_elem_text(item.find('addresses/item/barri')),
'content': get_elem_text(item.find('content')),
'lat': item.find('gmapx').text,
'long': item.find('gmapy').text,
'short_desc': get_elem_text(
item.find('custom_fields/descripcio-curta-pics')
)
}
pits_list.append(item_data)
pits_list = tuple(pits_list)
all_pits_index = []
for pits_index in range(len(pits_list)):
all_pits_index.append(pits_index)
all_pits_index = tuple(all_pits_index)
# methods
def calculate_lat_lon_m_distance(lat1, lon1, lat2, lon2):
earth_radius = 6373000.0
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
distance_lon = lon2 - lon1
distance_lat = lat2 - lat1
a = sin(distance_lat / 2) ** 2 \
+ cos(lat1) * cos(lat2) * sin(distance_lon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
return earth_radius * c
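# Rough haversine check (approximate coordinates, Placa de Catalunya to the Sagrada Familia):
#   calculate_lat_lon_m_distance(41.3870, 2.1700, 41.4036, 2.1744)  # ~1900 (metres)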
def u_format(string):
u_string = unicodedata.normalize("NFKD", string.lower())
u_string = u_string.encode("ascii", "ignore")
return u_string
def has_name(consult_str, pit):
u_consult_str = u_format(consult_str)
return u_consult_str in u_format(pit['name'])
def has_content(consult_str, pit):
u_consult_str = u_format(consult_str)
return u_consult_str in u_format(pit['content'])
def has_location(consult_str, pit):
u_consult_str = u_format(consult_str)
return u_consult_str in u_format(pit['address']) \
or u_consult_str in u_format(pit['district']) \
or u_consult_str in u_format(pit['barri'])
def exec_consult_search(consult, field=''):
"""
Executes a consult over all the interest points listed on pits_list.
:rtype: set
"""
data_set = set()
if isinstance(consult, str):
for p_index in range(len(pits_list)):
if (field == 'name' or field == '') \
and has_name(consult, pits_list[p_index]) \
or field == 'content' \
and has_content(consult, pits_list[p_index]) \
or (field == 'location' or field == '') \
and has_location(consult, pits_list[p_index]):
data_set.add(p_index)
return data_set
elif isinstance(consult, tuple):
data_set = set(all_pits_index) # initial tuple full
# conjunction
for cons_conj in consult:
conj_res = exec_consult_search(cons_conj, field)
data_set = data_set & conj_res
return data_set
elif isinstance(consult, list):
# disjunction
for cons_disj in consult:
disj_res = exec_consult_search(cons_disj, field)
data_set = data_set | disj_res
return data_set
elif isinstance(consult, dict):
data_set = set(all_pits_index) # initial tuple full
# fields conjunction
for f_key in consult:
if f_key != 'name' and f_key != 'content' \
and f_key != 'location':
print('Invalid key "{}"'.format(f_key))
print('The consult', key, 'is an invalid consult.')
exit(1)
f_consult = consult[f_key]
f_conj_res = exec_consult_search(f_consult, f_key)
data_set = data_set & f_conj_res
return data_set
else:
print('The consult "{}" is an invalid consult.'.format(key))
exit(1)
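# Example --key values, following the formats described in the argparse help above
# (the search terms themselves are hypothetical):
#   "'gaudi'"                                 -> single term matched against any field
#   "('museu', 'picasso')"                    -> conjunction: every term must match
#   "['gotic', 'raval']"                      -> disjunction: any term may match
#   "{'name': 'parc', 'location': 'gracia'}"  -> conjunction of per-field consults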
def insert_station(elem, st_list, field):
if int(stations_list[elem][field]) > 0:
for index in range(len(st_list)):
st = st_list[index]
if int(stations_list[st][field]) \
< int(stations_list[elem][field]):
st_list.insert(index, elem)
break
else:
if len(st_list) < 5:
st_list.append(elem)
if len(st_list) > 5:
st_list.pop()
def nearest_stations(p_index):
slots = []
bikes = []
pits_item = pits_list[p_index]
for st_index in range(len(stations_list)):
pits_lat = float(pits_item['lat'])
pits_long = float(pits_item['long'])
st_lat = float(stations_list[st_index]['lat'])
st_long = float(stations_list[st_index]['long'])
dist = calculate_lat_lon_m_distance(pits_lat, pits_long,
st_lat, st_long)
if dist < 500.0:
insert_station(st_index, slots, 'slots')
insert_station(st_index, bikes, 'bikes')
return {'slots': slots, 'bikes': bikes}
# script body
consult_result = exec_consult_search(key, '')
content = 'content'
if len(consult_result) > 2:
content = 'short_desc'
html = '<!DOCTYPE html>\n' \
'<html>' \
'\n\t<head>' \
'\n\t\t<meta charset="UTF-8">' \
'\n\t\t<title>Practica LP - Python</title>' \
'\n\t</head>' \
'\n\t<body>'
html += '\n\t\t<table style="width:100%">' \
'\n\t\t\t<tr style="border-bottom: 2em solid red;">' \
'\n\t\t\t\t<th>name</th>' \
'\n\t\t\t\t<th>address</th>' \
'\n\t\t\t\t<th>content</th>' \
'\n\t\t\t\t<th>stations with slots</th>' \
'\n\t\t\t\t<th>stations with bikes</th>' \
'\n\t\t\t</tr>'
for i in consult_result:
html += '\n\t\t\t<tr>' \
'\n\t\t\t\t<td style="border: 1px solid black"><p>' \
+ pits_list[i]['name'] \
+ '</p></td>' \
'\n\t\t\t\t<td style="border: 1px solid black"><p>' \
+ pits_list[i]['address'] \
+ '</p></td>' \
'\n\t\t\t\t<td style="border: 1px solid black">' \
+ pits_list[i][content] \
+ '</td>'
pit_bikes_slots = nearest_stations(i)
# slots
html += '\n\t\t\t\t<td style="border: 1px solid black"><p>\n'
num = 1
for slot_st in pit_bikes_slots['slots']:
html += '\n\t\t\t\t\t<b>Station ' + str(num) + '</b><br>'
html += '\n\t\t\t\t\t address: ' \
+ stations_list[slot_st]['street'] + ' ' \
+ str(stations_list[slot_st]['street_number']) + '<br>'
html += '\n\t\t\t\t\t slots: ' \
+ stations_list[slot_st]['slots'] + '<br>'
num += 1
html += '\n\t\t\t\t</p></td>'
# bikes
html += '\n\t\t\t\t<td style="border: 1px solid black"><p>\n'
num = 1
for bikes_st in pit_bikes_slots['bikes']:
html += '\n\t\t\t\t\t<b>Station ' + str(num) + '</b><br>'
html += '\n\t\t\t\t\t address: ' \
+ stations_list[bikes_st]['street'] + ' ' \
+ str(stations_list[bikes_st]['street_number']) + '<br>'
html += '\n\t\t\t\t\t bikes: ' \
+ stations_list[bikes_st]['bikes'] + '<br>'
num += 1
html += '\n\t\t\t\t</p></td>'
html += '\n\t\t\t</tr>'
html += '\n\t\t</table>'
html += '\n\t</body>' \
'\n</html>'
print(html)
| true |
61a416b990f94a12e19e53d7f2653697029a6154
|
Python
|
konotorii/Albin
|
/Cogs/help.py
|
UTF-8
| 578 | 2.546875 | 3 |
[] |
no_license
|
import discord
from discord.ext import commands
class HelpCog(commands.Cog, name="help commmand"):
def __init__(self, bot:commands.Bot):
self.bot = bot
@commands.command(name = "help",
usage="help",
description = "displays help command")
@commands.cooldown(1, 2, commands.BucketType.member)
async def help(self, ctx:commands.Context):
await ctx.send("bla bla bla help command bla bla bla")
print("Help command sent!")
def setup(bot:commands.Bot):
bot.add_cog(HelpCog(bot))
| true |
b3c83878ecaf1952b577086a9d42fb99dd6b96f4
|
Python
|
LiuJiazhen1999/Qd-Tree
|
/qdTrees/queryparsing/qdtree.py
|
UTF-8
| 9,459 | 2.640625 | 3 |
[] |
no_license
|
import numpy as np
class IterationResult(object):
def __init__(self, qd_tree, rewards):
self.qd_tree = qd_tree
self.rewards = rewards
def get_qd_tree(self):
return self.qd_tree
def set_qd_tree(self, qd_tree):
self.qd_tree = qd_tree
def get_rewards(self):
return self.rewards
def set_rewards(self, rewards):
self.rewards = rewards
# Object used to describe ranges
class Range(object):
def __init__(self, range_left, range_right):
self.range_left = range_left
self.range_right = range_right
def get_range_left(self):
return self.range_left
def set_range_left(self, range_left):
self.range_left = range_left
def get_range_right(self):
return self.range_right
def set_range_right(self, range_right):
self.range_right = range_right
def print(self):
print(self.range_left, " ", self.range_right)
# Object used to describe cuts
class Cut(object):
def __init__(self, attr1, op, attr2, node_type):
self.attr1 = attr1
self.op = op
self.attr2 = attr2
self.node_type = node_type
def key(self):
return (self.attr1, self.op, self.attr2, self.node_type)
def __hash__(self):
return hash(self.key())
def __eq__(self, other):
if isinstance(other, Cut):
return self.key() == other.key()
return NotImplemented
def get_attr1(self):
return self.attr1
def set_attr1(self, attr1):
self.attr1 = attr1
def get_op(self):
return self.op
def set_op(self, op):
self.op = op
def get_attr2(self):
return self.attr2
def set_attr2(self, attr2):
self.attr2 = attr2
def get_cut_attributes(self):
return self.attr1, self.op, self.attr2, self.node_type
def set_node_type(self, node_type):
self.node_type = node_type
def get_node_type(self):
return self.node_type
def print(self):
print(self.attr1, " ", self.op, " ", self.attr2, " ", self.node_type)
class QdTreeNode(object):
def __init__(self, node_cut, node_ranges, categorical_mask, records, categorical_mask_extended):
self.node_cut = node_cut
self.node_ranges = node_ranges
self.categorical_mask = categorical_mask
self.records = records
self.categorical_mask_extended = categorical_mask_extended
self.left = None
self.right = None
self.is_leaf = False
self.block_id = None
self.encoded = None
def set_encoded(self, encoded):
self.encoded = encoded
def get_encoded(self):
return self.encoded
def get_node_type(self):
if self.node_ranges is not None and \
(self.categorical_mask is not None or self.categorical_mask_extended is not None):
return "BOTH"
if self.node_ranges is not None:
return "RANGE"
if self.categorical_mask is not None or self.categorical_mask_extended is not None:
return "CATEGORICAL"
def get_ranges(self):
return self.node_ranges
def set_range(self, node_ranges):
self.node_ranges = node_ranges
def get_categorical_mask(self):
return self.categorical_mask
def set_categorical_mask(self, categorical_mask):
self.categorical_mask = categorical_mask
def get_node_cut(self):
return self.node_cut
def set_node_cut(self, node_cut):
self.node_cut = node_cut
def get_right(self):
return self.right
def set_right(self, right):
self.right = right
def get_left(self):
return self.left
def set_left(self, left):
self.left = left
def set_is_leaf(self, is_leaf):
self.is_leaf = is_leaf
def get_is_leaf(self):
return self.is_leaf
def set_block_id(self, block_id):
self.block_id = block_id
def get_block_id(self):
return self.block_id
def set_categorical_mask_extended(self, categorical_mask_extended):
self.categorical_mask_extended = categorical_mask_extended
def get_categorical_mask_extended(self):
return self.categorical_mask_extended
def set_records(self, records):
self.records = records
def get_records(self):
return self.records
def print_categorical_mask(self):
for key, value in self.categorical_mask.items():
key.print()
print(value)
def print_categorical_mask_extended(self):
for key, value in self.categorical_mask_extended.items():
key.print()
print(value)
def print_ranges(self):
for key, value in self.node_ranges.items():
print(key)
print(value)
def print(self):
print("Cut")
self.get_node_cut().print()
print("Categorical Mask Extended")
self.print_categorical_mask_extended()
print("Categorical Mask")
self.print_categorical_mask()
print("Categorical Ranges")
self.print_ranges()
def evaluate_query_against_metadata(self, config, query_cuts):
for cut in query_cuts:
node_type = cut.get_node_type()
if node_type == "CATEGORICAL":
if self.evaluate_categorical_against_node_metadata(config, cut):
return True
elif node_type == "RANGE":
if self.evaluate_range_against_node_metadata(config, cut):
return True
elif node_type == "EXTENDED_CUT":
if self.evaluate_extended_against_node_metadata(config, cut):
return True
return False
def evaluate_query_against_all_metadata(self, config, query_cuts):
pass
def evaluate_categorical_against_node_metadata(self, config, cut):
return cut.get_attr1() not in self.categorical_mask \
or cut.get_attr2() not in self.categorical_mask[cut.get_attr1()] \
or self.categorical_mask[cut.get_attr1()][cut.get_attr2()] == 1
# return self.categorical_mask[cut.get_attr1()][cut.get_attr2()] == 1
def evaluate_range_against_node_metadata(self, config, cut):
# TODO evaluate against types
values_map = config.get_config_as_dict("column_types")
if cut.get_attr1() not in values_map:
return True
column_type = values_map[cut.get_attr1()]
node_range = self.node_ranges.get(cut.get_attr1(), Range("inf", "inf"))
node_range_left = node_range.get_range_left()
node_range_right = node_range.get_range_right()
query_value = cut.get_attr2()
op = cut.get_op()
if column_type == "INT":
query_value = int(query_value)
if node_range_left == "inf" and node_range_right == "inf":
return True
elif node_range_left == "inf":
node_range_right = int(node_range_right)
elif node_range_right == "inf":
node_range_left = int(node_range_left)
else:
node_range_right = int(node_range_right)
node_range_left = int(node_range_left)
elif column_type == "DOUBLE":
query_value = float(query_value)
if node_range_left == "inf" and node_range_right == "inf":
return True
elif node_range_left == "inf":
node_range_right = float(node_range_right)
elif node_range_right == "inf":
node_range_left = float(node_range_left)
else:
node_range_right = float(node_range_right)
node_range_left = float(node_range_left)
# TODO not supported ? - ordered and dictionary encoded to 1, 2, 3, etc
elif column_type == "STR":
return True
elif column_type == "DATE":
query_value = np.datetime64(query_value)
if node_range_left == "inf" and node_range_right == "inf":
return True
elif node_range_left == "inf":
node_range_right = np.datetime64(node_range_right)
elif node_range_right == "inf":
node_range_left = np.datetime64(node_range_left)
else:
node_range_left = np.datetime64(node_range_left)
node_range_right = np.datetime64(node_range_right)
        return self.cpm(node_range_left, op, node_range_right, query_value)
def cpm(self, node_range_left, op, node_range_right, value):
if node_range_left == "inf" and node_range_right == "inf":
return True
if op == ">=":
if node_range_right == "inf":
return True
else:
return value <= node_range_right
elif op == ">":
if node_range_right == "inf":
return True
else:
return value < node_range_right
elif op == "<=":
if node_range_left == "inf":
return True
else:
return node_range_left <= value
elif op == "<":
if node_range_left == "inf":
return True
else:
return node_range_left < value
def evaluate_extended_against_node_metadata(self, config, cut):
return self.categorical_mask_extended.get(cut, 1) == 1
| true |
0a276a72bd3bac6cc889ebb1ed0fe84fca5e37a6
|
Python
|
pleimi/Cuenta_regresiva_otros_scripts
|
/list.py
|
UTF-8
| 1,334 | 4.0625 | 4 |
[] |
no_license
|
#demo_list = [1, "hello", 2.5, True, [1, 2, 3]]  # here there is a list inside another list
#print(dir(demo_list))
#print(demo_list)
#colors = (["red", "green", "blue"])  # prints a list []
# print(colors)
# number_list = list((1, 2, 3, 4))  # this is a tuple, immutable data
# print(number_list)
# print(type(number_list))  # shows the type of the variable
estadistica = list(range(1, 109))  # gives the numbers up to, but not including, the last one
print(estadistica)
numero_ganador = input("the winning number is: ")
print(numero_ganador)
estadistica.insert(0, int(numero_ganador))  # insert "numero_ganador" at index 0
estadistica.pop()  # removes the last element
print(estadistica)
numero_ganador = input("the winning number is: ")
print(numero_ganador)
estadistica.insert(0, int(numero_ganador))  # insert "numero_ganador" at index 0
estadistica.pop()  # removes the last element
print(estadistica)
numero_ganador = input("the winning number is: ")
print(numero_ganador)
estadistica.insert(0, int(numero_ganador))  # insert "numero_ganador" at index 0
estadistica.pop()  # removes the last element
print(estadistica)
#estadistica[0] = 108  # change the element at index 0 to 108
# print(len(ruleta))
#print('red' in colors)  # is 'red' in the variable colors? True
#colors[1] = "yelow"  # change index 1 to yellow
#print(colors)
| true |
4db9e98240cea4aa75f3ee4654bc5f33fd668120
|
Python
|
VictorBro/Introduction_To_Algorithms_CLRS_solutions
|
/10_2_7_reverse_list.py
|
UTF-8
| 976 | 3.84375 | 4 |
[] |
no_license
|
class Node:
def __init__(self, val, next=None):
self.key = val
self.next = next
class LinkedList:
def __init__(self):
self.head = None
def insert(self, val):
new_node = Node(val)
if self.head is None:
self.head = new_node
else:
new_node.next = self.head
self.head = new_node
def reverse(self):
prev = None
curr = self.head
while curr is not None:
next_node = curr.next
curr.next = prev
prev = curr
curr = next_node
self.head = prev
def __str__(self):
L = []
x = self.head
while x is not None:
L.append(x.key)
x = x.next
return str(L)
def main():
L = LinkedList()
L.insert(1)
L.insert(2)
L.insert(3)
L.insert(4)
L.insert(5)
print(L)
L.reverse()
print(L)
if __name__ == "__main__":
main()
| true |
7ec8eb2b4f893fa850e6bd33ceeb2371cdd46f43
|
Python
|
ZongweiZhou1/Hope
|
/model/loss.py
|
UTF-8
| 4,381 | 3.046875 | 3 |
[] |
no_license
|
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
class TripletLoss(object):
def __init__(self, margin=None):
self.margin = margin
if margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
self.ranking_loss = nn.SoftMarginLoss()
def __call__(self, dist_ap, dist_an):
y = Variable(dist_an.data.new().resize_as_(dist_an.data).fill_(1))
if self.margin is not None:
loss = self.ranking_loss(dist_an, dist_ap, y)
else:
loss = self.ranking_loss(dist_an - dist_ap, y)
return loss
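# With target y = 1, MarginRankingLoss computes max(0, margin - (dist_an - dist_ap)),
# i.e. negatives are pushed at least `margin` farther from the anchor than positives.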
def normalize(x, axis=-1):
""" Normalizing to unit length along the specified dimension.
:param x:
:param axis:
:return:
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def euclidean_dist(x, y):
    """Pairwise Euclidean distance between the rows of x and y.
    Uses the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b, computed in batch.
    """
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist.addmm_(1, -2, x, y.t())  # dist = 1 * dist + (-2) * x @ y.t()
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist
def hard_example_mining(dist_mat, labels, return_inds=False):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
labels: pytorch LongTensor, with shape [N]
return_inds: whether to return the indices. Save time if `False`(?)
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
"""
assert dist_mat.ndimension() == 2
assert dist_mat.size(0) == dist_mat.size(1)
N = dist_mat.size(0)
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
tmp = Variable(dist_mat.data.new().resize_as_(dist_mat.data).fill_(1e4))
dist_ap, p_inds = torch.max(dist_mat - is_neg.float() * tmp, 1, keepdim=False)
dist_an, n_inds = torch.min(dist_mat + is_pos.float() * tmp, 1, keepdim=False)
if return_inds:
return dist_ap, dist_an, p_inds, n_inds
return dist_ap, dist_an
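# Minimal usage sketch (hypothetical values): 4 embeddings from 2 identities.
#   feats = torch.rand(4, 128)
#   dist_ap, dist_an = hard_example_mining(euclidean_dist(feats, feats),
#                                          torch.tensor([0, 0, 1, 1]))
#   # dist_ap[i] / dist_an[i] hold the hardest positive / negative distance for anchor i.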
def global_loss(tri_loss, global_feat, labels, normalize_feature=True):
"""
Args:
tri_loss: a `TripletLoss` object
global_feat: pytorch Variable, shape [N, C]
labels: pytorch LongTensor, with shape [N]
normalize_feature: whether to normalize feature to unit length along the
Channel dimension
Returns:
loss: pytorch Variable, with shape [1]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
==================
For Debugging, etc
==================
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
dist_mat: pytorch Variable, pairwise euclidean distance; shape [N, N]
"""
if normalize_feature:
global_feat = normalize(global_feat, axis=-1)
# shape [N, N]
dist_mat = euclidean_dist(global_feat, global_feat)
dist_ap, dist_an, p_inds, n_inds = hard_example_mining(
dist_mat, labels, return_inds=True)
loss = tri_loss(dist_ap, dist_an)
return loss, p_inds, n_inds, dist_ap, dist_an, dist_mat
if __name__ == '__main__':
x = torch.rand(8, 128)
triplet_loss = TripletLoss(margin=1)
labels = torch.tensor([-1, 0, 1, 1, 0, 2, 2, -2]).long()
loss, p_inds, n_inds, dist_ap, dist_an, dist_mat = global_loss(triplet_loss, x, labels)
print(loss)
print(p_inds)
print(n_inds)
print(dist_ap)
print(dist_an)
print(dist_mat)
| true |
3f87072f4c12f9109bc57784e4402987947f5aba
|
Python
|
tkaczk/Bootcamp-Kodilla-2020
|
/Projects/profits.py
|
UTF-8
| 305 | 3.296875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 21:21:26 2020
@author: tkacz
"""
items= [
['456_Longneck', 100000, 1000, 99],
['454_Bier', 25000, 1000, 180],
['155_Bordeaux', 50000, 1000, 150]
]
cost = [(item[1]*item[3])/1000 for item in items]
print(cost)
| true |
41c5aaf87f9a9d873db20d3d4bcc250565e70b83
|
Python
|
Sunnysuh99/HelloPython03
|
/untitled/lab02.py
|
UTF-8
| 2,068 | 3.609375 | 4 |
[] |
no_license
|
#19. Leap year calculation (2012 is a leap year)
year = int(input('Enter the year you want to check: '))
if((year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)):
    print('%d is a leap year' % year)
else:
    print('%d is not a leap year' % year)
#20. Lottery issue
import random
lotto = str(random.randint(100, 999))
print(lotto)
lucky = str(input('Enter 3 lottery digits! \n'))
match = 0
if lotto[0] == lucky[0] or lotto[0] == lucky[1] or lotto[0] == lucky[2]:
    match += 1
if lotto[1] == lucky[0] or lotto[1] == lucky[1] or lotto[1] == lucky[2]:
    match += 1
if lotto[2] == lucky[0] or lotto[2] == lucky[1] or lotto[2] == lucky[2]:
    match += 1
msg = 'No match! Better luck next time!'
if match == 3:
    msg = 'All three match! Prize: 1,000,000 won!'
if match == 2:
    msg = 'Two match! Prize: 10,000 won!'
if match == 1:
    msg = 'One match! Prize: 1,000 won!'
print(msg)
#21. Multiplication table input
number = int(input('Enter a number from 1 to 9: '))
if 1 <= number <= 9:
    for i in range(1, 10):
        print('%d x %d = %d' % (number, i, number * i))
else:
    print('Invalid input.')
#22. Lowercase --> uppercase
text = input('Enter a lowercase string: ')
if text.islower():
    print(text.upper())
else:
    print('Invalid input.')
#23. Number guessing
num1 = int(input('Enter a number between 1 and 100: '))
num2 = random.randint(1, 100)  # target number to guess (chosen at random)
if num1 > num2:
    print('Your guess is too high')
elif num1 < num2:
    print('Your guess is too low')
else:
    print('Bingo, you guessed the number')
#24. Multiplication Table
#25. Credit card lookup and bank info output
# Reference data (card prefix -> issuer):
# 35 (JCB card)
#   3563 17 NH Nonghyup Card
#   3569 01 Shinhan Card
#   3569 12 KB Kookmin Card
# 4 (Visa card)
#   4048 25 BC Card
#   4386 76 Shinhan Card
#   4579 73 Kookmin Bank
# 5 (MasterCard, Maestro)
#   5155 94 Shinhan Card
#   5243 53 KEB Card
#   5409 26 Kookmin Bank
#26. Resident registration number validity check
| true |
d2e41daaa90d2145cc8dc94e5ec96d29d4a66320
|
Python
|
jorpramo/CALLES
|
/busqueda.py
|
UTF-8
| 4,932 | 2.609375 | 3 |
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'jpradas'
import wikipedia
import settings
from collections import Counter
from fastkml import kml
import operator
import csv, re, os
import genderator
import pypyodbc
from nltk.corpus import stopwords
from nltk import word_tokenize
import nltk
import time
from google import google
def procesa_KML(fichero):
f = open(fichero, 'r')
doc=f.read()
k = kml.KML()
k.from_string(doc)
calles = list(k.features())
calles[0].features()
list_calles = list(calles[0].features())
list_detallado=list(list_calles[0].features())
#for i in range(0,len(list_calles)):
for i in range(0,10):
print(list_detallado[i].name)
def categoriza(values, searchFor):
resultado={}
for k in values:
busqueda=re.findall(values[k].lower(),searchFor)
if (len(busqueda)>0):
resultado[k]=resultado.get(k,0) + len(busqueda)
if len(resultado)==0:
maximo=""
else:
maximo=max(resultado.items(), key=operator.itemgetter(1))[0]
#print(max(resultado.items(), key=operator.itemgetter(1))[0])
return maximo
# def procesa_sql(municipio, provincia):
# conn = pypyodbc.connect("DRIVER={SQL Server};SERVER=DHARMA\SQLEXPRESS;UID=loginR;PWD=loginR;DATABASE=CARRERS")
# cur = conn.cursor()
# cur.execute("SELECT nvia FROM VIAS where CMUM='%s' and CPRO='%s' and SUBCAT is null and CAT is null" % (municipio, provincia))
# categorias=settings.categorias
# for row in cur.fetchall():
# cat_temp=categoriza(categorias,row["nvia"].lower().strip())
# if cat_temp!="":
# print(row["nvia"])
# sql="UPDATE VIAS set SUBCAT=? where nvia=?"
# cur.execute(sql, (cat_temp,row["nvia"])) #Actualizamos de todos los municipios
# cur.commit()
# cur.close()
# conn.close()
def procesa_sql(municipio, provincia,tipo):
conn = pypyodbc.connect("DRIVER={SQL Server};SERVER=DHARMA\SQLEXPRESS;UID=loginR;PWD=loginR;DATABASE=CARRERS")
cur = conn.cursor()
cur.execute("SELECT nvia FROM VIAS where CMUM='%s' and CPRO='%s' and SUBCAT is null and CAT is null" % (municipio, provincia))
#cur.execute("SELECT nvia FROM VIAS where CMUM='%s' and CPRO='%s' and genero is null and ACTUALIZADO IS NULL" % (municipio, provincia))
categorias=settings.categorias
for row in cur.fetchall():
cat_temp=""
calle=row["nvia"].lower().strip().replace("'","")
if (tipo=="nombre"):
cat_temp=categoriza(categorias,calle)
if (tipo=="google"):
try:
time.sleep(5)
num_page = 1
search_results = google.search(calle + " -calle -carrer -kalea", num_page,'es')
cad=[]
[cad.append(result.description) for result in search_results]
cad=' '.join(cad)
cat_temp=categoriza(categorias,cad)
if (cat_temp==""):
spanish_stops = set(stopwords.words('spanish'))
cad=word_tokenize(cad)
texto=[w.lower() for w in cad if w not in spanish_stops and len(w)>4]
print(nltk.FreqDist(texto).most_common(10))
except:
print("No se ha podido acceder a Google")
if (tipo=="wiki"):
try:
result=wikipedia.search(calle, results=1)
cad=result[:1]
try:
pag=wikipedia.page(cad)
cat_temp=categoriza(categorias,pag.content[:200].lower())
except:
pass
except:
print("No se ha podido acceder a la wikipedia")
genero=''
if (tipo=="genero"):
guesser = genderator.Parser()
answer = guesser.guess_gender(calle)
if answer:
print(answer)
genero=answer['gender']
else:
print('Name doesn\'t match')
if cat_temp!="":
print(row["nvia"],cat_temp)
sql="UPDATE VIAS set SUBCAT=?, ACTUALIZADO=GETDATE() where nvia=?"
cur.execute(sql, (cat_temp,row["nvia"])) #Actualizamos de todos los municipios
cur.commit()
if cat_temp=="" and (genero=="Male" or genero=="Female"):
sql="UPDATE VIAS set ACTUALIZADO=GETDATE(), GENERO=? where nvia=?"
cur.execute(sql, (genero,row["nvia"])) #Actualizamos de todos los municipios
cur.commit()
cur.close()
conn.close()
ciudades=settings.ciudades
# for c in ciudades:
# if c['tipo']=='KML':
# procesa_KML(c['fichero'])
#
# if c['tipo']=="CSV":
# procesa_csv(c['fichero'],c['nombre'],c['categoria'])
localidades=[['019','08'],['020','48'],['091','41'],['079','28'],['250','46']]
for i in localidades:
procesa_sql(i[0],i[1],"google")
| true |
a0a3bcfd64d3bef6e65c235546bb944cc9377282
|
Python
|
RooshhilPatel/AsthmaAnalytics
|
/regression_model.py
|
UTF-8
| 3,480 | 3.53125 | 4 |
[] |
no_license
|
import csv
import warnings
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import *
warnings.filterwarnings("ignore") # ignore warnings
# Read the file given and save the data in arrays
def read_CSV(filename, var_title):
variable = []
percents = []
# read csv file and extract 2nd and 3rd columns into lists
with open(filename, 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
variable.append(row[var_title])
percents.append(float(row['Percent']))
return variable, percents
# Plot scatter points on percents
def build_scatter_plot(percents, num_categories, categories):
plt.figure(figsize=(9,7)) # set size of plot 9" x 7"
# Scatter plot for each category
for x in range(num_categories):
plt.plot([x]*53, percents[x::num_categories], 'o', label="{0}".format(categories[x]))
# Plot regression line on averages
def build_regression_plot(percents, num_categories, degrees):
x_axis = np.arange(num_categories) # helper array for categories
# Get average percents of all categories
avg_percents = []
for x in range(num_categories):
avg_percents.append(sum(percents[x::num_categories]) / 53) # find averages of percents
# Coorelation Coefficient
print("R^2 = ", end='') # print R^2
print(pow(linregress(x_axis, avg_percents)[2],2)) # print coorelation coefficient value
# Fit and plot the regression line
fit = sp.polyfit(x_axis, avg_percents, degrees) # fit the polynomial line to the average of all sets
lin_sp = sp.linspace(0, num_categories-1, 80) # smooth out the line by mapping to more points
plt.plot(lin_sp, sp.polyval(fit,lin_sp), "r-", label="Regression Line")
# Display currently built plot
def display_plot(title, x_label, categories):
# Labeling and showing the plot
plt.title("{0}".format(title)) # set title
plt.xlabel("{0}".format(x_label)) # set x label
plt.ylabel("Asthma Prevalence (percent)") # set y label
labels = categories # label array
    plt.xticks(np.arange(len(categories)))  # set tick positions
    plt.gca().set_xticklabels(labels)  # replace the x-axis tick labels with ours
plt.legend() # invoke legend on labels
plt.savefig("{0}.png".format(title)) # save plot
plt.show() # display plot
# Read the CSVs into arrays for ML later
i, p1 = read_CSV('incomeByState.csv', 'Income')
a, p2 = read_CSV('ageByState.csv', 'Age')
r, p3 = read_CSV('raceByState.csv', 'Race')
s, p4 = read_CSV('sexByState.csv', 'Sex')
# Income Plot
build_scatter_plot(p1, 5, i[0:5])
build_regression_plot(p1, 5, 3)
display_plot("Polynomial Regression On Income", "Income Brackets", i[0:5])
# Age Plot
build_scatter_plot(p2, 6, a[0:6])
build_regression_plot(p2, 6, 3)
display_plot("Polynomial Regression On Age", "Age Brackets", a[0:6])
# Race Plot
build_scatter_plot(p3, 4, r[0:4])
# build_regression_plot(p3, 4, 2)
display_plot("Plot Of Race", "Race Brackets", r[0:4])
# Sex Plot
build_scatter_plot(p4, 2, s[0:2])
# build_regression_plot(p4, 2, 1)
display_plot("Plot Of Gender", "Sex Brackets", s[0:2])
| true |
b586c4fa051c794fe6ddf32757aa9d47bcf08e55
|
Python
|
kishao1996/ashu_music_deprecated
|
/bilibili/singer.py
|
UTF-8
| 666 | 2.59375 | 3 |
[] |
no_license
|
# encoding: utf-8
from moviepy.editor import *
from common.logger import logger
from settings import PWD
class Singer(object):
def __init__(self):
pass
def sing(self, path):
clip = AudioFileClip(path)
clip.preview()
clip.close()
def sing_all():
with open(PWD + '/song_list.txt', 'r') as f:
songlist = f.read()
for item in songlist.split('\n'):
s = item.split(' ')
if len(s) == 0:
continue
song_name = s[0] if len(s) == 1 else s[1]
logger.info('sing {} ...'.format(song_name))
Singer().sing('output/{}.m4a'.format(song_name))
| true |
1531fe56d1c96a3e78e360a7db1f3cb34f490202
|
Python
|
ROB7-StayHumble/golfcart-sensorfusion
|
/sensorfusion/utils/img_utils.py
|
UTF-8
| 1,349 | 3.203125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
import cv2
import numpy as np
colors = {
'white':(255, 255, 255),
'black':(0,0,0),
'green':(0, 255, 0),
'blue':(0,255,255)
}
def angle_from_box(img,box):
h,w = img.shape[:2]
(xA, yA, xB, yB) = box
center_y, center_x = (h-(yA+(yB-yA)/2),(xA+(xB-xA)/2)-w/2)
if center_x == 0:
angle = 0
else: angle = np.round(np.rad2deg(np.arctan2(center_y,center_x))-90.0,decimals=1)
return angle
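# The angle is measured from the bottom-centre of the image: 0 for a box straight ahead,
# positive values for boxes to the left, negative values for boxes to the right.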
def plot_boxes(img, boxes, color='white'):
global colors
for box in boxes:
(xA, yA, xB, yB) = box
angle = angle_from_box(img,box)
cv2.rectangle(img, (xA, yA), (xB, yB), colors[color], 5)
cv2.putText(img, str(angle), (xA, yA-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, colors[color], 2)
return img
def plot_polygons(img, polygons, color='white'):
global colors
for polygon in polygons:
#print(polygon)
pts = np.int32(polygon).reshape((-1,1,2))
cv2.polylines(img,[pts],True,colors[color],5)
return img
def downsample_image(img, desired_width):
h,w = img.shape[:2]
small_to_large_image_size_ratio = desired_width/w
img = cv2.resize(img,
(0,0), # set fx and fy, not the final size
fx=small_to_large_image_size_ratio,
fy=small_to_large_image_size_ratio,
interpolation=cv2.INTER_LINEAR)
return img
| true |
640f58c00c45f2362a33ae8ec43c81c9102ad519
|
Python
|
catapult-project/catapult
|
/dashboard/dashboard/api/utils.py
|
UTF-8
| 1,032 | 2.671875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import datetime
def ParseBool(value):
"""Parse a string representation into a True/False value."""
if value is None:
return None
value_lower = value.lower()
if value_lower in ('true', '1'):
return True
elif value_lower in ('false', '0'):
return False
else:
raise ValueError(value)
def ParseISO8601(s):
if s is None:
return None
# ISO8601 specifies many possible formats. The dateutil library is much more
# flexible about parsing all of the possible formats, but it would be annoying
# to third_party it just for this. A few formats should cover enough users.
try:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
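# Example: both '2018-01-02T03:04:05.678900' and '2018-01-02T03:04:05' parse here; other
# ISO 8601 variants (timezone offsets, date-only strings) raise ValueError.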
| true |
af3d093aa64d3b7d72c8ac4c6882a6290c13689f
|
Python
|
E3798211/ITASposter
|
/CQIPrediction/script_conservative_cqi.py
|
UTF-8
| 675 | 2.796875 | 3 |
[] |
no_license
|
#!/usr/bin/python3
# Experiment setup
step = 10 # ms
experiment_time = 1000 # ms
import re
# read real CQI
fin = open("CQI_current", "r")
time_cqi = fin.read()
fin.close()
time_cqi_list = re.findall(r'\d+', time_cqi)
time_cqi_list = [int(i) for i in time_cqi_list]
time = 0
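# time_cqi_list is a flat [t0, cqi0, t1, cqi1, ...] sequence, hence the steps of 2 below:
# CQI is lowered by 3 for timestamps before 400 ms and by 8 up to 1000 ms, clamped at 0.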
while time_cqi_list[time] < 400:
time_cqi_list[time + 1] -= 3
if time_cqi_list[time + 1] < 0:
time_cqi_list[time + 1] = 0
time += 2
while time_cqi_list[time] < 1000:
time_cqi_list[time + 1] -= 8
if time_cqi_list[time + 1] < 0:
time_cqi_list[time + 1] = 0
time += 2
i = 0
while i < 400:
print(time_cqi_list[i], time_cqi_list[i + 1])
    i += 2
| true |
b481f7251cddbabd9d358a5e1fdc08d37d9292eb
|
Python
|
wulrahman/cv
|
/pages/models.py
|
UTF-8
| 2,005 | 2.5625 | 3 |
[] |
no_license
|
from django.db import models
from os.path import splitext, basename
import uuid
# Create your models here.
def image_upload_to(instance, filename):
base, ext = splitext(filename)
newname = "%s%s" % (uuid.uuid4(), ext)
return "static/image/{}".format(newname)
class Experience(models.Model):
job_title = models.CharField(max_length=200)
company_name = models.CharField(max_length=200)
job_start = models.DateTimeField('Job Start')
job_end = models.DateTimeField('Job End')
# company_logo = models.URLField(max_length=200)
image = models.ImageField(default="", upload_to=image_upload_to)
class Skill(models.Model):
skill_name = models.CharField(max_length=200)
SKILL_LEVEL = [
('AV', 'Advanced'),
('IM', 'Intermediate'),
('B', 'Beginner')
]
SKILL_TYPE = [
('E', 'Endorsements'),
('IK', 'Industry Knowledge'),
('TT', 'Tools & Technologies'),
('IS', 'Interpersonal Skills'),
('L', 'Languages'),
('OS', 'Other Skills')
]
skill_type = models.CharField(
max_length=2,
choices=SKILL_TYPE,
default='OS',
)
skill_level = models.CharField(
max_length=2,
choices=SKILL_LEVEL,
default='B',
)
class Education(models.Model):
academy_name = models.CharField(max_length=200)
course_name = models.CharField(max_length=200)
course_grade = models.CharField(max_length=200, default="")
course_start = models.DateTimeField('Job Start')
course_end = models.DateTimeField('Job End')
# academy_logo = models.URLField(max_length=200)
image = models.ImageField(default="", upload_to=image_upload_to)
COURSE_TYPE = [
('Bachelor\'s degree', 'Bachelor\'s degree'),
('Master\'s degree', 'Master\'s degree'),
('AS Level', 'AS Level'),
('GCSE', 'GCSE')
]
course_type = models.CharField(
max_length=20,
choices=COURSE_TYPE,
default='GCSE',
)
| true |
e1a5ed0b4cd219baff061a4f2f0f025ae624bdbb
|
Python
|
netaz/dirty-rl
|
/dqn.py
|
UTF-8
| 19,512 | 2.75 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""Deep Q-Learning
Based on the code cited below with various changes to make it more conformant with the DQN paper.
https://github.com/pytorch/tutorials/blob/master/intermediate_source/reinforcement_q_learning.py
Also described in https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html.
Changes:
- The Q-value function input (observation) is composed of 4 RGB frames, instead of a 1D
difference frame.
- The replay-buffer is more sophisticated because it supports extracting the history-context
of a sampled frame (i.e. when sampling a frame we also get the 3 temporally-neighboring
frames. The buffer implementation is from the Berkeley CS294 RL course.
- Gradient debugging code
"""
import os
os.environ["OMP_NUM_THREADS"] = "1"
import gym
import torch
import torch.nn as nn
import itertools
from utils.exploration import LinearExplorationSchedule
from utils.utils import GameProgressBar, load_checkpoint, save_checkpoint, \
get_device, ExponentialMovingAvg, dtype
from utils.memory import ReplayBuffer
from utils.log import TBWrapper
from utils.preprocess import pre_process_game_frame as pre_process_atari  # name used throughout this file
import torchviz
import torch.nn.functional as F
import numpy as np
import random
experiment_name = "dqn-v10_huber"
writer = TBWrapper(experiment_name)
class QNet(nn.Module):
def __init__(self, n_actions, n_input_ch, input_shape, bias=True, bn=True):
"""Q-value function approximator: compute the expected value of an input state.
Given an input state, produce a tensor with an expected value for each action.
The input to the DNN is a group of 80x80 image frames produced by the environment,
and pre-processed to scale and crop.
Args:
n_actions: (int) the number of actions in the action-space
n_input_ch: (int) number of input channels
input_shape: (tuple) the shape of the input
bias: (boolean) add bias parameters to the convolution and linear layers.
bn: (boolean)
This is a copy of the model provided here:
https://github.com/pytorch/tutorials/blob/master/intermediate_source/reinforcement_q_learning.py
Changes:
- configurable BN and bias settings
- gradient hooks for debug
"""
super().__init__()
self.n_actions = n_actions
self.conv1 = nn.Conv2d(n_input_ch, 16, kernel_size=3, stride=2, bias=bias)
self.bn1 = nn.BatchNorm2d(16) if bn else nn.Identity()
self.relu_conv1 = nn.ReLU()
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2, bias=bias)
self.bn2 = nn.BatchNorm2d(32) if bn else nn.Identity()
self.relu_conv2 = nn.ReLU()
def conv2d_size_out(size, kernel_size=3, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(input_shape[0]))
convh = conv2d_size_out(conv2d_size_out(input_shape[1]))
linear_input_size = convw * convh * 32
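        # For the 80x80 pre-processed frames described above this gives 80 -> 39 -> 19,
        # so linear_input_size = 19 * 19 * 32 = 11552.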
self.fc1 = nn.Linear(linear_input_size, 256, bias=bias)
self.relu_fc1 = nn.ReLU()
self.fc2 = nn.Linear(256, n_actions, bias=bias)
# Various gradient hooks - enable if you are debugging
# self.relu_fc1_hook_handle = self.relu_fc1.register_backward_hook(relu_fc1_backward_hook)
# self.fc1_hook_handle = self.fc1.register_backward_hook(fc1_backward_hook)
# self.fc2_hook_handle = self.fc2.register_backward_hook(fc2_backward_hook)
# self.fc2.weight.register_hook(param_backward_hook)
def forward(self, x):
x = x / 255. #- 0.5
x = self.relu_conv1(self.bn1(self.conv1(x)))
x = self.relu_conv2(self.bn2(self.conv2(x)))
x = x.flatten(start_dim=1)
x = self.relu_fc1(self.fc1(x))
# Debug placeholder to identify empty feature-maps
#if x.data.abs().sum() == 0:
# debug = True
x = self.fc2(x)
return x
def param_backward_hook(grad_output):
print(grad_output[0].data.abs().sum(), grad_output[0].data.sum())
# this is per: https://arxiv.org/pdf/1312.5602.pdf
# see also: https://github.com/transedward/pytorch-dqn/blob/master/dqn_model.py
def relu_fc1_backward_hook(module, grad_input, grad_output):
print(f"relu_fc1: grad_input = {grad_input[0].data.sum()} grad_output = {grad_output[0].data.sum()}")
# if grad_input[0].data.abs().sum() == 0:
# print("in zero - fc1")
# if grad_output[0].data.abs().sum() == 0:
# print("out zero - fc1")
def fc1_backward_hook(module, grad_input, grad_output):
print(f"fc1: grad_input = {grad_input[0].data.sum()} grad_output = {grad_output[0].data.sum()}")
# if grad_input[0].data.abs().sum() == 0:
# print("in zero - fc1")
# if grad_output[0].data.abs().sum() == 0:
# print("out zero - fc1")
def fc2_backward_hook(module, grad_input, grad_output):
# if grad_input[0].data.abs().sum() == 0:
# print("in zero - fc2")
# if grad_output[0].data.abs().sum() == 0:
# print("out zero - fc2")
print(grad_input[0].shape)
print(grad_output[0].shape)
print(f"fc2: grad_input = {grad_input[0].data.sum()} grad_output = {grad_output[0]}")
def epsilon_greedy_action(state, eps, n_actions, q_behavior):
"""Select an action, under an epsilon-greedy exploration schedule.
Arguments:
state: (torch.Tensor) observation
eps: (float) epsilon-greedy threshold value
n_actions: (int) number of actions to choose from
q_behavior: (nn.Module) Q-value model used for acting
Supports discrete action-spaces only.
"""
if np.random.random() <= eps:
# Random
action = random.randrange(n_actions)
action = torch.tensor([[action]], device=get_device(), dtype=torch.long)
else:
# Greedy
with torch.no_grad():
state_action_values = q_behavior(state.type(dtype))
# Take argmax of the action row-tensor - this is the index of the
# action with the largest expected value for state (s)
action = state_action_values.max(1)[1].view(1, 1)
return action
def is_heating_up(replay_buf, cnt_transitions):
"""Are we still heating up (acting randomly).
During heat-up we act randomly and collect trajectories in
the replay buffer.
Args:
replay_buf: replay buffer
cnt_transitions: number of transitions/steps/iterations
played so far.
"""
return cnt_transitions < args.heatup_transitions or \
not replay_buf.can_sample(args.batch_size)
def run_episode(episode_number,
replay_buf,
cnt_transitions,
q_behavior,
q_target,
optimizer,
criterion,
exploration,
progress_bar):
episode_reward = 0
discount = 0.99
s0 = env.reset()
s0 = pre_process_atari(s0)
for t in itertools.count(1):
if args.render:
env.render()
heating_up = is_heating_up(replay_buf, cnt_transitions + t)
eps = 1. if heating_up else exploration.step()
last_idx = replay_buf.store_frame(s0)
recent_observations = replay_buf.encode_recent_observation()
recent_observations = torch.from_numpy(recent_observations).type(dtype).unsqueeze(0)
action = epsilon_greedy_action(recent_observations, eps, q_behavior.n_actions, q_behavior)
s1, reward, done, _ = env.step(encode_action(action.item()))
s1 = pre_process_atari(s1)
replay_buf.store_effect(last_idx, action, reward, done)
s0 = s1
episode_reward += reward
if not heating_up and t % 4 == 0:
train_on_batch(args.batch_size, q_behavior, q_target, replay_buf,
discount, optimizer, criterion, episode_number) # cnt_transitions + t)
if reward != 0:
# Game is done, but episode may still be in progress
progress_bar(reward)
if done:
break
writer.add_scalar('epsilon', eps, episode_number)
writer.add_scalar('episode_reward', episode_reward, episode_number)
return episode_reward, t
def train_on_batch(batch_size, q_behavior, q_target, replay_buf, discount, optimizer, criterion, episode_number):
optimize_model(batch_size, q_behavior, q_target, replay_buf, discount, optimizer, criterion, episode_number)
#debug = writer.log_parameters(episode_number, q_behavior, ["conv1.weight", "conv2.weight"])
def optimize_model(batch_size, q_behavior, q_target, memory, discount, optimizer, criterion, episode_number):
if not memory.can_sample(batch_size):
return
obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = memory.sample(batch_size)
# Convert numpy nd_array to torch variables for calculation
start_state_batch = torch.from_numpy(obs_batch).type(dtype)
action_batch = torch.from_numpy(act_batch).long()
reward_batch = torch.from_numpy(rew_batch).to(get_device())
next_states_batch = torch.from_numpy(next_obs_batch).type(dtype)
#not_done_mask = torch.from_numpy(1 - done_mask).type(dtype)
is_terminal = torch.from_numpy(done_mask).bool()
# Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for
# detailed explanation). This converts batch-array of Transitions
# to Transition of batch-arrays.
#batch = [*zip(*transitions)]
#start_state_batch = torch.cat(batch[0]).type(dtype) # we convert to float only when we must - in order to save memory
# action_batch = torch.cat(batch[1])
# reward_batch = torch.tensor(batch[2], device=get_device())
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken. These are the actions which would've been taken
# for each batch state according to policy_net
start_state_values = q_behavior(start_state_batch).to(get_device())
action_mask = F.one_hot(action_batch.squeeze(), q_behavior.n_actions).to(get_device())
predicted_start_state_Q_values = (start_state_values * action_mask).sum(dim=1)
#predicted_start_state_Q_values = start_state_values.gather(1, action_batch)
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
# non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
# batch[3])), device=device, dtype=torch.bool)
# next_states_batch = torch.cat([s for s in batch[3]
# if s is not None])
#next_states_batch = torch.cat(batch[3]).type(dtype)
#is_terminal = batch[4]
#non_final_mask = torch.tensor([not is_terminal for is_terminal in batch[4]], device=device, dtype=torch.bool)
with torch.no_grad():
# Compute V(s_{t+1}) for all next states.
# Expected values of actions for next_states_batch are computed based
# on the "older" target_net; selecting their best reward with max(1)[0].
# This is merged based on the mask, such that we'll have either the expected
# state value or 0 in case the state was final.
next_state_action_values = q_target(next_states_batch).to(get_device())
next_state_Q_values = next_state_action_values.max(1)[0].detach()
next_state_Q_values[is_terminal] = 0
# Compute the expected Q values
discount = 0.99
target_state_values = next_state_Q_values * discount + reward_batch
# Compute Huber loss
#loss = criterion(predicted_start_state_Q_values, target_state_values.unsqueeze(1))
loss = criterion(predicted_start_state_Q_values, target_state_values)
#torchviz.make_dot(loss, params=dict(q_behavior.named_parameters())).render("dqn_backward-3", format="png")
writer.add_scalar('loss', loss, episode_number)
# Optimize the model
optimizer.zero_grad()
loss.backward()
for name, param in q_behavior.named_parameters():
if param.grad.data.abs().sum() == 0:
debug = True
# clip gradients
param.grad.data.clamp_(-1, 1)
optimizer.step()
def update_target_network(q_behavior, q_target, rate=1.):
for target_p, behavior_p in zip(q_target.parameters(), q_behavior.parameters()):
assert not torch.all(torch.eq(target_p, behavior_p))
for target_p, behavior_p in zip(q_target.parameters(), q_behavior.parameters()):
target_p.data = (1. - rate) * target_p.data + rate * behavior_p.data
for target_p, behavior_p in zip(q_target.parameters(), q_behavior.parameters()):
assert torch.all(torch.eq(target_p, behavior_p))
#q_target.load_state_dict(q_behavior.state_dict())
#pass
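# Note: with the default rate=1.0 the loop above performs a hard copy of the behavior
# network into the target network; a rate < 1.0 would give a soft (Polyak) update,
# target <- (1 - rate) * target + rate * behavior.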
"""
v0 vs v4: v0 has repeat_action_probability of 0.25 (meaning 25% of the time the previous action will be used instead of the new action), while v4 has 0 (always follow your issued action)
Deterministic: a fixed frameskip of 4, while for the env without Deterministic, frameskip is sampled from (2,5) (code here)
"""
env = gym.make("Pong-v0") # ("PongDeterministic-v4") # Pong-ram-v0 #"BreakoutDeterministic-v4"
"""
For Gym Pong n_actions == 6, which includes 4 NoOps, and 2 motion actions.
I don't know why they did that, but we don't want to be biased to NoOps.
We will use:
0 - noop
2 - up
5 - down
See: https://github.com/openai/atari-py/blob/master/doc/manual/manual.pdf
"""
#assert env.env.game == 'pong'
#actions_encoding = {0: 0, 1: 2, 2: 5}
#actions_encoding = {0: 2, 1: 5}
#n_actions = len(actions_encoding)
def encode_action(a):
"""Map action from 0..n to game-specific semantic action values.
Pong-specific:
For Gym Pong n_actions == 6, which includes 4 NoOps, and 2 motion actions.
I don't know why they did that, but we don't want to be biased to NoOps,
so we constrain the action space to 2 or 3 actions. We will use:
0 - noop
2 - up
5 - down
See: https://github.com/openai/atari-py/blob/master/doc/manual/manual.pdf
"""
assert hasattr(encode_action, 'n_actions')
if encode_action.n_actions == 2:
actions_encoding = {0: 2, 1: 5}
else:
actions_encoding = {0: 0, 1: 2, 2: 5}
return actions_encoding[a]
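# Example: with the 3-action setup, encode_action(0) -> 0 (noop), encode_action(1) -> 2 (up), encode_action(2) -> 5 (down).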
def DQN(args):
# Initialize replay memory D to capacity N
memory = ReplayBuffer(size=args.replay_mem_size, frame_history_len=4)
exploration = LinearExplorationSchedule(args.eps_start, args.eps_end, args.eps_decay)
#exploration = ExponentialExplorationSchedule(args.eps_start, args.eps_end, args.eps_decay)
# Initialize action-value function Q with random weights
D = PRE_PROCESS_OUTPUT_DIM
n_actions = encode_action.n_actions = args.num_actions
q_target = QNet(n_actions=n_actions,
n_input_ch=history_len*n_channels,
input_shape=(D, D)).to(get_device())
q_behavior = QNet(n_actions=n_actions,
n_input_ch=history_len*n_channels,
input_shape=(D, D)).to(get_device())
q_target.eval()
# Freeze target network
for p in q_target.parameters():
p.requires_grad = False
q_behavior.train()
# Copy the weights, so both Q-value approximators initialize the same
q_behavior.load_state_dict(q_target.state_dict())
criterion = nn.MSELoss()
#criterion = nn.SmoothL1Loss() # Huber loss
# “Human-level control through deep reinforcement learning” - rmsprop config
LEARNING_RATE = 0.00025
ALPHA = 0.95
EPS = 0.01
optimizer = torch.optim.RMSprop(q_behavior.parameters(),
lr=LEARNING_RATE, alpha=ALPHA, eps=EPS) # , lr=0.00025, momentum=0.95, eps=0.01)
reward_ema = ExponentialMovingAvg(args.reward_eam_factor)
max_return = -np.inf
cnt_transitions = 0
for episode in itertools.count():
with GameProgressBar(episode) as progress_bar:
episode_return, n_transitions = run_episode(episode,
memory,
cnt_transitions,
q_behavior,
q_target,
optimizer,
criterion,
exploration,
progress_bar)
reward_ema.update(episode_return)
cnt_transitions += n_transitions
if episode % args.target_update_rate == 0:
update_target_network(q_behavior, q_target)
max_return = max(max_return, episode_return)
writer.add_scalar('running_return', reward_ema.value, episode)
# print(f"End of episode {episode} (G={episode_return} "
# f"transitions={n_transitions} max_return={max_return} "
# f"reward_ema={reward_ema.value})")
print(' '.join([f'reward={episode_return:.2f}',
f'running mean={reward_ema.value:.2f}']), end='')
env.close()
# see https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
# The number of frames to input to the DQN model.
# This includes the latest frame from the game, plus some previous frames.
# When history_len is set to 1, we use frame difference as our observation.
history_len = 4
n_channels = 3
from functools import partial
PRE_PROCESS_OUTPUT_DIM = 80
pre_process_atari = partial(pre_process_game_frame,
n_channels=3,
output_shape=(PRE_PROCESS_OUTPUT_DIM, PRE_PROCESS_OUTPUT_DIM))
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument('-b', '--batch-size', default=32, type=int,
help='mini-batch size (default: 32)')
argparser.add_argument('--render', action='store_true',
help='render flag')
argparser.add_argument('--heatup-transitions', default=50000, type=int)
argparser.add_argument('--replay-mem-size', default=500000, type=int)
argparser.add_argument('--learning-freq', default=1, type=int,
                       help='the number of iterations between Q-value training steps')
argparser.add_argument('--target-update-rate', default=10, type=int,
help='the number of episodes between updates of the approximated Q*')
argparser.add_argument('--reward-eam-factor', default=0.01, type=float,
help='reward exponential-moving-average factor (default: 0.01)')
argparser.add_argument('--eps-start', default=1.0, type=float,
help='epsilon-greedy exploration schedule: start value')
argparser.add_argument('--eps-end', default=0.1, type=float,
help='epsilon-greedy exploration schedule: end value')
argparser.add_argument('--eps-decay', default=1000000, type=int,
                       help='the number of iterations over which epsilon is annealed from eps-start to eps-end')
argparser.add_argument('--num-actions', default=3, type=int, choices=(2, 3),
help='the number of actions in the action space')
args = argparser.parse_args()
if __name__ == "__main__":
args = argparser.parse_args()
DQN(args)
| true |
dd3d63f91c71d022509c6ee7c021a16fd79a09ec
|
Python
|
reproducible-agile/reviews-2020
|
/reports/2020-009/computational-workflow/reachability-python/src/visualizer.py
|
UTF-8
| 14,815 | 2.546875 | 3 |
[
"Apache-2.0"
] |
permissive
|
import pandas as pd
import numpy as np
import shutil
import multiprocessing.pool
import matplotlib.dates
import matplotlib.pyplot as plt
import matplotlib.cbook
import matplotlib.cm
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from matplotlib import colors as mcolors
from postgreSQLSuite import *
log = logging.getLogger(__name__)
log.setLevel(DEBUG)
try:
from osgeo import ogr, osr, gdal
except Exception as e:
log.error(f"Error {e} occured while importing osgeo")
pd.options.display.width = 1000
pd.options.display.max_columns=999
pd.options.display.max_rows=100
def _rasterizeTimesliceWorker(df, rasterPath, imagePath, vmin, vmax, dt, xres, yres, perform_rasterization=True):
"""
Timeslices rasterize worker
    Rasters a timeslice based on a pd.DataFrame using GDAL by first converting the timeslice to an
    OGR vector layer and then rasterizing the content to a raster layer using GDAL
    :param df: pd.DataFrame of a single timeslice, with lat, lon and countReachable columns
    :param rasterPath: path to folder where rasters should be stored
    :param imagePath: path to folder where images should be stored
    :param vmin: minimum data value (number of available vehicles) on all to-be-rastered dataframes
    :param vmax: maximum data value (number of available vehicles) on all to-be-rastered dataframes
    :param dt: datetime of the timeslice, used to name the output raster/vector files
:param xres: width of rastered image
:param yres: height of rastered image
:param perform_rasterization: whether or not to raster GDAL layers or just create and store them
"""
driver = ogr.GetDriverByName("ESRI Shapefile")
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
lats = list(df.lat.values)
lons = list(df.lon.values)
values = list(df.countReachable.values)
raster_fn = os.path.join(imagePath, f"{dt.strftime('%d.%m.%y')}-{dt.strftime('%H-%M')}-{xres}-{yres}.tiff")
vector_fn = os.path.join(rasterPath, f"{dt.strftime('%d.%m.%y')}-{dt.strftime('%H-%M')}.shp")
# check if vector layer already exists, otherwise create new one and convert values from df to layer
if not os.path.isfile(vector_fn):
outsrc = driver.CreateDataSource(vector_fn)
outlayer = outsrc.CreateLayer(vector_fn, srs, ogr.wkbPoint)
outlayer.CreateField(ogr.FieldDefn("color_r"), ogr.OFTInteger)
outlayer.CreateField(ogr.FieldDefn("color_g"), ogr.OFTInteger)
outlayer.CreateField(ogr.FieldDefn("color_b"), ogr.OFTInteger)
normalizer = mcolors.Normalize(vmin=vmin, vmax=vmax)
cmap = matplotlib.cm.get_cmap("hot")
for idx in range(len(lats)):
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(float(lats[idx]), float(lons[idx]))
color = cmap(normalizer(values[idx]))
c_r = int(color[0] * 255)
c_g = int(color[1] * 255)
c_b = int(color[2] * 255)
feature = ogr.Feature(outlayer.GetLayerDefn())
feature.SetGeometry(point)
feature.SetField("color_r", c_r)
feature.SetField("color_g", c_g)
feature.SetField("color_b", c_b)
outlayer.CreateFeature(feature)
feature = None # explicitly set feature to None, indicating to OGR that the content should now be stored
outsrc = None # explicitly set vector layer to None, indicating to OGR that the content should now be stored
if perform_rasterization:
NoData_value = 0
# Open the data source and read in the extent
source_ds = ogr.Open(vector_fn)
source_layer = source_ds.GetLayer()
xmin, xmax, ymin, ymax = source_layer.GetExtent()
# Create the destination data source
x_pixel_size = ((xmax - xmin) / xres)
y_pixel_size = ((ymax - ymin) / yres)
target_ds = gdal.GetDriverByName('GTiff').Create(raster_fn, xres, yres, 3, gdal.GDT_Byte)
target_ds.SetGeoTransform((xmin, x_pixel_size, 0, ymax, 0, -y_pixel_size))
# use three bands to encode colors
band1 = target_ds.GetRasterBand(1)
band1.SetNoDataValue(NoData_value)
band2 = target_ds.GetRasterBand(2)
band2.SetNoDataValue(NoData_value)
band3 = target_ds.GetRasterBand(3)
band3.SetNoDataValue(NoData_value)
gdal.RasterizeLayer(target_ds, [1], source_layer, options = ["ATTRIBUTE=color_r", "MERGE_ALG=ADD", "ALL_TOUCHED=TRUE"])
gdal.RasterizeLayer(target_ds, [2], source_layer, options = ["ATTRIBUTE=color_g", "MERGE_ALG=ADD", "ALL_TOUCHED=TRUE"])
gdal.RasterizeLayer(target_ds, [3], source_layer, options = ["ATTRIBUTE=color_b", "MERGE_ALG=ADD", "ALL_TOUCHED=TRUE"])
return dt
def rasterizeTimeslices(timeslices: dict, slice_datetime: datetime.datetime, rasterPath: str, imagePath: str, perform_rasterization=True, xres=1000, yres=1000):
"""
Rasterize timeslices of one day using GDAL
:param timeslices: dictionary containing timeslices in format hour:minute:timeslice
:param slice_datetime: datetime indicating begin of timeslices
:param rasterPath: path to folder where rasters should be stored
:param imagePath: path to folder where images should be stored
:param perform_rasterization: whether or not to raster GDAL layers or just create and store them
:param xres: width of rastered image
:param yres: height of rastered image
"""
log.info(f"Rasterizing timeslices")
if not os.path.isdir(rasterPath):
log.warning(f"{rasterPath} does not exist, attempting to create folder..")
os.mkdir(rasterPath)
if not os.path.isdir(imagePath):
log.warning(f"{imagePath} does not exist, attempting to create folder..")
os.mkdir(imagePath)
maxAgents = 0
minAgents = 4000
for hour in sorted(list(timeslices.keys())):
for minute in timeslices[hour]:
minAgents = min(minAgents, timeslices[hour][minute][timeslices[hour][minute].countReachable > 3].countReachable.min())
maxAgents = max(maxAgents, timeslices[hour][minute].countReachable.max())
multproc = False
hours = sorted(timeslices.keys())
minutes = range(0, 60, 10)
global parsed
parsed = 0
maxParsed = len(hours)*len(minutes)
steps = 10
iter = int(maxParsed / steps)
def callback(result):
dt = result
c_hour = dt.hour
c_minute = dt.minute
global parsed
parsed += 1
numBlocks = int(parsed / (iter + 1)) if parsed != maxParsed else steps
print(f"\rRendering timeslices [" + ''.join(['#' for _ in range(numBlocks)]).ljust(steps) + f"] ({str(c_hour).rjust(2)} {str(c_minute).rjust(2)})", end="", flush=True)
log.info(f"Starting processing (multiprocessing: {multproc})")
if multproc:
pool = multiprocessing.Pool()
for hour in hours:
for minute in sorted(list(timeslices[hour].keys())):
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
pool.apply_async(_rasterizeTimesliceWorker,
(timeslices[hour][minute], rasterPath, imagePath, minAgents, maxAgents, dt, xres, yres, perform_rasterization),
callback=callback)
pool.close()
pool.join()
else:
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
callback(_rasterizeTimesliceWorker(timeslices[hour][minute], rasterPath, imagePath, minAgents, maxAgents, dt, xres, yres, perform_rasterization))
print()
def rasterizeTimesliceMultipleDays(timeslices_range: dict, perform_rasterization):
"""
Rasterize timeslices over multiple days while keeping consistent color scheme across rasters
    timeslices_range shall for each day contain a dictionary with keys:
- timeslices
- startTime
- endTime
- imagePath
- rasterPath
:param timeslices_range: dictionary containing timeslices and metadata for each day
:param perform_rasterization: whether or not to raster GDAL layers or just create and store them
"""
xres = 1000
yres = 1000
multproc = True
min_agents_range = 4000
max_agents_range = 0
log.info(f"Calculating min and max agents over all timeslices")
for day in timeslices_range:
timeslices = timeslices_range[day]["timeslices"]
for hour in sorted(list(timeslices.keys())):
for minute in timeslices[hour]:
min_agents_range = min(min_agents_range, timeslices[hour][minute][timeslices[hour][minute].countReachable > 3].countReachable.min())
max_agents_range = max(max_agents_range, timeslices[hour][minute].countReachable.max())
log.info(f"min agents: {min_agents_range}, max agents: {max_agents_range}")
hours = range(0,24)
minutes = range(0, 60, 10)
log.info(f"Rasterizing timeslices from {timeslices_range[list(timeslices_range.keys())[0]]['startTime']} to {timeslices_range[list(timeslices_range.keys())[-1]]['startTime']}")
for day in timeslices_range:
timeslices = timeslices_range[day]["timeslices"]
rasterPath = timeslices_range[day]["rasterPath"]
imagePath = timeslices_range[day]["imagePath"]
slice_datetime = timeslices_range[day]["startTime"]
log.info(f"Rasterizing timeslices on day {day}")
if not os.path.isdir(rasterPath):
log.warning(f"{rasterPath} does not exist, attempting to create folder..")
os.mkdir(rasterPath)
if not os.path.isdir(imagePath):
log.warning(f"{imagePath} does not exist, attempting to create folder..")
os.mkdir(imagePath)
global parsed
parsed = 0
maxParsed = len(hours)*len(minutes)
steps = 10
iter = int(maxParsed / steps)
def callback(result):
dt = result
c_hour = dt.hour
c_minute = dt.minute
global parsed
parsed += 1
numBlocks = int(parsed / (iter + 1)) if parsed != maxParsed else steps
print(f"\rRendering timeslices [" + ''.join(['#' for _ in range(numBlocks)]).ljust(steps) + f"] ({str(c_hour).rjust(2)} {str(c_minute).rjust(2)})", end="", flush=True)
if multproc:
pool = multiprocessing.Pool()
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
pool.apply_async(_rasterizeTimesliceWorker,
(timeslices[hour][minute], rasterPath, imagePath, min_agents_range, max_agents_range, dt, xres, yres, perform_rasterization),
callback=callback)
pool.close()
pool.join()
else:
for hour in hours:
for minute in minutes:
dt = datetime.datetime(year=slice_datetime.year, month=slice_datetime.month, day=slice_datetime.day, hour=hour, minute=minute)
callback(_rasterizeTimesliceWorker(timeslices[hour][minute], rasterPath, imagePath, min_agents_range, max_agents_range, dt, xres, yres, perform_rasterization))
print()
shutil.rmtree(rasterPath)
def visualizeOverview(timeslices: dict, imagePath: str, startTime: datetime.datetime, endTime: datetime.datetime, write_out: bool = False):
"""
Visualize multiple timeslices by a line graph representing the minimum, mean and maximum number of usable vehicles per timeslice
:param timeslices: dictionary containing timeslices in format hour:minute:timeslice
:param imagePath: path to dictionary where output image should be stored
:param startTime: datetime representing begin of timeslices
:param endTime: datetime representing end of timeslices
:param write_out: whether or not to write image and overviewDf to disk
"""
maxAgents = 0
minAgents = 4000
meanAgents = []
maxs = []
mins = []
means = []
idxs = []
df_data = []
for hour in sorted(list(timeslices.keys())):
for minute in sorted(list(timeslices[hour].keys())):
minVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.min()
maxVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.max()
meanVal = timeslices[hour][minute][timeslices[hour][minute].countReachable > 5].countReachable.mean()
idx = datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=hour, minute=minute)
idxs.append(idx)
mins.append(minVal)
maxs.append(maxVal)
means.append(meanVal)
minAgents = min(minAgents, minVal)
maxAgents = max(maxAgents, maxVal)
meanAgents.append(meanVal)
df_data.append([idx, minVal, meanVal, maxVal])
meanAgents = int(np.mean(meanAgents))
log.debug(f"Minimum agents at one spot: {minAgents}, mean agents: {meanAgents}, maximum agents: {maxAgents}")
fig: plt.Figure = plt.figure(figsize=(15, 8), dpi=300)
ax: plt.Axes = plt.gca()
ax.plot_date(idxs, mins, '-g', label="minimum")
    ax.plot_date(idxs, means, '-y', label="average")
ax.plot_date(idxs, maxs, '-r', label="maximum")
ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=0))
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=10))
ax.set_xlim(datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=0, minute=0),
datetime.datetime(year=startTime.year, month=startTime.month, day=startTime.day, hour=23, minute=59), emit=False)
# removed for small sample based reproduction # ax.set_ylim(250,900)
fig.autofmt_xdate()
ax.legend()
plt.title(f"Minimum, average and maximum number of vehicles seamlessly reaching one vertex, per 10 minute timeslice")
plt.xlabel(f"time\nat {startTime.strftime('%d.%m.%y')}")
plt.ylabel("number of seamlessly reaching vehicles")
overview_df = pd.DataFrame(df_data, columns=["idx", "min", "mean", "max"])
if write_out:
overview_df.to_csv(os.path.join(imagePath, f"analysis-{startTime.strftime('%d.%m.%y')}-{endTime.strftime('%d.%m.%y')}.csv"))
plt.savefig(os.path.join(imagePath, f"analysis-{startTime.strftime('%d.%m.%y')}-{endTime.strftime('%d.%m.%y')}.png"))
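if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes GDAL/OGR is
    # installed, that ./rasters and ./images are writable, and relies on the same
    # star import as the module body for datetime. The timeslice DataFrames only need
    # the lat, lon and countReachable columns used by the worker above.
    _slice = pd.DataFrame({"lat": [52.52, 52.53],
                           "lon": [13.40, 13.41],
                           "countReachable": [10, 20]})
    _timeslices = {hour: {minute: _slice for minute in range(0, 60, 10)} for hour in range(24)}
    rasterizeTimeslices(_timeslices, datetime.datetime(2020, 1, 1),
                        rasterPath="./rasters", imagePath="./images",
                        perform_rasterization=False)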
| true |
c5b93f128533583ad43c5728bb6383f9a9a3441f
|
Python
|
keionbis/stm32f1-custom-usbhid
|
/hid-test.py
|
UTF-8
| 489 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import hidapi
import binascii
import time
hidapi.hid_init()
print 'Loaded hidapi library from: {:s}\n'.format(hidapi.hid_lib_path())
devices = hidapi.hid_enumerate(0x0483, 0x5750)
if len(devices) == 0:
print "No dev attached"
exit(1)
device = hidapi.hid_open(0x0483, 0x5750)
import random
while True:
result = hidapi.hid_read(device, 4)
state = binascii.hexlify(result)
print "#%d: %s" % (len(result), state)
hidapi.hid_close(device)
| true |
f43b2010f8e5e803a81557f776f09f7717ec88eb
|
Python
|
baovu98/Homework-Python
|
/multiply.py
|
UTF-8
| 435 | 3.59375 | 4 |
[] |
no_license
|
def multiply_list(Lalisalist):
# Multiply the Numbers in the list one by one
result = 1
for i in Lalisalist:
try:
int(i)
except:
return False
for i in Lalisalist:
result = result * i
return result
#Insert Desired numbers
yourList = []
yourList = list(map(int,input("Input: ").split()))
print("Input: ",yourList)
print("Output: ",multiply_list(yourList))
| true |
e4d52afd4ff43109e382d7b549260c907193649c
|
Python
|
rubys/feedvalidator
|
/time.cgi
|
UTF-8
| 229 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/python
print "Content-type: text/plain\r\n\r\n",
import rfc822
import time
print "Current time:\n"
print " RFC 2822: " + rfc822.formatdate()
print " RFC 3339: " + time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
| true |
71238a0a6a9a180192f73c37938f49eb854f7b4f
|
Python
|
LuisHernandez96/Pichon
|
/NeededSize.py
|
UTF-8
| 1,228 | 3.15625 | 3 |
[
"MIT"
] |
permissive
|
# Store the amount of variables a scope needs to execute
# NOTE: WE ENDED UP NOT USING THIS AND JUST GIVE 10000 ADDRESSES TO EACH DATA TYPE
class NeededSize:
# Constructor
def __init__(self, localInts = 0, localFloats = 0, localBooleans = 0, tempInts = 0, tempFloats = 0, tempBooleans = 0):
self.localInts = localInts
self.localFloats = localFloats
self.localBooleans = localBooleans
self.tempInts = tempInts
self.tempFloats = tempFloats
self.tempBooleans = tempBooleans
# __str__ override to print in an easy to read format
def __str__(self):
return "localInts: {} localFloats: {} localBooleans: {} tempInts: {} tempFloats: {} tempBooleans: {}".format(
self.localInts, self.localFloats, self.localBooleans, self.tempInts, self.tempFloats, self.tempBooleans)
# Add local or temporal integers
def addInts(self, Ints, temp):
if temp:
self.tempInts += Ints
else:
self.localInts += Ints
# Add local or temporal floats
def addFloats(self, Floats, temp):
if temp:
self.tempFloats += Floats
else:
self.localFloats += Floats
# Add local or temporal booleans
def addBooleans(self, Booleans, temp):
if temp:
self.tempBooleans += Booleans
else:
self.localBooleans += Booleans
| true |
1b643c9ac2da65e238e8dd0cdf564e403a699a4e
|
Python
|
sainihimanshu1999/Leetcode---Top-100-Liked-Questions
|
/longestvalidparanthesis.py
|
UTF-8
| 341 | 2.6875 | 3 |
[] |
no_license
|
class Solution:
    def longestvalidparanthesis(self, s):
        if not s:
            return 0
        stack = []
        dp = [0] * len(s)
        for i in range(len(s)):
            if s[i] == '(':
                stack.append(i)
                continue
            if stack:
                leftIndex = stack.pop()
                # valid span ending at i: this pair plus any valid run ending just before leftIndex
                dp[i] = i - leftIndex + 1 + (dp[leftIndex - 1] if leftIndex > 0 else 0)
        return max(dp)
| true |
40157c1585bd14365ceb5fa50d17f0736f950501
|
Python
|
wikilife-org/wikilife_processors
|
/wikilife_processors/tests/processors/stats/was/factors/height_factor_tests.py
|
UTF-8
| 1,026 | 2.765625 | 3 |
[] |
no_license
|
# coding=utf-8
from wikilife_processors.tests.base_test import BaseTest
from wikilife_processors.processors.stats.was.factors.factors_model import HeightFactor
class HeightFactorTests(BaseTest):
def test_process_value(self):
factor = HeightFactor(values=[
{"id": "lt1", "name": "less than 1.00", "min": 0.0, "max": 1.0},
{"id": "100-130", "name": "1.00 to 1.30", "min": 1.0, "max": 1.3},
{"id": "131-150", "name": "1.31 to 1.50", "min": 1.31, "max": 1.5},
{"id": "151-160", "name": "1.51 to 1.60", "min": 1.51, "max": 1.6},
{"id": "161-170", "name": "1.61 to 1.70", "min": 1.61, "max": 1.7},
{"id": "171-180", "name": "1.71 to 1.80", "min": 1.71, "max": 1.8},
{"id": "181-190", "name": "1.81 to 1.90", "min": 1.81, "max": 1.9},
{"id": "gt190", "name": "more than 1.90", "min": 1.9, "max": None}
])
raw_value = 1.72
value = factor.process_value(raw_value)
assert value == raw_value
| true |
0000d029a1c348a1769b5604e4efcd07cf2cb4d7
|
Python
|
ninjascant/wiki-parser
|
/tests/parse_sections_tests.py
|
UTF-8
| 2,426 | 3.09375 | 3 |
[] |
no_license
|
import unittest
import wikitextparser as wtp
from wikiparser.parse_sections import check_is_nested_section, parse_section_text
TEST_PAGE = '''
'{{other uses|Answer (disambiguation)}}\n{{refimprove|date=August 2013}}\n{{Use mdy dates|date=June 2013}}\n\n\nIn law, an \'\'\'answer\'\'\' was originally a solemn assertion in opposition to someone
or something, and thus generally any counter-statement or [[defense (legal)|defense]], a [[reply]] to a [[question]]
\n\nThe famous Latin \'\'Responsa Prudentium\'\' ("answers of the learned ones") were the accumulated views of many
successive generations of Roman [[lawyer]]s, a body of legal opinion which gradually became authoritative.
<ref name="Chisholm1911"/>\n\nDuring debates of a contentious nature, deflection, colloquially known as \'changing the
topic\', has been widely observed, and is often seen as a failure to answer a question.<ref>{{cite book|last1=Baaske
|first1=Kevin|title=Arguments and Arguing: The Products and Process of Human Decision Making|date=2015|page=246}}
</ref>\n\n==Notes==\n{{Reflist}}\n\n==External links==\n* [https://answerssite.com/ Answers Site]\n*
[https://answerskey.com/ Answers key]\n\n[[Category:Common law]]\n[[Category:Legal documents]]'
'''
TEST_PARSED_TEXT = '''
'In law, an answer was originally a solemn assertion in opposition to someone
or something, and thus generally any counter-statement or defense, a reply to a question The famous Latin Responsa Prudentium ("answers of the learned ones") were the accumulated views of many
successive generations of Roman lawyers, a body of legal opinion which gradually became authoritative.During debates of a contentious nature, deflection, colloquially known as 'changing the
topic', has been widely observed, and is often seen as a failure to answer a question.
'''
class TestParseCategorySection(unittest.TestCase):
def test_check_is_nested_section(self):
test_parsed_page = wtp.parse(TEST_PAGE)
test_sections = test_parsed_page.sections
check_results = [check_is_nested_section(section) for section in test_sections]
assert check_results == [False, False, False]
def test_parse_section_text(self):
test_parsed_page = wtp.parse(TEST_PAGE)
test_sections = test_parsed_page.sections
parsed_text = parse_section_text(test_sections[0])
assert parsed_text.split() == TEST_PARSED_TEXT.split()
| true |
9c7826ef72647aa32093bcb21a611db2644b10ed
|
Python
|
WeeJang/basic_algorithm
|
/TreeArray.py
|
UTF-8
| 729 | 3.484375 | 3 |
[] |
no_license
|
#!/usr/bin/env python2
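# Binary Indexed Tree (Fenwick tree): update and query both walk the tree by repeatedly
# adding/removing the lowest set bit of the index, so both run in O(log n).
# lowbit(x) = x & (-x) isolates that lowest set bit (e.g. lowbit(12) == 4).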
def lowbit(x):
return x & (-x)
class TreeArray(object):
def __init__(self,n):
self.__n__ = n
self.__array__ = [ 0 ] * ( n + 1 )
def update(self,index,value):
assert index <= self.__n__
while index <= self.__n__:
print "modify index",index
self.__array__[index] += value
index += lowbit(index)
def query(self,index):
assert index <= self.__n__
sum_ = 0
        while index > 0:  # stop at 0: lowbit(0) == 0 would loop forever
sum_ += self.__array__[index]
index -= lowbit(index)
return sum_
def debug(self):
print self.__array__
a = TreeArray(16)
a.update(4,4)
print(a.debug())
print a.query(4)
print a.query(7)
print a.query(16)
a.update(8,3)
print(a.debug())
print a.query(4)
print a.query(7)
print a.query(16)
| true |
cce860b8707d8adedcadea5374103bab6ce571d8
|
Python
|
Guruscode/getting-started-with-python
|
/modules-and-packages/basicmath/calculator.py
|
UTF-8
| 362 | 3.09375 | 3 |
[] |
no_license
|
# calculator module
print( 'basicmath/calculator.py: __name__ => ', __name__ )
print( 'basicmath/calculator.py: __package__ => ', __package__ )
# export a function
def add( num1, num2 ):
return num1 + num2
# import numbers subpackage using absolute intra-package referennce
from basicmath.numbers import power
def square(num):
return power.square(num)
| true |
87f0393dc5a0ec631ee5cefc3e894450153fd275
|
Python
|
m-atlantis/BloodTypeCompatibility
|
/dealer.py
|
UTF-8
| 1,904 | 3.078125 | 3 |
[] |
no_license
|
import random
import numpy as np
import alice
import bob
def __init():
global s, r, truth_table, matrix_a, matrix_b, n_size
# We work with bloodtypes, and there's 8 different ones, so to get 2**n = 8, n = 3.
n_size = 3
r = create_random_bit_string()
s = create_random_bit_string()
# Initialize matrix M_b \in {0,1}^(2^n x 2^n) uniformly random
matrix_b = np.random.randint(2, size=(2 ** n_size, 2 ** n_size))
# Initialize matrix M_a as a 2^n x 2^n matrix with zeroes
matrix_a = np.zeros((2 ** n_size, 2 ** n_size)).astype(int)
truth_table = __get_truth_table()
for index, _ in np.ndenumerate(matrix_a):
i = index[0]
j = index[1]
matrix_a[i, j] = __create_matrix_a(i, j, n_size, matrix_b[i, j])
def create_random_bit_string():
""" Creates a string of random bits that is of length n. """
final_bit_str = ""
for i in range(n_size):
temp = str(random.randint(0, 1))
final_bit_str += temp
return int(final_bit_str, 2)
def __create_matrix_a(i, j, n, matrix_b_index):
""" Creates index M_A[i,j] as defined in the one-time-truth-table protocol. """
return matrix_b_index ^ truth_table[(i - r) % (2 ** n), (j - s) % (2 ** n)]
def __get_truth_table():
row_1 = np.array([1, 0, 0, 0, 0, 0, 0, 0])
row_2 = np.array([1, 1, 0, 0, 0, 0, 0, 0])
row_3 = np.array([1, 0, 1, 0, 0, 0, 0, 0])
row_4 = np.array([1, 1, 1, 1, 0, 0, 0, 0])
row_5 = np.array([1, 0, 0, 0, 1, 0, 0, 0])
row_6 = np.array([1, 1, 0, 0, 1, 1, 0, 0])
row_7 = np.array([1, 0, 1, 0, 1, 0, 1, 0])
row_8 = np.array([1, 1, 1, 1, 1, 1, 1, 1])
return np.array([row_1, row_2, row_3, row_4, row_5, row_6, row_7, row_8])
def init_alice(x):
""" Returns (r, n, M_A). """
alice.init(x, r, n_size, matrix_a)
def init_bob(y):
""" Returns (s, n, M_B). """
bob.init(y, s, n_size, matrix_b)
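if __name__ == "__main__":
    # Sanity-check sketch (not part of the original protocol code): verify that the
    # dealer's matrices reconstruct the blood-type truth table, i.e.
    # T[u, v] == M_A[(u + r) % 2**n, (v + s) % 2**n] XOR M_B[(u + r) % 2**n, (v + s) % 2**n].
    __init()
    for u in range(2 ** n_size):
        for v in range(2 ** n_size):
            i = (u + r) % (2 ** n_size)
            j = (v + s) % (2 ** n_size)
            assert truth_table[u, v] == matrix_a[i, j] ^ matrix_b[i, j]
    print("one-time truth table reconstruction OK")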
| true |
431e57c2a288807fb06d8ffc20dda0eac5e451bd
|
Python
|
Zakalren/Problem-Solve
|
/BOJ/5430.py
|
UTF-8
| 500 | 2.90625 | 3 |
[] |
no_license
|
import sys
T = int(input())
for _ in range(T):
    P = input()
    N = int(input())
    line = sys.stdin.readline().strip().replace('[', '').replace(']', '')
    arr = line.split(',') if line else []
    error = False
    for c in P:
        if c == 'R':
            arr.reverse()
        else:
            if arr:
                arr.pop(0)
            else:
                print('error')
                error = True
                break
    if not error:
        print('[' + ','.join(arr) + ']')
| true |
23b438dad91fc77099d1e540fab1bb932649bc01
|
Python
|
slieser/flowdesignbuch
|
/python/wordwrap/interactors_tests.py
|
UTF-8
| 1,044 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
from unittest import TestCase
from interactors import start
class InteractorTests(TestCase):
def test_start(self):
result = list(start(['', 'max_und_moritz.txt', 20]))
self.assertEqual(result, [
'Mancher gibt sich',
'viele Müh Mit dem',
'lieben Federvieh:',
'Einesteils der Eier',
'wegen, Welche diese',
'Vögel legen,',
'Zweitens, weil man',
'dann und wann Einen',
'Braten essen kann;',
'Drittens aber nimmt',
'man auch Ihre Federn',
'zum Gebrauch In die',
'Kissen und die',
'Pfühle, Denn man',
'liegt nicht gerne',
'kühle.',
'',
'”Seht, da ist die',
'Witwe Bolte, Die das',
'auch nicht gerne',
'wollte.”',
'',
'Seht, da ist die',
'Witwe Bolte, Die das',
'auch nicht gerne',
'wollte.',
''
])
| true |
cadfa25cc55de6a588c8d184bc7770de3801da39
|
Python
|
yanigisawa/data-dispenser
|
/tests/write_results.py
|
UTF-8
| 476 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
"""
Creates a .result file for each file in this directory, for verifying results.
"""
from file_stems import split_filenames
from data_dispenser.sources import Source
import pickle
for (filename, stem, ext) in split_filenames():
src = Source(filename)
src._dump('%s.result' % stem)
src = Source(filename)
if ext != '.xls' and ('.header' not in filename):
with open('%s.pickle' % stem, 'wb') as outfile:
pickle.dump(list(src), outfile)
| true |
88d8a85e16455c447e7eacc96f571ff69862e848
|
Python
|
malgorzatagwinner/Python
|
/ZADANIA_11/11_1.py
|
UTF-8
| 874 | 3.328125 | 3 |
[] |
no_license
|
#!usr/bin/env python3
# -*- coding: utf -8-*-
import random as r
import statistics as s
from random import gauss as g
def ad_a(n):
lista = list(range(n))
r.shuffle(lista)
return lista
def ad_b(n):
lista = list(range(n))
for i in range(n-1):
a = r.randrange(i, min(n-1, i+5))
lista[i], lista[a] = lista[a], lista[i]
return lista
def ad_c(n):
lista = ad_b(n)
return lista[::-1]
def ad_d(n, mean=1.0, sigma=1.0):
while n>0:
yield g(mean, sigma)
n-=1
def my_sqrt(n):
x = n
y = (x+1) // 2
while y<x:
x = y
y = (x+n // x)//2
return x
def ad_e(n):
max = my_sqrt(n)
while n > 0:
yield r.randint(0, max)
n-=1
if __name__ == '__main__':
print(ad_a(10))
print(ad_b(10))
print(ad_c(10))
print(list(ad_d(10)))
print(list(ad_e(10)))
| true |
3e7fb0bd67ea6a663d840b554b2aa9dc1b9c03ad
|
Python
|
yhtyht308/linux_study
|
/pratice/calculator.py
|
UTF-8
| 3,961 | 2.78125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
import sys
import csv
from collections import namedtuple
IncomeTaxQuickLookupItem=namedtuple(
'IncomeTaxQuickLookupItem',
['start_point','tax_rate','quick_subtractor']
)
INCOME_TAX_START_POINT=3500
INCOME_TAX_QUICK_LOOKUP_TABLE=[
IncomeTaxQuickLookupItem(80000,0.45,13505),
IncomeTaxQuickLookupItem(55000, 0.35, 5505),
IncomeTaxQuickLookupItem(35000, 0.30, 2755),
IncomeTaxQuickLookupItem(9000, 0.25, 1005),
IncomeTaxQuickLookupItem(4500, 0.2, 555),
IncomeTaxQuickLookupItem(1500, 0.1, 105),
IncomeTaxQuickLookupItem(0, 0.03, 0)
]
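# Worked example of the quick-deduction table above: a taxable amount of 5000
# (pay minus social insurance minus the 3500 start point) falls in the bracket
# starting at 4500, so tax = 5000 * 0.2 - 555 = 445.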
#class args
class Args(object):
def __init__(self):
self.args=sys.argv[1:]
def _option_after_value(self,option):
try:
index=self.args.index(option)
return self.args[index+1]
except (ValueError,IndexError):
print('Args Parameter Error')
exit()
    @property
    def config_path(self):
        return self._option_after_value('-c')
    @property
    def userdata_path(self):
        return self._option_after_value('-d')
    @property
    def export_path(self):
        return self._option_after_value('-o')
args=Args()
#class config
class Config(object):
def __init__(self):
        self.config=self._read_config()
def _read_config(self):
config={}
config_path=args.config_path
with open(config_path) as f:
            for line in f.readlines():
                key,value=line.strip().split('=')
                try:
                    config[key.strip()]=float(value.strip())
                except ValueError:
                    print('Config Parameter Error')
                    exit()
        return config
def _get_config(self,key):
return self.config[key]
@property
def social_insurance_baseline_low(self):
        return self._get_config('JiShuL')
@property
def social_insurance_baseline_high(self):
return self._get_config('JiShuH')
@property
def social_insurance_total_rate(self):
return sum([
            self._get_config('YangLao'),  # assumption: the original listed 'YiLiao' twice; pension ('YangLao') is presumably intended
            self._get_config('YiLiao'),
self._get_config('ShiYe'),
self._get_config('GongShang'),
self._get_config('ShengYu'),
self._get_config('GongJiJin')
])
config=Config()
#class userdata
class Userdata(object):
def __init__(self):
self.userdata=self._read_userdata()
    def _read_userdata(self):
userdata=[]
userdata_path=args.userdata_path
with open(userdata_path) as f:
for line in f.readlines():
employee_id,income_string=line.strip().split(',')
try:
income=int(income_string)
except(ValueError):
print('Userdata Parameter Error')
exit()
userdata.append((employee_id,income))
return userdata
def __iter__(self):
return iter(self.userdata)
#class AllCalculator
class AllCalculator(object):
    def __init__(self, userdata):
        self.userdata = userdata
    @staticmethod
    def calc_social_insurance(income):
        # clamp the contribution base to [JiShuL, JiShuH] before applying the total rate
        if income <= config.social_insurance_baseline_low:
            return config.social_insurance_baseline_low * config.social_insurance_total_rate
        if income >= config.social_insurance_baseline_high:
            return config.social_insurance_baseline_high * config.social_insurance_total_rate
        return income * config.social_insurance_total_rate
    @classmethod
    def calc_income_tax_remain(cls, income):
        real_income = income - cls.calc_social_insurance(income)
        taxable_part = real_income - INCOME_TAX_START_POINT
        if taxable_part <= 0:
            return '0.00', '{:.2f}'.format(real_income)
        for item in INCOME_TAX_QUICK_LOOKUP_TABLE:
            if taxable_part > item.start_point:
                income_tax = taxable_part * item.tax_rate - item.quick_subtractor
                return '{:.2f}'.format(income_tax), '{:.2f}'.format(real_income - income_tax)
    def calc_for_all_userdata(self):
        result = []
        for employee_id, income in self.userdata:
            data = [employee_id, income]
            social_insurance = '{:.2f}'.format(self.calc_social_insurance(income))
            tax, remain = self.calc_income_tax_remain(income)
            data += [social_insurance, tax, remain]
            result.append(data)
        return result
    def export(self, default='csv'):
        result = self.calc_for_all_userdata()
        with open(args.export_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(result)
if __name__=='__main__':
calculator=AllCalculator(Userdata())
calculator.export()
| true |
7f32b989627448390741459eea04ced7f31ac39b
|
Python
|
hectorgavin/squash
|
/squash2000/api.py
|
UTF-8
| 539 | 2.65625 | 3 |
[] |
no_license
|
from datetime import datetime
import requests
class Squash2000Api(object):
calendar_endpoint = 'http://www.squash2000-paramount-fitness.de/plan.php'
@classmethod
    def get_timetable(cls, date=None):
        # evaluate the default at call time; a datetime.now() default argument would be frozen at import time
        if date is None:
            date = datetime.now().date()
        response = requests.get(cls.calendar_endpoint + '?jahr={}&monat={}&tag={}'.format(date.year, date.month, date.day))
if response.status_code == 200:
return response.text
else:
raise Exception('There was an error fetching the Squash Calendar for {}'.format(date))
| true |
8cfae95d372deb19202c08417c3d8b8ce52f797d
|
Python
|
tavo18/MLCaptchaSolver
|
/train_model_8.py
|
UTF-8
| 4,383 | 2.890625 | 3 |
[] |
no_license
|
import cv2
import pickle
import os.path
import numpy as np
from imutils import paths
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten, Dense
from helpers import resize_to_fit
LETTER_IMAGES_FOLDER = "/content/MLCaptchaSolver/dataset/examples8v2"
MODEL_FILENAME = "captcha_model_8.hdf5"
MODEL_LABELS_FILENAME = "model_labels_8.dat"
# initialize the data and labels
data = []
labels = []
# loop over the input images
for image_file in paths.list_images(LETTER_IMAGES_FOLDER):
# Load the image and convert it to grayscale
image = cv2.imread(image_file)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Resize the letter so it fits in a 20x20 pixel box
#image = resize_to_fit(image, 20, 20)
# Add a third channel dimension to the image to make Keras happy
# image = np.expand_dims(image, axis=2)
# Grab the name of the letter based on the folder it was in
label = image_file.split(os.path.sep)[-2]
    # Add the letter image and its label to our training data
data.append(image)
labels.append(label)
# scale the raw pixel intensities to the range [0, 1] (this improves training)
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# Split the training data into separate train and test sets
(X_train, X_test, Y_train, Y_test) = train_test_split(data, labels, test_size=0.25, random_state=0)
# Convert the labels (letters) into one-hot encodings that Keras can work with
lb = LabelBinarizer().fit(Y_train)
Y_train = lb.transform(Y_train)
Y_test = lb.transform(Y_test)
# Save the mapping from labels to one-hot encodings.
# We'll need this later when we use the model to decode what it's predictions mean
with open(MODEL_LABELS_FILENAME, "wb") as f:
pickle.dump(lb, f)
# Build the neural network!
model = Sequential()
# First convolutional layer with max pooling
model.add(Conv2D(20, (5, 5), padding="same", input_shape=(31, 26, 1), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Second convolutional layer with max pooling
model.add(Conv2D(50, (5, 5), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# Hidden layer with 500 nodes
model.add(Flatten())
model.add(Dense(500, activation="relu"))
# Output layer with 32 nodes (one for each possible letter/number we predict)
model.add(Dense(36, activation="softmax"))
# Ask Keras to build the TensorFlow model behind the scenes
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Train the neural network
model.fit(X_train, Y_train, validation_data=(X_test, Y_test), batch_size=32, epochs=10, verbose=1)
# Train on 5998 samples, validate on 2000 samples
# Epoch 1/10
# 5998/5998 [==============================] - 20s 3ms/step - loss: 2.8304 - acc: 0.2321 - val_loss: 0.7964 - val_acc: 0.7420
# Epoch 2/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.3400 - acc: 0.8961 - val_loss: 0.1439 - val_acc: 0.9590
# Epoch 3/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.1107 - acc: 0.9653 - val_loss: 0.0626 - val_acc: 0.9880
# Epoch 4/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0613 - acc: 0.9823 - val_loss: 0.0479 - val_acc: 0.9795
# Epoch 5/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0358 - acc: 0.9915 - val_loss: 0.0371 - val_acc: 0.9870
# Epoch 6/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0249 - acc: 0.9950 - val_loss: 0.0152 - val_acc: 0.9970
# Epoch 7/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0177 - acc: 0.9977 - val_loss: 0.0120 - val_acc: 0.9970
# Epoch 8/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0180 - acc: 0.9955 - val_loss: 0.0320 - val_acc: 0.9880
# Epoch 9/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0303 - acc: 0.9930 - val_loss: 0.0085 - val_acc: 0.9980
# Epoch 10/10
# 5998/5998 [==============================] - 19s 3ms/step - loss: 0.0170 - acc: 0.9965 - val_loss: 0.0120 - val_acc: 0.9965
# <keras.callbacks.History at 0x7fb4f4f79dd8>
# Save the trained model to disk
model.save(MODEL_FILENAME)
| true |
0bb5ad685b6a8fcf3eec4fc8d540dbafa0e5bb0a
|
Python
|
Nikkuniku/AtcoderProgramming
|
/ARC/ARC109/B.py
|
UTF-8
| 176 | 2.984375 | 3 |
[] |
no_license
|
n=int(input())
def f(k):
return k*(k+1)//2
l=-1
r=n+1
while r-l>1:
mid=l + (r-l)//2
if f(mid)<=n+1:
l=mid
else:
r=mid
ans=n-l+1
print(ans)
| true |
3d135605923214bc9b5e5641ee69ee471678f822
|
Python
|
suchen2018/Some-useful-code
|
/quadratic.py
|
UTF-8
| 874 | 3.484375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 14:14:55 2018
@author: n10027301
"""
import math
def quadratic(a,b,c):
if a == 0:
        raise TypeError('a cannot be 0')
if not isinstance(a,(int,float)) or not isinstance(b,(int,float)) or not isinstance(c,(int,float)):
raise TypeError('Bad operand type')
delta = math.pow(b,2) - 4*a*c
    if delta < 0:
        return 'no real roots'
    elif delta==0:
        x1= (math.sqrt(delta)-b)/(2*a)
        print('x=', x1)
        print('error=', a*x1*x1+b*x1+c)
else:
x1= (math.sqrt(delta)-b)/(2*a)
x2=-(math.sqrt(delta)+b)/(2*a)
print('x1=%.2f, x2=%.2f'%(x1, x2))
print('error1=', a*x1*x1+b*x1+c)
print('error2=', a*x2*x2+b*x2+c)
#77=10.69(13.375-t)t
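# i.e. in standard form: -10.69*t**2 + (10.69*13.375)*t - 77 = 0, hence the call below with (-a, a*b, -c).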
a=10.69
b=13.375
c=77
quadratic(-a,a*b,-c)
| true |
37da93dce4a58cb24b9ac632044abee92bb57f9c
|
Python
|
DiagnosticRobotics/dr-chf-models
|
/deep/lstm_model.py
|
UTF-8
| 3,121 | 2.90625 | 3 |
[] |
no_license
|
from deep.attention import AttentionWithContext
from keras import Model, Input
from keras.layers import Dense, Dropout, Activation
from deep.deep_sequential_model import DeepSequentialModel
class LstmModel(DeepSequentialModel):
def __init__(self, model_name = 'deep_model',
activation = 'sigmoid',
loss = 'binary_crossentropy',
optimizer = 'adam',
batch_size = 128,
dropout_rate = 0.4,
epochs = 50,
initial_learning_rate = 0.001,
vector_size = 300,
use_gpu = True,
use_attention = True,
layers_dim = [64,32],
lstm_layers = [64,32],
bidirectional_lstm = True):
'''
Args:
model_name: str. choose a name for the model
activation: str. activation function for the deep layers. for example, 'relu', 'sigmoid'.
loss: str. loss function.
optimizer: str. deep model optimizer. choose from ['adam', 'adadelta', 'adagrad', 'adamax']
batch_size: int. batch size.
dropout_rate: float. dropout layer rate
epochs: int. number of epochs.
initial_learning_rate: float. initial learning model for the deep model.
vector_size: int. embedding vector size.
use_gpu: boolean [True, False]
use_attention: boolean [True, False] to use attention layer on the sequence.
layers_dim: list. list of fully connected layers dimensions.
lstm_layers: list. list of lstm layers dimensions.
bidirectional_lstm: boolean [True, False]. use bidirectional lstm instead of 1 directional lstm.
'''
super().__init__(model_name,
activation,
loss,
optimizer,
batch_size,
dropout_rate,
epochs,
initial_learning_rate,
vector_size,
use_gpu,
use_attention, layers_dim)
self.lstm_layers = lstm_layers
self.bidirectional_lstm = bidirectional_lstm
def build_sequential_network(self):
# input shape sequence length X embedding size
codes_input = Input(shape = (self.sequence_length, self.vector_size))
fc_input = self._add_layers(codes_input, self.layers_dim, name = f'input')
lstm_output = self._add_lstm_layers(fc_input, self.lstm_layers, self.bidirectional_lstm, self.use_attention,
name = f'lstm_input')
if self.use_attention:
lstm_output = AttentionWithContext()(lstm_output)
lstm_output = Dropout(self.dropout_rate, name = f'att_dropout')(lstm_output)
model = Dense(1, name = 'output')(lstm_output)
model = Activation('sigmoid')(model)
model = Model(inputs = codes_input, outputs = model, name = 'Final_output')
return model
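# Construction sketch (not from the original repo); assumes the DeepSequentialModel base
# class sets self.sequence_length before build_sequential_network() is called:
# model = LstmModel(model_name='chf_lstm', lstm_layers=[64, 32], bidirectional_lstm=True)
# keras_model = model.build_sequential_network()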
| true |
7df6fd4ec83524eb18c4fa483feedd21724ca722
|
Python
|
worklifesg/Daily-Coding-Challenges-Practice
|
/GeneralCodingPractice/1.Arrays/6_RotateArray_Modified.py
|
UTF-8
| 748 | 3.6875 | 4 |
[] |
no_license
|
from typing import List


class Solution:
def rotate(self, nums: List[int], k: int) -> None:
def reverse_array(start, end):
while start < end:
nums[start],nums[end] = nums[end],nums[start]
start+=1
end-=1
length = len(nums)
k = k % length
reverse_array(0,length-1)
reverse_array(0,k-1)
reverse_array(k,length-1)
'''
In this version, the reverse helper is defined inside the main method and the array is not passed around twice, which improved the runtime.
Runtime: 208 ms, faster than 61.91% of Python3 online submissions for Rotate Array.
Memory Usage: 25.6 MB, less than 25.02% of Python3 online submissions for Rotate Array.
'''
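# Minimal usage sketch (not part of the original snippet):
if __name__ == "__main__":
    nums = [1, 2, 3, 4, 5, 6, 7]
    Solution().rotate(nums, 3)
    print(nums)  # expected: [5, 6, 7, 1, 2, 3, 4]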
| true |
96d42012277bad79f8d75638c36dee1a2588bbb8
|
Python
|
justgolikeme/My_MachineLearning
|
/Base_On_Scikit-Learn_TensorFlow/Chapter_2/Demo_2/Analyze_Data.py
|
UTF-8
| 565 | 3.328125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2019/12/21 15:33
# @Author : Mr.Lin
'''
Analyze the data
'''
from Chapter_2.Demo_2.DataSource import housing
# Look at the first five rows of data; each row represents one district
print(housing.head())
print("")
# The info() method gives a quick description of the dataset
print(housing.info())
print("")
# Check how many categories exist for the ocean_proximity attribute
print(housing["ocean_proximity"].value_counts())
print("")
print(housing.describe())
print("")
# Plot histograms
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
plt.show()
| true |
559d7e35ddb621d7ff3ca774d292a473c0413468
|
Python
|
pto8913/KyoPro
|
/ABC/ABC105/B.py
|
UTF-8
| 179 | 2.890625 | 3 |
[] |
no_license
|
# URL: https://atcoder.jp/contests/abc105/tasks/abc105_b
N = int(input())
ans = "No"
for i in range(26):
for j in range(16):
if(i*4+j*7 == N):
ans = "Yes"
print(ans)
| true |
0fc9b9a2f752b27266536d2f3fb18f2003a93bd8
|
Python
|
brandonhillert/StructeredProgramming
|
/Mastering Mastermind/test.py
|
UTF-8
| 1,605 | 3.90625 | 4 |
[] |
no_license
|
import itertools
import random
def code_invullen():
    print("Vul een code in: ")
    code = input()
    # keep asking until the code is exactly 4 characters, each from "abcdef"
    while len(code) != 4 or any(ch not in "abcdef" for ch in code):
        print("Foutmelding")
        print("Vul een code in")
        code = input()
    return list(code)
def feedback_geven_mens():
feedback = []
print("Geef aantal zwarten pinnen: ")
zwarten_pinnen = int(input())
print("Geef aantal witte pinnen: ")
witte_pinnen = int(input())
feedback.append(zwarten_pinnen)
feedback.append(witte_pinnen)
return feedback
def gok():
letters_antwoord = ""
for letter in "abcdef":
print(letter * 4)
mogelijke_feedback = [[1, 0], [2, 0], [3, 0], [4, 0]]
feedback = feedback_geven_mens()
if feedback == [1, 0]:
letters_antwoord += letter
if feedback == [2, 0]:
for i in range(2):
letters_antwoord += letter
if feedback == [3, 0]:
for i in range(3):
letters_antwoord += letter
if feedback == [4, 0]:
for i in range(4):
letters_antwoord += letter
return letters_antwoord
"""Deze functie retouneert een lijst met alle mogelijke combinaties van abcdef"""
"""https://stackoverflow.com/questions/45990454/generating-all-possible-combinations-of-characters-in-a-string"""
def lijst_combinaties(gok):
lijst = []
for x in itertools.product(*([gok] * 4)):
lijst.append(''.join(x))
return lijst
code_invullen()
print(lijst_combinaties(gok()))
| true |
61301d112826b38c23d58c441a1e8057d0fc8557
|
Python
|
Nikukzn/ParaMol
|
/ParaMol/Force_field/force_field.py
|
UTF-8
| 47,945 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
"""
Description
-----------
This module defines the :obj:`ParaMol.Force_field.force_field.ForceField` class which is the ParaMol representation of a force field that contains all the information about the force field terms and correspondent parameters (even relatively to those that will not enter the optimization).
"""
import os, copy
import numpy as np
import logging
# ParaMol imports
from .force_field_term import *
class ForceField:
"""
ParaMol representation of a force field.
Parameters
----------
openmm_engine : :obj:`ParaMol.MM_engines.openmm.OpenMMEngine`
ParaMol representation of the OpenMMEngine
Attributes
----------
force_field : dict
Dictionary that contains as keys force groups names and as values and the correspondent :obj:`ParaMol.Force_field.force_field_term.FFTerm`. This mapping is constructed as given by OpenMM.
force_field_optimizable : dict
Same as before but only containing optimizable force field terms. Force groups that do not have optimizable force field terms will not be part of this dictionary.
force_groups : dict
Dictionary that defines the mapping between force group names and force group numbers, which is defined accordingly to the information obtained form the OpenMM System.
optimizable_parameters : list
List that contains instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` that are optimizable.
optimizable_parameters_values : list of float/int
List that contains the values of the optimizable force field parameters. This is usually fed into the optimization itself.
"""
symmetry_group_default = "X"
def __init__(self, openmm_engine):
self._openmm = openmm_engine
self.force_field = None
self.force_field_optimizable = None
self.force_groups = None
self.optimizable_parameters = None
self.optimizable_parameters_values = None
# ------------------------------------------------------------ #
# #
# PUBLIC METHODS #
# #
# ------------------------------------------------------------ #
def create_force_field(self, opt_bonds=False, opt_angles=False, opt_torsions=False, opt_charges=False, opt_lj=False, opt_sc=False, ff_file=None):
"""
Method that wraps the methods create_force_field_from_openmm/read_ff_file and create_force_field_optimizable in order to ease the procedure of creating a ParaMol representation of a force field.
Notes
-----
If `ff_file` is not `None` the force field will be created from the provided file. The system stored in :obj:`ParaMol.MM_engines.openmm.OpenMMEngine` should contain exactly the same forces and force field terms as the ones in this file.
Parameters
----------
opt_bonds : bool
Flag that signals whether or not the bond parameters will be optimized.
opt_angles : bool
Flag that signals whether or not the angle parameters will be optimized.
opt_torsions : bool
Flag that signals whether or not the dihedral parameters will be optimized.
opt_charges : bool
            Flag that signals whether or not the charges will be optimized.
        opt_lj : bool
            Flag that signals whether or not the Lennard-Jones parameters will be optimized.
        opt_sc : bool
            Flag that signals whether or not the 1-4 Lennard-Jones and electrostatic scaling factors will be optimized.
ff_file : str
Name of the ParaMol force field file to be read.
Returns
-------
force_field : dict
Dictionary that contains as keys force groups names and as values and the correspondent :obj:`ParaMol.Force_field.force_field_term.FFTerm`.
"""
# Set empty Force Field and Force Group dictionaries
self.force_field = {}
self.force_groups = {}
if ff_file is None:
# No .ff file was provided - create parameter list from force field
logging.info("Creating force field directly from OpenMM.")
assert self._openmm is not None, "OpenMM was not set."
self.create_force_field_from_openmm(opt_bonds, opt_angles, opt_torsions, opt_charges, opt_lj, opt_sc)
else:
logging.info("Creating force from .ff file named '{}'.".format(ff_file))
            # A .ff file was provided - create parameter list from the information contained in this file.
            assert os.path.exists(ff_file), "\t * ERROR: .ff file provided - {} - does not exist.".format(ff_file)
self.read_ff_file(ff_file)
self.create_force_field_optimizable()
return self.force_field
def update_force_field(self, optimizable_parameters_values, symmetry_constrained=True):
"""
Method that updates the value of each Parameter object instance.
Parameters
----------
optimizable_parameters_values : list of float/int
List that contains the values of the optimizable force field parameters.
symmetry_constrained : bool
Whether or not the optimization is constrained by symmetries.
Returns
-------
optimizable_parameters : list of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter`
List that contains instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` that are optimizable.
"""
if symmetry_constrained:
# Update the parameter list taking into account the symmetry constraints
symm_groups = {}
            # Iterate over all optimizable parameters; update all the parameters that belong to the default
            # symmetry group and save the new parameter values of the others
for i in range(len(self.optimizable_parameters)):
parameter = self.optimizable_parameters[i]
if parameter.symmetry_group == self.symmetry_group_default:
# If symmetry group of optimizable parameter is default just update it
parameter.value = optimizable_parameters_values[i]
else:
# Optimizable parameter does not belong to default symmetry group
if parameter.symmetry_group in symm_groups.keys():
# If symmetry group is already in symm_groups
if parameter.param_key not in symm_groups[parameter.symmetry_group].keys():
# If the param_key is not in symm_groups
symm_groups[parameter.symmetry_group][parameter.param_key] = optimizable_parameters_values[i]
else:
symm_groups[parameter.symmetry_group] = {}
symm_groups[parameter.symmetry_group][parameter.param_key] = optimizable_parameters_values[i]
# The only parameters that were not updated yet were the ones that do not belong to the default
# symmetry group. We have to iterate over force_field_optimizable and update them.
for force in self.force_field_optimizable:
# For a given force, iterate over all force field terms
for sub_force in self.force_field_optimizable[force]:
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize and parameter.symmetry_group != self.symmetry_group_default:
parameter.value = symm_groups[parameter.symmetry_group][parameter.param_key]
else:
for i in range(len(self.optimizable_parameters)):
self.optimizable_parameters[i].value = optimizable_parameters_values[i]
# TODO: check if there's a better way do this
# Make all scee, scnb positive and eps and sigma positive
if "Scaling14" in self.force_field_optimizable:
for sub_force in self.force_field_optimizable["Scaling14"]:
for ff_term in sub_force:
ff_term.parameters["scee"].value = abs(ff_term.parameters["scee"].value)
ff_term.parameters["scnb"].value = abs(ff_term.parameters["scnb"].value)
if "NonbondedForce" in self.force_field_optimizable:
for sub_force in self.force_field_optimizable["NonbondedForce"]:
for ff_term in sub_force:
ff_term.parameters["lj_eps"].value = abs(ff_term.parameters["lj_eps"].value)
ff_term.parameters["lj_sigma"].value = abs(ff_term.parameters["lj_sigma"].value)
return self.optimizable_parameters
def create_force_field_from_openmm(self, opt_bonds, opt_angles, opt_torsions, opt_charges, opt_lj, opt_sc):
"""
Method that creates the force field dictionary that contains all the FFTerms of the force field as given by OpenMM.
The FFTerms are grouped in lists that can be accessed by the key of the correspondent force group.
Notes
-----
This method constructs the force_groups dictionary, and calls the methods create_harmonic_bond_force_field,create_harmonic_angle_force_field, create_periodic_torsion_force_field, create_nonbonded_force_field in order to construct the force_filed dictionary.
Parameters
----------
opt_bonds : bool
Flag that signals whether or not the bond parameters will be optimized.
opt_angles : bool
Flag that signals whether or not the angle parameters will be optimized.
opt_torsions : bool
Flag that signals whether or not the dihedral parameters will be optimized.
opt_charges : bool
            Flag that signals whether or not the charges will be optimized.
        opt_lj : bool
            Flag that signals whether or not the Lennard-Jones parameters will be optimized.
        opt_sc : bool
            Flag that signals whether or not the 1-4 Lennard-Jones and electrostatic scaling factors will be optimized.
Returns
-------
force_field : dict
Dictionary that contains as keys force groups names and as values and the correspondent :obj:`ParaMol.Force_field.force_field_term.FFTerm`.
"""
# Iterate over all forces present in the system and determine the force groups
assert self._openmm.system is not None, "System was not set"
"""
# Left here only if needed in the future
forces = self._openmm.system.getForces()
for i in range(len(forces)):
force = forces[i]
# Get force group name
# Alternatively,force_key = force.__str__().split(".")[3].split(";")[0]
force_key = force.__class__.__name__
# Set force group number
force.setForceGroup(i)
assert force_key not in self.force_groups, "\t * ERROR: Force {} already in the dictionary.".format(force_key)
self.force_groups[force_key] = i
"""
self.force_groups = copy.deepcopy(self._openmm.forces_indexes)
# Add extra force group for 1-4 scaling factors
force_key = "Scaling14"
assert force_key not in self.force_groups, "\t * ERROR: Force {} already in the dictionary.".format(force_key)
self.force_groups[force_key] = self.force_groups["NonbondedForce"]
# Create the force field from OpenMM
self.create_harmonic_bond_force_field(opt_bonds)
self.create_harmonic_angle_force_field(opt_angles)
self.create_periodic_torsion_force_field(opt_torsions)
self.create_nonbonded_force_field(opt_charges, opt_lj, opt_sc)
return self.force_field
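# A minimal usage sketch (assumed names, not taken from the original source): once the engine
# and system are set up, the force field is typically built and flattened in sequence, e.g.
#
#   ff.create_force_field_from_openmm(opt_bonds=False, opt_angles=False, opt_torsions=True,
#                                     opt_charges=False, opt_lj=False, opt_sc=False)
#   ff.create_force_field_optimizable()
#   parameters, values = ff.get_optimizable_parameters(symmetry_constrained=True)
#
# where `ff` stands for an instance of this class.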
def create_force_field_optimizable(self):
"""
Method that creates the optimizable force field dictionary that contains all the optimizable FFTerms.
The FFTerms are grouped in lists that can be accessed by the key of the corresponding force group.
Returns
-------
force_field_optimizable : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
assert self.force_field is not None, "\t * force_field dictionary was not created yet. Run create_force_field " \
"method before"
self.force_field_optimizable = {}
# Structure:
# force_field["HarmonicBondForce"] = [first_occurrence, second_occurrence]
# where
# first_occurrence = [ff_term_1_1, ff_term_1_2, ...]
# second_occurrence = [ff_term_2_1, ff_term_2_2, ...]
# Iterate over all existent forces
for force in self.force_field:
# For a given force, iterate over all occurrence of that force
for sub_force in self.force_field[force]:
sub_force_field_optimizable = []
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize:
if force not in self.force_field_optimizable:
self.force_field_optimizable[force] = []
sub_force_field_optimizable.append(force_field_term)
break
if force in self.force_field_optimizable:
self.force_field_optimizable[force].append(sub_force_field_optimizable)
return self.force_field_optimizable
def get_optimizable_parameters(self, symmetry_constrained=True):
"""
Method that gets the lists containing all optimizable Parameter instances and parameter values.
Parameters
----------
symmetry_constrained : bool
Whether or not the optimization is constrained by symmetries.
Returns
-------
optimizable_parameters, optimizable_parameters_values : list of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter`, list of int/float
Attributes of self.
"""
assert self.force_field_optimizable is not None, "\t * force_field_optimizable dictionary was not created yet." \
" First run create_force_field_optimizable method."
self.optimizable_parameters = []
self.optimizable_parameters_values = []
# Multiplicity of the parameters
ref_parameters = {}
if symmetry_constrained:
# Keep track of symmetry groups already included
symm_groups = {}
# Iterate over all existent forces
for force in self.force_field_optimizable:
for sub_force in self.force_field_optimizable[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize:
if parameter.symmetry_group == self.symmetry_group_default:
# If symmetry group is the default ("X")
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
elif parameter.symmetry_group in symm_groups.keys():
# If group is not the default one ("X")
# but that symmetry_group is already in symm_groups
if parameter.param_key not in symm_groups[parameter.symmetry_group]:
# Add missing param_key
symm_groups[parameter.symmetry_group].append(parameter.param_key)
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
# Parameter multiplicity
ref_parameters[parameter.symmetry_group].update({parameter.param_key : parameter})
parameter.multiplicity = 1
else:
# Increase multiplicity of the reference parameter
ref_parameters[parameter.symmetry_group][parameter.param_key].multiplicity += 1
else:
# If group is not the default one ("X") and not in symm_groups
symm_groups[parameter.symmetry_group] = []
symm_groups[parameter.symmetry_group].append(parameter.param_key)
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
# Parameter multiplicity
ref_parameters[parameter.symmetry_group] = {parameter.param_key : parameter}
parameter.multiplicity = 1
else:
# Iterate over all existent forces
for force in self.force_field_optimizable:
for sub_force in self.force_field_optimizable[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize:
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
return self.optimizable_parameters, self.optimizable_parameters_values
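# Illustration of the symmetry-constrained flattening above: if two torsion terms share the
# non-default symmetry group "T1", only the first "torsion_k" Parameter encountered is added
# to the optimizable list; the second one only increases the multiplicity of that reference
# parameter to 2, so a single optimization variable drives both force field terms.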
def create_harmonic_bond_force_field(self, opt_bonds):
"""
Method that creates the part of the force field regarding OpenMM's force 'HarmonicBondForce'.
Parameters
----------
opt_bonds : bool
Flag that signals whether or not the bond parameters will be optimized.
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
force_key = "HarmonicBondForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for this force group
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
bond_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(bond_force.getNumBonds()):
# Create the FFTerm for this bond term
at1, at2, length, k = bond_force.getBondParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2])
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_bonds), "bond_eq", length._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_bonds), "bond_k", k._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def create_harmonic_angle_force_field(self, opt_angles):
"""
Method that creates the part of the force field regarding OpenMM's force 'HarmonicAngleForce'.
Parameters
----------
opt_angles : bool
Flag that signals whether or not the angle parameters will be optimized.
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
force_key = "HarmonicAngleForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for this force group
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
angle_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(angle_force.getNumAngles()):
# Create the FFTerm for this bond term
at1, at2, at3, angle, k = angle_force.getAngleParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2, at3])
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_angles), "angle_eq", angle._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_angles), "angle_k", k._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def create_periodic_torsion_force_field(self, opt_torsions):
"""
Method that creates the part of the force field regarding OpenMM's force 'PeriodicTorsionForce'.
Parameters
----------
opt_torsions : bool
Flag that signals whether or not the torsion parameters will be optimized.
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
force_key = "PeriodicTorsionForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for this force group
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
dihedral_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(dihedral_force.getNumTorsions()):
# Create the FFTerm for this bond term
at1, at2, at3, at4, per, phase, k = dihedral_force.getTorsionParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2, at3, at4])
# Add parameters to this FFTerm
# Note: it is currently not possible to optimize the periodicity
force_field_term.add_parameter(self.symmetry_group_default, 0, "torsion_periodicity", int(per))
force_field_term.add_parameter(self.symmetry_group_default, int(opt_torsions), "torsion_phase", phase._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_torsions), "torsion_k", k._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def create_nonbonded_force_field(self, opt_charges, opt_lj, opt_sc):
"""
Method that creates the part of the force field regarding OpenMM's force 'NonbondedForce'.
Parameters
----------
opt_charges : bool
Flag that signals whether or not the charge parameters will be optimized.
opt_lj : bool
Flag that signals whether or not the Lennard-Jones 12-6 parameters will be optimized.
opt_sc : bool
Flag that signals whether or not the 1-4 Lennard-Jones and electrostatic scaling factors's parameters will be optimized.
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
force_key = "NonbondedForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for this force group
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
nonbonded_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(nonbonded_force.getNumParticles()):
# Create the FFTerm for this bond term
charge, sigma, eps = nonbonded_force.getParticleParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [i])
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_charges), "charge", charge._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_lj), "lj_sigma", sigma._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_lj), "lj_eps", eps._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
# Create empty list for 1-4 scaling
force_key = "Scaling14"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for this force group
self.force_field[force_key] = []
sub_force_field = []
for i in range(nonbonded_force.getNumExceptions()):
at1, at2, charge_prod, sigma, eps = nonbonded_force.getExceptionParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2])
if abs(charge_prod._value) < 1e-8 and abs(eps._value) < 1e-8:
# No scaling
scee = 0.0
scnb = 0.0
force_field_term.add_parameter(self.symmetry_group_default, 0, "scee", float(scee))
force_field_term.add_parameter(self.symmetry_group_default, 0, "scnb", float(scnb))
continue
else:
# Determine default scaling
charge_at1, sigma_at1, eps_at1 = nonbonded_force.getParticleParameters(at1)
charge_at2, sigma_at2, eps_at2 = nonbonded_force.getParticleParameters(at2)
try:
scee = charge_prod / (charge_at1 * charge_at2)
except:
scee = 1 / 1.2
try:
scnb = eps / np.sqrt(eps_at1 * eps_at2)
except:
scnb = 1 / 2.0
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_sc), "scee", float(scee))
force_field_term.add_parameter(self.symmetry_group_default, int(opt_sc), "scnb", float(scnb))
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def write_ff_file(self, file_name):
"""
Method that writes the force field parameters in the standard format used by ParaMol (usually .ff extension).
Parameters
----------
file_name : str
Name of the file to be written.
Returns
-------
`True` if file was closed successfully. `False` otherwise.
"""
logging.info("Writing force field to .ff file named '{}'.".format(file_name))
# Open file for writing
ff_file = open(file_name, 'w')
# Iterate over all existent forces
for force in self.force_field:
# Iterate over all force field term
for k, sub_force in enumerate(self.force_field[force]):
# For a given force occurrence, iterate over all force field terms
ff_file.write("{} {:3d} \n".format(force, self.force_groups[force][k]))
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
ff_term_line = ("{:3d} " + "{:3d} " * len(force_field_term.atoms)).format(force_field_term.idx, *force_field_term.atoms)
# For each term, iterate over all its Parameter instances
optimization_flags = ""
for parameter in force_field_term.parameters.values():
ff_term_line += "{:16.8f} ".format(parameter.value)
optimization_flags += "{:3d} ".format(int(parameter.optimize))
ff_term_line += optimization_flags
ff_term_line += " " + str(parameter.symmetry_group) + " \n"
ff_file.write(ff_term_line)
ff_file.write("END \n")
return ff_file.close()
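# Sketch of the .ff layout produced above (parameter values are illustrative only): each force
# occurrence starts with a header line "<force_name> <force_group_index>", followed by one line
# per term containing the term index, the atom indices, the parameter values, the optimization
# flags (0/1) and the symmetry group; a single "END" line closes the file, e.g.
#
#   HarmonicBondForce   0
#     0   0   1       0.10900000   284512.00000000   0   0  X
#   END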
def read_ff_file(self, file_name):
"""
Method that reads the force field parameters in the standard format used by ParaMol (usually .ff extension) and creates its ParaMol representation.
Parameters
----------
file_name : str
Name of the file to be read.
Returns
-------
`True` if file was closed successfully. `False` otherwise.
"""
# Open file for writing
ff_file = open(file_name, 'r')
# Iterate over all existent forces
for line in ff_file:
line_split = line.split()
if 'END' in line_split:
break
elif len(line_split) == 2:
# A new force was found; set the force key and force group
force_key = line_split[0]
force_index = int(line_split[1])
if force_key not in self.force_groups:
self.force_groups[force_key] = []
self.force_groups[force_key].append(force_index)
if force_key not in self.force_field:
# Create empty list for the force_key
self.force_field[force_key] = []
self.force_field[force_key].append([])
# current_occurrence of this force_key
current_occurrence = len(self.force_field[force_key])-1
continue
else:
if force_key == 'HarmonicBondForce':
idx, at1, at2, bond_eq, bond_k, bond_eq_opt, bond_k_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(bond_eq_opt), "bond_eq", float(bond_eq))
force_field_term.add_parameter(symm_group, int(bond_k_opt), "bond_k", float(bond_k))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'HarmonicAngleForce':
idx, at1, at2, at3, angle_eq, angle_k, angle_eq_opt, angle_k_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2), int(at3)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(angle_eq_opt), "angle_eq", float(angle_eq))
force_field_term.add_parameter(symm_group, int(angle_k_opt), "angle_k", float(angle_k))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'PeriodicTorsionForce':
idx, at1, at2, at3, at4, torsion_periodicity, torsion_phase,\
torsion_k, torsion_periodicity_opt, torsion_phase_opt, torsion_k_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2), int(at3), int(at4)])
# Add parameters to this FFTerm
# Note: it is currently not possible to optimize the periodicity
assert int(torsion_periodicity_opt) == 0, \
"Flag to parameterize torsions was set to {} but this is not possible.".format(torsion_periodicity_opt)
force_field_term.add_parameter(symm_group, int(0), "torsion_periodicity", int(float(torsion_periodicity)))
force_field_term.add_parameter(symm_group, int(torsion_phase_opt), "torsion_phase", float(torsion_phase))
force_field_term.add_parameter(symm_group, int(torsion_k_opt), "torsion_k", float(torsion_k))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'NonbondedForce':
idx, at, charge, sigma, eps, charge_opt, sigma_opt, eps_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(charge_opt), "charge", float(charge))
force_field_term.add_parameter(symm_group, int(sigma_opt), "lj_sigma", float(sigma))
force_field_term.add_parameter(symm_group, int(eps_opt), "lj_eps", float(eps))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'Scaling14':
idx, at1, at2, scee, scnb, scee_opt, scnb_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(scee_opt), "scee", float(scee))
force_field_term.add_parameter(symm_group, int(scnb_opt), "scnb", float(scnb))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
return ff_file.close()
def optimize_selection(self, lower_idx, upper_idx, change_other=False):
"""
Method that sets a parameter as optimizable if it belongs to a force field term for which at least one of the atoms' indices is greater than or equal to a lower_idx entry and lower than or equal to the corresponding upper_idx entry.
Notes
-----
If [10, 20] is given as the lower_idx list and [15, 25] as the upper_idx list, the selection comprises the atoms with indices 10-15 and 20-25.
Parameters
----------
lower_idx : list of int
Lower index limits.
upper_idx : list of int
Upper index limits.
change_other : bool
Whether or not the remaining parameters' optimization state is to be set to False (default is False, i.e., their optimization state is not changed).
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
assert len(lower_idx) == len(upper_idx)
# Iterate over all forces
for force in self.force_field:
# Iterate over all force field term
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
# Iterate over all atoms of a given force field term
for at in force_field_term.atoms:
for i in range(len(lower_idx)):
low_limit = lower_idx[i]
upper_limit = upper_idx[i]
if (at >= low_limit) and (at <= upper_limit):
for parameter in force_field_term.parameters.values():
parameter.optimize = 1
elif ((at < low_limit) or (at > upper_limit)) and change_other:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
# If outside range but change other is False
pass
return self.force_field
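# Usage sketch (hypothetical atom indices): make every parameter that touches atoms 10-15 or
# 20-25 optimizable while leaving the optimization state of all other parameters untouched:
#
#   ff.optimize_selection(lower_idx=[10, 20], upper_idx=[15, 25], change_other=False)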
def optimize_torsions(self, torsions, change_other_torsions=False, change_other_parameters=False):
"""
Method that sets as optimizable all parameters of the torsions contained in the list passed as an argument.
Parameters
----------
torsions : list of lists
List of lists, wherein each inner list contains the indices of the quartet of atoms that defines a torsion to be optimized.
change_other_torsions : bool
Whether or not the remaining torsions's optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
change_other_parameters : bool
Whether or not the remaining parameters' optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
# ----------------------------------------------------------------------------------------------
# Set optimization flag in ParaMol Force Field representation for given dihedrals
# ----------------------------------------------------------------------------------------------
for force in self.force_field:
if force == 'PeriodicTorsionForce':
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
# Skip torsion_periodicity parameters since these are not handled by ParaMol
if parameter.param_key != "torsion_periodicity":
if force_field_term.atoms in torsions:
parameter.optimize = 1
elif change_other_torsions:
parameter.optimize = 0
elif change_other_parameters:
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
pass
return self.force_field
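# Usage sketch (hypothetical atom indices): optimize only the torsion defined by the quartet
# 0-1-2-3 and freeze every other torsion:
#
#   ff.optimize_torsions([[0, 1, 2, 3]], change_other_torsions=True)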
def optimize_scaling_constants(self, atom_pairs, change_other_sc=False, change_other_parameters=False):
"""
Method that sets as optimizable all parameters of the scaling factors contained in the list passed as an argument.
Parameters
----------
atom_pairs : list of lists
List of lists, wherein each inner list contains the indices of a pair of atoms for which the scaling factors are to be optimized.
change_other_sc : bool
Whether or not the remaining scaling constants's optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
change_other_parameters : bool
Whether or not the remaining parameters' optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
# ----------------------------------------------------------------------------------------------
# Set optimization flag in ParaMol Force Field representation for given scaling factors
# ----------------------------------------------------------------------------------------------
for force in self.force_field:
if force == 'Scaling14':
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
if force_field_term.atoms in atom_pairs:
parameter.optimize = 1
elif change_other_sc:
parameter.optimize = 0
elif change_other_parameters:
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
pass
return self.force_field
def optimize_torsions_by_symmetry(self, torsions, change_other_torsions=False, change_other_parameters=False, set_zero=False):
"""
Method that sets as optimizable all parameters of the torsions with the same symmetry groups as the ones contained in the list passed as an argument.
Parameters
----------
torsions : list of lists
List of lists, wherein each inner list contains the indices of the quartet of atoms that defines a torsion to be optimized.
change_other_torsions : bool
Whether or not the remaining torsions's optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
change_other_parameters : bool
Whether or not the remaining parameters' optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
set_zero : bool
Whether or not to set the force constant of the optimizable torsions to 0.
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
# ----------------------------------------------------------------------------------------------
# Set optimization flag in ParaMol Force Field representation for given dihedrals
# ----------------------------------------------------------------------------------------------
# Get symmetry groups of given dihedrals
dihedral_types = []
for force in self.force_field:
if force == 'PeriodicTorsionForce':
for sub_force in self.force_field[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
if parameter.param_key != "torsion_periodicity":
if force_field_term.atoms in torsions:
dihedral_types.append(parameter.symmetry_group)
# Change the necessary optimization states
for force in self.force_field:
if force == 'PeriodicTorsionForce':
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
# Skip torsion_periodicity parameters since these are not handled by ParaMol
if parameter.param_key != "torsion_periodicity":
if parameter.symmetry_group in dihedral_types:
parameter.optimize = 1
if parameter.param_key == "torsion_k" and set_zero:
parameter.value = 0.0
elif change_other_torsions:
parameter.optimize = 0
elif change_other_parameters:
for sub_force in self.force_field[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
pass
return self.force_field
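# Usage sketch (hypothetical atom indices): optimize every torsion that shares a symmetry group
# with the quartet 0-1-2-3 and start their force constants from zero:
#
#   ff.optimize_torsions_by_symmetry([[0, 1, 2, 3]], set_zero=True)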
def set_parameter_optimization(self, force_key, sub_force, idx, param_key, optimize):
"""
Method that, for the force field term with index `idx` of the force `force_key`, sets the parameter named `param_key` to the optimization state given by `optimize`.
Parameters
----------
force_key : str
Name of the force.
sub_force : int
Occurrence of the force.
idx : int
Index of the force field term.
param_key : str
Name of the parameter.
optimize : bool
Optimization state (0 or 1).
Returns
-------
force_field : dict
Dictionary that contains force group names as keys and the corresponding lists of :obj:`ParaMol.Force_field.force_field_term.FFTerm` as values.
"""
self.force_field[force_key][sub_force][idx].parameters[param_key].optimize = optimize
return self.force_field
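# Usage sketch (hypothetical indices): switch on the optimization of the equilibrium bond
# length of term 0 in the first HarmonicBondForce occurrence:
#
#   ff.set_parameter_optimization("HarmonicBondForce", 0, 0, "bond_eq", 1)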
| true |
44d1f98dd1b5ea3e87c9c9d31a33a4bf718c083d
|
Python
|
glamod/icoads2cdm
|
/py_tools/common/colorbar_functions.py
|
UTF-8
| 2,614 | 2.703125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 09:42:02 2018
@author: iregon
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
i = 1  # figure number counter (assumed starting value; the original relies on it being set elsewhere)
for vari in vars_in:
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = cmaps.get(vari)
norm = mpl.colors.Normalize(vmin=min_value_global, vmax=max_value_global)
# Vertical + horizontal colorbars
fig = plt.figure(i,figsize = (1.8,7), dpi = 150);
ax_v = fig.add_axes([0.05, 0.05, 0.45, 0.9])
fig = plt.figure(i + 1,figsize = (14,1.8), dpi = 150);
ax_h = fig.add_axes([0.05, 0.5, 0.9, 0.45])
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb_v = mpl.colorbar.ColorbarBase(ax_v, cmap=cmap,norm=norm,orientation='vertical')
cb_v.set_label(vari + ' (' + vars_units.get(vari)+ ' )', size = colorbar_title_size)
cb_v.ax.tick_params(labelsize = colorbar_label_size)
plt.savefig('/Users/iregon/C3S/dessaps/CDSpy/' + '-'.join([str(deck),vari,'vertical_cb']) + '.png',bbox_inches='tight')
cb_h = mpl.colorbar.ColorbarBase(ax_h, cmap=cmap,norm=norm,orientation='horizontal')
cb_h.set_label(vari + ' (' + vars_units.get(vari)+ ' )', size = colorbar_title_size)
cb_h.ax.tick_params(labelsize = colorbar_label_size)
plt.savefig('/Users/iregon/C3S/dessaps/CDSpy/' + '-'.join([str(deck),vari,'horizontal_cb']) + '.png',bbox_inches='tight')
plt.close(i)
plt.close(i+1)
i +=2
vari= 'counts'
cmap = cmaps.get(cmap_counts)
norm = mpl.colors.Normalize(vmin=min_value_global, vmax=max_value_global)
# Vertical + horizontal colorbars
fig = plt.figure(i,figsize = (1.8,7), dpi = 150);
ax_v = fig.add_axes([0.05, 0.05, 0.45, 0.9])
fig = plt.figure(i + 1,figsize = (14,1.8), dpi = 150);
ax_h = fig.add_axes([0.05, 0.5, 0.9, 0.45])
cb_v = mpl.colorbar.ColorbarBase(ax_v, cmap=cmap,norm=norm,orientation='vertical')
cb_v.set_label(vari, size = colorbar_title_size)
cb_v.ax.tick_params(labelsize = colorbar_label_size)
plt.savefig('/Users/iregon/C3S/dessaps/CDSpy/' + '-'.join([str(deck),vari,'vertical_cb']) + '.png',bbox_inches='tight')
cb_h = mpl.colorbar.ColorbarBase(ax_h, cmap=cmap,norm=norm,orientation='horizontal')
cb_h.set_label(vari, size = colorbar_title_size)
cb_h.ax.tick_params(labelsize = colorbar_label_size)
plt.savefig('/Users/iregon/C3S/dessaps/CDSpy/' + '-'.join([str(deck),vari,'horizontal_cb']) + '.png',bbox_inches='tight')
plt.close(i)
plt.close(i+1)
| true |
3eeaf7f71b2b79dc94b50bd981e3dfa71bd7a68e
|
Python
|
JEDGIT18/PythonExercises
|
/EPpractice/binarydistance.py
|
UTF-8
| 823 | 3.90625 | 4 |
[] |
no_license
|
#Converts a number to binary and finds its binary gap
check = False
while not check :
try:
num = int(input("Input a number: 0 - 255 "))
check = True
except ValueError:
print("Enter a number!")
binaryStr = ""
place = 128  # value of the current bit position (renamed from "max" to avoid shadowing the built-in)
while num > 1 or len(binaryStr) != 8:
if num >= place:
num -= place
place /= 2
binaryStr += "1"
else:
binaryStr += "0"
place /= 2
# use bin(num)[2:] to create string
# A binary gap is a maximal run of zeros bounded by ones on both sides,
# so leading and trailing zeros are not counted.
maxDist = 0
innernum = 0
seenOne = False
for bit in binaryStr:
if bit == "1":
if seenOne and innernum > maxDist:
maxDist = innernum
innernum = 0
seenOne = True
else:
innernum += 1
print(binaryStr)
print("Largest Binary gap is: ", maxDist)
| true |