| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40-40 | stringclasses 1 value | stringlengths 5-133 | stringlengths 2-333 | stringclasses 30 values | int64 18-5.47M | float64 2.52-5.81 | int64 3-5 | listlengths 0-67 | stringclasses 2 values | stringlengths 12-5.47M | bool 1 class |
b362bf956d6d3dbc9af0d32514ab0c1aaec61faa
|
Python
|
Dwyanepeng/leetcode
|
/qianxu_144.py
|
UTF-8
| 1,813 | 3.515625 | 4 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/29 18:56
# @Site :
# @File : qianxu_144.py
# @Software: PyCharm
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Preorder (iterative)
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
lst = []
        if not root:
            return lst
        s = [root]
while s:
temp = s.pop()
lst.append(temp.val)
if temp.right:
s.append(temp.right)
if temp.left:
s.append(temp.left)
return lst
class Solution1:
def preorderTraversal(self, root):
        # command 0 means "emit this node's value now"; 1 means "expand this node"
        # the initial command is 1, not 0
stack = [(1,root)]
s = []
while stack:
command, node = stack.pop()
if node is None:
                # must not `return` here -- this is iteration, not recursion
continue
if command == 0:
s.append(node.val)
else:
                # command == 1 means the node was just reached during traversal
                # note: push in the reverse of the visit order
                # (differs from postorder; the stack pops node, left, right)
stack.append((1, node.right))
stack.append((1, node.left))
stack.append((0, node))
return s
# Recursive in-order traversal (renamed from a duplicate "Solution1" so it
# does not shadow the iterative class above)
class Solution2:
def inorderTraversal(self, root):
res=[]
if root:
res+=self.inorderTraversal(root.left)
res.append(root.val)
res+=self.inorderTraversal(root.right)
return res
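# Minimal sanity check (hypothetical TreeNode wiring, based on the commented
# definition above):
#   root = TreeNode(1); root.right = TreeNode(2); root.right.left = TreeNode(3)
#   Solution().preorderTraversal(root)   # -> [1, 2, 3]
#   Solution2().inorderTraversal(root)   # -> [1, 3, 2]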
| true |
b507ae447b683ff3cf3e97c02832297066a5b390
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03862/s133133737.py
|
UTF-8
| 326 | 3.09375 | 3 |
[] |
no_license
|
import sys
def LI():
    return list(map(int, sys.stdin.readline().rstrip().split()))  # whitespace-separated input
N,x = LI()
a = LI()
ans = 0
# greedily fix adjacent pairs from the second element onward (left to right)
if a[0] > x:
ans += a[0]-x
a[0] = x
for i in range(1,N):
if a[i]+a[i-1] > x:
ans += a[i]+a[i-1]-x
a[i] -= a[i]+a[i-1]-x
print(ans)
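# Worked example (sketch): N=3, x=3, a=[2, 2, 2]
#   i=1: a[1]+a[0] = 4 > 3 -> remove 1 from a[1] (ans=1, a=[2, 1, 2])
#   i=2: a[2]+a[1] = 3 <= 3 -> nothing to do; the program prints 1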
| true |
f09cbab959e97d4ceae492c80a9c342dacb1221c
|
Python
|
alexcamero/name-projects
|
/Name Clusters/python files/investigation_time_series.py
|
UTF-8
| 9,284 | 3.09375 | 3 |
[] |
no_license
|
import matplotlib.pyplot as plt
import os, json, csv, requests
import numpy as np
from bs4 import BeautifulSoup as bs
from sklearn.cluster import KMeans
def scrape_totals(
write_to_directory = '../json files/',
url = "https://www.ssa.gov/OACT/babynames/numberUSbirths.html",
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; '
+ 'Intel Mac OS X 10_15_7)'
+ 'AppleWebKit/537.36 (KHTML, like Gecko) '
+ 'Chrome/88.0.4324.150 Safari/537.36'}):
#initialize the list
data = []
# Retrieve the webpage
response = requests.get(url, headers = headers)
# Check if the request was successful
if response.status_code == 200:
#start parsing the response text
soup = bs(response.text, 'html.parser')
# Each row of data is in a <tr> tag
data_rows = soup.find_all('tr')
data_rows.pop(0)
for row in data_rows:
# Extract data from <tr> tag
cell_list = row.find_all('td')
year = int(cell_list[0].get_text())
male = int(cell_list[1].get_text().replace(',',''))
female = int(cell_list[2].get_text().replace(',',''))
total = int(cell_list[3].get_text().replace(',',''))
# Append to data list
data.append([year, total, female, male])
# Sort data by year and write to file
data.sort(key=lambda x: x[0])
with open(write_to_directory + 'totals.json', 'w') as file:
json.dump(data,file)
return data
else:
print(f"There was an issue: Status Code {response.status_code}")
raise NameError("AN HTML SITUATION")
class NameData:
def __init__(self, min_year = 1937, max_year = 2020,
directory_year = '../../Name Data/namesbyyear/',
helper_files_directory = '../json files/'):
self.min_year = min_year
self.max_year = max_year
self.files = helper_files_directory
self.file_ending = '_' + str(min_year) + '_' + str(max_year) + '.json'
#make list of relevant years and zero array of correct length
self.years = range(self.min_year, self.max_year + 1)
self.num_years = len(self.years)
self.zero = np.array([0 for _ in self.years])
self.distance_from = {"SoS":{"raw":{}, "prop":{}, "raw normed":{},
"prop normed":{}}}
#open/make file of distinct names
#format is [Name]
try:
with open(self.files + 'distinct_names'
+ self.file_ending, 'r') as file:
self.distinct_names = json.load(file)
except FileNotFoundError:
print("Distinct name file not found. Making one.....")
self.distinct_names = set()
for Y in self.years:
path = os.path.join(directory_year, "yob" + str(Y) + ".txt")
with open(path, 'r') as file:
csv_reader = csv.reader(file)
list_of_rows = list(csv_reader)
for row in list_of_rows:
self.distinct_names.add(row[0])
self.distinct_names = list(self.distinct_names)
self.distinct_names.sort()
with open(self.files + 'distinct_names'
+ self.file_ending, 'w') as file:
json.dump(self.distinct_names,file)
self.num_names = len(self.distinct_names)
self.name_index = {self.distinct_names[i]:i
for i in range(self.num_names)}
print("List of distinct names ready.")
#open/make the file of total births by year
#format is [[year, total, female, male]]
try:
with open(self.files + 'totals.json', 'r') as file:
total_data = json.load(file)
except FileNotFoundError:
print("File with total births data not found. Making one.....")
total_data = scrape_totals(write_to_directory
= self.files)
temp = {row[0]: {'B': row[1], 'F': row[2], 'M': row[3]}
for row in total_data}
self.totals = np.array([temp[year]['B'] for year in self.years])
self.totals_female = np.array([temp[year]['F'] for year in self.years])
self.totals_male = np.array([temp[year]['M'] for year in self.years])
print("Total births data ready.")
#open/make the raw time series data for the names
#format is {Name: [Int]}
try:
with open(self.files + 'time_series_national'
+ self.file_ending, 'r') as file:
tseries = json.load(file)
except FileNotFoundError:
print("Time series data file not found. Making one.....")
#initialize dictionary for time series data
tseries = [[0 for _ in range(self.num_years)]
for _ in range(self.num_names)]
for year in self.years:
#open year file
path = os.path.join(directory_year, "yob" + str(year) + ".txt")
with open(path, 'r') as file:
csv_reader = csv.reader(file)
list_of_rows = list(csv_reader)
#update dictionary for each row
for row in list_of_rows:
i = self.name_index[row[0]]
tseries[i][year - self.min_year] += int(row[2])
#write time series data to file
with open(self.files + 'time_series_national'
+ self.file_ending, 'w') as file:
json.dump(tseries, file)
#convert to np.array
self.tseries_raw = np.array(tseries)
print("Time series data ready.")
#get the total number of named people by year
self.total_named = sum(self.tseries_raw)
self.others = self.totals - self.total_named
self.distinct_named = sum(self.tseries_raw > 0)
self.lower_distinct = self.distinct_named + np.ceil(self.others/4)
self.upper_distinct = self.distinct_named + self.others
def get_matrix(self, series_type = "prop normed"):
if series_type == "prop normed":
return self.make_normed(self.tseries_raw/self.totals)
elif series_type == "raw normed":
            return self.make_normed(self.tseries_raw)  # self.tseries was undefined; the raw series is intended
elif series_type == "prop":
return self.tseries_raw/self.totals
elif series_type == "raw":
return self.tseries_raw
def make_normed(self, series):
return np.transpose(np.transpose(series)/np.amax(series, axis = 1))
def closest_neighbors_SoS(self, name, series_type = "prop normed"):
matrix = self.get_matrix(series_type)
if name in self.distinct_names:
Nvector = matrix[self.name_index[name]]
else:
Nvector = self.zero
result = np.sum(((matrix - Nvector)**2), axis=1)/self.num_years
neighbors = [(self.distinct_names[i],result[i])
for i in range(self.num_names)]
neighbors.sort(key = lambda x: x[1])
self.distance_from["SoS"][series_type][name] = neighbors
def raw(self, name):
if name in self.distinct_names:
return self.tseries_raw[self.name_index[name]]
elif name == "OTHER NAMES":
return self.others
elif name == "ALL NAMED":
return self.total_named
else:
return self.zero
def proportion(self, name):
return self.raw(name)/self.totals
def raw_normed(self, name):
series = self.raw(name)
M = max(series)
if M == 0:
M = 1
return series/M
def prop_normed(self, name):
series = self.proportion(name)
M = max(series)
if M == 0:
M = 1
return series/M
def compare_shape(self, name1, name2):
figure, axes = plt.subplots(nrows=2,ncols=1)
axes[0].plot(self.years, self.raw(name1))
axes[1].plot(self.years, self.raw(name2),'r-')
axes[0].set_title(name1)
axes[1].set_title(name2)
figure.tight_layout()
def run_kmeans(self, series_type = "prop normed",
most_clusters = 20, iterations = 10):
X = self.get_matrix(series_type)
self.km = {}
num_clusters = range(2,most_clusters+1)
for k in num_clusters:
self.km[k] = KMeans(n_clusters = k, n_init = iterations).fit(X)
SSE = [self.km[k].inertia_ for k in num_clusters]
plt.plot(num_clusters, SSE)
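# Hypothetical usage (the directory layout and helper files are assumptions of
# the original script, not verified here):
#   nd = NameData()                    # builds/loads the helper JSON files
#   nd.closest_neighbors_SoS("Mary")   # ranks all names by mean squared distance
#   nd.run_kmeans(most_clusters=10)    # plots KMeans inertia for the elbow method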
| true |
820fb4dd8bb20294d9970fa244dd3e801c7f416f
|
Python
|
liamgak/graduating_proj
|
/parse_dataset_file.py
|
UTF-8
| 4,112 | 2.90625 | 3 |
[] |
no_license
|
class ParseDatasetFile():
file_name="" #file_path
file_name_csv=""
__file_one_hop_friend="GowallaAmerica_LvOneFri.csv"
__file_two_hop_friend="GowallaAmerica_LvTwoFri.csv"
exac_two_hop_friends_list=dict()
one_hop_friends_list=dict()
def __init__(self, file_one_hop_friend="BrightkiteEurope_LvOneFri.csv", file_two_hop_friend="BrightkiteEurope_LvTwoFri.csv"):
#self.file_name=file_name
self.__file_one_hop_friend = file_one_hop_friend
self.__file_two_hop_friend = file_two_hop_friend
self.one_hop_friends_list = self.upload_friends_file(self.__file_one_hop_friend)
self.exac_two_hop_friends_list = self.upload_friends_file(self.__file_two_hop_friend)
def get_one_hop_filename(self):
return self.__file_one_hop_friend
def get_two_hop_filename(self):
return self.__file_two_hop_friend
def get_num_user(self):
return len(self.one_hop_friends_list)
def txt_to_csv(self, file_name):
        ### function that still needs revision
with open(file_name, 'r') as dataset_txt:
S=dataset_txt.read()
S=S.replace(' ', ',')
D=file_name.split('.')[0] #filename without extension
file_name_csv=D
        # CAUTION: if the csv file is opened in Excel, floats are displayed with reduced precision.
with open(D+'.csv','w') as dataset_csv:
dataset_csv.write(S)
def upload_friends_file(self, file_name):
""" dict() which has the entire information about friends, storting <user_id: user_friend_list>
"""
result_dict = dict()
        with open(file_name, 'r') as friends_file:
            S = friends_file.read()
print(S.split('\n')[0])
S=S.split('\n')[1:] # remove header
for row in S: # traversing each row in csv
                if row == '':  # ignore the trailing empty line, if present
continue
str_to_list=row.split(',')
user_id = str_to_list[0] # parsing userid
                friend_list_size = str_to_list[1]  # size of the list (unused)
user_friend_list = str_to_list[2:] # parsing friends list of specific userid
user_friend_list = list(map(int, user_friend_list)) # converting str to int
                result_dict[int(user_id)] = user_friend_list  # storing <user_id: user_friend_list>
return result_dict
def one_parse_row_friends(self, userID):
"""input is the specific UserID
:return: List of user's friends (List(int) built-in python)
"""
return self.one_hop_friends_list[userID]
def two_parse_row_friends(self, userID):
return self.exac_two_hop_friends_list[userID]
#CODE FOR TEST
# _pfs=ParseDatasetFile()
# _pfs.file_name="BrightkiteEurope_LvTwoFri.txt"
# # _pfs.txt_to_csv("Barabasi_200K_LvOneFri.txt")
# # _pfs.txt_to_csv("Barabasi_70K_LvOneFri.txt")
# # _pfs.txt_to_csv("Barabasi_80K_SpatioTemporal.txt")
# # _pfs.txt_to_csv("Barabasi_90K_SpatioTemporal.txt")
# _pfs.txt_to_csv("Barabasi_200K_SpatioTemporal.txt")
# _pfs.txt_to_csv("Barabasi_300K_SpatioTemporal.txt")
# _pfs.txt_to_csv("Barabasi_600K_LvOneFri.txt")
# _pfs.txt_to_csv("Barabasi_900K_LvOneFri.txt")
#
# _pfs.txt_to_csv("Barabasi_60K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_70K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_80K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_90K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_100K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_300K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_600K_LvTwoFri.txt")
# _pfs.txt_to_csv("Barabasi_900K_LvTwoFri.txt")
# Once done, go to the INDEX OBJ and instantiate everything >> the index file gets created.
# start_time = time.time()
#print(_pfs.two_parse_row_friends(30))
# print("A--- %s seconds ---" % (time.time() - start_time))
#parse_friend_data_obj = ParseDatasetFile()
#_pws=ParseDatasetFile('GowallaAmerica_LvTwoFri.txt')
#_p.txt_to_csv()
#_pfs.txt_to_csv()
#_pws.txt_to_csv()
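# Hypothetical usage (the default CSV files are assumptions of the original test code):
#   pfs = ParseDatasetFile()
#   print(pfs.get_num_user())
#   print(pfs.one_parse_row_friends(30))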
| true |
3d8e16a1295d35f04a4867cb9bf8a003bfe4a64e
|
Python
|
linhx13/leetcode-code
|
/code/794-valid-tic-tac-toe-state.py
|
UTF-8
| 1,985 | 3.375 | 3 |
[] |
no_license
|
from typing import List
class Solution:
def validTicTacToe(self, board: List[str]) -> bool:
cnt1, cnt2 = 0, 0
rows = [0, 0, 0]
cols = [0, 0, 0]
xrow, xcol, xdia = 0, 0, 0
orow, ocol, odia = 0, 0, 0
for x, row in enumerate(board):
for y, c in enumerate(row):
if c == "X":
cnt1 += 1
rows[x] += 1
cols[y] += 1
elif c == "O":
cnt2 += 1
rows[x] -= 1
cols[y] -= 1
if rows[x] == 3:
xrow += 1
if cols[y] == 3:
xcol += 1
if rows[x] == -3:
orow += 1
if cols[y] == -3:
ocol += 1
if not (0 <= cnt1 - cnt2 <= 1):
return False
if board[0][0] == board[1][1] and board[1][1] == board[2][2]:
if board[0][0] == "X":
xdia += 1
elif board[0][0] == "O":
odia += 1
if board[0][2] == board[1][1] and board[1][1] == board[2][0]:
if board[0][2] == "X":
xdia += 1
elif board[0][2] == "O":
odia += 1
if xrow > 1 or xcol > 1:
return False
if orow > 1 or ocol > 1:
return False
if (xrow > 0 and orow > 0) or (xcol > 0 and ocol > 0):
return False
if (xrow > 0 or xcol > 0 or xdia > 0) and cnt2 == cnt1:
return False
if (orow > 0 or ocol > 0 or odia > 0) and cnt1 > cnt2:
return False
return True
if __name__ == "__main__":
# board = ["XXX", " ", "OOO"]
# board = ["XOX", " X ", " "]
# board = ["XOX", "OXO", "XOX"]
# board = ["XXX", "XOO", "OO "]
# board = ["OXX", "XOX", "OXO"]
board = ["X ", " ", " O"]
sol = Solution()
print(sol.validTicTacToe(board))
| true |
48699e8e37181eedbee42f40070d8f6c148a6059
|
Python
|
manishbalyan/python
|
/User_Input.py
|
UTF-8
| 86 | 2.984375 | 3 |
[] |
no_license
|
response = raw_input("Hey,How are you")
response = response.lower()
print response
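# (Python 2 syntax; a Python 3 sketch of the same program:
#  response = input("Hey,How are you").lower(); print(response))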
| true |
d65e03047b363091f1285ecfc179f62a877ff034
|
Python
|
zNIKK/Exercicios-Python
|
/Python_3/DEF/MÓDULOS/Moedas/programa -- Exercitando módulos em Python.py
|
UTF-8
| 217 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
import calculo as cal
pre=float(input('Digite um preço: R$'))
print(f'A metade de R${pre} é {cal.metade(pre):.2f}')
print(f'O dobro de R${pre} é {cal.dob(pre):.2f}')
print(f'Aumentando 10%, temos {cal.porc(pre, 10):.2f}')
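# The imported "calculo" module is not part of this file; a minimal sketch
# consistent with the calls above (an assumption, not the original module):
#   def metade(p): return p / 2
#   def dob(p): return p * 2
#   def porc(p, taxa): return p + p * taxa / 100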
| true |
876c045623d9fcbf7949806698a2fdafe46cb444
|
Python
|
glue-viz/glue
|
/glue/core/tests/test_data_retrieval.py
|
UTF-8
| 1,091 | 2.671875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import numpy as np
from ..data import Data, Component
class TestDataRetrieval(object):
def setup_method(self, method):
data1 = Data()
comp1 = Component(np.arange(5))
id1 = data1.add_component(comp1, 'comp_1')
comp2 = Component(np.arange(5) * 2)
id2 = data1.add_component(comp2, 'comp_2')
data2 = Data()
comp3 = Component(np.arange(5) * 3)
id3 = data2.add_component(comp3, 'comp_3')
comp4 = Component(np.arange(5) * 4)
id4 = data2.add_component(comp4, 'comp_4')
self.data = [data1, data2]
self.components = [comp1, comp2, comp3, comp4]
self.component_ids = [id1, id2, id3, id4]
def test_direct_get(self):
assert self.data[0][self.component_ids[0]] is self.components[0].data
assert self.data[0][self.component_ids[1]] is self.components[1].data
assert self.data[1][self.component_ids[2]] is self.components[2].data
assert self.data[1][self.component_ids[3]] is self.components[3].data
| true |
a904662a56477598ec104e04f497ab27c05dbdac
|
Python
|
tberhanu/RevisionS
|
/revision/3.py
|
UTF-8
| 680 | 3.96875 | 4 |
[] |
no_license
|
""" 3. Get n largest/smallest elts of the array of dicts
"""
import heapq
arr_dicts = [{"name": "John", "age": 23, "city": "Oakland", "state": "CA"},
{"name": "Mary", "age": 33, "city": "San Jose", "state": "CA"},
{"name": "Henock", "age": 27, "city": "Las Vegas", "state": "NV"},
{"name": "James", "age": 19, "city": "Seattle", "state": "WA"}]
# print(arr_dicts)
largests = heapq.nlargest(2, arr_dicts, lambda dict: dict['age'])
print(largests)
smallests = heapq.nsmallest(2, arr_dicts, lambda dict: dict['age'])
print(smallests)
states = heapq.nsmallest(2, arr_dicts, lambda d: d['state'])
print(states)
states = heapq.nlargest(2, arr_dicts, lambda d: d['state'])
print(states)
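# Expected output (sketch): the age-based queries return Mary (33) and
# Henock (27) for nlargest, James (19) and John (23) for nsmallest; the
# state-based queries compare lexicographically, so nsmallest yields the two
# CA entries and nlargest yields WA then NV.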
| true |
68491f157665365d1d2026b4368aff2241746357
|
Python
|
iefuzzer/vnpy_crypto
|
/vnpy/data/huobi/huobi_data.py
|
UTF-8
| 7,824 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
# encoding: UTF-8
# download data from huobi
from datetime import datetime, timezone
import sys
import requests
import execjs
import traceback
from vnpy.trader.app.ctaStrategy.ctaBase import CtaBarData, CtaTickData
from vnpy.trader.vtFunction import systemSymbolToVnSymbol
period_list = ['1min','3min','5min','15min','30min','1day','1week','60min']
symbol_list = ['ltc_btc','eth_btc','etc_btc','bch_btc','btc_usdt','eth_usdt','ltc_usdt','etc_usdt','bch_usdt',
'etc_eth','bt1_btc','bt2_btc','btg_btc','qtum_btc','hsr_btc','neo_btc','gas_btc',
'qtum_usdt','hsr_usdt','neo_usdt','gas_usdt','eos_usdt','ada_usdt','xmr_usdt','zrx_usdt','ht_usdt']
class HuobiData(object):
# ----------------------------------------------------------------------
def __init__(self, strategy=None):
"""
        Constructor
        :param strategy: the owning strategy, used mainly via strategy.writeCtaLog()
"""
self.strategy = strategy
        # set the HTTP retry count and create a connection session
requests.adapters.DEFAULT_RETRIES = 5
self.session = requests.session()
self.session.keep_alive = False
def writeLog(self,content):
if self.strategy:
self.strategy.writeCtaLog(content)
else:
print(content)
def writeError(self,content):
if self.strategy:
self.strategy.writeCtaError(content)
else:
print(content,file=sys.stderr)
def get_bars(self, symbol, period, callback, bar_is_completed=False,bar_freq=1, start_dt=None):
"""
        Return candlestick (k-line) bars.
        symbol: contract symbol
        period: bar period: 1min,3min,5min,15min,30min,1day,3day,1hour,2hour,4hour,6hour,12hour
"""
ret_bars = []
symbol_pair = systemSymbolToVnSymbol(symbol)
if symbol_pair not in symbol_list:
            msg = u'{} {} is not in the download list'.format(datetime.now(), symbol_pair)
if self.strategy:
self.strategy.writeCtaError(msg)
else:
print(msg)
return False,ret_bars
symbol = symbol_pair.replace('_', '')
if period == '1hour':
period = '60min'
if period not in period_list:
            self.writeError(u'{} is outside the supported download periods: {}'.format(period, period_list))
return False,ret_bars
        url = u'https://api.huobipro.com/market/history/kline?symbol={}&period={}&size=2000&AccessKeyId=fff-xxx-ssss-kkk'.format(
            symbol, period)
        self.writeLog('{} start downloading {} {} data. URL: {}'.format(datetime.now(), symbol_pair, period, url))
content = None
try:
content = self.session.get(url).content.decode('gbk')
except Exception as ex:
self.writeError('exception in get:{},{},{}'.format(url, str(ex), traceback.format_exc()))
return False,ret_bars
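        # execjs.eval() tolerates the non-strict JSON sometimes returned here;
        # for well-formed JSON, json.loads(content) would be the usual choice.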
ret_dict = execjs.eval(content)
bars = ret_dict.get('data', None)
if not isinstance(bars, list):
            self.writeError('returned data is not a list: {}'.format(content))
return False,ret_bars
bars.reverse()
for i, bar in enumerate(bars):
add_bar = CtaBarData()
try:
add_bar.vtSymbol = symbol
add_bar.symbol = symbol
add_bar.datetime = datetime.fromtimestamp(bar[0] / 1000)
add_bar.date = add_bar.datetime.strftime('%Y-%m-%d')
add_bar.time = add_bar.datetime.strftime('%H:%M:%S')
add_bar.tradingDay = add_bar.date
add_bar.open = float(bar[1])
add_bar.high = float(bar[2])
add_bar.low = float(bar[3])
add_bar.close = float(bar[4])
add_bar.volume = float(bar[5])
except Exception as ex:
self.strategy.writeCtaError('error when convert bar:{},ex:{},t:{}'.format(bar, str(ex), traceback.format_exc()))
return False,ret_bars
            if start_dt is not None and add_bar.datetime < start_dt:  # bar is a plain list; add_bar holds the datetime
continue
if callback is not None:
callback(add_bar, bar_is_completed, bar_freq)
return True,ret_bars
def download_bars(self, symbol, period, size_=None, start_dt=None):
"""
        Return candlestick (k-line) bars as plain dicts.
        symbol: contract symbol
        period: bar period: 1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year
"""
ret_bars = []
symbol_pair = systemSymbolToVnSymbol(symbol)
if symbol_pair not in symbol_list:
            msg = u'{} {} is not in the download list'.format(datetime.now(), symbol_pair)
if self.strategy:
self.strategy.writeCtaError(msg)
else:
print(msg)
return ret_bars
symbol = symbol_pair.replace('_','')
if period =='1hour':
period = '60min'
if period not in period_list:
            self.writeError(u'{} is outside the supported download periods: {}'.format(period, period_list))
return ret_bars
url = u'https://api.huobipro.com/market/history/kline?symbol={}&period={}&AccessKeyId=fff-xxx-ssss-kkk'.format(symbol, period)
if not isinstance(size_,int):
size_=2000
url = url + u'&size={}'.format(size_)
        self.writeLog('{} start downloading {} {} data. URL: {}'.format(datetime.now(), symbol_pair, period, url))
content = None
try:
content = self.session.get(url).content.decode('gbk')
except Exception as ex:
self.writeError('exception in get:{},{},{}'.format(url,str(ex), traceback.format_exc()))
return ret_bars
ret_dict = execjs.eval(content)
bars = ret_dict.get('data',None)
if not isinstance(bars,list):
            self.writeError('returned data is not a list: {}'.format(content))
return ret_bars
bars.reverse()
for i, bar in enumerate(bars):
add_bar = {}
try:
bar_datetime= datetime.fromtimestamp(bar.get('id'))
add_bar['datetime'] = bar_datetime.strftime('%Y-%m-%d %H:%M:%S')
add_bar['date'] = bar_datetime.strftime('%Y-%m-%d')
add_bar['time'] = bar_datetime.strftime('%H:%M:%S')
add_bar['open'] = float(bar.get('open',0.0))
add_bar['high'] = float(bar.get('high',0.0))
add_bar['low'] = float(bar.get('low',0.0))
add_bar['close'] = float(bar.get('close',0.0))
add_bar['volume'] = float(bar.get('vol',0.0))
except Exception as ex:
self.writeError('error when convert bar:{},ex:{},t:{}'.format(bar, str(ex), traceback.format_exc()))
ret_bars.append(add_bar)
return ret_bars
class TestStrategy(object):
def __init__(self):
self.minDiff = 1
self.shortSymbol = 'btc'
self.vtSymbol = 'btc'
self.TMinuteInterval = 1
def addBar(self,bar,bar_is_completed, bar_freq):
print(u'tradingDay:{},dt:{},{} o:{},h:{},l:{},c:{},v:{}'.format(bar.tradingDay, bar.datetime,bar.vtSymbol, bar.open, bar.high,
bar.low, bar.close, bar.volume))
def onBar(self, bar):
print(u'tradingDay:{},dt:{},{} o:{},h:{},l:{},c:{},v:{}'.format(bar.tradingDay,bar.datetime,bar.vtSymbol, bar.open, bar.high, bar.low, bar.close, bar.volume))
def writeCtaLog(self, content):
print(content)
def writeCtaError(self, content):
print(content)
if __name__ == '__main__':
t = TestStrategy()
hb_data = HuobiData(t)
bars = hb_data.download_bars(symbol='htusdt', period='1day')
for bar in bars:
print(bar['datetime'])
| true |
e014a1d05993482f9689fca32cfd7984098c4054
|
Python
|
JayJayDee/python-lecture-examples
|
/002/dictionary_basics.py
|
UTF-8
| 161 | 3.21875 | 3 |
[] |
no_license
|
product = {
'name': '딸기',
'price': 8000
}
product_name = product['name']
print(product_name)
product_price = product['price']
print(product_price)
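# product.get('color', 'n/a') would return a default instead of raising KeyError
# for a missing key -- a common alternative to the bracket lookups above.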
| true |
0281d241a38682c5d2347f2f841500804ccd9588
|
Python
|
noureddined/AdventOfCode2016
|
/day-6/day6-1.py
|
UTF-8
| 1,414 | 3.234375 | 3 |
[] |
no_license
|
#!/usr/bin/python3
import re
import string
import pprint
from collections import Counter
file = open("input.txt", 'r')
lines = file.readlines()
file.close()
totaal = ''
for line in lines:
s = line
s = " ".join(s)
s= s.replace('\n','')
totaal = totaal +'\n'+ s
totaal = "\n".join(totaal.split("\n")[1:])
file = open("test.txt", "w")
file.write(totaal)
file.close()
file = open("test.txt", 'r')
lines = file.readlines()
file.close()
column1 = []
column2 = []
column3 = []
column4 = []
column5 = []
column6 = []
column7 = []
column8 = []
for line in lines:
parts = line.split() # split line into parts
column1.append(parts[0])
column2.append(parts[1])
column3.append(parts[2])
column4.append(parts[3])
column5.append(parts[4])
column6.append(parts[5])
column7.append(parts[6])
column8.append(parts[7])
c1 = Counter(elem[0] for elem in column1).most_common(1)[0][0]
c2 = Counter(elem[0] for elem in column2).most_common(1)[0][0]
c3 = Counter(elem[0] for elem in column3).most_common(1)[0][0]
c4 = Counter(elem[0] for elem in column4).most_common(1)[0][0]
c5 = Counter(elem[0] for elem in column5).most_common(1)[0][0]
c6 = Counter(elem[0] for elem in column6).most_common(1)[0][0]
c7 = Counter(elem[0] for elem in column7).most_common(1)[0][0]
c8 = Counter(elem[0] for elem in column8).most_common(1)[0][0]
print (c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8)
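# A more compact alternative (sketch, same answer for this 8-column input):
#   cols = zip(*[line.split() for line in lines])
#   print("".join(Counter(col).most_common(1)[0][0] for col in cols))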
| true |
0c221f171e3efbbd5579f0c8518678dff9d68640
|
Python
|
4179e1/misc
|
/python/pygame/ch4/allcolor.py
|
UTF-8
| 403 | 2.984375 | 3 |
[] |
no_license
|
import pygame
pygame.init()
screen = pygame.display.set_mode ((640,480))
all_colors = pygame.Surface((4096, 4096), depth=24)
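# 4096x4096 pixels form a 16x16 grid of 256x256 tiles: one tile per red value,
# with green along a tile's x axis and blue along its y axis.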
for r in xrange(256):
print r+1, "out of 256"
# x = (r&15) * 256
x = (r % 16) * 256
y = (r>>4) * 256
print x, y
for g in xrange(256):
for b in xrange(256):
all_colors.set_at ((x+g, y+b), (r, g, b))
pygame.image.save (all_colors, "allcolors.bmp")
| true |
e8ffe13881fc463d25a3271dd230a74dede894d5
|
Python
|
YannChye/web-scraping-challenge
|
/Missions_to_Mars/scrape_mars.py
|
UTF-8
| 3,163 | 2.84375 | 3 |
[] |
no_license
|
# setup and import dependencies
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
from time import sleep
from webdriver_manager.chrome import ChromeDriverManager
def init_browser():
executable_path={"executable_path":ChromeDriverManager().install()}
return Browser("chrome",**executable_path,headless=False)
def scrape_info():
browser=init_browser()
# NASA Mars News
# connect to url
url="https://mars.nasa.gov/news/"
browser.visit(url)
sleep(1)
# use BeautifulSoup to get latest news title and paragraph text
html=browser.html
soup=bs(html,"html.parser")
result=soup.find("li",class_="slide")
for r in result:
news_title=r.find("div",class_="content_title").text.strip()
news_p=r.find("div",class_="rollover_description_inner").text.strip()
# JPL Mars Space Images
# connect to url
url="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url)
sleep(1)
# use BeautifulSoup to get featured image
html=browser.html
soup=bs(html,"html.parser")
base_url=soup.find("a",id="jpl_logo")["href"]
img_url=soup.find("a",id="full_image")["data-fancybox-href"]
featured_image_url="https:"+base_url+img_url[1:]
# Mars Facts
# set url
url="https://space-facts.com/mars/"
# get table
tables=pd.read_html(url)
table=tables[0]
table.columns=['Description','Mars']
table.set_index('Description',inplace=True)
# convert pandas dataframe to html table + remove pandas dataframe class in html table
html_table=table.to_html()
html_table=html_table.replace('<table border="1" class="dataframe">','<table class="table table-striped">')
html_table=html_table.replace('text-align: right','text-align: left')
# Mars Hemisphere
# connect to url
url="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(url)
sleep(1)
# get hemisphere title and link to each hemisphere
title=[]
hemi_url=[]
html=browser.html
soup = bs(html,"html.parser")
hemi=soup.find_all("div",class_="description")
for h in hemi:
title.append(h.a.h3.text)
hemi_url.append("https://astrogeology.usgs.gov"+h.a["href"])
    # visit each hemisphere url to obtain the image url
img_url=[]
for url in hemi_url:
browser.visit(url)
html=browser.html
soup = bs(html,'html.parser')
for link in soup.find_all("a"):
if link.text=="Sample":
img_url.append(link["href"])
# create a list containing dictionary for each hemisphere
hemisphere_image_urls=[]
for i in range(4):
title_short=title[i].replace(" Enhanced","") # clean up title
hemisphere_image_urls.append({"title":title_short,"img_url":img_url[i]})
# quit browser
browser.quit()
# return results
mars_data={
"mars_news_title":news_title,
"mars_news_p":news_p,
"mars_feature_image":featured_image_url,
"mars_facts_table":html_table,
"mars_hemi_url":hemisphere_image_urls
}
return mars_data
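# Hypothetical driver (not part of the original module):
#   if __name__ == "__main__":
#       print(scrape_info())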
| true |
6db427a72d40e1ad982db2bd6f95155adc35167e
|
Python
|
DennisGordic/Chapter_6
|
/Challenge_6_3.py
|
UTF-8
| 1,423 | 4.375 | 4 |
[
"Unlicense"
] |
permissive
|
#Challenge_6_3
#11/24/2014
#Dennis Gordick
def main():
# Guess My Number
#
# The computer picks a random number between 1 and 100
# The player tries to guess it and the computer lets
# the player know if the guess is too high, too low
# or right on the money
import random
    def ask_number(question, low, high, step):
        """Keep asking until the response falls in low..high (inclusive)."""
        response = None
        while response not in range(low, high + 1):
            response = int(input(question))
        return response
#Opening Remarks
print("Welcome to 'Guess My Number'!")
print("I'm thinking of a number between 1 and 100.")
print("Try to guess it in as few attempts as possible.")
# set the initial values
the_number = random.randint(1, 100)
# Create the priming read here
tries = 0
guess= 0
while int(guess) != int(the_number):
guess = ask_number("Enter your guess:", 1, 100, 1)
        if int(guess) == int(the_number):
            print("You're right on the money!")
        elif int(guess) > int(the_number):
            print("Too high!")
        elif int(guess) < int(the_number):
            print("Too low!")
tries += 1
    #Didn't know how to make it reloop to the start...
print("You guessed it! The number was", the_number)
print("And it only took you", tries, "tries!")
#Program Closing
input("Press the enter key to exit.")
main()
| true |
f2ccc57b0cf46cdf754c2b536cfcb3fd5d26ca05
|
Python
|
casiarobot/motionPlanning
|
/scripts/beats_plot.py
|
UTF-8
| 740 | 2.578125 | 3 |
[] |
no_license
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_json('data-latest.json')
print(df.columns)
# df = df.loc[:, ['Planner', 'time', 'EnvironmentMesh']]
print(df.Planner.unique())
df['instance'] = df.Domain + '_' + df.Seed.astype(str) + '_' + \
df.AgentMesh + '_' + df.EnvironmentMesh + '_' + \
df.EnvironmentBounds + '_' + df.Start + '_' + df.Goal
df.instance = df.instance.str.replace(' ', '#')
sns.boxplot(x='EnvironmentMesh', y='time', hue='Planner', data=df)
plt.show()
# plt.savefig('test.svg')
df.pivot(index='instance', columns='Planner')['time'].plot.scatter('BEAST', 'BEATS')
plt.savefig('test.svg')
plt.show()
if __name__ == '__main__':
print('done')
| true |
af795a0d5e48f4be87788dd1d9cc889232c0587b
|
Python
|
gissellemm/girlswhocodeprojects
|
/dict_attack.py
|
UTF-8
| 420 | 3.78125 | 4 |
[] |
no_license
|
f = open("dictionary.txt","r")
print("Can your password survive a dictionary attack?")
dict_word = ""
#NOTE - You will have to use .strip() to strip whitespace and newlines from the file and passwords
test_password = input("Type in a trial password: ")
found = False
for word in f:
    if word.strip() == test_password.strip():
        found = True
        break
if found:
    print("Your password is too weak.")
else:
    print("Your password is okay.")
| true |
58fd6d6ba59410109f673c87663936753c7c43f7
|
Python
|
DaniaMartiuk/Ursina
|
/core.py
|
UTF-8
| 3,368 | 2.671875 | 3 |
[] |
no_license
|
from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
app = Ursina()
# load textures
grass_block = load_texture('assets/grass_block.png')
dirt_block = load_texture('assets/dirt_block.png')
stone_block = load_texture('assets/stone_block.png')
brick_block= load_texture('assets/brick_block.png')
sky_t = load_texture('assets/skybox.png')
block = grass_block
def update():
global block
if held_keys['1']:
block = grass_block
if held_keys['2']:
block = dirt_block
if held_keys['3']:
block = stone_block
if held_keys['4']:
block = brick_block
# ///////////////////////////////////////
# class Test_Qube(Entity):
# def __init__(self):
# super().__init__(
# model = 'cube',
# color = color.white,
# texture = 'white_cube',
# rotation = Vec3(45,45,45)
# )
#
# app = Ursina()
#
# def update():
#     # movement along the X axis
#     # sq.x += 0.1
#
#     # time between frames
#     # print("Time DT: ", time.dt)
#
#     # if a key is held (held_keys['<key>']), move the cube
#     # if held_keys['a']:
#     # sq.x -= 0.5
#     # if held_keys['d']:
#     # sq.x += 0.5
#     # if held_keys['s']:
#     # sq.y -= 0.5
#     # if held_keys['w']:
#     # sq.y += 0.5
#
#     # rotate the cube in place
# if held_keys['a']:
# sq.rotation_y -= 0.5
# if held_keys['d']:
# sq.rotation_y += 0.5
# if held_keys['s']:
# sq.rotation_x -= 0.5
# if held_keys['w']:
# sq.rotation_x += 0.5
#
# # ////////////////////////////////////////////
# # Game window title
# window.title = 'Test Game'
#
# # Window border (title bar), so the application can be closed
# window.borderless = False
#
# # Fullscreen mode
# window.fullscreen = False
#
# # Extra exit button for closing the window
# window.exit_button.visible = False
#
# # FPS counter visibility
# window.fps_counter.enable = True
# # ////////////////////////////////////////////
# # Create the cube
# sq = Test_Qube()
#
# app.run()
# //////////////////////////////////////////////////////////////////
class Sky(Entity):
def __init__(self):
super().__init__(
parent=scene,
model='sphere',
texture=sky_t,
scale=300,
double_sided=True
)
class Cube(Button):
def __init__(self, position = (0,0,0),block_t = grass_block):
super().__init__(
parent = scene,
position = position,
model = 'assets/block',
origin_y = 0.5,
texture = block_t,
scale = 0.5,
color = color.white,
highlight_color = color.white
)
def input(self, key):
        # runs whenever any input event occurs
if self.hovered:
if key == 'right mouse down':
cube = Cube(self.position + mouse.normal,block)
if key == 'left mouse down':
destroy(self)
for z in range(16):
for x in range(16):
cube = Cube((x,0,z))
player = FirstPersonController()
sky = Sky()
app.run()
| true |
9c149ee1752f26ad179c2eec9bc21292b786541c
|
Python
|
rabe-gitops/rabectl
|
/src/rabectl/status.py
|
UTF-8
| 2,926 | 2.71875 | 3 |
[
"Apache-2.0"
] |
permissive
|
import os
import sys
import yaml
import click
import boto3
from PyInquirer import prompt, style_from_dict
from botocore.exceptions import ProfileNotFound
from PyInquirer import Token, Separator, Validator, ValidationError
class AWSProfileValidator(Validator):
def validate(self, profile_doc):
try:
boto3.Session(profile_name=profile_doc.text)
except ProfileNotFound:
raise ValidationError(
message=('Profile not found! Enter a valid profile name, or create it: '
'"aws configure --profile <PROFILE_NAME>"'),
cursor_position=len(profile_doc.text)
)
class Resources:
style = style_from_dict({
Token.Separator: '#cc5454',
Token.QuestionMark: '#673ab7 bold',
Token.Selected: '#cc5454',
Token.Pointer: '#673ab7 bold',
Token.Instruction: '',
Token.Answer: '#f44336 bold',
Token.Question: '',
})
questions = [
{
'type': 'input',
'message': 'Insert a project name (UpperCamelCaseRecommended)',
'name': 'project'
},
{
'type': 'list',
'message': 'Select your cloud provider',
'name': 'cloud',
'choices': [
{
'name': 'AWS'
}
]
},
{
'when': lambda answers: answers['cloud'] == 'AWS',
'type': 'input',
'message': 'Insert a valid AWS CLI profile name',
'name': 'aws.profile',
'validate': AWSProfileValidator
},
{
'when': lambda answers: answers['cloud'] == 'AWS',
'type': 'input',
'message': 'Insert a valid AWS region name',
'name': 'aws.region'
},
{
'type': 'input',
'message': 'Insert a valid GitHub organization name',
'name': 'github.owner'
},
{
'type': 'input',
'message': 'Insert a valid name for your new IaC repository',
'name': 'github.repo'
},
{
'type': 'password',
'message': 'Insert a valid GitHub token with admin permissions',
'name': 'github.token'
},
{
'type': 'confirm',
'message': 'Are you sure you want to deploy your pipeline?',
'name': 'continue',
'default': True
}
]
answers = {}
def ask(self):
self.answers = prompt(self.questions, style=self.style)
return self.answers
def load(self, path):
with open(path, 'r') as f:
self.answers = yaml.load(f, Loader=yaml.FullLoader)
return self.answers
def store(self, path):
with open(path, 'w') as f:
yaml.dump(self.answers, f, default_flow_style=False)
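# Hypothetical usage sketch:
#   r = Resources()
#   answers = r.ask()           # interactive prompts via PyInquirer
#   r.store('answers.yaml')     # persist answers for a later r.load('answers.yaml')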
| true |
3728aa22af829f4907dc47c608bc011abe39a8e0
|
Python
|
khyathipurushotham/python_programs
|
/list1.py
|
UTF-8
| 81 | 2.671875 | 3 |
[] |
no_license
|
names=['khyathi','purushotham','sarada','sai']
print(names)
print(len(names))
| true |
4fa76e3320ca06fd92d2b5c7684f1b028e6b4c6f
|
Python
|
jimmcgaw/CodeEval
|
/python/timediff/timediff.py
|
UTF-8
| 1,161 | 2.90625 | 3 |
[] |
no_license
|
#!/usr/bin/python
import sys
import datetime
# check that a filename argument was provided, otherwise raise
if len(sys.argv) < 2:
raise Exception("Filename must be first argument provided")
filename = sys.argv[1]
lines = []
# open file in read mode, assuming file is in same directory as script
try:
file = open(filename, 'r')
    # read input lines from the file into a list
lines = file.readlines()
file.close()
except IOError:
print "File '%s' was not found in current directory" % filename
lines = [line.replace('\n', '') for line in lines]
try:
lines.remove("")
except:
pass
def chunkify(sequence, k):
for i in xrange(0, len(sequence), k):
yield sequence[i:i+k]
for line in lines:
start_time, end_time = line.split(' ')
# print start_time
# print end_time
start_time = datetime.datetime.strptime(start_time, '%H:%M:%S')
end_time = datetime.datetime.strptime(end_time, '%H:%M:%S')
if end_time < start_time:
start_time, end_time = end_time, start_time
diff = end_time - start_time
    hours = diff.seconds // 3600
    minutes = (diff.seconds % 3600) // 60
    seconds = diff.seconds % 60
print '%02d:%02d:%02d' % (hours, minutes, seconds)
| true |
c6239c9f326bd4680b62118338ed76c43340a8ae
|
Python
|
yinon4/YCOIN
|
/blockchain.py
|
UTF-8
| 3,623 | 3.078125 | 3 |
[] |
no_license
|
from hashlib import sha256
from datetime import date
def hash(string):
return None if (string == None) else sha256(string.encode("ascii")).hexdigest()
class Transaction:
def __init__(self, private_from_address, public_to_address, amount):
self.private_from_address = private_from_address
self.public_to_address = public_to_address
self.amount = amount
def __str__(self):
return f"\t{str(self.private_from_address)} -> ${str(self.amount)} -> {str(self.public_to_address)}"
def __repr__(self):
return str(self)
class Block:
def __init__(self, transactions, previous_hash):
self.transactions = transactions
self.previous_hash = previous_hash
        self.nonce = 0
self.date = date.today()
self.hash = ""
def hash_recalc(self):
header = str(self.nonce) + str(self.transactions) + str(self.previous_hash)
return hash(header)
def mineBlock(self, difficulty):
while not self.hash.startswith("0" * difficulty):
self.nonce += 1
self.hash = self.hash_recalc()
if self.nonce % 10000 == 0:
print(self.nonce, end = '\r')
print("nonce = " + str(self.nonce))
def __str__(self):
self.string = ""
for i in self.transactions:
self.string += str(i) + "\n"
return self.string
class Blockchain:
def __init__(self, difficulty, mining_reward):
self.chain = [self.gen_block()]
self.difficulty = difficulty
self.pending_transactions = []
self.mining_reward = mining_reward
def gen_block(self):
return Block([Transaction(None, None, 0)], 0)
def latest_block(self):
return self.chain[-1]
def minePendingTransactions(self, public_to_address):
newBlock = Block(self.pending_transactions, self.latest_block().hash)
newBlock.mineBlock(self.difficulty)
self.chain.append(newBlock)
self.pending_transactions = [Transaction(None, public_to_address, self.mining_reward)]
def balance_of_address(self, private_address):
balance = 0
public_address = hash(private_address)
        # include the pending block in the balance so it cannot go negative
pending_chain = self.chain + [Block(self.pending_transactions, self.latest_block().hash)]
for block in pending_chain:
for trans in block.transactions:
if(trans.private_from_address == private_address):
balance -= trans.amount
if(trans.public_to_address == public_address):
balance += trans.amount
return balance
def addTransaction(self, transaction):
if ((transaction.private_from_address != None) and (self.balance_of_address(transaction.private_from_address) < transaction.amount)):
print(str(transaction) + "\nNot enough funds on address")
else: self.pending_transactions.append(transaction)
def chain_validity(self):
for i in range(1, len(self.chain)):
current_block = self.chain[i]
previous_block = self.chain[i-1]
if(current_block.previous_hash != previous_block.hash):
return False
if(current_block.hash != current_block.hash_recalc()):
return False
return True
def __str__(self):
self.print = ""
for i in self.chain:
self.print += str(i) + "\n"
return self.print
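# Hypothetical usage sketch (key strings illustrative; note that hash() here
# is this module's sha256 helper, which shadows the builtin):
#   chain = Blockchain(difficulty=2, mining_reward=10)
#   chain.minePendingTransactions(hash("miner_key"))   # mine, collect the reward
#   chain.addTransaction(Transaction("miner_key", hash("friend_key"), 5))
#   print(chain.chain_validity())                      # -> True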
| true |
1a7278d3f0d74813846702d08ade55c39482f7b3
|
Python
|
atrukhanov/routine_python
|
/23_nxopen_refactoring/7_check_pmi_objects.py
|
UTF-8
| 936 | 2.84375 | 3 |
[] |
no_license
|
import NXOpen
class NXJournal:
def __init__(self):
self.session = NXOpen.Session.GetSession()
self.work_part = self.session.Parts.Work
self.lw = self.session.ListingWindow
self.PMIs = self.work_part.PmiManager.Pmis
def find_pmi_objects(self):
try:
if self.PMIs:
pmi_list = [item.JournalIdentifier for item in self.PMIs]
out_string = "Модель содержит объекты PMI \n{}".format(
"\n".join(map(str, pmi_list))
)
else:
out_string = "Модель соответствует требованиям"
return out_string
except Exception as ex:
return("find_pmi_objects failed with {}".format(ex))
def main():
app = NXJournal()
app.lw.Open()
    app.lw.WriteLine(app.find_pmi_objects())
if __name__ == "__main__":
main()
| true |
b6a70ea7f6c7132a8c60f71310f3c5140eafa479
|
Python
|
chxzh/stroke_rendering_thesis
|
/src/stroke_generalize.py
|
UTF-8
| 7,734 | 2.59375 | 3 |
[] |
no_license
|
from PIL import Image
import random
import math
def get_stroke(color, radius, length, thickness =7):
def draw_head():
r, g, b, a = color
head = Image.new("RGBA", (width, head_length), (255, 255, 255, 0))
data = list(head.getdata())
w, h = head.size
start, end = head_edge
line = [0] * (end-start)
copy_hair_bunch_list = hair_bunch_list[start:end]
for i in range(end-start):
line[i-1] = (copy_hair_bunch_list[i-2]+2*copy_hair_bunch_list[i-1]+copy_hair_bunch_list[i])*0.25
i = start
for hair_bunch in line:
dist = i - width * 0.5
try:
length = max(0, min(h,math.sqrt(radius*radius-dist*dist) * 0.5 + hair_bunch))
except ValueError:
print radius, dist, i
exit()
integer = int(length)
decimal = length - integer
delta = deltas[i]
data[i*h:i*h+integer+1] = [(r + delta, g + delta, b + delta, a)] * (integer) + [(r + delta, g + delta, b + delta, int(255*decimal))]
i += 1
data = data[:w*h]
head = head.transpose(Image.ROTATE_90)
head.putdata(data)
head = head.transpose(Image.ROTATE_90)
data = list(head.getdata())
data2 = []
for j in range(h):
line = data[j*w: j*w + w]
new_line = [(255, 255, 255, 0)] * w
for i in range(-2, w-2):
try:
a = line[i-2][3] + line[i-1][3] * 3 + line[i][3] * 6 + line[i+1][3] * 3 + line[i+2][3]
except IndexError:
print i, len(line), line[i-1]
exit()
if a != 0:
a = a/14
delta = deltas[i]
new_line[i] = (r + delta, g + delta, b + delta, a)
data2.extend(new_line)
head.putdata(data2)
im.paste(head, (0, 0))
return
def draw_tail():
r, g, b, a = color
tail = Image.new("RGBA", (width, tail_length), (255, 255, 255, 0))
data = list(tail.getdata())
w, h = tail.size
start, end = tail_edge
line = [0] * (end-start)
i = start
copy_hair_bunch_list = hair_bunch_list[start:end]
for hair_bunch in copy_hair_bunch_list:
#length = max(0, min(h,math.sqrt(radius*radius-dist*dist) * 0.5 + hair_bunch))
length = max(0, min(h, radius * 0.5 + hair_bunch))
integer = int(length)
decimal = length - integer
delta = deltas[i]
data[i*h: i*h+integer+1] = [(0, 0, 0, 255)]+ [(r + delta, g + delta, b + delta, a)] * (integer - 1) + [(r + delta, g + delta, b + delta, int(255*decimal))]
i += 1
data = data[:w*h]
tail = tail.transpose(Image.ROTATE_90)
tail.putdata(data)
tail = tail.transpose(Image.ROTATE_90).transpose(Image.FLIP_TOP_BOTTOM)
data = list(tail.getdata())
data2 = []
for j in range(h):
line = data[j*w: j*w + w]
new_line = [(255, 255, 255, 0)] * w
for i in range(-2, w-2):
try:
a = line[i-2][3] + line[i-1][3] * 3 + line[i][3] * 6 + line[i+1][3] * 3 + line[i+2][3]
except IndexError:
print i, len(line), line[i-1]
exit()
if a != 0:
a = a/14
delta = deltas[i]
new_line[i] = (r + delta, g + delta, b + delta, a)
data2.extend(new_line)
tail.putdata(data2)
im.paste(tail, (0, height-tail_length))
return
def draw_wing():
wing = Image.new("RGBA", (wing_width, height - head_length - tail_length), (255, 255, 255, 0))
w, h = wing.size
left_wing = list(wing.getdata())
for i in range(h):
left_wing[i*w+wing_width/2:i*w+w] = [color] * (w - wing_width / 2)
wing.putdata(left_wing)
im.paste(wing, (0, head_length))
im.paste(wing.rotate(180), (width-wing_width, head_length))
return head_edge, tail_edge
def draw_body():
#color = (127, 127, 127, 255)
body = Image.new("RGBA", (width-wing_width*2, height - head_length - tail_length), color)
data = list(body.getdata())
w, h = body.size
r, g, b, a = color
line = []
data2 = []
#band = 16
#data2.extend(line*50)
#deltas = [int(round(i)) for i in hair_bunch_list[wing_width/2:wing_width/2+w]]
#for i in range(w):
#delta = int(round(random.normalvariate(0, band/2)))
#deltas[i] = delta
#for i in range(w):
#delta = deltas[i]
#line[i] = (r+delta, g+delta, b+delta, a)
#data2.extend(line*50)
#for i in range(-1, w-1):
#delta = int( 0.6 * deltas[i] + 0.2 * deltas[i-1] + 0.2 * deltas[i+1])
#line[i] = (r+delta, g+delta, b+delta, a)
#data2.extend(line*50)
for delta in deltas[wing_width:wing_width+w]:
#delta = int(round( 0.6 * deltas[i+1] - 0.6 * deltas[i-1]+ 0.2 * deltas[i+2] - 0.2 * deltas[i-2]))
line.append((r+delta, g+delta, b+delta, a))
#data2.extend(line*(50))
#data2.extend([color]*w*(h-200))
data2.extend(line*h)
#for i in range(h):
#for j in range(w):
#pos = i * w + j
#data[pos] = line[j]
body.putdata(data2)
im.paste(body, (wing_width, head_length))
return
def settle_parameters(thickness):
#settle parameters including:
# head_edge, tail_edge, head_length, tail_length, wing_width
# according to the given parameters including:
# thickness, width,
# and more importantly, the hair-bunches
wing_width = radius / 4
head_length = radius
tail_length = radius
head_edge = (wing_width / 2, width - wing_width / 2)
tail_edge = (wing_width / 2, width - wing_width / 2)
sigma = radius * 0.25 * thickness / 8
hair_bunch_list.extend([random.normalvariate(0, sigma) for i in range(width)])
hblist_length = width
deltas.extend([0]*hblist_length)
for i in range(-2, hblist_length-2):
deltas[i] = int(round( 0.6 * hair_bunch_list[i+1] - 0.6 * hair_bunch_list[i-1]
+ 0.2 * hair_bunch_list[i+2] - 0.2 * hair_bunch_list[i-2]))
return wing_width, head_length, head_edge, tail_length, tail_edge
if len(color) == 3:
r, g, b = color
a = 255
color = r, g, b, a
elif len(color) == 4:
r, g, b, a = color
else:
        # wrong format of the color
        return None
    if length < radius * 2:
        # wrong size
        return None
im = Image.new("RGBA", (radius * 2, length), color)
data = im.getdata()
width = radius * 2
height = length
head_edge = (0, width - 1)
tail_edge = (0, width - 1)
wing_width = 0
head_length = 0
tail_length = 0
hair_bunch_list = []
deltas = []
wing_width, head_length, head_edge, tail_length, tail_edge = settle_parameters(thickness)
draw_body()
head_edge, tail_edge = draw_wing()
draw_head()
draw_tail()
return im
def main():
color = (0, 192, 128)
radius, length = 31, 200
thickness = 7
im = get_stroke(color, radius, length, thickness)
im.save("result.png")
im.show()
if __name__ == "__main__":
main()
| true |
b2c7e3942990e0c4b5a24dd62c2e26dbb92af5c3
|
Python
|
kimx5227/beginner_python
|
/Basal_Metabolic_Rate_Redux.py
|
UTF-8
| 307 | 3.609375 | 4 |
[] |
no_license
|
def main():
windchill()
def windchill():
    temperature = float(input("Enter temperature (F): "))
    v = float(input("Enter wind velocity (MPH): "))
    # NWS wind chill: 35.74 + 0.6215*T - 35.75*v**0.16 + 0.4275*T*v**0.16
    windchill = 35.74 + 0.6215 * temperature - 35.75 * (v ** 0.16) + 0.4275 * temperature * (v ** 0.16)
    print(windchill)
if __name__=='__main__':
main()
| true |
366eb8e0ec2973383d89cee0bfe9b3931691148e
|
Python
|
Kingdomdark/ProjectOP2
|
/main/classes/databasemanager.py
|
UTF-8
| 2,270 | 3.34375 | 3 |
[] |
no_license
|
import psycopg2
class databasemanager:
# Use the database
def interact_with_database(self, command):
# Connect and set up cursor
#connection = psycopg2.connect("dbname=game_db user=postgres password=postgres")
connection = psycopg2.connect(dbname="game_db", user="postgres", password="postgres")
cursor = connection.cursor()
# Execute the command
cursor.execute(command)
connection.commit()
# Save results
results = None
try:
results = cursor.fetchall()
except psycopg2.ProgrammingError:
# Nothing to fetch
pass
# Close connection
cursor.close()
connection.close()
return results
    # Uploads a score into the highscores table
def upload_score(self, CQuestions , turns , name):
self.interact_with_database("UPDATE highscores SET "
" correct_questions = {}, "
" turns_taken = {} "
" WHERE name = '{}' "
.format( CQuestions , turns , name))
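    # NOTE: string-formatted SQL like the above is open to SQL injection;
    # psycopg2 parameter binding, cursor.execute(sql, params), is the safer idiom.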
# Downloads score data from database
def download_scores(self):
return self.interact_with_database("SELECT * FROM highscores")
# Downloads the top score from database
def download_top_score(self):
result = self.interact_with_database("SELECT *, h.correct_questions / h.turns_taken as ranked FROM highscores h ORDER by ranked DESC LIMIT 5 ")
return result
def createTable(self):
result = self.interact_with_database(
"CREATE TABLE IF NOT EXISTS highscores(correct_questions real ,turns_taken real , name varchar);")
def insertplayer(self , CQuestions , turns , name):
self.interact_with_database("INSERT INTO highscores (correct_questions , turns_taken , name ) VALUES ( {} , {} , '{}') "
.format( CQuestions , turns , name))
db = databasemanager()
# create table if not exist
# db.createTable()
# db.insertplayer( 20 , 20 , "stefan")
# db.insertplayer( 5 , 15 , "Jordan")
# db.insertplayer( 3 , 7 , "Binh")
| true |
5613201c8407d0dd2a3c5299da933fc90dcca958
|
Python
|
knts0/atcoder
|
/AtCoder/ABC/103/a.py
|
UTF-8
| 103 | 2.90625 | 3 |
[] |
no_license
|
a, b, c = map(int, input().split())
l = sorted([abs(a - b), abs(b - c), abs(c - a)])
print(l[0] + l[1])
| true |
0c51839ed097e72c0904860dc32c16859f48c0a6
|
Python
|
dirac1/SecRouter
|
/test/execute.py
|
UTF-8
| 583 | 3.015625 | 3 |
[] |
no_license
|
import subprocess
def execute(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
# cmd_in and cmd_out are tuples
#popen = subprocess.Popen(cmd_in, stdout=subprocess.PIPE)
#output = subprocess.check_output(cmd_out, stdin=popen.stdout)
#popen.wait()
def main():
for path in execute(['ls']):
print(path, end="")
| true |
3a2ea957b6d9c772bbd18f79552e21a2bf9d10c8
|
Python
|
marinehero/Capital-construction-project
|
/Projects/BlogWebSite/框架/Inspect.py
|
UTF-8
| 1,573 | 3.234375 | 3 |
[
"MIT"
] |
permissive
|
import inspect
def a(a, b=0, *c, d, e=1, **f):
pass
aa = inspect.signature(a)  # get the function's call signature
print("inspect.signature(fn) is: %s" % aa)
bb = aa.parameters  # (get all parameters)
for name, parameter in bb.items():
    print(name, parameter)  # names: a b c d e f; parameters: a, b=0, *c, d, e=1, **f
print("signature.parameters is: %s" % bb)
print("the type of signature.parameters is %s" % type(bb))  # mappingproxy (a mapping proxy)
print("\n")
for cc, dd in bb.items():  # mapping proxy
    print("mappingproxy.items() returns the two values: %s and %s" % (cc, dd))  # e.g. a and a, b and b=0, c and *c
    print("their types are: %s and %s" % (type(cc), type(dd)))  # 'str' and inspect.Parameter
    print("\n")
    ee = dd.kind  # the parameter's kind: a, b are positional; *c is var-positional; d, e are keyword-only; **f is var-keyword
    print("Parameter.kind is: %s" % ee)
    # POSITIONAL_OR_KEYWORD (a, b=0), VAR_POSITIONAL (*c), KEYWORD_ONLY (d, e after *c), VAR_KEYWORD (**f)
    print("the type of Parameter.kind is: %s" % type(ee))  # enum '_ParameterKind'
    print("\n")
    gg = dd.default  # the default value: 0, 1, or inspect._empty when absent
    print("Parameter.default is: %s" % gg)
    print("the type of Parameter.default is: %s" % type(gg))
    print("\n")
ff = inspect.Parameter.KEYWORD_ONLY
print("inspect.Parameter.KEYWORD_ONLY is: %s" % ff)
print("the type of inspect.Parameter.KEYWORD_ONLY is: %s" % type(ff))
| true |
a534d2a6c20c0511fff2864458514d81cacc0486
|
Python
|
Code-Institute-Submissions/richard-ui-b_fitness_store_SepResub
|
/products/templatetags/product_tags.py
|
UTF-8
| 846 | 2.75 | 3 |
[] |
no_license
|
from django import template
from reviews_list.models import Reviews_list
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def calculate_rating(current_product):
reviews = Reviews_list.objects.filter(product=current_product)
review_avg = 0
    # if the product has at least one review...
if len(reviews) > 0:
for review in reviews:
review_avg += review.review_rating
review_avg = review_avg / len(reviews)
review = '<small class="text-muted">' \
'<i class="fas fa-star mr-1"></i>%d / 5</small>' \
% review_avg
else:
review = '<small class="text-muted">No Rating</small>'
# mark a string as safe for (HTML) output purposes.
return mark_safe(review)
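# Hypothetical template usage (assuming this file is loaded as "product_tags"):
#   {% load product_tags %}
#   {% calculate_rating product %}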
| true |
a5d80594ca73804687c11eb548c70428345d9080
|
Python
|
ChitturiPadma/ComputerVision
|
/10.Bitwise_Operations.py
|
UTF-8
| 1,301 | 3.21875 | 3 |
[] |
no_license
|
# coding: utf-8
# In[1]:
#Extract non-rectangular regions of interest (ROI)
import cv2
import numpy as np
import imutils
# In[2]:
#Draw a rectangle
canvas1 = np.zeros((300,300), dtype='uint8')
rectangle = cv2.rectangle(canvas1, (25, 25),(275, 275),255,-1)
cv2.imshow('Rectangle', rectangle)  # Binary image
cv2.waitKey(0)
# In[3]:
canvas2 = np.zeros((300,300), dtype='uint8')
circle = cv2.circle(canvas2, (150,150), 150,255,-1)
cv2.imshow('Circle', circle) #Binary Image
cv2.waitKey(0)
# In[4]:
#Bitwise AND - examine both pixels and if both > 0 then the pixel is turned ON and set to 255
bitwiseAnd = cv2.bitwise_and(rectangle, circle)
cv2.imshow('AND', bitwiseAnd)
cv2.waitKey(0)
# In[5]:
#Bitwise OR - extract if either of the pixels is > 0 and output pixel is set to 255
bitWiseOR = cv2.bitwise_or(rectangle, circle)
cv2.imshow('OR', bitWiseOR)
cv2.waitKey(0)
# In[6]:
#Bitwise XOR- both the pixels should not have value > 0
bitwiseXOR = cv2.bitwise_xor(rectangle, circle)
cv2.imshow('XOR', bitwiseXOR)
cv2.waitKey(0)
# In[7]:
#Bitwise NOT - invert the values of pixel values
bitwiseNOT = cv2.bitwise_not(rectangle)
cv2.imshow('NOT', bitwiseNOT)
cv2.waitKey(0)
bitwiseNOT2 = cv2.bitwise_not(circle)
cv2.imshow('NOT', bitwiseNOT2)
cv2.waitKey(0)
# In[12]:
# In[ ]:
| true |
27ebbde47617b4a0a74183f3d434366cfad42c10
|
Python
|
alvarongg/QSLcardGenerator
|
/Importador_archivos.py
|
UTF-8
| 2,947 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
import tkinter as tk
from tkinter import filedialog
import pandas as pd
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
path_foto = ''
def Generador_Imagenes( dia,
mes,
anio,
licencia,
hora,
mhz,
rst,
mod,
qsl
):
global path_foto
img = Image.open(path_foto.name)
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("Roboto-Regular.ttf", 40)
#font = ImageFont.truetype("radio_stars.ttf", 40)
#font = ImageFont.truetype(font='Arial', size=16, index=0, encoding='')
#draw.text((x, y),"Sample Text",(r,g,b))
draw.text((130,900),licencia,(0,0,0),font=font)
draw.text((380,900),dia,(0,0,0),font=font)
draw.text((470,900),mes,(0,0,0),font=font)
draw.text((550,900),anio,(0,0,0),font=font)
draw.text((700,900),hora,(0,0,0),font=font)
draw.text((900,900),mhz,(0,0,0),font=font)
draw.text((1160, 900),rst,(0,0,0),font=font)
draw.text((1300,900),mod,(0,0,0),font=font)
draw.text((1470,900),qsl,(0,0,0),font=font)
img.save(licencia+'_'+anio+'_'+mes+'_'+dia+'_'+hora+'_'+'qsl_9_Julio.jpg')
def getExcel ():
global df
global dia
global mes
global anio
global licencia
global hora
global mhz
global rst
global mod
global qsl
import_file_path = filedialog.askopenfilename()
df = pd.read_excel (import_file_path)
shape = df.shape
cantidad = shape[0]
print(cantidad)
print(df)
print(df.iloc[5,3])
for i in range(0,cantidad):
dia = str(df.iloc[i,9])
mes = str(df.iloc[i,10])
anio = str(df.iloc[i,11])
licencia = str(df.iloc[i,3])
hora = str(df.iloc[i,4])
mhz = str(df.iloc[i,5])
rst = str(df.iloc[i,6])
mod = str(df.iloc[i,7])
qsl = str(df.iloc[i,8])
print(dia+''+mes+''+anio+''+licencia+''+hora+''+mhz+''+rst+''+mod+''+qsl)
Generador_Imagenes(dia,mes,anio,licencia,hora,mhz,rst,mod,qsl)
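# Column indices used above are tied to the layout of the original Base_QSL
# spreadsheet: 3=licencia, 4=hora, 5=mhz, 6=rst, 7=mod, 8=qsl, 9=dia, 10=mes, 11=anio.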
def getImage ():
global path_foto
path_foto = filedialog.askopenfile()
print(path_foto.name)
root= tk.Tk()
root.title("Generador de tarjetas QSL")
canvas1 = tk.Canvas(root, width = 300, height = 300, bg = 'lightsteelblue')
canvas1.pack()
browseButton_Excel = tk.Button(text='Paso 2: Importar Base_QSL', command=getExcel, bg='green', fg='white', font=('helvetica', 12, 'bold'))
browseButton_Mail = tk.Button(text='Paso 1:Cargar nueva imagen', command=getImage, bg='green', fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(150, 150, window=browseButton_Mail)
canvas1.create_window(150, 200, window=browseButton_Excel)
root.mainloop()
| true |
3ef48147b11423d0fc8bbe917f16ef9ddda94fa6
|
Python
|
bozenamrozek/PYTHON
|
/03obliczenia.py
|
UTF-8
| 179 | 3.296875 | 3 |
[] |
no_license
|
a=5
b=2
print('sum =', a + b)
print('difference =', a - b)
print('quotient =', a / b)
print('product =', a * b)
print('power =', a ** b)
print('remainder =', a % b)
print('integer division =', a // b)  # floor division
| true |
2da3d29de8e36f47817676b1f5b9d2895fa2a46e
|
Python
|
praveenn7/blockchain_masterexamples
|
/Sample1.py
|
UTF-8
| 181 | 2.53125 | 3 |
[] |
no_license
|
from bitcoin import *
priv = random_key()
print("Private Key : ", priv)
pub = privtopub(priv)
print("Public Key : ", pub)
addr = pubtoaddr(pub)
print("Address : " + addr)
| true |
17202184dfe564bb7fab472f37f8c2629797a900
|
Python
|
shilpageo/pythonproject
|
/advanced python/decorators/demo3.py
|
UTF-8
| 739 | 2.9375 | 3 |
[] |
no_license
|
#
#def vaccination_portal(**kwargs):
#    print("request is allowed, location ekm")
#vaccination_portal(name="ram",age=25,address="address",health_issue=True)
#age above 65 or health_issue=True -> allowed
def decorator(fun):
    def wrapped(name,age,health_issue,place):
        if (age>65)or(health_issue==True):
            print("request is allowed to ekm")
        else:
            raise Exception("not eligible")
        # pass the arguments on as keywords, since the portal only accepts **kwargs
        return fun(name=name, age=age, place=place, health_issue=health_issue)
    return wrapped
@decorator
def vaccination_portal(**kwargs):
name=kwargs["name"]
age = kwargs["age"]
place = kwargs["place"]
health_issue = kwargs["health_issue"]
vaccination_portal(name="ram",age=67,place="ekm",health_issue=False)
| true |
c5df9d21e579388b4d08ca4bf1219615097bab55
|
Python
|
enthusiasm99/crazypython
|
/04/P94_xiti_02.py
|
UTF-8
| 220 | 4.03125 | 4 |
[] |
no_license
|
size = int(input("Enter a number: "))
for i in range(1, size + 1):
    # number of leading spaces as a function of row i
    kongge = " " * (size - i)
    # number of stars as a function of row i
    stars = "*" * (2 * i - 1)
    print(kongge + stars)
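# Sample run (size = 3):
#   *
#  ***
# *****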
| true |
1b208280698b1dde433c20b1195f0efb0102fad6
|
Python
|
mmaina8/Python-AkiraChixDataModel
|
/student/models.py
|
UTF-8
| 1,403 | 2.5625 | 3 |
[] |
no_license
|
from django.db import models
from course.models import Course
import datetime
from django.core.exceptions import ValidationError
# Create your models here.
class Student(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
date_of_birth = models.DateField()
registration_number = models.CharField(max_length=50)
place_of_residence = models.CharField(max_length=50)
phone_number = models.CharField(max_length=50)
email = models.EmailField(max_length=50)
guardian_phone = models.CharField(max_length=50)
id_number = models.IntegerField()
date_joined = models.DateField()
profile_picture = models.ImageField(upload_to="student_image",blank=True,null=True)
course = models.ManyToManyField(Course,blank=False,related_name="students")
def __str__(self):
return self.first_name + " " + self.last_name
def full_name(self):
return "{} {}".format (
self.first_name,
self.last_name)
def get_age(self):
today = datetime.date.today()
return today.year-self.date_of_birth.year
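    # Note: get_age is a plain year difference; it ignores month and day, so it
    # over-counts by one before the person's birthday each year.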
age = property(get_age)
    def clean(self):
        age = self.age
        if age < 18 or age > 30:
            raise ValidationError("Age must be between 18 and 30")
        return age
# def teachers(self):
# return [course.teacher for course in self.course.all()]
| true |
680bb01d5dec29f54dc0b598e855da72c12e0eda
|
Python
|
Firmino-Neto/Exercicios---Python--UFC-
|
/Função e vetores -py/21.py
|
UTF-8
| 557 | 3.65625 | 4 |
[
"MIT"
] |
permissive
|
def Vetor( lista, tam ):
print ("Digite os elementos da lista")
contador = 0
while contador < tam:
numero = int(input("Digite um numero: "))
lista.append( numero )
contador = contador + 1
def subtraiVetor(a,b):
c = []
i = 0
while i < len(a):
subtracao = a[i]-b[i]
c.append(subtracao)
i = i + 1
return c
# Main program [call the functions]
a = []
b = []
# Or hard-code CompLista = 10
CompLista = int(input("Enter how many numbers: "))
Vetor(a,CompLista)
Vetor(b,CompLista)
print(a)
print(b)
print(subtraiVetor(a,b))
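# Sample run with CompLista = 2, a = [5, 3], b = [2, 1] -> prints [3, 2]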
| true |
4029c8ed53ebda15d04b606e132ac3c1ec7d8b41
|
Python
|
shengchaohu/shengchao
|
/yixue/interval.py
|
UTF-8
| 5,115 | 3.265625 | 3 |
[] |
no_license
|
from collections import OrderedDict
import bisect
import load
class Interval:
'''
interval from json file, which is string by default
'''
def time_start_end(self, label=load.label, question_start_time_delta=8,
question_end_time_delta=5):
'''
return a list of time interval, consisting of [start,end,(file) index]
'''
_start_end=[[label[index][question_start_time_delta],
label[index][question_end_time_delta],index] for index in range(len(label))]
return sorted(_start_end,key=lambda x: x[0])
def merge(self,intervals):
'''
merge intervals if overlapping. Intervals is of type list of list
returns a list of list
'''
ans = []
for interval in sorted(intervals, key=lambda x: x[0]):
if not ans or interval[0] > ans[-1][1]:
ans.append(interval)
else:
ans[-1][1] = max(ans[-1][1], interval[1])
return ans
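        # e.g. merge([[1,3],[2,6],[8,10],[15,18]]) -> [[1,6],[8,10],[15,18]]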
def insert(self,intervals,newinterval,merge=0):
'''
linear time insert
Insert new interval to a list of non-overlapping intervals (merge if merge=1).
if merge=0, the two overlapping intervals will be printed out.
The latter interval will not be added. Then Insert will continue.
Intervals is of type list of list
returns a list of list
'''
ans = []
index = len(intervals)
for i in range(len(intervals)):
            if newinterval[0] < intervals[i][0]:
                index = i
                break
        intervals.insert(index, newinterval) # this insert is the built-in list method
# then repeat the same prodecure as Interval.merge(self, intervals)
for interval in intervals:
if not ans or interval[0] > ans[-1][1]:
ans.append(interval)
else:
if(merge==0):
print(interval, ans[-1])
continue
elif(merge==1):
ans[-1][1] = max(ans[-1][1], interval[1])
return ans
class Interval_quick(Interval):
'''
fast implementation of Interval
'''
def time_start_end(self):
'''
return a list of time interval, consisting of [start,end,(file) index]
'''
c=Interval.time_start_end(self)
c=[list(map(lambda ccc: int(float(ccc)), cc)) for cc in c]
return sorted(c,key=lambda x: x[0])
def insert(self,intervals,newinterval,merge=1):
'''
O(logn) time implementation of insert method
ans and intervals are of type OrderedDict
a in ans, i in intervals is of form {start time:(end time, index}
new interval is given via a tuple (start time, end time, index)
if merge=1, the overlapping intervals will be printed out and then merge.
'''
# suppose OrderedDict is implemented using double linked list
ans = OrderedDict()
# find left bound
keys = list(intervals.keys())
left_key=keys[bisect.bisect_left(keys,newinterval[0])]
right_key=keys[bisect.bisect_left(keys,newinterval[1])]
......
for i in range(len(intervals)):
            if newinterval[0] < intervals[i][0]:
                index = i
                break
        intervals.insert(index, newinterval) # this insert is the built-in list method
# then repeat the same prodecure as Interval.merge(self, intervals)
for interval in intervals:
if not ans or interval[0] > ans[-1][1]:
ans.append(interval)
else:
if(merge==0):
print(interval, ans[-1])
continue
elif(merge==1):
ans[-1][1] = max(ans[-1][1], interval[1])
return ans
class RangeModule():
def __init__(self):
self.ranges = []
def _bounds(self, left, right):
i, j = 0, len(self.ranges) - 1
for d in (100, 10, 1):
while i + d - 1 < len(self.ranges) and self.ranges[i+d-1][1] < left:
i += d
while j >= d - 1 and self.ranges[j-d+1][0] > right:
j -= d
return i, j
def addRange(self, left, right):
i, j = self._bounds(left, right)
if i <= j:
left = min(left, self.ranges[i][0])
right = max(right, self.ranges[j][1])
        self.ranges[i:j+1] = [(left, right)]
def queryRange(self, left, right):
i = bisect.bisect_left(self.ranges, (left, float('inf')))
if i: i -= 1
return (bool(self.ranges) and
self.ranges[i][0] <= left and
right <= self.ranges[i][1])
def removeRange(self, left, right):
i, j = self._bounds(left, right)
merge = []
for k in range(i, j+1):
if self.ranges[k][0] < left:
merge.append((self.ranges[k][0], left))
if right < self.ranges[k][1]:
merge.append((right, self.ranges[k][1]))
self.ranges[i:j+1] = merge
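# Usage sketch:
#   rm = RangeModule()
#   rm.addRange(10, 20)      # ranges: [(10, 20)]
#   rm.queryRange(14, 16)    # -> True
#   rm.removeRange(14, 16)   # ranges: [(10, 14), (16, 20)]
#   rm.queryRange(14, 16)    # -> False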
| true |
4644ba197803f2a7f1f6012c9676144633b540a1
|
Python
|
jzeeck/BeautifulSoap-Example
|
/handler.py
|
UTF-8
| 696 | 2.578125 | 3 |
[] |
no_license
|
from bs4 import BeautifulSoup
import requests
def processProduct(baseUrl, Item):
#print "Processing {}{}".format(baseUrl, Item['href'].encode('utf8'))
# Fetch page and parser
r = requests.get(baseUrl + Item['href'].encode('utf8'))
data = r.text
soup = BeautifulSoup(data, "html.parser")
prod_info = soup.find('div', {'id': 'prod-info'})
#print soup.prettify()
#file = open(Item['href'].encode('utf8').replace("/","-") + ".txt", "wb")
#file.write(unicode(soup.prettify()).encode("utf-8"))
#file.flush()
print prod_info.h1.string
price = prod_info.find('span', {'class': 'price'})
print price.string
print baseUrl + Item['href'].encode('utf8') + "\n"
# unicode(s).encode("utf-8")
| true |
79a36f9f3052599679dfa7c0d5d6b8e9febf6773
|
Python
|
jace/zine-main
|
/zine/utils/text.py
|
UTF-8
| 4,117 | 2.65625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
zine.utils.text
~~~~~~~~~~~~~~~
This module provides various text utility functions.
:copyright: (c) 2009 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import string
import unicodedata
from urlparse import urlparse
from werkzeug import url_quote
from zine._dynamic.translit_tab import LONG_TABLE, SHORT_TABLE, SINGLE_TABLE
_punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
_string_inc_re = re.compile(r'(\d+)$')
def gen_slug(text, delim=u'-'):
"""Generates a proper slug for the given text. It calls either
`gen_ascii_slug` or `gen_unicode_slug` depending on the application
configuration.
"""
from zine.application import get_application
if get_application().cfg['ascii_slugs']:
return gen_ascii_slug(text, delim)
return gen_unicode_slug(text, delim)
def gen_ascii_slug(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punctuation_re.split(text.lower()):
word = _punctuation_re.sub(u'', transliterate(word))
if word:
result.append(word)
return unicode(delim.join(result))
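    # e.g. gen_ascii_slug(u'Hello, World!') -> u'hello-world'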
def gen_unicode_slug(text, delim=u'-'):
"""Generate an unicode slug."""
return unicode(delim.join(_punctuation_re.split(text.lower())))
def gen_timestamped_slug(slug, content_type, pub_date):
"""Generate a timestamped slug, suitable for use as final URL path."""
from zine.application import get_application
from zine.i18n import to_blog_timezone
cfg = get_application().cfg
pub_date = to_blog_timezone(pub_date)
prefix = cfg['blog_url_prefix'].lstrip('/')
if prefix:
prefix += '/'
if content_type == 'entry':
if cfg['fixed_url_date_digits']:
year = '%04d' % pub_date.year
month = '%02d' % pub_date.month
day = '%02d' % pub_date.day
hour = '%02d' % pub_date.hour
minute = '%02d' % pub_date.minute
second = '%02d' % pub_date.second
else:
year = '%d' % pub_date.year
month = '%d' % pub_date.month
day = '%d' % pub_date.day
hour = '%d' % pub_date.hour
minute = '%d' % pub_date.minute
second = '%d' % pub_date.second
full_slug = u'%s%s%s' % (
prefix,
cfg['post_url_format'].replace(
'%year%', year).replace(
'%month%', month).replace(
'%day%', day).replace(
'%hour%', hour).replace(
'%minute%', minute).replace(
'%second%', second),
slug
)
else:
full_slug = u'%s%s' % (prefix, slug)
return full_slug
def increment_string(string):
"""Increment a string by one:
>>> increment_string(u'test')
u'test2'
>>> increment_string(u'test2')
u'test3'
"""
match = _string_inc_re.search(string)
if match is None:
return string + u'2'
return string[:match.start()] + unicode(int(match.group(1)) + 1)
def transliterate(string, table='long'):
"""Transliterate to 8 bit using one of the tables given. The table
must either be ``'long'``, ``'short'`` or ``'single'``.
"""
table = {
'long': LONG_TABLE,
'short': SHORT_TABLE,
'single': SINGLE_TABLE
}[table]
return unicodedata.normalize('NFKC', unicode(string)).translate(table)
def build_tag_uri(app, date, resource, identifier):
"""Build a unique tag URI.
The tag URI must obey the ABNF defined in
http://www.faqs.org/rfcs/rfc4151.html """
host, path = urlparse(app.cfg['blog_url'])[1:3]
if ':' in host:
host = host.split(':', 1)[0]
path = path.strip('/')
if path:
path = ',' + path
if not isinstance(identifier, basestring):
identifier = str(identifier)
return 'tag:%s,%s:%s/%s;%s' % (host, date.strftime('%Y-%m-%d'), path,
url_quote(resource), url_quote(identifier))
| true |
9cf789f529d8c44b847652739809346399164e9e
|
Python
|
ancestor-mithril/UAIC_FII_PYTHON
|
/PythonProjects/Spanzuratoarea/main.py
|
UTF-8
| 1,637 | 3.046875 | 3 |
[] |
no_license
|
"""C3. Spanzuratoarea
Sa se scrie o aplicatie in care utilizatorul trebuie sa ghicesca un anumit cuvant. Cuvintele vor
fi predefinite si vor avea o anumita categorie. La rulare userul alege o categorie si se va alege
un cuvant random din cele existente in categoria aleasa. Utilizatorul poate incerca cate o
litera odata. Daca ghiceste o litera, atunci i se vor afisa pozitiile din cuvant pentru litera
respectiva. Utilizatorul are voie sa greseasca literele de un anumit numar maxim de incercari
(in functie de lungimea cuvantului). In timpul jocului se va afisa numarul de incarcari ramase.
La final, se va afisa cuvantul si numarul de incercari esuate. Cuvintele vor fi salvate in fisiere
specifice categoriilor din care fac parte. De asemenea, se va tine evidenta scorurilor (tot
intr-un fisier).
"""
import os
import re
from Spanzuratoarea.utils import handle_user_input, play_game
from utilities import error_print, color_print
def run():
game_folder = "game_folder"
if not os.path.isdir(game_folder):
error_print("run build_game.py script first to initialize game")
exit(0)
    assert len(os.listdir(game_folder)) > 0, "make sure there is at least 1 category able to be chosen"
color_print(f"Bine ai venit la spanzuratoarea!")
color_print(f"Alege una din urmatoarele categorii pentru joc:\n{', '.join(os.listdir(game_folder))}")
possible_categories = "|".join(os.listdir(game_folder))
pattern = re.compile(f"({possible_categories})", re.IGNORECASE)
chosen_category = handle_user_input(pattern).lower()
play_game(chosen_category, game_folder)
if __name__ == "__main__":
run()
| true |
2e5e466262dcf6209a1f20ccfbbb751f40974885
|
Python
|
daivikswarup/opensoft16
|
/Grapher/test.py
|
UTF-8
| 28,014 | 2.6875 | 3 |
[] |
no_license
|
import curve
import math
from curve import curve
import numpy as np
import cv2
from SimpleCV import *  # needed for the Image(...) calls used below
from pytesseract import image_to_string
from PIL import Image as IMAGE
#from matplotlib import pyplot as plt
import matplotlib.pyplot as plt  # needed by findColorNnumOfPlots
import re
import time
from sklearn.cluster import KMeans
class graph:
def __init__(self,document,pageno,x2,x4,y2,y4,imagename,crop_image):
# reference to the document
self.document = document
# page in which the graph is present
self.pageno = pageno
#coordinates
self.x2 = x2
self.x4 = x4
self.y2 = y2
self.y4 = y4
#scale
self.dx = 0 #get it by calling findGradient(marking_imageurl) method
self.dy = 0 #get it by calling findGradient(marking_imageurl) method
#marking
self.minx = 0 #get it by calling findMarkings(marking_imageurl)method twice.
self.maxx = 0 #one time each for x and y axes
self.miny = 0 #not working for bad quality images.
self.maxy = 0 #markings on image must be sharp and clear enough.
# is it a log graph
self.isLog = False
#poll
self.pollDistance = 0
#curve point data for
self.curveList = []
self.xlabel = None #get it by calling findLabel(label_imageurl) method twice.
self.ylabel = None #one time each for x and y axes.
self.description = None #even description can be obtained by the same method.
self.graphboundX=None
self.graphboundY=None
self.image=imagename
self.rectangle=crop_image
self.textBoxImages=[]
# to find x axis label and values
def findxaxis_width(self,x1,y1,x2,y2):
#cv2.imwrite("x_width.png",self.image)
#img = IMAGE.open("x_width.png")
#w,h=img.size
#print'img size w=%d h=%d' %(w,h)
#im = img.load()
im=self.image
#print self.image
# print "x_width"
# print "x1=%d y1=%d x2=%d y2=%d"%(x1,y1,x2,y2)
y_temp=y1
y_array=[]
alternate_flag=1
while(len(y_array)<3):
x_temp=x1
#print "husdh"
#print x_temp,x1,x2
a=np.array([200,200,200])
flag=1
while(x_temp<x2-1):
# print "x_temp=%d y_temp=%d x2=%d"%(x_temp,y_temp,x2)
#print (im[y_temp,x_temp].tolist())
if( (im[y_temp,x_temp+1].tolist() < [200,200,200]or im[y_temp,x_temp+1].tolist()<[200,200,200])):
#print "failed"
flag=0
break
x_temp=x_temp+1
# print "out of loop flag=%d"%(flag)
# print x_temp,y_temp
if(flag==1 and alternate_flag==1):
y_array.append(y_temp)
alternate_flag=0
elif(flag==0):
alternate_flag=1
y_temp+=1
self.graphboundY=y_array[2]
return y_array
def findyaxis_width(self,x4,y4,x1,y1):
# img = Image.open(self.image)
# w,h=img.size
# im = img.load()
im=self.image
digit_pixel_size=(self.x2-x1)*9/236
marking_pixel_distance=(self.x2-x1)*10/488
print digit_pixel_size,marking_pixel_distance
x_temp=x4
x_array=[]
count =0
alternate_flag=1
while(x_temp>=0 and count<3):
y_temp=y4
flag=1
while(y_temp<y1-1):
if((im[y_temp,x_temp].tolist()<[200,200,200] or im[y_temp+1,x_temp].tolist()<[200,200,200])):
flag=0
break
y_temp+=1
if(flag==1 and alternate_flag==1):
if(count==0):
#print "count 0"
#print x4-x_temp,marking_pixel_distance
if(x4-x_temp>marking_pixel_distance):
return -1
if(count==2):
#print "count 2"
#print x_array[1],x_temp
#print x_array[1]-x_temp,digit_pixel_size
if(x_array[1]-x_temp>digit_pixel_size):
x_array.append(x_temp)
count+=1
else:
x_array[1]=x_temp
else:
print "count 1/0"
print count
x_array.append(x_temp)
count+=1
alternate_flag=0
elif(flag==0):
alternate_flag=1
x_temp-=1
print x_array
self.graphboundX=x_array[2]
return x_array
def findLabel(self):
x1=self.x4
y1=self.y2
img=self.image
#print self.x2,self.x4,self.y2,self.y4
y=self.findxaxis_width(x1,y1,self.x2,self.y2)
#print y
iterator=0
y_array_len=len(y)
if(y_array_len==2):
y.insert(2,y[1]+100)
y_array_len+=1
#print y
#print 'len of y1 %d' % (y_array_len)
while iterator<y_array_len-1:
#print "iterator"+str(iterator)
cropped = img[y[iterator] :y[iterator+1] ,x1:self.x2]
s = 'images/temp_x' +str(iterator+1)+'.jpg'
cv2.imwrite(s , cropped)
iterator+=1
x=self.findyaxis_width(self.x4,self.y4,x1,y1)
print x
if(x!=-1):
x_array_len=len(x)
if(x_array_len==2):
x.insert(x_array_len,0)
x_array_len+=1
#print x
iterator=x_array_len-1
#print 'len of x1 %d' % (x_array_len)
i=0
while iterator>0:
if(iterator==2 and x_array_len==4):
cropped = img[self.y4:y1, x[iterator]: x[iterator-2]]
iterator=1
else:
cropped = img[self.y4:y1, x[iterator]: x[iterator-1]]
s = 'images/temp_y'+str(i+1)+'.jpg'
cv2.imwrite(s , cropped)
iterator-=1
i+=1
else:
print 'not a graph'
#OCR for text labels and description of the graph. Call this method once for each of the three values.
def findLabelText(self):
for i in range(0,2):
flag=0
if i==0:
img = Image("images/temp_x2.jpg",0)
else:
img = Image("images/temp_y1.jpg",0)
#print str(img.width) +" " + str(img.height)
if img.width<img.height:
img = img.rotate(-90,fixed = False)
flag=1
#if img.width/img.height<3 : #crop image
#print str(img.width) +" " + str(img.height)
#if img.width<400 :
#img = img.resize(img.width*5,img.height*5)
if img.height<50 :
#print "ok"
img = img.resize(img.width*2,img.height*4)
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
img_bin = img_bin.dilate(2)
img_bin = img_bin.erode(1)
else:
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
#elif flag!=1:
img_bin.save("images/temp.jpg")
img = cv2.imread("images/temp.jpg")
dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
label = image_to_string(IMAGE.fromarray(dst),lang='eng')
if flag==1:
self.ylabel = label
print label
else:
self.xlabel = label
print label
#return label
#pass
#OCR for markings on x and y axes.Returns min & max value of the markings on an axis in that order.Call this method once for each axis.
def findMarkings(self):
#flag=0
for i in range(0,2):
if i == 0:
img = Image("images/temp_x1.jpg",0)
else:
img = Image("images/temp_y2.jpg",0)
if i==0:
num_marking = int(img.width/self.dx+1)
else:
num_marking = int(img.height/self.dy+1)
#print str(img.width) +" " + str(img.height)
#if img.width/img.height<3 : #crop image
if img.width<70:
#print "ok"
img = img.resize(img.width*4,img.height*4)
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
img_bin = img_bin.dilate(2)
img_bin = img_bin.erode(1)
#img_bin.erode().show()
#print str(img.width) +" " + str(img.height)
#if img.width<400 :
#img = img.resize(img.width*5,img.height*5)
else:
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
if img.height<50:
#print "ok"
img = img.resize(img.width*4,img.height*4)
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
img_bin = img_bin.dilate(2)
img_bin = img_bin.erode(1)
else:
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
"""
if flag==1:
img_dilate = img_inv.dilate(2)
img_erode = img_dilate.erode(2)
img_erode.save("rot_1.jpg")
"""
#elif flag!=1:
img_bin.save("images/temp.jpg")
img = cv2.imread("images/temp.jpg")
dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
marking = image_to_string(IMAGE.fromarray(dst),lang='eng')
marking = marking.replace("\n"," ")
marking = marking.split(" ")
print marking
for j in marking:
if j=='':
marking.remove(j)
for j in range(len(marking)):
print len(marking)
marking[j] = marking[j].replace("O","0")
#if marking[i][1]=="O":
#marking[i][1]="0"
for j in marking:
if re.match("^\d+?\.?\d+?$", j) is None:
marking.remove(j)
print len(marking)
for j in range(len(marking)):
marking[j]=float(marking[j])
marking=sorted(marking)
print marking
for j in range(len(marking)):
if marking[1]/marking[0] == marking[2]*1.0/marking[1]:
self.isLog = True
else:
self.isLog = False
"""
if num_marking>len(marking) and self.isLog==False:
if num_marking-len(marking)==2:
#print "diff is 2"
marking.append(marking[1]-2*marking[0])
marking.sort()
marking.append(marking[len(marking)-1]+(marking[1]-marking[0]))
marking.sort()
if num_marking-len(marking)==1:
marking.append(marking[len(marking)-1]+(marking[1]-marking[0]))
marking.sort()
#else:
#i=int(i)
if num_marking>len(marking) and self.isLog==True:
if num_marking-len(marking)==2:
#print "diff is 2"
marking.append(marking[0]**2/marking[1])
marking.sort()
marking.append(marking[len(marking)-1]*(marking[1]/marking[0]))
marking.sort()
if num_marking-len(marking)==1:
marking.append(marking[len(marking)-1]*(marking[1]/marking[0]))
marking.sort()
"""
if self.isLog==True:
marking.append(marking[0]**2/marking[1])
marking.sort()
marking.append(marking[len(marking)-1]*(marking[1]/marking[0]))
marking.sort()
if self.isLog==False:
marking.append(2*marking[0]-marking[1])
marking.sort()
marking.append(marking[len(marking)-1]+(marking[1]-marking[0]))
marking.sort()
print marking
mark_len = len(marking)
if i==0:
self.minx=marking[0]
self.maxx=marking[mark_len-1]
else:
self.miny=marking[0]
self.maxy=marking[mark_len-1]
#returns number of pixels between two markings on an axis. Call this method once for each axis.
def findGradient(self):
for i in range(0,2):
if i==0:
img = Image("images/temp_x1.jpg",0)
else:
img = Image("images/temp_y2.jpg",0)
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
resize=1
if img_bin.width<70:
resize = 1
img_bin = img_bin.resize(img.width*4,img.height*resize)
flag=0
if img_bin.width<img_bin.height:
flag = 1
img_bin = img_bin.rotate(-90,fixed = False)
img_bin.save("images/temp.jpg")
img = cv2.imread("images/temp.jpg")
#h_img, w_img = img.shape[:2]
#if w_img<h_img:
img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # grayscale
_,thresh = cv2.threshold(gray,150,255,cv2.THRESH_BINARY_INV) # threshold
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(2,2))
dilated = cv2.dilate(thresh,kernel,iterations = 13) # dilate
contours, hierarchy = cv2.findContours(dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) # get contours
height = np.size(img, 0)
width = np.size(img, 1)
index =1
rect_dim=[]
for contour in contours:
# get rectangle bounding contour
[x,y,w,h] = cv2.boundingRect(contour)
y=y+h
rect = [x,y,w,h]
# discard areas that are too large
if h>0.7*height and w>0.7*width:
continue
# discard areas that are too small
if h<height*0.02 or w<width*0.02:
continue
#print rect
cv2.rectangle(img,(x,y),(x+w,y-h),(255,0,255),2)
rect_dim.append(x+w/2.0)
cv2.imwrite("images/contoured.jpg", img)
rect_dim.sort(reverse=True)
pix_diff=[]
#print len(rect_dim)
#print rect_dim
for i in range(0,len(rect_dim)-1):
pix_diff.append(rect_dim[i]-rect_dim[i+1])
print pix_diff
print sum(pix_diff)/len(pix_diff)
print sum(pix_diff[:-1])/len(pix_diff[:-1])
if abs(sum(pix_diff)/len(pix_diff)-sum(pix_diff[:-1])/len(pix_diff[:-1]))>3:
pix_diff.remove(pix_diff[-1])
pix_avg=sum(pix_diff)/(len(pix_diff)*1.0)
pix_avg = pix_avg/resize
if flag==1:
pix_avgy=pix_avg
print "pix_avgy="+str(pix_avgy)
self.dy = pix_avgy
else:
pix_avgx=pix_avg
print "pix_avgx="+str(pix_avgx)
self.dx = pix_avgx
def findCrop(self):
for i in range(len(self.textBoxImages)):
cv2.imwrite("images/temp_textbox.png",self.textBoxImages[i])
img=IMAGE.open("images/temp_textbox.png")
# img = cv2.imread("temp_textbox.png")
# w=np.size(img, 0)
# h=np.size(img, 1)
w,h=img.size
print'img size w=%d h=%d i=%d' %(w,h,i)
im = img.load()
y_temp=0
while(y_temp<h):
x_temp=0
while(x_temp<w-1):
print y_temp,x_temp,h,w
print im[x_temp,y_temp]
if((im[x_temp,y_temp]<(200,200,200) or im[x_temp+1,y_temp]<(200,200,200))):
img=cv2.imread("images/temp_textbox.png")
cropped = img[y_temp-1:h,0:w]
s = 'images/onlylabel_'+str(i+1)+'.png'
cv2.imwrite(s , cropped)
return y_temp
x_temp+=1
y_temp+=1
print "als x_temp=%d y_temp=%d"%(x_temp,y_temp)
#Returns all the labels and the corresponding colors in HSV of plots in graph in a curve datastructure.
def findColorNnumOfPlots(self):
for i in range(len(self.textBoxImages)):
#flag=0
s='images/onlylabel_'+str(i+1)+'.png'
img = Image(s,0)
#print str(img.width) +" " + str(img.height)
#print str(img.width) +" " + str(img.height)
#if img.width<400 :
#img = img.resize(img.width*5,img.height*5)
img_inv = img.invert()
img_inv.scale(100,100)
img_bin = img_inv.binarize()
"""
if flag==1:
img_dilate = img_inv.dilate(2)
img_erode = img_dilate.erode(2)
img_erode.save("rot_1.jpg")
"""
#elif flag!=1:
img_bin.save("images/rot_1.jpg")
img = cv2.imread("images/rot_1.jpg")
dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
graphs = image_to_string(IMAGE.fromarray(dst),lang='eng')
graphs = graphs.replace("\n",">%$ ") #formatting the string to get the list
graphs = graphs.split(">%$ ") #of plots plotted in given graphs
print graphs
graphNamesList=graphs
n=len(graphs) #number of plots in given graph
img = Image(s,0)
img = img.resize(img.width*3,img.height*3) #resizing the image to make it big enough for cropping
#print height
#img = img.crop(15,15,img.width,img.height) #removing the edges of the given graph description image
height = (img.height)*1.0/n
width = img.width
graphList=[]
start = 0
for i in range(0,n): #cropping the image so as to get a single plot description
cropImg = img.crop(0, start, width, height) #in one image
graphList.append(cropImg)
start+=height
graphList1 = graphList
#time.sleep(3)
"""
graphName = []
for i in graphList1: #getting the names of all the plots from the images cropped above
#i = i.resize(i.width*4,i.height*4)
i = i.invert()
i.scale(100,100)
i = i.binarize()
#i = i.erode()
i.save("temp.jpg")
i = cv2.imread("temp.jpg")
i = cv2.fastNlMeansDenoisingColored(i,None,10,10,7,21)
g = image_to_string(IMAGE.fromarray(i),lang='eng')
print g
print "\n"
graphName.append(g)
"""
graphColor = []
for i in graphList: #finding colors of plots of all the images cropped above
i.save("images/temp.jpg")
#raw_input()
imge = cv2.imread("images/temp.jpg",1)
imge = cv2.fastNlMeansDenoisingColored(imge,None,10,10,7,21)
imge = cv2.cvtColor(imge, cv2.COLOR_BGR2RGB)
#imge = cv2.cvtColor(imge, cv2.COLOR_RGB2HSV)
# show our image
plt.figure()
#plt.axis("off")
#plt.imshow(imge)
imge = imge.reshape((imge.shape[0] * imge.shape[1], 3))
n_clusters = 3 #number of clusters in kmeans clustering
clt = KMeans(n_clusters = 3)
clt.fit(imge)
hist = centroid_histogram(clt)
bar,color = plot_colors(hist, n_clusters, clt.cluster_centers_)
#bar = cv2.cvtColor(bar,cv2.COLOR_GRAY2RGB)
# show our color bart
plt.figure()
#plt.axis()
#plt.imshow(bar)
#plt.show()
if color[0]>240 and color[1]>240 and color[2]>240:
color = [10.00, 10.00, 10.00]
color = list(rgb2hsv(color[0],color[1],color[2]))
#color[1] = color[1] #increasing the picture saturation and value of the image
#color[2] = color[2] #which got reduced due to processing
#color = hsv2rgb(color[0],color[1],color[2])
print color
graphColor.append(color)
for i in range(0,len(graphColor)):
c = curve(graphColor[i],graphNamesList[i])
#c.color(graphColor[i])
#c.name(graphName[i])
self.curveList.append(c)
#return graphColor, graphName
#pass
print graphNamesList
print graphColor
print self.curveList
    # NOTE: renamed from a second findMarkings to avoid shadowing the OCR method
    # above; this fill-data routine does not yet handle textboxes inside the
    # graph, and it still references self.x1/self.y1, which __init__ never sets.
    def fillDataTable(self):
table=np.zeros((len(self.curveList),self.x2-self.x1,2))
for i in range(self.x1,self.x2):
for j in range(self.y1,self.y2):
for k in range(0,len(self.curveList)):
if img_object[j,i].all()<(self.curveList[k]+20).all() and img_object[i,j].all()>(self.curveList[k]-20).all():
#replace image list with appropriatre object
if self.miny+j*self.dy>table[k,i-1][1]-(self.maxy-self.miny)/10 and self.miny+j*self.dy<table[k,i-1][1]+(self.maxy-self.miny)/10 :
self.istable=True
if self.isLog==False:
table[k,i]=[i*self.dx+self.minx,self.miny+j*self.dy]
else:
table[k,i]=[i*self.dx,math.exp(self.dy)]
return table
    #End of Function to avoid merge conflict -Adapa
def centroid_histogram(clt):
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1) # grab the number of different clusters and create a histogram
(hist, _) = np.histogram(clt.labels_, bins = numLabels) # based on the number of pixels assigned to each cluster
hist = hist.astype("float") # normalize the histogram, such that it sums to one
hist /= hist.sum()
return hist # return the histogram
def plot_colors(hist, n_clusters, centroids):
bar = np.zeros((50, 300, 3), dtype = "uint8") # initialize the bar chart representing the relative frequency
startX = 0 # of each of the colors
i = 0 # loop over the percentage of each cluster and the color of
mini = 300 # each cluster
colval = 0
for (percent, color) in zip(hist, centroids):
endX = startX + (percent * 300) # plot the relative percentage of each cluster
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
color.astype("uint8").tolist(), -1)
flag=0
"""
if color[0]>240 and color[1]>240 and color[2]>240:
i=i+1
startX = endX
continue
"""
temp=endX-startX
if temp<mini and ((color[0]-color[1])>3 or (color[1]-color[0])>3 or (color[2]-color[1])>3 or (color[1]-color[2])>3 or (color[2]-color[0])>3 or (color[0]-color[2])>3):
mini = temp
flag=1
colval = i
i=i+1
startX = endX
"""
if flag==1 and color[0]>240 and color[1]>240 and color[2]>240:
i=i+1
startX = endX
continue
"""
#color_req = color
#startX = endX
# return the bar chart
print colval
color_req = centroids[colval]
return bar,color_req
def rgb2hsv(r, g, b):
r, g, b = r/255.0, g/255.0, b/255.0
mx = max(r, g, b)
mn = min(r, g, b)
df = mx-mn
if mx == mn:
h = 0
elif mx == r:
h = (60 * ((g-b)/df) + 360) % 360
elif mx == g:
h = (60 * ((b-r)/df) + 120) % 360
elif mx == b:
h = (60 * ((r-g)/df) + 240) % 360
if mx == 0:
s = 0
else:
s = df/mx
v = mx
return h, s, v
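# e.g. rgb2hsv(255, 0, 0) -> (0.0, 1.0, 1.0)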
def hsv2rgb(h, s, v):
h = float(h)
s = float(s)
v = float(v)
h60 = h / 60.0
h60f = math.floor(h60)
hi = int(h60f) % 6
f = h60 - h60f
p = v * (1 - s)
q = v * (1 - f * s)
t = v * (1 - (1 - f) * s)
r, g, b = 0, 0, 0
if hi == 0: r, g, b = v, t, p
elif hi == 1: r, g, b = q, v, p
elif hi == 2: r, g, b = p, v, t
elif hi == 3: r, g, b = p, q, v
elif hi == 4: r, g, b = t, p, v
elif hi == 5: r, g, b = v, p, q
r, g, b = int(r * 255), int(g * 255), int(b * 255)
return r, g, b
'''g1=graph('doc',1,292,57,232,39,'images/d.jpg')
g1=graph('doc',1,1309,409,850,290,'images/a.jpg')
g1.findLabel()
g1.findLabelText()
g1.findMarkings()
g1.findGradient()
print g1.xlabel
print g1.ylabel
print g1.dx
print g1.dy
print g1.minx, g1.maxx
print g1.miny, g1.maxy
print g1.isLog'''
#289,407,855,1
| true |
5e5ac1d9af64a08f513d8eccf281aac64af9c5cc
|
Python
|
bhaskarbagchi/HackerNewsRanking
|
/Ycombinator_scrape/ycombinator_scrape.py
|
UTF-8
| 715 | 2.765625 | 3 |
[] |
no_license
|
from bs4 import BeautifulSoup
import requests
base_url = 'https://news.ycombinator.com/news?p='
for page_num in xrange(28):
    page = requests.get(base_url + str(page_num + 1))
    soup = BeautifulSoup(page.text)
headings = soup.find_all('tr', { 'class':'athing' })
comments = soup.find_all('td', { 'class':'subtext'})
for i in xrange(len(headings)):
if headings[i].span:
print headings[i].span.string.strip()
xx = headings[i].find_all('a')
for x in xx:
if x.parent.name == 'td':
data = x.string
udata=data.encode("utf-8").strip()
asciidata=udata.decode("ascii","ignore")
print asciidata
if comments[i].span:
print comments[i].span.string
xx = comments[i].find_all('a')
for x in xx:
print x.string
| true |
240df440a7f0da9b43436d8016ffb9cd709dc3a8
|
Python
|
bematthe/IST736-Text-Mining
|
/Becky_MatthewsPeaseHW07.py
|
UTF-8
| 18,757 | 2.578125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 21:13:20 2021
@author: Becky Matthews-Pease
"""
############################################################
#Packages
import nltk
import sklearn
import re
import os
import string
import pandas as pd
import numpy as np
import random as rd
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from nltk.corpus import stopwords
RawfileName = "C:/Users/becky/Desktop/DartData.csv"
AllReviewsList=[]
AllLabelsList=[]
#For loop to split reviews and save to lists
with open(RawfileName,'r') as FILE:
FILE.readline()
for row in FILE:
print(row)
NextLabel,NextReview=row.split(",", 1)
AllReviewsList.append(NextReview)
AllLabelsList.append(NextLabel)
print(AllReviewsList)
print(AllLabelsList)
#InsertVectorizer
MyCV1 = CountVectorizer(input = "content",
stop_words = "english")
MyFile = "C:/Users/becky/Desktop/DartData.csv"
#Stemmer
STEMMER = PorterStemmer()
# Use NLTK's PorterStemmer in a function
def MY_STEMMER(str_input):
words = re.sub(r"[^A-Za-z\-]", " ", str_input).lower().split()
words = [STEMMER.stem(word) for word in words]
return words
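# e.g. MY_STEMMER("Running runs!") -> ['run', 'run']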
#Create different vectorizers:
MyVect_STEM = CountVectorizer(input = 'content',
analyzer = 'word',
stop_words = 'english',
tokenizer = MY_STEMMER,
lowercase = True
)
MyVect_IFIDF_STEM = TfidfVectorizer(input = 'content',
analyzer = 'word',
stop_words = 'english',
tokenizer = MY_STEMMER,
lowercase = True
)
MyVect_STEM_Bern=CountVectorizer(input='content',
analyzer = 'word',
stop_words='english',
tokenizer=MY_STEMMER,
lowercase = True,
binary=True
)
FinalDF_STEM = pd.DataFrame()
FinalDF_TFIDF_STEM = pd.DataFrame()
FinalDF_STEM_Bern = pd.DataFrame()
X1 = MyVect_STEM.fit_transform(AllReviewsList)
X2 = MyVect_IFIDF_STEM.fit_transform(AllReviewsList)
XB = MyVect_STEM_Bern.fit_transform(AllReviewsList)
# List of tokens
ColumnNames1 = MyVect_STEM.get_feature_names()
def print_full(ColumnNames1):
pd.set_option('display.max_rows', len(ColumnNames1))
print(ColumnNames1)
pd.reset_option('display.max_rows')
print_full(ColumnNames1)
ColumnNames2 = MyVect_IFIDF_STEM.get_feature_names()
def print_full(ColumnNames2):
pd.set_option('display.max_rows', len(ColumnNames2))
print(ColumnNames2)
pd.reset_option('display.max_rows')
print_full(ColumnNames2)
ColumnNamesB = MyVect_STEM_Bern.get_feature_names()
def print_full(ColumnNamesB):
pd.set_option('display.max_rows', len(ColumnNamesB))
print(ColumnNamesB)
pd.reset_option('display.max_rows')
print_full(ColumnNamesB)
NumFeatures4 = len(ColumnNamesB)
#Place in data table
builderS = pd.DataFrame(X1.toarray(),columns = ColumnNames1)
builderTS = pd.DataFrame(X2.toarray(),columns = ColumnNames2)
builderB = pd.DataFrame(XB.toarray(),columns = ColumnNamesB)
#Add column
builderS["Label"] = AllLabelsList
builderTS["Label"] = AllLabelsList
builderB["Label"] = AllLabelsList
#Convert to Data frame
FinalDF_STEM = FinalDF_STEM.append(builderS)
FinalDF_TFIDF_STEM = FinalDF_TFIDF_STEM.append(builderTS)
FinalDF_STEM_Bern = FinalDF_STEM_Bern.append(builderB)
## Replace NA with 0
FinalDF_STEM = FinalDF_STEM.fillna(0)
FinalDF_TFIDF_STEM = FinalDF_TFIDF_STEM.fillna(0)
FinalDF_STEM_Bern = FinalDF_STEM_Bern.fillna(0)
#Remove columns with numbers
MyList=[]
for col in FinalDF_TFIDF_STEM.columns:
LogR=col.isdigit()
if(LogR==True):
MyList.append(str(col))
print(MyList)
FinalDF_TFIDF_STEM.drop(MyList, axis = 1, inplace = True)
def RemoveNums(SomeDF):
temp = SomeDF
MyList = []
for col in temp.columns:
Logical2=str.isalpha(col)
if(Logical2==False):
MyList.append(str(col))
temp.drop(MyList, axis = 1, inplace = True)
return temp
FinalDF_STEM = RemoveNums(FinalDF_STEM)
FinalDF_STEM_Bern = RemoveNums(FinalDF_STEM_Bern)
FinalDF_TFIDF_STEM = RemoveNums(FinalDF_TFIDF_STEM)
print(FinalDF_STEM)
print(FinalDF_TFIDF_STEM)
print(FinalDF_STEM_Bern)
########################################################################
## Create the testing set with a random sample.
rd.seed(1234)
sklearn.model_selection.StratifiedKFold(n_splits = 10, shuffle = False, random_state = None)
TrainDF1, TestDF1 = train_test_split(FinalDF_STEM, test_size = 0.3, random_state = 10)
TrainDF2, TestDF2 = train_test_split(FinalDF_TFIDF_STEM, test_size = 0.3, random_state = 10)
TrainDF3, TestDF3 = train_test_split(FinalDF_STEM_Bern, test_size = 0.3)
print(FinalDF_STEM)
print(TrainDF1)
print(TrainDF2)
print(TrainDF3)
print(TestDF1)
print(TestDF2)
print(TestDF3)
## Separate and save labels; Remove labels from test set
Test1Labels = TestDF1["Label"]
Test2Labels = TestDF2["Label"]
Test3Labels = TestDF3["Label"]
## remove labels
TestDF1 = TestDF1.drop(["Label"], axis = 1)
TestDF2 = TestDF2.drop(["Label"], axis = 1)
TestDF3 = TestDF3.drop(["Label"], axis = 1)
#Remove from Training set
Train1Labels = TrainDF1["Label"]
Train2Labels = TrainDF2["Label"]
Train3Labels = TrainDF3["Label"]
## remove labels
TrainDF1 = TrainDF1.drop(["Label"], axis = 1)
TrainDF2 = TrainDF2.drop(["Label"], axis = 1)
TrainDF3 = TrainDF3.drop(["Label"], axis = 1)
########################################################################
### Create the NB model ###
MyModelNB1 = MultinomialNB()
MyModelNB2 = MultinomialNB()
MyModelNB3 = MultinomialNB()
MyModelNB1.fit(TrainDF1, Train1Labels)
MyModelNB2.fit(TrainDF2, Train2Labels)
MyModelNB3.fit(TrainDF3, Train3Labels)
Prediction1 = MyModelNB1.predict(TestDF1)
Prediction2 = MyModelNB2.predict(TestDF2)
Prediction3 = MyModelNB3.predict(TestDF3)
### Predition vs labeled data ###
print("\nThe prediction from NB is:")
print(Prediction1)
print("\nThe actual labels are:")
print(Test1Labels)
def print_full(Test1Labels):
pd.set_option('display.max_rows', len(Test1Labels))
print(Test1Labels)
pd.reset_option('display.max_rows')
print_full(Test1Labels)
print("\nThe prediction from NB is:")
print(Prediction2)
print("\nThe actual labels are:")
print(Test2Labels)
def print_full(Test2Labels):
pd.set_option('display.max_rows', len(Test2Labels))
print(Test2Labels)
pd.reset_option('display.max_rows')
print_full(Test2Labels)
print("\nThe prediction is:")
print(Prediction3)
print("\nThe actual labels are:")
print(Test3Labels)
def print_full(Test3Labels):
pd.set_option('display.max_rows', len(Test3Labels))
print(Test3Labels)
pd.reset_option('display.max_rows')
print_full(Test3Labels)
########################################################################
### Confusion Matrix ###
#CountVectorizer
cnf_matrix1 = confusion_matrix(Test1Labels, Prediction1)
print("\nThe confusion matrix is:")
print(cnf_matrix1)
#IFIDF
cnf_matrix2 = confusion_matrix(Test2Labels, Prediction2)
print("\nThe confusion matrix is:")
print(cnf_matrix2)
#Bernoulli
cnf_matrix3 = confusion_matrix(Test3Labels, Prediction3)
print("\nThe confusion matrix is:")
print(cnf_matrix3)
########################################################################
### Prediction Probabilities ###
#CountVectorizer
print(np.round(MyModelNB1.predict_proba(TestDF1),2))
#IFIDF
print(np.round(MyModelNB2.predict_proba(TestDF2),2))
#Bernoulli
print(np.round(MyModelNB3.predict_proba(TestDF3),2))
########################################################################
### SVM with Linear Kernel ###
#CountVectorizer
SVM_Model = LinearSVC(C = 10)
SVM_Model.fit(TrainDF1, Train1Labels)
print("SVM prediction:\n", SVM_Model.predict(TestDF1))
print("Actual:")
print(Test1Labels)
def print_full(Test1Labels):
pd.set_option('display.max_rows', len(Test1Labels))
print(Test1Labels)
pd.reset_option('display.max_rows')
print_full(Test1Labels)
SVM_matrix = confusion_matrix(Test1Labels, SVM_Model.predict(TestDF1))
print("\nThe confusion matrix is:")
print(SVM_matrix)
print("\n\n")
#IFIDF
SVM_Model.fit(TrainDF2, Train2Labels)
print("SVM prediction:\n", SVM_Model.predict(TestDF2))
print("Actual:")
print(Test2Labels)
def print_full(Test2Labels):
pd.set_option('display.max_rows', len(Test2Labels))
print(Test2Labels)
pd.reset_option('display.max_rows')
print_full(Test2Labels)
SVM_matrix = confusion_matrix(Test2Labels, SVM_Model.predict(TestDF2))
print("\nThe confusion matrix is:")
print(SVM_matrix)
print("\n\n")
#Bernoulli
SVM_Model.fit(TrainDF3, Train3Labels)
print("SVM prediction:\n", SVM_Model.predict(TestDF3))
print("Actual:")
print(Test3Labels)
def print_full(Test3Labels):
pd.set_option('display.max_rows', len(Test3Labels))
print(Test3Labels)
pd.reset_option('display.max_rows')
print_full(Test3Labels)
SVM_matrix = confusion_matrix(Test3Labels, SVM_Model.predict(TestDF3))
print("\nThe confusion matrix is:")
print(SVM_matrix)
print("\n\n")
########################################################################
### SVM with Poly Kernel ###
#CountVectorizer
SVM_ModelPoly = sklearn.svm.SVC(C = 1000, kernel = 'poly', degree = 2,
gamma = "auto", verbose = True)
print(SVM_ModelPoly)
SVM_ModelPoly.fit(TrainDF1, Train1Labels)
print("SVM prediction:\n", SVM_ModelPoly.predict(TestDF1))
print("Actual:")
print(Test1Labels)
def print_full(Test1Labels):
pd.set_option('display.max_rows', len(Test1Labels))
print(Test1Labels)
pd.reset_option('display.max_rows')
print_full(Test1Labels)
SVM_matrixPoly = confusion_matrix(Test1Labels, SVM_ModelPoly.predict(TestDF1))
print("\nThe confusion matrix is:")
print(SVM_matrixPoly)
print("\n\n")
#IFIDF
SVM_ModelPoly = sklearn.svm.SVC(C = 1000, kernel = 'poly', degree = 2,
gamma = "auto", verbose = True)
print(SVM_ModelPoly)
SVM_ModelPoly.fit(TrainDF2, Train2Labels)
print("SVM prediction:\n", SVM_ModelPoly.predict(TestDF2))
print("Actual:")
print(Test2Labels)
def print_full(Test2Labels):
pd.set_option('display.max_rows', len(Test2Labels))
print(Test2Labels)
pd.reset_option('display.max_rows')
print_full(Test2Labels)
SVM_matrixPoly = confusion_matrix(Test2Labels, SVM_ModelPoly.predict(TestDF2))
print("\nThe confusion matrix is:")
print(SVM_matrixPoly)
print("\n\n")
#Bernoulli
SVM_ModelPoly = sklearn.svm.SVC(C = 1000, kernel = 'poly', degree = 2,
gamma = "auto", verbose = True)
print(SVM_ModelPoly)
SVM_ModelPoly.fit(TrainDF3, Train3Labels)
print("SVM prediction:\n", SVM_ModelPoly.predict(TestDF3))
print("Actual:")
print(Test3Labels)
def print_full(Test3Labels):
pd.set_option('display.max_rows', len(Test3Labels))
print(Test3Labels)
pd.reset_option('display.max_rows')
print_full(Test3Labels)
SVM_matrixPoly = confusion_matrix(Test2Labels, SVM_ModelPoly.predict(TestDF2))
print("\nThe confusion matrix is:")
print(SVM_matrixPoly)
print("\n\n")
########################################################################
### SVM with Radial Kernel ###
#CountVectorizer
SVM_ModelRBF = sklearn.svm.SVC(C = 1000, kernel = 'rbf',
verbose = True, gamma = "auto")
SVM_ModelRBF.fit(TrainDF1, Train1Labels)
print("SVM prediction:\n", SVM_ModelRBF.predict(TestDF1))
print("Actual:")
print(Test1Labels)
def print_full(Test1Labels):
pd.set_option('display.max_rows', len(Test1Labels))
print(Test1Labels)
pd.reset_option('display.max_rows')
print_full(Test1Labels)
SVM_matrixRBF = confusion_matrix(Test1Labels, SVM_ModelRBF.predict(TestDF1))
print("\nThe confusion matrix is:")
print(SVM_matrixRBF)
print("\n\n")
#IFIDF
SVM_ModelRBF = sklearn.svm.SVC(C = 1000, kernel = 'rbf',
verbose = True, gamma = "auto")
SVM_ModelRBF.fit(TrainDF2, Train2Labels)
print("SVM prediction:\n", SVM_ModelRBF.predict(TestDF2))
print("Actual:")
print(Test2Labels)
def print_full(Test2Labels):
pd.set_option('display.max_rows', len(Test2Labels))
print(Test2Labels)
pd.reset_option('display.max_rows')
print_full(Test2Labels)
SVM_matrixRBF = confusion_matrix(Test2Labels, SVM_ModelRBF.predict(TestDF2))
print("\nThe confusion matrix is:")
print(SVM_matrixRBF)
print("\n\n")
#Bernoulli
SVM_ModelRBF = sklearn.svm.SVC(C = 1000, kernel = 'rbf',
verbose = True, gamma = "auto")
SVM_ModelRBF.fit(TrainDF3, Train3Labels)
print("SVM prediction:\n", SVM_ModelRBF.predict(TestDF3))
print("Actual:")
print(Test3Labels)
def print_full(Test3Labels):
pd.set_option('display.max_rows', len(Test3Labels))
print(Test3Labels)
pd.reset_option('display.max_rows')
print_full(Test3Labels)
SVM_matrixRBF = confusion_matrix(Test3Labels, SVM_ModelRBF.predict(TestDF3))
print("\nThe confusion matrix is:")
print(SVM_matrixRBF)
print("\n\n")
########################################################################
### Top SVM Coefficients ###
def plot_coefficients(MODEL = SVM_Model, COLNAMES = TrainDF1.columns, top_features = 10):
coef = MODEL.coef_.ravel()
top_positive_coefficients = np.argsort(coef,axis = 0)[-top_features:]
top_negative_coefficients = np.argsort(coef,axis = 0)[:top_features]
top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])
plt.figure(figsize=(15, 5))
colors = ["red" if c < 0 else "blue" for c in coef[top_coefficients]]
plt.bar( x = np.arange(2 * top_features) , height=coef[top_coefficients], width = .5, color = colors)
feature_names = np.array(COLNAMES)
plt.xticks(np.arange(0, (2*top_features)), feature_names[top_coefficients], rotation = 60, ha = "right")
plt.show()
plot_coefficients()
########################################################################
###Create Word Cloud#######
text = ("sinking-ship",
"selfish",
"high-turnover",
"no-growth",
"overtime",
"caution",
"misery",
"keep-looking",
"unrealistic-expectations",
"downhill",
"cult",
"excellent",
"playground",
"racists",
"good",
"poor",
"okay",
"unorganized",
"easy",
"liked",
"run",
"unfair-treatment",
"plastics-are-evil",
"mediocrity",
"too-many-hours",
"hard-work",
"amazing-exposure",
"bad-treatment",
"good",
"potential",
"employees-under-valued",
"mediocre",
"high-stress",
"constant-rifs",
"tired",
"careful-what-you-say",
"lazy",
"solid-employment",
"company-went-south",
"needs-improvement",
"sham",
"would-not-recommend",
"look-elsewhere",
"watch-your-back",
"quite-miserable",
"elitist",
"egotistical",
"judgmental",
"racist",
"going down-hill",
"enjoyed",
"great-experience",
"unsafe",
"unclean",
"seems-great-but-improvement-needed",
"no-help",
"bobs-greed",
"environment-lacking-understanding",
"good-experience",
"sad-company",
"great-job-if-you-don't-want-to-contribute",
"company-going downhill",
"friendly-co-workers",
"friendly-co-workers",
"friendly-co-workers",
"friendly-co-workers",
"friendly-co-workers",
"friendly-co-workers",
"friendly-co-workers",
"great-culture",
"great-culture",
"great-culture",
"strong-values",
"no-core-values",
"toxic-corporate-culture",
"toxic-corporate-culture",
"decent-pay",
"low-pay",
"decent-pay",
"low-pay", "low-pay","low-pay","low-pay","low-pay",
"low-pay,"
"low-pay",
"low-pay",
"terrible-management",
"terrible-management",
"terrible-management",
"terrible-management",
"terrible-management",
"terrible-management",
"terrible-management",
"terrible-management",
"management-not-interested-in-growing",
"cold-hearted-management",
"terrible-management",
"terrible-management",
"leadership-lacking",
"leadership-lacking",
"fair-management",
"great-employer",
"great-experience",
"toxic-environment",
"worst-environment",
"outdated",
"decent-stepping-stone",
"outdated",
"outdated",
"outdated",
"okay-entry-level",
"okay-entry-level",
"advancement",
"outdated",
"outdated",
"just-a-job",
"opportunity",
"decent-for-initial-experience",
"outdated",
"outdated",
"outdated",
"outdated",
"good-steppingstone",
"not-a-place-to-stay-long-term",
"good-place-to-start",
"good-work-life-balance",
"good-work-life-balance",
"worst-training ",
"worst-training",
"worst-onboarding-experience",
"great-benefits",
"good-benefits",
"great-benefits",
"great-benefits",
"great-benefits,"
"great-benefits",
"great-benefits",
"cheap-health-insurance",
"bad-paid-time-off",
"worst-company",
"solid-company",
"good-company",
"worst-company",
"small-company",
"great-company",
"solid-company",
"steady-job",
"challenging-job ",
"decent-job",
"steady-job",
"best-job",
"demanding-job",
"great-place",
"nice-place",
"nice-place",
"great-place",
"good-enough-place")
type(text)
def listToString(text):
    # join with spaces; concatenating directly would merge every token into one word
    return " ".join(text)
print(listToString(text))
mytext = (listToString(text))
# Import packages
import matplotlib.pyplot as plt
def plot_cloud(wordcloud):
# Set figure size
plt.figure(figsize=(40, 30))
# Display image
plt.imshow(wordcloud)
# No axis details
plt.axis("off");
from wordcloud import WordCloud, STOPWORDS
wordcloud = WordCloud(width = 3000, height = 2000, random_state = 1,
background_color = 'deepskyblue', colormap = 'Pastel1', collocations = False).generate(mytext)
plot_cloud(wordcloud)
| true |
588d194d1c1ec4ed8cf36f93f0d3fa20d5f83530
|
Python
|
ggqshr/fill_mongo
|
/fill_mongo.py
|
UTF-8
| 4,747 | 2.703125 | 3 |
[] |
no_license
|
import yaml
import pymongo
import random
from os.path import exists
import pickle
import logging
import tqdm
import base64
logging.basicConfig(level=logging.INFO)
CONFIG_FILE = "config.yaml"
BOUND = range(3000, 6000)  # randomly shrink the data so the filled keys don't all look identical
random_list = []
for i in range(65,91):
random_list.append(chr(i))
for i in range(0,10):
random_list.append(i)
class ConfigObj:
obj = None
def __init__(self):
with open(CONFIG_FILE, "r", encoding="utf-8") as f:
self.obj = yaml.load(f.read(), Loader=yaml.FullLoader)
def get_db(self):
return self.obj['db']
def get_keys(self):
return self.obj['from'], self.obj['to']
def get_field_name(self):
return self.obj['field_name'] if self.obj['field_name'] is not None else "post_time"
def get(self, k: str):
return self.obj[k]
obj = ConfigObj()
def read_from_data(key):
"""
根据传入的Key拿取数据,如果本地有,就从本地读取,如果没有就从mongo中读取,然后返回
:param key: 拿去数据的key
:return: 根据key拿到的数据
"""
db_config = obj.get_db()
flag = str(base64.b64encode(
f"{db_config['host']}:{db_config['port']}{db_config['name']}{db_config['collectionname']}".encode("utf-8")),
"utf-8")
key_file_name = f".{flag}-{key}"
    # if a local cache exists, read from it
if exists(key_file_name):
with open(key_file_name, "rb") as f:
return pickle.load(f)
    # otherwise read from mongo and store a local copy
mongo_data = [dd.copy() for dd in get_data_from_mongo(db_config, key)]
with open(key_file_name, "wb") as f:
pickle.dump(mongo_data, f)
return mongo_data
def get_data_from_mongo(db, key):
"""
根据Key从mongo中读取数据
:param db: 连接的mongo的信息
:param key: 拿取数据的Key
:return: 拿取的数据
"""
client = pymongo.MongoClient(db['host'], db['port'])
client.admin.authenticate("jason#619", "jason#619")
collect = client.get_database(db['name']).get_collection(db['collectionname'])
data = collect.find(
filter={"post_time": key},
projection={'_id': False},
# skip=random.randint(*BOUND),
batch_size=500,
)
return data
def update_func(x, key, value):
x.update({key: value})
this_id = x.get("id",None)
if this_id is None:
return x
if type(this_id) is str:
random_index = random.choice(list(range(len(this_id))))
this_id = this_id[:random_index] + str(random.choice(random_list)) + this_id[random_index:]
x['id'] = this_id
else:
this_id = str(this_id)
random_index = random.choice(list(range(len(this_id))))
this_id = this_id[:random_index] + str(random.choice(list(range(0,10)))) + this_id[random_index:]
x['id'] = int(this_id)
return x
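# e.g. an id of 12345 may come back as 123845 -- one random character/digit is
# inserted at a random position so the copied documents get fresh ids.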
def write_to_mongo(db, key, data):
"""
根据db的配置以及key写入数据
:param db: 数据库配置
:param key: 要写入的key
:param data: 要写入的数据
:return:
"""
client = pymongo.MongoClient(db['host'], db['port'])
client.admin.authenticate("jason#619", "jason#619")
collect = client.get_database(db['name']).get_collection(db['collectionname'])
write_data = map(lambda x: update_func(x, obj.get_field_name(), key), data)
collect.insert_many(write_data)
def write_to_aim():
    logging.info("loading config file")
    from_keys, to_keys = obj.get_keys()
    logging.info(f"filling from {from_keys} into {to_keys}")
    logging.info("start writing data")
    for to_key in tqdm.tqdm(to_keys):
        from_key = random.choice(from_keys)
        logging.info(f"chosen source key: {from_key}")
        dd = read_from_data(from_key)
        logging.info(f"source data length: {len(dd)}")
        write_len = len(dd) - random.choice(BOUND)
        while write_len <= 100:
            logging.info(f"data for key {from_key} is too short ({len(dd)}); consider removing this key")
            from_key = random.choice(from_keys)
            logging.info(f"chosen source key: {from_key}")
            dd = read_from_data(from_key)
            logging.info(f"source data length: {len(dd)}")
            write_len = len(dd) - random.choice(BOUND)
        final_dd = random.sample(dd, write_len).copy()
        logging.info(f"writing {write_len} records to {to_key}")
        write_to_mongo(obj.get_db(), to_key, final_dd)
        logging.info(f"finished writing {to_key}")
        del dd
if __name__ == '__main__':
write_to_aim()
| true |
56ada6bf524975107d2c045096262525437cd49d
|
Python
|
tennyson-mccalla/PPaItCS
|
/7.4.py
|
UTF-8
| 395 | 3.796875 | 4 |
[] |
no_license
|
def standing(credits):
if credits >= 26:
stand = "Senior"
elif credits >= 16:
stand = "Junior"
elif credits >= 7:
stand = "Sophomore"
else:
stand = "Freshman"
return stand
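# e.g. standing(20) -> "Junior"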
def main():
print()
C = int(input("How many credits have you got?: "))
print()
print("That makes a {} at this institution.\n".format(standing(C)))
main()
| true |
fbbb87dc3bf000ea05b729930e6c5d440b7f7df9
|
Python
|
anirudhpillai/algorithms
|
/leetcode/max_increase_to_keep_city_skyline.py
|
UTF-8
| 603 | 2.796875 | 3 |
[] |
no_license
|
class Solution(object):
def maxIncreaseKeepingSkyline(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
lr_skyline = [max(row) for row in grid]
tb_skyline = []
for col_idx in range(len(grid[0])):
tb_skyline.append(max(row[col_idx] for row in grid))
result = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
height = min(lr_skyline[r], tb_skyline[c])
result += height - grid[r][c]
return result
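# e.g. maxIncreaseKeepingSkyline([[3,0,8,4],[2,4,5,7],[9,2,6,3],[0,3,1,0]]) -> 35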
| true |
dd3fc5f23ba2a16893bef3d50c54ae2307e46fe6
|
Python
|
philips-ni/ecfs
|
/leetcode/077_combinations/python/combinations.py
|
UTF-8
| 719 | 3.03125 | 3 |
[] |
no_license
|
class Solution:
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
l=list(range(1,n+1))
allPaths=[]
visited = []
self.dfs(l,k,0,visited,allPaths)
return allPaths
def dfs(self,l,k,index,visited,allPaths):
# print "index: %d , visited: %s, allPaths: %s" % (index, str(visited), str(allPaths))
if len(visited)==k:
allPaths.append(visited)
# print "path founded"
return
while len(l) - index >= k - len(visited):
tmpVisited = visited + [l[index]]
index += 1
self.dfs(l,k,index,tmpVisited,allPaths)
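# e.g. Solution().combine(4, 2) -> [[1,2],[1,3],[1,4],[2,3],[2,4],[3,4]]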
| true |
a1d6e8bc173f3843e466fbe4e680f519ae79fccd
|
Python
|
jmr988/serverTips
|
/tips.py
|
UTF-8
| 441 | 3.484375 | 3 |
[] |
no_license
|
TIP_OUT = .03
totalSales = float(input('Enter total sales: '))
totalBeer = float(input('Enter total beer sales: '))
totalLiquor = float(input('Enter total liquor sales: '))
totalWine = float(input('Enter total wine sales: '))
totalDue = float(input('Enter total due: '))
totalAlchol = (totalBeer + totalLiquor + totalWine)
supportTip = (totalSales * TIP_OUT)
barTip = (totalAlchol * TIP_OUT)
tips = totalDue - (supportTip + barTip)
print(tips)
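# e.g. sales 1000.0 with 300.0 total alcohol and 200.0 due:
# supportTip = 30.0, barTip = 9.0, tips = 200.0 - 39.0 = 161.0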
| true |
18600fe017f7c802b73ff99f516716e92b51a650
|
Python
|
bigfacebig/NGSTools
|
/VCF/vcf.py
|
UTF-8
| 4,431 | 2.75 | 3 |
[] |
no_license
|
import sys
import os
import re
class VCF:
"""some function to deal with VCF file"""
def __init__(self, file):
# super(VCF, self).__init__()
self.file = file
def _readVCF(self):
"""read vcf file"""
if not os.path.exists(self.file):
sys.exit('%s file not exist\n' % self.file)
if self.file.endswith('.gz'):
import gzip
return gzip.open(self.file, 'r').readlines()
else:
return open(self.file, 'r').readlines()
def filter(self,stat=''):
"""filter the VCF"""
vcf = self._readVCF()
filter_vcf = []
for i in vcf:
if i.startswith('#'):
continue
words = i.split('\t')
            if stat == 'PASS':
                if words[6] != 'PASS' and words[6] != '.':
                    continue
            if ',' in words[4]:
                words[4] = words[4].split(',')[0]
filter_vcf.append(i)
vcf = []
return filter_vcf
    def novo(self, vcf):
        """get the novo mutations (records without a dbSNP rs id)"""
        novo = []
        for i in range(len(vcf)):
            words = vcf[i].split('\t')
            if not re.search('snp\d+=rs', words[7]):
                novo.append(vcf[i])
        return novo
def get_SNP_INDEL(self, vcf):
"""get the SNP and InDel muation\n """
snp = []
indel = []
for i in range(len(vcf)):
words = vcf[i].split('\t')
if len(words[3]) == 1 and len(words[4].split(',')[0]) == 1:
snp.append(vcf[i])
else:
indel.append(vcf[i])
return snp, indel
    def chr_stat(self, vcf):
        """stat the mutations per chromosome\n
           return a dict:
           chromosome = {
                '1' : numOfMutationsInChr1
                '2' : numOfMutationsInChr2
                ...
           }
        """
        chromosome = {}
        for line in vcf:
            words = line.split('\t')
            if words[0] in chromosome:
                chromosome[str(words[0])] += 1
            else:
                chromosome[str(words[0])] = 1
        return chromosome
def stat_indel_length(self, vcf):
"""stat the number per indel length
return a dict;
indel_length = {
1 : number
2 : number
-1 : number
...
}
"""
indel_length = {}
for line in vcf:
words = line.split('\t')
if len(words[3]) == 1 and len(words[4]) == 1:
continue
else:
length = len(words[4]) - len(words[3])
if length in indel_length:
indel_length[length] += 1
else:
indel_length[length] = 1
return indel_length
def stat_func(self, vcf):
"""stat the num of snp or indel muation that functional
return a dict:
func = {
'intergenic' : number
'exonic' : number
...
}
"""
func = {}
function = ''
for line in vcf:
words = line.split('\t')
for i in words[7].split(';'):
if i.startswith('Func=') :#Func=ncRNA_exonic
function = i.split('=')[1]
break
for f in function.split(','):
if f in func:
func[f] += 1
else:
func[f] = 1
return func
def stat_exonicfunc(self, vcf):
"""stat the num of snp/indel muation that exonic functional
return a dict:
exonicfunc = {
'missense_SNV' : number
...
}
"""
exonicfunc = {}
function = ''
flag = 0
for line in vcf:
words = line.split('\t')
for i in words[7].split(';'):
if i.startswith('Func='):
function = i.split('=')[1]
for j in function.split(','):
if j == 'exonic':
flag = 1
break
if flag == 0:
continue
for i in words[7].split(';'):
if i.startswith('ExonicFunc='):
function = i.split('=')[1]
break
for f in function.split(','):
if f in exonicfunc:
exonicfunc[f] += 1
else:
exonicfunc[f] = 1
flag = 0
return exonicfunc
def get_Hom_Het(self, vcf):
"""get the number of Het and Hom muation
return a array[2]
[Hom_num,Het_num]
"""
Het_num = 0
Hom_num = 0
        for line in vcf:
            keys = line.split('\t')[-2].split(':')
            values = line.split('\t')[-1].split(':')
            format = dict(zip(keys, values))
            if 'GT' not in format:
continue
if format['GT'] == '1/1' or format['GT'] == '0/0':
Hom_num += 1
else:
Het_num += 1
return Hom_num, Het_num
def ts_tv(self, vcf):
"""for snp muation
get the ts/tv ratio
return:
ts,tv,ts/tv
"""
ts = 0
tv = 0
ts_flag = ['AG', 'GA', 'CT', 'TC']
for line in vcf:
words = line.split('\t')
if words[3].upper()+words[4].split(',')[0].upper() in ts_flag:
ts += 1
else:
tv += 1
return ts, tv, '%.9f' % (ts/float(tv))
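# Usage sketch (the file name below is a placeholder, not part of this module):
# vcf = VCF('sample.vcf.gz')
# records = vcf.filter(stat='PASS')        # keep PASS/'.' records only
# snp, indel = vcf.get_SNP_INDEL(records)
# ts, tv, ratio = vcf.ts_tv(snp)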
| true |
5c086dcabef87c393760cd0ee6fc3a7b32d130ef
|
Python
|
C109156233/pythonlearn
|
/python-mid/階層判斷.py
|
UTF-8
| 196 | 3.40625 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 15:49:24 2021
@author: linnn
"""
m=int(input("請輸入M?"))
b=1
i=1
while(b<m):
i=i+1
b=b*i
print("超過M為",m,"的最小值為:",i)
| true |
37e5fc6e8ca941d219eabe9af424bf70ecd8c638
|
Python
|
sanchi191/Plagiarism
|
/plag-project/plagproject/users/basics.py
|
UTF-8
| 3,977 | 2.921875 | 3 |
[] |
no_license
|
import numpy as np
import os
import matplotlib.pyplot as plt
def remComm(prgm):
n = len(prgm)
res = ""
s_cmt = False
m_cmt = False
    # Traverse the given program
i=0
while(i<n):
        # If single line comment, end it at the newline
if (s_cmt == True and prgm[i] == '\n'):
s_cmt = False
i+=1
# If multiple line comment is on, then check for end of it
elif (m_cmt == True and prgm[i] == '*' and prgm[i + 1] == '/'):
m_cmt = False
i+=2
# If this character is in a comment, ignore it
elif (s_cmt or m_cmt):
i+=1
continue
        # Check for beginning of comments and set the appropriate flags
elif (prgm[i] == '/' and prgm[i + 1] == '/'):
s_cmt = True
i+=1
elif (prgm[i] == '/' and prgm[i + 1] == '*'):
m_cmt = True
i+=1
# If current character is a non-comment character, append it to res
else:
res += prgm[i]
i+=1
return res
# f = open("subtree.cpp", 'r')
# prgm = f.read()
# w = remComm(prgm)
# print(w)
# exit()
numerics=['+','-','=','*','/','<','>',':','.','!','"','\'','&','|','^','#',';','{','}','(',')']
keywords = ['auto' ,'break','case', 'char', 'const', 'continue' ,'default', 'do',
'double','else' ,'enum' ,'extern', 'float' ,'for', 'goto', 'if',
'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof', 'static',
'struct', 'switch','typedef', 'union' ,'unsigned' ,'void' ,'volatile' ,'while']
def specialchars(code):
new_str = " "
remove = False
for num in code:
if num in numerics:
if remove==True:
fil = " "
else:
fil = " " + num + " "
new_str = new_str + fil
else:
new_str+=num
return new_str
# f = open("subtree.cpp", 'r')
# prgm = f.read()
# w = remComm(prgm)
# w2 = specialchars(w)
# print(w2)
# exit()
def perform_all_funcs(files):
finalstr = ""
o1 = remComm(files)
new_str= specialchars(o1)
for i in new_str.split():
# print(j)
# for j in i:
if i in keywords or i in numerics:
finalstr += (" " + i + " ")
# pass
else:
finalstr = finalstr + " k "
#print(finalstr,"b")
return finalstr
# f = open("subtree.cpp", 'r')
# prgm = f.read()
# w = perform_all_funcs(prgm)
# print(w)
# exit()
def k_gram(finalstr,k):
#print(finalstr,"a")
k=5
result = set()
finalstr = finalstr.split()
for line in range(len(finalstr)-k+1):
#print(line)
kgram_string = ""
for word in range(k):
#print(word)
kgram_string = kgram_string + finalstr[line+word]
#print(kgram_string)
#break
result.add(kgram_string)
return result
# f = open("subtree.cpp", 'r')
# prgm = f.read()
# w = perform_all_funcs(prgm)
# w= k_gram(w,5)
# print(w)
# exit()
def common (files_in_dir):
files_in_directory = []
l=0
C=0
Cv= []
path = files_in_dir
dir = os.listdir(path)
dir = sorted(dir)
for i in dir:
files_in_directory.append(i)
kgrams = []
for x in range(len(files_in_directory)):
f= open(path+"/"+files_in_directory[x],"r")
kgrams.append(k_gram(perform_all_funcs(f.read()), 5))
# print(files_in_directory)
matrix = np.zeros((len(files_in_directory), len(files_in_directory)))
for z in range(len(files_in_directory)):
for w in range(z + 1, len(files_in_directory)):
C=0
for a in kgrams[z]:
if a in kgrams[w]:
C += 1
#print(C)
l = min(len(kgrams[z]), len(kgrams[w]))
Cv.append(C/l)
matrix[z,w] = C/l
return matrix
# path = '/home/oem/PycharmProjects/pythonProject1/Parser/Assignment_5'
# data = common(path)
# plt.imshow(data)
# plt.colorbar()
# plt.show()
| true |
73ab6f34c063439af6e3c2725973eaf15058583e
|
Python
|
ipod825/HOPE
|
/pysrc/run_batch.py
|
UTF-8
| 2,117 | 2.703125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import timeit
import config
from config import SolverType
from libdai_solver import JunctionTree, MF, TRWBP
from wish_solver import Wish
import argparse
import math
from utils import listEnum
def parse_arg():
parser = argparse.ArgumentParser(description='Run different algorithms for a problem in a directory')
parser.add_argument("indir", help="Direcory where problems are stored.")
parser.add_argument("solver_type", type=int, help="SolverType:"+listEnum(SolverType))
parser.add_argument("-s", "--samplesize", type=int, default=7, help="Saplesize per quantile.")
parser.add_argument("-t", "--timeout", type=int, default=10, help="Timeout for optimization.")
parser.add_argument("-o", "--outdir", default=config.output_dir, help="Directory where output are stored")
return parser.parse_args()
if __name__ == '__main__':
args = parse_arg()
solver_type = args.solver_type
timeout = args.timeout
isLog = True
if solver_type == SolverType.JT:
solver = JunctionTree()
elif solver_type == SolverType.MF:
solver = MF()
elif solver_type == SolverType.TRWBP:
solver = TRWBP()
elif solver_type == SolverType.WISH:
solver = Wish(samplesize=args.samplesize, timeout=args.timeout)
else:
isLog = False
output_path = os.path.join(args.outdir,solver.__class__.__name__)
if solver_type==SolverType.WISH:
output_path += '_'+str(timeout)
    # empty the file if it exists
open(output_path, 'w').close()
for path in os.listdir(args.indir):
if not path.endswith('.uai'):
continue
path = os.path.join(args.indir, path)
start = timeit.default_timer()
res = solver.solve(path)
stop = timeit.default_timer()
if not isLog:
res = math.log(res)
output = open(output_path, 'a')
output.write('{},{},{}\n'.format(os.path.basename(path), res, stop - start))
print '{},{},{}\n'.format(os.path.basename(path), res, stop - start)
output.close()
| true |
0f77bb38efc091fe61d785f6bda5bcbc33027eb1
|
Python
|
eraguzin-bnl/nEXO_SiPM_Simulator
|
/bnl_asic_sim.py
|
UTF-8
| 10,264 | 2.9375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 15:51:50 2019
@author: Eraguzin
This is the top level module for the entire digital simulation. It sets up the GUI and calls the simulation
"""
import Tkinter as tk
import imageio
from simulation import Simulation_Functions
import os, re
class GUI_WINDOW(tk.Frame):
def __init__(self, master=None):
self.params = dict(
#Rectangles are top left x and y, and then bottom right x and y
FPGA_width = 1000,
FPGA_height = 100
)
self.params['canvas_x'] = 1000
self.params['canvas_y'] = 900
self.params['num_asics'] = 4
self.params['common_signals'] = 1
self.params['serial_signals'] = 1
self.params['daisy_signals'] = 1
self.params['buffer_x'] = 500
self.params['buffer_y'] = 50
self.params['block_size'] = 20
self.params['FPGA_dims'] = [0, 0, self.params['FPGA_width'], self.params['FPGA_height']]
#Create the simulator
self.sim = Simulation_Functions()
#Give the simulator the gui objects so it can manipulate them (change colors)
self.sim.gui = self.set_up_gui(master)
self.sim.master = master
self.sim.params = self.params
def set_up_gui(self, master):
#It's calling the constructor of the parent tkinter object, the pack method of the class, which is now a tkinter object
frame = tk.Frame.__init__(self,master)
self.pack()
#Finish/reset button
finish_button = tk.Button(self, text="Start Simulation",command=self.simulation,width=25)
finish_button.grid(row=0,column=0,columnspan=1)
hbar=tk.Scrollbar(frame,orient=tk.HORIZONTAL)
vbar=tk.Scrollbar(frame,orient=tk.VERTICAL)
w = tk.Canvas(width=self.params['canvas_x'], height=self.params['canvas_y'], bg = "white", scrollregion=(0,0,2000,2000),
yscrollcommand=vbar.set, xscrollcommand=hbar.set)
hbar.pack(side=tk.BOTTOM,fill=tk.X)
hbar.config(command=w.xview)
vbar.pack(side=tk.RIGHT,fill=tk.Y)
vbar.config(command=w.yview)
w.pack()
w.create_rectangle(self.params['FPGA_dims'], fill="yellow")
#X and then Y coordinates
w.create_text((self.params['FPGA_dims'][2] - self.params['FPGA_dims'][0])/2,
(self.params['FPGA_dims'][3] - self.params['FPGA_dims'][1])/2, text="FPGA")
size = self.determine_asic_size()
self.params['size'] = size
signals_to_pass = self.params['common_signals'] + self.params['daisy_signals']
w.create_line(700,150,700,850,width = 2)
w.create_line(800,150,800,850,width = 2)
w.create_line(900,150,900,850,width = 2)
w.create_text(850,130, text="How far light is from creating another peak in the ASIC", anchor=tk.CENTER)
w.create_text(700,870, text="0 ns", anchor=tk.CENTER)
w.create_text(800,870, text="500 ns", anchor=tk.CENTER)
w.create_text(900,870, text="1000 ns", anchor=tk.CENTER)
w.create_line(950,200,1000,200,width = 2, arrow=tk.FIRST, tag = "ASIC0_line", fill = "red")
w.create_line(950,400,1000,400,width = 2, arrow=tk.FIRST, tag = "ASIC1_line", fill = "red")
w.create_line(950,600,1000,600,width = 2, arrow=tk.FIRST, tag = "ASIC2_line", fill = "red")
w.create_line(950,800,1000,800,width = 2, arrow=tk.FIRST, tag = "ASIC3_line", fill = "red")
for count, i in enumerate(range(self.params['num_asics'])):
#Create ASICs
w.create_rectangle((self.params['serial_signals'] * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]),
(self.params['serial_signals'] * self.params['per_signal']) + size[0],
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + ((i + 1) * size[1]),
fill="light blue", tags=("asic{}".format(i)))
w.create_text((self.params['serial_signals'] * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]), text="ASIC {}".format(i), anchor=tk.SW)
#Create common signals
for j in range(signals_to_pass):
w.create_line((self.params['serial_signals'] * self.params['per_signal']) + ((j + 0.5) * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]),
(self.params['serial_signals'] * self.params['per_signal']) + ((j + 0.5) * self.params['per_signal']),
self.params['FPGA_height'] + ((i) * self.params['buffer_y']) + (i * size[1]),
tags=("chip{}".format(i), "common{}".format(j)), width = 5)
#Create horizontal line for serial signals
y_spacing = size[1]/self.params['serial_signals']
for j in range(self.params['serial_signals']):
w.create_line((self.params['serial_signals'] * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]) + ((j + 0.5) * y_spacing),
(self.params['serial_signals'] * self.params['per_signal']) - ((j+0.5) * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]) + ((j + 0.5) * y_spacing),
tags=("chip{}".format(i), "serial{}".format(j)), width = 5)
if (count > (self.params['num_asics'] - 2)):
#Create vertical line for serial signals
for j in range(self.params['serial_signals']):
w.create_line((self.params['serial_signals'] * self.params['per_signal']) - ((j+0.5) * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]) + ((j + 0.5) * y_spacing),
(self.params['serial_signals'] * self.params['per_signal']) - ((j+0.5) * self.params['per_signal']),
self.params['FPGA_height'],
tags=("serial{}".format(j)), width = 5)
#Create daisy chained signals
y_spacing = size[1]/self.params['daisy_signals']
for j in range(self.params['daisy_signals']):
#horizontal line
w.create_line((self.params['serial_signals'] * self.params['per_signal']) + size[0],
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]) + ((j + 0.5) * y_spacing),
(self.params['serial_signals'] * self.params['per_signal']) + size[0] + ((j+0.5) * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]) + ((j + 0.5) * y_spacing),
tags=("chip{}".format(i), "daisy{}".format(j)), width = 5)
#vertical line
w.create_line((self.params['serial_signals'] * self.params['per_signal']) + size[0] + ((j+0.5) * self.params['per_signal']),
self.params['FPGA_height'] + ((i+1) * self.params['buffer_y']) + (i * size[1]) + ((j + 0.5) * y_spacing),
(self.params['serial_signals'] * self.params['per_signal']) + size[0] + ((j+0.5) * self.params['per_signal']),
self.params['FPGA_height'],
tags=("chip{}".format(i), "daisy{}".format(j)), width = 5)
return w
def example_of_changing_color(self,w):
print(w.find_withtag("serial1"))
lines = (w.find_withtag("serial1"))
for i in lines:
w.itemconfig(i, fill="blue")
def determine_asic_size(self):
available_x = self.params['canvas_x']
total_signal_lines = self.params['common_signals'] + self.params['serial_signals'] + (2 * self.params['daisy_signals'])
self.params['per_signal'] = (available_x - self.params['buffer_x']) / (total_signal_lines - 1)
available_y = self.params['canvas_y'] - self.params['FPGA_height'] - self.params['buffer_y']
y_per_asic = available_y / self.params['num_asics']
y_for_block = y_per_asic - self.params['buffer_y']
x_for_block = self.params['per_signal'] * (self.params['common_signals'] + self.params['daisy_signals'])
return x_for_block, y_for_block
def simulation(self):
self.sim.simulation()
images = []
directory = os.path.join(os.getcwd(), "Simulation_Output")
filenames = os.listdir(directory)
ordered_files = sorted(filenames, key=lambda x: (int(re.sub('\D','',x)),x))
print(directory)
for filename in ordered_files:
filename_to_use = os.path.join(directory, filename )
print(filename_to_use)
images.append(imageio.imread(filename_to_use))
for speed in [0.05, 0.1, 0.5]:
imageio.mimsave('Simulation{}.gif'.format(speed), images, 'GIF', duration = speed, loop = 0)
print("DONE")
def main():
root = tk.Tk()
root.geometry("1500x1000") #You want the size of the app to be 500x500
# root.resizable(0, 0) #Don't allow resizing in the x or y direction
root.title("Quad FE ASIC Test GUI")
GUI_WINDOW(root)
root.mainloop()
if __name__ == "__main__":
main()
| true |
553fe21397313d555e0956b690a8e6c90d15f1c0
|
Python
|
ArielHL/DataScienceRepo
|
/PythonStatistics/moby_dick_file.py
|
UTF-8
| 2,098 | 3.5625 | 4 |
[] |
no_license
|
import re
def bag_of_words(filename="./data/moby_dick.txt"):
with open(filename, "r", encoding="utf8") as f:
bag = {}
for line in f:
words = re.findall("\w*", line)
for w in words:
if len(w) <= 0:
continue
if w in bag:
bag[w] += 1
else:
bag[w] = 1
return bag
def bag_of_bigramms(filename="./data/moby_dick.txt"):
with open(filename, "r", encoding="utf8") as f:
bag = {}
prev_word = None
for line in f:
words = re.findall("\w*", line)
for w in words:
if len(w) <= 0:
continue
if prev_word is None or len(prev_word) <= 0:
prev_word = w
continue
bigramm = "{} {}".format(prev_word, w)
prev_word = w
if bigramm in bag:
bag[bigramm] += 1
else:
bag[bigramm] = 1
return bag
def most_frequent_words(bag):
values = list(bag.values())
top_index = values.index(max(values))
return list(bag.keys())[top_index]
def word_conditional_probability(bag_words, bag_bigramms, prev_word = "The", word = "Whale"):
num_words = sum(bag_words.values())
p_prev_word = bag_words[prev_word] / num_words
num_bigramms = sum(bag_bigramms.values())
p_bigramm = bag_bigramms["{} {}".format(prev_word, word)] / num_bigramms
return p_bigramm / p_prev_word
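# Worked example of the estimate above (illustrative counts, not taken from the
# corpus): if "with" occurs 100 times among 10,000 words, P(with) = 0.01; if
# "with this" occurs 5 times among 9,999 bigramms, P(with this) ~ 0.0005, so
# P(this | with) ~ 0.0005 / 0.01 = 0.05.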
bag_words = bag_of_words()
print("Bag of words (moby dick text) size: {}".format(len(bag_words)))
top_word = most_frequent_words(bag_words)
print("Most frequent word: {}; count: {}".format(top_word, bag_words[top_word]))
bag_bigramms = bag_of_bigramms()
print("Bag of bigramms (moby dick text) size: {}".format(len(bag_bigramms)))
p_prev_word = word_conditional_probability(bag_words, bag_bigramms, "with", "this")
print("Probability of word <{}> after word <{}> is: {}".format("this", "with", p_prev_word))
| true |
54e0be56044b96569a25ccd2d1abb660e18b09c3
|
Python
|
hayeong922/final_projec_info_viz
|
/demo/time_series/time_series.py
|
UTF-8
| 8,090 | 2.921875 | 3 |
[] |
no_license
|
import pandas
import vincent
import re
from collections import defaultdict
import math
import operator
import json
from collections import Counter
from nltk.corpus import stopwords
import string
# f is the file pointer to the JSON data set
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^' + emoticons_str + '$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
return tokens_re.findall(s)
def preprocess(s, lowercase=False):
tokens = tokenize(s)
if lowercase:
tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]
return tokens
print ''
print ''
print '*******Read Twitter Data!********'
fname = 'data/collect_zika.json'
dates_zika = []
en_list = []
pt_list = []
es_list = []
tweet_count = 0
with open(fname, 'r') as f:
for line in f:
tweet = json.loads(line)
tweet_count +=1
terms_hash = [term for term in preprocess(tweet['text']) if term.startswith('zika')]
# track when the hashtag is mentioned
#if '#zika' in terms_hash:
if 'zika' in terms_hash:
dates_zika.append(tweet['created_at'])
if tweet['lang'] == 'en':
en_list.append(tweet['created_at'])
if tweet['lang'] == 'pt':
pt_list.append(tweet['created_at'])
if tweet['lang'] == 'es':
es_list.append(tweet['created_at'])
#print 'en',en_list
print 'tweet_count',tweet_count
# Removing stop-words
punctuation = list(string.punctuation)
stop = stopwords.words('english') + punctuation + ['rt','via']
# count single term with counter
with open(fname, 'r') as f:
count_all = Counter()
for line in f:
tweet = json.loads(line)
# create a list with all the terms
terms_stop = [term for term in preprocess(tweet['text']) if term not in stop]
# update the counter
count_all.update(terms_stop)
#print the first 5 most frequent words
    print 'single term frequency',(count_all.most_common(5))
# update terms without stop words
### sentiment analysis part
# compute probability
# n_docs is the total n. of tweets
# com: co-occurence was stored
# count_stop_single(which does not store stop-words)
# co-occurrence
com = defaultdict(lambda : defaultdict(int))
# f is the file pointer to the JSON data set
with open(fname, 'r') as f:
for line in f:
tweet = json.loads(line)
terms_only = [term for term in preprocess(tweet['text'])
if term not in stop
and not term.startswith(('#','@'))]
# Build co-occurrence matrix
for i in range(len(terms_only)-1):
for j in range(i+1,len(terms_only)):
w1,w2 = sorted([terms_only[i],terms_only[j]])
if w1 != w2:
com[w1][w2] += 1
print ' '
print 'co-occurrence'
com_max = []
# for each term, look for the most common co-occurrent terms
for t1 in com:
t1_max_terms = sorted(com[t1].items(),key=operator.itemgetter(1),reverse=True)[:5]
for t2,t2_count in t1_max_terms:
com_max.append(((t1,t2),t2_count))
# Get the most frequent co-occurrences
terms_max = sorted(com_max,key=operator.itemgetter(1),reverse=True)
print (terms_max[:5])
print ' '
count_stop_single = {}
count_stop_single = count_all
p_t = {}
p_t_com = defaultdict(lambda: defaultdict(int))
n_docs = tweet_count
for term, n in count_stop_single.items():
p_t[term] = float(n) / float(n_docs)
for t2 in com[term]:
p_t_com[term][t2] = float(com[term][t2])/float(n_docs)
# compute semantic orientation
# given two vocab for positives and negative terms
# these postive and negative opinion words or sentiement words are from
# Bing Liu, Minqing Hu and Junsheng Cheng. "Opinion Observer: Analyzing
# and Comparing Opinions on the Web." Proceedings of the 14th
# International World Wide Web conference (WWW-2005), May 10-14,
# 2005, Chiba, Japan.
with open('positive-words.txt','r') as f:
positive_vocab = [line.strip() for line in f]
with open('negative-words.txt','r') as f:
negative_vocab = [line.strip() for line in f]
# can compute PMI of each pair of term
# and then compute the Semantic Orientation
pmi = defaultdict(lambda : defaultdict(int))
for t1 in p_t:
for t2 in com[t1]:
denom = p_t[t1] * p_t[t2]
pmi[t1][t2] = math.log(p_t_com[t1][t2]/denom)
semantic_orientation = {}
for term, n in p_t.items():
positive_assoc = sum(pmi[term][tx] for tx in positive_vocab)
negative_assoc = sum(pmi[term][tx] for tx in negative_vocab)
semantic_orientation[term] = positive_assoc - negative_assoc
semantic_sorted = sorted(semantic_orientation.items(),
key=operator.itemgetter(1),
reverse=True)
top_pos = semantic_sorted[:10]
top_neg = semantic_sorted[-10:]
print 'top positive'
print(top_pos)
print ' '
print 'top negative'
print(top_neg)
print ' '
print 'sentiment analysis'
#print("zika: %f" % semantic_orientation['zika'])
print("Zika: %f" % semantic_orientation['Zika'])
print("birth: %f" % semantic_orientation['birth'])
# a list of "1" to count the hashtags
ones = [1] * len(dates_zika)
# the index of the series
idx = pandas.DatetimeIndex(dates_zika)
# the actual series (a series of 1s for the moment)
zika = pandas.Series(ones, index=idx)
# Resampling / bucketing
per_minute = zika.resample('1Min').sum().fillna(0)
time_chart = vincent.Line(per_minute)
time_chart.axis_titles(x='Time', y='Freq')
#time_chart.to_json('time_series.json', html_out=True, html_path='time_series.html')
time_chart.to_json('time_series.json', html_out=True, html_path='time_series.html')
# different language time_series, currently 10 min maybe change it to 1 min
# english
ones = [1] * len(en_list)
idx = pandas.DatetimeIndex(en_list)
ENGLISH = pandas.Series(ones, index=idx)
per_minute_en = ENGLISH.resample('10Min').sum().fillna(0)
# portuguese
ones = [1] * len(pt_list)
idx = pandas.DatetimeIndex(pt_list)
PORTUGUESE = pandas.Series(ones, index=idx)
per_minute_pt = PORTUGUESE.resample('10Min').sum().fillna(0)
# spanish
ones = [1] * len(es_list)
idx = pandas.DatetimeIndex(es_list)
SPANISH = pandas.Series(ones, index=idx)
per_minute_es = SPANISH.resample('10Min').sum().fillna(0)
# all the data together
match_data = dict(ENGLISH = per_minute_en, PORTUGUESE = per_minute_pt, SPANISH = per_minute_es)
# we need a DataFrame, to accomodate multiple series
all_matches = pandas.DataFrame(data=match_data,
index=per_minute_en.index)
# Resampling as above
all_matches = all_matches.resample('10Min').sum().fillna(0)
# and now the plotting
time_chart = vincent.Line(all_matches[['ENGLISH', 'PORTUGUESE', 'SPANISH']])
time_chart.axis_titles(x='Time', y='Freq')
time_chart.legend(title='Matches')
time_chart.to_json('time_chart.json', html_out=True, html_path='time_chart.html')
with open(fname, 'r') as f:
count_all = Counter()
for line in f:
tweet = json.loads(line)
# Create a list with all the terms
terms_all = [term for term in preprocess(tweet['lang'])]
# Update the counter
count_all.update(terms_all)
# Print the first 5 most frequent words
word_freq = count_all.most_common(5)
labels, freq = zip(*word_freq)
data = {'data': freq, 'x': labels}
bar = vincent.Bar(data, iter_idx='x')
#bar.to_json('bar_chart.json', html_out=True, html_path='bar_chart.html')
bar.to_json('bar_chart.json', html_out=True, html_path='bar_chart.html')
| true |
ba559319127e38a76b663da7c0a4179b1c36c3a9
|
Python
|
euribates/advent_of_code_2020
|
/day05/second.py
|
UTF-8
| 375 | 3.0625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import loader
karte = {}
for seat in loader.read_input("input"):
karte[seat.seat_id] = seat
min_seat_id = min(karte.keys())
max_seat_id = max(karte.keys())
for i in range(min_seat_id+1, max_seat_id):
if (i-1) in karte and i not in karte and (i+1) in karte:
sol = i
break
print(f"Solution 2 is {sol}")
| true |
a83bc75fb55ce7fc54c38e388d2a13ed8ea23244
|
Python
|
Zhihao-de/kh_community
|
/server/util/MessageProducer.py
|
UTF-8
| 892 | 2.6875 | 3 |
[] |
no_license
|
import pika
# Create a new connection; if RabbitMQ is installed locally, the hostname is 'localhost'
class MessageProducer:
connection = None
def __init__(self, host, username, password):
self.hostname = host
self.port = 5672
self.username = username
self.password = password
    def connect(self):
        credentials = pika.PlainCredentials(self.username, self.password)
        parameters = pika.ConnectionParameters(host=self.hostname, port=self.port, virtual_host='/', credentials=credentials)
        # store the connection so produce()/close() can use it
        self.connection = pika.BlockingConnection(parameters)
        return self.connection
def produce(self, message):
channel = self.connection.channel()
channel.queue_declare(queue='sat')
channel.basic_publish(exchange='', routing_key='sat', body=message)
print(" [x] Sent %s" % message)
def close(self):
self.connection.close()
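# Usage sketch (assumes a RabbitMQ broker reachable with these credentials;
# all values below are placeholders):
# producer = MessageProducer('localhost', 'guest', 'guest')
# producer.connect()
# producer.produce('hello')
# producer.close()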
| true |
26529fae28b79323ac40c07e1a63cfd93affb66b
|
Python
|
Subtracting/mp3player
|
/ideas.py
|
UTF-8
| 423 | 2.671875 | 3 |
[] |
no_license
|
# # keyboard input
# if event.type == pygame.KEYDOWN:
# if event.key == pygame.K_s:
# play_song()
# elif event.key == pygame.K_p and paused == False:
# pause_song()
# elif event.key == pygame.K_q:
# # write last known time when quitting
# file1 = open("temp.txt", "w")
# file1.write(str(timer) + ',' + str(file))
# pygame.mixer.quit()
# running = False
| true |
afef230370580c72e01bc79ba5336353d15d36b6
|
Python
|
VicGanoh/Python_Tut
|
/Python/hello.py
|
UTF-8
| 83 | 3.296875 | 3 |
[] |
no_license
|
#name = input('What is your name?\n')
#print('hello ' + name)
print('hello world')
| true |
9a67b182ed7030360568d3d7fb50a81db96c82dc
|
Python
|
thiagofeijor/100-days-of-python-bootcamp
|
/day-35-rain-alert/main.py
|
UTF-8
| 1,234 | 2.546875 | 3 |
[] |
no_license
|
import requests
import os
from twilio.rest import Client
from twilio.http.http_client import TwilioHttpClient
OWM_Endpoint = "https://api.openweathermap.org/data/2.5/onecall"
api_key = os.environ.get("OWM_API_KEY")
account_sid = os.environ.get("sid")
auth_token = os.environ.get("AUTH_TOKEN")
weather_params = {
"lat": "-14.663350",
"lon": "-52.339279",
"appid": api_key,
"exclude": "current,minutely,daily"
}
response = requests.get(OWM_Endpoint, params=weather_params)
response.raise_for_status()
weather_data = response.json()
weather_slice = weather_data["hourly"][:12]
will_rain = False
for hour_data in weather_slice:
condition_code = hour_data["weather"][0]["id"]
if int(condition_code) < 700:
will_rain = True
if will_rain:
proxy_client = TwilioHttpClient()
proxy_client.session.proxies = {'https': os.environ['https_proxy']}
client = Client(account_sid, auth_token, http_client=proxy_client)
message = client.messages \
.create(
body="It's going to rain today. Remember to bring an ☔️",
from_="YOUR TWILIO VIRTUAL NUMBER",
to="YOUR TWILIO VERIFIED REAL NUMBER"
)
print(message.status)
| true |
aef1d9a1480c8729bc0cbcc399b6ba3467b261e2
|
Python
|
DangoWang/dayu_widgets
|
/dayu_widgets/examples/MLabelTest.py
|
UTF-8
| 4,035 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
from dayu_widgets.MDivider import MDivider
from dayu_widgets.MFieldMixin import MFieldMixin
from dayu_widgets.MLabel import MLabel
from dayu_widgets.MPushButton import MPushButton
from dayu_widgets.qt import *
class MLabelTest(QWidget, MFieldMixin):
def __init__(self, parent=None):
super(MLabelTest, self).__init__(parent)
self._init_ui()
def _init_ui(self):
div1 = MLabel(text='Default')
div2 = MLabel(text='Disabled')
div2.setEnabled(False)
grid_lay = QGridLayout()
data_list = [
            (MLabel.H1Type, u'Heading 1', 'H1 Type', True, False),
            (MLabel.H2Type, u'Heading 2', 'H2 Type', True, False),
            (MLabel.H3Type, u'Heading 3', 'H3 Type', True, False),
            (MLabel.H4Type, u'Heading 4', 'H4 Type', True, False),
            (MLabel.TextType, u'Body Text', 'Text Type', True, False),
            (MLabel.HelpType, u'Help Text', 'Help Type', True, False),
]
for row, data in enumerate(data_list):
type, title1, title2, link1, link2 = data
grid_lay.addWidget(MLabel(text=title1, type=type, link=link1), row, 0)
grid_lay.addWidget(MLabel(text=title1, type=type, link=link2), row, 1)
grid_lay.addWidget(MLabel(text=title2, type=type, link=link1), row, 2)
grid_lay.addWidget(MLabel(text=title2, type=type, link=link2), row, 3)
data_bind_label = MLabel(type=MLabel.H3Type)
self.register_field('show_text', 'Guess')
self.register_field('is_link', True)
self.bind('show_text', data_bind_label, 'text')
self.bind('is_link', data_bind_label, 'link')
button = MPushButton.primary(text='Random An Animal')
button.clicked.connect(self.slot_change_text)
link_button = MPushButton.primary(text='Link')
link_button.clicked.connect(self.slot_link_text)
lay_elide = QVBoxLayout()
        label_none = MLabel('This is an elide NONE mode label. Ellipsis should NOT appear in the text.')
        label_left = MLabel(
            'This is an elide LEFT mode label. The ellipsis should appear at the beginning of the text. xiao mao xiao gou xiao ci wei')
        label_left.set_elide_mode(Qt.ElideLeft)
        label_middle = MLabel(
            'This is an elide MIDDLE mode label. The ellipsis should appear in the middle of the text. xiao mao xiao gou xiao ci wei')
        label_middle.set_elide_mode(Qt.ElideMiddle)
        label_right = MLabel(
            'This is an elide RIGHT mode label. The ellipsis should appear at the end of the text. xiao mao xiao gou xiao ci wei')
        label_right.set_elide_mode(Qt.ElideRight)
lay_elide.addWidget(label_none)
lay_elide.addWidget(label_left)
lay_elide.addWidget(label_middle)
lay_elide.addWidget(label_right)
main_lay = QVBoxLayout()
main_lay.addWidget(div1)
main_lay.addWidget(div2)
main_lay.addWidget(MDivider('different type'))
main_lay.addLayout(grid_lay)
main_lay.addWidget(MDivider('data bind'))
main_lay.addWidget(data_bind_label)
main_lay.addWidget(button)
main_lay.addWidget(link_button)
main_lay.addWidget(MDivider('elide mode'))
main_lay.addLayout(lay_elide)
main_lay.addStretch()
self.setLayout(main_lay)
def slot_change_text(self):
import random
self.set_field('show_text', random.choice(['Dog', 'Cat', 'Rabbit', 'Cow']))
def slot_link_text(self):
self.set_field('is_link', not self.field('is_link'))
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = MLabelTest()
from dayu_widgets import dayu_theme
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
| true |
b4a841d70d4104908887566e4872ec402cd17632
|
Python
|
omivore/connect4
|
/rules/lowinverse.py
|
UTF-8
| 1,871 | 3.046875 | 3 |
[] |
no_license
|
# lowinverse.py
import computer
import itertools
def generate_solutions(board, me):
# Find all combinations of columns. For each pair's columns, find all possible pairs of two vertically consecutive squares
# that have an odd upper square and are empty. Find all combinations of these square pairs to each other across columns.
    for col1, col2 in itertools.combinations(range(7), 2):
for upper1, upper2 in itertools.product(*[[row for row in range(6) if row % 2 == 0 and row != 0
and board[col][row].state.value == computer.State.empty.value
and board[col][row - 1].state.value == computer.State.empty.value]
for col in [col1, col2]]):
# Find all the verticals' solutions.
vertical = computer.rules.vertical
solved = []
            for solution in vertical.generate_solutions(board, me):
                # Keep only verticals that use none of the four lowinverse squares;
                # appending once per non-matching square would duplicate the solved sets.
                lowinverse_squares = (board[col1][upper1], board[col1][upper1 - 1],
                                      board[col2][upper2], board[col2][upper2 - 1])
                if not any(square in lowinverse_squares for square in solution.squares):
                    for solutionset in solution.solved:
                        solved.append(solutionset)
# Yield the two upper squares as solutions.
yield computer.Solution(computer.Rule.lowinverse,
(board[col1][upper1], board[col1][upper1 - 1], board[col2][upper2], board[col2][upper2 - 1]),
[(board[col1][upper1], board[col2][upper2]),
(board[col1][upper1], board[col1][upper1 - 1]), (board[col2][upper2], board[col2][upper2 - 1])] + solved)
| true |
36c7926a8f4283bab7fa5b9e301999c0a5e2d8fe
|
Python
|
renwenhua/webUI_autoTest
|
/pageobjects/page_exit.py
|
UTF-8
| 326 | 2.515625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
#encoding:utf-8
from pageobjects.base import BasePage
from selenium.webdriver.common.by import By
import sys
class Exit(BasePage):
button_exit=(By.PARTIAL_LINK_TEXT,"退出")
def exit(self):
self.driver.switch_to.window(self.driver.window_handles[0])
self.click(*self.button_exit)
| true |
2bc9eeaf3977bea5493bb2ad1999eed3659493b9
|
Python
|
lightstep/lightstep-benchmarks
|
/benchmark/satellite.py
|
UTF-8
| 6,415 | 2.75 | 3 |
[] |
no_license
|
import logging
import platform
import requests
import time
from os import path
from .utils import BENCHMARK_DIR, start_logging_subprocess
from .exceptions import SatelliteBadResponse, DeadSatellites
DEFAULT_PORTS = list(range(8360, 8368))
logger = logging.getLogger(__name__)
BANDWIDTH_LIMIT_KB_PER_SEC = 50*1024
class MockSatelliteHandler:
def __init__(self, port, mode):
self.port = port
# we will subtract this number from how many received spans satellites
# report this will give us the ability to reset spans_received without
# even communicating with satellites
self._spans_received_baseline = 0
mock_satellite_path = path.join(BENCHMARK_DIR, 'mock_satellite.py')
mock_satellite_logger = logging.getLogger(f'{__name__}.{port}')
args = ["python3", mock_satellite_path, str(port), mode]
if platform.system() == "Linux":
args = [
"trickle",
"-s",
"-u",
str(BANDWIDTH_LIMIT_KB_PER_SEC),
"-d",
str(BANDWIDTH_LIMIT_KB_PER_SEC)
] + args
self._handler = start_logging_subprocess(args, mock_satellite_logger)
def is_running(self):
return self._handler.poll() is None
def get_spans_received(self):
host = "http://localhost:" + str(self.port)
res = requests.get(host + "/spans_received")
if res.status_code != 200:
raise SatelliteBadResponse("Error getting /spans_received.")
try:
spans_received = int(res.text) - self._spans_received_baseline
return spans_received
except ValueError:
raise SatelliteBadResponse("Satellite didn't sent an int.")
def reset_spans_received(self):
self._spans_received_baseline += self.get_spans_received()
def terminate(self):
# cross-platform way to terminate a program
# on Windows calls TerminateProcess, on Posix sends SIGTERM
self._handler.terminate()
# wait for an exit code
while self._handler.poll() is None:
pass
class MockSatelliteGroup:
""" A group of mock satellites. """
def __init__(self, mode, ports=DEFAULT_PORTS):
""" Initializes and starts a group of mock satellites.
Parameters
----------
mode : str
Mode determines the response characteristics, like timing, of the
mock satellites. Can be 'typical', 'slow_succeed', or 'slow_fail'.
ports : list of int
Ports the mock satellites should listen on. A mock satellite will
be started for each specified port.
Raises
------
DeadSatellites
If one or more of the satellites died during startup.
"""
self._ports = ports
self._satellites = \
[MockSatelliteHandler(port, mode) for port in ports]
time.sleep(1)
if not self.all_running():
raise DeadSatellites()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.shutdown()
return False
def get_spans_received(self):
""" Gets the number of spans that mock satellites have received.
Returns
-------
int
The number of spans that the mock satellites have received.
None
If the satellite group has been shutdown.
Raises
------
DeadSatellites
            If one or more of the mock satellites have died unexpectedly.
SatelliteBadResponse
If one or more of the mock satellites sent a bad response.
"""
# before trying to communicate with the mock, check if its running
if not self._satellites or not self.all_running():
raise DeadSatellites("One or more satellites is not running.")
received = sum([s.get_spans_received() for s in self._satellites])
logger.info(f'All satellites have {received} spans.')
return received
def all_running(self):
""" Checks if all of the mock satellites in the group are running.
Returns
-------
bool
Whether or not the satellites are running.
"""
# if the satellites are shutdown, they aren't running
if not self._satellites:
return False
for s in self._satellites:
if not s.is_running():
return False
return True
def reset_spans_received(self):
""" Resets the number of spans that the group of mock satellites have
        received to 0. Does nothing if the satellite group has been shutdown.
Raises
------
SatelliteBadResponse
If we were unable to reset the number of spans received.
"""
if not self._satellites:
logger.warn(
"Cannot reset spans received since satellites are shutdown.")
return
logger.info("Resetting spans received.")
for s in self._satellites:
s.reset_spans_received()
def start(self, mode, ports=DEFAULT_PORTS):
""" Restarts the group of mock satellites. Should only be called if the
group is currently shutdown.
Parameters
----------
mode : str
Mode deteremines the response characteristics, like timing, of the
mock satellites. Can be 'typical', 'slow_succeed', or 'slow_fail'.
ports : list of int
Ports the mock satellites should listen on. A mock satellite will
be started for each specified port.
"""
if self._satellites:
logger.warn(
"Cannot startup satellites because they are already running.")
return
logger.info("Starting up mock satellite group.")
self.__init__(mode, ports=ports)
def shutdown(self):
""" Shutdown all satellites. Should only be called if the satellite
group is currently running.
"""
if not self._satellites:
logger.warn(
"Cannot shutdown satellites since they are already shutdown.")
return
logger.info("Shutting down mock satellite group.")
for s in self._satellites:
s.terminate()
self._satellites = None
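# Usage sketch (assumes mock_satellite.py is present under BENCHMARK_DIR and,
# on Linux, that the `trickle` bandwidth shaper is installed; 'typical' is one
# of the modes named in the docstrings above):
# with MockSatelliteGroup('typical') as satellites:
#     ...  # run a benchmark that reports spans to ports 8360-8367
#     print(satellites.get_spans_received())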
| true |
c67b3401b6c150422d3d18233c63004c20337f38
|
Python
|
marcelopontes1/Estudos-Python-GUPPE
|
/S5/trigesimo_programa.py
|
UTF-8
| 272 | 4.28125 | 4 |
[] |
no_license
|
num1 = int(input('Enter a number: '))
num2 = int(input('Enter a number: '))
num3 = int(input('Enter a number: '))
numbers = [num1, num2, num3]
# Sorting the list of integers in ascending order
numbers.sort()
print(f'The ascending order of these numbers is: {numbers}')
| true |
1e61d5b086adbb0b91072cb1ed003a3754fe6294
|
Python
|
jecustoms/yaweather
|
/examples/simple.py
|
UTF-8
| 235 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
from yaweather import UnitedKingdom, YaWeather
y = YaWeather(api_key='secret')
res = y.forecast(UnitedKingdom.London)
print(f'Now: {res.fact.temp} °C, feels like {res.fact.feels_like} °C')
print(f'Condition: {res.fact.condition}')
| true |
d79100c2cf4f428e4b02bcd3fc83fc10f2052fce
|
Python
|
Pride7K/Python
|
/Dicionario/dicionario_exercicio1.py
|
UTF-8
| 450 | 3.421875 | 3 |
[] |
no_license
|
from random import *
from operator import itemgetter
jogadores = {}
jogadores_vencedor = {}
for i in range(1, 5):
    jogadores[f'Jogador{i}'] = randint(1, 6)
print(jogadores)
for jogador, dado in jogadores.items():
    print(f'{jogador} rolled a {dado}')
print("")
jogadores_vencedor = sorted(jogadores.items(), key=itemgetter(1), reverse=True)
for i in jogadores_vencedor:
    print(f'{i[0]} rolled a {i[1]} on the die')
| true |
6b96e6145e09308a795759f7f733bc3924b8f928
|
Python
|
hhhhjjj/my_scrapy
|
/my_re.py
|
UTF-8
| 261 | 3.375 | 3 |
[] |
no_license
|
import re
line = "Cats are smarter than dogs"
matchObj = re.match(r'(.*) are (.*?) .*', line, re.M | re.I)
# re.M enables multi-line matching; re.I makes the match case-insensitive
if matchObj:
print(matchObj.group(1))
print(matchObj.group(2))
else:
print("no match")
| true |
78a056927bce75e078dc6c277b44cdabb26bea40
|
Python
|
caristi/CursoPython
|
/Rectangulo.py
|
UTF-8
| 251 | 3.703125 | 4 |
[] |
no_license
|
class Rectangulo:
def __init__(self,base,altura):
self.base = base
self.altura = altura
def calcularArea(self):
return self.base * self.altura
rec = Rectangulo(2,3)
print(rec.calcularArea())
| true |
e9ee625b24848dd5aa237ba9a9b4441374a9f64a
|
Python
|
chenxu0602/LeetCode
|
/1382.balance-a-binary-search-tree.py
|
UTF-8
| 1,755 | 3.515625 | 4 |
[] |
no_license
|
#
# @lc app=leetcode id=1382 lang=python3
#
# [1382] Balance a Binary Search Tree
#
# https://leetcode.com/problems/balance-a-binary-search-tree/description/
#
# algorithms
# Medium (75.48%)
# Likes: 398
# Dislikes: 23
# Total Accepted: 21.9K
# Total Submissions: 29.1K
# Testcase Example: '[1,null,2,null,3,null,4,null,null]'
#
# Given a binary search tree, return a balanced binary search tree with the
# same node values.
#
# A binary search tree is balanced if and only if the depth of the two subtrees
# of every node never differ by more than 1.
#
# If there is more than one answer, return any of them.
#
#
# Example 1:
#
#
#
#
# Input: root = [1,null,2,null,3,null,4,null,null]
# Output: [2,1,3,null,null,null,4]
# Explanation: This is not the only correct answer, [3,1,4,null,2,null,null] is
# also correct.
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is between 1 and 10^4.
# The tree nodes will have distinct values between 1 and 10^5.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def balanceBST(self, root: TreeNode) -> TreeNode:
# O(N)
def dfs(node):
return dfs(node.left) + [node.val] + dfs(node.right) if node else []
def chain(data):
if not data: return None
mid = len(data) // 2
head = TreeNode(data[mid])
head.left = chain(data[:mid])
head.right = chain(data[mid + 1:])
return head
if not root: return root
data = dfs(root)
return chain(data)
# @lc code=end
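# Example (a sketch that assumes the TreeNode class from the commented-out
# definition above): the degenerate chain from the problem statement,
# 1 -> 2 -> 3 -> 4, rebalances around the middle of the inorder traversal:
# root = TreeNode(1, None, TreeNode(2, None, TreeNode(3, None, TreeNode(4))))
# Solution().balanceBST(root)  # yields the tree [3,2,4,1] in level order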
| true |
e5aadd45883492841edc1bcbea1ae7fdc8bfa48a
|
Python
|
Miguelmargar/ucd_programming1
|
/practical 1/p1p3.py
|
UTF-8
| 161 | 2.921875 | 3 |
[] |
no_license
|
# My third program
print("Name: Miguel Martinez")
print("")
print("Address: 4 Westminster Park, Mount Brown, Blackrock, D8")
print("")
print("Phone no.: 0873752689")
| true |
beb92ba4a298eec5af3f535c45e534c4f7f653f5
|
Python
|
Ihsan545/Search-and-Sort-Array-Assignment
|
/test_collections/test_sort_and_search_array.py
|
UTF-8
| 642 | 2.828125 | 3 |
[] |
no_license
|
import unittest
import array as r
import fun_with_collections.sort_and_search_array as basic_list_exception
class MyTestCase(unittest.TestCase):
def test_search_array(self):
self.assertFalse(basic_list_exception.search_array([5, -1, 3, 6, 8, 9, 4, 10], -1))
    def test_sort_array(self):
        n = r.array('i', [9, 3, 1, 5, 8, 6, 4, 7])  # array.array needs a typecode
        to_list = n.tolist()
        to_list.sort()  # list.sort() sorts in place and returns None
        to_list = r.array('i', to_list)
        self.assertTrue(basic_list_exception.sort_array([9.0, 3.0, 1.0, 5.0, 8.0, 6.0, 4.0, 7.0]))
    """ I tried my best, but I could not figure out the sort test """
if __name__ == '__main__':
unittest.main()
| true |
69c56de19260f86dd9ac1ad0e7b22c6f1bc1b81f
|
Python
|
Wizmann/ACM-ICPC
|
/Leetcode/Algorithm/python/1000/00693-Binary Number with Alternating Bits.py
|
UTF-8
| 195 | 2.5625 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
def lowbit(x):
return x & (-x)
class Solution(object):
def hasAlternatingBits(self, n):
a = n ^ (n >> 1)
b = a ^ (a >> 1)
c = b - lowbit(b)
return c == 0
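# Worked example of the bit trick: for n = 5 (0b101), a = n ^ (n >> 1) = 0b111
# (a is all ones exactly when the bits alternate), so b = a ^ (a >> 1) = 0b100
# is a single bit, and c = b - lowbit(b) = 0 -> True. For n = 7 (0b111),
# a = 0b100, b = 0b110, c = 0b110 - 0b010 = 0b100 != 0 -> False.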
| true |
8e0c89b19ebf6efdea0b42c506edaed4224def0d
|
Python
|
md6380/vaccine-feed-ingest
|
/vaccine_feed_ingest/schema/schema.py
|
UTF-8
| 4,637 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import warnings
from typing import List, Optional
from pydantic import BaseModel
"""
DEPRECATION NOTICE
vaccine_feed_ingest/schema/schema.py is DEPRECATED. Instead of using this file,
import the published package using the line:
from vaccine_feed_ingest_schema import location
~or~
from vaccine_feed_ingest_schema import load
This file is maintained in the source so that currently open PRs will not break.
It will be removed from source by 2021-05-01, potentially earlier.
"""
warnings.warn(
"vaccine_feed_ingest/schema/schema.py is deprecated. Use the the published vaccine_feed_ingest_schema"
+ "package instead.",
DeprecationWarning,
stacklevel=2,
)
class Address(BaseModel):
"""
{
"street1": str,
"street2": str,
"city": str,
"state": str as state initial e.g. CA,
"zip": str,
},
"""
street1: str
street2: Optional[str]
city: str
state: str
zip: str
class LatLng(BaseModel):
"""
{
"latitude": float,
"longitude": float,
},
"""
latitude: float
longitude: float
class Contact(BaseModel):
"""
{
"contact_type": str as contact type enum e.g. booking,
"phone": str as (###) ###-###,
"website": str,
"email": str,
"other": str,
}
"""
contact_type: Optional[str]
phone: Optional[str]
website: Optional[str]
email: Optional[str]
other: Optional[str]
class OpenDate(BaseModel):
"""
{
"opens": str as iso8601 date,
"closes": str as iso8601 date,
}
"""
opens: Optional[str]
closes: Optional[str]
class OpenHour(BaseModel):
"""
{
"day": str as day of week enum e.g. monday,
"opens": str as hh:mm,
"closes": str as hh:mm,
}
"""
day: str
open: str
closes: str
class Availability(BaseModel):
"""
{
"drop_in": bool,
"appointments": bool,
},
"""
drop_in: Optional[bool]
appointments: Optional[bool]
class Vaccine(BaseModel):
"""
{
"vaccine": str as vaccine type enum,
"supply_level": str as supply level enum e.g. more_than_48hrs
}
"""
vaccine: str
supply_level: Optional[str]
class Access(BaseModel):
"""
{
"walk": bool,
"drive": bool,
"wheelchair": str,
}
"""
walk: Optional[bool]
drive: Optional[bool]
wheelchair: Optional[str]
class Organization(BaseModel):
"""
{
"id": str as parent organization enum e.g. rite_aid,
"name": str,
}
"""
id: Optional[str]
name: Optional[str]
class Link(BaseModel):
"""
{
"authority": str as authority enum e.g. rite_aid or google_places,
"id": str as id used by authority to reference this location e.g. 4096,
"uri": str as uri used by authority to reference this location,
}
"""
authority: Optional[str]
id: Optional[str]
uri: Optional[str]
class Source(BaseModel):
"""
{
"source": str as source type enum e.g. vaccinespotter,
"id": str as source defined id e.g. 7382088,
"fetched_from_uri": str as uri where data was fetched from,
"fetched_at": str as iso8601 datetime (when scraper ran),
"published_at": str as iso8601 datetime (when source claims it updated),
"data": {...parsed source data in source schema...},
}
"""
source: str
id: str
fetched_from_uri: Optional[str]
fetched_at: Optional[str]
published_at: Optional[str]
data: dict
class NormalizedLocation(BaseModel):
id: str
name: Optional[str]
address: Optional[Address]
location: Optional[LatLng]
contact: Optional[List[Contact]]
languages: Optional[List[str]] # [str as ISO 639-1 code]
opening_dates: Optional[List[OpenDate]]
opening_hours: Optional[List[OpenHour]]
availability: Optional[Availability]
inventory: Optional[List[Vaccine]]
access: Optional[Access]
parent_organization: Optional[Organization]
links: Optional[List[Link]]
notes: Optional[List[str]]
active: Optional[bool]
source: Source
class ImportMatchAction(BaseModel):
"""Match action to take when importing a source location"""
id: Optional[str]
action: str
class ImportSourceLocation(BaseModel):
"""Import source location record"""
source_uid: str
source_name: str
name: Optional[str]
latitude: Optional[float]
longitude: Optional[float]
import_json: dict
match: Optional[ImportMatchAction]
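# Construction sketch (field values below are placeholders, not real data):
# loc = NormalizedLocation(
#     id="example_source:1",
#     source=Source(source="example_source", id="1", data={}),
# )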
| true |
74fe53c37c91a3d49d0d414247b77bef2d27ed5b
|
Python
|
Murph9/KaggleComp
|
/intseq/firstreadin.py
|
UTF-8
| 703 | 2.96875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 13 13:36:59 2016
@author: Kazuma Wittick
"""
"""
import numpy as np
train = np.loadtxt('train.csv', dtype = str)
train = train[1:2]
for row in train:
seq = str(row)
print seq
"""
import re
with open('train.csv') as f:
content = f.readlines()
content.pop(0)
data = []
for line in content:
m = re.search('"(.*)"', line)
string = m.group(0)
string = string.replace('"', '')
array = map(int, string.split(','))
data.append(array)
#print data[0:10]
minimum = 10000
for array in data:
yy = len(array)
minimum = min(yy, minimum)
if yy == 1:
print array
print minimum
print 348751093847501987*19087501938475
| true |
2bd1b2d6412fa0a567a715a206706e8d758335bf
|
Python
|
harryhaos/pyal
|
/reverselist.py
|
UTF-8
| 597 | 3.625 | 4 |
[] |
no_license
|
__author__ = 'harryhao'
'''
reverse linked list
'''
class Node:
def __init__(self, data, next_n):
self.data = data
self.next_n = next_n
def reve(head):
p = head
q = head.next_n
head.next_n = None
while q:
r = q.next_n
q.next_n = p
p = q
q = r
return p
def print_arr(head):
while head:
print head.data
head = head.next_n
if __name__ == '__main__':
a = Node(1, None)
b = Node(2, None)
c = Node(3, None)
a.next_n = b
b.next_n = c
c.next_n = None
print_arr(reve(a))
| true |
c6b5ab2788210f46d49bca706cc12150c2fc7fe2
|
Python
|
SucharitaDhar/PirplePythonCourse
|
/Homework #4 (lists)/List.py
|
UTF-8
| 693 | 3.828125 | 4 |
[
"Unlicense"
] |
permissive
|
myUniqueList = []
myLeftovers = []
def addToList(item):
if item in myUniqueList:
addToLeftovers(item)
return False
else:
myUniqueList.append(item)
return True
def addToLeftovers(item):
myLeftovers.append(item)
# Testing the addToList function
print(myUniqueList) #[]
print(addToList("pizza")) # Returns True
print(myUniqueList) # ['pizza']
print(myLeftovers) # []
# Adding the element that already exists
print(addToList("pizza")) # Returns False
print(myUniqueList) # ['pizza']
print(myLeftovers) # ['pizza']
# Adding a new element
print(addToList("burger")) # Returns True
print(myUniqueList) # ['pizza', 'burger']
print(myLeftovers) # ['pizza']
| true |
a48510578f6daade35a3b151d73ceffd84baf6e0
|
Python
|
satyx/Data-Structure-Practice
|
/paranthesis_balance.py
|
UTF-8
| 559 | 3.21875 | 3 |
[] |
no_license
|
test = int(input())
start = ('(', '{', '[')
# Map each closer to its matching opener so one branch handles all three.
pairs = {')': '(', '}': '{', ']': '['}
while test > 0:
    a = list()
    flag = True
    exp = input()
    for i in exp:
        if i in start:
            a.append(i)
        elif i in pairs:
            if len(a) == 0 or a.pop() != pairs[i]:
                flag = False
                break
    if flag == False:
        print("not balanced")
    elif len(a) != 0:
        print("not balanced")
    else:
        print("balanced")
    test -= 1
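# Example session (a sketch): for input "1" then "{[()]}" every closer matches
# the opener popped from the stack, so the program prints "balanced"; for
# "([)]" the pop check fails on ')' and it prints "not balanced".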
| true |
3bebe8768c0ba67dd3b638ddef003e9f7ee04350
|
Python
|
mve17/SPISE-2019
|
/Day 3/rain.py
|
UTF-8
| 1,723 | 3.5 | 4 |
[] |
no_license
|
from tkinter import *
import time
WINDOW_WIDTH=1000 #pixels
WINDOW_HEIGHT=300
RAINDROP_WIDTH=10
class Sky():
def __init__(self):
#gui window setup
self.root=Tk()
self.canvas=Canvas(self.root,width=WINDOW_WIDTH,height=WINDOW_HEIGHT)
self.canvas.pack()
#lists to store raindrop information
self.raindrops=[]
self.raindrop_heights=[]
#initialize scheduled rainfall
self.time=0
self.schedule=[]
def add_raindrops_to_top(self,position_list): #positions between 0 and WINDOW_WIDTH
for position in position_list:
self.raindrops.append(self.canvas.create_rectangle(position,0,position+RAINDROP_WIDTH,RAINDROP_WIDTH,fill='blue'))
self.raindrop_heights.append(0)
def fall(self,distance=7):
remaining_raindrops=[]
remaining_raindrop_heights=[]
for i,raindrop in enumerate(self.raindrops):
self.canvas.move(raindrop,0,distance)
self.raindrop_heights[i]+=distance
if self.raindrop_heights[i]<WINDOW_HEIGHT: #raindrop i still on screen
remaining_raindrops.append(raindrop)
remaining_raindrop_heights.append(self.raindrop_heights[i])
else:
self.canvas.delete(raindrop)
self.canvas.update()
self.raindrops=remaining_raindrops
self.raindrop_heights=remaining_raindrop_heights
self.time+=1
def set_raindrop_schedule(self,schedule): #raindrop schedule should be a list of lists, of raindrop positions to add at time 0,1,2...
self.schedule=schedule
def start_rain(self):
while True:
if self.time<len(self.schedule):
self.add_raindrops_to_top(self.schedule[self.time])
time.sleep(.1)
self.fall()
if __name__=='__main__':
sky=Sky()
sky.set_raindrop_schedule([[0,7,100,140,150]])
sky.start_rain()
mainloop() #required to get the tkinter gui going
| true |
57de17b868dfccdd14d3b7fefccd1d3a9ea8ddfe
|
Python
|
lonyle/causal_bandit
|
/code+data/algorithm_framework.py
|
UTF-8
| 5,523 | 2.609375 | 3 |
[] |
no_license
|
import numpy as np
class AlgorithmFramework:
def __init__(self, algorithm, offline_data, match_machine, option='offline_online'):
self.algorithm = algorithm # the algorithm oracle has 'draw_arm' and 'update' two APIs
self.match_machine = match_machine
self.offline_data = offline_data
self.option = option
self.N_offline = len(self.offline_data[self.match_machine.treatment_name])
self.context_dim = len(offline_data.keys()) - 3 # exclude "action", "reward" and "propensity score"
# if the match_machine is ps_matching, the context is the propensity score
self.context_pool = []
self.choice_pool = [] # has the same index as the context
for idx in range(self.N_offline):
if self.match_machine.__class__.__name__ == 'PropensityScoreMatching':
self.context_pool.append(self.offline_data['propensity_score'][idx])
else:
context_vec = []
for context_name in self.match_machine.context_names:
context_vec.append(self.offline_data[context_name][idx])
self.context_pool.append(context_vec)
# added on Jan 29
if self.match_machine.choice_names != None:
choice_vec = []
for choice_name in self.match_machine.choice_names:
choice_vec.append(self.offline_data[choice_name][idx])
self.choice_pool.append(choice_vec)
self.context_generator = False
        self.t = 0  # maintain a time count
self.batch_mode_status = None # by default, batch_mode_status=None which means we do not use the batch mode
# When batch_mode_status = True, we need to do the batch update
# When batch_mode_status = False, we are in the online phase and we skip the batch update
######## public ########
def real_draw_arm(self, context, choices=[], do_match=True, update_pending=True):
# sometimes, we choose from a selected subset
self.real_context = context
self.real_choices = choices
# do the matching first
if self.option != 'only_online':
if self.batch_mode_status == None:
if do_match:
self.match_all_possible()
action = self.match_machine.get_pending_action(context, update_pending)
if action: # short-cut
print ('context:', context, 'pending action:', action)
return action
elif self.batch_mode_status == True:
self.match_all_possible_batch()
elif self.batch_mode_status == False:
pass # do nothing
else:
print ('invalid value for batch_mode_status')
action = self.choose_arm(context, choices)
self.real_action = action
return action
def real_feedback(self, reward):
if self.option != 'only_offline':
# if only_offline, do not need to update the online feedback
self.update(self.real_context, self.real_action, reward, is_online=True)
if self.match_machine.__class__.__name__ != 'PropensityScoreMatching':
self.context_pool.append(self.real_context)
            if self.match_machine.choice_names is not None:
self.choice_pool.append(self.real_choices)
########################
def choose_arm(self, context, choices):
if self.algorithm.contextual == True:
if len(choices) == 0: # default choices from a fixed set of arms
action = self.algorithm.draw_arm(context, self.t, self.option)
else:
action = self.algorithm.draw_arm(context, choices, self.t, self.option)
else: # now, context-independent decisions support dynamic choices
action = self.algorithm.draw_arm(choices, self.t, self.option)
return action
def update(self, context, action, reward, is_online=False):
# depending on whether the algorithm has context or not
if self.algorithm.contextual == True:
self.algorithm.update(context, action, reward, is_online)
else:
self.algorithm.update(action, reward, is_online)
self.t += 1
def match_all_possible(self):
# match until there is no matched
while True:
sample = self.match(self.t)
if sample:
self.update(sample['context'], sample['action'], sample['reward'])
else:
return
def match(self, t):
# different algorithms
random_context, random_choice_set = self.generate_random_context()
if self.algorithm.contextual == True:
action = self.algorithm.draw_arm(random_context, random_choice_set, t)
else:
action = self.algorithm.draw_arm(random_choice_set, t)
reward = self.match_machine.find_sample_reward(random_context, action)
if not reward:
return False
else:
return {"context": random_context, "reward": reward, "action": action}
## updated on 2020-10-08: match all the data points in a batch mode
def match_all_possible_batch(self):
for action in range(self.algorithm.N_arm):
while True:
random_context, _ = self.generate_random_context()
reward = self.match_machine.find_sample_reward(random_context, action)
if not reward: # no more offline data
break
else:
self.update(random_context, action, reward)
def get_environment_for_context(self, env):
self.context_generator = True
self.env = env
def generate_random_context(self):
# generate a random context from the context distribution
# TODO: if we have both context and choice_set, we also need to generate the choice_set
if self.context_generator == True:
random_choice_set = [] # set the random choice set to empty (do not consider Yahoo data)
return self.env.generate_context(), random_choice_set
random_idx = np.random.randint(len(self.context_pool))
random_context = self.context_pool[random_idx]
if len(self.choice_pool) > 0:
random_choice_set = self.choice_pool[random_idx]
else:
random_choice_set = []
return random_context, random_choice_set
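# Hedged usage sketch (added; BanditAlgo, Matcher, offline_df, contexts and
# observe_reward are hypothetical stand-ins for the algorithm oracle, match
# machine, offline data and environment expected by AlgorithmFramework):
#   framework = AlgorithmFramework(BanditAlgo(), offline_df, Matcher(),
#                                  option='offline_online')
#   for context in contexts:
#       action = framework.real_draw_arm(context)
#       framework.real_feedback(observe_reward(context, action))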
| true |
b149d514b231196b2e285416ee3177a3c7849695
|
Python
|
laperlej/hkmeans
|
/kmeans.py
|
UTF-8
| 6,091 | 3.078125 | 3 |
[] |
no_license
|
from itertools import chain, imap
import numpy as np
from scipy.stats import pearsonr
import random
from tree import Tree
import sklearn.metrics as metrics
class ClusteringAlgorithm(object):
def __init__(self, points):
self.points = points
self.idmaker = IdMaker()
self.clusters = None
def __str__(self):
string = ""
count = 1
for cluster in self.clusters:
string += "cluster%s:\n" % (count)
count += 1
            tmp = [str(point) for point in cluster.points]
tmp.sort()
for point in tmp:
string += "\t%s\n" % (point)
return string
def get_labels(self):
points = []
labels_true = []
labels_pred = []
count=0
for cluster in self.clusters:
for point in cluster.points:
points.append(point)
label = self.idmaker.get_id(point.label)
labels_true.append(label)
labels_pred.append(count)
count+=1
return points, labels_true, labels_pred
def eval_clusters(self):
"""calculates the adjusted rand index of the clustering
based on the label of the points
"""
_, labels_true, labels_pred = self.get_labels()
ari = metrics.adjusted_rand_score(labels_true, labels_pred)
hom = metrics.homogeneity_score(labels_true, labels_pred)
comp = metrics.completeness_score(labels_true, labels_pred)
return ari, hom, comp
class KMeans(ClusteringAlgorithm):
def __init__(self, points, k):
super(KMeans, self).__init__(points)
self.k = k
self.clusters = []
def generate_clusters(self):
#initialise centers
centers = random.sample(self.points, self.k)
self.clusters = [Cluster([], centers[i]) for i in range(self.k)]
self.clusters[0].points = self.points
#repeat until no change
moves = [None]
while moves:
moves = []
#add points to clusters
for cluster in self.clusters:
                for point in list(cluster.points):  # iterate over a copy: points are removed during the loop
dists = [clust.distance(point) for clust in self.clusters]
index_min = np.argmin(dists)
if cluster is not self.clusters[index_min]:
cluster.points.remove(point)
self.clusters[index_min].points.append(point)
moves.append(point)
#calculate new centers
for cluster in self.clusters:
try:
cluster.new_center()
except IndexError:
cluster.center = random.choice(self.points)
class HKMeans(ClusteringAlgorithm):
def __init__(self, points, k):
super(HKMeans, self).__init__(points)
self.k = k
self.clusters = Tree(Cluster(points))
def split_node(self, node):
cluster = node.content
kmeans = KMeans(cluster.points, k=2)
kmeans.generate_clusters()
node.left, node.right = [Tree(cluster) for cluster in kmeans.clusters]
def node_to_split(self):
return max(self.clusters)
    def generate_clusters(self):
        # note: the root Tree(Cluster(...)) is already built in __init__;
        # the original re-created one here and discarded it
        while len(self.clusters) < self.k:
            self.split_node(self.node_to_split())
class Cluster(object):
def __init__(self, points, center=None):
self.points = points
self.center = center
def average_distance(self):
dists = [self.center.distance(point) for point in self.points]
return np.mean(dists)
def distance(self, point):
return self.center.distance(point)
def new_center(self):
if self.points:
new_data = []
for arrays in zip(*[point.data for point in self.points]):
new_array = np.mean(np.dstack(arrays), axis=2)
new_data.append(*new_array)
self.center = Dataset(new_data)
else:
raise IndexError
def __lt__(self, other):
return self.average_distance() < other.average_distance()
def __gt__(self, other):
return self.average_distance() > other.average_distance()
class Dataset(object):
def __init__(self, data, name="", label="", comment=""):
self.data = [np.array(array) for array in data]
self.name = name
self.label = label
self.comment = comment
def __str__(self):
return "{0} - {1}".format(self.label, self.comment)
def distance(self, dataset):
zipped_data = zip(*[self.data, dataset.data])
coeffs = [1-pearsonr(*arrays)[0] for arrays in zipped_data]
return np.mean(coeffs)
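    # Worked example (added note): for perfectly correlated arrays, pearsonr
    # returns 1.0, so the per-array term is 1 - 1.0 = 0.0; perfectly
    # anti-correlated arrays give 1 - (-1.0) = 2.0. The distance is the mean
    # of these values, so it always lies in [0, 2].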
class IdMaker(object):
"""returns a unique id for any object
"""
def __init__(self):
self.items = {}
self.next_id = 0
def __len__(self):
return len(self.items)
def get_next_id(self):
"""returns the next id
"""
self.next_id += 1
return self.next_id
def get_id(self, item):
"""looks if object already assigned an id
otherwise creates a new id
"""
index = self.items.get(item)
        if index is None:
self.items[item] = self.get_next_id()
return self.items[item]
def get_items(self):
"""returns the list of all items
"""
return self.items
def read_input():
"""test function which reads points from test_data.txt(iris3),
clusters it and outputs the clusters
"""
points = []
with open("test_data.txt") as test_file:
for line in test_file:
line = line.split()
values = [[float(x) for x in line[1:4]]]
point = Dataset(values, label=line[5])
points.append(point)
return points
if __name__ == '__main__':
POINTS = read_input()
#KMEANS = KMeans(POINTS, 3)
#KMEANS.generate_clusters()
#print KMEANS
HKMEANS = HKMeans(POINTS, 3)
HKMEANS.generate_clusters()
print HKMEANS
| true |
b8ece6cd2dd1b287de1a03dff8ca71ffdd62b5be
|
Python
|
alenzhao/Lux
|
/bf.py
|
UTF-8
| 4,986 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import sys
import os.path
import re
import numpy
import scipy.stats
import scipy.special
import argparse
def diff_dirichlet_density_at_origin(a,a2):
a = numpy.array(a)
a2 = numpy.array(a2)
if len(a) != 3 or len(a2) != 3:
sys.exit('error: both concentration parameters should have three elements')
return (scipy.special.gamma(numpy.sum(a))*scipy.special.gamma(numpy.sum(a2)))/ \
(numpy.prod(scipy.special.gamma(a))*numpy.prod(scipy.special.gamma(a2)))* \
numpy.prod(scipy.special.gamma(a+a2-1))/scipy.special.gamma(numpy.sum(a+a2)-3)
def calculate_bf(chains):
samples = [{},{}]
# read the HMC chain files
for n,filename in enumerate(chains):
with open(filename,'r') as f:
labels_flag = False
adaptation_flag = False
indices = {}
sample_index = 0
# go through the lines of the HMC chain file
for line in f:
# comment lines
if line[0] == '#':
# let us figure out how many samples there are
if line.strip().startswith('# num_samples ='):
n_samples = int(re.match('# num_samples = ([0-9]+) \(Default\)',line.strip()).group(1))
# to make sure that we do not take warmup samples
if line.strip() == '# Adaptation terminated':
adaptation_flag = True
continue
# the first nonempty and noncomment line has the column headers
if not labels_flag and len(line.strip().split(',')) > 1:
# get the column indices of the mu variables and initialize sample arrays
labels = line.strip().split(',')
mu_indices = numpy.where([label.startswith('mu.') for label in labels])[0]
for mu_index in mu_indices:
index,component = map(int,re.match('mu\.([0-9]+)\.([1-3]{1})',labels[mu_index]).group(1,2))
if not samples[n].has_key(index):
indices[index] = [None,None,None]
samples[n][index] = numpy.nan*numpy.zeros((n_samples,3))
                        if indices[index][component-1] is not None:
sys.exit('error: the header %s is found more than once in the file %s'%(labels[mu_index],filename))
indices[index][component-1] = mu_index
labels_flag = True
continue
                # after seeing the '# Adaptation terminated' line all the nonempty and noncomment lines should be sample lines
if adaptation_flag and len(line.strip().split(',')) > 1:
fields = line.strip().split(',')
for mu_index in indices.iterkeys():
if indices[mu_index][0] == None:
sys.exit('error: the variable mu.%d.1 is not found in the file %s'%(mu_index,filename))
elif indices[mu_index][1] == None:
sys.exit('error: the variable mu.%d.2 is not found in the file %s'%(mu_index,filename))
elif indices[mu_index][2] == None:
sys.exit('error: the variable mu.%d.3 is not found in the file %s'%(mu_index,filename))
samples[n][mu_index][sample_index,0] = fields[indices[mu_index][0]]
samples[n][mu_index][sample_index,1] = fields[indices[mu_index][1]]
samples[n][mu_index][sample_index,2] = fields[indices[mu_index][2]]
sample_index += 1
# check that the chains have the same mu variables
if not (numpy.sort(samples[0].keys()) == numpy.sort(samples[1].keys())).all():
sys.exit('error: the number of mu variables or their indices differ between the two chains')
# go through all the mu variables
for mu_index in numpy.sort(samples[0].keys()):
# calculate all the pair-wise differences between the samples of the two chains
Delta_theta = numpy.vstack(((numpy.array([samples[0][mu_index][:,0]]).T - samples[1][mu_index][:,0]).flatten(1),(numpy.array([samples[0][mu_index][:,1]]).T - samples[1][mu_index][:,1]).flatten(1)))
# kernel density estimation
density = scipy.stats.kde.gaussian_kde(Delta_theta,bw_method='scott')
density.set_bandwidth(bw_method=density.factor/4.)
# calculate the savage-dickey density ratio
print 'mu[%d]\t%f'%(mu_index,diff_dirichlet_density_at_origin([0.8,0.8,0.8],[0.8,0.8,0.8])/density.evaluate([0,0]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculates Bayes factor between two conditions based on the Stan HMC output chains')
parser.add_argument('-c1','--chain-1',action='store',dest='chain1',type=str,required=True,help='output chain of the first condition')
parser.add_argument('-c2','--chain-2',action='store',dest='chain2',type=str,required=True,help='output chain of the second condition')
parser.add_argument('-v','--version',action='version',version='%(prog)s 0.666')
options = parser.parse_args()
if not os.path.isfile(options.chain1):
sys.exit('error: %s is not a file'%(options.chain1))
if not os.path.isfile(options.chain2):
sys.exit('error: %s is not a file'%(options.chain2))
calculate_bf([options.chain1,options.chain2])
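# Hedged usage sketch (added; the chain file names are placeholders):
#   python bf.py -c1 condition1_samples.csv -c2 condition2_samples.csv
# Each output line is the Savage-Dickey Bayes factor for one mu variable.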
| true |
e9f2fcb61cb0fad2d25349c7d1e68a66971b63b7
|
Python
|
GeforceTesla/ECE661_Computer_Vision_Combined
|
/Hw9/import/Ncc_Op.py
|
UTF-8
| 3,515 | 2.671875 | 3 |
[] |
no_license
|
import cv2
import numpy as np
from Epipole_Op import Epipole_Op
class Ncc_Op(object):
def __init__(self, des1, des2, img1, img2, matrix_F):
self.descriptor1 = des1
self.descriptor2 = des2
self.img1 = img1
self.img2 = img2
self.matrix_F = matrix_F
self.ncc_size = 13
self.threshold = 0.8
self.ncc_calculation()
def get_neighbor(self, x, y, gray_image):
size = self.ncc_size
size_half = int(size/2)
neighbor = np.zeros((size,size))
for m in range(-size_half, (size_half+1)):
for n in range(-size_half, (size_half+1)):
neighbor[n+size_half][m+size_half] = gray_image[y+n][x+m]
return neighbor
def get_ncc_point(self, neighbor1, neighbor2):
point = 0
mean1 = 0
mean2 = 0
ncc_numerator = 0
ncc_denominator1 = 0
ncc_denominator2 = 0
Wncc = self.ncc_size
for i in range(Wncc):
for j in range(Wncc):
mean1 += neighbor1[i][j]
mean2 += neighbor2[i][j]
mean1 = mean1/(Wncc*Wncc)
mean2 = mean2/(Wncc*Wncc)
for i in range(Wncc):
for j in range(Wncc):
ncc_numerator += (neighbor1[i][j]-mean1)*(neighbor2[i][j]-mean2)
ncc_denominator1 += np.square(neighbor1[i][j]-mean1)
ncc_denominator2 += np.square(neighbor2[i][j]-mean2)
ncc_denominator = np.sqrt(ncc_denominator1*ncc_denominator2)
point = ncc_numerator/ncc_denominator
return point
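    # Added note: get_ncc_point computes the normalized cross-correlation
    #   NCC(a, b) = sum((a - mean(a)) * (b - mean(b)))
    #               / sqrt(sum((a - mean(a))^2) * sum((b - mean(b))^2))
    # over the two Wncc x Wncc neighborhoods, so the score lies in [-1, 1].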
def ncc_calculation(self):
for i in self.descriptor1:
i.neighbor = self.get_neighbor(i.x, i.y, self.img1)
for i in self.descriptor2:
i.neighbor = self.get_neighbor(i.x, i.y, self.img2)
for i in range(len(self.descriptor1)):
point = 0
point_max = -1e10
matched_point = 0
            x1 = np.zeros(3)
            x2 = np.zeros(3)
            # fill the homogeneous coordinates of point i in image 1; the
            # original left x1 at zero, which made the epipolar check below
            # vacuous (x2' * F * 0 is always 0)
            x1[0] = self.descriptor1[i].x
            x1[1] = self.descriptor1[i].y
            x1[2] = 1
            row = int(self.descriptor1[i].y_rect)
for j in range(len(self.descriptor2)):
if(int(self.descriptor2[j].y_rect) < row -4 or int(self.descriptor2[j].y_rect) > row + 4):
continue
x2[0] = self.descriptor2[j].x
x2[1] = self.descriptor2[j].y
x2[2] = 1
constraint1 = np.dot(np.dot(x2.T, self.matrix_F), x1)
                if abs(constraint1) > 0.055:  # use the magnitude: the epipolar residual can be negative
continue
point = self.get_ncc_point(self.descriptor1[i].neighbor, self.descriptor2[j].neighbor)
if(point > point_max):
point_max = point
self.descriptor1[i].point = point_max
self.descriptor1[i].match = j
self.descriptor2[j].point = point_max
self.descriptor2[j].match = i
for j in range(i):
if(self.descriptor1[j].match == self.descriptor1[i].match):
                    if(self.descriptor1[j].point < self.descriptor1[i].point):
                        self.descriptor1[j].match = -1
                        self.descriptor1[j].point = -1  # was .score; .point is the attribute used elsewhere
                    else:
                        self.descriptor1[i].match = -1
                        self.descriptor1[i].point = -1
break
def get_descriptor1(self):
return self.descriptor1
def get_descriptor2(self):
return self.descriptor2
| true |
d160b3ff6a0bc21d5382c1856630f4aa14848385
|
Python
|
jcyang36/YoutubeDemStats
|
/youtube_stats.py
|
UTF-8
| 1,526 | 2.625 | 3 |
[] |
no_license
|
#We break this out into separate files because of YouTube API Quota Limit
import pandas as pd
import urllib.request
import json
#api key
key = "key=YOURKEYHERE"
#video api and params
youtube_vid_stats = "https://www.googleapis.com/youtube/v3/videos?"
part_stats = "part=statistics"
video_views = []
video_likes = []
video_dislikes = []
video_comments = []
video_list = pd.read_excel('candidate_yt_vids.xlsx')
ids = video_list['Video ID']
for i in ids:
req = youtube_vid_stats+'&'+key+'&'+part_stats+'&id='+i
search_items = json.loads(urllib.request.urlopen(req).read())
print(req)
if search_items['pageInfo']['totalResults'] == 0:
views = 'removed'
likes = 'removed'
dislikes = 'removed'
comments = 'removed'
else:
stats = search_items['items'][0]['statistics']
views = stats['viewCount']
        try:
            likes = stats['likeCount']
        except KeyError:  # the field is absent when the uploader disables it
            likes = 'disabled'
        try:
            dislikes = stats['dislikeCount']
        except KeyError:
            dislikes = 'disabled'
        try:
            comments = stats['commentCount']
        except KeyError:
            comments = 'disabled'
video_views.append(views)
video_likes.append(likes)
video_dislikes.append(dislikes)
video_comments.append(comments)
video_list['views'] = video_views
video_list['likes'] = video_likes
video_list['dislikes'] = video_dislikes
video_list['comments'] = video_comments
video_list.to_excel('candidate_yt_statistics.xlsx')
print('done')
| true |
8b082caef883032806350423486681638e760828
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02409/s115682449.py
|
UTF-8
| 1,207 | 2.6875 | 3 |
[] |
no_license
|
import sys
# rooms[b][f][r] = number of tenants in building b, floor f, room r
# (4 buildings x 3 floors x 10 rooms)
rooms = [[[0 for r in xrange(10)] for f in xrange(3)] for b in xrange(4)]
n = input()
for i in xrange(n):
    b, f, r, v = map(int, raw_input().split())
    rooms[b - 1][f - 1][r - 1] += v
for b in xrange(4):
    for f in xrange(3):
        for r in xrange(10):
            sys.stdout.write(' ' + str(rooms[b][f][r]))
        print ""
    if b < 3:
        print "#" * 20
| true |
a291ecb36ca1578c8511f1df3a37d38a44329229
|
Python
|
Rup-Royofficial/Codeforces_solutions
|
/Gravity_Flip.py
|
UTF-8
| 97 | 3.671875 | 4 |
[] |
no_license
|
a = int(input())
x = list(map(int,input().split()))
x.sort()
for i in x:
print(i,end=" ")
| true |
944b137529a001a634728bacacbb13aeb15c974f
|
Python
|
meethu/LeetCode
|
/solutions/0204.count-primes/count-primes.py
|
UTF-8
| 968 | 3.5 | 4 |
[] |
no_license
|
class Solution:
# def isPrime(self, n):
# for i in range(2, int(n ** 0.5) + 1):
# if n % i == 0:
# return False
# return True
# def countPrimes(self, n: int) -> int:
# if n <= 2: return 0
# cnt = 0
# for i in range(2, n):
# if self.isPrime(i):
# cnt += 1
# return cnt
def countPrimes(self, n: int) -> int:
if n <= 2: return 0
isPrime = [1] * n
cnt = 0
for i in range(2, n):
if isPrime[i]:
# 2x2 2x3 2x4 2x5 ...
# 3x2 3x3 3x4 3x5 ...
                # smaller multiples were already marked by smaller primes,
                # so start from i * i to skip the duplicated work
j = i * i
while j < n:
isPrime[j] = 0
j += i
return sum(isPrime[2:])
# https://leetcode-cn.com/problems/count-primes/solution/ru-he-gao-xiao-pan-ding-shai-xuan-su-shu-by-labula/
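# Added note: the sieve above runs in O(n log log n) time and O(n) extra
# space, versus roughly O(n * sqrt(n)) for the commented trial-division
# version; e.g. Solution().countPrimes(10) == 4 (primes 2, 3, 5, 7).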
| true |
360f22b4e01448b12533e8c3590111eef8732b6e
|
Python
|
Kitsunekoyama/Area
|
/web/app.py
|
UTF-8
| 16,569 | 2.734375 | 3 |
[] |
no_license
|
"""
Documentation for the web module
File Handling the front-end of AREA's web interface
"""
from flask import Flask, render_template, request, redirect, url_for
import requests
import time
import json
import sys
app = Flask(__name__)
"""
**Global variables**
ID: Server generated UID
data: services and A/R index associated - json
user_data: Array of relations - A/R list
info_action: Informations about a specific Action (service, action, info)
info_reaction: Informations about a specific Reaction (service, reaction, info)
info_username: Information about the username (name of the user)
"""
ID = []
data = []
user_data = []
info_action = []
info_reaction = []
info_username = []
google_redirection = ""
"""
**Google module**
contains:
-google function
-google_callback function
google:
posts the information to the google service at: http://sv:8080/google
google_callback:
posts the user information to the authentication page and redirects to the google authentication service at: http://sv:8080/authGoogle
"""
@app.route('/google', methods=['GET', 'POST'])
def google():
response = requests.post("http://sv:8080/google")
return (redirect(response.text))
@app.route('/google_callback', methods=['GET', 'POST'])
def google_callback():
global relation
r = requests.post("http://sv:8080/authGoogle", json = {'id': ID, 'code': request.args.get('code')})
return (redirect(google_redirection))
"""
**fill_data & index functions**
index:
calls fill_data
fill_data:
fills the data global variable with the services and A/R index associated
"""
@app.route('/fill_data')
def fill_data():
global data
# jsn = []
# error_msg = []
# response = requests.get("https://api.jikan.moe/v3/search/anime?q=rezero")
# if response.status_code == 200:
# jsn = response.json()
# send and reception info to server
r = requests.post("http://sv:8080/service")
# check post and get
if r.status_code == 500:
print("error server", file=sys.stderr)
error_msg = "Error server"
# convert reception info to json
data = r.json()
print("Fill data", file=sys.stderr)
print(data, file=sys.stderr)
@app.route('/', methods=['GET', 'POST'])
def index():
if data == []:
fill_data()
return render_template('index.html', **locals())
"""
**logout function**
logout:
empties the global variables: ID, user_data and info_username
"""
@app.route('/logout', methods=['GET', 'POST'])
def logout():
global ID
global user_data
global info_username
ID = []
user_data = []
info_username = []
return redirect(url_for('index'))
"""
**login function**
login:
checks the login information
"""
@app.route("/login", methods=['POST', 'GET'])
def login(status = 'default'):
error_msg = ""
if data == []:
fill_data()
if (status == 'error'):
error_msg = "Invalid username or password"
print("Invalid username or password", file=sys.stderr)
if (status == 'error_post'):
error_msg = "Bad request method POST"
print("Bad request method POST", file=sys.stderr)
return render_template('login.html', **locals())
"""
**register function**
register:
    allows a user to register
"""
@app.route("/register", methods=['POST', 'GET'])
def register(status = 'default'):
error_msg = ""
if data == []:
fill_data()
if (status == 'error'):
error_msg = "Invalid username or password"
print("Invalid username or password", file=sys.stderr)
if (status == 'error_post'):
error_msg = "Bad request method POST"
print("Bad request method POST", file=sys.stderr)
return render_template('register.html', **locals())
"""
**check_register function**
check_register:
posts to the server the information the user filled in the registration page
"""
@app.route("/check_register", methods=['POST', 'GET'])
def check_register():
global ID
global info_username
print("CHECK_REGISTER", file=sys.stderr)
if request.method == 'POST':
u = request.form['username']
p = request.form['password']
# send and reception info to server
r = requests.post("http://sv:8080/signUp?username=" + u + "&password=" + p)
# check post and get
if r.status_code == 500:
print(r, file=sys.stderr)
            return register('error')
if r.status_code == 200:
ID = r.text
# convert reception info to json
# r = r.json()
print("GO HUB", file=sys.stderr)
info_username = u
return hub()
return register('error_post')
"""
**check_id function**
check_id:
checks the credentials the user filled in when logging in
"""
@app.route("/hub", methods=['POST', 'GET'])
def check_id():
global ID
global info_username
if request.method == 'POST':
u = request.form['username']
p = request.form['password']
print(u, file=sys.stderr)
print(p, file=sys.stderr)
# send and reception info to server
r = requests.post("http://sv:8080/login?username=" + u + "&password=" + p)
# check post and get
        if r.status_code == 200:
            ID = r.text
            print("SUCCESS:", file=sys.stderr)
            print(ID, file=sys.stderr)
        if r.status_code == 500:
            print("ERROR: " + str(r), file=sys.stderr)
            return login('error')
info_username = u
return hub()
if ID == []:
return login('error_post')
return hub()
"""
**hub function**
hub:
prints out the relations the user chose on the /hub page
"""
@app.route("/hub", methods=['POST', 'GET'])
def hub(status = 'default'):
global user_data
error_msg = ""
if ID == []:
return login('error')
if data == []:
fill_data()
if user_data == []:
json = {"id": ID}
print(json, file=sys.stderr)
r = requests.post("http://sv:8080/hub", json=json)
if r.status_code == 200:
#print("r:", file=sys.stderr)
#print(r, file=sys.stderr)
            print(r.json(), file=sys.stderr)
user_data = r.json()
print("SUCESS:", file=sys.stderr)
print(user_data, file=sys.stderr)
print(type(user_data), file=sys.stderr)
if r.status_code == 500:
print("ERROR: " + str(r), file=sys.stderr)
            return login('error')
if status == 'error_delete':
error_msg = "Error delete"
if status == 'error_activation':
error_msg = "Error activation"
return render_template('hub.html', **locals(), user_data=user_data, info_username=info_username)
@app.route('/active/<int:n>', methods=['POST', 'GET'])
def active(n):
global user_data
print("[ACTIVE]", file=sys.stderr)
print(n, file=sys.stderr)
if ID == []:
return login('error')
if user_data == []:
return hub('error_activation')
data_to_send = {"id": ID, "ndx": n}
r = requests.post("http://sv:8080/active", json=data_to_send)
    if r.status_code == 200:
        user_data = r.json()
        print("ACTIVE SUCCESS", file=sys.stderr)
    if r.status_code == 500:
        print("ACTIVE FAILED", file=sys.stderr)
        return hub('error_active')
print("USER_DATA", file=sys.stderr)
print(user_data, file=sys.stderr)
return hub()
@app.route('/unactive/<int:n>', methods=['POST', 'GET'])
def unactive(n):
global user_data
print("[UNACTIVE]", file=sys.stderr)
print(n, file=sys.stderr)
if ID == []:
return login('error')
if user_data == []:
return hub('error_activation')
data_to_send = {"id": ID, "ndx": n}
r = requests.post("http://sv:8080/unactive", json=data_to_send)
    if r.status_code == 200:
        user_data = r.json()
        print("UNACTIVE SUCCESS", file=sys.stderr)
    if r.status_code == 500:
        print("UNACTIVE FAILED", file=sys.stderr)
        return hub('error_unactive')
print("USER_DATA", file=sys.stderr)
print(user_data, file=sys.stderr)
return hub()
"""
**delete function**
delete:
deletes the relation
"""
@app.route("/delete/<int:n>", methods=['POST', 'GET'])
def delete(n):
global user_data
if ID == []:
return login('error')
if user_data == []:
return hub('error_delete')
data_to_send = {"id": ID, "ndx": n}
# send and reception info to server
r = requests.post("http://sv:8080/delete", json=data_to_send)
# check post and get
    if r.status_code == 200:
        user_data = r.json()
        print("DELETE SUCCESS", file=sys.stderr)
    if r.status_code == 500:
        print(r, file=sys.stderr)
        return hub('error_delete')
return hub()
"""
**service function**
service:
displays the available services unless the id is empty
"""
@app.route("/service", methods=['POST', 'GET'])
def service():
if ID == []:
return login('error')
return render_template('service.html', **locals(), data=data)
"""
**action function**
action:
displays the available actions unless the id is empty
"""
@app.route("/action/<int:n>", methods=['POST', 'GET'])
def action(n):
if ID == []:
return login('error')
print("SELECT ACTION n = " + str(n), file=sys.stderr)
return render_template('action.html', **locals(), data=data)
"""
**config_actions function**
config_actions:
displays the available options an action has
"""
@app.route("/config_action/<int:s>/<int:a>", methods=['POST', 'GET'])
def config_action(s, a):
if ID == []:
return login('error')
print("ACTION selected service = " + str(s) + " a = " + str(a), file=sys.stderr)
print("len " + str(len(data["services"][s]["action"][a]["arg"])), file=sys.stderr)
return render_template('config_action.html', **locals(), data=data)
"""
**reactions function**
reactions:
displays the available reactions corresponding to the service and action pair selected
"""
@app.route('/reaction/<int:s>/<int:a>', methods=['POST', 'GET'])
def reaction(s, a):
global info_action
global google_redirection
if ID == []:
return login('error')
print("REACTION selected service = " + str(s) + " a = " + str(a), file=sys.stderr)
if request.method == 'POST':
info = request.form.getlist('info[]')
print("[INFO]", file=sys.stderr)
print(info, file=sys.stderr)
info_action = { 'service' : s, 'action' : a, 'info' : info }
print("[SUCESS] tab_action", file=sys.stderr)
print(info_action, file=sys.stderr)
for i in range (0, len(data["services"][s]["action"][a]["arg"])):
if (data["services"][s]["action"][a]["arg"][i]["name"] == "google"):
print("GOOGLE", file=sys.stderr)
google_redirection = 'another_service'
return redirect(url_for('google'))
return another_service()
print("[INFO] ERROR !", file=sys.stderr)
return hub()
"""
**another_service function**
another_service:
lets the user add another service to add a reaction to, unless the id is empty
"""
@app.route('/another_service', methods=['POST', 'GET'])
def another_service():
if ID == []:
return login('error')
print("[INFO] INFO ACTION", file=sys.stderr)
print(info_action, file=sys.stderr)
return render_template('another_service.html', **locals(), data=data, info_action=info_action)
"""
**select_reaction and config_reactions functions**
select_reaction:
displays the available reactions unless the id is empty
config_reactions:
displays the available options a reaction has
"""
@app.route('/select_reaction/<int:s>', methods=['POST', 'GET'])
def select_reaction(s):
if ID == []:
return login('error')
print("SELECT REACTION s = " + str(s), file=sys.stderr)
return render_template('select_reaction.html', **locals(), data=data)
@app.route('/config_reaction/<int:s>/<int:r>', methods=['POST', 'GET'])
def config_reaction(s, r):
if ID == []:
return login('error')
return render_template('config_reaction.html', **locals(), data=data)
"""
**create function**
create:
adds a relation once the service, the action, its configuration, the reaction, its corresponding service and configuration are selected
"""
@app.route("/create/<int:s>/<int:r>", methods=['POST', 'GET'])
def create(s, r):
global user_data
global info_action
global info_reaction
global google_redirection
if ID == []:
return login('error')
if request.method == 'POST':
info = request.form.getlist('info[]')
print("[INFO]", file=sys.stderr)
print(info, file=sys.stderr)
info_reaction = { 'service' : s, 'reaction' : r, 'info' : info }
print("[SUCESS] tab_action", file=sys.stderr)
print(info_reaction, file=sys.stderr)
else:
print("[ERROR] POST FAIL", file=sys.stderr)
return login('error')
new_data = data.copy()
print("NEW_DATA", file=sys.stderr)
print(new_data, file=sys.stderr)
print("END NEW DATA", file=sys.stderr)
print(new_data["services"][info_action["service"]]["action"][info_action["action"]]["arg"], file=sys.stderr)
if info_action["info"] != []:
for i in range(0, len(new_data["services"][info_action["service"]]["action"][info_action["action"]]["arg"])):
#print("[CHECK] " + str(new_data["services"][info_action["service"]]["action"][info_action["action"]]["arg"][i]), file=sys.stderr)
if new_data["services"][info_action["service"]]["action"][info_action["action"]]["arg"][i]["type"] != "oauth":
new_data["services"][info_action["service"]]["action"][info_action["action"]]["arg"][i]["value"] = info_action["info"][i]
if info_reaction["info"] != []:
for i in range(0, len(new_data["services"][info_reaction["service"]]["reaction"][info_reaction["reaction"]]["arg"])):
if new_data["services"][info_reaction["service"]]["reaction"][info_reaction["reaction"]]["arg"][i]["type"] != "oauth":
new_data["services"][info_reaction["service"]]["reaction"][info_reaction["reaction"]]["arg"][i]["value"] = info_reaction["info"][i]
info_to_send = {"id": ID, "action": new_data["services"][info_action["service"]]["action"][info_action["action"]], "reaction": new_data["services"][info_reaction["service"]]["reaction"][info_reaction["reaction"]]}
print(info_to_send, file=sys.stderr)
# send and reception info to server
print("[BEFORE SEND INFO SERVER]", file=sys.stderr)
response = requests.post("http://sv:8080/create", json=info_to_send)
print("[AFTER SEND INFO SERVER]", file=sys.stderr)
info_action = []
info_reaction = []
# check post and get
    if response.status_code == 200:
        # convert reception info to json
        user_data = response.json()
        print("SUCCESS CREATE", file=sys.stderr)
        print(user_data, file=sys.stderr)
        for i in range(0, len(data["services"][s]["reaction"][r]["arg"])):
            if (data["services"][s]["reaction"][r]["arg"][i]["name"] == "google"):
                print("GOOGLE", file=sys.stderr)
                google_redirection = 'hub'
                return redirect(url_for('google'))
    elif response.status_code == 500:
        print(response, file=sys.stderr)
        return login('error')
return hub()
"""
**error function**
error:
displays an error page with the 404 error code
"""
@app.errorhandler(404)
def error(error):
return render_template('error.html'), 404
"""
**debug function**
debug:
displays a page with essential debug information such as the data, user_data and id
"""
@app.route('/debug', methods=['GET', 'POST'])
def debug():
print(ID, file=sys.stderr)
print(data, file=sys.stderr)
print(user_data, file=sys.stderr)
if (data != []):
return (data)
if (user_data != []):
return (user_data)
if (ID != []):
return (ID)
return ("debug")
@app.route('/test', methods=['GET', 'POST'])
def test():
url = "https://api.jikan.moe/v3/search/anime?q="
animeID = None
if request.method == 'POST':
animeID = request.form['anime_name']
else:
animeID = "re:zero"
response = requests.get(url + animeID)
if response.status_code == 200:
jsn = response.json()
data = jsn["results"]
return render_template('test.html', **locals())
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
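# Hedged usage sketch (added; the credentials are placeholders): with the
# server running, the login handler can be exercised directly, since
# check_id reads the 'username' and 'password' form fields on POST /hub:
#   curl -X POST -d "username=alice&password=secret" http://localhost:80/hub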
| true |
cf77327e5d9f5b1ea9357a3f80e6038bd1c23929
|
Python
|
DableUTeeF/sift_rep
|
/models.py
|
UTF-8
| 2,763 | 2.5625 | 3 |
[] |
no_license
|
from torch import nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResCift(nn.Module):
def __init__(self, layers):
self.inplanes = 64
super(ResCift, self).__init__()
block = BasicBlock
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.fc = nn.Linear(256 * block.expansion, 128)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = x[0]
x = x.permute(0, 3, 1, 2)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = nn.AdaptiveAvgPool2d((1, 1))(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = x.view(1, -1, 128)
return x
class Cift(nn.Module):
def __init__(self):
super().__init__()
def forward(self, image, kp):
pass
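# Hedged usage sketch (added; the batch shape is an assumption inferred from
# the permute/view calls in ResCift.forward, which expects a list whose first
# element is a batch of HWC images):
#   import torch
#   net = ResCift([2, 2, 2])
#   out = net([torch.randn(4, 32, 32, 3)])  # -> tensor of shape (1, 4, 128)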
| true |
b2e88fcb1d5b040c6df708277b4db06bd0ce0fcc
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_199/2764.py
|
UTF-8
| 622 | 3.453125 | 3 |
[] |
no_license
|
def check(lista):
return sum(lista) == len(lista)
def convert(string):
return [True if i=='+' else False for i in string]
def invert(lis):
return [not i for i in lis]
def fun(string,n):
lista = convert(string)
count = 0
for i in range(len(lista)-n+1):
if not lista[i]:
lista[i:i+n] = invert(lista[i:i+n])
count += 1
res = 'IMPOSSIBLE'
if check(lista):
res = count
return res
t = int(input())
for i in range(1, t + 1):
n, m = input().split(" ") # read a list of integers, 2 in this case
print("Case #{}: {}".format(i, fun(n, int(m))))
| true |
57ed8830209f2efd772f23bbeeee6cd804d2af82
|
Python
|
MichalKacprzak99/WFiIS-IMN-2020
|
/lab07/src/nav_stokes_numba.py
|
UTF-8
| 5,045 | 2.53125 | 3 |
[] |
no_license
|
from numba import njit
import numpy as np
from chart_generator import map_generator, contour_generator
DELTA = 0.01
p = 1.0
mi = 1.0
N_X = 200
N_Y = 90
i1 = 50
j1 = 55
j2 = j1 + 2
IT_MAX = 20000
@njit
def y(i):
return DELTA * i
@njit
def x(i):
return DELTA * i
@njit
def psi_A(psi, Qwe):
for j in range(j1, N_Y + 1):
psi[0][j] = Qwe / (2 * mi) * (pow(y(j), 3) / 3 - pow(y(j), 2) / 2 * (y(j1) + y(N_Y)) + y(j) * y(
j1) * y(N_Y))
@njit
def psi_C(psi, Qwy, Qwe):
for j in range(0, N_Y + 1):
psi[N_X][j] = Qwy / (2 * mi) * (y(j) * y(j) * y(j) / 3 - y(j) * y(j) / 2 * y(N_Y)) + Qwe * y(j1) * y(
j1) * (-y(j1) + 3 * y(N_Y)) / (12 * mi)
@njit
def psi_B(psi):
for i in range(1, N_X):
psi[i][N_Y] = psi[0][N_Y]
@njit
def psi_D(psi):
for i in range(i1, N_X):
psi[i][0] = psi[0][j1]
@njit
def psi_E(psi):
for j in range(1, j1 + 1):
psi[i1][j] = psi[0][j1]
@njit
def psi_F(psi):
for i in range(1, i1 + 1):
psi[i][j1] = psi[0][j1]
@njit
def modify_psi(psi, Qwe, Qwy):
psi_A(psi, Qwe)
psi_C(psi, Qwy, Qwe)
psi_B(psi)
psi_D(psi)
psi_E(psi)
psi_F(psi)
@njit
def new_psi_i_j(i, j, psi, zeta):
return 0.25 * (psi[i + 1][j] + psi[i - 1][j] + psi[i][j + 1] + psi[i][j - 1] - DELTA * DELTA * zeta[i][j])
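# Added note: new_psi_i_j is the standard five-point relaxation step for the
# Poisson equation nabla^2 psi = zeta discretized on a uniform grid with
# spacing DELTA.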
@njit
def zeta_A(zeta, Qwe):
for j in range(j1, N_Y + 1):
zeta[0][j] = Qwe / (2 * mi) * (2 * y(j) - y(j1) - y(N_Y))
@njit
def zeta_C(zeta, Qwy):  # receives the outlet flux Qwy (see modify_zeta)
    for j in range(0, N_Y + 1):
        zeta[N_X][j] = Qwy / (2 * mi) * (2 * y(j) - y(N_Y))
@njit
def zeta_B(zeta, psi):
for i in range(1, N_X):
zeta[i][N_Y] = 2.0 / (DELTA * DELTA) * (psi[i][N_Y - 1] - psi[i][N_Y])
@njit
def zeta_D(zeta, psi):
for i in range(i1 + 1, N_X):
zeta[i][0] = 2.0 / (DELTA * DELTA) * (psi[i][1] - psi[i][0])
@njit
def zeta_E(zeta, psi):
for j in range(1, j1):
zeta[i1][j] = 2.0 / (DELTA * DELTA) * (psi[i1 + 1][j] - psi[i1][j])
@njit
def zeta_F(zeta, psi):
for i in range(1, i1 + 1):
zeta[i][j1] = 2.0 / (DELTA * DELTA) * (psi[i][j1 + 1] - psi[i][j1])
zeta[i1][j1] = 0.5 * (zeta[i1 - 1][j1] + zeta[i1][j1 - 1])
@njit
def modify_zeta(Qwe, Qwy, psi, zeta):
zeta_A(zeta, Qwe)
zeta_C(zeta, Qwy)
zeta_B(zeta, psi)
zeta_D(zeta, psi)
zeta_E(zeta, psi)
zeta_F(zeta, psi)
@njit
def new_zeta_i_j(omega, i, j, zeta, psi):
return 0.25 * (zeta[i + 1][j] + zeta[i - 1][j] + zeta[i][j + 1] + zeta[i][j - 1]) - omega * p / (16 * mi) * \
((psi[i][j + 1] - psi[i][j - 1]) * (zeta[i + 1][j] - zeta[i - 1][j]) - (psi[i + 1][j] - psi[i - 1][j]) *
(zeta[i][j + 1] - zeta[i][j - 1]))
@njit
def calculate_u_v(psi, u, v):
for i in range(1, N_X):
for j in range(1, N_Y):
if (i > i1 or j > j1):
# if i > i1 or j > j1:
u[i][j] = (psi[i][j + 1] - psi[i][j - 1]) / (2 * DELTA)
v[i][j] = -(psi[i + 1][j] - psi[i - 1][j]) / (2 * DELTA)
@njit
def error(zeta, psi):
result = 0.0
for i in range(1, N_X):
result += psi[i+1][j2] + psi[i-1][j2] + psi[i][j2+1] + psi[i][j2-1] - 4*psi[i][j2] - DELTA*DELTA*zeta[i][j2]
return result
@njit
def solve(psi, zeta, Qwe, Qwy):
errors = []
for it in range(1, IT_MAX + 1):
if it < 2000:
omega = 0.0
else:
omega = 1.0
for i in range(1, N_X):
for j in range(1, N_Y):
if (i <= i1 and j > j1) or (i > i1):
# if i > i1 or j > j1:
psi[i][j] = new_psi_i_j(i, j, psi, zeta)
zeta[i][j] = new_zeta_i_j(omega, i, j, zeta, psi)
modify_zeta(Qwe, Qwy, psi, zeta)
errors.append(error(zeta, psi))
return errors
def nav_stokes_numba(Qwe):
Qwy = Qwe * (pow(y(N_Y), 3) - pow(y(j1), 3) - 3 *
pow(y(N_Y), 2) * y(j1) + 3 * pow(y(j1), 2) *
y(N_Y)) / (pow(y(N_Y), 3))
psi = np.zeros((N_X + 1, N_Y + 1))
zeta = np.zeros((N_X + 1, N_Y + 1))
psi[0:i1, 0:j1] = np.nan
zeta[0:i1, 0:j1] = np.nan
modify_psi(psi, Qwe, Qwy)
# modify_zeta(Qwe, Qwy, psi, zeta)
errors = solve(psi, zeta, Qwe, Qwy)
with open(f'../error_data/errors_{Qwe}.txt', 'w') as f:
for err in errors:
f.write(f"{err}\n")
u = np.zeros((N_X + 1, N_Y + 1))
v = np.zeros((N_X + 1, N_Y + 1))
calculate_u_v(psi, u, v)
# tmp_x, tmp_y = np.mgrid[0:N_X / 100 + DELTA:DELTA, 0:N_Y / 100 + DELTA:DELTA]
tmp_x = np.linspace(0.0, (N_X+1)*DELTA, N_X+1, endpoint=True)
tmp_y = np.linspace(0.0, (N_Y+1)*DELTA, N_Y+1, endpoint=True)
map_generator(tmp_x, tmp_y, np.transpose(u), rf'Q={Qwe}, u(x,y)', f'u_{Qwe}.png')
map_generator(tmp_x, tmp_y, np.transpose(v), rf'Q={Qwe}, v(x,y)', f'v_{Qwe}.png')
contour_generator(tmp_x, tmp_y, np.transpose(zeta), rf'Q={Qwe}, $\zeta(x,y)$', f'zeta_{Qwe}.png')
contour_generator(tmp_x, tmp_y, np.transpose(psi), rf'Q={Qwe}, $\psi(x,y)$', f'psi_{Qwe}.png')
| true |
89d5fa2ae37d569134b6f7f503e8ebac45d310a8
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_155/793.py
|
UTF-8
| 386 | 3.203125 | 3 |
[] |
no_license
|
#!/usr/bin/python
import sys
def deficit(levels):  # renamed from 'str' to avoid shadowing the built-in
    result = 0
    preceding = 0
    for i in range(len(levels)):
        result = max(result, i-preceding)
        preceding += int(levels[i])
    return result
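# Worked example (added note): deficit("101") == 1, because the audience
# member with shyness 2 sees only one person standing and needs one invited
# friend; deficit("11") == 0, since every threshold is already met.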
if __name__ == "__main__":
sys.stdin.readline()
for i,line in enumerate(sys.stdin.readlines()):
maxShy, levels = line.strip().split()
print "Case #{0}: {1}".format(str(i+1), str(deficit(levels)))
| true |
8e518ccad4a48ea3af61458e9b1ba09eb36c29b1
|
Python
|
MarineGirardey/TP5_tests
|
/linkedlist_tp_unittest.py
|
UTF-8
| 4,915 | 3.875 | 4 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 15:54:03 2021
@author: Marine Girardey
"""
class Node:
"""Node class for a linked list
Attributes
----------
param_data : int
link : node.Node
"""
def __init__(self, data):
"""Class constructor
Parameters
----------
param_data : int
the data
Returns
-------
None
Node object
"""
self.data = data
self.next = None
def __repr__(self):
return self.data
class LinkedList:
"""LinkedList class to create a linked list
Attributes
----------
param_data : any
"""
def __init__(self, nodes=None):
"""Class constructor
Parameters
----------
data : any
The value of the given node.
Returns
-------
None
A class instance.
"""
self.head = None
if nodes is not None and len(nodes) != 0:
node = Node(data=nodes.pop(0))
self.head = node
for elem in nodes:
node.next = Node(data=elem)
node = node.next
def get(self, index):
"""
get the index of each node and print the node of a given index
Parameters
----------
index : index of a node
"""
if self.head is None:
            raise Exception('Empty list')
else:
return self.leonardo_recurs(index, self.head)
def leonardo_recurs(self, index, node):
"""
recursive function to browse each node until index is not equal to 0
Parameters
----------
index : the wanted index enter from the get function
node : the node corresponding to a specific index
"""
print(index, node)
if node is None:
return node
if index == 0:
return node
else:
return self.leonardo_recurs(index - 1, node.next)
def add_after(self, data, new_node):
"""
insert a new node after the node with the value == data
Parameters
----------
data : searched data
new_node : node to insert
"""
if not self.head:
raise Exception("List is empty")
for node in self:
if node.data == data:
new_node.next = node.next
node.next = new_node
return
raise Exception("Node with data '{}' not found".format(data))
def add_before(self, data, new_node):
"""
insert a new node before the node with the value == data
Parameters
----------
data : searched data
new_node : node to insert
"""
if not self.head:
raise Exception("List is empty")
if self.head.data == data:
return self.add_first(new_node)
prev_node = self.head
for node in self:
if node.data == data:
prev_node.next = new_node
new_node.next = node
return
prev_node = node
raise Exception("Node with data '{}' not found".format(data))
def remove_node(self, data):
"""
delete all node(s) value == data
Parameters
----------
data : searched data to delete
"""
if not self.head:
raise Exception("List is empty")
if self.head.data == data:
self.head = self.head.next
return
previous_node = self.head
for node in self:
if node.data == data:
previous_node.next = node.next
return
previous_node = node
raise Exception("Node with data '{}' not found".format(data))
def add_first(self, node_to_add):
"""
insert a node as the first node of the linkedlist
Parameters
----------
node_to_add : node to insert
"""
node_to_add.next = self.head
self.head = node_to_add
def add_last(self, node_to_add):
"""
insert a node as the last one of the linkedlist
Parameters
----------
node_to_add : node to insert
"""
        if self.head is None:
self.head = node_to_add
return
node = self.head
while node.next is not None:
node = node.next
node.next = node_to_add
def __repr__(self):
node = self.head
nodes = []
while node is not None:
nodes.append(node.data)
node = node.next
#return "a"
return "{}".format(nodes)
def __iter__(self):
node = self.head
while node is not None:
yield node
node = node.next
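# Hedged usage sketch (added):
#   llist = LinkedList(["a", "b", "c"])
#   llist.add_first(Node("start"))
#   llist.add_after("b", Node("b2"))
#   llist.remove_node("c")
#   print(llist)  # -> ['start', 'a', 'b', 'b2']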
| true |
39e80ab8015684d2830e725208ab34d82a717a33
|
Python
|
chloeeekim/TIL
|
/Algorithm/Leetcode/Codes/HappyNumber.py
|
UTF-8
| 1,111 | 3.984375 | 4 |
[] |
no_license
|
"""
202. Happy Number : https://leetcode.com/problems/happy-number/
양의 정수 하나가 주어졌을 때, 해당 숫자가 happy number인지 확인하는 문제
- happy number : 각 자릿수의 제곱을 더한 값을 구하는 방식을 반복했을 때, 1이 나오는 숫자
- happy number가 아니라면 위 방식을 반복했을 때, 1을 포함하지 않는 숫자들의 cycle이 반복된다
Example:
- Input : 19
- Output : true
- 1^2 + 9^2 = 82 / 8^2 + 2^2 = 68 / 6^2 + 8^2 = 100 / 1^2 + 0^2 + 0^0 = 1
Note:
반복되는지를 확인하기 위하여 seen 리스트를 사용
이전에 나왔었던 숫자가 나온다면 happy number가 아님
"""
class Solution:
def isHappy(self, n: int) -> bool:
seen = []
num = n
while True :
nxt = 0
while num != 0 :
nxt += (num % 10) ** 2
num = num // 10
if nxt == 1 :
return True
if nxt in seen :
return False
else :
seen.append(nxt)
num = nxt
return False
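# Added check (not in the original): Solution().isHappy(19) -> True, while
# Solution().isHappy(2) -> False (2 -> 4 -> 16 -> 37 -> ... cycles without 1).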
| true |
d69a4625529496d6f9e40011510c50659b6fe813
|
Python
|
PhoduCoder/PythonPractice
|
/Flatten_List.py
|
UTF-8
| 201 | 3.0625 | 3 |
[] |
no_license
|
#sec_vec=[[1,2,3],[2,3], 4]
sec_vec=[[1,2,3],5,[2,3], 4]
flat_list=[]
for i in sec_vec:
    if isinstance(i, list):
for num in i:
flat_list.append(num)
else:
flat_list.append(i)
print (flat_list)
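# Added note: the loop above flattens a single level of nesting; a deeper
# input such as [1, [2, [3]]] would keep the inner [3]. A recursive sketch:
# def flatten(seq):
#     out = []
#     for item in seq:
#         if isinstance(item, list):
#             out.extend(flatten(item))
#         else:
#             out.append(item)
#     return out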
| true |
0a0e3be59583ac287b1771b68798399fd7672347
|
Python
|
sanggs/IndependentStudy-Fall2019
|
/JacobiSolver.py
|
UTF-8
| 3,433 | 2.59375 | 3 |
[] |
no_license
|
import torch
import numpy as np
class JacobiSolver:
def __init__(self, particles, femObject):
self.particles = torch.from_numpy(particles)
self.femObject = femObject
self.dampingFactor = 2.0/3.0
# def writeToFile(self, i):
# if i == 0:
# f = open("points.csv", "w")
# else:
# f = open("points.csv", "a")
# npArray = self.lattice.numpy()
# np.savetxt(f, npArray[None], delimiter=',')
# f.close()
# def multiplyWithA(self):
# #print(self.gridWidth)
# #print(self.h)
# self.lattice.resize_((1,1,self.gridWidth, self.gridWidth))
# centralWeight = 4.0/(self.h*self.h)
# edgeWeight = -1.0/(self.h*self.h)
# mask = torch.tensor([[0,edgeWeight,0],[edgeWeight,centralWeight,edgeWeight],[0,edgeWeight,0]], dtype=torch.float32)
# mask.resize_((1,1,3,3))
# output = torch.nn.functional.conv2d(self.lattice, mask, bias=None, stride=1, padding=0)
# b = torch.nn.functional.pad(output, (1,1,1,1), mode='constant', value=(-self.b/(self.h * self.h)))
# self.lattice = self.lattice.resize_((self.gridWidth*self.gridWidth))
# return b.view((self.gridWidth*self.gridWidth))
# def getResidual(self, rhs):
# q = self.multiplyWithA()
# #print("q from get Residual")
# #print(q.view((self.gridWidth, self.gridWidth)))
# residue = rhs.sub(q)
# #print("residue from getResidual")
# self.projectToZero(residue)
# #print(residue.view((self.gridWidth, self.gridWidth)))
# return residue
# def projectToZero(self, v):
# v = v.resize_((self.gridWidth, self.gridWidth))
# v[0,:] = 0
# v[self.gridWidth-1,:] = 0
# v[:,0] = 0
# v[:,self.gridWidth-1] = 0
# v = v.resize_((self.gridWidth*self.gridWidth))
def dampedJacobi(self, b, q, dInverse, r, num_iteration, residue):
self.residue = torch.tensor(residue, dtype = torch.float32)
self.maxIterations = num_iteration
q = self.multiplyWithA()
#print("printing q")
#print(q.view(self.gridWidth, self.gridWidth))
r = b.sub(q)
#print("Residue after 1st step")
self.projectToZero(r)
print(r.view((self.gridWidth, self.gridWidth)))
#print(b.shape)
convergence_norm = 0
self.writeToFile(0)
for i in range(0, self.maxIterations):
convergence_norm = torch.sqrt(torch.max(r*r))
print("printing convergence norm "+str(convergence_norm))
#print(convergence_norm)
#if convergence_norm < self.residue:
#print("Convergence Norm less than threshold")
#print(i)
#return
if i > self.maxIterations:
#print("printing convergence norm")
#print(convergence_norm)
print("Ideally should not have come here")
break
r = dInverse * r * self.dampingFactor
self.projectToZero(r)
self.lattice = self.lattice + r
#print("printing lattice after "+ str(i+1))
#print(self.lattice)
q = self.multiplyWithA()
r = b.sub(q)
self.projectToZero(r)
self.writeToFile(i+1)
#print("Ended after "+str(i)+ " iterations")
print(convergence_norm)
return
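    # Added note: the loop above applies the damped (weighted) Jacobi update
    # x <- x + omega * D^(-1) * (b - A x) with omega = dampingFactor = 2/3,
    # the classical weight that damps high-frequency error modes for the
    # 2D Poisson stencil.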
| true |
b8c0ef7b6a4ea0a7ba5b34b09072134d56c5e796
|
Python
|
william-letton/ASSIST
|
/test.py
|
UTF-8
| 1,637 | 3.21875 | 3 |
[
"Unlicense"
] |
permissive
|
##Perform the SVM classification on the data, using the C and gamma parameters
##chosen from the optimisation, and defining the training and testing data
##according to the IDs output by the CreateTrainingData function.
def TrainClassifier(dataset,bestParams,IDgroups):
    # Import train_test_split function.
    from sklearn.model_selection import train_test_split
    ##Create X (dimensions array) and y (class array). Training and testing
    ##currently reuse the same data; the split based on IDgroups is still TODO.
    X=dataset.Data
    y=dataset.Target
# Import svm model
from sklearn import svm
# Create a svm Classifier. Linear Kernel
clf = svm.SVC(kernel='rbf',C=bestParams['C'],gamma=bestParams['gamma'])
#Train the model using the training sets
clf.fit(X,y)
#Predict the response for test dataset
y_pred = clf.predict(X)
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy: how often is the classifier correct?
print("Accuracy: ",metrics.accuracy_score(y,y_pred))
# Model Precision: what percentage of positively labelled tuples are actually positive?
print("Precision: ",metrics.precision_score(y,y_pred))
#Model Recall: What percentage of positive tuples are labelled as positive?
print("Recall: ",metrics.recall_score(y,y_pred))
##Convert the y_pred numpy array to a list.
y_pred_list=list()
for item in y_pred:
y_pred_list.append(item)
print(y_pred_list)
| true |
0f2f6613c578a56eb555c6f872639d6eaa38e959
|
Python
|
anishsujanani/Beanstalk-Reporter
|
/beanstalk_reporter.py
|
UTF-8
| 10,272 | 2.515625 | 3 |
[] |
no_license
|
'''
Beanstalk-Reporter
-------------------
Reqs: boto3, python3.x
Usage: python3 beanstalk_reporter.py --profile <aws_cli_profile_name> --envname <beanstalk_env_name>
Output: JSON to stdout
Author: Anish Sujanani
Date: November, 2021
'''
import boto3
import sys
import argparse
import json
session = None
def get_resource_info(environment_name):
eb = session.client('elasticbeanstalk')
try:
env_resources = eb.describe_environment_resources(EnvironmentName=environment_name)
except Exception as _:
print('Something went wrong when trying to get Beanstalk Environment information.')
sys.exit(0)
resources = {}
if len(env_resources['EnvironmentResources']['LoadBalancers']) > 0:
resources['LoadBalancers'] = get_load_balancer_info(env_resources['EnvironmentResources']['LoadBalancers'])
if len(env_resources['EnvironmentResources']['AutoScalingGroups']) > 0:
resources['AutoScalingGroups'] = get_auto_scaling_group_info(env_resources['EnvironmentResources']['AutoScalingGroups'])
if len(env_resources['EnvironmentResources']['Instances']) > 0:
resources['Instances'] = get_ec2_instance_info(env_resources['EnvironmentResources']['Instances'])
return resources
def get_load_balancer_info(env_resources_lb_chunk):
boto3_lb = session.client('elbv2')
all_lb_info = {}
for lb in env_resources_lb_chunk:
sec_groups = []
try:
lb_info = boto3_lb.describe_load_balancers(LoadBalancerArns=[lb['Name']])
except Exception as _:
print('Something went wrong when trying to get LoadBalancer information.')
sys.exit(0)
try:
for i in lb_info['LoadBalancers']:
lb_json = {}
lb_json['DNSName'] = i['DNSName']
lb_json['LoadBalancerName'] = i['LoadBalancerName']
lb_json['AvailabilityZones'] = []
for _ in i['AvailabilityZones']:
lb_json['AvailabilityZones'].append({'ZoneName': _['ZoneName'], 'SubnetId': _['SubnetId']})
sec_groups.extend(i['SecurityGroups'])
lb_json['SecurityGroups'] = sec_groups
lb_json['VpcId'] = i['VpcId']
all_lb_info['Details'] = lb_json
except Exception as _:
print('Something went wrong when trying to parse LoadBalancer information.')
sys.exit(0)
all_lb_info['Attributes'] = {}
try:
lb_attr = boto3_lb.describe_load_balancer_attributes(LoadBalancerArn=lb['Name'])
except Exception as _:
print('Something went wrong when trying to get LoadBalancer attributes.')
sys.exit(0)
for i in lb_attr['Attributes']:
all_lb_info['Attributes'][i['Key']] = i['Value']
try:
lb_lstn = boto3_lb.describe_listeners(LoadBalancerArn=lb['Name'])
except Exception as _:
print('Something went wrong when trying to get LoadBalancer listeners.')
sys.exit(0)
try:
all_lb_info['Listeners'] = []
for i in lb_lstn['Listeners']:
listener = {}
listener['Protocol'] = i['Protocol']
listener['Port'] = i['Port']
listener['TargetGroupStickiness'] = i['DefaultActions'][0]['ForwardConfig']['TargetGroupStickinessConfig']['Enabled']
all_lb_info['Listeners'].append(listener)
except Exception as _:
print('Something went wrong when trying to parse LoadBalancer listener information.')
sys.exit(0)
all_lb_info['SecurityGroups'] = []
if len(sec_groups) > 0:
for sg in sec_groups:
all_lb_info['SecurityGroups'].append(get_security_group_info(sg))
return all_lb_info
def get_auto_scaling_group_info(env_resources_asg_chunk):
boto3_asg = session.client('autoscaling')
all_asg_info = {}
for asg in env_resources_asg_chunk:
try:
asg_info = boto3_asg.describe_auto_scaling_groups(AutoScalingGroupNames=[asg['Name']])
except Exception as _:
print('Something went wrong when trying to get Autoscaling Group information.')
sys.exit(0)
try:
for i in asg_info['AutoScalingGroups']:
all_asg_info['AutoScalingGroupName'] = i['AutoScalingGroupName']
all_asg_info['AvailabilityZones'] = i['AvailabilityZones']
all_asg_info['DesiredCapacity'] = i['DesiredCapacity']
all_asg_info['MaxSize'] = i['MaxSize']
all_asg_info['MinSize'] = i['MinSize']
all_asg_info['NewInstancesProtectedFromScaleIn'] = i['NewInstancesProtectedFromScaleIn']
all_asg_info['Instances'] = []
for inst in i['Instances']:
all_asg_info['Instances'].append({
'InstanceId': inst['InstanceId'],
'AvailabilityZone': inst['AvailabilityZone'],
'HealthStatus': inst['HealthStatus']
})
except Exception as _:
print('Something went wrong when trying to parse Autoscaling Group information.')
sys.exit(0)
return all_asg_info
def get_ec2_instance_info(env_resources_ec2_instance_chunk):
boto3_ec2 = session.client('ec2')
all_ec2_info = []
for inst in env_resources_ec2_instance_chunk:
try:
inst_info = boto3_ec2.describe_instances(InstanceIds=[inst['Id']])
except Exception as _:
print('Something went wrong when trying to get EC2 instance information.')
sys.exit(0)
try:
for i in inst_info['Reservations'][0]['Instances']:
instance = {}
instance['InstanceId'] = i['InstanceId']
instance['InstanceType'] = i['InstanceType']
instance['ImageId'] = i['ImageId']
instance['PlatformDetails'] = i['PlatformDetails']
instance['AvailabilityZone'] = i['Placement']['AvailabilityZone']
instance['InstanceRole'] = i['IamInstanceProfile']['Arn']
instance['Monitoring'] = i['Monitoring']['State']
instance['NetworkInterfaces'] = []
for ni in i['NetworkInterfaces']:
netint_json = {}
netint_json['PrivateIPAddress'] = ni['PrivateIpAddress']
netint_json['PrivateDnsName'] = ni['PrivateDnsName']
netint_json['PublicIpAddress'] = ni['Association']['PublicIp']
netint_json['PublicDnsName'] = ni['Association']['PublicDnsName']
netint_json['MacAddress'] = ni['MacAddress']
netint_json['IPv6Address'] = ni['Ipv6Addresses']
instance['NetworkInterfaces'].append(netint_json)
instance['SecurityGroups'] = []
for sg in i['SecurityGroups']:
instance['SecurityGroups'].append(get_security_group_info(sg['GroupId']))
all_ec2_info.append(instance)
except Exception as _:
print('Something went wrong when trying to parse EC2 instance information.')
sys.exit(0)
return all_ec2_info
def get_security_group_info(security_group_id):
boto3_ec2 = session.client('ec2')
try:
sg_info = boto3_ec2.describe_security_groups(GroupIds=[security_group_id])
except Exception as _:
print('Something went wrong when trying to get security groups.')
sys.exit(0)
try:
sg = {}
for i in sg_info['SecurityGroups']:
sg['GroupId'] = i['GroupId']
sg['Description'] = i['Description']
sg['InboundRules'] = []
sg['OutboundRules'] = []
for rule in i['IpPermissions']:
rule_json = {}
if rule['IpProtocol'] != '-1':
rule_json['IpProtocol'] = rule['IpProtocol']
rule_json['ToPort'] = rule['ToPort']
else:
rule_json['IpProtocol'] = 'ALL TRAFFIC'
rule_json['ToPort'] = 'ALL PORTS'
if len(rule['IpRanges']) != 0:
rule_json['From'] = rule['IpRanges'][0]['CidrIp']
else:
rule_json['From'] = rule['UserIdGroupPairs'][0]['GroupId']
sg['InboundRules'].append(rule_json)
for rule in i['IpPermissionsEgress']:
rule_json = {}
if rule['IpProtocol'] != '-1':
rule_json['IpProtocol'] = rule['IpProtocol']
rule_json['ToPort'] = rule['ToPort']
else:
rule_json['IpProtocol'] = 'ALL TRAFFIC'
rule_json['ToPort'] = 'ALL PORTS'
if len(rule['IpRanges']) != 0:
rule_json['To'] = rule['IpRanges'][0]['CidrIp']
else:
rule_json['To'] = rule['UserIdGroupPairs'][0]['GroupId']
sg['OutboundRules'].append(rule_json)
except Exception as _:
print('Something went wrong when trying to parse security groups.')
sys.exit(0)
return sg
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Beanstalk Reporter')
argparser.add_argument('--profile', '-p', metavar='<profile_name>', type=str, help='AWS CLI Profile Name', required=True)
argparser.add_argument('--envname', '-e', metavar='<env_name>', type=str, help='Beanstalk Environment Name', required=True)
args = argparser.parse_args()
try:
session = boto3.session.Session(profile_name = args.profile)
except Exception as _:
print('Something went wrong when trying to create a boto3 session. Check your profile.')
sys.exit(0)
resources = get_resource_info(environment_name=args.envname)
print(json.dumps(resources, indent=4, sort_keys=False))
sys.exit(0)
| true |