blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success
---|---|---|---|---|---|---|---|---|---|---|---|
566e40984a3403a78906bda753981b785e5ab9fa
|
Python
|
mbhs/mbit
|
/archive/2021s/solutions/AppleOrchard.py
|
UTF-8
| 312 | 2.765625 | 3 |
[] |
no_license
|
from sys import stdin, stdout
n, m, x, y, a, b, c, d = (int(x) for x in stdin.readline().split())
def ans(n, m):
global a, b, x, y
return max(0, a-n)*x + max(0, b-m)*y
best = 10**10
for trade in range(-20000, 20000):
best = min(best, ans(n-c*trade, m+d*trade))
stdout.write(str(best) + "\n")
| true |
06a1cdfc3d07fe2bda067a95ed494717ec8ebb57
|
Python
|
JesusGuadiana/Nansus
|
/functions/pushOperand.py
|
UTF-8
| 1,354 | 2.5625 | 3 |
[] |
no_license
|
import sys
def push_operand_to_stack(current_program, identifier, index = 0):
#if current_program.current_dim == 0:
operand = current_program.func_directory.get_function_variable(current_program.scope_l, identifier)
if operand is None:
operand = current_program.func_directory.get_function_variable(current_program.scope_g, identifier)
if operand is not None:
            current_program.operand_stack.append(operand['address'])
            current_program.type_stack.append(operand['type'])
else:
print("Variable " + identifier + " is not declared in this scope.")
sys.exit()
else:
        current_program.operand_stack.append(operand['address'])
        current_program.type_stack.append(operand['type'])
#else:
# operand = current_program.func_directory.get_vector_or_matrix_index(current_program.scope_l, identifier, index)
# if operand is None:
# operand = current_program.func_directory.get_vector_or_matrix_index(current_program.scope_g, identifier, index)
# if operand is not None:
# current_program.operand_stack.append(variable['address'])
# current_program.type_stack.append(variable['type'])
# else:
# print("Variable " + identifier + " is not declared in this scope.")
# sys.exit()
#
# else:
# current_program.operand_stack.append(variable['address'])
# current_program.type_stack.append(variable['type'])
| true |
474941dae06f79bda21d44e4ec6c11e79820631b
|
Python
|
ivoryli/myproject
|
/class/phase1/project_month01/game2048_01.py
|
UTF-8
| 3,975 | 3.625 | 4 |
[] |
no_license
|
'''
2048 core algorithm
'''
#-------------------------------------------------------------------------------------------------
#Exercise 1: define a function that moves the zero elements to the end
#20 20 --> 2200
#02 20 --> 2200
#myself ok
# def move_zero_right(L):
# for x in range(len(L) - 1):
# for y in range(x + 1,len(L)):
# if L[x] == 0:
# #If the list has a nonzero element, make element x nonzero
# L[x],L[y] = L[y],L[x]
'''
Method 1: teacher
def zero_to_end(list_target):
    #Copy the nonzero elements of the input list into a new list
new_list = [x for x in list_target if x != 0]
new_list += list_target.count(0) * [0]
list_target[:] = new_list
'''
#teacher
def zero_to_end(list_target):
    #Delete the zero elements; NOTE: delete from back to front
for i in range(len(list_target) - 1,-1,-1):
if list_target[i] == 0:
del list_target[i]
list_target.append(0)
#-------------------------------------------------------------------------------------------------
#Exercise 2: define a function that merges one row
# myself !uncertainty
# def merge(list_target):
# zero_to_end(list_target)
# for x in range(len(list_target) - 1):
# if L[x] == L[x + 1]:
# L[x] += L[x + 1]
# L[x + 1] = 0
# zero_to_end(list_target)
#teacher
def merge(list_target):
zero_to_end(list_target)
for i in range(len(list_target) - 1):
if list_target[i] == list_target[i + 1]:
list_target[i] += list_target[i + 1]
list_target[i + 1] = 0
zero_to_end(list_target)
#-------------------------------------------------------------------------------------------------
#Exercise 3: display a 2-D list on the screen in table format
list01 = [#8,4,2,2 8,4,4,0
[2,2,4,8], #4 4 8 0 0 4 4 8
[2,4,4,8],
[2,2,4,8],
[2,8,4,8]
]
list02 = [
[2,0,0,2],
[2,2,0,0],
[2,0,4,4],
[4,0,0,2]
]
#myself
# def print_map(list_target):
# for list_item in list_target:
# for item in list_item:
# print(item,end = " ")
# print()
#teacher
def print_map(map):
for r in range(len(map)):
for c in range(len(map[r])):
print(map[r][c],end = " ")
print()
# print_map(list01)
#-------------------------------------------------------------------------------------------------
#Exercise 4:
'''
Merge up, down, left and right
'''
#myself
# def left_list(list_target):
# for item in list_target:
# merge(item)
#teacher
# Move left
def move_left(map):
    for r in range(len(map)):
        merge(map[r])
# teacher
# Move right
def move_right(map):
for r in range(len(map)):
list_merge = map[r][::-1]
merge(list_merge)
map[r] = list_merge[::-1]
# --------------------------------------#
#myself
# Transposition needed for the up/down moves
# def change_map(map):
# L = []
# for r in range(len(map)):
# Lr = []
# #0-4
# for c in range(len(map[r])):
# Lr.append(map[c][r])
# L.append(Lr)
# return L
#
# # Move up
# def move_top(map):
# L = change_map(map)
# move_left(L)
# L = change_map(L)
# return L
#
# # Move down
# def move_bottom(map):
# L = change_map(map)
# move_right(L)
# L = change_map(L)
# return L
# --------------------------------------#
#teacher
def move_up(map):
for c in range(4):
list_merge = []
for r in range(4):
list_merge.append(map[r][c])
merge(list_merge)
for r in range(4):
map[r][c] = list_merge[r]
def move_down(map):
for c in range(4):
list_merge = []
for r in range(3,-1,-1):
list_merge.append(map[r][c])
merge(list_merge)
for r in range(3,-1,-1):
map[r][c] = list_merge[3 - r]
#-------------------------------------------------------------------------------------------------
#Test code
move_down(list02)
print_map(list02)
| true |
c151bde9351f1a5c958ef0c2803b6950b19e5766
|
Python
|
takaratruong/Intelligent-Tutor-System-for-Algebraic-problems
|
/additional code used for the project/BankGenerator.py
|
UTF-8
| 822 | 2.890625 | 3 |
[] |
no_license
|
import sys
import FeatureExtractor
import csv
from csv import reader
KEY = "key"
VALUES = "values"
def import_feature_to_bank():
dict_map = FeatureExtractor.bins
w = csv.writer(open("data/problemBank.csv", "w"))
for key, val in dict_map.items():
w.writerow([key, val])
def update_feature_metrics():
# open file in read mode
csv.field_size_limit(sys.maxsize)
with open('data/problemBank.csv', 'r') as read_obj:
# pass the file object to reader() to get the reader object
csv_reader = reader(read_obj)
# Iterate over each row in the csv using reader object
for row in csv_reader:
# row variable is a list that represents a row in csv
print(row[0])
def main():
update_feature_metrics()
if __name__ == '__main__':
main()
| true |
aa1c3af5bf6c659bb351f77aed492d51d5dbdb05
|
Python
|
Clint-Portfolio/Graph-coloring
|
/Code/generate_random_valid_graph.py
|
UTF-8
| 1,426 | 2.953125 | 3 |
[] |
no_license
|
import sys
from helpers import generate_random_country, provinces, country_to_number, cost
if __name__ == '__main__':
countries, neighbors = provinces(sys.argv[1])
neighborlist = country_to_number(countries, neighbors)
full_transmitter_list = ["A", "B", "C", "D", "E", "F", "G"]
transmitter_cost_list = [[12, 26, 27, 30, 37, 39, 41],
[19, 20, 21, 23, 36, 37, 38],
[16, 17, 31, 33, 36, 56, 57],
[3, 34, 36, 39, 41, 43, 58]]
iterations = 100000
length_transmitter_list = len(full_transmitter_list)
for length in range(3, length_transmitter_list, 1):
print(full_transmitter_list[length])
writefile = open(f"random_valid_resultsA_{full_transmitter_list[length]}_{str(iterations)}.csv", "w")
for i in range(iterations):
new_country = "".join(generate_random_country(neighborlist,
full_transmitter_list))
writefile.write(f"{new_country};")
for transmitter_cost in transmitter_cost_list:
writestring = str(cost(new_country, transmitter_cost,
full_transmitter_list))
if transmitter_cost[0] == 3:
writefile.write(f"{writestring}\n")
else:
writefile.write(f"{writestring};")
| true |
9b0db4121f601c48fd81e86c97aa1c4c641d0f51
|
Python
|
krist7599555/2110101
|
/03_P.py
|
UTF-8
| 2,584 | 3.109375 | 3 |
[] |
no_license
|
# 03_P1
from operator import mul
from functools import reduce
def fac(n): return reduce(mul, range(1, n + 1))
print(fac(int(input())))
# 03_P2
from operator import mul
from functools import reduce
def fac(n): return reduce(mul, range(1, n + 1))
n, k, cm = map(int, input().split())
print (fac(n) // fac(n-k) // fac(1 if cm == 1 else k))
# 03_P3
print(sum(i for i in range(int(input())) if not i % 3 or not i % 5))
# 03_P4
n = int(input())
print(sum(float(input()) for _ in range(n)) / n if n else 'No Data')
# 03_P5
ls = []
while True:
vl = float(input())
if vl != -1: ls.append(vl)
else: break;
print(sum(ls) / len(ls) if ls else 'No Data')
# 03_P6
def grade(sc):
lm_ = [50, 55, 60, 65, 70, 75, 80, 101]
gd_ = ['F', 'D', 'D+', 'C', 'C+', 'B', 'B+', 'A']
return next((gd for lm, gd in zip(lm_, gd_) if sc < lm), 'Error')
while True:
sc = int(input())
if sc != -1: print(grade(sc))
else: break
# 03_P7
n, fnd = map(int, input().split())
print([int(input()) for _ in range(n)].count(fnd))
# 03_P8
import sys
vl = int(input())
for i in range(vl // 2, 1, -1):
for j in range(min(i, vl - i) - 1, 1, -1):
k = vl - i - j
if k > j: break
if i ** 2 == j ** 2 + k ** 2:
print(i)
sys.exit(0)
# 03_P9
from itertools import count
a, b, c, x, d = map(float, input().split())
f = lambda x: a * pow(x, 2) + b * x + c
f_= lambda x: a * x * 2 + b
for i in count(1):
nw_x = x - f(x) / f_(x)
if abs(nw_x - x) <= d:
print(i)
break
else: x = nw_x
# 03_P10
n = int(input())
l = [i for i in range(2, n) if not n % i]
if l: print(*l[::-1])
else: print('Prime Number')
# 03_P11
vl = int(input())
if vl < 0: print('input unavailable')
elif vl < 2: print('none')
else: print(*[i for i in range(2, vl + 1)
if all(i % j for j in range(2, i))])
# 03_P12
vl = int(input())
ls = []
for i in range(2, vl + 1):
if not vl % i:
ls.append(i)
while not vl % i:
vl //= i
print(*ls)
# 03_P13
r, c = map(int, input().split())
for i in range(1, r + 1):
print(*(i * j for j in range(1, c + 1)))
# 03_P14
from operator import le, ge
n, cm = map(int, input().split())
func = [None, le, ge, lambda i, j: i + j == n - 1][cm]
print(*["({},{})".format(i + 1, j + 1)
for i in range(n)
for j in range(n)
if func(i, j)], sep = '\n')
# 03_P15
n = int(input()); x = n // 2 - 1; z = n // 2 - n; y = (n & ~1) - 1
f = lambda i, j: '#' if i+j>=x and i-j<y and i-j>=z else '.'
for i in range(n + y):
ls = [f(i, j) for j in range(n + 1)]
print(*ls[:-1], *ls[::-1], sep = '')
# 03_P16
x = int(input())
y = int(input())
for i in range(1, y + 1):
print(x, i, x * i)
| true |
afeaa7e6babdf4dcc2c1ab7cb42c190ae9da8d3a
|
Python
|
reasonsolo/zchess
|
/chess/state.py
|
UTF-8
| 1,711 | 3.1875 | 3 |
[
"MIT"
] |
permissive
|
# ref http://mcts.ai/code/python.html
from chess.board import Board
import itertools
class InvalidActionError(Exception):
pass
class Action:
def __init__(self, piece, to):
self.piece = piece
self.piece_code = str(self.piece)
self._from = (self.piece.x, self.piece.y)
self._to = to
def __str__(self):
return "%s@%s>%s" % (self.piece_code, str(self._from), str(self._to))
def __hash__(self):
return hash(self.__str__())
def __eq__(self, other):
return self.piece_code == other.piece_code and self._from == other._from and self._to == other._to
class GameState:
def __init__(self, players, init=True, board=None):
if init:
self.board = Board()
else:
self.board = board
self.end = False
self.winner = None
self.current_player = players[0]
self.players = itertools.cycle(players)
self.all_actions = [Action(piece, move) for piece, move in self.board.all_moves()]
self.history = []
def take_action(self, action):
action_str = str(action)
        if action_str not in [str(a) for a in self.all_actions]:
raise InvalidActionError
self.history.append(action)
        self.board.move(action.piece, *action._to)
self.update()
def update(self):
self.winner = self.board.winner()
        self.end = True if self.winner is not None or self.board.draw() else False
self.current_player = next(self.players)
self.all_actions = [Action(piece, move) for piece, move in self.board.all_moves()]
def repeat_times(self):
# TODO
pass
def __repr__(self):
return self.board.state()
| true |
bec433367f0e1ed5b12a1c0420f0a5a7dc263a3e
|
Python
|
JosephLipinski/LeetCode-Problem-Solutions
|
/Median.py
|
UTF-8
| 1,612 | 3.03125 | 3 |
[] |
no_license
|
from typing import List
class Median:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
import numpy as np
m = nums1
n = nums2
len_m = len(m)
len_n = len(n)
total_len = len_m + len_n
if total_len == 2:
if m != [] and n != []:
return (m[0] + n[0]) / 2
elif m != []:
return (m[0] + m[1]) / 2
else:
return (n[0] + n[1]) / 2
elif total_len == 1:
return m[0] if m != [] else n[0]
else:
try:
m_0 = m[0]
except IndexError:
m_0 = np.NINF
try:
n_0 = n[0]
except IndexError:
n_0 = np.NINF
try:
m_i = m[-1]
except:
m_i = np.inf
try:
n_j = n[-1]
except:
n_j = np.inf
if m_0 >= n_0:
if n_0 != np.NINF:
n = n[1:]
else:
m = m[1:]
else:
if m_0 != np.NINF:
m = m[1:]
else:
n = n[1:]
if m_i >= n_j:
if m_i != np.inf:
m = m[:-1]
else:
n = n[:-1]
else:
if n_j != np.inf:
n = n[:-1]
else:
m = m[:-1]
return self.findMedianSortedArrays(m, n)
| true |
d7bdceb2e45518dba303ad4cd0d182a31a91af4e
|
Python
|
Tvo-Po/algorithms
|
/algotest/test_insort.py
|
UTF-8
| 804 | 2.71875 | 3 |
[] |
no_license
|
from .test_sort import BaseSortTestCases
from algo.insort import insert_sort
class TestInsertSort(BaseSortTestCases.TestSort):
sorting_function = {'foo': insert_sort}
def test_amount_of_operations(self):
insert_sort_amount_operations = (self.STRING_ARRAY_AMOUNT_ELEMENTS ** 2 +
self.STRING_ARRAY_AMOUNT_ELEMENTS) // 2 - 1
sorted_array = sorted(self.STRING_ARRAY)
result_array, amount_of_operations = self.sorting_function['foo'](self.STRING_ARRAY,
is_number_of_operations_needed=True)
self.assertEqual(result_array, sorted_array)
self.assertEqual(amount_of_operations, insert_sort_amount_operations)
if __name__ == '__main__':
pass
| true |
0846e39a59b6e33eb41d8c4369bf8f2edb22192d
|
Python
|
orange-eng/Leetcode
|
/easy/1523_Count_Odd_Numbers.py
|
UTF-8
| 532 | 3.359375 | 3 |
[] |
no_license
|
# Recursive approach
# Times out (TLE)
# class Solution:
# def countOdds(self, low: int, high: int) -> int:
# if low == high:
# if low % 2 == 1:
# return 1
# else:
# return 0
# mid = (low + high)//2
# return self.countOdds(low,mid) + self.countOdds(mid + 1,high)
class Solution:
def countOdds(self, low: int, high: int) -> int:
return (high+1)//2 - low//2
example = Solution()
low = 3
high = 7
output = example.countOdds(low,high)
print(output)
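# Added note (not in the original solution): the closed form works because the
# count of odd numbers in [0, n] is (n + 1) // 2, so the count in [low, high]
# is (high + 1) // 2 - low // 2. For low = 3, high = 7 that is 4 - 1 = 3 (3, 5, 7).
assert example.countOdds(3, 7) == 3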
| true |
e8b01c7038016f74cc3258e97f125921edb85f56
|
Python
|
Hazeliii/DeepLearningClassWork
|
/MYWORK/work2/aaa.py
|
UTF-8
| 9,317 | 2.71875 | 3 |
[] |
no_license
|
# coding=utf-8
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import plot_model
from tensorflow.keras import Sequential,Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, concatenate,Input,add
import numpy as np
import os
num_classes=19
#MNIST is read from a local file here, so the built-in MNIST datasets loader is not used
#mnist = tf.keras.datasets.mnist
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
def PreparePlusData():
    #Load MNIST from the local file
path = './mnist.npz'
f = np.load(path)
x_train, y_train = f['x_train'], f['y_train']
x_test, y_test = f['x_test'], f['y_test']
f.close()
h=x_train.shape[1]//2
w=x_train.shape[2]//2
    #For easier evaluation, the images are shrunk to half their original size
x_train = np.expand_dims(x_train, axis=-1)
x_train = tf.image.resize(x_train, [h,w]).numpy() # if we want to resize
x_test = np.expand_dims(x_test, axis=-1)
x_test = tf.image.resize(x_test, [h,w]).numpy() # if we want to resize
    # Normalize the images so the network learns more easily
    x_train, x_test = x_train / 255.0, x_test / 255.0
    # Note that even the same digit has many different images,
    # so we want to generate as many digit-image pair combinations as possible;
    # below this is done by pairing two randomly drawn index columns.
    # Therefore, to obtain more image-pair addition examples, first extend the length of the two random input columns
    len_train=len(x_train)
    len_test=len(x_test)
    len_ext_train=len_train*3
    len_ext_test=len_test*3
    #Since this exercise uses a fully connected (linear) network, flatten each image into a 1-D vector
    x_train=x_train.reshape((len_train,-1))
    x_test=x_test.reshape((len_test,-1))
    #MNIST is ordered by digit, so shuffle it by randomly crossing samples to produce more varied image-digit addition combinations
left_train_choose = np.random.choice(len_train, len_ext_train, replace=True)
right_train_choose = np.random.choice(len_train, len_ext_train, replace=True)
left_test_choose = np.random.choice(len_test, len_ext_test, replace=True)
right_test_choose = np.random.choice(len_test, len_ext_test, replace=True)
x_train_l=x_train[left_train_choose]
x_train_r=x_train[right_train_choose]
x_test_l=x_test[left_test_choose]
x_test_r=x_test[right_test_choose]
    #!!!!!! NOTE: the labels in this task are NOT one-hot encoded
y_train=y_train[left_train_choose]+y_train[right_train_choose]
y_test=y_test[left_test_choose]+y_test[right_test_choose]
#WORK1: --------------BEGIN-------------------
    #Complete the construction of the training and test datasets:
'''
features_dataset = tf.data.Dataset.from_tensor_slices((x_train_l, x_train_r))
labels_dataset = tf.data.Dataset.from_tensor_slices(y_train)
train_datasets = tf.data.Dataset.zip((features_dataset, labels_dataset)).batch(64)
#test_datasets = tf.data.Dataset.from_tensor_slices(({"input_1": x_test_l,"input_2": x_test_r},{"dense_2": y_test})).batch(64)
features_dataset1 = tf.data.Dataset.from_tensor_slices((x_test_l, x_test_r))
labels_dataset1 = tf.data.Dataset.from_tensor_slices(y_test)
test_datasets = tf.data.Dataset.zip((features_dataset1, labels_dataset1)).batch(64)
'''
train_datasets = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices((x_train_l, x_train_r)),
tf.data.Dataset.from_tensor_slices(y_train))).batch(64)
test_datasets = tf.data.Dataset.zip(
(tf.data.Dataset.from_tensor_slices((x_test_l, x_test_r)), tf.data.Dataset.from_tensor_slices(y_test))).batch(64)
#WORK1: ---------------END--------------------
return train_datasets, test_datasets
#WORK2: --------------BEGIN-------------------
#Complete the custom layer so that BiasPlusLayer([input1,input2])=input1+input2+bias:
class BiasPlusLayer(keras.layers.Layer):
    #2.1 If a variable does not change with the input dimensions, it can be added with add_weight in __init__;
    #otherwise the variable should be added in the build method based on input_shape.
    #Add the variable self.bias in __init__ to implement BiasPlusLayer([input1,input2])=input1+input2+bias.
    #Note that the bias shape must match input1 and input2, which are being added
def __init__(self, num_outputs, **kwargs):
super(BiasPlusLayer, self).__init__(**kwargs)
self.num_outputs = num_outputs
self.bias = self.add_weight(shape=(num_outputs,), initializer="zeros", trainable=True)
def build(self, input_shape):
super(BiasPlusLayer, self).build(input_shape) # Be sure to call this somewhere!
    #2.2 Implement input1+input2+bias in the call method
def call(self, input):
return input[0]+input[1]+self.bias
#WORK2: ---------------END--------------------
#WORK3: --------------BEGIN-------------------
#Referring to the given network diagram, complete the shared-parameter siamese network siamese_net:
#Note: the network structure is checked by comparing the plotted model images,
#so the parameter dimensions, names, etc. must match the reference figure, otherwise the check fails
def BuildModel():
    #3.1 shared_base is the shared-parameter backbone, built with the Sequential API;
    #it contains two Dense layers of 64 units each with relu activation.
    #NOTE!!! For plot_model to print the internal structure of the nested Sequential,
    #the input dimension must be given, e.g. add the parameter input_shape=(xxx,) to the first Dense layer
    #(note that a 1-D shape must be written here as "xxx,")
shared_base = tf.keras.Sequential([
Input(shape=(196,), name="dense_input"),
Dense(64, activation="relu", name="dense"),
Dense(64, activation="relu", name="dense_1")
],name='seq1')
    #3.2 x1 and x2 are the two image inputs of a pair; fill in the input dimension information
    x1=Input(shape=(196))
    x2=Input(shape=(196))
    #3.3 b1 and b2 are the two processing branches that apply the shared backbone
    b1=shared_base(x1)
    b2=shared_base(x2)
    #3.4 The outputs b1 and b2 are fed into our custom layer to compute b1+b2+bias;
    #note that multiple inputs are passed to a layer as a list.
    #Fill in the BiasPlusLayer parameters and inputs
b=BiasPlusLayer(64,name='BiasPlusLayer')([b1,b2])
    #3.5 The addition is actually implemented as classification with softmax activation, preceded by a Dense layer; fill in its parameters and input
    output=Dense(19,activation='softmax', name='dense_2')(b)
    #3.6 Finally build the Keras Model; fill in the inputs and outputs
    siamese_net=Model(inputs=[x1,x2],
                      outputs=output)
    #Plot the network structure for testing; do not change the path or the parameters
# plot_model(siamese_net, to_file='./test_figure/step1/siamese_net.png', show_shapes=True,expand_nested=True)
return siamese_net
#WORK3: ---------------END--------------------
#WORK4: --------------BEGIN-------------------
#Instantiate the network and train it
def test_fun():
    siamese_net=BuildModel()
    #4.1 Configure the model. The addition is implemented as classification, so choose a classification loss
    #(pick one that matches the format of the labels y) and an evaluation metric; leave the other training parameters unchanged
    siamese_net.compile(loss=tf.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),metrics=['accuracy'])
    #With the given training parameters, 12 epochs are usually enough to reach val_acc > 0.7, taking a bit over 200 seconds
    epochs=12
    train_datasets, test_datasets=PreparePlusData()
    #4.2 Configure the training parameters and start training
    history = siamese_net.fit(train_datasets, epochs=epochs, validation_data=test_datasets,verbose=2)
    #The returned items are all needed for grading; do not change them
return siamese_net, history, test_datasets
#WORK4: ---------------END--------------------
#Test code below
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
siamese_net, history, test_datasets=test_fun()
'''
#1. Check whether the network structure is correct by comparing plotted images
test_img = mpimg.imread('./test_figure/step1/siamese_net.png')
answer_img= mpimg.imread('./answer/step1/answer.png')
assert((answer_img == test_img).all())
print('Network pass!')
'''
#2. Test the BiasPlusLayer functionality
l=siamese_net.get_layer('BiasPlusLayer')
bias=l.get_weights()
r=l([1.,2.]).numpy()
r_np=1.+2.+bias[0]
assert((r == r_np).all())
print('BiasPlusLayer pass!')
#3. Plot sample results
iter_test=iter(test_datasets)
b_test=next(iter_test)
r_test=siamese_net.predict(b_test[0])
fig, ax = plt.subplots(nrows=2, ncols=5, sharex='all', sharey='all')
ax = ax.flatten()
for i in range(5):
img = b_test[0][0][i].numpy().reshape(14, 14)
ax[i].set_title('Label: '+str(b_test[1][i].numpy()))
ax[i].imshow(img, cmap='Greys', interpolation='nearest')
for i in range(5):
img = b_test[0][1][i].numpy().reshape(14, 14)
ax[i+5].set_title('Prediction: '+str(np.argmax(r_test[i])))
ax[i+5].imshow(img, cmap='Greys', interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.savefig("./PredictionExample.png")
print('Result pass!')
#4. Check whether the network training meets the target
if history.history['val_accuracy'][-1] > 0.7:
print("Success!")
| true |
e4d1b524314fa36ccc70799a513cc9b2c69dd544
|
Python
|
WengTzu/LeetCode
|
/Algorithm/29_Divide_Two_Integers/29_fast.py
|
UTF-8
| 1,216 | 3.40625 | 3 |
[] |
no_license
|
class Solution(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
quotient = 0
sign = 1
if divisor < 0:
divisor = -divisor
sign = -sign
if dividend < 0:
dividend = -dividend
sign = -sign
print(sign, dividend, divisor)
while dividend >= divisor:
divisor_tmp = divisor
j = 1
dividend -= divisor
quotient += j
print(quotient , dividend, divisor)
while dividend >= divisor_tmp + divisor_tmp:
j += j
divisor_tmp += divisor_tmp
dividend -= divisor_tmp
quotient += j
print(" ",quotient , dividend, divisor_tmp)
print(quotient)
result = sign * quotient
if result > 2**31-1:
return 2**31-1
elif result < -2**31:
return -2**31
else:
return result
if __name__ == '__main__':
a = Solution()
sol = a.divide(-100, 3)
print("solution")
print(sol)
| true |
77ac6bcf6af297270b35f39c190b506f1a80e28f
|
Python
|
crackkillz/pokemonBatch
|
/assets/sprites/image_processorBACK.py
|
UTF-8
| 1,357 | 3.296875 | 3 |
[] |
no_license
|
'''
Description: Converts Gen I pokemon sprites to text for pokemonBatch
Author: Soda Adlmayer
Date: 2017.02.26
'''
from PIL import Image
#set filepath
'''
print ("POKEMON NAME")
poke = input(":")
print ("BACK SPRITE OR FRONT SPRITE (B/F)")
x = input(":")
if x == 'B' or 'b':
end = '_backSprite'
elif x == 'F' or 'f':
end == '_frontSprite'
name = poke + end +".png"
'''
filename = r"C:\Users\Rudi\Documents\SODA\BATCH\pokemonBatch\data\other\sprites\c.png"
#open image
im = Image.open(filename)
width, height = im.size
#remove comment for back sprite
#resize image to half original size (as one square is four pixels)
size = int(height/2), int(width/2)
im = im.resize(size)
width, height = im.size
#set variables
n = 1
list1 = []
list2 = []
#loop rows
while n <= height:
#empty lists
del list1[:]
del list2[:]
#loop columns
for i in range (width):
xy = (i, n)
px = im.getpixel(xy)
#append pixel value to array
list1.append(px)
#choose text value based on pixel value
if list1[i] == 255:
list2.append(' ')
if list1[i] == 170:
list2.append('°')
if list1[i] == 85:
list2.append('±')
if list1[i] == 0:
list2.append('²')
#write to text file
f = open("BULBASAUR_backSprite.txt", 'a')
print(*list2, sep='', file=f)
#progres n
n += 1
| true |
d097baaa2970334a6eb86925e736e82f442d2249
|
Python
|
adityamagarde/TTH
|
/BaggageFitment/baggageFitmentIndex.py
|
UTF-8
| 1,315 | 3 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 19:17:43 2018
@author: ADITYA
"""
#BAGGAGE FITMENT INDEX:
import cv2
import serial
import numpy as np
#as soon as the bag crosses IR
arduinoData = serial.Serial('com29', 9600) #Here com29 is the port and 9600 is the baud rate
while(1 == 1):
myData = (arduinoData.readline().strip())
detectString = myData.decode('utf-8')
if(detectString == 'Motion Detected'):
capFront = cv2.VideoCapture(0)
capSide = cv2.VideoCapture(1)
_, imageFront = capFront.read()
_, imageSide = capSide.read()
cv2.imwrite('FrontView.png', imageFront)
cv2.imwrite('SideView.png', imageSide)
del(capFront)
del(capSide)
        #Since we know the distance between the camera and the baggage (it will be premeasured),
        #we can decide the scale, i.e. for example if 5px = 1cm, then we can calculate the length,
        #breadth and height of the bag
        # We then use the SSD (Single Shot multibox Detection) algorithm in order to draw bounding boxes around the baggage.
        # We obtain the boundaries, then find the length and breadth of the boundaries and use our relation (5px = 1cm, say)
        # in order to obtain the length, breadth and height of the baggage.
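# --- Illustrative sketch (added, not part of the original script) ---
# A hedged example of the dimension estimate described above: with an assumed,
# premeasured calibration of 5 px = 1 cm, a detector bounding box measured in
# pixels converts to centimetres by simple division. The helper name and the
# example box values below are hypothetical.
PX_PER_CM = 5.0  # assumed calibration from the premeasured camera distance
def box_to_cm(width_px, height_px, px_per_cm=PX_PER_CM):
    """Convert a bounding box measured in pixels to centimetres."""
    return width_px / px_per_cm, height_px / px_per_cm
# The front view would give length x height and the side view breadth x height, e.g.:
# length_cm, height_cm = box_to_cm(250, 300)
# breadth_cm, _ = box_to_cm(180, 300)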
| true |
7de8c5676c23e27450e2e586e4964677a57b1da5
|
Python
|
ozericyer/class2-module-assigment-week05
|
/battleship/WEEK5-Q6(calculator using try except).py
|
UTF-8
| 1,562 | 4.46875 | 4 |
[] |
no_license
|
#print out the options you have
print("Welcome to calculator")
i=1
while i==1: #We set while loop for ask choices again.
print("1)Addition 2)Subtraction 3)Multiplication 4)Division 5)Quit calculator")
choice = input("choose your option: ") #print out the options you have
try: #We use try-except for ZeroDivisionError and ValueError in while loop
if choice=='1':
add1=int(input("first number:")) #We write all condition:Addition,Subtraction,Multiplication,Division,
add2=int(input("second number:")) #Quit calculator conditions in try.
print(add1,"+",add2,"=",add1+add2)
elif choice=='2':
sub1=int(input("first number:"))
sub2=int(input("second number:"))
print(sub1,"-",sub2,"=",sub1-sub2)
elif choice=='3':
mul1=int(input("first number:"))
mul2=int(input("second number:"))
print(mul1,"x",mul2,"=",mul1*mul2)
elif choice=='4':
div1=int(input("first number:"))
div2=int(input("second number:"))
print(div1,"/",div2,"=",div1/div2)
elif choice == '5':
i=0
print("Thank you for using calculator")
else:
print("Please enter 1,2,3,4,5 numbers.It is not valid input")
except ZeroDivisionError: # If there is ZeroDivisionError or ValueError, we write exception
print("Cannot divide by zero!You should be careful!!!!!")
except ValueError:
print("Please enter numbers.It is not number")
| true |
bd9e7795cfb6c119b9267e9bbf436a76681dcb61
|
Python
|
dair-iitd/TourismQA
|
/src/custom/process/Processor2.py
|
UTF-8
| 2,196 | 2.984375 | 3 |
[
"Apache-2.0"
] |
permissive
|
# https://arxiv.org/pdf/1909.03527.pdf
# Extracting entities for post
import nltk
from fuzzywuzzy import fuzz
from typing import Dict, List
from collections import defaultdict
class Processor:
def __init__(self, cities: List[str], city_entities: Dict[str, Dict[str, dict]], neighborhood_words: List[str]) -> None:
self.cities = cities
self.city_entities = city_entities
self.neighborhood_words = neighborhood_words
def isNotNeighborhood(self, x, y):
b1 = all("%s %s" % (x,z) not in y for z in ["road", "s"])
b2 = all("%s %s" % (z,x) not in y for z in ["in the", "head up", "head up the", "not"])
b3 = all(("%s %s" % (z, x) not in y) and ("%s the %s" % (z, x) not in y) and ("%s %s" % (x, z) not in y) for z in self.neighborhood_words)
return b1 and b2 and b3
def getEntitiesForPost(self, post: Dict[str, dict]) -> List[Dict[str, dict]]:
entity_counts = defaultdict(int)
city = self.cities.index(post["city"])
entities = self.city_entities[str(city)]
for answer in post["answers"]:
try:
chunk = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(answer["body"])))
for node in chunk:
x = ""
if(type(node) == nltk.Tree):
x = "".join([x[0] for x in node.leaves()])
elif(node[1][:2] == "NN"):
x = node[0]
if(x == ""):
continue
for entity_id, entity_item in entities.items():
if(fuzz.ratio(x, entity_item["name"]) > 95 and self.isNotNeighborhood(x.lower(), answer["body"].lower())):
entity_counts[entity_id] += 1
for entity_id, entity_item in entities.items():
if((len(entity_item["name"]) > 6) and (" " + entity_item["name"].lower() in answer["body"].lower()) and self.isNotNeighborhood(x.lower(), answer["body"].lower())):
entity_counts[entity_id] += 1
except:
pass
post_entities = defaultdict(dict)
for entity_id, count in entity_counts.items():
post_entities[entity_id] = entities[entity_id]
post_entities[entity_id]["count"] = count
return post_entities
def __call__(self, post: Dict[str, dict]) -> None:
post_entities = self.getEntitiesForPost(post)
post["entities"] = post_entities
if(len(post["entities"]) == 0):
raise Exception("No entities found")
| true |
283d8417a79527663a3a0764272deb275b1e98bb
|
Python
|
predsci/CHMAP
|
/chmap/data/corrections/degradation/dev/05_aia_timedepend_standalone_example.py
|
UTF-8
| 3,990 | 3.234375 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""
Script to load in individual AIA 193 FITS files specified by the COSPAR
ISWAT team and perform our LBC transformation and EZseg detection directly
on the file.
** RUN THIS SCRIPT USING THE CHD INTERPRETER IN PYCHARM!
"""
import numpy as np
import json
import scipy.interpolate
import astropy.time
# ---------------------------------------------------------------------
# Functions For Computing a time-dependent correction
# ---------------------------------------------------------------------
def process_aia_timedepend_json(json_file):
"""
Read the raw JSON file of my time-depend struct that I generated with IDL.
Convert it to the proper data types
"""
with open(json_file, 'r') as json_data:
json_dict = json.load(json_data)
timedepend_dict = {}
# get the time-dependent factors as a dict with 1D arrays indexed
# by the integer wavelength specifier of the filter (converting from 2D array in the JSON)
factor_dict = {}
f2d = np.array(json_dict['FACTOR'])
for i, wave in enumerate(json_dict['WAVES']):
factor_dict[wave] = f2d[:,i]
timedepend_dict['factor'] = factor_dict
# get the dates as strings
timedepend_dict['dates'] = np.array(json_dict['DATES'], dtype=str)
# get the script that made this file and version
timedepend_dict['version'] = json_dict['VERSION']
timedepend_dict['idl_script'] = json_dict['SCRIPTNAME']
# get the times as an array of astropy.Time objects for interpolation
timedepend_dict['times'] = astropy.time.Time(timedepend_dict['dates'])
return timedepend_dict
def get_aia_timedepend_factor(timedepend_dict, datetime, wave):
"""
Get the time-dependent scaling factor for an AIA filter for
a given time and filter specifier. The idea is to account
for degridation of the detector/counts in time.
Parameters
----------
timedepend_dict: special dictionary returned by process_aia_timedepend_json
datetime: a datetime object for a given time of interest.
wave: an integer specifying the AIA filter (e.g. 193).
Returns
-------
factor: The scaling factor from 0 to 1. (1 is perfect, 0 is degraded).
"""
# convert to the astropy Time object
time = astropy.time.Time(datetime)
# get the values for interpolation
x = timedepend_dict['times'].mjd
y = timedepend_dict['factor'][wave]
# get the interpolator
interpolator = scipy.interpolate.interp1d(x, y)
factor = interpolator(time.mjd)
# now take the max because this gives an unshaped array...
factor = np.max(factor)
return factor
# ---------------------------------------------------------------------
# Script Starts here
# ---------------------------------------------------------------------
if __name__ == "__main__":
# JSON file with the AIA time-dependent corrections
aia_timedepend_file = 'SSW_AIA_timedepend_v10.json'
# read the time-dependent json file, turn it into a dictionary
timedepend_dict = process_aia_timedepend_json(aia_timedepend_file)
# print the keys of this dict
print(f'\n### Keys in the AIA timedependent correction dictionary: ')
for key in timedepend_dict.keys():
print(f' key: {key:16s} type: {type(timedepend_dict[key])}')
# now sample it at a few times using our custom function for interpolation (get_aia_timedepend_factor)
dates = ['2014-04-13T02:00:05.435Z', '2019-04-13T02:00:05.435Z']
for date in dates:
# astropy.time is a million times better than python's datetime for defining a time
time_now = astropy.time.Time(date)
print(f'\n### Factors for {str(time_now)}')
for wave in [94,131,171,193,211,335]:
# note time input to get_aia_timedepend_factor is a datetime for compatibility w/ our database/pandas
factor = get_aia_timedepend_factor(timedepend_dict, time_now.datetime, wave)
print(f' wavelength: {wave:3d}, factor: {factor:7.5f}')
| true |
aa95c4ee531eeb8ec3f7cebb7c587b390000b39f
|
Python
|
karan2808/Python-Data-Structures-and-Algorithms
|
/Arrays/PartitionEqualSubsetSum.py
|
UTF-8
| 1,316 | 3.578125 | 4 |
[
"MIT"
] |
permissive
|
class Solution:
def canPartition(self, nums):
sz = len(nums)
if sz == 1:
return False
# find the total sum
sum_ = 0
for i in range(sz):
sum_ += nums[i]
# if the sum is not divisible by 2 return false
if (sum_ % 2) != 0:
return False
# make a memoization array,
memo = [[-1 for i in range(sum_ // 2 + 1)] for i in range(sz + 1)]
def subSetSum(pos, currentSum):
# we found half partition
if currentSum == 0:
return True
# if we exceed number of elements or if current sum goes negative, we cant partition
elif pos >= sz or currentSum < 0:
return False
# if value in memo, return
if memo[pos][currentSum] > -1:
return memo[pos][currentSum]
# either include current number or dont
memo[pos][currentSum] = subSetSum(pos + 1, currentSum - nums[pos]) or subSetSum(pos + 1, currentSum)
return memo[pos][currentSum]
return subSetSum(0, sum_//2)
def main():
sol = Solution()
nums = [1, 5, 11, 5]
print("Can partition 1, 5, 11, 5? " + str(sol.canPartition(nums)))
if __name__ == "__main__":
main()
| true |
03c22d111e37687a3025c04fb7765b10e8612b61
|
Python
|
bigdata202005/PythonProject
|
/Selenium/test2.py
|
UTF-8
| 455 | 2.796875 | 3 |
[] |
no_license
|
import os
import time
import cv2
# pip install opencv-python
# URL of the image to download
url = "https://dispatch.cdnser.be/cms-content/uploads/2020/04/09/a26f4b7b-9769-49dd-aed3-b7067fbc5a8c.jpg"
# time check
# start = time.time()
# curl request
os.system("curl " + url + " > test.png")
# check how long the image download took
# print(time.time() - start)
# check the saved image (the file was saved as test.png above)
a = cv2.imread('test.png')
cv2.imshow('test', a)
cv2.waitKey()
| true |
3525b72918a5f83e5f4cee57be21d62467700e00
|
Python
|
DrakeMistBorn/Asynchronous-Python-Client-Server-Chat
|
/root/client_v2.py
|
UTF-8
| 4,160 | 3.34375 | 3 |
[] |
no_license
|
import asyncio
import time
def close():
"""
Function used to close the connection between the client and the server.
"""
print('[!] Closing connection')
time.sleep(1)
print('[!] Exiting')
time.sleep(1)
print("------------- Connection Closed -------------\n")
def commands():
"""
Function used to print all the commands available.
"""
print("[*] Commands:\n\n")
print("[ register ]\n\t< Register a new user to the server using the <username> ")
print("\tand <password> provided. If a user is already registered with the")
print("\tprovided <username>, the request is to be denied with a proper message highlighting ")
print("\tthe error for the user. A new personal folder ")
print("\tnamed <username> should be created on the server. >")
print("\n[ login ]\n\t< Log in the user conforming with <username> onto the server if the ")
print("\t<password> provided matches the password used while registering.")
print("\tIf the <password> does not match or if the <username> does not exist, an error ")
print("\tmessage should be returned to the request for the client to present")
print("\tto the user. >")
print("\n[ create_folder ]\n\t< Create a new folder with the specified <name> in the current ")
print("\tworking directory for the user issuing the request. If a")
print("\tfolder with the given name already exists, the request is to be denied with a ")
print("\tproper message highlighting the error for the user. >")
print("\n[ write_file ]\n\t< Write the data in <input> to the end of the file <name> in ")
print("\tthe current working directory for the user issuing the request,")
print("\tstarting on a new line. If no file exists with the given <name>, a new file is to ")
print("\tbe created in the current working directory for the user. >")
print("\n[ read_file ]\n\t< Read data from the file <name> in the current working directory ")
print("\tfor the user issuing the request and return the first")
print("\thundred characters in it. Each subsequent call by the same client is to return the ")
print("\tnext hundred characters in the file, up until all characters")
print("\tare read. If a file with the specified <name> does not exist in the current ")
print("\tworking directory for the user, the request is to be denied with a")
print("\tproper message highlighting the error for the user. >")
print("\n[ change_folder ]\n\t< Move the current working directory for the current user to ")
print("\tthe specified folder residing in the current folder.")
print("\tIf the <name> does not point to a folder in the current working directory, the ")
print("\trequest is to be denied with a proper message highlighting")
print("\tthe error for the user. >")
print("\n[ list ]\n\t< Print all files and folders in the current working directory for the ")
print("\tuser issuing the request. This command is expected to give")
print("\tinformation about the name, size, date and time of creation, in an easy-to-read ")
print("\tmanner. Shall not print information regarding content in ")
print("\tsub-directories. >")
print("\n[ id ]\n\t< Show the current user >")
async def tcp_echo_client():
"""
Main Client function to establish the connection with the Server
"""
print('\n[SYSTEM] Client side: type < commands > to show all available commands.\n')
reader, writer = await asyncio.open_connection('127.0.0.1', 8088)
# Loop for sending and receiving messages
while True:
message = input('[$] > ')
# Message to the server
writer.write(message.encode())
if message == "commands":
commands()
continue
elif message == 'exit':
break
# Message from the server
data = await reader.read(2048)
print(f'{data.decode()}')
# Closes the connection.
close()
time.sleep(1)
writer.close()
asyncio.run(tcp_echo_client())
| true |
bc5fe5a787d1060aa6afe5a44e41ada38356027e
|
Python
|
steffejr/ExperimentalStimuli
|
/PartialTrialDIR/Scripts/PsychoPyTask/FileSelectClass.py
|
UTF-8
| 849 | 2.6875 | 3 |
[] |
no_license
|
from PySide import QtGui
# This is used to select the file(s) of interest
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
#self.initUI()
def initUI(self):
self.btn = QtGui.QPushButton('Dialog', self)
self.btn.move(20, 20)
self.btn.clicked.connect(self.showDialog)
self.le = QtGui.QLineEdit(self)
self.le.move(130, 22)
self.setGeometry(300, 300, 290, 150)
self.setWindowTitle('Input dialog')
self.show()
def showDialog(self):
self.fileName = QtGui.QFileDialog.getOpenFileNames(self, 'Dialog Title', '/Users/jason/Dropbox/SteffenerColumbia/Scripts', selectedFilter='*.csv')
if self.fileName:
print self.fileName
return self.fileName
| true |
364bd4f2871a0735cb09e7b656429b313c2079a7
|
Python
|
jzsiggy/python-server-client
|
/test_request.py
|
UTF-8
| 540 | 2.796875 | 3 |
[] |
no_license
|
import requests
import random
import time
import requests
import json
import sys
def randomize():
bool = random.choice([True, False])
return bool
while True:
# time.sleep(0.1)
bool = randomize()
for i in range(10):
bool = str(bool)
try:
payload = {'cam0': bool}
r = requests.post('http://127.0.0.1:8080/cam', data=payload)
except:
sys.exit(1)
print(r.url)
# print(r.text)
print(bool)
time.sleep(0.5)
| true |
2cbdfb1664ba177a2dba497a11e8c6cc20ae046e
|
Python
|
Sindhu983/Dictionary
|
/saral7.py
|
UTF-8
| 283 | 2.96875 | 3 |
[] |
no_license
|
dic={
"first":"1",
"second": "2",
"third": "1",
"four": "5",
"five":"5",
"six":"9",
"seven":"7"
}
result={}
for key,value in dic.items():
if value not in result.values():
result[key]=value
print(result)
| true |
a77c3d9f0b9817a64d51ac4cdd643003783a7ceb
|
Python
|
iitzex/tsedraw
|
/crawl.py
|
UTF-8
| 6,562 | 2.796875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
import csv
import time
import logging
import requests
import argparse
from lxml import html
from datetime import datetime, timedelta
from os import mkdir
from os.path import isdir
class Crawler():
def __init__(self, prefix="data"):
""" Make directory if not exist when initialize """
if not isdir(prefix):
mkdir(prefix)
self.prefix = prefix
def _clean_row(self, row):
""" Clean comma and spaces """
for index, content in enumerate(row):
row[index] = content.replace(',', '')
return row
def _record(self, stock_id, row):
""" Save row to csv file """
f = open('{}/{}.csv'.format(self.prefix, stock_id), 'a')
import os
s = os.stat('{}/{}.csv'.format(self.prefix, stock_id))
if s.st_size == 0:
f.write('date,amount,volume,open,high,low,close,diff,number\n')
cw = csv.writer(f, lineterminator='\n')
cw.writerow(row)
f.close()
def _get_tse_data(self, date_str):
payload = {
'download': '',
'qdate': date_str,
# 'selectType': 'ALL'
'selectType': 'ALLBUT0999'
}
url = 'http://www.twse.com.tw/ch/trading/exchange/MI_INDEX/MI_INDEX.php'
# Get html page and parse as tree
page = requests.post(url, data=payload)
if not page.ok:
logging.error("Can not get TSE data at {}".format(date_str))
return
# Parse page
tree = html.fromstring(page.text)
for tr in tree.xpath('//table[2]/tbody/tr'):
tds = tr.xpath('td/text()')
# self.get_stocklist(tds)
sign = tr.xpath('td/font/text()')
sign = '-' if len(sign) == 1 and sign[0] == u'-' else ''
# print(self.year.__str__() + self.month.__str__())
date_str = '{0}-{1:02d}-{2:02d}'.format(self.year, self.month, self.day)
row = self._clean_row([
                date_str, # date
                tds[2][:-4], # shares traded (volume)
                tds[4], # trade value (amount)
                tds[5], # opening price
                tds[6], # highest price
                tds[7], # lowest price
                tds[8], # closing price
                sign + tds[9], # price change
                tds[3], # number of transactions
])
self._record(tds[0].strip(), row)
def _get_otc_data(self, date_str):
ttime = str(int(time.time()*100))
url = 'http://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_result.php?l=zh-tw&d={}&_={}'.format(date_str, ttime)
page = requests.get(url)
if not page.ok:
logging.error("Can not get OTC data at {}".format(date_str))
return
result = page.json()
if result['reportDate'] != date_str:
logging.error("Get error date OTC data at {}".format(date_str))
return
for table in [result['mmData'], result['aaData']]:
for tr in table:
date_str = '{0}-{1:02d}-{2:02d}'.format(self.year, self.month, self.day)
row = self._clean_row([
date_str,
                    tr[8][:-4], # shares traded (volume)
                    tr[9], # trade value (amount)
                    tr[4], # opening price
                    tr[5], # highest price
                    tr[6], # lowest price
                    tr[2], # closing price
                    tr[3], # price change
                    tr[10] # number of transactions
])
self._record(tr[0], row)
def get_data(self, year, month, day):
self.year = year
self.month = month
self.day = day
date_str = '{0}/{1:02d}/{2:02d}'.format(year - 1911, month, day)
print('Crawling {}'.format(date_str))
self._get_tse_data(date_str)
self._get_otc_data(date_str)
def main():
parser = argparse.ArgumentParser(description='Crawl data at assigned day')
parser.add_argument('day', type=int, nargs='*', help='assigned day (format: YYYY MM DD), default is today')
parser.add_argument('-b', '--back', action='store_true', help='crawl back from assigned day until 2004/2/11')
parser.add_argument('-c', '--check', action='store_true', help='crawl the assigned day')
args = parser.parse_args()
print(args)
crawler = Crawler()
end = datetime.today()
try:
if args.back:
begin = datetime(args.day[0], args.day[1], args.day[2])
elif args.check:
begin = datetime(args.day[0], args.day[1], args.day[2])
end = begin
else:
begin = datetime.today()
except IndexError:
parser.error('Date should be assigned with (YYYY MM DD) or none')
return
print('BEGIN: ' + begin.__str__())
print('END : ' + end.__str__())
if args.back or args.check: # otc first day is 2007/04/20 # tse first day is 2004/02/11
max_error = 5
error = 0
while error < max_error and end >= begin:
try:
crawler.get_data(begin.year, begin.month, begin.day)
error = 0
except OSError:
date_str = begin.strftime('%Y/%m/%d')
# logging.error('Crawl raise error {}'.format(date_str))
logging.error('Crawl raise error {} {} {}'.format(begin.year, begin.month, begin.day))
error += 1
continue
finally:
begin += timedelta(1)
else:
crawler.get_data(end.year, end.month, end.day)
def auto_crawl():
with open('data/0050.csv', 'r') as f:
last_line = f.readlines()[-1]
last_day = last_line.split(',')[0]
begin = datetime.strptime(last_day, '%Y-%m-%d')
begin += timedelta(1)
end = datetime.today()
crawler = Crawler()
max_error = 5
error = 0
print('BEGIN: ' + begin.__str__())
print('END : ' + end.__str__())
while error < max_error and end >= begin:
try:
crawler.get_data(begin.year, begin.month, begin.day)
error = 0
except OSError:
date_str = begin.strftime('%Y/%m/%d')
# logging.error('Crawl raise error {}'.format(date_str))
logging.error('Crawl raise error {} {} {}'.format(begin.year, begin.month, begin.day))
error += 1
continue
finally:
begin += timedelta(1)
if __name__ == '__main__':
# main()
auto_crawl()
| true |
7f4aa05464dc39b98ce020d7c5e424adc0c9fa9d
|
Python
|
charliephsu/bkbdrs_first_load
|
/ref_image.py
|
UTF-8
| 2,767 | 2.671875 | 3 |
[] |
no_license
|
import csv
import os
from shutil import copyfile
import re
infile = 'saved_output/out_with_id.csv'
img_src_dir = 'orig_data/attachments/bbdir_entry'
out_image_dir = 'saved_output/images/'
image_load_file = 'saved_output/image_load.tsv'
image_prefix = 'directory/'
def read_id_from_table():
data_old_id = {}
with open(infile) as csv_infile:
reader = csv.DictReader(csv_infile, delimiter='\t')
for row in reader:
data_old_id[row['old_id']] = row
return data_old_id
def read_image_files(image_path,id_lookup):
images_by_path = {}
for item in os.listdir(image_path):
id_dir_fullpath = os.path.join(image_path,item)
for diritem in os.listdir(id_dir_fullpath):
if os.path.isdir(id_dir_fullpath):
# img_fullpath is the src path for the image
image_filename = diritem
img_fullpath = os.path.join(id_dir_fullpath,image_filename)
#print("id {} -- file: {}".format(item,img_fullpath))
images_by_path[img_fullpath] = {}
images_by_path[img_fullpath]['id'] = item
images_by_path[img_fullpath]['filename'] = image_filename
image_list = []
with open(image_load_file,'w') as imageout:
writer = csv.writer(imageout, delimiter='\t')
for path,value in images_by_path.items():
# path is the fullpath
filename = value['filename']
old_id = value['id']
table_data = id_lookup.get(old_id,{})
private_id = table_data.get('private_id',None)
new_id = table_data.get('new_id',None)
#print("{} -- {} :: {}".format(path,filename,new_id))
# create new filename
# split extension
fname,fext = os.path.splitext(filename)
            # replace whitespace with underscore
new_filename = fname.replace(" ","_")
new_filename = new_filename.replace(".","_")
            # collapse double underscores
new_filename = re.sub('__+','_',new_filename)
new_filename = new_filename + fext
new_filename = private_id + "-" + new_filename
new_fullpath = os.path.join(out_image_dir,new_filename)
#print("Old: {} --> New: {}".format(filename,new_fullpath))
copyfile(path,new_fullpath)
# file will have image path, new_id
# file path will be directory/ + new file name
image_name_for_db = image_prefix + new_filename
writer.writerow((image_name_for_db,new_id))
if __name__ == "__main__":
data_id = read_id_from_table()
read_image_files(img_src_dir,data_id)
| true |
82386828e06a85350e834c807cd003896a97446e
|
Python
|
georgiedignan/she_codes_python
|
/Session2/conditionals_exercises.py
|
UTF-8
| 747 | 3.453125 | 3 |
[] |
no_license
|
#Exercise 1
# moths_in_house = bool(input("Are there moths in the house? "))
# if moths_in_house == True:
# print("Get the moths")
# else:
# print("No threats detected")
#Exercise 2
# light_color = "red"
# if light_color is "red":
# print("correct")
#Exercise 3
#Exercise 4
# height = 164
# if height > 120:
# print("Hop on!")
# else:
# print("Not today.")
#Exercise 5
# username = "georgie"
# password = "dignan"
# input_username = input("Username: ")
# input_password = input("Password: ")
# if input_username == username and input_password == password:
# print("Correct!")
# else:
# print("Incorrect")
#Exercise 6
# email = "georgiedignan@gmail.com"
# if "@" in email:
# print("Valid email address.")
| true |
48ab208577eae9346735afe1503d56f9680649a2
|
Python
|
xyztank/Appium_Test
|
/page_objects/base_page.py
|
UTF-8
| 2,925 | 2.90625 | 3 |
[] |
no_license
|
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.common.multi_action import MultiAction
from locators.iOS.siri_locators import SiriLocators
class BasePage(object):
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(driver, 10)
def find_element(self, *locator):
if locator.__len__() == 2:
return self.driver.find_element(*locator)
return self.driver.find_element(*(locator[1], locator[2] % locator[0]))
def find_elements(self, *locator):
if locator.__len__() == 2:
return self.driver.find_elements(*locator)
return self.driver.find_elements(*(locator[1], locator[2] % locator[0]))
def open_page(self, name):
self.driver.find_element_by_name(name).click()
def get_text(self, *el):
return self.find_element(*el).text
def is_elem_displayed(self, *el):
return self.find_element(*el).is_displayed()
def is_text_displayed(self, text):
return self.driver.find_element_by_name(text).is_displayed()
def go_back(self):
self.driver.back()
def tap(self, el):
action = TouchAction(self.driver)
action.tap(el).perform()
# iOS specific methods:
def scroll_by_name_ios(self, name):
self.driver.execute_script('mobile: scroll', {'name': name})
def scroll_by_direction_ios(self, direction):
self.driver.execute_script('mobile: scroll', {'direction': direction})
def hey_siri_command_ios(self, message):
self.driver.execute_script('mobile: siriCommand', {'text': message})
def get_hey_siri_text_ios(self):
return self.get_text(*SiriLocators.hey_siri)
def call_siri_by_contact_ios(self, message):
self.hey_siri_command_ios(message)
self.wait.until(EC.presence_of_element_located(SiriLocators.which_number))
self.hey_siri_command_ios("mobile")
def call_siri_by_number_ios(self, message, number):
self.hey_siri_command_ios(message)
self.wait.until(EC.presence_of_element_located(SiriLocators.who_to_call_message))
self.hey_siri_command_ios(number)
def get_siri_error_message_ios(self):
return self.wait.until(EC.presence_of_element_located(SiriLocators.error_cant_make_call)).text
# Android specific methods:
def scroll_by_coordinates_android(self,start_x, start_y, end_x, end_y, duration):
        # Somehow I couldn't manage to make a scroll using TouchAction, so I used driver.swipe instead
# action = TouchAction(self.driver)
# action.press(els[0]).wait(500).move_to(els[12]).release().perform()
self.driver.swipe(start_x, start_y, end_x, end_y, duration)
self.driver.swipe(start_x, start_y, end_x, end_y, duration)
| true |
ef4cfb63a271d80fdfde22ff912cc866c49f344e
|
Python
|
whooie/scripts
|
/random_select.py
|
UTF-8
| 2,674 | 2.921875 | 3 |
[] |
no_license
|
#!/usr/bin/python2
# random_select.py
import os
import random
import getopt
import sys
#pDir = os.path.dirname(os.path.realpath(__file__))
pDir = os.getcwd()
ask1 = True
ask2 = True
save = ""
isDone = "n"
isFirst = True
listAll = False
help = "Usage: \033[1mrandom_select.py\033[0m [ -n \033[4mnum\033[0m ] [ -P ]\n \033[1mrandom_select.py\033[0m -h"
numItems = 1
def help():
print("Usage: \033[1mrandom_select.py\033[0m [ -n \033[4mnum\033[0m ] [ -P ]")
print(" \033[1mrandom_select.py\033[0m -h")
try:
opts, args = getopt.getopt(sys.argv[1:],"hn:P")
except getopt.GetoptError:
help()
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
help()
exit(0)
elif opt == "-n":
numItems = int(arg)
print("Select "+arg+" items")
elif opt == "-P":
listAll = True
while ask1:
userIn = save+raw_input("Directory?\n>> "+pDir+"/"+save)
ask2 = True
tDir = os.path.join(pDir,userIn)
if isFirst == True or userIn != save or isDone == "z" or isDone == "q" or isDone == "s":
stuff = os.listdir(tDir)
stuff.sort()
isFirst = False
# numItems = int(raw_input("How many items? Items remaining: "+str(len(stuff))+"\n>> "))
if numItems > len(stuff):
numItems = len(stuff)
print(":: Looking in "+tDir+"...")
if listAll == True:
print("---------------")
for i in stuff:
print(i)
print("---------------")
print("")
for i in range(0,numItems):
a = random.choice(range(0,len(stuff)))
choice = stuff[a]
print(" "+choice)
stuff.remove(stuff[a])
print("")
while ask2:
if numItems == 1:
isDone = raw_input("Continue? [y,n,q,a,s,z] ("+str(len(stuff))+")\n>> ")
else:
isDone = raw_input("Continue? [y,n,q,a,s] ("+str(len(stuff))+")\n>> ")
if isDone == "y":
ask2 = False
save = ""
elif isDone == "n":
ask2 = False
ask1 = False
elif isDone == "q":
ask2 = False
save = ""
pathItems = userIn.split("/")
for i in range(len(pathItems) - 1):
save = save+pathItems[i]
elif isDone == "a":
ask2 = False
save = userIn
elif isDone == "s":
ask2 = False
save = userIn
elif isDone == "z":
if numItems == 1:
ask2 = False
if userIn == "":
save = userIn+choice
else:
save = userIn+"/"+choice
else:
print("Invalid.")
else:
print("Invalid.")
| true |
ea801404650045bb1eedc8a06f10d8e06e33b2b8
|
Python
|
MatthewHallPena/FlightSoftware
|
/drivers/power/HITL_testing/HITL_table_test.py
|
UTF-8
| 4,741 | 2.625 | 3 |
[] |
no_license
|
# Commands we want to test on the HITL table in SP2020
import power_controller as pc
import power_structs as ps
import time
HITL_test = pc.Power()
ps.gom_logger.debug("Turning off all outputs")
OUTPUTS = ["comms", "burnwire_1", "glowplug_2", "glowplug", "solenoid", "electrolyzer"]
for i in range(0, 6):
HITL_test.set_single_output(OUTPUTS[i], 0, 0)
ps.gom_logger.debug(" --- TESTING displayAll --- \n")
HITL_test.displayAll()
WDT_pre_data = HITL_test.get_hk_wdt()
ps.gom_logger.debug("Pre-Test WDT data:")
ps.gom_logger.debug("I2C Time left: " + str(WDT_pre_data.wdt_i2c_time_left))
ps.gom_logger.debug("GND Time left: " + str(WDT_pre_data.wdt_gnd_time_left))
ps.gom_logger.debug("CSP Pings left: " + str(WDT_pre_data.wdt_csp_pings_left))
ps.gom_logger.debug("I2C Reboots: " + str(WDT_pre_data.counter_wdt_i2c))
ps.gom_logger.debug("GND Reboots: " + str(WDT_pre_data.counter_wdt_gnd))
ps.gom_logger.debug("CPS Reboots: " + str(WDT_pre_data.counter_wdt_csp))
ps.gom_logger.info("\nBeginning output testing in 5 seconds\n")
time.sleep(5)
# Turn every channel on then off sequentially using set_single_output
ps.gom_logger.debug("\n --- TESTING OUPUTS --- \n")
out_num = 0
for i in OUTPUTS:
current_output = i
ps.gom_logger.debug(" ### TESTING OUT_" + str(out_num) + " ###\n")
HITL_test.set_single_output(current_output, 1, 0) # Turns on channel
time.sleep(1) # wait one second
HK_data = HITL_test.get_hk_2() # get the housekeeping data
HITL_test.set_single_output(current_output, 0, 0) # Turn off channel
ps.gom_logger.debug("OUT_" + str(out_num) + " System Current: " + str(HK_data.cursys))
ps.gom_logger.debug("OUT_" + str(out_num) + " Battery Voltage: " + str(HK_data.vbatt))
ps.gom_logger.debug("\n")
out_num = out_num + 1
time.sleep(5)
# Test the component-functions
# test burnwire:
# TODO: Check with Aaron (either one) about software requirements (i.e. what data the component functions should return)
ps.gom_logger.debug("Testing component functions in 5 seconds")
ps.gom_logger.debug("\n--- TESTING COMPONENT FUNCTIONS --- \n")
time.sleep(5)
ps.gom_logger.debug("Testing burnwire:")
ps.gom_logger.debug("You should see HITL outputs 9 and 10 light up")
HITL_test.burnwire(1)
time.sleep(1)
ps.gom_logger.debug("Testing Glowplug")
ps.gom_logger.debug("You should see output 11 light up")
HITL_test.glowplug(1)
time.sleep(1)
ps.gom_logger.debug("Testing Solenoid")
ps.gom_logger.debug("You should see HITL output 12 light up")
HITL_test.solenoid(10, 990)
time.sleep(1)
ps.gom_logger.debug("Testing Electrolyzer")
ps.gom_logger.debug("You should see HITL output 13 light up for 10 seconds")
HITL_test.electrolyzer(True)
time.sleep(10)
HITL_test.electrolyzer(False)
ps.gom_logger.debug("\nComponent function testing done")
time.sleep(2)
ps.gom_logger.debug("\n--- Testing WDTs ---\n")
time.sleep(1)
# get wdt data
WDT_data = HITL_test.get_hk_wdt()
ps.gom_logger.debug("Initial post-Test WDT data:")
ps.gom_logger.debug("I2C Time left: " + str(WDT_data.wdt_i2c_time_left))
ps.gom_logger.debug("GND Time left: " + str(WDT_data.wdt_gnd_time_left))
ps.gom_logger.debug("CSP Pings left: " + str(WDT_data.wdt_csp_pings_left))
ps.gom_logger.debug("I2C Reboots: " + str(WDT_data.counter_wdt_i2c))
ps.gom_logger.debug("GND Reboots: " + str(WDT_data.counter_wdt_gnd))
ps.gom_logger.debug("CPS Reboots: " + str(WDT_data.counter_wdt_csp))
time.sleep(5)
# test i2c wdt
HITL_test.ping(1)
WDT_data_i2c_test = HITL_test.get_hk_wdt()
ps.gom_logger.debug("\nWDT data after I2C ping")
ps.gom_logger.debug("I2C Time left: " + str(WDT_data_i2c_test.wdt_i2c_time_left))
ps.gom_logger.debug("GND Time left: " + str(WDT_data_i2c_test.wdt_gnd_time_left))
ps.gom_logger.debug("CSP Pings left: " + str(WDT_data_i2c_test.wdt_csp_pings_left))
ps.gom_logger.debug("I2C Reboots: " + str(WDT_data_i2c_test.counter_wdt_i2c))
ps.gom_logger.debug("GND Reboots: " + str(WDT_data_i2c_test.counter_wdt_gnd))
ps.gom_logger.debug("CSP Reboots: " + str(WDT_data_i2c_test.counter_wdt_csp))
time.sleep(5)
# reset ground wdt
HITL_test.reset_wdt()
# see if it worked
WDT_data_ground_test = HITL_test.get_hk_wdt()
ps.gom_logger.debug("\nWDT data after Ground timer reset")
ps.gom_logger.debug("I2C Time left: " + str(WDT_data_ground_test.wdt_i2c_time_left))
ps.gom_logger.debug("GND Time left: " + str(WDT_data_ground_test.wdt_gnd_time_left))
ps.gom_logger.debug("CSP Pings left: " + str(WDT_data_ground_test.wdt_csp_pings_left))
ps.gom_logger.debug("I2C Reboots: " + str(WDT_data_ground_test.counter_wdt_i2c))
ps.gom_logger.debug("GND Reboots: " + str(WDT_data_ground_test.counter_wdt_gnd))
ps.gom_logger.debug("CSP Reboots: " + str(WDT_data_ground_test.counter_wdt_csp))
ps.gom_logger.debug("WDT Testing Done.")
| true |
b8946ba9f9c81c79c3bf51295c428c7a17586215
|
Python
|
leohanwww/Python-Scripts
|
/keras_fashion_mnist.py
|
UTF-8
| 1,114 | 2.828125 | 3 |
[] |
no_license
|
import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
fashion = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
'''
plt.imshow(train_images[0])
plt.show()
'''
train_images = train_images / 255.0
test_images = test_images / 255.0
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(
optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.fit(train_images, train_labels)
test_loss, test_accuracy = model.evaluate(test_images, test_labels)
print('test accuracy:', test_accuracy)
predictions = model.predict(test_images)
# count how many test predictions match the true labels
pre_count = 0
for i in range(len(predictions)):
    if np.argmax(predictions[i]) == test_labels[i]:
        pre_count += 1
print(pre_count)
| true |
bef4ec5f48743ef1ad4551e2079746c26ba8953e
|
Python
|
Harsha2319/Estimation-of-Rainfall-Quantity-using-Hybrid-Ensemble-Regression
|
/codes/Main - BAG WA.py
|
UTF-8
| 1,623 | 2.609375 | 3 |
[] |
no_license
|
import pandas as pd
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import median_absolute_error as mdae
from sklearn.metrics import explained_variance_score as evs
from sklearn.metrics import r2_score as r2
from itertools import combinations
def rmse(y, p):
return mse(y, p)**0.5
data = pd.read_csv('C:\\Users\\Preetham G\\Documents\\Research Projects\\Ensemble Rainfall\\Results\\Main - BAG Pred.csv')
name = ['MLR', 'DTR(6)', 'PR(4)']
r2_v = [0.833, 0.667, 0.5]
comb_names = []
comb_r2 = []
for i in range(1, len(name)+1):
m = combinations(name, i)
for j in m:
comb_names.append(list(j))
for i in range(1, len(r2_v)+1):
m = combinations(r2_v, i)
for j in m:
comb_r2.append(list(j))
mse_f = []
rmse_f = []
mae_f = []
mdae_f = []
evs_f = []
r2_f = []
y = data['Actual']
for i, j in zip(comb_names, comb_r2):
print(i)
df = data[i]
for k, l in zip(i, j):
df[k] = (l/sum(j))*df[k]
p = df.sum(axis=1)
mse_f.append(mse(y, p))
rmse_f.append(rmse(y, p))
mae_f.append(mae(y, p))
mdae_f.append(mdae(y, p))
evs_f.append(evs(y, p))
r2_f.append(r2(y, p))
d = {}
d['Combinations'] = comb_names
d['MSE'] = mse_f
d['RMSE'] = rmse_f
d['MAE'] = mae_f
d['MDAE'] = mdae_f
d['EVS'] = evs_f
d['R2'] = r2_f
df = pd.DataFrame(d, columns=['Combinations', 'MSE', 'RMSE', 'MAE', 'MDAE', 'EVS', 'R2'])
print(df)
df.to_csv('C:\\Users\\Preetham G\\Documents\\Research Projects\\Ensemble Rainfall\\Results\\Main - BAR WA.csv', index=False)
| true |
a0182be7e619f5bc16119879d11fb3f65e43a08d
|
Python
|
diedrebrown/pfch-spring2020-blue
|
/Code/Blue_GetRijksmuseum2-1.py
|
UTF-8
| 2,686 | 3.21875 | 3 |
[
"MIT"
] |
permissive
|
# Blue at the Rijksmuseum - Get Data
# This code is based on lessons from Matt Miller's INFO 644 Programming for Cultural Heritage Course at Pratt Institute
# Objectives:
# 1. Get information about blue objects at the Rijksmuseum using the Rijksmuseum API.
# 2. Store information as text dictionary.
# 3. Access the individual artObjects from the dictionary and store to a CSV.
import requests, json
import csv
import pandas as pd
# get information from the rijksmuseum api and use it to get works that are/mention blue and have images
bluerijks = requests.get("https://www.rijksmuseum.nl/api/en/collection?key=????????&q=blue&imgonly")
# print(bluerijks.text)
# store retrieved api info as a dictionary
bluerijksdata = json.loads(bluerijks.text)
# write retrieved data to a json file backup
with open('bluerijksinfo.json', 'w') as outfile:
json.dump(bluerijksdata, outfile)
# print the dictionary keys
print(bluerijksdata.keys())
# print(len(bluerijksdata))
# keys include 'elapsedMilliseconds', 'count', 'countFacets', 'artObjects', 'facets'
# 'count' is the number of records/items that match my search = 452
# 'countFacets' gives specifics on those items:
# how many have an image? 370
# how many are on display in the museum? 66
# 'artObjects' contains the information on the items as lists of dictionaries
# 'facets' contains nested dictionaries of a key-value pairs of counts of information, such as:
# artist:number of their works
# country: number of items from country
# hex code: number of images with hex code
# ...and more
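# Illustrative sketch (added for this write-up, not part of the original workflow):
# peek at the first few counts inside each facet. The exact facet layout is an
# assumption about the API response, so every lookup is guarded with .get().
for facet in bluerijksdata.get('facets', []):
    if isinstance(facet, dict):
        print(facet.get('name'), facet.get('facets', [])[:3])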
# store artObjects into a variable as a list of dictionaries which we may use later
bluerijksObjects = bluerijksdata.get('artObjects')
# print(type(bluerijksObjects), len(bluerijksObjects))
# bluerijksObjects is a list of 10 items/dictionaries
# print(bluerijksObjects[0])
# print(bluerijksObjects[2])
# create a csv file with the information of the object
with open('blueitem3.csv', mode='w') as blueitems_file:
blueitemswriter = csv.writer(blueitems_file, delimiter=',')
writecount = 0
for item in bluerijksObjects:
if writecount == 0:
header = item.keys()
blueitemswriter.writerow(header)
writecount += 1
blueitemswriter.writerow(item.values())
# let's modify the csv for only the information we need
# objectNumber [item 2], title [item 3], principleOrFirstMaker [item 5], permitDownload [item 8], webImage[item 9] (url from webImage [item 5])
readdf = pd.read_csv('blueitem3.csv')
moddf = readdf.drop(columns=['links','id','headerImage','productionPlaces'], axis=1)
# print(moddf)
# store moddf as a csv
moddf.to_csv('blueitemsfinal.csv', index=False)
| true |
8da95bc87a2b2f73f7c7b1e29ca13fbd02f3c374
|
Python
|
masfell/AAyMineria
|
/Práctica 6/Parte 2.py
|
UTF-8
| 5,451 | 2.625 | 3 |
[] |
no_license
|
from process_email import email2TokenList
import codecs
from get_vocab_dict import getVocabDict
import numpy as np
import os
from sklearn import svm
import matplotlib.pyplot as plt
vocab_dict = getVocabDict()
def convertToIndices(token):
indicesOfWords = [vocab_dict[t] for t in token if t in vocab_dict]
result = np.zeros((len(vocab_dict), 1))
for index in indicesOfWords:
result[index-1] = 1
return result
def read_spam():
spam_emails = []
directorio = "spam"
i = 1
for spam_email in os.listdir(directorio):
email_contents = codecs.open(
'{0}/{1:04d}.txt'.format(directorio, i), 'r', encoding='utf-8', errors='ignore').read()
tokens = email2TokenList(email_contents)
tokens = convertToIndices(tokens)
i += 1
spam_emails.append(tokens)
    print("Spam emails read: ", i - 1)
return spam_emails
def read_easyHam():
no_spam_emails = []
directorio = "easy_ham"
i = 1
for no_spam in os.listdir(directorio):
email_contents = codecs.open(
'{0}/{1:04d}.txt'.format(directorio, i), 'r', encoding='utf-8', errors='ignore').read()
tokens = email2TokenList(email_contents)
tokens = convertToIndices(tokens)
i += 1
no_spam_emails.append(tokens)
    print("Easy ham emails read: ", i-1)
return no_spam_emails
def separate_sets(spam_emails, no_spam_emails):
    # Take 60% of the spam and non-spam emails as the training set
n_nonspam_train = int(len(no_spam_emails)*0.6)
n_spam_train = int(len(spam_emails) * 0.6)
nonspam_train = no_spam_emails[:n_nonspam_train]
spam_train = spam_emails[:n_spam_train]
    # Join the spam and non-spam examples
Xtrain = np.concatenate(nonspam_train+spam_train, axis=1).T
ytrain = np.concatenate(
(np.zeros((n_nonspam_train, 1)),
np.ones((n_spam_train, 1))
), axis=0)
    # Next, 20% for the validation set
n_nonspam_cv = int(len(no_spam_emails)*0.2)
n_spam_cv = int(len(spam_emails) * 0.2)
nonspam_cv = no_spam_emails[n_nonspam_train:n_nonspam_train+n_nonspam_cv]
spam_cv = spam_emails[n_spam_train:n_spam_train+n_spam_cv]
Xval = np.concatenate(nonspam_cv+spam_cv, axis=1).T
yval = np.concatenate(
(np.zeros((n_nonspam_cv, 1)),
np.ones((n_spam_cv, 1))
), axis=0)
    # Finally, the remaining 20% for the test set
n_nonspam_test = len(no_spam_emails) - n_nonspam_train - n_nonspam_cv
n_spam_test = len(spam_emails) - n_spam_train - n_spam_cv
nonspam_test = no_spam_emails[-n_nonspam_test:]
spam_test = spam_emails[-n_spam_test:]
Xtest = np.concatenate(nonspam_test+spam_test, axis=1).T
ytest = np.concatenate(
(np.zeros((n_nonspam_test, 1)),
np.ones((n_spam_test, 1))
), axis=0)
return Xtrain, ytrain, Xval, yval, Xtest, ytest
def draw_C_values(C_test_values, error_train, error_val):
plt.figure(figsize=(8, 5))
plt.plot(C_test_values, error_val, 'or--', label='Validation Set Error')
plt.plot(C_test_values, error_train, 'bo--', label='Training Set Error')
plt.xlabel('$C$ Value', fontsize=16)
plt.ylabel('Classification Error [%]', fontsize=14)
plt.title('Finding Best C Value', fontsize=18)
plt.xscale('log')
plt.legend()
plt.show()
def find_better_C(Xtrain, ytrain, Xval, yval):
C_test_values = [0.0001, 0.001, 0.01, 0.03, 0.1, 1.0, 3.0, 10.0, 30.0]
error_train = []
error_val = []
print('C\tTrain Error\tValidation Error\n')
for testing_c in C_test_values:
linear_svm = svm.SVC(C=testing_c, kernel='linear')
        # Fit the SVM to the training examples
linear_svm.fit(Xtrain, ytrain.flatten())
        # Check the error on the validation set
cv_predictions = linear_svm.predict(Xval).reshape((yval.shape[0], 1))
validation_error = 100. * \
float(sum(cv_predictions != yval))/yval.shape[0]
error_val.append(validation_error)
        # also check the error on the training set
train_predictions = linear_svm.predict(
Xtrain).reshape((ytrain.shape[0], 1))
train_error = 100. * \
float(sum(train_predictions != ytrain))/ytrain.shape[0]
error_train.append(train_error)
print('{}\t{}\t{}\n'.format(testing_c, train_error, validation_error))
draw_C_values(C_test_values, error_train, error_val)
    # From the plot and the error values we can see that the best C values are 0.1 and 3.0,
    # although 0.1 looks better since 3.0 overfits the training examples
def best_c_testing(Cval, Xtrain, ytrain, Xtest, ytest):
best_svm = svm.SVC(C=Cval, kernel='linear')
best_svm.fit(Xtrain, ytrain.flatten())
test_predictions = best_svm.predict(Xtest).reshape((ytest.shape[0], 1))
test_acc = 100. * float(sum(test_predictions == ytest))/ytest.shape[0]
print(f'Test set accuracy using C ={Cval} = %0.2f%%' % test_acc)
def main():
spam_set = read_spam()
noSpam_set = read_easyHam()
Xtrain, ytrain, Xval, yval, Xtest, ytest = separate_sets(
spam_set, noSpam_set)
find_better_C(Xtrain, ytrain, Xval, yval)
best_c_testing(0.1, Xtrain, ytrain, Xtest, ytest)
best_c_testing(3.0, Xtrain, ytrain, Xtest, ytest)
    # we can see that 0.1 fits the data better than 3.0
main()
| true |
a28385f19bc05f9fd5e634091292eb5df0ff6253
|
Python
|
abbalcerek/nbd4
|
/zadanie11/rozwiazanie.py
|
UTF-8
| 1,302 | 2.9375 | 3 |
[] |
no_license
|
#!/usr/bin/env python
from datetime import datetime
import string
import riak
# initialize riak client
client = riak.RiakClient(pb_port=8087, protocol='pbc')
marleen = {'user_name': 'marleenmgr',
'full_name': 'Marleen Manager',
'email': 'marleen.manager@riak.com'}
# create new bucket
myBucket = client.bucket('nbd_riak')
# save the record to the bucket
record = myBucket.new(marleen["email"], data=marleen).store()
# record.store()
print(f"Rekord inicjalnie zapisany w bazie:\n key: {record.key}, value: {record.data}")
# fetch and print saved record
record_fetched = myBucket.get(record.key)
print(f"Rekord pobrany z bazy po inicjalnym zapisie:\n {record_fetched.data}")
# update record - capitalize username
data = record_fetched.data
data["user_name"] = record_fetched.data["user_name"].upper()
record_fetched.data = data
record_fetched.store()
# fetch record and print after update
record_fetched_after_update = myBucket.get(record.key)
print(f"Rekord pobrany z bazy po aktualizacji pola user_name:\n {record_fetched_after_update.data}")
# remove record with given key
key = record_fetched_after_update.key
myBucket.delete(key)
# get data after record for given key was deleted
print(f"Wartosc pola 'data' po usunieciu rekordu dla klucza:\n {myBucket.get(record_fetched_after_update.key).data}")
| true |
5b4fcb433d8aca94169fb7f1b0018d61a637d6fd
|
Python
|
ludansir/py290_course
|
/py290_魯業群_hw3.py
|
UTF-8
| 2,885 | 3.0625 | 3 |
[] |
no_license
|
text = '''2015年7月21日蘋果公司發表2015年第二季財報,Apple Watch的銷售狀況和營收與iPod、
Beats耳機和機上盒化為「其他產品」統計,蘋果公司未公開這款產品的具體銷售狀況,各類研究機構對於
Apple Watch的銷量評估也大相徑庭,單季銷量從190萬台到430萬台不等,顯然 Apple Watch 的銷量並沒有達到市場預期。
在蘋果公司的財報會議上,CEO Tim Cook 沒有正面回應分析師有關 Apple Watch 銷量的問題,蘋果公司暫時不關注
Apple Watch 的銷量,重點是打造一個生態體系,為 2015 年的聖誕購物季做準備。之前曾有消息稱 Apple Watch
進入6月後日銷量暴跌,Tim Cook 表示這款產品在 6 月的銷量高於上市初期。
據市場研究公司 Canalys 的報告顯示,2015 年第二季 Apple Watch 的銷量大約為 430 萬台,憑藉這一款產品,
蘋果公司輕鬆地超過了 Fitbit、小米等廠商,在穿戴式裝置市場佔據領先地位。但 Apple Watch
在該季的銷量出現了下滑的趨勢,僅為 2015 年第一季 60%。Canalys 認為蘋果公司在穿戴式裝置市場表現出了強大的市場號召力,
Apple Watch 的銷售均價遠高於其他競爭對手,但還是創造了非常驚人的銷售業績,Apple Watch 的目標客戶主要是蘋果產品的忠實消費者,
普通消費者對於 Apple Watch 的興趣不大。隨著電子產品銷售旺季的到來,Apple Watch 的銷量有望反彈。
2015 年第二季蘋果公司「其他產品」總營收為 26 億美元,2014 年同期為 17 億美元,
這表明Apple Watch至少為蘋果公司帶來了 10 億美元的營收,據 Bloomberg 的資料顯示,
Apple Watch 的銷售均價為 499 美元,據此估算 Apple Watch 在 2015 年第二季的銷量至少為 190 萬台,
若產品均價高於 550 美元,則意味著蘋果公司只售出了大約 100 萬台 Apple Watch,與市場平均 400 萬台的預期相去甚遠。
以往蘋果公司在發表新品後,銷售初期就會及時公開產品銷量,Apple Watch 上市數月至今仍未公開任何官方銷售資料,
蘋果公司只是一再表示 Apple Watch 賣得很好。這樣反常的表現加深了外界對於 Apple Watch 銷量的質疑,
從 Tim Cook 在財報會議上的表態來說,Apple Watch 在 2015 年 6 月之後已經進入了供貨穩定期,
也就是說 Apple Watch 已經開始有庫存,對於一款上市 3 個月的新品而言,這不是一個好消息。'''
find_str = input('請輸入要找的字:')
match = text.count(find_str)
i = 0
while i < len(text):  # equivalent bound: keep searching until the end of the text
i = text.find(find_str, i)
if i == -1:
break
print(i)
i = i + 1
#print('總共有%d個%s',%(text.count(find_str)),'find_str')
print('總共有%d個%s'%(match,find_str))
| true |
92f700d67e263a06d18253bdf77807134054282d
|
Python
|
usnistgov/core_explore_example_app
|
/core_explore_example_app/utils/query_builder.py
|
UTF-8
| 10,108 | 2.625 | 3 |
[
"NIST-Software",
"BSD-3-Clause"
] |
permissive
|
"""Utils for the query builder
"""
from os.path import join
from django.template import loader
from core_main_app.settings import MONGODB_INDEXING
from xml_utils.xsd_types.xsd_types import (
get_xsd_numbers,
get_xsd_gregorian_types,
)
from core_explore_example_app.utils.xml import get_enumerations
class BranchInfo:
"""Store information about a branch from the xml schema while it is being processed for field selection"""
def __init__(self, keep_the_branch=False, selected_leaves=None):
self.keep_the_branch = keep_the_branch
self.selected_leaves = (
selected_leaves if selected_leaves is not None else []
)
def add_selected_leaf(self, leaf_id):
"""add_selected_leaf
Args:
leaf_id:
Returns
"""
self.selected_leaves.append(leaf_id)
self.keep_the_branch = True
# Util functions
def prune_html_tree(html_tree):
"""Create a custom HTML tree from fields chosen by the user
Args:
html_tree:
Returns:
"""
any_branch_checked = False
list_ul = html_tree.findall("./ul")
for ul in list_ul:
branch_info = prune_ul(ul)
if branch_info.keep_the_branch:
any_branch_checked = True
return any_branch_checked
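# Minimal illustrative sketch (not part of this module's API, and never called):
# the kind of lxml tree prune_html_tree() expects -- nested ul/li elements whose
# checkbox inputs carry a "true"/"false" value. lxml is assumed available since
# the prune helpers already rely on getparent().
def _example_prune_usage():
    """Build a tiny two-leaf tree and prune it; returns True (one leaf is checked)."""
    from lxml import etree

    tree = etree.fromstring(
        "<div>"
        "<ul>"
        '<li class="leaf-1"><input type="checkbox" value="true"/></li>'
        '<li class="leaf-2"><input type="checkbox" value="false"/></li>'
        "</ul>"
        "</div>"
    )
    # The checked <li> gains select_class/select_id attributes as a side effect.
    return prune_html_tree(tree)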
def prune_ul(ul):
"""Process the ul element of an HTML list
Args:
ul:
Returns:
"""
list_li = ul.findall("./li")
branch_info = BranchInfo()
for li in list_li:
li_branch_info = prune_li(li)
if li_branch_info.keep_the_branch:
branch_info.keep_the_branch = True
branch_info.selected_leaves.extend(li_branch_info.selected_leaves)
checkbox = ul.find("./input[@type='checkbox']")
if checkbox is not None:
if "value" in checkbox.attrib and checkbox.attrib["value"] == "true":
# set element class
parent_li = ul.getparent()
element_id = parent_li.attrib["class"]
add_selection_attributes(parent_li, "element", element_id)
# tells to keep this branch until this leaf
branch_info.add_selected_leaf(element_id)
if not branch_info.keep_the_branch:
add_selection_attributes(ul, "none")
return branch_info
def prune_li(li):
"""Process the li element of an HTML list
Args:
li:
Returns:
"""
list_ul = li.findall("./ul")
branch_info = BranchInfo()
if len(list_ul) != 0:
selected_leaves = []
for ul in list_ul:
ul_branch_info = prune_ul(ul)
if ul_branch_info.keep_the_branch:
branch_info.keep_the_branch = True
selected_leaves.extend(ul_branch_info.selected_leaves)
        # sub-element queries are available when more than one element is selected under the same element,
        # and the data are stored in MongoDB
if MONGODB_INDEXING and len(selected_leaves) > 1:
# not for the choices
if li[0].tag != "select":
# TODO: check if test useful
if "select_class" not in li.attrib:
leaves_id = " ".join(selected_leaves)
add_selection_attributes(li, "parent", leaves_id)
if not branch_info.keep_the_branch:
add_selection_attributes(li, "none")
return branch_info
else:
try:
checkbox = li.find("./input[@type='checkbox']")
if checkbox.attrib["value"] == "false":
add_selection_attributes(li, "none")
return branch_info
else:
element_id = li.attrib["class"]
add_selection_attributes(li, "element", element_id)
# tells to keep this branch until this leaf
branch_info.add_selected_leaf(element_id)
return branch_info
except Exception:
return branch_info
def add_selection_attributes(element, select_class, select_id=None):
"""Add css attribute to selected element
Args:
element:
select_class:
select_id:
Returns:
"""
element.attrib["select_class"] = select_class
if select_id is not None:
element.attrib["select_id"] = select_id
# Rendering functions
def render_yes_or_not():
    """Return a string that represents an html select with yes or no options
Returns:
"""
return _render_template(
join(
"core_explore_example_app", "user", "query_builder", "yes_no.html"
)
)
def render_and_or_not():
"""Return a string that represents an html select with AND, OR, NOT options
Returns:
"""
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"and_or_not.html",
)
)
def render_numeric_select():
"""Return a string that represents an html select with numeric comparisons
Returns:
"""
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"numeric_select.html",
)
)
def render_value_input():
"""Return an input to type a value
Returns:
"""
return _render_template(
join("core_explore_example_app", "user", "query_builder", "input.html")
)
def render_gregorian_strict_match():
    """Return the html widget for strict matching on gregorian dates
Returns:
"""
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"gregorian_strict_match.html",
)
)
def render_string_select():
    """Return an html select with string comparison options
Returns:
"""
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"string_select.html",
)
)
def render_initial_form():
"""Render the initial Query Builder
Returns:
"""
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"initial_form.html",
)
)
def render_remove_button():
"""Return html of a remove button
Returns:
"""
return _render_template(
join(
"core_explore_example_app", "user", "query_builder", "remove.html"
)
)
def render_add_button():
"""Return html of an add button
Returns:
"""
return _render_template(
join("core_explore_example_app", "user", "query_builder", "add.html")
)
def render_enum(enums):
"""Return html select from an enumeration
Args:
enums:
Returns:
"""
context = {
"enums": enums,
}
return _render_template(
join("core_explore_example_app", "user", "query_builder", "enum.html"),
context,
)
def render_new_query(tag_id, query, is_first=False):
"""Return an html string for a new query
Args:
tag_id:
query:
is_first:
Returns:
"""
context = {"tagID": tag_id, "query": query, "first": is_first}
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"new_query.html",
),
context,
)
def render_new_criteria(tag_id):
"""Return an html string for a new query
Args:
tag_id:
Returns:
"""
context = {
"tagID": tag_id,
}
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"new_criteria.html",
),
context,
)
def render_sub_elements_query(parent_name, form_fields):
"""Return an html string for a query on sub-elements
    Args:
    parent_name:
    form_fields:
    Returns:
"""
context = {
"parent_name": parent_name,
"form_fields": form_fields,
}
return _render_template(
join(
"core_explore_example_app",
"user",
"query_builder",
"sub_elements_query.html",
),
context,
)
def get_element_value(element_field):
"""Get value from field
Args:
element_field:
Returns:
"""
return element_field["value"] if "value" in element_field else None
def get_element_comparison(element_field):
"""Get comparison operator from field
Args:
element_field:
Returns:
"""
return (
element_field["comparison"] if "comparison" in element_field else "is"
)
def get_user_inputs(element_type, data_structure_element, default_prefix):
"""Get user inputs from element type
Args:
element_type:
data_structure_element:
default_prefix:
Returns:
"""
try:
if element_type is not None and element_type.startswith(
"{0}:".format(default_prefix)
):
# numeric
if element_type in get_xsd_numbers(default_prefix):
user_inputs = render_numeric_select() + render_value_input()
# gregorian date
elif element_type in get_xsd_gregorian_types(default_prefix):
user_inputs = (
render_gregorian_strict_match() + render_value_input()
)
# string
else:
user_inputs = render_string_select() + render_value_input()
else:
# enumeration
enums = get_enumerations(data_structure_element)
user_inputs = render_enum(enums)
except Exception:
# default renders string form
user_inputs = render_string_select() + render_value_input()
return user_inputs
def _render_template(template_path, context=None):
"""Return an HTML string rendered from the template
Args:
template_path:
Returns:
"""
if context is None:
context = {}
template = loader.get_template(template_path)
return template.render(context)
| true |
ca4ae8be723218d9dc499b5ee4622580efce0834
|
Python
|
Kolwankar-Siddhiraj/MushroomClassificationProjectML
|
/Mashroom/Logger/logger.py
|
UTF-8
| 697 | 2.96875 | 3 |
[] |
no_license
|
from datetime import datetime
class Logs:
def __init__(self, file):
self.filename = file
now = datetime.now()
current_time = now.strftime("%Y-%m-%d <> %H:%M:%S")
file_obj = open(self.filename, "a+")
file_obj.write("\n"+ current_time+ "<:>" +"New Logger instance created !\n\n")
file_obj.close()
def addLog(self, log_level, log_message):
print("Logger file : Logs class")
now = datetime.now()
current_time = now.strftime("%Y-%m-%d <> %H:%M:%S")
logfile = open(self.filename, "a+")
logfile.write(current_time + " <:> " + log_level + " <:> " + log_message + "\n")
logfile.close()
| true |
b740b1143d72ca43750a47af25bd194132756084
|
Python
|
DataScienceResearchPeru/epidemiologic-calculator
|
/epical/models/covid_seir_d.py
|
UTF-8
| 1,839 | 2.59375 | 3 |
[] |
no_license
|
import numpy as np
from scipy.integrate import odeint
from .base import Covid19Interface
# Epidemiological parameters
A1 = 0.415  # transmission between SUSCEPTIBLE and INFECTED
A2 = 0.70  # latent period
A3 = 0.05  # recovery
A4 = 0.00  # death
class CovidSeirD(Covid19Interface):
    def model(self, initial_conditions, duration, epidemiological_parameters=None):
        """EPIDEMIOLOGICAL POPULATIONS.
        Susceptible (S) : initial_conditions[0]
        Exposed (E) : initial_conditions[1]
        Infected (I) : initial_conditions[2]
        Recovered (R) : initial_conditions[3]
        Deceased (D) : initial_conditions[4]
        TOTAL EPIDEMIOLOGICAL POPULATION
        population = S + E + I + R + D
"""
population = (
initial_conditions[0]
+ initial_conditions[1]
+ initial_conditions[2]
+ initial_conditions[3]
+ initial_conditions[4]
)
time = np.arange(0, duration, 1)
# FIX
        # In the next function, please validate the use of t to avoid disabling
# pylint checks
def seird(
x, t, a1, a2, a3, a4
): # pylint: disable=unused-argument, too-many-arguments
"""SISTEMA DE ECUACIONES.
dS/dt = -a1*(SI/N)
dE/dt = +a1*(SI/N) - a2*E
dI/dt = +a2*E - a3*I - a4*I
dR/dt = a3*I
dD/dt = a4*I
"""
return np.array(
[
-a1 * x[0] * x[2] / population,
a1 * x[0] * x[2] / population - a2 * x[1],
a2 * x[1] - a3 * x[2] - a4 * x[2],
a3 * x[2],
a4 * x[2],
]
)
return odeint(seird, initial_conditions, time, (A1, A2, A3, A4)), time
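# Hedged usage sketch, added for illustration (not part of the original module).
# It assumes CovidSeirD() can be built with no constructor arguments and uses a
# made-up population of 1,000 people with a single initial infection; the
# ordering of initial_conditions follows the docstring above: [S, E, I, R, D].
def _example_run():
    demo_initial_conditions = [999, 0, 1, 0, 0]
    trajectories, days = CovidSeirD().model(demo_initial_conditions, duration=120)
    # trajectories: one row per day, one column per compartment (120 x 5)
    return trajectories.shape, days.shape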
| true |
8a2c0b7ec3d0b02c1a8073959a01408531790b71
|
Python
|
Lash-360/Coursera_Capstone
|
/Week 2/Analysis.py
|
UTF-8
| 5,194 | 2.8125 | 3 |
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import matplotlib as mpl
import matplotlib.ticker as ticker
from sklearn import preprocessing
%matplotlib inline
!conda install -c anaconda xlrd --yes
#Download Seattle Police Department Accident data
!wget -O Data_Collisions.csv https://s3.us.cloud-object-storage.appdomain.cloud/cf-courses-data/CognitiveClass/DP0701EN/version-2/Data-Collisions.csv
df = pd.read_csv('Data_Collisions.csv')
df.head()
df.shape
df.columns
# ### Clean Data
# Data visualization and pre-processing
# Let's see how many of each class is in our data set
# #### Evaluating for Missing Data
# The missing values are converted to Python's default. We use pandas' built-in functions (.isnull() / .notnull()) to identify these missing values.
missing_data = df.isnull()
missing_data.head()
# #### Count missing values in each column
for column in missing_data.columns.values.tolist():
print(column)
print (missing_data[column].value_counts())
print("")
# Based on the summary above, the following columns contain missing data:
# - "ADDRTYPE": 1926 missing
# - "INTKEY": 65070 missing
# - "LOCATION": 2677 missing
# - "EXCEPTRSNCODE": 84811 missing
# - "EXCEPTRSNDESC": 5638 missing
# - "COLLISIONTYPE": 4904 missing
# - "JUNCTIONTYPE": 6329 missing
# - "INATTENTIONIND": 164868 missing
# - "UNDERINFL": 4884 missing
# - "WEATHER": 5081 missing
# - "ROADCOND": 5012 missing
# - "LIGHTCOND": 5170 missing
# - "PEDROWNOTGRNT": 190006 missing
# - "SDOTCOLNUM": 79737 missing
# - "SPEEDING": 185340 missing
# - "ST_COLCODE": 18 missing
# - "ST_COLDESC": 4904 missing
# - "X": 5334 missing
# - "Y": 5334 missing
# The following columns will be dropped because they have too many missing values, which would affect the analysis:
# - "INATTENTIONIND": 164868 missing
# - "PEDROWNOTGRNT": 190006 missing
# - "SPEEDING": 185340 missing
#Drop data that are either irrelevant or the True value is more than 20%
to_drop =['SPEEDING', 'PEDROWNOTGRNT', 'INATTENTIONIND', 'INTKEY', 'SDOTCOLNUM',
'INATTENTIONIND', 'JUNCTIONTYPE', 'EXCEPTRSNCODE', 'X', 'Y', 'OBJECTID',
'COLDETKEY', 'EXCEPTRSNDESC', 'INCDATE', 'INCDTTM', 'SDOT_COLCODE',
'SDOT_COLDESC', 'SDOTCOLNUM', 'ST_COLCODE', 'ST_COLDESC', 'SEGLANEKEY',
'CROSSWALKKEY', 'INTKEY', 'REPORTNO', 'STATUS', 'HITPARKEDCAR', 'LOCATION',
'SEVERITYDESC', 'COLLISIONTYPE', 'INCKEY', 'PEDCOUNT', 'PEDCYLCOUNT',
'SEVERITYCODE.1', 'UNDERINFL', 'LIGHTCOND']
df.drop(to_drop, axis = 1, inplace = True)
df.shape
df.columns
df.info()
df['SEVERITYCODE'].value_counts()
df['ADDRTYPE'].value_counts()
df['PERSONCOUNT'].value_counts()
df['VEHCOUNT'].value_counts()
df['WEATHER'].value_counts()
df['ROADCOND'].value_counts()
# Remove values from ROADCOND because they are unknown
df = df [df['ROADCOND'] != 'Unknown']
# Remove values from WEATHER because they are unknown
df = df [df['WEATHER'] != 'Unknown']
df.info()
# The number of blank cells in each column
df.isnull().sum(axis = 0)
#Drop all null values
df.dropna(inplace=True)
#install seaborn
!conda install -c anaconda seaborn -y
bins = np.arange(df.PERSONCOUNT.min(),8,1)
plt.hist(df.VEHCOUNT, bins = bins)
plt.title('No of Vehicles In Accidents')
plt.ylabel('Number of Accidents')
plt.xlabel('Number of Vehicle')
bins = np.arange(df.PERSONCOUNT.min(),17,2)
plt.hist(df.PERSONCOUNT, bins = bins)
plt.title('No of People In Accidents')
plt.ylabel('Number of Accidents')
plt.xlabel('Number of People')
X = df.ADDRTYPE.value_counts().index  # keep label order aligned with the counts below
Data = df.ADDRTYPE.value_counts()
plt.bar(X, height=Data)
plt.xlabel('Location')
plt.ylabel('No of Accidents')
plt.title('No of Accidents in Relation to Location')
X = df.WEATHER.value_counts().index  # keep label order aligned with the counts below
Data = df.WEATHER.value_counts()
plt.bar(X, height=Data)
plt.xlabel('Weather')
plt.ylabel('No of Accidents')
plt.title('No of Accidents in Relation to Weather')
plt.xticks(rotation= 90)
plt.show()
X = df.ROADCOND.value_counts().index  # keep label order aligned with the counts below
Data = df.ROADCOND.value_counts()
plt.bar(X, height=Data)
plt.xlabel('Road Condition')
plt.ylabel('No of Accidents')
plt.title('No of Accidents in Relation to Road Condition')
plt.xticks(rotation= 90)
plt.show()
import seaborn as sns
bins = np.linspace(df.VEHCOUNT.min(), df.VEHCOUNT.max(), 10)
g = sns.FacetGrid(df, col="ADDRTYPE", hue="SEVERITYCODE", palette="Set1", col_wrap=2)
g.map(plt.hist, 'VEHCOUNT', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
bins = np.linspace(df.PERSONCOUNT.min(), df.PERSONCOUNT.max(), 18)
g = sns.FacetGrid(df, col="ADDRTYPE", hue="SEVERITYCODE", palette="Set1", col_wrap=2)
g.map(plt.hist, 'PERSONCOUNT', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
| true |
4075cbec01ec1e955e752f1e08ede5be06e29ccf
|
Python
|
prarthanasigedar/CARLA_2
|
/navigation/local_planner_behavior.py
|
UTF-8
| 13,333 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module contains a local planner to perform
low-level waypoint following based on PID controllers. """
from collections import deque
from enum import Enum
import numpy as np
import math
import cv2
import matplotlib.pyplot as plt
import carla
from agents.navigation.controller import VehiclePIDController
from agents.tools.misc import distance_vehicle, draw_waypoints
from agents.navigation.rrt_grid import RRT
class RoadOption(Enum):
"""
RoadOption represents the possible topological configurations
when moving from a segment of lane to other.
"""
VOID = -1
LEFT = 1
RIGHT = 2
STRAIGHT = 3
LANEFOLLOW = 4
CHANGELANELEFT = 5
CHANGELANERIGHT = 6
class LocalPlanner(object):
"""
LocalPlanner implements the basic behavior of following a trajectory
of waypoints that is generated on-the-fly.
The low-level motion of the vehicle is computed by using two PID controllers,
one is used for the lateral control
and the other for the longitudinal control (cruise speed).
When multiple paths are available (intersections)
this local planner makes a random choice.
"""
# Minimum distance to target waypoint as a percentage
# (e.g. within 80% of total distance)
# FPS used for dt
FPS = 20
def __init__(self, agent):
"""
:param agent: agent that regulates the vehicle
:param vehicle: actor to apply to local planner logic onto
"""
self._vehicle = agent.vehicle
self._map = agent.vehicle.get_world().get_map()
self._target_speed = None
self.sampling_radius = None
self._min_distance = None
self._current_waypoint = None
self.target_road_option = None
self._next_waypoints = None
self.target_waypoint = None
self._vehicle_controller = None
self._global_plan = None
self._pid_controller = None
self.waypoints_queue = deque(maxlen=20000) # queue with tuples of (waypoint, RoadOption)
self._buffer_size = 5
self._waypoint_buffer = deque(maxlen=self._buffer_size)
self.rrt_buffer = deque(maxlen=10000)
self.cw_x = None
        self.cw_y = None
self.tw_x = None
self.tw_y = None
self._dist = None
self._alpha = None
self._b = None
self._a = None
self.path = None
self._init_controller() # initializing controller
def reset_vehicle(self):
"""Reset the ego-vehicle"""
self._vehicle = None
print("Resetting ego-vehicle!")
def _init_controller(self):
"""
Controller initialization.
dt -- time difference between physics control in seconds.
This is can be fixed from server side
using the arguments -benchmark -fps=F, since dt = 1/F
target_speed -- desired cruise speed in km/h
min_distance -- minimum distance to remove waypoint from queue
lateral_dict -- dictionary of arguments to setup the lateral PID controller
{'K_P':, 'K_D':, 'K_I':, 'dt'}
longitudinal_dict -- dictionary of arguments to setup the longitudinal PID controller
{'K_P':, 'K_D':, 'K_I':, 'dt'}
"""
# Default parameters
self.args_lat_hw_dict = {
'K_P': 0.75,
'K_D': 0.02,
'K_I': 0.4,
'dt': 1.0 / self.FPS}
self.args_lat_city_dict = {
'K_P': 0.58,
'K_D': 0.02,
'K_I': 0.5,
'dt': 1.0 / self.FPS}
self.args_long_hw_dict = {
'K_P': 0.37,
'K_D': 0.024,
'K_I': 0.032,
'dt': 1.0 / self.FPS}
self.args_long_city_dict = {
'K_P': 0.15,
'K_D': 0.05,
'K_I': 0.07,
'dt': 1.0 / self.FPS}
self._current_waypoint = self._map.get_waypoint(self._vehicle.get_location())
self._global_plan = False
self._target_speed = self._vehicle.get_speed_limit()
self._min_distance = 3
def set_speed(self, speed):
"""
Request new target speed.
:param speed: new target speed in km/h
"""
self._target_speed = speed
def set_global_plan(self, current_plan, clean=False):
"""
Sets new global plan.
:param current_plan: list of waypoints in the actual plan
"""
for elem in current_plan:
self.waypoints_queue.append(elem)
if clean:
self._waypoint_buffer.clear()
for _ in range(self._buffer_size):
if self.waypoints_queue:
self._waypoint_buffer.append(
self.waypoints_queue.popleft())
else:
break
self._global_plan = True
def get_incoming_waypoint_and_direction(self, steps=3):
"""
Returns direction and waypoint at a distance ahead defined by the user.
:param steps: number of steps to get the incoming waypoint.
"""
if len(self.waypoints_queue) > steps:
return self.waypoints_queue[steps]
else:
try:
wpt, direction = self.waypoints_queue[-1]
return wpt, direction
except IndexError as i:
print(i)
return None, RoadOption.VOID
return None, RoadOption.VOID
def occupancy_grid(self,img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = np.asarray(img)
#print(np.unique(img, return_counts=True))
pixel_value, pixel_freq = np.unique(img, return_counts=True)
vehicle_pixels = [pixel_value[i] for i in range (len(pixel_value)) if pixel_freq[i]<500]
vehicle_color = 164 #pixel value of the spawned vehicle in the BEV image
#start_pos = np.where(img == vehicle_color)
#print("start position is " , start_pos)
#centre_pos = np.asarray(((start_pos[0][0] + start_pos[0][-1])/2, (start_pos[-1][0] + start_pos[-1][-1])/2), dtype=np.int32)
#print("vehicle_centre is ", centre_pos)
#print("image shape is ", np.shape(img))
grid = np.ones((img.shape[0], img.shape[1]))
#print('grid shape is ', grid.shape)
grid[img == 0] = 0
grid[img == 150] = 0
for i in vehicle_pixels: # for pedestrians and other small moving objects
grid[img == i] = 0
grid[np.where(img == vehicle_color)]= 0.5
#cv2.imshow("Grid", grid)
return grid
def pixel_to_world(self,a,b):
dx = abs(int(a)-75)
d = math.sqrt((int(a)-75)**2 + (int(b)-168)**2)
print(dx)
print(d)
alpha = math.asin(dx/d)
gamma = math.radians(self.cw_yaw) + alpha # vehicle angle + alpha
d = d/4 # in pixel per metre
l_x = d * math.sin(gamma)
l_y = d * math.cos(gamma)
l = carla.Location(x= self.cw_x + l_x, y= self.cw_y + l_y)
return l
def run_step(self, target_speed=None,rgb=None, debug=True):
"""
Execute one step of local planning which involves
running the longitudinal and lateral PID controllers to
follow the waypoints trajectory.
:param target_speed: desired speed
:param debug: boolean flag to activate waypoints debugging
:return: control
"""
if target_speed is not None:
self._target_speed = target_speed
else:
self._target_speed = self._vehicle.get_speed_limit()
if len(self.waypoints_queue) == 0:
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 1.0
control.hand_brake = False
control.manual_gear_shift = False
return control
# Buffering the waypoints
if not self._waypoint_buffer:
for i in range(self._buffer_size):
if self.waypoints_queue:
for i in range(4):
#print(self.waypoints_queue[0])
self.waypoints_queue.popleft()
self._waypoint_buffer.append(
self.waypoints_queue.popleft())
print(self._waypoint_buffer)
else:
break
# Current vehicle waypoint
self._current_waypoint = self._map.get_waypoint(self._vehicle.get_location())
        #getting the coordinates of the current vehicle location
self.cw_x = self._current_waypoint.transform.location.x
self.cw_y = self._current_waypoint.transform.location.y
self.cw_yaw = self._current_waypoint.transform.rotation.yaw
print( "x and y of current wap", self.cw_x,self.cw_y)
print("current yaw", self.cw_yaw)
# Target waypoint
print("waypoint buffer value", self._waypoint_buffer[0])
self.target_waypoint, self.target_road_option = self._waypoint_buffer[0]
        #getting the coordinates of the target vehicle location
self.tw_x = self.target_waypoint.transform.location.x
self.tw_y = self.target_waypoint.transform.location.y
self.tw_yaw = self.target_waypoint.transform.rotation.yaw
print("target yaw", self.tw_yaw)
print( "x and y of target wap", self.tw_x,self.tw_y)
self._dist = math.sqrt((self.cw_x - self.tw_x)**2 + (self.cw_y - self.tw_y)**2)
print("hypotenuse is", self._dist)
self._dist = self._dist * 4 # pixel per metre = 4
self._alpha = self.cw_yaw - self.tw_yaw # absolute angle
print("alpha",self._alpha)
self._a = self._dist * math.sin(self._alpha) # finding the height and width according
self._b = self._dist * math.cos(self._alpha)
print(" a and b are", self._a,self._b)
self.oc_grid = self.occupancy_grid(rgb)
self.start_pos = cv2.circle(self.oc_grid, (75,168), 3, (0,255,0), 3)
self.target_pos = cv2.circle(self.start_pos, (75-int(self._a),168-int(self._b)), 3, (0,0,255), 3)
#print(75 -int(self._a))
#print(168-int(self._b))
cv2.imshow("grid", self.target_pos)
cv2.waitKey(1)
# end of part 1
#rrt_buffer
if not self.rrt_buffer:
if self._waypoint_buffer:
self.target_waypoint, self.target_road_option = self._waypoint_buffer[0]
self._waypoint_buffer.popleft()
print("goal is", 75-int(self._a), 168-int(self._b))
rrt = RRT(
start=[75, 168],
goal=[75-int(self._a), 168-int(self._b)],
grid = self.oc_grid)
#print(rrt)
self.path = rrt.planning(animation= True)
print("MAIN PATH",self.path)
self.path = self.path[:-2]
self.pathss = self.path
print("excluding path",self.pathss)
for (x,y) in self.path:
m = self.pixel_to_world(x,y)
m_waypoint = self._map.get_waypoint(m)
self.rrt_buffer.appendleft(m_waypoint)
if self.rrt_buffer:
self.local_target = self.rrt_buffer.popleft()
print(self.local_target, "local_target")
# plt.imshow(self.oc_grid, cmap='gray')
# plt.plot([x for (x, y) in self.path], [y for (x, y) in self.path], '-r')
# plt.plot(self.cw_x, self.cw_y, "xr")
# plt.plot(self.tw_x, self.tw_y, "xr")
# plt.grid(True)
# plt.axis([0, 336, 0, 150])
# plt.pause(0.01) # Need for Mac
# plt.show()
if target_speed > 50:
args_lat = self.args_lat_hw_dict
args_long = self.args_long_hw_dict
else:
args_lat = self.args_lat_city_dict
args_long = self.args_long_city_dict
self._pid_controller = VehiclePIDController(self._vehicle,
args_lateral=args_lat,
args_longitudinal=args_long)
control = self._pid_controller.run_step(self._target_speed, self.local_target)
# Purge the queue of obsolete waypoints
vehicle_transform = self._vehicle.get_transform()
#print(vehicle_transform)
max_index = -1
for i, (waypoint, _) in enumerate(self._waypoint_buffer):
if distance_vehicle(
waypoint, vehicle_transform) < self._min_distance:
max_index = i
if max_index >= 0:
for i in range(max_index + 1):
self._waypoint_buffer.popleft()
if debug:
draw_waypoints(self._vehicle.get_world(),
[self.local_target], 1.0)
return control
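# Hedged, standalone sketch (added for illustration; not used by the planner):
# the pixel-to-world conversion from pixel_to_world() reproduced with made-up
# numbers so the geometry is easy to follow. Assumes the same 4 pixels-per-metre
# scale and the ego vehicle drawn at pixel (75, 168) in the bird's-eye-view image.
def _example_pixel_to_world(a=80, b=120, vehicle_x=10.0, vehicle_y=20.0, vehicle_yaw_deg=0.0):
    dx = abs(int(a) - 75)
    d = math.sqrt((int(a) - 75) ** 2 + (int(b) - 168) ** 2)
    alpha = math.asin(dx / d)
    gamma = math.radians(vehicle_yaw_deg) + alpha
    d = d / 4  # pixels per metre
    return vehicle_x + d * math.sin(gamma), vehicle_y + d * math.cos(gamma)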
| true |
a4d0f98ca74e931a054881d6ff608f0631a0772a
|
Python
|
andrewreece/gauging-debate
|
/streaming/jobs/utils.py
|
UTF-8
| 17,281 | 2.5625 | 3 |
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
import time, json, boto3, re
from dateutil import parser, tz
from datetime import datetime, timedelta
from sentiment import *
from pyspark.sql import SQLContext, Row
import pyspark.sql.functions as sqlfunc
from pyspark.sql.types import *
search_terms = []
n_parts = 10 # number of paritions for RDD
def get_search_json(bucket,key):
''' Retrieves json of debate-related search terms from s3
Note: If we eventually start allowing custom search terms, we'll need to make sure
that the temporary file holding the custom search terms has a similar structure. '''
# Load nested JSON of search terms
s3 = boto3.resource('s3')
jdata = json.loads(s3.Object(bucket,key).get()['Body'].read())
return jdata
def pool_search_terms(j):
''' Short recursive routine to pull out all search terms in search-terms.json into a flattened list '''
if isinstance(j,dict):
for j2 in j.values():
pool_search_terms(j2)
else:
search_terms.extend( j )
return search_terms
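def _example_search_terms_shape():
    ''' Hedged sketch, never called: the nested search-terms JSON is assumed to be
        dicts of dicts that bottom out in lists of strings, which pool_search_terms()
        flattens into one list (the sample keys below are made up). '''
    sample = {"candidates": {"dem": {"clinton": ["hillary", "clinton"]},
                             "gop": {"trump": ["trump", "realdonaldtrump"]}},
              "general": ["demdebate"]}
    # returns something like ['hillary', 'clinton', 'trump', 'realdonaldtrump', 'demdebate']
    return pool_search_terms(sample)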
def get_hostname():
''' Determines whether we have a cluster up and running,
If so, returns master node private IP address for cluster coordination in spark-output.py '''
import boto3
client = boto3.client('emr')
''' list_clusters() is used here to find the current cluster ID
WARNING: this is a little shaky, as there may be >1 clusters running in production
better to search by cluster name as well as state
UPDATE 09 DEC: The GD cluster has the name "gauging_debate", so we can definitely restrict
list_clusters by that. (I think we actually already do that in another script,
maybe just find that and copy the code over here.)
'''
clusters = client.list_clusters(ClusterStates=['RUNNING','WAITING','BOOTSTRAPPING'])['Clusters']
clusters_exist = len(clusters) > 0
if clusters_exist:
cid = clusters[0]['Id']
master_instance = client.list_instances(ClusterId=cid,InstanceGroupTypes=['MASTER'])
hostname = master_instance['Instances'][0]['PrivateIpAddress']
else:
hostname = None
return hostname
def update_tz(d,dtype,only_tstamp=False):
''' Updates time zone for date stamp to US EST (the time zone of the debates) '''
if only_tstamp:
tstamp = d
else:
tstamp = d[1]
def convert_timezone(item,item_is_only_tstamp=False):
''' This interior function to update_tz does the actual conversion of timezones '''
if item_is_only_tstamp:
dt = item
else:
ts = item['timestamp']
dt = parser.parse(ts)
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/New_York')
utc = dt.replace(tzinfo=from_zone)
return utc.astimezone(to_zone)
if dtype == "sql": # if our return value is for Spark SQL
return Row(id=d[0], time=convert_timezone(tstamp))
elif dtype == "pandas": # if our return value is for non-Spark SQL (probably Pandas)
return convert_timezone(tstamp,only_tstamp)
def make_json(tweet,interval):
''' Get stringified JSOn from Kafka, attempt to convert to JSON
Note: The interval argument is BATCH_DURATION, ie. how many seconds each DStream collects for.
This is important here because we use it to round all tweet timestamps to a 'batchtime'
timestamp, which is rounded to the floor of the nearest interval.
Eg. interval = 30s, tstamp = 08:10:28 --> batchtime = 08:10:00
interval = 30s, tstamp = 08:10:32 --> batchtime = 08:10:30
We still retain the actual tweet timestamp, but the batchtime is what we use to store and
retrieve data from SDB (and it's how we render the chart on the front-end
WARNING: There seems to be a problem with the way we adjust for localtime here.
As of 09 DEC, the epoch timestamp that comes out of this function is not correctly
adjusting to EST (GMT-0400 or GMT-0500 depending on DST).
You currently take care of this on the front-end with a conditional offset (because
you do store the timezoned-timestamp correctly with the archival data, so you need an
if-statement to figure out whether to add an extra offset).
But you should really fix this here. It shouldn't be too hard to fix it. You're just
short on time at this writing. '''
try:
dt = datetime.now()
tstamp = datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,interval*(dt.second // interval))
local_tstamp = update_tz(tstamp,"pandas",only_tstamp=True)
batchtime = local_tstamp.strftime('%s')
return (batchtime, json.loads(tweet[1].decode('utf-8')))
except:
return "error on make_json"
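def _example_batchtime_rounding():
    ''' Hedged, illustrative sketch (never called): the floor-rounding described in
        the make_json() docstring, using a fixed timestamp instead of datetime.now()
        so the result is predictable. '''
    interval = 30
    dt = datetime(2015, 12, 9, 8, 10, 28)
    batch_dt = datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
                        interval * (dt.second // interval))
    # 08:10:28 with a 30s batch interval floors to 08:10:00
    return batch_dt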
def filter_tweets(item,terms):
''' Filters out the tweets we do not want. Filters include:
* No non-tweets (eg. delete commands)
* No retweets
* English language only
* No tweets with links
- We need to check both entities and media fields for this (is that true?)
* Matches at least one of the provided search terms '''
# Define regex pattern that covers all search terms
pattern = '|(\s|#|@)'.join(terms)
try:
return (isinstance(item,dict) and
('delete' not in item.keys()) and
('limit' not in item.keys()) and
('retweeted_status' not in item.keys()) and
(item['lang']=='en') and
(len(item['entities']['urls'])==0) and
('media' not in item['entities'].keys()) and
(re.search(pattern,item['text'],re.I) is not None)
)
except Exception, e:
return str(e)+"...We have this error under control"
#print
#print "This item is funny. Funny how?"
#print str(e)
#print 'here is the item'
#print item
#print
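def _example_search_pattern():
    ''' Hedged sketch (never called): what the joined regex used by filter_tweets()
        looks like for two made-up search terms. '''
    terms = ['debate', 'election']
    pattern = '|(\s|#|@)'.join(terms) # -> 'debate|(\s|#|@)election'
    # matches because '#election' appears in the text
    return re.search(pattern, 'Watching the #election tonight', re.I) is not None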
def get_relevant_fields(item,json_terms,debate_party):
''' Reduce the full set of metadata down to only those we care about, including:
* timestamp
* username
* text of tweet
* hashtags
* geotag coordinates (if any)
* location (user-defined in profile, not necessarily current location)
'''
the_tweet = item[1]
batchtime = item[0]
cands = json_terms['candidates'][debate_party]
mentioned = []
# loop over candidates, check if tweet mentions each one
for name, terms in cands.items():
p = '|(\s|#|@)'.join(terms) # regex allows for # hashtag, @ mention, or blank space before term
rgx = re.search(p,the_tweet['text'],re.I)
if rgx: # if candidate-specific search term is matched
mentioned.append( name ) # add candidate surname to mentioned list
if len(mentioned) == 0: # if no candidates were mentioned specifically
mentioned.append( "general" ) # then tweet must be a general reference to the debate
tweet_timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(the_tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
try:
return (the_tweet['id'],
{"timestamp": tweet_timestamp,
"batchtime": batchtime,
"username": the_tweet['user']['screen_name'],
"text": the_tweet['text'].encode('utf8').decode('ascii','ignore'),
"hashtags": [el['text'].encode('utf8').decode('ascii','ignore') for el in the_tweet['entities']['hashtags']],
"first_term": mentioned[0],
"search_terms": mentioned,
"multiple_terms": len(mentioned) > 1
}
)
except Exception,e:
print "this error is coming from get_relevant_fields"
print str(e)
print "this is item:"
print item
print
def make_row(d,doPrint=False):
tid = d[0]
tdata = d[1]
return Row(id =tid,
username =tdata['username'],
timestamp =tdata['timestamp'],
batchtime =tdata['batchtime'],
hashtags =tdata['hashtags'] if tdata['hashtags'] is not None else '',
text =tdata['text'],
search_terms =tdata['search_terms'],
multiple_terms =tdata['multiple_terms'],
first_term =tdata['first_term']
)
def process(rdd,json_terms,debate_party,domain_name='sentiment',n_parts=10,doPrint=False):
rdd.cache()
candidate_dict = {}
candidate_names = json_terms['candidates'][debate_party].keys()
candidate_names.append( 'general' )
for candidate in candidate_names:
candidate_dict[candidate] = {'party':debate_party if candidate is not 'general' else 'general',
'batchtime':'',
'num_tweets':'0',
'sentiment_avg':'',
'sentiment_std':'',
'highest_sentiment_tweet':'',
'lowest_sentiment_tweet':''
}
# default settings remove words scored 4-6 on the scale (too neutral).
# adjust with kwarg stopval, determines 'ignore spread' out from 5. eg. default stopval = 1.0 (4-6)
labMT = emotionFileReader()
# Get the singleton instance of SQLContext
sqlContext = getSqlContextInstance(rdd.context)
schema = StructType([StructField("batchtime", StringType() ),
StructField("first_term", StringType() ),
StructField("hashtags", ArrayType(StringType())),
StructField("id", IntegerType() ),
StructField("multiple_terms", BooleanType() ),
StructField("search_terms", ArrayType(StringType())),
StructField("text", StringType() ),
StructField("timestamp", StringType() ),
StructField("username", StringType() )
]
)
# Convert RDD[String] to RDD[Row] to DataFrame
row_rdd = rdd.map(lambda data: make_row(data))
df = sqlContext.createDataFrame(row_rdd, schema)
# how many tweets per candidate per batch?
df2 = (df.groupBy("first_term")
.count()
.alias('df2')
)
counts = (df2.map(lambda row: row.asDict() )
.map(lambda row: (row['first_term'],row['count']))
)
cRdd = rdd.context.parallelize( candidate_names, n_parts )
def update_dict(d):
data = d[0]
data['num_tweets'] = str(d[1]) if d[1] is not None else data['num_tweets']
return data
tmp = (cRdd.map( lambda c: (c, candidate_dict[c]), preservesPartitioning=True )
.leftOuterJoin( counts, numPartitions=n_parts )
.map( lambda data: (data[0], update_dict(data[1])) )
.collect()
)
candidate_dict = { k:v for k,v in tmp }
# Register as table
df.registerTempTable("tweets")
# loop over candidates, check if tweet mentions each candidate
for candidate in candidate_names:
if doPrint:
print
print 'CANDIDATE NAME:'
print candidate
print
try:
accum = rdd.context.accumulator(0)
query = "SELECT batchtime, text FROM tweets WHERE first_term='{}'".format(candidate)
result = sqlContext.sql(query)
scored = result.map( lambda x: (x.batchtime, (emotion(x.text,labMT), x.text)) ).cache()
scored.foreach(lambda x: accum.add(1))
batchtime = scored.first()[0]
if accum.value > 0:
accum2 = rdd.context.accumulator(0)
scored = scored.filter(lambda score: score[1][0][0] is not None).cache()
scored.foreach(lambda x: accum2.add(1))
if accum2.value > 1: # we want at least 2 tweets for highest and lowest scoring
high_parts = scored.takeOrdered(1, key = lambda x: -x[1][0][0])[0][1]
high_scores, high_tweet = high_parts
high_avg = str(high_scores[0])
high_tweet = high_tweet.encode('utf8').decode('ascii','ignore')
low_parts = scored.takeOrdered(1, key = lambda x: x[1][0][0])[0][1]
low_scores, low_tweet = low_parts
low_avg = str(low_scores[0])
low_tweet = low_tweet.encode('utf8').decode('ascii','ignore')
else:
high_avg = low_avg = high_tweet = low_tweet = ''
candidate_dict[candidate]['highest_sentiment_tweet'] = '_'.join([high_avg,high_tweet])
candidate_dict[candidate]['lowest_sentiment_tweet'] = '_'.join([low_avg,low_tweet])
sentiment = (result.map(lambda x: (1,x.text))
.reduceByKey(lambda x,y: ' '.join([str(x),str(y)]))
.map( lambda text: emotion(text[1],labMT) )
.collect()
)
sentiment_avg, sentiment_std = sentiment[0]
candidate_dict[candidate]['sentiment_avg'] = str(sentiment_avg)
candidate_dict[candidate]['sentiment_std'] = str(sentiment_std)
candidate_dict[candidate]['batchtime'] = batchtime
except Exception,e:
if doPrint:
print "Looks like this candidate doesn't have any data"
print str(e)
continue
import boto3,json
client = boto3.client('sdb')
for cname,cdata in candidate_dict.items():
attrs = []
attrs.append( {'Name':"data",'Value':json.dumps(candidate_dict[cname]),'Replace':False} )
attrs.append( {"Name":"timestamp", "Value": batchtime, "Replace":False} )
attrs.append( {"Name":"candidate", "Value": cname, "Replace":False} )
item_name = '_'.join([cname,batchtime])
if doPrint:
print
print "We are ready to store in db"
print item_name
#print attrs
try:
# write row of data to SDB
client.put_attributes(
DomainName= domain_name,
ItemName = item_name,
Attributes= attrs
)
except Exception,e:
print 'sdb write error: {}'.format(str(e))
#rdd.foreachPartition(lambda p: write_to_db(p,level='group'))
#except Exception, e:
# print
# print 'THERE IS AN ERROR!!!!'
# print str(e)
# print
# pass
# From Thouis 'Ray' Jones CS205
def quiet_logs(sc):
''' Shuts down log printouts during execution '''
logger = sc._jvm.org.apache.log4j
logger.LogManager.getLogger("org").setLevel(logger.Level.WARN)
logger.LogManager.getLogger("akka").setLevel(logger.Level.WARN)
logger.LogManager.getLogger("amazonaws").setLevel(logger.Level.WARN)
def set_end_time(minutes_forward=2):
''' This function is only for initial test output. We'll probably delete it soon.
It defines the amount of minutes we keep the tweet stream open for ingestion.
In production this will be open-ended, or it will be set based on when the debate ends.
'''
year = time.localtime().tm_year
month = time.localtime().tm_mon
day = time.localtime().tm_mday
hour = time.localtime().tm_hour
minute = time.localtime().tm_min
newmin = (minute + minutes_forward) % 60 # if adding minutes_forward goes over 60 min, take remainder
if newmin < minute:
hour = hour + 1
minute = newmin
else:
        minute = newmin  # use the computed minute instead of a hard-coded +2
return {"year":year,"month":month,"day":day,"hour":hour,"minute":minute}
# from docs: http://spark.apache.org/docs/latest/streaming-programming-guide.html#dataframe-and-sql-operations
def getSqlContextInstance(sparkContext):
''' Lazily instantiated global instance of SQLContext '''
if ('sqlContextSingletonInstance' not in globals()):
globals()['sqlContextSingletonInstance'] = SQLContext(sparkContext)
return globals()['sqlContextSingletonInstance']
def write_to_db(iterator,level='tweet',domain_name='tweets'):
''' Write output to AWS SimpleDB table after analysis is complete
- Uses boto3 and credentials file. (If AWS cluster, credentials are associated with creator.)
- UTF-8 WARNING!
* SDB does not like weird UTF-8 characters, including emojis.
* Currently we remove them entirely with .encode('utf8').decode('ascii','ignore')
* If we actually want to use emojis (or even reprint tweets accurately), we'll need to
figure out a way to preserve UTF weirdness.
* This is not only emojis, some smart quotes and apostrophes too, and other characters.
'''
''' NOTE: We ran into issues when we had a global import for boto3 in this script.
Assuming this has something to do with child nodes running this function but not the whole
script?
When we import boto3 inside this function, everything works.
'''
import boto3 # keep local boto import!
client = boto3.client('sdb', region_name='us-east-1')
''' write_to_db() is called by foreachPartition(), which passes in an iterator object automatically.
The iterator rows are each entry (for now, that means "each tweet") in the dataset.
Below, we use the implicitly-passed iterator to loop through each data point and write to SDB.
NOTE: Keep an eye on the UTF mangling needed. If you don't mangle, it barfs.
* The standard solutions (simple encode/decode conversions) do NOT work.
* See the process book (somewhere around NOV 21) for a few links discussing this problem.
* It's actually an issue with the way SDB has its HTTP headers set up, and it's fixable if you
hack the Ruby source code, but since we're using Boto3 it seems we can't get at the headers.
* You added a comment on the Boto3 source github page where this issue was being discussed,
make sure to check and see if the author has answered you!
'''
for row in iterator:
k,v = row
attrs = []
try:
for k2,v2 in v.items():
# If v2 IS A LIST: join as comma-separated string
if isinstance(v2,list):
v2 = ','.join([val for val in v2]) if len(v2)>0 else ''
# If v2 is BOOL: convert to string
elif isinstance(v2,bool):
v2 = str(v2)
# If v2 IS EMPTY: convert to empty string
elif v2 is None:
v2 = ''
# Get rid of all UTF-8 weirdness, including emojis.
if k2 != "batchtime":
v2 = v2.encode('utf8').decode('ascii','ignore')
attrs.append( {'Name':k2,'Value':v2,'Replace':False} )
except Exception, e:
print 'This error is from write_to_db'
print str(e)
print v
try:
# write row of data to SDB
client.put_attributes(
DomainName= domain_name,
ItemName = str(k),
Attributes= attrs
)
except Exception, e:
print "This error is from write_to_db"
print str(e)
print attrs
print
| true |
10e7acf216e7068dc184ca07d36a68b537e0accd
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03145/s678078916.py
|
UTF-8
| 67 | 2.59375 | 3 |
[] |
no_license
|
abc = list(map(int, input().split()))
print((abc[0] * abc[1]) // 2)
| true |
d3960d5662f223d4dd9c5e23b2f2db2033897b07
|
Python
|
developer579/Practice
|
/Python/Python Lesson/Second/Lesson9/Sample4.py
|
UTF-8
| 298 | 4 | 4 |
[] |
no_license
|
str = input("Enter a string: ")
key = input("Enter the text to search for: ")
res = str.find(key)
if res != -1:
    print(key, "was found in", str, "at position", res)
else:
    print(key, "was not found in", str)
| true |
29021130cb8bd712d2427d2772f5ed003d3cc6dc
|
Python
|
clodiap/PY4E
|
/15_sqlite.py
|
UTF-8
| 1,771 | 3.796875 | 4 |
[] |
no_license
|
import sqlite3
#The connect operation makes a "connection" to the database stored in the file music.sqlite3 in the current directory. If the file does not exist, it will be created. The reason this is called a "connection" is that sometimes the database is stored on a separate "database server" from the server on which we are running our application.
conn = sqlite3.connect('music.sqlite')
# A cursor is like a file handle that we can use to perform operations on the data stored in the database. Calling cursor() is very similar conceptually to calling open() when dealing with text files.
cur = conn.cursor()
#The first SQL command removes the Tracks table from the database if it exists. This pattern is simply to allow us to run the same program to create the Tracks table over and over again without causing an error. Note that the DROP TABLE command deletes the table and all of its contents from the database (i.e., there is no "undo").
cur.execute('DROP TABLE IF EXISTS Tracks')
cur.execute('CREATE TABLE Tracks(title TEXT, plays INTEGER)')
#The SQL INSERT command indicates which table we are using and then defines a new row by listing the fields we want to include (title, plays) followed by the VALUES we want placed in the new row. We specify the values as question marks (?, ?) to indicate that the actual values are passed in as a tuple ( 'My Way', 15 ) as the second parameter to the execute() call.
cur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)',
('Thunderstruck', 20))
cur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)',
('My Way', 15))
conn.commit()
print('Tracks:')
cur.execute('SELECT title, plays FROM Tracks')
for row in cur:
print(row)
cur.execute('DELETE FROM Tracks WHERE plays < 100')
cur.close()
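# A small, hedged extension of the same parameter-substitution pattern: the
# '?' placeholder also works for SELECT, and fetchall() collects every row.
# (Sketch only -- it assumes the cursor has not been closed yet.)
#
# cur.execute('SELECT title, plays FROM Tracks WHERE plays > ?', (10,))
# print(cur.fetchall())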
| true |
f61b1d315656aa3226467b755421d614aa04f969
|
Python
|
Jtaylorapps/Python-Algorithm-Practice
|
/recursivePractice.py
|
UTF-8
| 1,290 | 4.21875 | 4 |
[
"Apache-2.0"
] |
permissive
|
# Recursively reverse a string
def reverse_string(s):
if len(s) < 2:
return s
return reverse_string(s[1:]) + s[0]
print(reverse_string("1234") == "4321") # True
# Maps a given function over nested list
def map_f(f, arr, result=None):
if result is None:
result = []
for x in arr:
if isinstance(x, list):
map_f(f, x, result)
else:
result.append(f(x))
return result
print(map_f(lambda x: x * x, [1, 2, [3, 4, [5]]]) == [1, 4, 9, 16, 25]) # True
# Count the number of ways to change any given amount
# Fixed: use a start index so each combination of denominations is counted once
def count_change(val, coins, start=0):
    if val == 0:
        return 1
    if val < 0 or coins is None or start == len(coins):
        return 0
    # Either use coins[start] again, or skip ahead to the next denomination
    return count_change(val - coins[start], coins, start) + count_change(val, coins, start + 1)
print(count_change(10, [1, 5]) == 3)  # True
# Generate all permutations of a list recursively
def permute(arr, start=0):
end = len(arr)
if start == end and arr is not None:
print(arr) # O(n)
for i in range(start, end): # O(n!)
arr[start], arr[i] = arr[i], arr[start]
permute(arr, start + 1)
arr[start], arr[i] = arr[i], arr[start]
permute([1, 2, 3])
| true |
a47b9a1b251674c750a1f307f063136a006e62d9
|
Python
|
FloLangenfeld/RosettaSilentToolbox
|
/rstoolbox/analysis/sequence.py
|
UTF-8
| 30,382 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Jaume Bonet <jaume.bonet@gmail.com>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
Bruno Correia <bruno.correia@epfl.ch>
.. func:: sequential_frequencies
.. func:: sequence_similarity
.. func:: positional_sequence_similarity
.. func:: binary_similarity
.. func:: binary_overlap
.. func:: selector_percentage
.. func:: label_percentage
.. func:: positional_enrichment
"""
# Standard Libraries
import copy
import collections
import re
import operator
# External Libraries
import pandas as pd
import numpy as np
# This Library
from .SimilarityMatrix import SimilarityMatrix as SM
__all__ = ['sequential_frequencies', 'sequence_similarity',
'positional_sequence_similarity', 'binary_similarity',
'binary_overlap', 'selector_percentage', 'label_percentage',
'label_sequence', 'positional_enrichment']
def _get_sequential_table( seqType ):
"""
Generates the table to fill sequence data in order to create
a :class:`.SequenceFrame`
:param seqType: Type of sequence: ``protein``, ``protein_sse``,
``dna``, ``rna``.
:type seqType: :class:`str`
:return: :class:`dict`
:raise:
:ValueError: If ``seqType`` is not known.
"""
table = {}
extra = []
if seqType.lower() == "protein":
# X = UNKNOWN # * = GAP
# B = N or D # Z = E or Q
table = {
'C': [], 'D': [], 'S': [], 'Q': [], 'K': [],
'I': [], 'P': [], 'T': [], 'F': [], 'N': [],
'G': [], 'H': [], 'L': [], 'R': [], 'W': [],
'A': [], 'V': [], 'E': [], 'Y': [], 'M': [],
'X': [], '*': [], 'B': [], 'Z': []
}
extra = ['X', '*', 'B', 'Z']
elif seqType.lower() in ["dna", "rna"]:
# B = C or G or T # D = A or G or T
# H = A or C or T # K = G or T
# M = A or C # N = A or C or G or T
# R = A or G # S = C or G
# V = A or C or G # W = A or T
# Y = C or T
table = {
'C': [], 'A': [], 'T': [], 'G': [], 'X': [], '*': [],
'B': [], 'D': [], 'H': [], 'K': [], 'M': [], 'N': [],
'R': [], 'S': [], 'V': [], 'W': [], 'Y': []
}
if seqType.lower() == "rna":
table.setdefault( 'U', [])
table.pop('T', None)
extra = ['X', '*', 'B', 'D', 'H', 'K', 'M', 'N', 'R', 'S', 'V', 'W', 'Y']
elif seqType.lower() == "protein_sse":
table = {
'H': [], 'E': [], 'L': [], '*': [], 'G': []
}
extra = ['*', 'G']
else:
raise ValueError("sequence type {0} unknown".format(seqType))
return table, extra
def _sequence_similarity( qseq, rseq, matrix ):
if len(qseq) != len(rseq):
raise ValueError("Comparable sequences have to be the same size.")
raw, idn, pos, neg = 0, 0, 0, 0
ali = []
pres = []
for i, qseqi in enumerate(qseq):
sc = matrix.get_value(qseqi, rseq[i])
pres.append(sc)
raw += sc
if qseqi == rseq[i]:
idn += 1
pos += 1
ali.append(rseq[i])
elif sc > 0:
pos += 1
ali.append("+")
else:
neg += 1
ali.append(".")
return raw, idn, pos, neg, "".join(ali), pres
def _positional_similarity( qseq, rseq, matrix ):
raw, idn, pos, neg = 0, 0, 0, 0
for _, qseqi in enumerate(qseq):
sc = matrix.get_value(qseqi, rseq)
raw += sc
if qseqi == rseq:
idn += 1
if sc > 0:
pos += 1
else:
neg += 1
return raw, idn, pos, neg
def sequential_frequencies( df, seqID, query="sequence", seqType="protein",
cleanExtra=True, cleanUnused=-1 ):
"""Generates a :class:`.SequenceFrame` for the frequencies of the sequences in the
:class:`.DesignFrame` with ``seqID`` identifier.
If there is a ``reference_sequence`` for this ``seqID``, it will also
be attached to the :class:`.SequenceFrame`.
All letters in the sequence will be capitalized. All symbols that
do not belong to ``string.ascii_uppercase`` will be transformed to `"*"`
as this is the symbol recognized by the substitution matrices as ``gap``.
This function is directly accessible through some :class:`.DesignFrame` methods.
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`~pandas.DataFrame`]
:param str seqID: |seqID_param|.
:param str query: |query_param|.
:param str seqType: |seqType_param| and ``protein_sse``.
:param bool cleanExtra: |cleanExtra_param|.
:param float cleanUnused: |cleanUnused_param|.
:return: :class:`.SequenceFrame`
.. seealso::
:meth:`.DesignFrame.sequence_frequencies`
:meth:`.DesignFrame.sequence_bits`
:meth:`.DesignFrame.structure_frequencies`
:meth:`.DesignFrame.structure_bits`
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import sequential_frequencies
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': 'AB'})
...: df = sequential_frequencies(df, 'B')
...: df.head()
"""
from rstoolbox.components import SequenceFrame
def count_instances( seq, table ):
t = copy.deepcopy(table)
c = collections.Counter(seq)
for aa in table:
_ = c[aa]
if _ > 0:
t[aa] = float(_) / len(seq)
else:
t[aa] = 0
return t
# Cast if possible, so that we can access the different methods of DesignFrame
if df._subtyp != 'design_frame' and isinstance(df, pd.DataFrame):
from rstoolbox.components import DesignFrame
df = DesignFrame(df)
# Get all sequences; exclude empty ones (might happen) and uppercase all residues.
sserie = df.get_sequential_data(query, seqID).replace('', np.nan).dropna().str.upper()
# Get the table to fill
table, extra = _get_sequential_table( seqType )
# Fill the table with the frequencies
sserie = sserie.apply(lambda x: pd.Series(list(x)))
sserie = sserie.apply(lambda x: pd.Series(count_instances(x.str.cat(), table))).T
# Create the SequenceFrame
dfo = SequenceFrame(sserie)
dfo.measure("frequency")
dfo.extras( extra )
# Attach the reference sequence if there is any
if df.has_reference_sequence(seqID):
dfo.add_reference(seqID, sequence=df.get_reference_sequence(seqID),
shift=df.get_reference_shift(seqID))
dfo.delete_extra( cleanExtra )
dfo.delete_empty( cleanUnused )
dfo.clean()
shft = df.get_reference_shift(seqID)
# Shift the index so that the index of the SequenceFrame == PDB count
if isinstance(shft, int):
dfo.index = dfo.index + shft
else:
dfo.index = shft
return dfo
def sequence_similarity( df, seqID, key_residues=None, matrix="BLOSUM62" ):
"""Evaluate the sequence similarity between each decoy and the ``reference_sequence``
for a given ``seqID``.
Sequence similarity is understood in the context of substitution matrices. Thus, a part from
identities, also similarities can be evaluated.
It will return the input data container with several new columns:
=============================== ===================================================
New Column Data Content
=============================== ===================================================
**<matrix>_<seqID>_raw** Score obtained by applying ``<matrix>``
**<matrix>_<seqID>_perc** Score obtained by applying ``<matrix>`` over score \
of reference_sequence against itself
**<matrix>_<seqID>_identity** Total identity matches
**<matrix>_<seqID>_positive** Total positive matches according to ``<matrix>``
    **<matrix>_<seqID>_negative**   Total negative matches according to ``<matrix>``
**<matrix>_<seqID>_ali** Representation of aligned residues
**<matrix>_<seqID>_per_res** Per position score of applying ``<matrix>``
=============================== ===================================================
Matrix name in each new column is setup in lowercase.
.. tip::
If ``key_residues`` are applied, the scoring is only used on those, but nothing in the
naming of the columns will indicate a partial evaluation. It is important to keep that in
mind moving forward on whatever analysis you are performing.
Running this function multiple times (different key_residue selections, for example)
adds suffix to the previously mentioned columns following pandas' merge naming
logic (_x, _y, _z, ...).
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`~pandas.DataFrame`]
:param str seqID: |seqID_param|.
:param key_residues: |keyres_param|.
:type key_residues: |keyres_types|
:param str matrix: |matrix_param|. Default is ``BLOSUM62``.
:return: :class:`.DesignFrame`.
:raises:
:AttributeError: |designframe_cast_error|.
:KeyError: |seqID_error|.
:AttributeError: |reference_error|.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import sequence_similarity
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': 'B'})
...: df.add_reference_sequence('B', df.get_sequence('B').values[0])
...: df = sequence_similarity(df.iloc[1:], 'B')
...: df.head()
"""
from rstoolbox.components import DesignFrame
# We don't need to try to cast, as reference_sequence is needed anyway
if not isinstance(df, DesignFrame):
raise AttributeError("Input data has to be a DesignFrame with a reference sequence.")
if not df.has_reference_sequence(seqID):
raise AttributeError("There is no reference sequence for seqID {}".format(seqID))
if not "sequence_{}".format(seqID) in df:
raise KeyError("Sequence {} not found in decoys.".format(seqID))
# Get matrix data
mat = SM.get_matrix(matrix)
# Get total score of the reference (depending on the matrix, identities != 1)
ref_seq = df.get_reference_sequence(seqID, key_residues)
ref_raw, _, _, _, _, _ = _sequence_similarity(ref_seq, ref_seq, mat)
# Get only the key residues and apply similarity analysis
df2 = df._constructor(df.get_sequence(seqID, key_residues))
df2 = df2.apply(
lambda x: pd.Series(_sequence_similarity(x.get_sequence(seqID), ref_seq, mat)), axis=1)
df2[6] = df2[0] / ref_raw
df2 = df2.rename(pd.Series(["{0}_{1}_raw".format(matrix.lower(), seqID),
"{0}_{1}_identity".format(matrix.lower(), seqID),
"{0}_{1}_positive".format(matrix.lower(), seqID),
"{0}_{1}_negative".format(matrix.lower(), seqID),
"{0}_{1}_ali".format(matrix.lower(), seqID),
"{0}_{1}_per_res".format(matrix.lower(), seqID),
"{0}_{1}_perc".format(matrix.lower(), seqID)]),
axis="columns").rename(pd.Series(df.index.values), axis="rows")
return pd.concat([df.reset_index(drop=True),
df2.reset_index(drop=True)], axis=1)
def positional_sequence_similarity( df, seqID=None, ref_seq=None,
key_residues=None, matrix="BLOSUM62" ):
"""Per position identity and similarity against a ``reference_sequence``.
Provided a data container with a set of sequences, it will evaluate the percentage of
identities and similarities that the whole set has against a ``reference_sequence``.
It would do so by sequence position instead that by each individual sequence.
In a way, this generates an extreme simplification from a :class:`.SequenceFrame`.
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`.FragmentFrame`]
:param str seqID: |seqID_param|. Required when input is :class:`.DesignFrame`.
:param str ref_seq: Reference sequence. Required when input is :class:`.FragmentFrame`.
Will overwrite the reference sequence of :class:`.DesignFrame` if provided.
:param key_residues: |keyres_param|.
:type key_residues: |keyres_types|
:param str matrix: |matrix_param|. Default is ``BLOSUM62``.
:return: :class:`~pandas.DataFrame` - where rows are sequence positions and
columns are ``identity_perc`` and ``positive_perc``.
:raises:
:AttributeError: if the data passed is not in Union[:class:`.DesignFrame`,
:class:`.FragmentFrame`]. It will *not* try to cast a provided
:class:`~pandas.DataFrame`, as it would not be possible to know into which of
the two possible inputs it needs to be casted.
:AttributeError: if input is :class:`.DesignFrame` and ``seqID`` is not provided.
:KeyError: |seqID_error| when input is :class:`.DesignFrame`.
:AttributeError: |reference_error| when input is :class:`.DesignFrame`.
:AttributeError: if input is :class:`.FragmentFrame` and ``ref_seq`` is not provided.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import positional_sequence_similarity
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': 'B'})
...: df.add_reference_sequence('B', df.get_sequence('B').values[0])
...: df = positional_sequence_similarity(df.iloc[1:], 'B')
...: df.head()
"""
from rstoolbox.components import DesignFrame, FragmentFrame
from rstoolbox.components import get_selection
data = {"identity_perc": [], "positive_perc": []}
# Get matrix data
mat = SM.get_matrix(matrix)
if isinstance(df, DesignFrame):
if seqID is None:
raise AttributeError("seqID needs to be provided")
if not df.has_reference_sequence(seqID):
raise AttributeError("There is no reference sequence for seqID {}".format(seqID))
if not "sequence_{}".format(seqID) in df:
raise KeyError("Sequence {} not found in decoys.".format(seqID))
ref_seq = ref_seq if ref_seq is not None else df.get_reference_sequence(seqID)
seqdata = df.get_sequence(seqID)
seqdata = seqdata.apply(lambda x: pd.Series(list(x)))
for _, i in enumerate(seqdata.columns.values):
qseq = "".join(seqdata[i].tolist())
_, idn, pos, _ = _positional_similarity( qseq, ref_seq[_], mat )
data["identity_perc"].append(float(idn) / float(len(qseq)))
data["positive_perc"].append(float(pos) / float(len(qseq)))
elif isinstance(df, FragmentFrame):
if ref_seq is None:
raise AttributeError("ref_seq needs to be provided")
for i in df["position"].drop_duplicates().values:
qseq = "".join(df[df["position"] == i]["aa"].values)
_, idn, pos, _ = _positional_similarity( qseq, ref_seq[i - 1], mat )
data["identity_perc"].append(float(idn) / float(len(qseq)))
data["positive_perc"].append(float(pos) / float(len(qseq)))
else:
raise AttributeError("Input data has to be a DesignFrame with a "
"reference sequence or a FragmentFrame.")
dfo = pd.DataFrame(data)
# Get shift only from DesignFrame; FragmentFrame does not have one
shft = df.get_reference_shift(seqID) if isinstance(df, DesignFrame) else 1
# Shift the index so that index == PDB count
if isinstance(shft, int):
dfo.index = dfo.index + shft
else:
dfo.index = shft
selection = list(get_selection(key_residues, seqID, list(dfo.index)))
selection = [x - 1 for x in selection] # -1 for array like count
return dfo.iloc[selection]
def binary_similarity( df, seqID, key_residues=None, matrix="IDENTITY"):
"""Binary profile for each design sequence against the ``reference_sequence``.
    Makes a :class:`.DesignFrame` with a new column mapping binary identity (0/1) against
    the ``reference_sequence``. If a matrix other than ``IDENTITY`` is provided,
    the binary sequence sets all positive values to ``1``.
=============================== ===================================================
New Column Data Content
=============================== ===================================================
**<matrix>_<seqID>_binary** Binary representation of the match with the
``reference_sequence``.
=============================== ===================================================
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`~pandas.DataFrame`]
:param str seqID: |seqID_param|.
:param key_residues: |keyres_param|.
:type key_residues: |keyres_types|
:param str matrix: |matrix_param|. Default is ``IDENTITY``.
:return: :class:`.DesignFrame`.
:raises:
:AttributeError: |designframe_cast_error|.
:KeyError: |seqID_error|.
:AttributeError: |reference_error|.
.. seealso::
:func:`.sequence_similarity`
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import binary_similarity
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': 'B'})
...: df.add_reference_sequence('B', df.get_sequence('B').values[0])
...: df = binary_similarity(df.iloc[1:], 'B')
...: df.head()
"""
dfss = sequence_similarity( df, seqID, key_residues, matrix=matrix )
alicolumn = "{0}_{1}_ali".format(matrix.lower(), seqID)
bincolumn = "{0}_{1}_binary".format(matrix.lower(), seqID)
dfss[bincolumn] = dfss.apply(lambda row: re.sub(r'\D', '1', re.sub(r'\.', '0', row[alicolumn])),
axis=1)
return pd.concat([df.reset_index(drop=True),
dfss[bincolumn].reset_index(drop=True)],
axis=1)
def binary_overlap( df, seqID, key_residues=None, matrix="IDENTITY" ):
"""Overlap the binary similarity representation of all decoys in a
:class:`.DesignFrame`.
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`~pandas.DataFrame`]
:param str seqID: |seqID_param|.
:param key_residues: |keyres_param|.
:type key_residues: |keyres_types|
:param str matrix: |matrix_param|. Default is ``IDENTITY``.
:return: :func:`list` of :class:`int` - ones and zeros for each
position of the length of the sequence
.. seealso::
:func:`.binary_similarity`
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import binary_overlap
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': 'B'})
...: df.add_reference_sequence('B', df.get_sequence('B').values[0])
...: binoverlap = binary_overlap(df.iloc[1:], 'B')
...: "".join([str(_) for _ in binoverlap])
"""
bincolumn = "{0}_{1}_binary".format(matrix.lower(), seqID)
if bincolumn not in df.columns.values:
df = binary_similarity(df, seqID, key_residues, matrix)
a = df[bincolumn].values
x = len(a[0])
result = [0] * x
for seq in a:
for _, b in enumerate(seq):
if bool(int(b)):
result[_] = 1
return result
def selector_percentage( df, seqID, key_residues, selection_name='selection' ):
"""Calculate the percentage coverage of a :class:`.Selection`
over the sequence.
Depends on sequence information for the ``seqID``.
Adds a new column to the data container:
==================================== =======================================================
New Column Data Content
==================================== =======================================================
**<selection_name>_<seqID>_perc** Percentage of the sequence covered by the key_residues.
==================================== =======================================================
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`.DesignSeries`]
:param str seqID: |seqID_param|.
:param key_residues: |keyres_param|.
:type key_residues: |keyres_types|
:param str selection_name: Prefix to add to the selection. Default is ``selection``.
:return: Union[:class:`.DesignFrame`, :class:`.DesignSeries`]
:raises:
:NotImplementedError: if the data passed is not in Union[:class:`.DesignFrame`,
:class:`.DesignSeries`].
:KeyError: |seqID_error|.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import selector_percentage
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_ssebig.minisilent.gz",
...: {'scores': ['score'], 'sequence': 'C'})
...: df = selector_percentage(df, 'C', '1-15')
...: df.head()
"""
from rstoolbox.components import DesignFrame, DesignSeries
colname = '{0}_{1}_perc'.format(selection_name, seqID)
if isinstance(df, DesignFrame):
df2 = df.apply(lambda row: selector_percentage(row, seqID, key_residues, selection_name),
axis=1, result_type='expand')
return df2
elif isinstance(df, DesignSeries):
seq1 = list(df.get_sequence(seqID))
seq2 = list(df.get_sequence(seqID, key_residues))
return df.append(pd.Series([float(len(seq2)) / len(seq1)], [colname]))
else:
raise NotImplementedError
def label_percentage( df, seqID, label ):
"""Calculate the percentage coverage of a ``label`` over the sequence.
Depends on sequence information and label data for the ``seqID``.
Adds a new column to the data container:
=========================== ====================================================
New Column Data Content
=========================== ====================================================
**<label>_<seqID>_perc** Percentage of the sequence covered by the ``label``.
=========================== ====================================================
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`.DesignSeries`]
:param str seqID: |seqID_param|.
    :param str label: Label identifier.
:param key_residues: |keyres_param|.
:type key_residues: |keyres_types|
:return: Union[:class:`.DesignFrame`, :class:`.DesignSeries`]
:raises:
:NotImplementedError: if the data passed is not in Union[:class:`.DesignFrame`,
:class:`.DesignSeries`].
:KeyError: |lblID_error|.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import label_percentage
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': '*',
...: 'labels': ['MOTIF']})
...: df = label_percentage(df, 'B', 'MOTIF')
...: df.head()
"""
from rstoolbox.components import DesignFrame, DesignSeries
colname = '{0}_{1}_perc'.format(label.upper(), seqID)
if isinstance(df, DesignFrame):
df2 = df.apply(lambda row: label_percentage(row, seqID, label),
axis=1, result_type='expand')
return df2
elif isinstance(df, DesignSeries):
try:
seq1 = list(df.get_sequence(seqID))
seq2 = list(df.get_sequence(seqID, df.get_label(label, seqID)))
return df.append(pd.Series([float(len(seq2)) / len(seq1)], [colname]))
except KeyError:
return df.append(pd.Series([0], [colname]))
else:
raise NotImplementedError
def label_sequence( df, seqID, label, complete=False ):
"""Gets the sequence of a ``label``.
Depends on label data for the ``seqID``.
Adds a new column to the data container:
=========================== ====================================================
New Column Data Content
=========================== ====================================================
**<label>_<seqID>_seq** Trimmed sequence by the ``label``.
=========================== ====================================================
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`.DesignSeries`]
:param str seqID: |seqID_param|.
:param str label: Label identifier.
:param bool complete: Only applies when input is a :class:`.DesignFrame`.
        Generates a gapped alignment considering the matches of ``label`` as those
of the highest matching decoy.
:return: Union[:class:`.DesignFrame`, :class:`.DesignSeries`]
:raises:
:NotImplementedError: if the data passed is not in Union[:class:`.DesignFrame`,
:class:`.DesignSeries`].
:KeyError: |lblID_error|.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.analysis import label_sequence
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_2seq.minisilent.gz",
...: {'scores': ['score'], 'sequence': '*',
...: 'labels': ['MOTIF']})
...: df = label_sequence(df, 'B', 'MOTIF')
...: df.head()
"""
from rstoolbox.components import DesignFrame, DesignSeries
colname = '{0}_{1}_seq'.format(label.upper(), seqID)
def get_all_decoy_labels(row, seqID, label):
try:
return list(np.array(row.get_label(label.upper(), seqID).to_list()) - 1)
except KeyError:
return []
if isinstance(df, DesignFrame):
if complete:
complete = set().union(*df.apply(get_all_decoy_labels, axis=1, args=(seqID, label)))
df2 = df.apply(lambda row: label_sequence(row, seqID, label, complete), axis=1, result_type='expand')
return df2
elif isinstance(df, DesignSeries):
try:
sele = list(np.array(df.get_label(label.upper(), seqID).to_list()) - 1) # Correct str count
seq = df.get_sequence(seqID)
if isinstance(complete, set):
gaps = complete.difference(set(sele))
seq = [s if i not in gaps else '-' for i, s in enumerate(list(seq))]
sele = sorted(list(complete))
return df.append(pd.Series(''.join(operator.itemgetter(*sele)(list(seq))), [colname]))
except KeyError:
return df.append(pd.Series('', [colname]))
else:
raise NotImplementedError
def positional_enrichment(df, other, seqID):
"""Calculates per-residue enrichment from sequences in the first :class:`.DesignFrame`
with respect to the second.
.. note::
Position / AA type pairs present in ``df`` but not ``other`` will have a value of
:data:`~np.inf`.
:param df: |df_param|.
:type df: Union[:class:`.DesignFrame`, :class:`~pandas.DataFrame`]
:param other: |df_param|.
:type other: Union[:class:`.DesignFrame`, :class:`~pandas.DataFrame`]
:param str seqID: |seqID_param|.
:return: :class:`.FragmentFrame` - with enrichment percentages.
:raises:
:NotImplementedError: if the data passed is not in Union[:class:`.DesignFrame`,
:class:`~pandas.DataFrame`].
:KeyError: |seqID_error|.
"""
from rstoolbox.components import DesignFrame
for i, x in enumerate([df, other]):
if not isinstance(x, DesignFrame):
if not isinstance(x, pd.DataFrame):
                raise NotImplementedError('Unknown input format')
else:
if i == 0:
df = DesignFrame(df)
else:
other = DesignFrame(other)
result = df.sequence_frequencies(seqID) / other.sequence_frequencies(seqID)
if df._reference == other._reference:
result.transfer_reference(df)
return result.replace(np.nan, 0)
| true |
98671700fd0dc0de7f9f5aa93d5edac157de70ab
|
Python
|
julianandrews/adventofcode
|
/2017/d06.py
|
UTF-8
| 1,235 | 3.265625 | 3 |
[] |
no_license
|
from utils import read_data
from utils.iterables import cycle_detect, repeat_apply
def redistribute(memory_banks):
result = list(memory_banks)
max_value = max(memory_banks)
max_ix = memory_banks.index(max_value)
result[max_ix] = 0
value, remainder = divmod(max_value, len(memory_banks))
for i in range(len(memory_banks)):
result[i] += value
for i in range(1, remainder + 1):
result[(max_ix + i) % len(memory_banks)] += 1
return tuple(result)
def steps_to_repeat(memory_banks):
return sum(cycle_detect(repeat_apply(redistribute, memory_banks)))
def cycle_length(memory_banks):
return cycle_detect(repeat_apply(redistribute, memory_banks))[1]
def run_tests():
assert redistribute((0, 2, 7, 0)) == (2, 4, 1, 2)
assert redistribute((2, 4, 1, 2)) == (3, 1, 2, 3)
assert redistribute((3, 1, 2, 3)) == (0, 2, 3, 4)
assert steps_to_repeat((0, 2, 7, 0)) == 5
assert cycle_length((0, 2, 7, 0)) == 4
if __name__ == "__main__":
run_tests()
print("All tests passed")
memory_banks = tuple(int(x) for x in read_data(6).split())
print("Part 1: {}".format(steps_to_repeat(memory_banks)))
print("Part 2: {}".format(cycle_length(memory_banks)))
| true |
db6166931fd13e2a55fe645c8b39ae5b5a97b03a
|
Python
|
jkelly37/Jack-kelly-portfolio
|
/CSCI-University of Minnesota Work/UMN-1133/Labs/py.py
|
UTF-8
| 159 | 3.03125 | 3 |
[] |
no_license
|
# CSci 1133 lecture2
# Jack Kelly
# Tf to tc
import random
list1 = [1]
i=0
while i<100:
    list1.append(random.randint(1, 1000))  # fixed: original assigned to a call and used the nonexistent random.rand
i = i + 1
print(list1)
| true |
2ea327e55fe5813f15c1979ad08daa0c37463eab
|
Python
|
NormanGadenya/DateOfBirthCode
|
/DateOfBirth.py
|
UTF-8
| 674 | 3.140625 | 3 |
[] |
no_license
|
# DateOfBirthCode
import calendar
from datetime import datetime
now=datetime.now()
year = now.year  # take the year straight from the datetime object
age=input('Enter your age: ')
yr=int(year)-int(age)
mt=input('Enter the month: ')
dy=input('Enter the date of the month: ')
cal=calendar.weekday(int(yr),int(mt),int(dy))
day=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']#list of the days
month=['january','february','march','april','may','june','july','august','september','october','november','december'] #list of the months
print('You were born on', day[cal], dy, month[int(mt)-1], yr)
# Wolimbwa Gadenya Norman Reg 16/u/12408/ps
| true |
478dc4a4920e61abc12b829cbd635b8c981bcaa8
|
Python
|
YodhaJi/MY-PROJECTS
|
/stat4.py
|
UTF-8
| 595 | 4.125 | 4 |
[] |
no_license
|
#digits = [1, 2, 3] # digits: Sample input
def stat4(digits):  # stat4(): function for statement 4
import random
#digits = (1, 2, 3) # digits: Sample input in the form of tuple so that it won't change its value.
s1 = list(digits) # s1: a variable used to store the value of digits in the form of list so as to make it mutable.
for i in range(3):
while True:
r = random.randint(0, 9)
if r not in s1 and r not in digits:
s1[i] = r
break
print(s1[0], '', s1[1], '', s1[2], ": Nothing is correct")
| true |
f7d1910afa187b7121e818fbe24fff0721245e4f
|
Python
|
yeesian/NUS-Bidding-History
|
/scripts/process_bidding_summary.py
|
UTF-8
| 2,533 | 2.890625 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# Cleaning NUS Bidding Summary
# ---
# (this is a continuation of the instructions (from step 7 onwards) at the [NUS-Bidding-History](https://github.com/yeesian/NUS-Bidding-History) repository.)
#
#
# libraries used:
# <codecell>
import pandas as pd
#print('pandas',pd.version.version)
# <codecell>
bid_summary = pd.read_csv('bidding_summary.csv',sep='|')
#bid_summary # notice how there are missing values in the first 2 columns"
# <codecell>
#print(bid_summary.head())
# <codecell>
bid_summary = bid_summary.fillna(method='pad') # we fill downwards
#bid_summary
# <codecell>
#print(bid_summary.head()) # now, observe the duplicate header rows (eg. row 0)
# <codecell>
bid_summary = bid_summary[bid_summary['Module'] != 'Module'] # filter out [duplicate] headers
#bid_summary
# <codecell>
#print(bid_summary.head())
# <codecell>
#set(bid_summary['Student_Type'])
# the source of a number of problems
# i) "NUS Students [P, G]" has a comma inside
# ii) 'Returning Students [P] and ' was cropped off
# iii) 'Returning Students and ' was also cropped off
# <codecell>
# remove the comma in the field: 'NUS Students [P, G]'
bid_summary.loc[bid_summary.Student_Type == 'NUS Students [P, G]', 'Student_Type'] = 'NUS Students [PG]'
#set(bid_summary['Student_Type']) # doublecheck, so we can save it as comma-separated values later
# <codecell>
bid_summary.loc[bid_summary.Student_Type == 'Returning Students [P] and ', 'Student_Type'] = 'Returning Students [P] and NUS Students [G]'
bid_summary.loc[bid_summary.Student_Type == 'Returning Students and ', 'Student_Type'] = 'Returning Students and New Students [P]'
#set(bid_summary['Student_Type']) # doublecheck
# <codecell>
#set(bid_summary['Acad_Yr']) # some of the years (0405 and 0506) are inconsistent
# <codecell>
bid_summary.loc[bid_summary.Acad_Yr == 405, 'Acad_Yr'] = 20042005
bid_summary.loc[bid_summary.Acad_Yr == 506, 'Acad_Yr'] = 20052006
#set(bid_summary['Acad_Yr']) # some of the years are broken
# <codecell>
#bid_summary.dtypes # let's check the datatype for the rest of the fields
# <codecell>
for header in ['Quota','No_of_Bidders','Lowest_Bid','Lowest_Succ_Bid','Highest_Bid']:
bid_summary[header] = bid_summary[header].map(int) # convert to Int64
#bid_summary
# <codecell>
#bid_summary.dtypes
# <codecell>
bid_summary.to_csv('nus_bidding_summary.csv',index=False)
# <codecell>
#bid_summary = pd.read_csv('nus_bidding_summary.csv')
#bid_summary # and we're done
| true |
4b893c3555ff812b23628cb4cba15a6633d6a88d
|
Python
|
nathancy/stackoverflow
|
/57850107-preprocess-text-remove-noise/preprocess_text_remove_noise.py
|
UTF-8
| 693 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
import cv2
import numpy as np
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
area = cv2.contourArea(c)
if area < 150:
cv2.drawContours(opening, [c], -1, (0,0,0), -1)
result = 255 - opening
cv2.imshow('thresh', thresh)
cv2.imshow('opening', opening)
cv2.imshow('result', result)
cv2.waitKey()
| true |
142fd5ce4a1a1fc44646e721f0765cd8ee8239e4
|
Python
|
greenorca/ECMan
|
/ui/ecLoginWizard.py
|
UTF-8
| 4,965 | 2.96875 | 3 |
[
"MIT"
] |
permissive
|
"""
Created on Feb 22, 2019
@author: sven
"""
from socket import gaierror
from PySide2.QtWidgets import QLabel, QLineEdit, QWizard, QWizardPage, QApplication, \
QGridLayout
from worker.sharebrowser import ShareBrowser
class EcLoginWizard(QWizard):
"""
    wizard for selection of CIFS/SMB based exam shares based on user-specified login credentials and server names
"""
PAGE_LOGON = 1
def __init__(self, parent=None, username="", servername="", domain=""):
"""
Constructor
"""
super(EcLoginWizard, self).__init__(parent)
self.setWizardStyle(QWizard.ModernStyle)
self.title = "An Netzwerk anmelden"
self.setPage(self.PAGE_LOGON, LoginPage(self, username, servername, domain))
self.setWindowTitle("ECMan - {}".format(self.title))
self.resize(450, 350)
self.server = None
self.defaultShare = None
class LoginPage(QWizardPage):
def __init__(self, parent, username="", servername="", domain=""):
super(LoginPage, self).__init__(parent)
self.setTitle("Server Authentifizierung")
lblUsername = QLabel("Netzwerk - Benutzername")
editUsername = QLineEdit(username)
self.registerField("username", editUsername)
lblUsername.setBuddy(editUsername)
lblDomainName = QLabel("Domäne")
editDomainName = QLineEdit(domain)
self.registerField("domainname", editDomainName)
lblDomainName.setBuddy(editDomainName)
lblPasswort = QLabel("Passwort")
editPasswort = QLineEdit()
editPasswort.setEchoMode(QLineEdit.Password)
self.registerField("password*", editPasswort)
lblPasswort.setBuddy(editPasswort)
lblServerName = QLabel("Servername")
editServerName = QLineEdit(servername)
self.registerField("servername", editServerName)
lblServerName.setBuddy(editServerName)
layout = QGridLayout()
layout.addWidget(lblUsername)
layout.addWidget(editUsername)
layout.addWidget(lblDomainName)
layout.addWidget(editDomainName)
layout.addWidget(lblPasswort)
layout.addWidget(editPasswort)
layout.addWidget(lblServerName)
layout.addWidget(editServerName)
self.setButtonText(QWizard.FinishButton,"Ok")
        self.setButtonText(QWizard.BackButton,"Back")
        self.setButtonText(QWizard.CancelButton,"Cancel")
self.setLayout(layout)
def validatePage(self):
"""
        only proceed to next wizard page if given credentials and server name are valid
"""
print("validating page")
serverName = self.wizard().field("servername")
serverName = serverName.replace("\\", "/") # get rid of those sick backslashes
serverName = serverName.replace("//", "") # remove leading //
parts = serverName.split("/")
serverName = parts[0]
hiddenShareName = parts[1] if len(parts) > 1 else None # fetch hidden share name
server = ShareBrowser(serverName,
self.wizard().field("username"),
self.wizard().field("password"),
self.wizard().field("domainname"))
try:
if server.connect() == True:
self.wizard().server = server
if hiddenShareName is None: # in case of regular smb/cifs shares
shares = server.getShares()
if shares != None and len(shares) > 0:
return True
else:
print("connecting to a hidden share")
server.defaultShare = hiddenShareName
return True
else:
raise Exception("logon error")
except gaierror as ex:
            # we probably want to distinguish between logon errors and server-not-found errors,
# then disable OK button
self.setSubTitle("Server nicht gefunden: " + str(ex))
except Exception as ex:
self.setSubTitle("Anmeldefehler")
print(ex)
return False
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
wizard = EcLoginWizard(parent=None, username="sven.schirmer@wiss-online.ch",
domain="", servername="NSSGSC01/LBV")
wizard = EcLoginWizard(parent=None, username="sven",
domain="HSH", servername="odroid")
wizard.setModal(True)
result = wizard.exec_()
print("I'm done, wizard result=" + str(result))
'''
smbclient -k //win-serverName/share$/folder
tree connect failed: NT_STATUS_BAD_NETWORK_NAME
smbclient -k //win-serverName/share$
Try "help" to get a list of possible commands.
smb: \>
So he can only connect if I use the path to the hidden share. I can't directly connect to sub-directories inside the hidden parent share.
'''
| true |
0b88a4bd64df0717ec2c4763bc4d2d7003a4008a
|
Python
|
VikonLBR/tkinter_tutorial
|
/t1.py
|
UTF-8
| 490 | 3.3125 | 3 |
[] |
no_license
|
import tkinter as tk
win = tk.Tk()
win.title('my 1s tk window')
win.geometry('400x500')
var = tk.StringVar()
label = tk.Label(win, textvariable=var, bg='orange', font=('Arial', 12), width=12, height=2)
label.pack()
flag = False
def hit_me():
global flag
if not flag:
flag = True
var.set('I\'m here')
else:
flag = False
var.set('')
button = tk.Button(win, text='hit me', width=5, height=5, command=hit_me)
button.pack()
win.mainloop()
| true |
9a2b8ba620bda3ba1e8ad77d0041dae1899945e8
|
Python
|
Carl-Chinatomby/ridecell
|
/api/v1/scooters/models.py
|
UTF-8
| 2,463 | 2.828125 | 3 |
[] |
no_license
|
from django.db import models
from django.utils import timezone
class Scooter(models.Model):
latitude = models.DecimalField(max_digits=9, decimal_places=6) # ideally use a spatial db and geodjango
longitude = models.DecimalField(max_digits=9, decimal_places=6)
is_reserved = models.BooleanField(default=False)
def __repr__(self):
return '<{}, {}>'.format(self.latitude, self.longitude)
def __str__(self):
return '<{}, {}>'.format(self.latitude, self.longitude)
@classmethod
def get_available_scooters_by_radius(cls, latitude, longitude, radius):
min_latitude = min(latitude - radius/2, latitude + radius/2)
max_latitude = max(latitude - radius/2, latitude + radius/2)
min_longitude = min(longitude - radius/2, longitude + radius/2)
max_longitude = max(longitude - radius/2, longitude + radius/2)
return cls.objects.filter(
latitude__lte=max_latitude,
latitude__gte=min_latitude,
longitude__lte=max_longitude,
longitude__gte=min_longitude,
is_reserved=False,
).all()
@classmethod
def get_scooter_by_id(cls, scooter_id):
return cls.objects.filter(pk=scooter_id).first()
def reserve(self):
self.is_reserved = True
self.save()
def end_reservation(self):
self.is_reserved = False
self.save()
class Payments(models.Model):
scooter = models.ForeignKey(Scooter, on_delete=models.CASCADE)
distance_traveled = models.DecimalField(max_digits=9, decimal_places=6)
payment_rate = models.DecimalField(max_digits=9, decimal_places=2)
is_paid = models.BooleanField(default=False)
refund_date = models.DateTimeField(default=None, null=True, blank=True)
@classmethod
def create(cls, scooter, distance_traveled, payment_rate):
payment = cls(
scooter=scooter,
distance_traveled=distance_traveled,
payment_rate=payment_rate,
)
payment.save()
return payment
@classmethod
def get_payment_by_id(cls, payment_id):
return cls.objects.filter(pk=payment_id).first()
def get_payment_amount(self):
return round(float(self.distance_traveled * self.payment_rate), 2)
def pay(self):
self.is_paid = True
self.save()
def refund(self):
self.is_paid = False
self.refund_date = timezone.now()
self.save()
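# Hedged usage sketch (requires a configured Django project and migrated tables;
# the id, distance and rate below are placeholder values only):
#
# scooter = Scooter.get_scooter_by_id(1)
# scooter.reserve()
# payment = Payments.create(scooter, distance_traveled=3.2, payment_rate=0.15)
# print(payment.get_payment_amount())   # 0.48
# payment.pay()
# scooter.end_reservation()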
| true |
a9634816e3deb4eaa97b6b97ed3b790619ed82ed
|
Python
|
danelia/CS131
|
/hw6_release/compression.py
|
UTF-8
| 1,106 | 3.609375 | 4 |
[] |
no_license
|
import numpy as np
def compress_image(image, num_values):
"""Compress an image using SVD and keeping the top `num_values` singular values.
Args:
image: numpy array of shape (H, W)
num_values: number of singular values to keep
Returns:
compressed_image: numpy array of shape (H, W) containing the compressed image
compressed_size: size of the compressed image
"""
compressed_image = None
compressed_size = 0
# Steps:
# 1. Get SVD of the image
# 2. Only keep the top `num_values` singular values, and compute `compressed_image`
# 3. Compute the compressed size
u, s, v = np.linalg.svd(image)
u, s, v = u[:, :num_values], np.diag(s[:num_values]), v[:num_values, :]
compressed_image = np.dot(np.dot(u, s), v)
compressed_size = u.size + num_values + v.size
assert compressed_image.shape == image.shape, \
"Compressed image and original image don't have the same shape"
assert compressed_size > 0, "Don't forget to compute compressed_size"
return compressed_image, compressed_size
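# Minimal usage sketch (added for illustration): a random 64x48 array stands in
# for a real grayscale image; num_values=10 keeps the top 10 singular values.
if __name__ == "__main__":
    demo_image = np.random.rand(64, 48)
    compressed, size = compress_image(demo_image, num_values=10)
    print(compressed.shape, size)  # (64, 48) and 64*10 + 10 + 10*48 = 1130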
| true |
a71866d5049b9b675fa61fd7d9bad96c63f8fea6
|
Python
|
limingzhang513/lmzrepository
|
/train_module/src/Data_Processing/DataSet/token/auths.py
|
UTF-8
| 3,044 | 2.515625 | 3 |
[] |
no_license
|
# !/usr/bin/python2
# -*- coding:utf-8 -*-
import datetime
import jwt
import json
import requests
from flask import current_app, g
from DataSet.utils.serial_code import RET
from DataSet.utils import commons
class Auth():
@staticmethod
def encode_auth_token(user_id, login_time):
"""
        Generate an authentication token
:param user_id: int
:param login_time: int(timestamp)
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=10),
'iat': datetime.datetime.utcnow(),
'iss': 'ken',
'data': {
'id': user_id,
'login_time': login_time
}
}
return jwt.encode(
payload,
current_app.config['SECRET_KEY'],
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
        Verify the authentication token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, current_app.config['SECRET_KEY'], options={'verify_exp': False})
if 'data' in payload and 'id' in payload['data']:
return payload
else:
raise jwt.InvalidTokenError
except jwt.ExpiredSignatureError:
            return 'Token expired'
except jwt.InvalidTokenError:
            return 'Invalid token'
def identify(self, request):
"""
        User authentication check
:return: list
"""
auth_header = request.headers.get('Authorization')
if auth_header:
auth_tokenArr = auth_header.split(" ")
if not auth_tokenArr or auth_tokenArr[0] != 'JWT' or len(auth_tokenArr) != 2:
                result = commons.falseReturn(RET.PARAMERR, '', 'Please pass a valid authorization header')
else:
auth_token = auth_tokenArr[1]
payload = self.decode_auth_token(auth_token)
if not isinstance(payload, str):
headers = {'Authorization': auth_header}
r = requests.get(url=current_app.config['TOKEN_IDENTIFY_URL'], headers=headers)
try:
user_id = json.loads(r.text)['data']['id']
except Exception:
result = r.text
user_id = None
if user_id is None:
                        result = commons.falseReturn(RET.DATAERR, '', 'User information not found')
else:
g.user_id = user_id
                        result = commons.trueReturn(user_id, 'Request successful')
else:
result = commons.falseReturn(RET.DATAERR, '', payload)
else:
            result = commons.falseReturn(RET.NODATA, '', 'No authentication token provided')
return result
auth = Auth()
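# Hedged usage sketch: both token helpers need an active Flask application
# context so current_app.config['SECRET_KEY'] is available; "app", the user id
# and the login timestamp below are placeholder assumptions for illustration.
#
# with app.app_context():
#     token = Auth.encode_auth_token(user_id=1, login_time=1609459200)
#     payload = Auth.decode_auth_token(token)  # dict on success, error string otherwise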
| true |
8ef6a0d4566acbc7e7749111fa265bf4ef16c1c9
|
Python
|
ngudkov/sdp
|
/factory_method/concrete_workers.py
|
UTF-8
| 1,263 | 2.9375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python3
from __future__ import annotations
from abstract_workers import WorkerCreator, Job
class DocManCreator(WorkerCreator):
"""
    Worker creator class.
    Documentation specialist: wants to produce documents, but only collects them.
"""
def factory_method(self) -> DocMan1:
return DocMan1()
class ManagerCreator(WorkerCreator):
"""
    Worker creator class.
    Manager: wants to manage, but manages nothing.
    Thinks he chooses who will produce which document,
    but in reality even that is not done by him.
"""
def factory_method(self) -> Manager1:
return Manager1()
class DocMan1(Job):
def create_documentation(self) -> str:
        return 'The documentation specialist sat in meetings all day and did nothing.'
class Manager1(Job):
def create_documentation(self) -> str:
        return 'The project manager sat in meetings all day and did nothing.'
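# Small usage sketch (hedged: assumes WorkerCreator from abstract_workers leaves
# only factory_method abstract, so the concrete creators can be instantiated):
#
# creator = DocManCreator()
# print(creator.factory_method().create_documentation())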
| true |
805836b396164c8a1ef317285fb1e40cbeb1ffee
|
Python
|
grrrr/nsgt
|
/nsgt/audio.py
|
UTF-8
| 5,014 | 2.515625 | 3 |
[
"Artistic-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# -*- coding: utf-8
"""
Python implementation of Non-Stationary Gabor Transform (NSGT)
derived from MATLAB code by NUHAG, University of Vienna, Austria
Thomas Grill, 2011-2021
http://grrrr.org/nsgt
Austrian Research Institute for Artificial Intelligence (OFAI)
AudioMiner project, supported by Vienna Science and Technology Fund (WWTF)
"""
import numpy as np
import subprocess as sp
import os.path
import re
import sys
from functools import reduce
try:
from pysndfile import PySndfile, construct_format
except:
PySndfile = None
def sndreader(sf, blksz=2**16, dtype=np.float32):
frames = sf.frames()
if dtype is float:
dtype = np.float64 # scikits.audiolab needs numpy types
if blksz < 0:
blksz = frames
if sf.channels() > 1:
channels = lambda s: s.T
else:
channels = lambda s: s.reshape((1,-1))
for offs in range(0, frames, blksz):
data = sf.read_frames(min(frames-offs, blksz), dtype=dtype)
yield channels(data)
def sndwriter(sf, blkseq, maxframes=None):
written = 0
for b in blkseq:
b = b.T
if maxframes is not None:
b = b[:maxframes-written]
sf.write_frames(b)
written += len(b)
def findfile(fn, path=os.environ['PATH'].split(os.pathsep), matchFunc=os.path.isfile):
for dirname in path:
candidate = os.path.join(dirname, fn)
if matchFunc(candidate):
return candidate
return None
class SndReader:
def __init__(self, fn, sr=None, chns=None, blksz=2**16, dtype=np.float32):
fnd = False
if not fnd and (PySndfile is not None):
try:
sf = PySndfile(fn, mode='r')
except IOError:
pass
else:
if (sr is None or sr == sf.samplerate()) and (chns is None or chns == sf.channels()):
# no resampling required
self.channels = sf.channels()
self.samplerate = sf.samplerate()
self.frames = sf.frames()
self.rdr = sndreader(sf, blksz, dtype=dtype)
fnd = True
if not fnd:
ffmpeg = findfile('ffmpeg') or findfile('avconv')
if ffmpeg is not None:
pipe = sp.Popen([ffmpeg,'-i', fn,'-'],stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
fmtout = pipe.stderr.read()
if (sys.version_info > (3, 0)):
fmtout = fmtout.decode()
m = re.match(r"^(ffmpeg|avconv) version.*Duration: (\d\d:\d\d:\d\d.\d\d),.*Audio: (.+), (\d+) Hz, (.+), (.+), (\d+) kb/s", " ".join(fmtout.split('\n')))
if m is not None:
self.samplerate = int(m.group(4)) if not sr else int(sr)
chdef = m.group(5)
if chdef.endswith(" channels") and len(chdef.split()) == 2:
self.channels = int(chdef.split()[0])
else:
try:
self.channels = {'mono':1, '1 channels (FL+FR)':1, 'stereo':2, 'hexadecagonal':16}[chdef] if not chns else chns
except:
print(f"Channel definition '{chdef}' unknown")
raise
dur = reduce(lambda x,y: x*60+y, list(map(float, m.group(2).split(':'))))
self.frames = int(dur*self.samplerate) # that's actually an estimation, because of potential resampling with round-off errors
pipe = sp.Popen([ffmpeg,
'-i', fn,
'-f', 'f32le',
'-acodec', 'pcm_f32le',
'-ar', str(self.samplerate),
'-ac', str(self.channels),
'-'],
# bufsize=self.samplerate*self.channels*4*50,
stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
def rdr():
bufsz = (blksz//self.channels)*self.channels*4
while True:
data = pipe.stdout.read(bufsz)
if len(data) == 0:
break
                            data = np.frombuffer(data, dtype=dtype)
yield data.reshape((-1, self.channels)).T
self.rdr = rdr()
fnd = True
if not fnd:
raise IOError("Format not usable")
def __call__(self):
return self.rdr
class SndWriter:
def __init__(self, fn, samplerate, filefmt='wav', datafmt='pcm16', channels=1):
fmt = construct_format(filefmt, datafmt)
self.sf = PySndfile(fn, mode='w', format=fmt, channels=channels, samplerate=samplerate)
def __call__(self, sigblks, maxframes=None):
        sndwriter(self.sf, sigblks, maxframes=maxframes)
| true |
7d3e959f480ddcc0b3125317128a551524734d5f
|
Python
|
yuedy/TensorFlow-cn
|
/source/_static/code/en/basic/graph/variable.py
|
UTF-8
| 510 | 3.3125 | 3 |
[] |
no_license
|
import tensorflow as tf
a = tf.get_variable(name='a', shape=[])
initializer = tf.assign(a, 0)  # tf.assign(x, y) returns an operation "assign Tensor y's value to Tensor x"
a_plus_1 = a + 1 # Equal to a + tf.constant(1)
plus_one_op = tf.assign(a, a_plus_1)
sess = tf.Session()
sess.run(initializer)
for i in range(5):
sess.run(plus_one_op) # Do plus one operation to a
    a_ = sess.run(a)  # Calculate a's value and store the result in a_
print(a_)
| true |
e9a3d23194f94daf8b83dfd0e22288d9579562a5
|
Python
|
samyev/clientes_django
|
/projeto/projeto/views.py
|
UTF-8
| 1,260 | 3.390625 | 3 |
[] |
no_license
|
from django.http import HttpResponse
from django.shortcuts import render
def hello(request):
    # function that returns a 'hello world!' rendered from index.html
return render(request, 'index.html')
def articles(request, year):
    # function that returns whatever year the user provides in the URL
    return HttpResponse("The year given was: " + str(year))
def lerDoBanco(nome):
    # Function that looks the requested name up in the "database" lista_nomes
returned_pessoa = {'Nome': 'N encontrado', 'idade': 0}
lista_nomes = [
{'nome': 'Ana', 'idade': 20},
{'nome': 'Pedro', 'idade': 25},
{'nome': 'Joaquim', 'idade': 27},
]
for pessoa in lista_nomes:
if nome in pessoa.values():
returned_pessoa = pessoa
return returned_pessoa
def fname(nome):
    # this function fetches the requested person from the "database" and prints whether they were found
result = lerDoBanco(nome)
if result['idade'] != 0:
        return print('The person was found, they are ' + str(result['idade']) + ' years old')
    else:
        return print('The person was not found')
def fname2(request, nome):
idade = lerDoBanco(nome)['idade']
return render(request, 'pessoa.html', {'v_idade': idade})
| true |
318650c57544bff716e847ae2002f79962bbd3af
|
Python
|
ericlavega96/Python-Tutorial
|
/Django - Python Course/PythonBootcamp/app.py
|
UTF-8
| 8,599 | 4.21875 | 4 |
[] |
no_license
|
# First exercise
# name = 'John Smith'
# age = 20
# is_new = True
#
# name = input('What is your name? ')
# print('Hi ' + name)
# favorite_color = input('What is your favorite color? ')
# print(name + ' likes '+ favorite_color)
# Second exercise
# birth_year = input('Birth year: ')
# age = 2019 - int(birth_year)
# print(age)
# Third exercise
# weight_lbs = input('Weight (lbs): ')
# weight_kg = float(weight_lbs) * 0.45
# print("You are " + str(weight_kg) + ' kg')
# Fourth exercise
# course = "Python's Course For Beginners"
# email_message = ''''
# Hi Eric,
#
# Here is the first mail I send you through Python
#
# Thank you,
# Paco
#
# '''
#
# print(email_message)
# #First letter
# print(course[1])
#
# #Last letter
# print(course[-1])
#
# #Range
# print(course[0:3])
#
# first = 'John'
# last = 'Doe'
# message = first + ' [' + last + '] ' + 'is a coder'
# #String format
# msg = f'{first} [{last}] is a coder'
# print(msg)
#
# print(len(course))
#
# #String functions
# print(course.upper())
#
# print(course.replace('Beginners','Absolute Beginners'))
# #Exist
# print('Python' in course)
# Fifth exercise
#
# import math
#
# x = 2.9
# print(round(x))
# print(abs(x))
# print(math.ceil(x))
#
# price = 1000000
# has_good_credit = True
# has_high_income = True
#
# if has_good_credit and has_high_income:
# payment = price * 0.10
# else:
# payment = price * 0.20
# print(f'Payment: {payment}$')
#
# has_criminal_record = False
#
# if has_good_credit and not has_criminal_record:
# print("He's clean!")
#
# name = input("Who's your name? ")
# if len(name) < 3:
# print("Name must be at least 3 characters")
# elif len(name) > 50:
# print("Name can be a maximum of 50 characters")
# else:
# print("Your name looks good!")
# Sixth exercise
# weight = float(input('Weight: '))
# unit = input('Lbs(L) or Kg(K): ')
# if unit.upper() == 'L':
# converted = weight * 0.45
# print(f'Converted weight: {converted} pounds')
# elif unit.upper() == 'K':
# converted = weight / 0.45
# print(f'Converted weight: {converted} pounds')
# else:
# print("Incorrect unit")
# Seventh exercise - Loops
# i = 1
# while i <= 5:
# print('*' * i)
# i += 1
#
# # Game 1 - Guess the secret number
# secret_number = 9
# opportunities = 3
# while opportunities > 0:
# opportunities -= 1
# guess = int(input("Guess: "))
# if guess == secret_number:
# print("You won")
# break
# else:
# print("You failed!")
# Game 1 - Car game
# command = ""
# started = False
# while True:
# command = input("> ").lower()
# if command == "start":
# if started:
# print("Car is alredy started!")
# else:
# started = True
# print("Car started...")
# elif command == "stop":
# if not started:
# print("Car is alredy stopped!")
# else:
# started = False
# print("Car stopped")
# elif command == "help":
# print('''
# start - to start the car
# stop - to stop the car
# quit - to exit
# ''')
# elif command == "quit":
# print("Bye bye!")
# break
# else:
# print("Incorrect command")
# Eighth exercise - For loop
# for char in 'Python':
# print(char)
#
# for item in ['Alex','Lidia','Paula']:
# print(item)
#
# for n in range(5, 10):
# print(n)
#
# prices = [30,40,50,60]
# total = 0
# for price in prices:
# total += price
# print(f'Total: {total}')
#
# for x in range(4):
# for y in range(3):
# print(f'({x},{y})')
# numbers = [5, 2, 5, 2, 2]
# for number in numbers:
# print('x' * number)
#
# for number in numbers:
# output = ''
# for x in range(number):
# output += 'x'
# print(output)
# Ninth exercise
# names = ['Lidia','Eric','Josefa','Epi']
# print(names[1])
# numbers = [4, 5, 1, 3, 6, 1, 2]
# max = numbers[0]
# for number in numbers:
# if number > max:
# max=number
# print(max)
#
# matrix = [
# [1, 2, 3],
# [4, 5, 6],
# [7, 8, 9]
# ]
# matrix[0][0] = 59
# print(matrix[0][0])
#
# for row in matrix:
# for item in row:
# print(item)
# numbers.sort()
# numbers.reverse()
# numbers2 = numbers.copy()
# print(numbers)
# numbers.append(20)
# numbers.insert(0,10)
# print(numbers.index(5))
# numbers.remove(5)
# numbers.pop()
# numbers2.clear()
# print(numbers)
#
# #Game 2 - Find duplicates
# list = []
# for number in numbers:
# if number not in list:
# list.append(number)
# print(list)
# Exercise Tenth - Tuples and unpacking
# numbers = (1, 2, 3)
# print(numbers[0])
#
# coordinates = (1, 2, 3)
# coordinates2 = [1, 2, 3]
#
# x = coordinates[0]
# y = coordinates[1]
# z = coordinates[2]
#
# x1, y1, z1 = coordinates
#
# x2, y2, z2 = coordinates2
#
# print(x1)
# print(x2)
#
# #Eleventh exercise - Dictionaries
#
# customer = {
# "name": "John Doe",
# "age": 20,
# "is_verified": True
# }
#
# customer["name"] = "Jack Doe"
# customer["birthday"] = "11-11-1996"
#
# print(customer["name"])
# print(customer.get("name"))
# print(customer.get("birthday"))
# numbers = {
# 1: "One",
# 2: "Two",
# 3: "Three",
# 4: "Four",
# 5: "Five",
# 6: "Six",
# 7: "Seven",
# 8: "Eight",
# 9: "Nine"
# }
# phone = input("Phone: ")
# output = ""
# for number in phone:
# output += numbers.get(int(number)) + ' '
# print(output)
#
# emojis = {
# ":)": "😊",
# ";)": "😉",
# ":(": "😢",
# ":d": "😁",
# "xd": "😆",
# ":p": "😜"
# }
#
# while True:
# msg = input("> ")
# words = msg.split(' ')
# output = ""
# if msg == "quit":
# print("Bye bye")
# break
# for word in words:
# output += emojis.get(word.lower(),word) + ' '
# print(output)
# Twelfth exercise - Functions
# def greet_user(name,last_name):
# print(f"Hi there {name} {last_name}")
# print("Welcome aboard")
# #Positional argument
# greet_user("Eric","Nunez")
#
# #Keyword argument
# greet_user(last_name="Nunez",name="Eric")
#
# #Mixed
# greet_user("Eric",last_name="Nunez")
#
# def square(number):
# return number * number
#
# print(square(2))
#
# def emojis_converter(msg):
# emojis = {
# ":)": "😊",
# ";)": "😉",
# ":(": "😢",
# ":d": "😁",
# "xd": "😆",
# ":p": "😜"
# }
#
# words = msg.split(' ')
# output = ""
# for word in words:
# output += emojis.get(word.lower(),word) + ' '
# return output
#
# msg = input("> ")
# print(emojis_converter(msg))
# Thirteenth exercise - Exceptions
# try:
# age = int(input("Age: "))
# income = 20000
# risk = income / age
# print(age)
# except ZeroDivisionError:
# print("Age cannot be zero!")
# except ValueError:
# print("Invalid value")
# Fourteenth exercise - Classes
#
# class Point:
# #Constructor
# def __init__(self,x,y):
# self.x = x
# self.y = y
#
# def move(self):
# print("move")
# def draw(self):
# print("draw")
#
#
# point1 = Point(10,20)
# print(point1.x)
#
# class Person:
# def __init__(self,name):
# self.name = name
#
# def talk(self):
# print(f"Hi, I'm {self.name}")
#
#
# person = Person("Bob")
# print(person.name)
# person.talk()
# class Mammal:
# def walk(self):
# print("walk")
#
# class Dog(Mammal):
# def bark(self):
# print("bark")
#
# class Cat(Mammal):
# def be_annoying(self):
# print("annoying")
#
# dog1 = Dog()
# dog1.walk()
# Fifteenth exercise - Modules and packages
# Python 3 module index
# import converters, utils
# from converters import lbs_to_kg
# import ecommerce.shipping
# from ecommerce.shipping import calc_shipping
# from ecommerce import shipping
#
# print(converters.kg_to_lbs(70))
#
# print(utils.find_max([3,4,5,6,9,10,56,12,45,67]))
#
# shipping.calc_shipping()
# import random
#
# # for i in range(3):
# # print(random.randint(10, 20))
#
# members = ["John", "Mary", "Bob", "Charles"]
# leader = random.choice(members)
# print(leader)
#
#
# class Dice:
# def roll(self):
# x = random.randint(1, 6)
# y = random.randint(1, 6)
# return x, y
#
#
# dice = Dice()
# print(dice.roll())
# from pathlib import Path
# #Absolute path
# #c:\Program Files\Microsoft
# # /usr/local/bin
# # Relative path
#
# path = Path()
# # path.mkdir()
# # path.rmdir()
# # print(path.exists())
# for file in path.glob('*.py'):
# print(file)
#Sixteenth exercise - Working with pip
#openpyxl - Lib for working with Excel
import openpyxl as xl
from openpyxl.chart import BarChart, Reference
def process_workbook(filename):
wb = xl.load_workbook(filename)
sh1 = wb['Sheet1']
for row in range(2, sh1.max_row + 1):
cell = sh1.cell(row, 3)
corrected_price = cell.value * 0.9
corrected_price_cell = sh1.cell(row, 4)
corrected_price_cell.value = corrected_price
values = Reference(sh1, min_row=2,
max_row=sh1.max_row,
min_col=4,
max_col=4)
chart = BarChart()
chart.add_data(values)
sh1.add_chart(chart, 'e2')
wb.save(filename)
process_workbook('transactions.xlsx')
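# Illustrative batch variant, not part of the original exercise: apply the same
# correction to every workbook matching a glob pattern, assuming they all share the
# 'Sheet1' layout used above. The pattern below is only a placeholder file name.
from pathlib import Path

def process_all_workbooks(pattern='transactions*.xlsx'):
    for workbook_path in Path('.').glob(pattern):
        process_workbook(str(workbook_path))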
| true |
1b4920be99ae83218f513aec1d53715715ae3524
|
Python
|
Masluss2903/covid19_report
|
/covid/get_summary_database.py
|
UTF-8
| 2,025 | 2.9375 | 3 |
[] |
no_license
|
import json
import boto3
from urllib.parse import parse_qs
def get_global_summary(covid_summary):
global_data = covid_summary['Item']['Global_information']
answer = 'Right now there are {:,} new confirmed cases, {:,} total confirmed, {:,} new deaths, {:,} total deaths and {:,} total recovered.'.format(
global_data['NewConfirmed'],
global_data['TotalConfirmed'],
global_data['NewDeaths'],
global_data['TotalDeaths' ],
global_data['TotalRecovered'])
return answer
def get_data_by_country(covid_summary, search):
countries = covid_summary['Item']['Countries']
try:
country = next(c for c in countries if c['Country'].lower() == search)
    except StopIteration:
return 'Try again, we do not have what you are looking for'
answer = 'In {} there are {:,} new confirmed cases, {:,} total confirmed, {:,} new deaths, {:,} total deaths and {:,} total recovered.'.format(
country['Country'],
country['NewConfirmed'],
country['TotalConfirmed'],
country['NewDeaths'],
country['TotalDeaths' ],
country['TotalRecovered'])
return answer
def get_connection_database():
dynamodb = boto3.resource("dynamodb")
tables = dynamodb.Table('summary')
covid_summary = tables.get_item(Key = {'Date' : 'latest'})
return covid_summary
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': str(err) if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
},
}
def lambda_handler(event, context):
covid_summary = get_connection_database()
message_from_slack = parse_qs(event["body"])
search = str(message_from_slack['text'][0])
search = search.lower()
if search == 'global':
info = get_global_summary(covid_summary)
else:
info = get_data_by_country(covid_summary, search)
return respond(None, {
'response_type': 'in_channel',
'text': info
})
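# Local smoke test only (guarded): builds a fake Slack slash-command body and invokes
# the handler. It assumes AWS credentials and the 'summary' DynamoDB table are
# reachable; the payload below only illustrates the urlencoded form Slack sends.
if __name__ == '__main__':
    fake_event = {'body': 'text=global'}
    print(lambda_handler(fake_event, None))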
| true |
2587609f83a4156eb57b22d450c2d4b62b7a691b
|
Python
|
VieetBubbles/holbertonschool-higher_level_programming
|
/0x05-python-exceptions/4-list_division.py
|
UTF-8
| 695 | 3.578125 | 4 |
[] |
no_license
|
#!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
new = []
for _ in range(list_length):
try:
result = my_list_1[_] / my_list_2[_]
new.append(result)
except ValueError:
result = 0
new.append(result)
except ZeroDivisionError:
result = 0
new.append(result)
print("division by 0")
except TypeError:
result = 0
new.append(result)
print("wrong type")
except IndexError:
result = 0
new.append(result)
print("out of range")
finally:
pass
return new
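# Illustrative run (guarded so importing this module stays side-effect free); the
# input lists are made up to hit each handled error path.
if __name__ == "__main__":
    result = list_division([10, 8, 4, 'a'], [2, 0, 1, 1], 5)
    print(result)  # after "division by 0", "wrong type", "out of range": [5.0, 0, 4.0, 0, 0]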
| true |
8403890a896f39cbecd7af056e2e39ae43dd9843
|
Python
|
Grinch101/dentist_website
|
/t1.py
|
UTF-8
| 2,857 | 2.75 | 3 |
[] |
no_license
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
def updater(fig=None, title='TITLE', xaxistitle='x title', yaxistitle='y title', font='Arial', fontsize=12):
Config = {
'displayModeBar': True,
'displaylogo': False,
"fillFrame": False,
'scrollZoom': True,
'modeBarButtonsToAdd': ['drawline',
'drawopenpath',
'drawclosedpath',
'drawcircle',
'drawrect',
'eraseshape'
]
}
if fig is not None:
        fig.layout = {'font': {'color': 'black', 'family': font, 'size': fontsize},
                      'legend': {'title': {'text': 'Legend Title'}},
                      'margin': {'b': 10, 'l': 10, 'r': 10, 't': 51},
                      'template': 'ggplot2',
                      'title': {'font': {'size': 18}, 'text': title},
                      'xaxis': {'rangeslider': {'visible': False}, 'title': {'text': xaxistitle}, 'type': 'linear'},
                      'yaxis': {'rangeslider': {'visible': False}, 'title': {'text': yaxistitle}, 'type': 'linear'},
                      'transition_duration': 500}
return fig, Config
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv')
app = dash.Dash(__name__)
a, config = updater()
app.layout = html.Div([
dcc.Graph(id='graph-with-slider', config=config),
dcc.Slider(
id='year-slider',
min=df['year'].min(),
max=df['year'].max(),
value=df['year'].min(),
marks={str(year): str(year) for year in df['year'].unique()},
step=None
)
])
@app.callback(
Output('graph-with-slider', 'figure'),
Input('year-slider', 'value'))
def update_figure(selected_year):
filtered_df = df[df.year == selected_year]
fig = px.scatter(filtered_df, x="gdpPercap", y="lifeExp",
size="pop", color="continent", hover_name="country",
log_x=True, size_max=55)
fig.update_layout(transition_duration=500)
# fig, config = updater(fig, title='test')
fig.layout = {
'legend': {'itemsizing': 'constant', 'title': {'text': 'continent'}, 'tracegroupgap': 0},
'margin': {'t': 30,'l':2},
'template': 'ggplot2',
'transition': {'duration': 500},
'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'gdpPercap'}, 'type': 'log'},
'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'lifeExp'}}
}
return fig
if __name__ == '__main__':
app.run_server(debug=False)
| true |
e5abd95d0840587743bb0f779afd3f2b89748f61
|
Python
|
sipocz/ImageManipulation
|
/04_hazi.py
|
UTF-8
| 2,307 | 2.859375 | 3 |
[] |
no_license
|
from sklearn.datasets import load_wine
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
wine = load_wine()
#print(data.DESCR)
cols=["Alcohol","Malic acid","Ash","Alcalionity","Magnesium","Total Phenol","Flavanoids",
"Nonflavor","Proanthocyanins","Color intensity","Hue","OD280_OD315","Proline"]
df=pd.DataFrame(wine["data"][:,0:13],columns=cols)
'''
- Alcalinity of ash
- Magnesium
- Total phenols
- Flavanoids
- Nonflavanoid phenols
- Proanthocyanins
- Color intensity
- Hue
- OD280/OD315 of diluted wines
- Proline])
'''
print(df)
maxi=df.max()
mini=df.min()
print(maxi,mini)
df2=(df-mini)
delta=maxi-mini
df_feature=df2/delta
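# Cross-check (illustrative only): the manual scaling above is ordinary min-max
# normalisation, so it should agree with sklearn's MinMaxScaler.
from sklearn.preprocessing import MinMaxScaler
_scaled_check = MinMaxScaler().fit_transform(df)
print("manual scaling matches MinMaxScaler:", np.allclose(df_feature.values, _scaled_check))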
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.cluster import Birch
from sklearn.cluster import MeanShift
import sklearn.cluster as cluster
from sklearn.decomposition import PCA
n_cluster_num=3
clusterer = KMeans(n_clusters=n_cluster_num, random_state=10)
cluster_labels_Kmeans = clusterer.fit_predict(df_feature)
clusterer=DBSCAN(eps=0.45)
cluster_label_DBScan=clusterer.fit_predict(df_feature)
clusterer=Birch(n_clusters=n_cluster_num)
cluster_label_Birch=clusterer.fit_predict(df_feature)
bandwidth = cluster.estimate_bandwidth(df_feature, quantile=0.15)
clusterer=MeanShift(bin_seeding=True,bandwidth=bandwidth)
cluster_label_MeanShift=clusterer.fit_predict(df_feature)
a_pca=PCA(n_components=3)
data_pca=a_pca.fit_transform(df_feature)
Y=wine.target
# Start a new figure (plt.figure)!
plt.figure(figsize=(20,5))
# Draw with plt.scatter!
# Hint: X_pca[:, 0], X_pca[:, 1], c=Y
plt.subplot(151)
plt.xlabel("Kmeans")
plt.scatter(data_pca[:,0],data_pca[:,1],c=cluster_labels_Kmeans)
# Set the axis labels and the title!
plt.subplot(152)
plt.xlabel("DBScan")
plt.scatter(data_pca[:,0],data_pca[:,1],c=cluster_label_DBScan)
plt.subplot(153)
plt.xlabel("Birch")
plt.scatter(data_pca[:,0],data_pca[:,1],c=cluster_label_Birch)
plt.subplot(154)
plt.xlabel("MeanShift")
plt.scatter(data_pca[:,0],data_pca[:,1],c=cluster_label_MeanShift)
plt.subplot(155)
plt.xlabel("PCA Y")
plt.scatter(data_pca[:,0],data_pca[:,1],c=Y)
...
# Display it with the plt.show method!
plt.show()
| true |
a6476108d4cf99bec2b51ce2cd145e1f6faaf045
|
Python
|
martinber/agglomerate
|
/agglomerate/settings.py
|
UTF-8
| 7,926 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
import agglomerate.math
import agglomerate.util
class Settings:
"""
Keeps track of a group settings, this instance is for algorithms, can
represent an entire sheet or a group.
**Settings**
algorithm
name of the algorithm to use
allow
dictionary containing allowed settings
require
dictionary containing required settings
size
Vector2 that contains size of the generated sprite sheet image,
values can be "auto"
**Allowed dictionary**
- rotation: True if the user allows the rotation of sprites
- cropping: True if the user allows cropping of sprites
**Required dictionary**
- square_size: True if a squared sheet is required
- power_of_two_size: True if power-of-two dimensions are required
- padding: Padding to apply to sprites, can be False or an integer
"""
def __init__(self, algorithm=None):
"""
Creates a settings object.
Sets remaining options to default values, must specify an algorithm,
because there is no default algorithm.
**Default values**
- algorithm: None
- allow
- "rotation": False
- "cropping": False
- require
- "square_size": False
- "power_of_two_size": False
- "padding": False
- size: both x and y set to auto
"""
self.algorithm = algorithm
self.allow = {
"rotation": False,
"cropping": False
}
self.require = {
"square_size": False,
"power_of_two_size": False,
"padding": False
}
self.size = agglomerate.math.Vector2("auto", "auto")
@classmethod
def from_dict(cls, dictionary):
"""
Returns a Settings instance with values set from a dictionary.
All values must be in the dictionary, size value must be also a
dictionary, background_color must be a hex value string
"""
# create a settings instance
s = Settings()
# fill it with the dictionary values
s.algorithm = dictionary["algorithm"]
s.allow = dictionary["allow"]
s.require = dictionary["require"]
# size is a dictionary, we need to create a Vector2 instance
# Vector2 can be initialized from a dict
s.size = agglomerate.math.Vector2.from_dict(dictionary["size"])
return s
def to_dict(self):
"""
Returns a dictionary of the fields in the settings instance. Also
        converts size to a dictionary.
"""
return {
"algorithm": self.algorithm,
"allow": self.allow,
"require": self.require,
# sheet size is an object, we need to store it also as a dict
"size": self.size.to_dict(),
}
class SheetSettings(Settings):
"""
Keeps track of all the sheet settings, this is valid only for the entire
sheet, groups use Settings instead. This object is used by the packer and
the formats. A Parameters instance contains a SheetSettings instance.
Inherits the settings set in the Settings class and adds new ones:
**Added settings**
format
name of the coordinates file format
output_sheet_path
where to save the generated sprite sheet, if no extension is given,
output_sheet_format is necessary, keep in mind that the saved file
will lack extension
output_coordinates_path
where to save the generated coordinates file, if no extension is given
no extension is added automatically, you can add one looking at the
format suggested extension
output_sheet_format
image format used for saving. if None the format will be determined by
the output_coordinates_path extension, this value is given to Pillow's
Image.save() method, see Pillow documentation for more info
output_sheet_color_mode
color mode used for saving, this argument is given to Pillow's
Image.new() method, see Pillow documentation for more info
background_color
color to use as the background of the sheet
**Tested output sheet image formats**
- None: determined from the output_sheet_path extension
- "png"
- "jpeg" ("jpg" doesn't work)
- "tiff"
**Tested output sheet color modes**
- "RGBA"
- "RGB"
- "CYMK" but messes colors, I don't know how it works
- "1"
- "L"
- "P" doesn't work, needs more arguments
"""
def __init__(self, algorithm=None, format=None,
output_sheet_path=None, output_coordinates_path=None):
"""
Creates a settings object.
Sets remaining options to default values, must specify an algorithm,
format, output_sheet_path and output_coordinates_path because these
have no default values.
output_sheet_path must have extension.
If output_coordinates_path doesn't have extension, the packer will use
a default one based on the format chosen
**Default values**
- format: None
- output_sheet_path: None
- output_coordinates_path: None
- output_sheet_format: None
- output_sheet_color_mode: "RGBA"
- background_color: transparent (#00000000)
"""
super().__init__(algorithm)
self.format = format
        self.output_sheet_path = output_sheet_path
        self.output_coordinates_path = output_coordinates_path
self.output_sheet_format = None
self.output_sheet_color_mode = "RGBA"
self.background_color = \
agglomerate.util.Color.from_hex("#00000000")
@classmethod
def from_dict(cls, dictionary):
"""
Returns a Settings instance with values set from a dictionary.
All values must be in the dictionary, size value must be also a
dictionary, background_color must be a hex value string
"""
# create a settings instance
s = SheetSettings()
# fill it with the dictionary values
s.algorithm = dictionary["algorithm"]
s.format = dictionary["format"]
s.output_sheet_path = dictionary["output_sheet_path"]
s.output_coordinates_path = dictionary["output_coordinates_path"]
s.output_sheet_format = dictionary["output_sheet_format"]
s.output_sheet_color_mode = dictionary["output_sheet_color_mode"]
s.allow = dictionary["allow"]
s.require = dictionary["require"]
# size is a dictionary, we need to create a Vector2 instance
# Vector2 can be initialized from a dict
s.size = agglomerate.math.Vector2.from_dict(dictionary["size"])
# background_color is a hex code string, we need a Color instance
s.background_color = agglomerate.util.Color.from_hex(
dictionary["background_color"])
return s
def to_dict(self):
"""
Returns a dictionary of the fields in the settings instance. Also
        converts size to a dictionary and background_color to a hex code
"""
return {
"algorithm": self.algorithm,
"format": self.format,
"output_sheet_path": self.output_sheet_path,
"output_coordinates_path": self.output_coordinates_path,
"output_sheet_format": self.output_sheet_format,
"output_sheet_color_mode": self.output_sheet_color_mode,
"allow": self.allow,
"require": self.require,
# sheet size is an object, we need to store it also as a dict
"size": self.size.to_dict(),
# background_color is a object, we need to store it as a hex string
"background_color": self.background_color.to_hex()
}
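# Minimal round-trip sketch of the to_dict()/from_dict() pair; the algorithm, format
# and path values below are placeholders, not names defined by agglomerate.
def _roundtrip_example():
    original = SheetSettings(algorithm="example-algorithm", format="example-format",
                             output_sheet_path="sheet.png",
                             output_coordinates_path="sheet.json")
    restored = SheetSettings.from_dict(original.to_dict())
    return restored.to_dict() == original.to_dict()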
| true |
e792589e4c043a1f2fb6d41dd1a6b418afa569a7
|
Python
|
kho903/data-structure-and-algorithm-in-Python
|
/정렬, 탐색/이진탐색(Binary Search).py
|
UTF-8
| 594 | 3.75 | 4 |
[] |
no_license
|
# Applicable only when the list being searched is already sorted
# Exploits the fact that the elements are in sorted order
# Each comparison halves the remaining search range
# O(log n)
def solution(L, x):
answer = -1
lower = 0
upper = len(L) - 1
while lower <= upper:
middle = (lower + upper) // 2
if L[middle] == x:
return middle
elif L[middle] < x:
lower = middle + 1
else:
upper = middle - 1
return answer
L = [1, 2, 3, 4, 5, 6, 7, 9]
a = solution(L, 9)
print(a)
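# The same O(log n) lookup is also available from the standard library: bisect_left
# returns the insertion point, so the value is present when the element at that point
# equals x. This is an equivalent alternative, not part of the original solution.
import bisect
def solution_bisect(L, x):
    i = bisect.bisect_left(L, x)
    return i if i < len(L) and L[i] == x else -1
print(solution_bisect(L, 9))  # prints 7, same as above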
| true |
518731afed6a833dbe3802bcb27958973b79fcca
|
Python
|
shayhan-ameen/Beecrowd-URI
|
/Beginner/URI 1064.py
|
UTF-8
| 250 | 3.953125 | 4 |
[] |
no_license
|
numbers = []
for _ in range(6):
numbers.append(float(input()))
count = 0
sum = 0.0
for number in numbers:
    if number > 0:
sum += number
count += 1
print(f'{count} valores positivos')
print(f'{sum/count:.1f}')
| true |
a6b8d5387b61accfdb79bcd0230f4159c135eadb
|
Python
|
IUCVLab/ctscan
|
/src/data/Dataset.py
|
UTF-8
| 2,519 | 2.59375 | 3 |
[] |
no_license
|
import h5py as h5
from pathlib import Path
from pydicom import dcmread
import numpy as np
class TagError(KeyError):
pass
class Dataset:
def __init__ (self, file='dataset.hdf5'):
self.__file = h5.File(file)
if "counter" not in self.__file.attrs:
self.__file.attrs['counter'] = 0
self.__update_studylist()
def __del__(self):
self.__file.close()
def __getitem__(self, key):
if type(key) == slice or type(key) == int:
return self.__studylist.__getitem__(key)
elif type(key) == str:
            return self.__getattr__(key)
def __getattr__(self, tag):
return self.__studylist.__getattr__(tag)
def __update_studylist(self):
        self.__studylist = StudyList([item for item in self.__file.values() if isinstance(item, h5.Dataset)])
def add_dicom_study(self, directory, file_format="*", tags=None):
directory = Path(directory)
slices = [dcmread(str(f)) for f in directory.glob(file_format) if f.is_file()]
slices.sort(key=lambda x: x.SliceLocation)
image = np.array([slice_.pixel_array + slice_.RescaleIntercept for slice_ in slices], dtype=np.int16)
scale = float(slices[0].PixelSpacing[0]), float(slices[0].PixelSpacing[1]), float(slices[0].SliceThickness)
self.__file.attrs['counter'] += 1
self.__file.create_dataset(name=f"Study{self.__file.attrs['counter']}", data=image)
self.__update_studylist()
class StudyList(list):
def __init__(self, *args, tags=[], **kwargs):
super().__init__(*args, **kwargs)
self.__tags = tags
def __str__(self):
return f"<StudyList> tags:{self.__tags} length:{len(self)}"
def __getattr__(self, tag):
if tag in self.__tags:
raise TagError("Tag {tag} already present")
return StudyList([dataset for dataset in self if tag in dataset.attrs['tags']], self.__tags + [tag])
class Study:
def __init__(self, dataset):
self.__dataset = dataset
self.__space = dataset.attrs["space"]
self.reset()
def __getitem__(self, key):
if type(key) == slice or type(key) == int:
return self.__dataset[key]
else:
super().__getitem__(key)
@property
def space(self):
return self.__space
def reset(self):
self.tags = list(self.__dataset.attrs['tags'])
def save(self):
self.__dataset.attrs['tags'] = self.tags
| true |
d4b4d2ca19d29873e27699a8d43595f0bdaecc5f
|
Python
|
SamuelMiddendorp/SamieTools
|
/lib/helpers.py
|
UTF-8
| 348 | 2.765625 | 3 |
[] |
no_license
|
import json
def load_assets() -> dict:
"""Returns key-value pairs from the json configuration file"""
try:
with open("cfg/assets.json", "r") as f:
            return json.load(f)
except Exception as e:
print(f"An error has been encountered while loading a file of type {type(e)}")
exit()
def test():
print("foo")
| true |
0c1b4515556d1bced032cc31c7a8d1fb67d27b35
|
Python
|
TangoJP/BasicFeatureAnalysis
|
/feature_comparison.py
|
UTF-8
| 8,989 | 2.90625 | 3 |
[] |
no_license
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import cm
from .feature import (ColumnData, Feature, CategoricalFeature,
OrdinalFeature, ClassTarget)
from .feature_collection import (FeatureCollection,
CategoricalFeatureCollection,
OrdinalFeatureCollection)
# Classes for comparing features with classes and among each other
class FeatureComparison:
'''
#Once this is implemented, Binary- and CategoricalComparison class
#will inherit from this parent class to reduce code redundancy.
'''
def __init__(self, feature1, feature2, target):
self.features = pd.concat([feature1, feature2], axis=1)
self.target = target
self._table = sm.stats.Table.from_data(self.features)
self.contingency_table_ = self._table.table_orig
self.chi_result_ = self._table.test_nominal_association()
self.chi_pvalue_ = self.chi_result_.pvalue
self._f1 = self.features.iloc[:, 0]
self._f2 = self.features.iloc[:, 1]
class BinaryComparison:
def __init__(self, feature1, feature2, target):
self.features = pd.concat([feature1, feature2], axis=1)
self.target = target
self._table = sm.stats.Table.from_data(self.features)
self.contingency_table_ = self._table.table_orig
self.chi_result_ = self._table.test_nominal_association()
self.chi_pvalue_ = self.chi_result_.pvalue
self._f1 = self.features.iloc[:, 0]
self._f2 = self.features.iloc[:, 1]
def test_independence(self, significance_level=0.01):
if self.chi_pvalue_ < significance_level:
text = 'Feature association is significant (p-value=%.3f)' \
% self.chi_pvalue_
else:
text = 'Feature association is NOT significant (p-value=%.3f)' \
% self.chi_pvalue_
print(text)
return
def calculate_individual_probas(self):
fs = [self._f1, self._f2]
fs_labels = ['feature1_proba', 'feature2_proba']
individual_probas = {}
for i, f in enumerate(fs):
num_val0 = len(f[f == 0])
num_val1 = len(f[f == 1])
num_class1_given_val0 = len(f[(f == 0) & (self.target == 1)])
num_class1_given_val1 = len(f[(f == 1) & (self.target == 1)])
try:
proba_class1_given_val0 = num_class1_given_val0 / num_val0
except ZeroDivisionError:
proba_class1_given_val0 = 0
try:
proba_class1_given_val1 = num_class1_given_val1 / num_val1
except ZeroDivisionError:
proba_class1_given_val1 = 0
individual_probas[fs_labels[i]] = \
(proba_class1_given_val0, proba_class1_given_val1)
return individual_probas
def calculate_join_probas(self):
data = pd.concat([self.features, self.target], axis=1)
f10_f20 = len(data[(data.iloc[:, 0] == 0) & (data.iloc[:, 1] == 0)])
f11_f20 = len(data[(data.iloc[:, 0] == 1) & (data.iloc[:, 1] == 0)])
f10_f21 = len(data[(data.iloc[:, 0] == 0) & (data.iloc[:, 1] == 1)])
f11_f21 = len(data[(data.iloc[:, 0] == 1) & (data.iloc[:, 1] == 1)])
class1_given_f10_f20 = len(data[(data.iloc[:, 0] == 0) & \
(data.iloc[:, 1] == 0) & (data.iloc[:, 2] == 1)])
class1_given_f11_f20 = len(data[(data.iloc[:, 0] == 1) & \
(data.iloc[:, 1] == 0) & (data.iloc[:, 2] == 1)])
class1_given_f01_f21 = len(data[(data.iloc[:, 0] == 0) & \
(data.iloc[:, 1] == 1) & (data.iloc[:, 2] == 1)])
class1_given_f11_f21 = len(data[(data.iloc[:, 0] == 1) & \
(data.iloc[:, 1] == 1) & (data.iloc[:, 2] == 1)])
try:
proba_00 = class1_given_f10_f20 / f10_f20
except ZeroDivisionError:
proba_00 = 0
try:
proba_10 = class1_given_f11_f20 / f11_f20
except ZeroDivisionError:
proba_10 = 0
try:
proba_01 = class1_given_f01_f21 / f10_f21
except ZeroDivisionError:
proba_01 = 0
try:
proba_11 = class1_given_f11_f21 / f11_f21
except ZeroDivisionError:
proba_11 = 0
join_proba_table = pd.DataFrame(
{self._f1.name: [0, 1, 0, 1],
self._f2.name: [0, 0, 1, 1],
'join_proba': [proba_00, proba_10, proba_01, proba_11]}
)
return join_proba_table
def assess_joint_result(self, mode='ratio'):
individual_probas = self.calculate_individual_probas()
ind_probas = [i
for k, v in individual_probas.items()
for i in v
]
joint_probas = self.calculate_join_probas()
best_ind_probas = np.max(ind_probas)
best_joint_probas = joint_probas['join_proba'].max()
if mode == 'ratio':
gain = best_joint_probas / best_ind_probas
elif mode == 'subtraction':
gain = best_joint_probas - best_ind_probas
        else:
            raise ValueError('mode has to be ratio or subtraction')
return gain
class CategoricalComparison:
def __init__(self, feature1, feature2, target):
self.features = pd.concat([feature1, feature2], axis=1)
self.target = target
self._table = sm.stats.Table.from_data(self.features)
self.contingency_table_ = self._table.table_orig
self.chi_result_ = self._table.test_nominal_association()
self.chi_pvalue_ = self.chi_result_.pvalue
self._f1 = self.features.iloc[:, 0]
self._f2 = self.features.iloc[:, 1]
self.category_values_ = {self._f1.name: self._f1.unique(),
self._f2.name: self._f2.unique()}
self.num_category_values_ = {self._f1.name: len(self._f1.unique()),
self._f2.name: len(self._f2.unique())}
def test_independence(self, significance_level=0.01):
if self.chi_pvalue_ < significance_level:
text = 'Feature association is significant (p-value=%.3f)' \
% self.chi_pvalue_
else:
text = 'Feature association is NOT significant (p-value=%.3f)' \
% self.chi_pvalue_
print(text)
return
def calculate_individual_probas(self):
ind_probas_f1 = pd.DataFrame()
ind_probas_f1['total_count'] = self._f1.value_counts()
ind_probas_f1['class1_count'] = \
self._f1[self.target == 1].value_counts()
ind_probas_f1['proba_class1_given_val'] = \
ind_probas_f1['class1_count'] / ind_probas_f1['total_count']
ind_probas_f2 = pd.DataFrame()
ind_probas_f2['total_count'] = self._f2.value_counts()
ind_probas_f2['class1_count'] = \
self._f2[self.target == 1].value_counts()
ind_probas_f2['proba_class1_given_val'] = \
ind_probas_f2['class1_count'] / ind_probas_f2['total_count']
probas = pd.DataFrame()
probas[(self._f1.name + '_probas')] = \
ind_probas_f1['proba_class1_given_val']
probas[(self._f2.name + '_probas')] = \
ind_probas_f2['proba_class1_given_val']
return {self._f1.name: ind_probas_f1,
self._f2.name: ind_probas_f2,
'probas': probas}
def calculate_join_probas(self):
total_contingency = pd.crosstab(self._f1, self._f2)
class1_contingency = \
pd.crosstab(self._f1[self.target == 1], self._f2[self.target == 1])
joint_probas = class1_contingency / total_contingency
return joint_probas
def assess_joint_result(self, mode='ratio', printout=False):
ind_probas = self.calculate_individual_probas()['probas']
joint_probas = self.calculate_join_probas()
best_ind_probas = ind_probas.replace({np.NaN: 0}).values.max()
best_joint_probas = joint_probas.replace({np.NaN: 0}).values.max()
if mode == 'ratio':
gain = best_joint_probas / best_ind_probas
elif mode == 'subtraction':
gain = best_joint_probas - best_ind_probas
        else:
            raise ValueError('mode has to be ratio or subtraction')
if printout:
print('Max Individual Probability=%f' % best_ind_probas)
print('Max Joint Probability=%f' % best_joint_probas)
return gain
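# Minimal usage sketch on synthetic data; the column names and the random 0/1 values
# are illustrative only. Call _toy_binary_comparison() from an interactive session.
def _toy_binary_comparison(n=200, seed=0):
    """Run BinaryComparison on random binary features and return the joint gain."""
    rng = np.random.RandomState(seed)
    f1 = pd.Series(rng.randint(0, 2, n), name='feature_a')
    f2 = pd.Series(rng.randint(0, 2, n), name='feature_b')
    y = pd.Series(rng.randint(0, 2, n), name='target')
    comparison = BinaryComparison(f1, f2, y)
    comparison.test_independence()
    return comparison.assess_joint_result()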
| true |
c3c0ab902bc199eb716917ca5a85c62b1d13f6d3
|
Python
|
Team5892Steamworks/FRC2017
|
/pi_pixy_vision/get_blocks.py
|
UTF-8
| 2,818 | 3.234375 | 3 |
[] |
no_license
|
"""
Uses NetworkTables and the Pixy camera to send the raw block data to the robot.
More specifically, it sends the x and y positions of the two biggest blocks, which should be the boiler tape.
Presumably later I will make a program that gives the robot more directly useful information.
However, right now I just want the Pixy, Pi, and RoboRIO communicating.
Also, I feel like we need a standard for when programs use NetworkTables so that we can easily tell where to get and put stuff.
So here one is.
== NetworkTables info ==
This program puts data at:
/PixyVision/get_blocks/xpos1 (Number)
/PixyVision/get_blocks/ypos1 (Number)
/PixyVision/get_blocks/xpos2 (Number)
/PixyVision/get_blocks/ypos2 (Number)
This program gets data from:
"""
from networktables import NetworkTables
from pixy import *
from ctypes import *
# Pixy Python SWIG get blocks example #
print ("Pixy Python SWIG Example -- Get Blocks (NetworkTables edition)")
# Initialize Pixy Interpreter thread #
pixy_init()
# Initialize NetworkTables #
NetworkTables.initialize(server="10.58.92.2")
table = NetworkTables.getTable("PixyVision")
ntable = table.getSubTable("get_blocks")
class Blocks (Structure):
_fields_ = [ ("type", c_uint),
("signature", c_uint),
("x", c_uint),
("y", c_uint),
("width", c_uint),
("height", c_uint),
("angle", c_uint) ]
blocks = BlockArray(100)
frame = 0
# Wait for blocks #
while 1:
count = pixy_get_blocks(100, blocks)
if count > 0:
# Blocks found #
print 'frame %3d:' % (frame)
frame = frame + 1
max_blocks = [None, None]
for index in range (0, count):
print '[BLOCK_TYPE=%d SIG=%d X=%3d Y=%3d WIDTH=%3d HEIGHT=%3d]' % (blocks[index].type, blocks[index].signature, blocks[index].x, blocks[index].y, blocks[index].width, blocks[index].height)
if blocks[index].signature == 1:
if max_blocks[0] is None or blocks[index].width * blocks[index].height > max_blocks[0].width * max_blocks[0].height:
max_blocks[1] = max_blocks[0]
max_blocks[0] = blocks[index]
elif max_blocks[1] is None or blocks[index].width * blocks[index].height > max_blocks[1].width * max_blocks[1].height:
max_blocks[1] = blocks[index]
if max_blocks[0] is not None:
ntable.putNumber("xpos1", max_blocks[0].x)
ntable.putNumber("ypos1", max_blocks[0].y)
if max_blocks[1] is not None:
ntable.putNumber("xpos2", max_blocks[1].x)
ntable.putNumber("ypos2", max_blocks[1].y)
else:
ntable.putNumber("xpos2", -1) # -1 denotes that there is not a block. Which is kind of obvious but w/e.
ntable.putNumber("ypos2", -1)
else:
ntable.putNumber("xpos1", -1)
ntable.putNumber("ypos1", -1)
| true |
4f20348a0672c5d8a68dfe59703451446167c202
|
Python
|
YorikSar/gh-mirror
|
/gh-mirror.py
|
UTF-8
| 4,817 | 2.640625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
"""Mirrors number of GitHub repositories."""
import argparse
import logging
import os.path
import re
import shutil
import signal
import subprocess
import sys
import urllib2
import HTMLParser
class GHRepoListParser(HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.level = 0
self.repos = []
def handle_starttag(self, tag, attrs):
if self.level == 0:
if tag == 'li':
try:
classes = filter(lambda p: p[0] == 'class', attrs)[0][1]
except IndexError:
pass
else:
if classes == 'public source':
self.level = 1
elif self.level == 1:
if tag == 'h3':
self.level = 2
elif self.level == 2:
if tag == 'a':
self.level = 3
def handle_data(self, data):
if self.level == 3:
self.repos.append(data)
self.level = 0
def get_user_repos(user):
url = 'http://github.com/%s/' % (user,)
logging.debug('Fetching URL %s', url)
response = urllib2.urlopen(url)
logging.debug('Got response code %d', response.code)
if response.code != 200:
raise Exception('Got failure from GitHub server')
data = response.read()
logging.debug('Got %d bytes', len(data))
try:
content_type = response.headers['content-type']
m = re.search('charset=([^; ]+)', content_type)
encoding = m.group(1)
except KeyError:
encoding = 'ascii'
parser = GHRepoListParser()
parser.feed(data.decode(encoding))
result = parser.repos
logging.debug('Found repos: %s', result)
return result
def ensure_exists(args, username):
user_dir = os.path.join(args.target_dir, username)
if not os.path.exists(user_dir):
logging.info('Creating missing dir %s', user_dir)
os.mkdir(user_dir)
return False, user_dir
return True, user_dir
class GitError(Exception):
pass
def git(*args):
cmd = ('git',) + args
logging.debug("Executing command '%s'", ' '.join(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode == -signal.SIGINT:
raise KeyboardInterrupt
elif p.returncode < 0:
raise GitError("Process '%s' was terminated by signal %d" % (
' '.join(cmd), -p.returncode))
elif p.returncode > 0:
raise GitError(
"Process '%s' returned code %d.\nstdout:\n%s\nstderr:%s\n" % (
' '.join(cmd), p.returncode, out, err))
def sync_repo(user_dir, username, repo):
repo_path = os.path.join(user_dir, repo)
repo_url = 'git://github.com/%s/%s' % (username, repo)
logging.info('Syncing %s with %s.', repo_path, repo_url)
try:
if os.path.exists(repo_path):
git('--git-dir', repo_path, 'fetch')
else:
git('clone', '--mirror', repo_url, repo_path)
except GitError as ex:
        logging.error('Sync failed: %s', ex)
return False
else:
return True
def main():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument('repos', metavar='SPEC', nargs='+', type=unicode,
help="repository or user spec (e.g. username or username/repo)")
argparser.add_argument('--target-dir', '-D',
help="directory to store repositories")
argparser.add_argument('--verbose', '-v', dest='verbose',
action='store_const', const=1, default=0)
argparser.add_argument('--debug', '-d', dest='verbose',
action='store_const', const=2)
args = argparser.parse_args()
logging.basicConfig(
level=(logging.WARNING, logging.INFO, logging.DEBUG)[args.verbose])
repos = []
good = True
for spec in args.repos:
spl = spec.split('/')
if len(spl) == 1:
username = spl[0]
repos = get_user_repos(username)
existed, user_dir = ensure_exists(args, username)
for item in os.listdir(user_dir):
path = os.path.join(user_dir, item)
if os.path.isdir(path) and item not in repos:
logging.info('Deleting repo missing at GitHub %s', path)
shutil.rmtree(path)
for repo in repos:
                good = sync_repo(user_dir, username, repo) and good
elif len(spl) == 2:
existed, user_dir = ensure_exists(args, spl[0])
            good = sync_repo(user_dir, *spl) and good
else:
logging.error('Bad spec: %s', spec)
return 1
if __name__ == '__main__':
sys.exit(main())
| true |
553fe516352b922e2cae3240f9668c5f9b90786f
|
Python
|
papalagichen/leet-code
|
/0190 - Reverse Bits.py
|
UTF-8
| 776 | 3.234375 | 3 |
[] |
no_license
|
class Solution:
def reverseBits(self, n):
s = "{:b}".format(n)
return int(('0' * (32 - len(s)) + s)[::-1], 2)
class Solution2:
def reverseBits(self, n):
s = self.int_to_binary_string(n)
return self.binary_string_to_int(('0' * (32 - len(s)) + s)[::-1])
def binary_string_to_int(self, s):
return reduce(lambda x, y: x + y, [int(x) * 2 ** y for x, y in zip(list(s), range(len(s) - 1, -1, -1))])
def int_to_binary_string(self, n):
s = ''
while n:
s = str(n & 1) + s
n >>= 1
return s
if __name__ == '__main__':
import Test
Test.test((Solution().reverseBits, Solution2().reverseBits), [
(0, 0),
(1, 2 ** 31),
(43261596, 964176192),
])
| true |
fb06cea0c548c34f800478c91b87cb4b199736b9
|
Python
|
lyyanjiu1jia1/OrderPreservingEncryption
|
/plot/analysis_tools.py
|
UTF-8
| 319 | 3.234375 | 3 |
[] |
no_license
|
import numpy as np
def linear_regression(x, y):
"""
:param x: n-by-m matrix, will be expanded to (m + 1)-columns
:param y: n-by-1 matrix
:return:
"""
x = np.concatenate((x, np.ones((x.shape[0], 1))), axis=1)
w = np.linalg.inv(x.transpose().dot(x)).dot(x.transpose()).dot(y)
return w
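# Minimal usage sketch: recover the slope and intercept of y = 2x + 1 from noisy
# samples. The last weight is the intercept because a column of ones is appended.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.rand(100, 1)
    y = 2 * x + 1 + 0.01 * rng.randn(100, 1)
    print(linear_regression(x, y))  # approximately [[2.], [1.]]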
| true |
bb9e8b681880bc64c93133b2626ab4c2c36e95d8
|
Python
|
Edixon112/EBGYM
|
/altiria/rest/restPythonAltiriaCert.py
|
UTF-8
| 3,371 | 2.703125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Altiria TIC SL
# All rights reserved.
# This sample code is provided solely to illustrate the use of Altiria's SMS sending gateway
# For customised use of the code, consult the technical specifications API, where you will also find
# more programming examples in other programming languages and other protocols (http, REST, web services)
import requests
import json as JSON
def altiriaCert(destination, fType):
print 'Enter altiriaCert: destination='+destination+', type: '+fType
try:
        #Set the base URL of the REST resources
baseUrl = 'http://www.altiria.net/apirest/ws'
        #Build the JSON message
        #XX, YY and ZZ correspond to the user's identification values in the system.
        #domainId is only needed when the login is not an email
#credentials = {'domainId': 'XX', 'login': 'YY', 'passwd': 'ZZ'}
credentials = {'login': 'YY', 'passwd': 'ZZ'}
document = {'destination': destination, 'type': fType, 'webSig': True}
jsonData = {'credentials': credentials, 'document': document}
        #Set the content type of the POST request
contentType = {'Content-Type':'application/json;charset=UTF-8'}
        #Add the JSON to the request body
        #Set the maximum time to wait for the connection to the server (5 seconds)
        #Set the maximum time to wait for the server's response (60 seconds)
#timeout(timeout_connect, timeout_read)
        #Send the request and retrieve the response
r = requests.post(baseUrl+'/certPdfFile', data=JSON.dumps(jsonData), headers=contentType, timeout=(5, 60))
        #Error in the server response
if str(r.status_code) != '200':
print 'ERROR GENERAL: '+str(r.status_code)
print r.text
else:
            #Process the captured response
print 'Codigo de estado HTTP cmd: '+str(r.status_code)
jsonParsed = JSON.loads(r.text)
status = str(jsonParsed['status'])
print 'Codigo de estado Altiria: '+status
if status != '000':
print 'Error: '+r.text
else:
print 'Respuesta cmd: '+str(r.text)
f = open("file.pdf", "rb")
try:
contentType = {'Content-Type':'application/pdf'}
r = requests.post(str(jsonParsed['url']),
data=f,
headers=contentType,
                    #Set the maximum time to wait for the connection to the server (5 seconds)
                    #Set the maximum time to wait for the server's response (60 seconds)
timeout=(5, 60)) #timeout(timeout_connect, timeout_read)
finally:
f.close()
            if str(r.status_code) != '200': #Error in the server response
print 'Error general subiendo fichero: '+str(r.status_code)
            else: #Process the response
print 'Codigo de estado HTTP subiendo fichero: '+str(r.status_code)
print 'Respuesta subiendo fichero: '+str(r.text)
parsed_json = JSON.loads(r.text)
status = parsed_json['status']
if status == '000':
print 'Proceso terminado con exito'
else:
print "Error Altiria. Codigo de estado: "+status
except requests.ConnectTimeout:
print "Tiempo de conexión agotado"
except requests.ReadTimeout:
print "Tiempo de respuesta agotado"
except Exception as ex:
print "Error interno: "+str(ex)
altiriaCert('346xxxxxxxx','simple')
| true |
64525ed451267a374985aaf26a81befe828c7533
|
Python
|
AlpesMachines/mpd-utils
|
/utils/keygroup.py
|
UTF-8
| 8,274 | 2.78125 | 3 |
[] |
no_license
|
'''
Python script to manipulate keygroups for MPCv2.3 and MPC Essentials.
A keygroup file is XML which declares how samples will be trigger from
the midi data and how they are tuned. This script allows the triggers
to be moved around the keyboard and to merge keygroup files - effectively
creating a keyboard split between multiple instruments.
'''
import xml.etree.ElementTree as ET
from optparse import OptionParser
usage = "usage: %prog [options] FILENAME"
parser = OptionParser(usage)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-O", "--same", dest="samefile",
action="store_true", help="write data to same file")
parser.add_option("-o", "--output", dest="outfile",
help="write data instead to OUTFILE")
parser.add_option("-n", "--name", dest="name",
help="change name of the keygroup")
parser.add_option("-m", "--merge", dest="merge",
help="merge in the samples from a second keygroup file")
parser.add_option("-d", "--delete", dest="delete",
help="delete a specific instrument")
parser.add_option("-D", "--delrange", dest="delrange",
help="delete a range of instruments (positive=up-to, negative=up-from)")
parser.add_option("-s", "--semi", dest="semi",
help="change tuning of instruments by number of SEMI-tones (positive or negative)")
parser.add_option("-S", "--semisamp", dest="semisamp",
help="change tuning of samples by number of SEMI-tones (positive or negative)")
parser.add_option("-M", "--move", dest="movekeys",
action="store_true", help="move keys by same number of semitones")
parser.add_option("-k", "--keyspan", dest="keyspan",
help="limit the keyspan of instruments by number of SEMI-tones (positive or negative)")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("input FILE not specified")
if options.verbose:
print("Reading %s..." % args[0])
# Open primary XML file
tree = ET.parse(args[0])
root = tree.getroot()
# Find the instruments section
program = root.find("Program")
instruments = program.find("Instruments")
if options.name:
name = program.find("ProgramName")
name.text = options.name
lowest = 128
highest = 0
last_inst = 0
# print out all the high and low notes for each instrument
for instrument in list(instruments.iter("Instrument")):
if options.verbose:
print(instrument.tag, instrument.attrib)
inst_active = False
for layers in instrument.iter("Layers"):
for layer in layers.iter("Layer"):
for sample_name in layer.iter("SampleName"):
if options.verbose:
print(layer.tag, layer.attrib, sample_name.text)
if sample_name.text:
inst_active = True
# ignore instruments which are not used
if not inst_active:
if options.merge:
if options.verbose:
print("Removing unused instrument:", instrument.attrib)
instruments.remove(instrument)
continue
# ignore instruments which are marked for deletion
if options.delete:
if int(options.delete) == int(instrument.attrib['number']):
if options.verbose:
print("Deleting instrument:", instrument.attrib)
instruments.remove(instrument)
continue
if options.delrange:
for note in instrument.iter("LowNote"):
low_note = int(note.text)
for note in instrument.iter("HighNote"):
high_note = int(note.text)
# delete outright
if ((int(options.delrange) > 0 and int(options.delrange) >= high_note) or
(int(options.delrange) < 0 and (0-int(options.delrange)) <= low_note)):
if options.verbose:
print("Deleting range:", instrument.attrib)
instruments.remove(instrument)
continue
# modify to limit high/low note range
if (int(options.delrange) > 0 and int(options.delrange) >= low_note):
if options.verbose:
print("Limiting LowNote:", instrument.attrib, options.delrange)
for low_note in instrument.iter("LowNote"):
low_note.text = str(int(options.delrange)+1)
if (int(options.delrange) < 0 and (0-int(options.delrange)) <= high_note):
if options.verbose:
print("Limiting HighNote:", instrument.attrib, options.delrange)
for high_note in instrument.iter("HighNote"):
                high_note.text = str(-int(options.delrange) - 1)
# have to re-write instrument numbers as one (or more)
# may have been deleted
last_inst = last_inst + 1
instrument.attrib['number'] = str(last_inst)
ignore_base_note = False
root_note = False
if instrument.find("IgnoreBaseNote").text == "True":
ignore_base_note = True
tune_coarse = instrument.find("TuneCoarse")
if options.semi and (not ignore_base_note):
tune_coarse.text = str(int(tune_coarse.text) - int(options.semi))
if options.verbose:
print("adjusting instrument tune:", tune_coarse.text)
for layers in instrument.iter("Layers"):
for layer in layers.iter("Layer"):
tune_coarse = layer.find("TuneCoarse")
sample_name = layer.find("SampleName")
if sample_name.text:
root_note = int(layer.find("RootNote").text)
if options.semisamp and (not ignore_base_note):
tune_coarse.text = str(int(tune_coarse.text) - int(options.semisamp))
if options.verbose:
print("adjusting sample tune:", tune_coarse.text)
semi_adjust = 0
if options.movekeys and options.semi and (not ignore_base_note):
semi_adjust = semi_adjust + int(options.semi)
if options.movekeys and options.semisamp and (not ignore_base_note):
semi_adjust = semi_adjust + int(options.semisamp)
if options.verbose:
print("Root Note:", root_note)
for high_note in instrument.iter("HighNote"):
if options.semi or options.semisamp:
high_note.text = str(int(high_note.text) + semi_adjust)
if options.keyspan and int(options.keyspan) > -1:
high_note.text = str(root_note + int(options.keyspan))
if options.verbose:
print("High Note:", high_note.text)
if int(high_note.text) > highest:
highest = int(high_note.text)
for low_note in instrument.iter("LowNote"):
if options.semi or options.semisamp:
low_note.text = str(int(low_note.text) + semi_adjust)
if options.keyspan and int(options.keyspan) < 1:
low_note.text = str(root_note + int(options.keyspan))
if options.verbose:
print("Low Note:", low_note.text)
if int(low_note.text) < lowest:
lowest = int(low_note.text)
if options.verbose:
print("Lowest Note:", lowest)
print("Highest Note:", highest)
print("Last Instrument:", last_inst)
# --------
# Merge in the instruments from a second keygroup file
if options.merge:
# Open primary XML file
merge_tree = ET.parse(options.merge)
merge_root = merge_tree.getroot()
# Find the instruments section
merge_program = merge_root.find("Program")
merge_instruments = merge_program.find("Instruments")
for instrument in merge_instruments.iter("Instrument"):
if options.verbose:
print(instrument.tag, instrument.attrib)
inst_active = False
for layers in instrument.iter("Layers"):
for layer in layers.iter("Layer"):
for sample_name in layer.iter("SampleName"):
if options.verbose:
print(layer.tag, layer.attrib, sample_name.text)
if sample_name.text:
inst_active = True
# ignore instruments which are not used
if not inst_active:
continue
last_inst = last_inst + 1
if options.verbose:
print("Appending Instrument as:", last_inst)
instrument.attrib['number'] = str(last_inst)
# this is the original XML tree
instruments.append(instrument)
# Correct the number of keygroups
keygroups = program.find("KeygroupNumKeygroups")
keygroups.text = str(last_inst)
# ---------------------
# write out the changes
if options.outfile:
tree.write(options.outfile, encoding='utf-8', xml_declaration=True)
if options.samefile:
tree.write(args[0], encoding='utf-8', xml_declaration=True)
| true |
8006721ac6e9adb1060c889f3a4eafbbdd3e7735
|
Python
|
dantsub/holbertonschool-higher_level_programming
|
/0x04-python-more_data_structures/101-square_matrix_map.py
|
UTF-8
| 126 | 2.828125 | 3 |
[] |
no_license
|
#!/usr/bin/python3
def square_matrix_map(matrix=[]):
return list(map(lambda a: list(map(lambda n: n * n, a)), matrix[:]))
| true |
0e991a46a638912fc6585967a81aab6452431422
|
Python
|
compagnb/SP20-IntermediatePython
|
/codeExercises/wk1_moonFunction.py
|
UTF-8
| 222 | 3.703125 | 4 |
[] |
no_license
|
def moon_weight(weight, increase, years):
years = years + 1
for year in range(1, years):
weight = weight + increase
moon_weight = weight * 0.165
print('Year %s is %s' % (year, moon_weight))
moon_weight(35, 0.3, 5)
| true |
8f97bc2e181bb2bf2095660e55e60e76884d4ab9
|
Python
|
JonasJR/examen
|
/MachineLearning/errorRate/testCrossVal.py
|
UTF-8
| 5,945 | 3.34375 | 3 |
[] |
no_license
|
from sklearn.datasets import load_digits, load_iris
from sklearn.svm import SVC
from sklearn import tree
from sklearn import linear_model
from sklearn import neighbors
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
import numpy as np
import math
import random
#Two lines to ignore an error message about falling back to a gles driver
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
######### Note for IVAN!!!! ############
# Just an intro to how python works. A python program works kind of like a C-program.
# It starts by reading from the top and executing everything it encounters on the way.
# If it encounters something that has not yet been declared it will generate an error.
# We can execute commands directly in the code and create functions that we later call.
# The easiest is therefore to define all the functions in the beginning and then execute commands.
#We start off by loading the iris datasets and store them in data and target
iris = load_iris()
data, target = iris.data, iris.target
#We create the cross validator
linreg = linear_model.LinearRegression()
svc = SVC(kernel='linear', C=1.0)
tree = tree.DecisionTreeClassifier()
sgd = linear_model.SGDClassifier()
knn = neighbors.KNeighborsClassifier()
gnb = GaussianNB()
# To simplify the looping later on I stole some code online that generates an array
# of "possible divisions" of a given number. So passing in 150 would yeild
# following result: [1,2,3,5,6,10,15,25,30,50,75,150]
# We will remove the first and last item in the list since we are not using them
def divisorGenerator(n):
large_divisors = []
for i in range(1, int(math.sqrt(n) + 1)):
if n % i == 0:
yield i
if i*i != n:
large_divisors.append(n/i)
for divisor in reversed(large_divisors):
yield divisor
#We define a function for creating the randomized order of the dataset iris
#This function scrambles the data using numpy which is a good tool for math
def randomize():
    #first we need to tell the function that data and target are global
global data, target
#We create an empty copy of the dataset and use numpy (np) to make sure they are the same size
shuffle = np.arange(len(data))
#we then shuffle the indexes of the dataset using numpy
np.random.shuffle(shuffle)
#we then store the shuffled data in data and the shuffled target in target
    #this way we use the same indexing and make sure that the correct target and data are associated
data = data[shuffle]
target = target[shuffle]
#We define a function that gets the score of the kfold
#we loop through the splits and for each split we
#calculate the score and create the avarage score
def getScore(algorithm,data,target,i):
    #First we get the per-split scores to use in the average calculation
    tempScores = cross_val_score(algorithm,data,target,cv=i)
    #Now we just have to get the average of the scores and then return it
    return sum(tempScores) / len(tempScores)
#Test to return lowest score:
#return min(float(s) for s in tempScores)
#We define the main function that loops through the k-fold split sizes,
#gets the scores for each classifier and plots the resulting curves
def main():
    #here we just get the list of divisors for the looping and drop the first item and the last two
loop = list(divisorGenerator(len(data)))
loop.pop(0)
loop.pop(len(loop)-1)
loop.pop(len(loop)-1)
print(loop)
temp = []
counter = 0
for i in loop:
temp.append(counter)
counter += 1
#We start with calling the randomize function to make sure that the data is shuffled
randomize()
#We create a vector for storing the scores
scores1 = []
scores2 = []
scores3 = []
scores4 = []
scores5 = []
scores6 = []
    #We create a loop that loops through the length of the dataset divided by two.
#This makes the last iteration create kfold with 2 elements in each group. (2 training data and 148 test data)
#We start at 2 because 0 and 1 is not possible when doing a kfold
for i in loop:
        #We then get the cross_val_score of the dataset
score1 = getScore(svc,data,target, i)
score2 = getScore(linreg,data,target, i)
score3 = getScore(tree,data,target, i)
score4 = getScore(sgd,data,target, i)
score5 = getScore(knn,data,target, i)
score6 = getScore(gnb,data,target, i)
#And add the calculated score to the scores vector
scores1.append(score1)
scores2.append(score2)
scores3.append(score3)
scores4.append(score4)
scores5.append(score5)
scores6.append(score6)
#We create a plt to visualize the curves
#Create the figure
plt.figure()
#Set title
plt.title("")
    #set x and y labels
plt.xlabel("K-fold (number of splits)")
plt.ylabel("Score")
#set it to grid style
plt.grid()
    #plot the score curve for each classifier with its own color
plt.plot(scores1, 'o-', label="SVC", color="r", linestyle="--")
plt.plot(scores2, 'o-', label="LinReg", color="g", linestyle="--")
plt.plot(scores3, 'o-', label="Tree", color="b", linestyle="--")
plt.plot(scores4, 'o-', label="SGDClassifier", color="black", linestyle="--")
plt.plot(scores5, 'o-', label="KNeighborsClassifier", color="grey", linestyle="--")
plt.plot(scores6, 'o-', label="GaussianNB", color="pink", linestyle="--")
#set the axis to correct values
plt.xticks(temp,loop)
#plt.axis([0,100,0.0,1.0])
#place the label in the top right
plt.legend(loc="best")
#show the figure
plt.show()
main()
| true |
daa532d1e3b1376324786614d00af53ee59e1024
|
Python
|
FaisalWant/ObjectOrientedPython
|
/Threading/IntSet.py
|
UTF-8
| 1,189 | 3.671875 | 4 |
[] |
no_license
|
class IntSet(object):
""" An intset is a set of integers"""
# Information about the implementation (not abstraction)
    # The value of the set is represented by a list of ints
    # Each int in this set occurs in self.vals exactly once
def __init__(self):
self.vals=[]
def insert(self, e):
""" Assume e is an integer an insert e into self"""
if not e in self.vals:
self.vals.append(e)
def member(self,e):
""" Assume e is an integer
Returns True if e is in self and Fals otherwise"""
return e in self.vals
def remove(self,e):
""" Assume e is an integer and removes e from self raises
value Error if e is not in self"""
try:
self.vals.remove(e)
except:
            raise ValueError(str(e) + ' not found')
def getMembers(self):
""" Returns a list containing the elements of self.
Nothing can be assumed about the order of the elements"""
return self.vals[:]
def __str__(self):
"""Returns a string representation of self"""
self.vals.sort()
result=''
for e in self.vals:
result= result+str(e)+','
return '{' + result[:-1] +'}'
s= IntSet()
s.insert(3)
s.insert(4)
print(s)
| true |
58955630df0c3c52faaa17216b025e14610bcf99
|
Python
|
duddles/nytimes_set_puzzle
|
/nytimes_set_puzzle.py
|
UTF-8
| 871 | 3.03125 | 3 |
[] |
no_license
|
import itertools
class Shape(object):
def __init__(self, index, symbol, color, number, shading):
self.index = index
self.symbol = symbol
self.color = color
self.number = number
self.shading = shading
def check_combo(combo):
    # Valid set: every attribute is either all the same or all different across the three shapes
    return all(len({getattr(shape, attr) for shape in combo}) in (1, 3)
               for attr in ('symbol', 'color', 'number', 'shading'))
shapes = []
shapes.append(Shape((0,0),'squiggle', 'red', 1, 'full'))
shapes.append(Shape((0,1),'squiggle', 'red', 2, 'empty'))
shapes.append(Shape((0,2),'triangle', 'red', 3, 'full'))
shapes.append(Shape((1,0),'oval', 'red', 2, 'full'))
shapes.append(Shape((1,1),'squiggle', 'red', 3, 'dash'))
shapes.append(Shape((1,2),'oval', 'red', 1, 'empty'))
shapes.append(Shape((2,0),'oval', 'red', 1, 'full'))
shapes.append(Shape((2,1),'triangle', 'red', 2, 'empty'))
shapes.append(Shape((2,2),'triangle', 'red', 2, 'dash'))
# to do - solve the combinations
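# One way to finish the "solve the combinations" todo above (illustrative): enumerate
# every 3-shape combination with itertools and keep those that satisfy check_combo.
for combo in itertools.combinations(shapes, 3):
    if check_combo(combo):
        print('valid set:', [shape.index for shape in combo])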
| true |
1aef5abff96878aa72a5fc8930520190ceb6a923
|
Python
|
mbollmann/perceptron
|
/mmb_perceptron/feature_extractor/pos_honnibal.py
|
UTF-8
| 2,844 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from .feature_extractor import FeatureExtractor
class Honnibal(FeatureExtractor):
"""Feature extractor based on the POS tagger by Matthew Honnibal.
<https://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/>
"""
_minimum_left_context_size = 1
_minimum_right_context_size = 1
def _get_sequenced(self, seq, pos, history=None):
word = seq[pos]
features = {}
features[u'bias'] = 1.0
features[u'this_word ' + word] = 1.0
features[u'this_suffix ' + word[-3:]] = 1.0
features[u'this_prefix ' + word[0]] = 1.0
features[u'left_suffix ' + seq[pos - 1][-3:]] = 1.0
features[u'right_suffix ' + seq[pos + 1][-3:]] = 1.0
for i in range(1, self._left_context_size + 1):
features[u'left_{0}_tag {1}'.format(i, history[pos - i])] = 1.0
features[u'left_{0}_word {1}'.format(i, seq[pos - i])] = 1.0
if i == 1:
features[u'this_word_left_tag {0} {1}'\
.format(word, history[pos - i])] = 1.0
else:
features[u'left_upto_{0}_tags {1}'\
.format(i, ' '.join(history[(pos - i):pos]))] = 1.0
for i in range(1, self._right_context_size + 1):
features[u'right_{0}_word {1}'.format(i, seq[pos + i])] = 1.0
return features
############################################################################
#### For Viterbi decoding, i.e. probably not needed ATM ####################
############################################################################
def get_fixed(self, seq, pos):
word = seq[pos]
features = {}
features[u'bias'] = 1.0
features[u'this_word ' + word] = 1.0
features[u'this_suffix ' + word[-3:]] = 1.0
features[u'this_prefix ' + word[0]] = 1.0
features[u'left_suffix ' + seq[pos - 1][-3:]] = 1.0
features[u'right_suffix ' + seq[pos + 1][-3:]] = 1.0
for i in range(1, self._left_context_size + 1):
features[u'left_{0}_word {1}'.format(i, seq[pos - i])] = 1.0
for i in range(1, self._right_context_size + 1):
features[u'right_{0}_word {1}'.format(i, seq[pos + i])] = 1.0
return features
def get_dynamic(self, seq, pos, history=None):
features = {}
for i in range(1, self._left_context_size + 1):
features[u'left_{0}_tag {1}'.format(i, history[pos - i])] = 1.0
if i == 1:
features[u'this_word_left_tag {0} {1}'\
.format(seq[pos], history[pos - i])] = 1.0
else:
features[u'left_upto_{0}_tags {1}'\
.format(i, ' '.join(history[(pos - i):pos]))] = 1.0
return features
| true |
923923c14af690754bd6d329afc70ce7634e12bb
|
Python
|
ahmadraouf/oop_exercise
|
/oop_exercises.py
|
UTF-8
| 1,539 | 3.75 | 4 |
[] |
no_license
|
class Employee:
def __init__(self , employee_number, name, address, salary, job_title):
self.employee_number = employee_number
self.__name = name
self.__address = address
self.__salary = salary
self.__job_title = job_title
def get_name(self):
return self.__name
def get_address(self):
return self.__address
def set_address(self, address):
self.__address = address
def get_salary(self):
return self.__salary
def get_job_title(self):
return self.__job_title
def print_result_horizantal(self):
print("Employee Information :""Employee Number = ",self.employee_number ,"Name = ", self.__name ,"Address = ", self.__address , "Salary = " , self.__salary , "Job title = ",self.__job_title)
def print_result_vretical(self):
print("Employee Information :","\nEmployee Number = ",self.employee_number ,"\nName = ", self.__name ,"\nAddress = ", self.__address , "\nSalary = " , self.__salary , "\nJob title = ",self.__job_title)
def __del__(self):
print(self.__name + " has been deleted")
first_employee = Employee(1,"Mohammad Khaled", "Amman,Jordan", 500, "Consultant")
second_employee = Employee(2,"Hala Rana", "Aqaba,Jordan", 750, "Manager")
first_employee.set_address("USA")
first_employee.print_result_horizantal()
first_employee.print_result_vretical()
print("First employee address :", first_employee.get_address())
del first_employee
del second_employee
| true |
f2d368525021c5a2d1a01bfcfb31e21d90adcd94
|
Python
|
ddtkra/atcoder
|
/abc076/C/main.py
|
UTF-8
| 580 | 2.703125 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
# Failed to predict input format
S = input().replace('?', '.')
T = input()
import re
import sys
for i in range(len(S)-len(T),-1,-1):
if(re.match(S[i:i+len(T)],T)):
S = S.replace('.', 'a')
print(S[:i]+T+S[i+len(T):])
exit()
else:
print("UNRESTORABLE")
if __name__ == '__main__':
main()
| true |
c774de124f79f545af197812c70eacb83585d451
|
Python
|
bochuxt/mini_psp
|
/src/mini_psp/utils/metric_utils.py
|
UTF-8
| 4,026 | 3.03125 | 3 |
[] |
no_license
|
import numpy as np
from sklearn import metrics
def get_iou(target,prediction):
'''Returns Intersection over Union (IoU).'''
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / np.sum(union)
return iou_score
def get_class_iou(target,prediction,n_classes):
'''Returns class IoUs.'''
assert len(target.shape)==4
assert len(prediction.shape)==4
sum =0
IoU = {}
for i in range(n_classes):
cur_iou = get_iou(prediction[:,:,:,i],target[:,:,:,i])
sum+=cur_iou
IoU[i+1] = cur_iou
IoU['mean'] = sum/n_classes
return IoU
def get_class_f1(target,prediction,n_classes):
'''Returns class F1-scores.'''
assert len(target.shape)==4
assert len(prediction.shape)==4
sum =0
f1 = {}
for i in range(n_classes):
cur_f1 = metrics.f1_score(prediction[:,:,:,i].reshape(-1,1),target[:,:,:,i].reshape(-1,1))
sum+=cur_f1
f1[i+1] = cur_f1
f1['mean'] = sum/n_classes
return f1
def evaluate(target,prediction,n_classes):
'''Returns class accuracies, IoUs and F1-scores.'''
#acc = get_class_accuracies(target,prediction,n_classes)
iou = get_class_iou(target,prediction,n_classes)
f1 = get_class_f1(target,prediction,n_classes)
#return acc,iou,f1
return iou,f1
def conf_matrix(target,prediction,n_classes):
'''Returns confusion matrix.'''
# Need to remove the 0 values in the target mask if any.
prediction = np.reshape(prediction,(-1,n_classes))
target = np.reshape(target,(-1,n_classes))
cm = metrics.confusion_matrix(prediction.argmax(axis=1),target.argmax(axis=1))
return cm
def eval_conf_matrix(cm,n_classes):
'''Returns evaluation metrics from confusion matrix.'''
cm = np.array(cm)
    sum = 0
    total = 0
    prod_acc = [0]*n_classes
    user_acc = [0]*n_classes
    total_pred = [0]*n_classes
    total_test = [0]*n_classes
    gc = 0
for i in range(n_classes):
for j in range(n_classes):
total_pred[i]+= cm[i][j]
total_test[j]+=cm[i][j]
if i==j:
sum+=cm[i][j]
total+=cm[i][j]
# User and Producer Accuracies
for i in range(n_classes):
gc+=total_pred[i]*total_test[i]
prod_acc[i] = cm[i][i]/total_test[i]
user_acc[i] = cm[i][i]/total_pred[i]
# Overall Accuracy
ovAc = sum/total
# Kappa coefficient
kappa = (total*sum - gc)/(total*total - gc)
print("Total pred :",total_pred)
print("Total target :",total_test)
print("Total :",total)
return ovAc, kappa, prod_acc, user_acc
if __name__=='__main__':
######################################################################
#### TESTING
######################################################################
n_classes = 5
prediction = np.load('prediction.npy')
target = np.load('target.npy')
iou, f1 = evaluate(target,prediction,n_classes)
print("IoU : ",iou)
print("F1 : ",f1)
#cm = conf_matrix(target,prediction,n_classes)
#Combined1
# cm = [ [119397,540,304,12182,7327],
# [243,7169,43,4319,1737],
# [134,0,5776,721,200],
# [827,2,28,7655,811],
# [793,0,57,278,31494]
# ]
#Combined2
cm = [ [119320,540,372,12259,7327],
[243,7169,43,4319,1737],
[266,0,6445,1636,248],
[827,2,28,7655,811],
[793,0,57,278,31494]
]
ovAc, kappa, prod_acc, user_acc = eval_conf_matrix(cm,n_classes)
print("Overall Accuracy : ",ovAc)
print("Kappa coeff : ",kappa)
print("Producer Accuracy : ",prod_acc)
print("User Accuracy : ",user_acc)
# Kappa checks
# prediction = np.reshape(prediction,(-1,n_classes))
# target = np.reshape(target,(-1,n_classes))
# print("Kappa score : ",metrics.cohen_kappa_score(target.argmax(axis=1),prediction.argmax(axis=1)))
| true |
421169389393a8288bbb04a72ccaf56716057b35
|
Python
|
Matheus-Barros/Objects_Recognition
|
/Detect_Objects.py
|
UTF-8
| 3,742 | 2.546875 | 3 |
[] |
no_license
|
import sys
import dlib
import cv2
import time
from datetime import datetime
import pandas as pd
import warnings
import glob
warnings.filterwarnings("ignore")
def Percent(value):
if value >= 1.0:
return 100
else:
x = str('{:.0%}'.format(value))
return int(x.split('%')[0])
#LOG LIST INITIALIZATION
timestamp = []
produtoNomeLog = []
assertividade = []
pula_quadros = 1
captura = cv2.VideoCapture(0)
contadorQuadros = 0
font = cv2.FONT_HERSHEY_DUPLEX
#============== PARAMETERS ==========================
#SEGUNDOS PARA EXIBIR O QRCODE
segundosExbicao = 5
taxaDeErro = 50 #Capture objects above this percent
resolucao = 1
#====================================================
path = 'SVMs Processed\\'
pathSvms = glob.glob(path + '*')
qrCodeNome = []
qrCodeImages = []
#LOADING SVMS
produtosTreinados = []
nomeProdutosTreinados = []
for svm in pathSvms:
produtosTreinados.append(dlib.fhog_object_detector(svm))
nomeProdutosTreinados.append(svm.split('-')[1].replace('.svm',''))
qrCodeImages.append(cv2.imread('QR\\qr-code-{produto}.png'.format(produto = svm.split('-')[1].replace('.svm',''))))
qrCodeNome.append(svm.split('-')[1].replace('.svm',''))
#Resize Imgs
ind = 0
for x in qrCodeImages:
qrCodeImages[ind] = cv2.resize(qrCodeImages[ind],(100,100))
ind+=1
while captura.isOpened():
conectado, frame = captura.read()
[boxes, confidences, detector_idxs] = dlib.fhog_object_detector.run_multiple(produtosTreinados, frame, upsample_num_times=1, adjust_threshold=0.0)
    #SKIP FRAMES: ONLY PROCESS EVERY pula_quadros-TH FRAME
contadorQuadros += 1
if contadorQuadros % pula_quadros == 0:
index_nome = 0
for o in boxes:
e, t, d, f = (int(o.left()), int(o.top()), int(o.right()), int(o.bottom()))
if Percent(confidences[index_nome]) >= taxaDeErro:
#SQUARE
cv2.rectangle(frame, (e, t), (d, f), (0, 0, 255), 2)
#PRODUCT NAME
cv2.putText(frame, nomeProdutosTreinados[detector_idxs[index_nome]], (e,f +30), font, 1.0, (0, 0, 255), 2)
#CONFIDENCE
cv2.putText(frame, str(Percent(confidences[index_nome])) + '%', (e,t-10), font, 1.0, (0, 0, 255), 2)
#SHOW QRCode
frame[10:110,10:110] = qrCodeImages[detector_idxs[index_nome]]
cv2.putText(frame,'Visite o site para mais informacoes',(120, 70),font,.65,(255,255,255),2)
#Logs
timestamp.append(datetime.now())
assertividade.append(str(Percent(confidences[index_nome])) + '%')
produtoNomeLog.append(nomeProdutosTreinados[detector_idxs[index_nome]])
index_nome+=1
cv2.imshow("Preditor de Objetos", frame) #ESC TO EXIT
if cv2.waitKey(1) & 0xFF == 27:
break
df = pd.DataFrame(data = {'NomeProduto':produtoNomeLog,'Assertividade':assertividade,'Timestamp':timestamp})
path = 'Logs\\'
df.to_excel(path+'Log_{day}_{month}_{year}_{hour}_{min}_{secs}.xlsx'.format(day = datetime.now().day,
month = str(datetime.now().month),
year = str(datetime.now().year),
hour = str(datetime.now().hour),
min = str(datetime.now().minute),
secs = str(datetime.now().second)),index = False)
captura.release()
cv2.destroyAllWindows()
sys.exit(0)
| true |
55065c6dfccd66bc724deac4ff9eda85afd04860
|
Python
|
MageJohn/EMPR_Scanner
|
/src/python/remote_function_call.py
|
UTF-8
| 743 | 2.984375 | 3 |
[] |
no_license
|
import serial
from exceptions import *
port = '/dev/ttyACM0'
baud = 9600
ser = serial.Serial(port,baud)
func_codes = {} # e.g dico = {'funcname': b'\x01'}
func_params = {} # e.g dico = {'funcname': [b'param1', b'param2', ...]}
def check_func_param_match(funcname, params):
    # args arrive as a tuple while func_params stores lists, so normalise before comparing
    return list(func_params.get(funcname, [])) == list(params)
def remote_function_call(funcname, *args):
func_code = func_codes[funcname]
try:
if (check_func_param_match(funcname,args)):
ser.write(func_code)
for param in args:
ser.write(param)
        else:
            raise NoFuncParamMatch
    except NoFuncParamMatch:
        print("The parameters did not match the function")
| true |
587a383a1c84c2242457bad9a949fe7c7dd8dabf
|
Python
|
tynski/Algorithms
|
/Sorting/mergeSort.py
|
UTF-8
| 686 | 3.8125 | 4 |
[] |
no_license
|
def mergeSort(array):
N = len(array)
if N == 1:
return array
arrayHalf = N // 2
firstHalf = mergeSort(array[arrayHalf:])
secondHalf = mergeSort(array[:arrayHalf])
return merge(firstHalf, secondHalf)
def merge(p, r):
i = 0
j = 0
sortedArray = []
while i < len(p) and j < len(r):
if p[i] < r[j]:
sortedArray.append(p[i])
i += 1
else:
sortedArray.append(r[j])
j += 1
if i < len(p):
sortedArray.extend(p[i:])
if j < len(r):
sortedArray.extend(r[j:])
return sortedArray
print(mergeSort([4,5,8,4,2,4,1]))
print(mergeSort([12, 11, 13, 5, 6, 7]))
| true |
dc32ef5520de3a2aa2f2105d363df1a4cd7403af
|
Python
|
Stefan228/Simich-PM20-6
|
/Зачччет цсв.py
|
UTF-8
| 856 | 3.015625 | 3 |
[] |
no_license
|
import csv
def availability(name_of_book, adress_of_store):
store_id = ''
try:
with open('shops.csv') as f:
reader = csv.reader(f, delimiter=';')
head = next(reader)
body = [line for line in reader]
for store in body:
if store[1] == adress_of_store:
store_id = store[0]
with open('books.csv') as f:
reader = csv.reader(f, delimiter=';')
head = next(reader)
body = [line for line in reader]
            for book in body:
                if name_of_book == book[1] and store_id in book[3].split(','):
                    return True
            return False
    except Exception:
        return 'Произошла ошибка, проверьте введенные данные.'
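# A hypothetical usage sketch -- the title, address and CSV column layout are assumptions:
# shops.csv rows look like "store_id;address;...", books.csv rows like "book_id;title;...;store_ids"
# print(availability('War and Peace', 'Lenina St. 1'))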
| true |
7aa3fd54ae219a8c3324e2118c2c26d2b8cd85f7
|
Python
|
actcheng/leetcode-solutions
|
/0388_Longest_Absolute_File_Path.py
|
UTF-8
| 581 | 3.109375 | 3 |
[] |
no_license
|
# Problem 388
# Date completed: 2019/11/11
# 28 ms (96%)
class Solution:
def lengthLongestPath(self, input: str) -> int:
arr = input.split('\n')
longest = 0
stack = []
level = 0
while arr:
a = arr.pop(0)
split = a.split('\t')
nt = len(split)-1
name = split[-1]
stack = stack[:nt]
stack.append(name)
if '.' in name:
longest = max(longest, len('/'.join(stack)))
# print('/'.join(stack))
return longest
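# A quick sanity check with the example from the problem statement (assumed); the longest
# absolute path is "dir/subdir2/file.ext", so the expected result is 20:
print(Solution().lengthLongestPath("dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"))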
| true |
cec85a5dd76df207e06a9b777b79f0cf9afad4f8
|
Python
|
scottenriquez/jitterbug
|
/jitterbug.py
|
UTF-8
| 118 | 2.671875 | 3 |
[] |
no_license
|
import pyautogui
import time
while True:
pyautogui.moveRel(0, 10)
pyautogui.moveRel(0, -10)
time.sleep(5)
| true |
cec88dfc73674af60900ee3757a0bc9bda9b092a
|
Python
|
Hyperdraw/FridayClub
|
/ai/generate.py
|
UTF-8
| 1,321 | 3.25 | 3 |
[] |
no_license
|
from json import loads, dumps
from os.path import exists
print('=====')
print('This tool will help you create an NPC JSON file.')
print('You will be continuously asked for questions and responses until you press stop.')
print('=====')
npc_path = input('Enter the name of a file to edit or create. (Should end in .json): ')
if exists(npc_path):
with open(npc_path, 'r') as npc_file:
npc = loads(npc_file.read())
else:
npc = []
print()
while True:
print('Add Rule #' + str(len(npc)))
print('-----')
print('Begin entering patterns (messages similar to what the user will enter to trigger this rule).')
matches = []
while True:
match = input('Enter a pattern or leave blank to end: ')
if len(match.strip()) == 0:
break
else:
matches.append(match.strip().casefold())
print()
print('Begin entering possible responses. (The bot will choose a random response from this list when this rule is triggered.)')
responses = []
while True:
response = input('Enter a response or leave blank to end: ')
if len(response.strip()) == 0:
break
else:
responses.append(response.strip().casefold())
npc.append({"match": matches, "responses": responses})
with open(npc_path, 'w+') as npc_file:
npc_file.write(dumps(npc))
print('-----')
print()
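# The resulting file is a JSON list of rules; a hypothetical example of its contents:
# [{"match": ["hello", "hi"], "responses": ["hey there!", "hello!"]}]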
| true |
2a9f9bee3a96782f0ff81544e206d7a6f2f13603
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_78/127.py
|
UTF-8
| 1,106 | 3.078125 | 3 |
[] |
no_license
|
#! /usr/bin/python
import sys
cases = int(sys.stdin.readline()[:-1])
actual_case = 0
while actual_case < cases:
# reading and so
actual_case += 1
	#read the input numbers n, pd, pg
numbers = sys.stdin.readline()[:-1].split()
n = int(numbers[0])
pd = int(numbers[1])
pg = int(numbers[2])
ok_pd = False
ok_pg = True
if ((pd == 0) or (pd == 100)):
ok_pd = True
else:
min_n = 100
pd_pom = pd
for i in range(2):
if (pd_pom % 2) == 0:
min_n = min_n / 2
pd_pom = pd_pom / 2
if (pd_pom % 5) == 0:
min_n = min_n / 5
pd_pom = pd_pom / 5
if (min_n <= n):
ok_pd = True
if not ok_pd:
print "Case #%d: Broken" %(actual_case)
else:
if (pg == 0):
if (pd != 0):
ok_pg = False
if (pg == 100):
if (pd != 100):
ok_pg = False
if ok_pg:
print "Case #%d: Possible" %(actual_case)
else:
print "Case #%d: Broken" %(actual_case)
| true |
bbab972ce09308c01e2641fc35004ac7bac96487
|
Python
|
AlishaKochhar/KnowYourWords
|
/ProjectGUI.py
|
UTF-8
| 3,252 | 3.078125 | 3 |
[] |
no_license
|
from tkinter import *
import tkinter
from PIL import Image,ImageTk
import sqlite3
root=Tk()
image=Image.open("Background.JPG")
tkimage=ImageTk.PhotoImage(image)
w = tkimage.width()
h = tkimage.height()
root.geometry("%dx%d+0+0" % (w, h))
MainLabel=Label(root,image=tkimage)
MainLabel.pack(side='top', fill='both', expand='yes')
LabelM1=Label(MainLabel,text="Enter Word :")
val=StringVar()
v1=StringVar()
v2=StringVar()
v3=StringVar()
v4=StringVar()
v5=StringVar()
v6=StringVar()
entry=Entry(MainLabel,textvariable=val)
def click() :
conn=sqlite3.connect('Project.db')
c=conn.cursor()
SimWords=[]
MeanWords=[]
InputWord=val.get()
c.execute("SELECT * FROM Dictionary")
dataD=c.fetchall()
c.execute("SELECT * FROM WordMeaning")
dataWM=c.fetchall()
flag=0
i=0
j=0
for row in dataD :
if(row[0] == InputWord) :
flag=1
break
else :
flag=2
if(flag==1) :
NewFrame1=Toplevel(MainLabel)
labelNF11=Label(NewFrame1,image=tkimage)
labelNF11.pack(side='top', fill='both', expand='yes')
MsgExists=Message(labelNF11,textvariable=v1,relief=RAISED)
v1.set("Word Exists")
MsgExists.pack()
for i in dataD :
if(i[1]==len(InputWord) and i[2]==InputWord[0] and i[3]==InputWord[-1] and i[0]!=InputWord) :
SimWords.append(i[0])
k=0
for j in dataWM :
if(j[0] == InputWord) :
k=k+1
MeanWords.append(str(k))
MeanWords.append(j[1])
print("Done")
MsgSim=Message(labelNF11,textvariable=v2,relief=RAISED)
v2.set("Similar words : ")
MsgSim.pack()
scroll1=Scrollbar(labelNF11)
scroll1.pack(fill=Y)
listing1=Listbox(labelNF11,yscrollcommand=scroll1.set,width=50)
for i in range (0,len(SimWords)):
listing1.insert(END,SimWords[i])
listing1.pack()
scroll1.config(command=listing1.yview)
MsgMean=Message(labelNF11,textvariable=v4,relief=RAISED,width=150)
v4.set("Meanings : ")
MsgMean.pack()
scroll2=Scrollbar(labelNF11)
scroll2.pack(fill=Y)
listing2=Listbox(labelNF11,yscrollcommand=scroll2.set,width=150)
for i in range (0,len(MeanWords)):
listing2.insert(END,MeanWords[i])
listing2.pack()
scroll2.config(command=listing2.yview)
elif (flag==2) :
NewFrame2=Toplevel(MainLabel)
labelNF21=Label(NewFrame2,image=tkimage)
labelNF21.pack(side='top', fill='both', expand='yes')
MsgNotExists=Message(labelNF21,textvariable=v6,relief=RAISED)
v6.set("Word does not exists")
MsgNotExists.pack()
button=Button(MainLabel,text="OK",command=click)
entry.place(x=625,y=200)
button.place(x=660,y=400)
LabelM1.pack()
root.mainloop()
| true |
92fb970b22c6832fe2c9190e956cd17676196bd6
|
Python
|
sergiooli1997/lector-escritor
|
/lector-escritor.py
|
UTF-8
| 2,067 | 3.234375 | 3 |
[] |
no_license
|
import logging
import threading
import time
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
class Dato(object):
def __init__(self, start=''):
self.value = start
def cambiar(self, variable):
self.value = variable
def lector(lock, barrier, dato):
num_acquire = 0
print(threading.current_thread().name,
'Esperando en la barrera con {} hilos más'.format(barrier.n_waiting))
worker_id = barrier.wait()
print(threading.current_thread().name, 'Después de la barrera', worker_id)
time.sleep(1)
logging.debug('Intento acceder al dato.')
have_it = lock.acquire()
while num_acquire < 1:
try:
if have_it:
logging.debug('Accedio al dato. Lee {}'.format(dato.value))
num_acquire += 1
else:
logging.debug('Ocupado')
finally:
time.sleep(4.0)
if have_it:
lock.release()
def escritor(lock, var, dato):
logging.debug('Intento acceder a la BD.')
lock.acquire()
try:
logging.debug('Accedio a la BD.')
dato.cambiar(var)
logging.debug('Modifico el dato = {}'.format(dato.value))
time.sleep(1.0)
finally:
logging.debug('Dejo de modificar el dato.')
lock.release()
lock = threading.Lock()
dato = Dato()
NUM_THREADS = 2
barrier = threading.Barrier(NUM_THREADS)
threads_escritor = [threading.Thread(name='Escritor%s' % i, target=escritor, args=(lock, 'Hola Soy E%s' % i, dato,), )
for i in range(NUM_THREADS)]
threads_lector = [threading.Thread(name='Lector%s' % i, target=lector, args=(lock, barrier, dato,), )
for i in range(NUM_THREADS)]
for e in threads_escritor:
print(e.name, 'Iniciando')
time.sleep(0.5)
e.start()
for e in threads_escritor:
e.join()
for t in threads_lector:
print(t.name, 'Iniciando')
time.sleep(0.5)
t.start()
for t in threads_lector:
t.join()
| true |
1352006645380cf930cb2e8b9f897e8a77dab920
|
Python
|
Sangheun/programming-dev-5th
|
/decorators2.py
|
UTF-8
| 191 | 2.78125 | 3 |
[] |
no_license
|
import time
def memoize(fn):
cached = {}
def wrap(x,y):
key = (x,y)
if key not in cached:
cached[key] = fn(x,y)
return cached[key]
return wrap
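# A small usage sketch; slow_add below is an assumed example, not part of the original file:
@memoize
def slow_add(x, y):
    time.sleep(1)  # simulate an expensive computation
    return x + y

start = time.time()
print(slow_add(2, 3))  # computed on the first call (~1 second)
print(slow_add(2, 3))  # served from the cache on the second call (near-instant)
print('elapsed:', round(time.time() - start, 2), 'seconds')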
| true |
d62d67f8f95803d8c85f6edb7d7232d59f100340
|
Python
|
teriyakichicken/doublemeat
|
/test.py
|
UTF-8
| 4,499 | 2.640625 | 3 |
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import product
from sklearn.ensemble import RandomForestRegressor
def read_district(filename):
cols = ['district_hash', 'district_id']
df = pd.read_csv(filename, header=None, sep='\t', names=cols)
return df
def read_order(filename):
cols = ['order_id', 'driver_id', 'passenger_id', 'start_district_hash', 'dest_district_hash', 'price', 'time']
df = pd.read_csv(filename, header=None, sep='\t', names=cols)
return df
def read_weather(filename):
cols = ['Time', 'Weather', 'temperature', 'PM2.5']
df = pd.read_csv(filename, header=None, sep='\t', names=cols)
return df
def read_traffic(filename):
cols = ['district_hash', 'tj_level', 'tj_time']
df = pd.read_csv(filename, header=None, sep='\t', names=cols)
return df
def read_poi(filename):
cols = ['district_hash', 'poi_class']
df = pd.read_csv(filename, header=None, sep='\t', names=cols)
return df
def read_submit(filename):
cols = ['year','month','day','slot']
df = pd.read_csv(filename, header=0, sep='-', names=cols)
df['time'] = pd.read_csv(filename, header=0, names=['time'])
return df
def process_order(df):
df["answered"] = df['driver_id'].notnull().astype(int)
df["time"] = pd.to_datetime(df["time"])
df["day"] = df["time"].dt.day
df["slot"] = df["time"].dt.hour * 6 + df["time"].dt.minute // 10 + 1
cols = ["order_id","driver_id","passenger_id","dest_district_hash","time","price"]
df.drop(cols, axis = 1, inplace = True)
def mape(y_true, y_pred):
y_pred = y_pred[y_true > 0]
y_true = y_true[y_true > 0]
return np.mean(np.abs((y_true - y_pred) / y_true))
#%%
if __name__ == '__main__':
path = ".\\citydata\\season_1"
train_path = path + "\\training_data"
test_path = path + "\\test_set_1"
order_path = "\\order_data\\order_data_2016-01-"
train_data = pd.concat(read_order(train_path + order_path + str(i).zfill(2)) for i in range(1, 22))
test_data = pd.concat(read_order(test_path + order_path + str(i).zfill(2) + "_test") for i in [22,24,26,28,30])
submit_data = read_submit(test_path + "\\read_me_1.txt")
id_data = read_district(test_path + "\\cluster_map\\cluster_map")
#%%
df1 = pd.concat([train_data, test_data])
df1 = pd.merge(df1, id_data, left_on=['start_district_hash'], right_on=['district_hash'])
df1.drop(["start_district_hash", "district_hash"], axis = 1, inplace = True)
process_order(df1)
#%%
df2 = df1.groupby(['district_id', 'day', 'slot'])['answered'].agg({'request':'count', 'answer':'sum'}).reset_index()
no_data = pd.DataFrame(list(product(list(range(1,67)),list(range(1,145)))), columns=['district_id', 'slot'])
no_data["day"] = 21
no_data["answer"] = 0
no_data["request"] = 0
df3 = pd.concat([df2, no_data]).drop_duplicates(subset=['district_id', 'day', 'slot'], keep='first')
df3["gap"] = df3["request"] - df3["answer"]
df3.sort_values(['district_id','day','slot'], inplace=True)
#%%
df4 = df3[(df3["district_id"]==3)&(df3["day"]==21)]
#plt.plot(df4["slot"], df4["request"])
plt.plot(df4["slot"], df4["gap"])
plt.show()
#%%
df_train = df3[(df3["day"]<=21)]
df_test = df3[(df3["day"]>=22)]
cols = ['district_id','slot']
reg_req = RandomForestRegressor(random_state = 0)
reg_req.fit(df_train[cols], df_train['request'])
predict_req = reg_req.predict(df_test[cols])
reg_ans = RandomForestRegressor(random_state = 0)
reg_ans.fit(df_train[cols], df_train['answer'])
predict_ans = reg_ans.predict(df_test[cols])
predict_gap = predict_req - predict_ans
predict_gap[predict_gap < 0] = 0
#df_test.insert(0, "predict_gap", predict_gap)
error = mape(df_test["gap"].values, predict_gap)
print(error)
#%%
df_submit = pd.DataFrame(list(range(1,67)),columns=['district_id'])
df_submit["key"] = 0
submit_data["key"] = 0
df_submit = pd.merge(df_submit, submit_data, how='outer', on='key')
df_submit.drop(['key'], axis = 1, inplace = True)
predict_req = reg_req.predict(df_submit[cols])
predict_ans = reg_ans.predict(df_submit[cols])
predict_gap = predict_req - predict_ans
predict_gap[predict_gap < 0] = 0
df_submit['gap'] = predict_gap
df_submit.to_csv('submit.csv', header=False, index=False, columns=['district_id','time','gap'])
| true |
5c7edda17893603a0d3c43251a4bbf85eb14df3d
|
Python
|
kltjrcks/move_test
|
/programmers/pSolution36.py
|
UTF-8
| 360 | 3.53125 | 4 |
[] |
no_license
|
# -*- coding : utf-8 -*-
# Valid parentheses
def solution(s):
answer = 0
for i in s:
if answer == -1:
return False
else:
if i == "(":
answer += 1
elif i == ")":
answer -= 1
if answer != 0:
return False
else:
return True
print(solution("()()"))
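# A couple of extra checks (added for illustration; expected results in the comments):
print(solution(")("))    # False
print(solution("(()("))  # False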
| true |