blob_id (string, length 40) | language (1 class) | repo_name (string, 5-133 chars) | path (string, 2-333 chars) | src_encoding (30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, 0-67 items) | license_type (2 classes) | text (string, 12-5.47M chars) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
f76b3b840b3db0b3d8c980dc620327745383a006
|
Python
|
17BTEC005/virat-kohli
|
/rohit sharma.py
|
UTF-8
| 135 | 3.03125 | 3 |
[] |
no_license
|
# multiply two numbers
a=10
b=20
c=a*b
print("multiplication of 10 and 20",a,"*",b,"=", c)
| true |
8b1781f3d1abb887e332ddcd453bef5c9b05fa8d
|
Python
|
ravitejavemuri/ML-Algorithms
|
/K-means/k-means.py
|
UTF-8
| 2,282 | 3.546875 | 4 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 13:13:09 2019
Pseudo-code:
1. Get a dataset.
2. Arbitrarily choose K centroids at random.
3. Assign each data point to its closest centroid/cluster by distance.
4. Compute the mean of the data points in each cluster, excluding the centroids.
5. The mean becomes the new centroid; repeat from step 3 until the centroids no longer change.
@author: Ravi
"""
from copy import deepcopy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = (16, 9)
plt.style.use('ggplot')
# Importing the data sets
data = pd.read_csv('data.csv')
#print(data.shape)
data.head()
#Plotting the values
d1= data['Dset1'].values
d2= data['Dset2'].values
X=np.array(list(zip(d1,d2)))
print('x iss',X)
plt.scatter(d1, d2, c='blue', s=7)
#Distance
def dist(a, b, ax=1):
    return np.linalg.norm(a - b, axis=ax)
#Picking centroids at random
k=4
C_x = np.random.randint(0, np.max(X)-20, size=k)
C_y = np.random.randint(0, np.max(X)-20, size=k)
C= np.array(list(zip(C_x, C_y)), dtype=np.float32)
print(C)
# plotting with centroids
plt.scatter(d1,d2, c ='#050505', s=7)
plt.scatter(C_x, C_y, marker='*', s=200, c='g')
#Storing the value of centroids when it updates
C_old = np.zeros(C.shape)
#print(C_old)
clusters = np.zeros(len(X))
#distance between the new centroid and old centroid
Cdist = dist(C,C_old, None)
while Cdist != 0:
    for i in range(len(X)):
        print('x i is', X[i])
        distances = dist(X[i], C)
        print(distances)
        cluster = np.argmin(distances)
        clusters[i] = cluster
    # storing the old centroid
    C_old = deepcopy(C)
    # finding the new centroids by taking the average value
    for i in range(k):
        points = [X[j] for j in range(len(X)) if clusters[j] == i]
        # print(points)
        C[i] = np.mean(points, axis=0)
    Cdist = dist(C, C_old, None)
colors = ['r','g','b','y','c','m']
fig, ax = plt.subplots()
for i in range(k):
    points = np.array([X[j] for j in range(len(X)) if clusters[j] == i])
    ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])
ax.scatter(C[:, 0], C[:, 1], marker='*', s=200, c='#050505')
| true |
cb74f99d17f6f3e2d592fe812390f6036acfd879
|
Python
|
Elcoss/Python-Curso-em-Video
|
/Mundo1/desafio6.py
|
UTF-8
| 159 | 3.59375 | 4 |
[] |
no_license
|
n1=int(input('digite seu numero: '))
n2= n1*2
n3= n1*3
n4= n1**(1/2)
print(f'o dobro do seu numero e {n2} o triplo e {n3} a raiz quadrada dele e {n4}')
| true |
e1c8441b35d68c6c440ce5d1359a7d254a953005
|
Python
|
ursho/Project-Euler
|
/tests/testFibonacciGenerator.py
|
UTF-8
| 1,217 | 3.625 | 4 |
[] |
no_license
|
import unittest
from problems.FibonacciGenerator import FibonacciGenerator
class TestFibonacciGenerator(unittest.TestCase):
def test_Fibonacci(self):
self.assertEqual(0, fibonacci(1))
self.assertEqual(1, fibonacci(2))
self.assertEqual(1, fibonacci(3))
self.assertEqual(2, fibonacci(4))
self.assertEqual(3, fibonacci(5))
self.assertEqual(5, fibonacci(6))
def test_nextFibonacci(self):
fg = FibonacciGenerator()
self.assertEqual(fibonacci(1), fg.next())
self.assertEqual(fibonacci(2), fg.next())
self.assertEqual(fibonacci(3), fg.next())
self.assertEqual(fibonacci(4), fg.next())
self.assertEqual(fibonacci(5), fg.next())
self.assertEqual(fibonacci(6), fg.next())
def test_fibonacciOverflow(self):
with self.assertRaises(OverflowError):
list(FibonacciGenerator())
def fibonacci(n):
if n <= 0:
print("Incorrect input")
# First Fibonacci number is 0
elif n == 1:
return 0
# Second Fibonacci number is 1
elif n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
if __name__ == '__main__':
unittest.main()
| true |
3707c418d6dce0abd30f17853a48ef57190a93fd
|
Python
|
j1fig/euler
|
/16/main.py
|
UTF-8
| 230 | 2.609375 | 3 |
[] |
no_license
|
import sys
import cProfile
def brute(arg):
return reduce(lambda x, y: x + int(y), str(2**arg), 0)
if __name__ == "__main__":
arg = int(sys.argv[1])
def main():
print brute(arg)
cProfile.run('main()')
| true |
cc6979eb902a306740989885480d0063a98bc1fd
|
Python
|
SUREYAPRAGAASH09/ArrayQuestions
|
/25.2ndSmallestNumber/2ndSmallestNumber.py
|
UTF-8
| 261 | 3.109375 | 3 |
[] |
no_license
|
import find_min
def secondsmallestNumber(array):
v = find_min.findMin(array)
for i in array:
if v == i:
array.remove(i)
maxi = find_min.findMin(array)
return maxi
array = [3,1,6,9,3]
print(secondsmallestNumber(array))
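# Editor's note (hedged sketch, not part of the original file): the find_min module
# imported above is not included in this row, so the snippet is not runnable as shown.
# A hypothetical minimal find_min.py consistent with how findMin is used here might be:
#
#     def findMin(array):
#         """Return the smallest element of a non-empty list."""
#         smallest = array[0]
#         for item in array:
#             if item < smallest:
#                 smallest = item
#         return smallest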
| true |
dddccee9cd8d45f0702060530c95388c1656c218
|
Python
|
Catxiaobai/project
|
/lxd_Safety(out)/graphTraversal-submit2/mymodules/sclexer.py
|
UTF-8
| 2,844 | 2.671875 | 3 |
[] |
no_license
|
# A lexer for a simple C language
import lex
#from ply import *
reserved = (
# 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE',
# 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
# 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF',
# 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE',
# 'READ','PRINT','WRITE',
'TRUE','FALSE',
)
tokens = reserved + (
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=)
'EQUALS',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
)
# Completely ignored characters
t_ignore = ' \t\x0c'
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
reserved_map = { }
for r in reserved:
reserved_map[r.lower()] = r
def t_ID(t):
r'[A-Za-z_][\w_]*'
t.type = reserved_map.get(t.value,"ID")
return t
# Integer literal
t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
# # String literal
# t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# #t_STRING = r'\".*?\"'
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
# # Comments
# def t_comment(t):
# r'/\*(.|\n)*?\*/'
# t.lexer.lineno += t.value.count('\n')
def t_error(t):
print "Illegal character", t.value[0]
t.lexer.skip(1)
lex.lex()
| true |
cad792f0c8f6a47486fa4d6fe971ec48089dbe00
|
Python
|
syurskyi/Python_Topics
|
/115_testing/_exercises/_templates/temp/Github/_Level_1/Python_Unittest_Suite-master/Python_Unittest_Patch_Methods.py
|
UTF-8
| 2,885 | 3.46875 | 3 |
[] |
no_license
|
# Python Unittest
# unittest.mock - mock object library
# unittest.mock is a library for testing in Python.
# It allows you to replace parts of your system under test with mock objects and make assertions about how they have been used.
# unittest.mock provides a core Mock class removing the need to create a host of stubs throughout your test suite.
# After performing an action, you can make assertions about which methods / attributes were used and arguments they were called with.
# You can also specify return values and set needed attributes in the normal way.
#
# Additionally, mock provides a patch() decorator that handles patching module and class level attributes within the scope of a test, along with sentinel
# for creating unique objects.
#
# Mock is very easy to use and is designed for use with unittest. Mock is based on the 'action -> assertion' pattern instead of 'record -> replay' used by
# many mocking frameworks.
#
#
# patch methods: start and stop.
# All the patchers have start() and stop() methods.
# These make it simpler to do patching in setUp methods or where you want to do multiple patches without nesting decorators or with statements.
# To use them call patch(), patch.object() or patch.dict() as normal and keep a reference to the returned patcher object.
# You can then call start() to put the patch in place and stop() to undo it.
# If you are using patch() to create a mock for you then it will be returned by the call to patcher.start.
#
patcher = patch('package.module.ClassName')
from package import module
original = module.ClassName
new_mock = patcher.start()
assert module.ClassName is not original
assert module.ClassName is new_mock
patcher.stop()
assert module.ClassName is original
assert module.ClassName is not new_mock
#
# A typical use case for this might be for doing multiple patches in the setUp method of a TestCase:
#
class MyTest(TestCase):
    def setUp(self):
        self.patcher1 = patch('package.module.Class1')
        self.patcher2 = patch('package.module.Class2')
        self.MockClass1 = self.patcher1.start()
        self.MockClass2 = self.patcher2.start()

    def tearDown(self):
        self.patcher1.stop()
        self.patcher2.stop()

    def test_something(self):
        assert package.module.Class1 is self.MockClass1
        assert package.module.Class2 is self.MockClass2

MyTest('test_something').run()
#
# Caution:
# If you use this technique you must ensure that the patching is "undone" by calling stop.
# This can be fiddlier than you might think, because if an exception is raised in the setUp then tearDown is not called.
# unittest.TestCase.addCleanup() makes this easier:
#
class MyTest(TestCase):
    def setUp(self):
        patcher = patch('package.module.Class')
        self.MockClass = patcher.start()
        self.addCleanup(patcher.stop)

    def test_something(self):
        assert package.module.Class is self.MockClass
| true |
7a6db244d6501882789016473d740863701e660a
|
Python
|
jcmarsh/drseus
|
/scripts/socket_file_server.py
|
UTF-8
| 2,401 | 2.84375 | 3 |
[] |
no_license
|
#!/usr/bin/env python3
from socket import AF_INET, SOCK_STREAM, socket
from threading import Thread
from os import remove
def receive_server():
with socket(AF_INET, SOCK_STREAM) as sock:
sock.bind(('', 60124))
sock.listen(5)
while True:
connection, address = sock.accept()
file_to_receive = connection.recv(4096).decode('utf-8', 'replace')
while '\n' not in file_to_receive:
file_to_receive += connection.recv(4096).decode('utf-8',
'replace')
file_to_receive = file_to_receive.split('\n')[0]
connection.close()
connection, address = sock.accept()
with open(file_to_receive, 'wb') as file_to_receive:
data = connection.recv(4096)
while data:
file_to_receive.write(data)
data = connection.recv(4096)
connection.close()
def send_server():
with socket(AF_INET, SOCK_STREAM) as sock:
sock.bind(('', 60123))
sock.listen(5)
while True:
connection, address = sock.accept()
file_to_send = connection.recv(4096).decode('utf-8', 'replace')
while '\n' not in file_to_send:
file_to_send += connection.recv(4096).decode('utf-8', 'replace')
file_to_send = file_to_send.split('\n')[0]
if ' ' in file_to_send:
args = file_to_send.split(' ')
file_to_send = args[0]
delete = args[1] == '-r'
else:
delete = False
try:
with open(file_to_send, 'rb') as data:
connection.sendall(data.read())
except:
print('socket_file_server.py: could not open file:',
file_to_send)
else:
try:
if delete:
remove(file_to_send)
print('socket_file_server.py: deleted file:',
file_to_send)
except:
print('socket_file_server.py: could not delete file:',
file_to_send)
finally:
connection.close()
Thread(target=receive_server).start()
Thread(target=send_server).start()
| true |
e8669d2f92a66e62d55904b67217aba188e06c20
|
Python
|
kevinqqnj/sudo-dynamic-solve
|
/sudo_recur.py
|
UTF-8
| 18,986 | 3 | 3 |
[
"Apache-2.0"
] |
permissive
|
# coding:utf-8
# python3
# original: u"杨仕航"
# modified: @kevinqqnj
import logging
import numpy as np
from queue import Queue, LifoQueue
import time
import copy
# DEBUG INFO WARNING ERROR CRITICAL
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(levelname)s %(message)s')
# format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)-7s %(message)s')
logger = logging.getLogger(__name__)
class Record:
point = None # 进行猜测的点
point_index = 0 # 猜测候选列表使用的值的索引
value = None # 回溯记录的值
class Sudo:
def __init__(self, _data):
# 数据初始化(二维的object数组)
self.value = np.array([[0] * 9] * 9, dtype=object) # 数独的值,包括未解决和已解决的
self.new_points = Queue() # 先进先出,新解(已解决值)的坐标
self.record_queue = LifoQueue() # 先进后出,回溯器
self.guess_times = 0 # 猜测次数
self.time_cost = '0' # 猜测time
self.time_start = time.time()
self.guess_record = [] # 记录猜测,用于回放
# 九宫格的基准列表
self.base_points = [[0, 0], [0, 3], [0, 6], [3, 0], [3, 3], [3, 6], [6, 0], [6, 3], [6, 6]]
# 整理数据
self.puzzle = np.array(_data).reshape(9, -1)
for r in range(0, 9):
for c in range(0, 9):
if self.puzzle[r, c]: # if not Zero
# numpy default is int32, convert to int
self.value[r, c] = int(self.puzzle[r, c])
# 新的确认的值添加到列表中,以便遍历
self.new_points.put((r, c))
# logger.debug(f'init: answer={self.value[r, c]} at {(r, c)}')
else: # if Zero, guess no. is 1-9
self.value[r, c] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# self.guess_record.append({'value':eval(f'{self.value.tolist()}'),'desc':f'载入数据'})
# 剔除数字
def _cut_num(self, point):
r, c = point
val = self.value[r, c]
remove_r = remove_c = remove_b = False
value_snap = eval(f'{self.value.tolist()}')
point_snap = value_snap[r][c]
# 行
for i, item in enumerate(self.value[r]):
if isinstance(item, list):
if item.count(val):
item.remove(val)
remove_r = True
# 判断移除后,是否剩下一个元素
if len(item) == 1:
self.new_points.put((r, i)) # 添加坐标到“已解决”列表
logger.debug(f'only one in row: answer={self.value[r, i]} at {(r, i)}')
self.value[r, i] = item[0]
# if remove_r: self.guess_record.append({'value':self.value.tolist(),'desc':'排除同一行已有的数字'})
# 列
for i, item in enumerate(self.value[:, c]):
if isinstance(item, list):
if item.count(val):
item.remove(val)
remove_c = True
# 判断移除后,是否剩下一个元素
if len(item) == 1:
self.new_points.put((i, c))
logger.debug(f'only one in col: answer={self.value[i, c]} at {(i, c)}')
self.value[i, c] = item[0]
# if remove_c: self.guess_record.append({'value':self.value.tolist(),'desc':'排除同一列已有的数字'})
# 所在九宫格(3x3的数组)
b_r, b_c = map(lambda x: x // 3 * 3, point) # 九宫格基准点
for m_r, row in enumerate(self.value[b_r:b_r + 3, b_c:b_c + 3]):
for m_c, item in enumerate(row):
if isinstance(item, list):
if item.count(val):
item.remove(val)
remove_b = True
# 判断移除后,是否剩下一个元素
if len(item) == 1:
r = b_r + m_r
c = b_c + m_c
self.new_points.put((r, c))
logger.debug(f'only one in block: answer={self.value[r, c]} at {(r, c)}')
self.value[r, c] = item[0]
if remove_b or remove_c or remove_r:
self.guess_record.append({
'value': value_snap,
'desc':f'排除同一行、列、九宫格: {point_snap}',
'highlight': point})
# 同一行、列或九宫格中, List里,可能性只有一个的情况
def _check_one_possbile(self):
# 同一行只有一个数字的情况
for r in range(0, 9):
# 只取出是这一行是List的格子
values = list(filter(lambda x: isinstance(x, list), self.value[r]))
for c, item in enumerate(self.value[r]):
if isinstance(item, list):
for value in item:
if sum(map(lambda x: x.count(value), values)) == 1:
self.value[r, c] = value
self.new_points.put((r, c))
logger.debug(f'list val is only one in row: answer={self.value[r, c]} at {(r, c)}')
return True
# 同一列只有一个数字的情况
for c in range(0, 9):
values = list(filter(lambda x: isinstance(x, list), self.value[:, c]))
for r, item in enumerate(self.value[:, c]):
if isinstance(item, list):
for value in item:
if sum(map(lambda x: x.count(value), values)) == 1:
self.value[r, c] = value
self.new_points.put((r, c))
logger.debug(f'list val is only one in col: answer={self.value[r, c]} at {(r, c)}')
return True
# 九宫格内的单元格只有一个数字的情况
for r, c in self.base_points:
# reshape: 3x3 改为1维数组
values = list(filter(lambda x: isinstance(x, list), self.value[r:r + 3, c:c + 3].reshape(1, -1)[0]))
for m_r, row in enumerate(self.value[r:r + 3, c:c + 3]):
for m_c, item in enumerate(row):
if isinstance(item, list):
for value in item:
if sum(map(lambda x: x.count(value), values)) == 1:
self.value[r + m_r, c + m_c] = value
self.new_points.put((r + m_r, c + m_c))
logger.debug(f'list val is only one in block: answer={self.value[r + m_r, c +m_c]} at '
f'{(r + m_r, c +m_c)}')
return True
# 同一个九宫格内数字在同一行或同一列处理(同行列隐性排除)
def _check_implicit(self):
for b_r, b_c in self.base_points:
block = self.value[b_r:b_r + 3, b_c:b_c + 3]
# 判断数字1~9在该九宫格的分布情况
_data = block.reshape(1, -1)[0]
for i in range(1, 10):
result = map(lambda x: 0 if not isinstance(x[1], list) else x[0] + 1 if x[1].count(i) else 0,
enumerate(_data))
result = list(filter(lambda x: x > 0, result))
r_count = len(result)
if r_count in [2, 3]:
# 2或3个元素才有可能同一行或同一列
rows = list(map(lambda x: (x - 1) // 3, result))
cols = list(map(lambda x: (x - 1) % 3, result))
if len(set(rows)) == 1:
# 同一行,去掉其他行的数字
result = list(map(lambda x: b_c + (x - 1) % 3, result))
row = b_r + rows[0]
for col in range(0, 9):
if col not in result:
item = self.value[row, col]
if isinstance(item, list):
if item.count(i):
item.remove(i)
# 判断移除后,是否剩下一个元素
if len(item) == 1:
self.new_points.put((row, col))
logger.debug(
f'block compare row: answer={self.value[row, col]} at {(row, col)}')
self.guess_record.append({
'value':eval(f'{self.value.tolist()}'),
'desc':f'九宫格隐性排除row: {i}',
'highlight': (row, col)})
self.value[row, col] = item[0]
return True
elif len(set(cols)) == 1:
# 同一列
result = list(map(lambda x: b_r + (x - 1) // 3, result))
col = b_c + cols[0]
for row in range(0, 9):
if row not in result:
item = self.value[row, col]
if isinstance(item, list):
if item.count(i):
item.remove(i)
# 判断移除后,是否剩下一个元素
if len(item) == 1:
self.new_points.put((row, col))
logger.debug(
f'block compare col: answer={self.value[row, col]} at {(row, col)}')
self.guess_record.append({
'value':eval(f'{self.value.tolist()}'),
'desc':f'九宫格隐性排除col: {i}',
'highlight': (row, col)})
self.value[row, col] = item[0]
return True
# 排除法解题
def sudo_exclude(self):
implicit_exist = True
new_point_exist = True
while implicit_exist:
while new_point_exist:
# 剔除数字
while not self.new_points.empty():
point = self.new_points.get() # 先进先出
self._cut_num(point)
# 检查List里值为单个数字的情况,如有新answer则加入new_points Queue,立即_cut_num
new_point_exist = self._check_one_possbile()
# 检查同行或列的情况
implicit_exist = self._check_implicit()
new_point_exist = True
# 得到有多少个确定的数字
def get_num_count(self):
return sum(map(lambda x: 1 if isinstance(x, int) else 0, self.value.reshape(1, -1)[0]))
# 评分,找到最佳的猜测坐标
def get_best_point(self):
best_score = 0
best_point = (0, 0)
for r, row in enumerate(self.value):
for c, item in enumerate(row):
point_score = self._get_point_score((r, c))
if best_score < point_score:
best_score = point_score
best_point = (r, c)
return best_point
# 计算某坐标的评分
def _get_point_score(self, point):
# 评分标准 (10-候选个数) + 同行确定数字个数 + 同列确定数字个数
r, c = point
item = self.value[r, c]
if isinstance(item, list):
score = 10 - len(item)
score += sum(map(lambda x: 1 if isinstance(x, int) else 0, self.value[r]))
score += sum(map(lambda x: 1 if isinstance(x, int) else 0, self.value[:, c]))
return score
else:
return 0
# 验证有没错误
def verify_value(self):
# 行
r = 0
for row in self.value:
nums = []
lists = []
for item in row:
(lists if isinstance(item, list) else nums).append(item)
if len(set(nums)) != len(nums):
# logger.error(f'verify failed. dup in row {r}')
logger.debug(f'verify failed. dup in row {r}')
self.guess_record.append({
'value':eval(f'{self.value.tolist()}'),
'desc':f'验证错误 in row: {r+1}'
})
return False # 数字要不重复
if len(list(filter(lambda x: len(x) == 0, lists))):
return False # 候选列表不能为空集
r += 1
# 列
for c in range(0, 9):
nums = []
lists = []
col = self.value[:, c]
for item in col:
(lists if isinstance(item, list) else nums).append(item)
if len(set(nums)) != len(nums):
logger.debug(f'verify failed. dup in col {c}')
self.guess_record.append({
'value':eval(f'{self.value.tolist()}'),
'desc':f'验证错误 in col: {c+1}'
})
return False # 数字要不重复
if len(list(filter(lambda x: len(x) == 0, lists))):
return False # 候选列表不能为空集
# 九宫格
for b_r, b_c in self.base_points:
nums = []
lists = []
block = self.value[b_r:b_r + 3, b_c:b_c + 3].reshape(1, -1)[0]
for item in block:
(lists if isinstance(item, list) else nums).append(item)
if len(set(nums)) != len(nums):
logger.debug(f'verify failed. dup in block {b_r, b_c}')
self.guess_record.append({
'value':eval(f'{self.value.tolist()}'),
'desc':f'验证错误 in block: {b_r+1, b_c+1}'
})
return False # 数字要不重复
if len(list(filter(lambda x: len(x) == 0, lists))):
return False # 候选列表不能为空集
return True
def add_to_queue(self, point, index):
record = Record()
record.point = point
record.point_index = index
# recorder.value = self.value.copy() #numpy的copy不行
record.value = copy.deepcopy(self.value)
self.record_queue.put(record)
items = self.value[point]
self.value[point] = items[index]
self.new_points.put(point)
return items
def sudo_solve_iter(self):
# 排除法解题
self.sudo_exclude()
# logger.debug(f'excluded, current result:\n{self.value}')
if self.verify_value():
if self.get_num_count() == 81:
# solve success
self.time_cost = f'{time.time() - self.time_start:.3f}'
self.guess_record.append({
'value':self.value.tolist(),
'desc':f'恭喜你,solved!'
})
return
else:
logger.info(f'current no. of fixed answers: {self.get_num_count()}')
point = self.get_best_point()
index = 0
items = self.add_to_queue(point, index)
logger.info(f'add to LIFO queue and guessing {items[index]}/{items}: '
f'{[x.point for x in self.record_queue.queue]}')
self.guess_times += 1
self.guess_record.append({
'value':self.value.tolist(),
'desc':f'第{self.guess_times}次猜测, 从{items}里选 {items[index]}',
'highlight': point,
'highlight_type': 'assume',
'type': 'assume',
})
return self.sudo_solve_iter()
while True:
if self.record_queue.empty():
# raise Exception('Sudo is wrong, no answer!')
self.time_cost = f'{time.time() - self.time_start:.3f}'
logger.error(f'Guessed {self.guess_times} times. Sudo is wrong, no answer!')
exit()
# check value ERROR, need to try next index or rollback
record = self.record_queue.get()
point = record.point
index = record.point_index + 1
items = record.value[point]
self.value = record.value
logger.info(f'Recall! Pop previous point, {items} @{point}')
# 判断索引是否超出范围
# if not exceed,则再回溯一次
if index < len(items):
items = self.add_to_queue(point, index)
logger.info(f'guessing next index: answer={items[index]}/{items} @{point}')
self.guess_times += 1
self.guess_record.append({
'value':self.value.tolist(),
'desc':f'回溯, 第{self.guess_times}次猜测, 从{items}里选 {items[index]}',
'highlight': point,
'highlight_type': 'assume',
'type': 'assume',
})
return self.sudo_solve_iter()
if __name__ == '__main__':
# 数独题目 http://cn.sudokupuzzle.org/
# data[0]: 号称最难的数独
data = [[8, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 3, 6, 0, 0, 0, 0, 0,
0, 7, 0, 0, 9, 0, 2, 0, 0,
0, 5, 0, 0, 0, 7, 0, 0, 0,
0, 0, 0, 0, 4, 5, 7, 0, 0,
0, 0, 0, 1, 0, 0, 0, 3, 0,
0, 0, 1, 0, 0, 0, 0, 6, 8,
0, 0, 8, 5, 0, 0, 0, 1, 0,
0, 9, 0, 0, 0, 0, 4, 0, 0],
[0, 0, 0, 0, 5, 0, 2, 0, 0,
0, 9, 0, 0, 0, 0, 0, 4, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 4, 0, 6, 0, 8, 0,
0, 0, 7, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 8, 0, 9, 4,
2, 0, 1, 0, 7, 0, 0, 0, 0,
0, 0, 5, 0, 0, 0, 0, 0, 0]]
# try:
t1 = time.time()
puzzle = data[0]
sudo = Sudo(puzzle)
sudo.sudo_solve_iter()
logger.warning(f'Done! guessed {sudo.guess_times} times, in {sudo.time_cost}sec')
logger.warning(f'Puzzle:\n{sudo.puzzle}\nAnswer:\n{sudo.value}')
# except:
# logger.error(f'ERROR: {sudo.value}', exc_info=True)
| true |
c93277bff968a3ad0d5f66d03d54812ead49a4bf
|
Python
|
ajleeson/Self-Driving-Car-Simulation
|
/Algorithm/track.py
|
UTF-8
| 1,811 | 3.46875 | 3 |
[] |
no_license
|
class Track():
"""creates the tracks for all of the cars"""
# makes sure pixel resolution is high
def __init__(self, rows, cols, width, height, timeStep):
self.rows = rows # number of horizontal lanes
self.cols = cols # number of vertical lanes
self.width = width # pixels wides
self.height = height # pixels high
#######################################################
# returns the number of horizontal lanes on the track
def getRows(self):
return self.rows
# returns the number of vertical lanes on the track
def getCols(self):
return self.cols
# returns the width of the track in pixels
def getWidth(self):
return self.width
# returns the height of the track in pixels
def getHeight(self):
return self.height
# returns the number of pixels between each row
def getRowSpacing(self):
rowSpacing = (self.height-self.rows)/(self.rows+1)
return rowSpacing
# returns the number of pixels between each column
def getColSpacing(self):
colSpacing = (self.width-self.cols)/(self.cols+1)
return colSpacing
# returns a list of tuples, with the x and y coordinate of each intersection contained in the tuple
def getIntersections(self):
intersections = []
for i in range(self.rows):
for j in range(self.cols):
# account fot the width of each lane
# determine the coordinate of each intersection
x_intersect = (j+1)*self.getColSpacing() + i
y_intersect = (i+1)*self.getRowSpacing() + j
intersection = [(x_intersect, y_intersect)]
intersections += intersection
return intersections
| true |
5a89d53a45a842faa0f9d05d78b2e45f98841e81
|
Python
|
rizwan2000rm/interview-prep
|
/Python/DS/tuple.py
|
UTF-8
| 381 | 4.4375 | 4 |
[
"MIT"
] |
permissive
|
# Tuples are immutable
print("============ tuples ============")
print()
tuples = (12345, 54321, 'hello!')
print(tuples)
u = tuples, (1, 2, 3, 4, 5)
print(u)
# The statement t = 12345, 54321, 'hello!' is an example of tuple packing:
# the values 12345, 54321 and 'hello!'
# are packed together in a tuple. The reverse operation is also possible
x, y, z = tuples
print(x, y, z)
| true |
83b08e56b1c76fbe0d232cfd74b5e55a6ba091d2
|
Python
|
CashFu/selenium3Fu
|
/seleium3/selenium_lfj/find_element.py
|
UTF-8
| 836 | 2.59375 | 3 |
[] |
no_license
|
#coding=utf-8
from util.read_ini import ReadIni
class FindElement():
def __init__(self,driver):
self.driver = driver
def get_element(self,key):
read_ini = ReadIni()
data_ini = read_ini.get_value(key)
by = data_ini.split('>')[0]
value = data_ini.split('>')[1]
try:
if by =='id':
return self.driver.find_element_by_id(value)
elif by=='name':
return self.driver.find_element_by_name(value)
elif by=='className':
return self.driver.find_element_by_class_name(value)
else:
return self.driver.find_element_by_xpath(value)
except:
self.driver.save_screenshot(r"G:\unittets_lfj\seleium3\Image\%s.png"%value)
return None
| true |
3291ec6e6d7d563367a61be37d23a743077a9ad7
|
Python
|
poweihuang17/practice_leetcode_and_interview
|
/Leetcode/Greedy/757_Set_Intersection_Size_At_Least_Two.py
|
UTF-8
| 1,200 | 3.296875 | 3 |
[] |
no_license
|
class Solution(object):
def intersectionSizeTwo(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort()
filtered_intervals=[]
#print intervals
for interval in intervals:
while filtered_intervals and filtered_intervals[-1][1]>=interval[1]:
filtered_intervals.pop()
filtered_intervals.append(interval)
result=0
p1,p2=float('-inf'),float('-inf')
#print filtered_intervals
for interval in filtered_intervals:
condition1=interval[0]<=p1<=interval[1]
condition2=interval[0]<=p2<=interval[1]
if condition1 and condition2:
continue
elif condition1:
p2=interval[1]
result+=1
elif condition2:
p1=interval[1]
result+=1
else:
p2=interval[1]
p1=p2-1
result+=2
return result
s=Solution()
print s.intersectionSizeTwo([[1, 3], [1, 4], [2, 5], [3, 5]])
print s.intersectionSizeTwo([[1, 2], [2, 3], [2, 4], [4, 5]])
print s.intersectionSizeTwo([[1,24],[10,16],[14,25],[0,18],[16,17]])
| true |
57c24a8a95e689732d67e4281cc5dbcb69a729d3
|
Python
|
git208/AutoFrameRegressionTestF10
|
/common/test_cases_select.py
|
UTF-8
| 3,230 | 2.703125 | 3 |
[] |
no_license
|
import json
import os
from common.yaml_RW import YamlRW
from config.logConfig import LogCustom,logging
from common.parse_excel import ParseExcel
def testCaseSelect(file,file_type='excel',
testcase_matching=None,
sheet_names=None,
isFuzzy=False,
isAll=False):
"""
:param file: 用例所在的路径
:param file_type: 用例所属文件类型,可以是yaml,excel
:param testcase_matching:list 用例匹配关键字列表,isFuzzy为True时,根据列表中关键字模糊匹配用例,isFuzzy为False时,该参数为用例列表
:param isFuzzy: 是否根据关键字列表进行模糊匹配
:param isAll: 为True时执行所有用例
"""
print('开始创建用例执行列表')
ym = YamlRW('../source/testCaseDriver.yaml')
if isAll == False:
if isFuzzy == False:
ym.wightYaml(testcase_matching)
else:
temp_list = []
if file_type.lower() == 'yaml':
for _ in os.listdir('../testCase/yamls'):
__: str
for __ in testcase_matching:
if __ in _:
temp_list.append(_)
elif file_type.lower() == 'excel':
excel = ParseExcel(file)
excel.get_excel_new()
if sheet_names != None:
if type(sheet_names) == list:
for sheet_name in sheet_names:
for _ in excel.excel_data[sheet_name][1].keys:
temp_list.append((sheet_name,_))
else:
for _ in excel.excel_data[sheet_names][1].keys:
temp_list.append((sheet_names, _))
else:
for sheet_name in testcase_matching.keys():
for __ in excel.excel_data[sheet_name][1].keys:
for _ in testcase_matching[sheet_name]:
if __ in _:
temp_list.append((sheet_name, _))
else:
LogCustom().logger().error('文件类型非yaml、excel,创建用例执行列表失败')
ym.wightYaml(temp_list)
else:
if file_type.lower() == 'yaml':
ym.wightYaml(os.listdir('../testCase/yamls'))
elif file_type.lower() == 'excel':
temp_list = []
excel = ParseExcel(file)
excel.get_excel_new()
i = 0
A = excel.excel_data.keys()
for _ in A:
i += 1
j = 0
B = excel.excel_data[_][1].keys()
for __ in B:
j += 1
temp_list.append([_,__])
print(f'总共{len(A)}个接口,当前为第{i}个接口,总共包含{len(B)}条用例,选择至第{j}条用例,数据{(_,__)}')
ym.wightYaml(temp_list)
# if __name__ == '__main__':
# testCaseSelect(FILE,isAll=True)
# with open('../source/testCaseDriver.json', mode='r', encoding='utf-8') as a:
# print(a.read())
| true |
6b916309205853e112e1ce746da8af660b2ea869
|
Python
|
francosbenitez/unsam
|
/04-listas-y-listas/01-debugger/debugger.py
|
UTF-8
| 1,073 | 4.28125 | 4 |
[] |
no_license
|
"""
Ejercicio 4.1: Debugger
Ingresá y corré el siguiente código en tu IDE:
def invertir_lista(lista):
'''Recibe una lista L y la develve invertida.'''
invertida = []
i=len(lista)
while i > 0: # tomo el último elemento
i=i-1
invertida.append (lista.pop(i)) #
return invertida
l = [1, 2, 3, 4, 5]
m = invertir_lista(l)
print(f'Entrada {l}, Salida: {m}')
Deberías observar que la función modifica el valor de la lista de entrada. Eso no debería ocurrir: una función nunca debería modificar los parámetros salvo que sea lo esperado. Usá el debugger y el explorador de variables para determinar cuál es el primer paso clave en el que se modifica el valor de esta variable.
"""
def invertir_lista(lista):
'''Recibe una lista L y la develve invertida.'''
invertida = []
i=len(lista)
while i > 0: # tomo el último elemento
i=i-1
invertida.append(lista[i]) # la función pop quita
return invertida
l = [1, 2, 3, 4, 5]
m = invertir_lista(l)
print(f'Entrada {l}, Salida: {m}')
| true |
1493757adaaa0918e76b211c85051a989bd94c95
|
Python
|
pdekeulenaer/sennai
|
/simulation.py
|
UTF-8
| 1,035 | 3.03125 | 3 |
[] |
no_license
|
import game, population
import random
# config parameters
# Population
n_cars = 10
start = (50,50)
# Brain
layers = 10
neurons = 20
# evolution
mutation_rate = 0.10
parents_to_keep = 0.33
# generate the brains
# brains = []
# for i in range(0, n_cars):
# seed = random.random()
# brains += [population.NeuronBrain(1,1,layers,neurons, seed)]
# print seed
brains = [population.NeuronBrain(5,2,layers,neurons, random.random()) for i in range(0,n_cars)]
cars = [population.CarSpecimen(start[0],start[1], brain=brains[i], name="Car {0}".format(i)) for i in range(0,n_cars)]
# # # nparents to keep
# for b in brains:
# print b.dimension()
# parents = cars[1:int(n_cars * parents_to_keep)]
# parent_brains = [x.brain for x in parents]
# mutation_func = lambda l: population.mutate(l, mutation_rate, 0.5)
# nbrains = population.breed(parent_brains, n_cars, mutation_func, True)
# print nbrains
# print [x in nbrains for x in parent_brains]
# create the application
# print cars
app = game.App(players=cars)
app.on_execute()
| true |
59acb29b1e14e36b1c69230bfc320e122295e66f
|
Python
|
jeremyperthuis/UVSQ_BioInformatique
|
/td2/pgm8.py
|
UTF-8
| 241 | 3.609375 | 4 |
[] |
no_license
|
sq1 = raw_input("inserer une sequence ADN :")
i=0
n=len(sq1)-1
x=0
while i<n :
if sq1[i]==sq1[n] :
x=x+1
i=i+1
if x == (len(sq1)-1)/2 :
print "cette sequence est un palindrome"
else :
print"cette sequence n'est pas un palindrome"
| true |
1b276b69af3d8b7c304ffbfee9d891bb2a5fc6c7
|
Python
|
wadimiusz/hseling-repo-diachrony-webvectors
|
/hseling_lib_diachrony_webvectors/hseling_lib_diachrony_webvectors/algos/global_anchors.py
|
UTF-8
| 2,582 | 3.140625 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import gensim
import numpy as np
import copy
from tqdm.auto import tqdm
from utils import log, intersection_align_gensim
from gensim.matutils import unitvec
class GlobalAnchors(object):
def __init__(self, w2v1, w2v2, assume_vocabs_are_identical=False):
if not assume_vocabs_are_identical:
w2v1, w2v2 = intersection_align_gensim(copy.copy(w2v1),
copy.copy(w2v2))
self.w2v1 = w2v1
self.w2v2 = w2v2
def __repr__(self):
return "GlobalAnchors"
def get_global_anchors(self, word: str, w2v: gensim.models.KeyedVectors):
"""
This takes in a word and a KeyedVectors model and returns a vector of cosine distances
between this word and each word in the vocab.
:param word:
:param w2v:
:return: np.array of distances shaped (len(w2v.vocab),)
"""
word_vector = w2v.get_vector(word)
similarities = gensim.models.KeyedVectors.cosine_similarities(word_vector, w2v.vectors)
return unitvec(similarities)
def get_score(self, word: str):
w2v1_anchors = self.get_global_anchors(word, self.w2v1)
w2v2_anchors = self.get_global_anchors(word, self.w2v2)
score = np.dot(w2v1_anchors, w2v2_anchors)
return score
def get_changes(self, top_n_changed_words: int):
"""
This method uses approach described in
Yin, Zi, Vin Sachidananda, and Balaji Prabhakar.
"The global anchor method for quantifying linguistic shifts and
domain adaptation." Advances in Neural Information Processing Systems. 2018.
It can be described as follows. To evaluate how much the meaning of a given word differs in
two given corpora, we take cosine distance from the given word to all words
in the vocabulary; those values make up a vector with as many components as there are words
in the vocab. We do it for both corpora and then compute the cosine distance
between those two vectors
:param top_n_changed_words: we will output n words that differ the most in the given corpora
:return: list of pairs (word, score), where score indicates how much a word has changed
"""
log('Doing global anchors')
result = list()
for word in tqdm(self.w2v1.wv.vocab.keys()):
score = self.get_score(word)
result.append((word, score))
result = sorted(result, key=lambda x: x[1])
result = result[:top_n_changed_words]
log('\nDone')
return result
| true |
7bde88600f52f45f9e8b1f99707aa6a01e719b72
|
Python
|
PAVANANUTHALAPATI/python-
|
/range.py
|
UTF-8
| 88 | 3.296875 | 3 |
[] |
no_license
|
pav=int(raw_input())
if pav in range (1,10):
print("yes")
else:
print("no")
| true |
97b0a68ee463f34a5ef2d2d429dad41b49121f51
|
Python
|
GPUOpen-Drivers/llpc
|
/script/gc-amdvlk-docker-images.py
|
UTF-8
| 4,235 | 2.8125 | 3 |
[
"MIT",
"Apache-2.0",
"NCSA"
] |
permissive
|
#! /usr/bin/env python3
"""Script to garbage collect old amdvlk docker images created by the public CI on GitHub.
Requires python 3.8 or later.
"""
import argparse
import json
import logging
import subprocess
import sys
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple
def _run_cmd(cmd: List[str]) -> Tuple[bool, str]:
"""
Runs a shell command capturing its output.
Args:
cmd: List of strings to invoke as a subprocess
Returns:
Tuple: success, stdout. The first value is True on success.
"""
logging.info('Running command: %s', ' '.join(cmd))
result = subprocess.run(cmd, capture_output=True, check=False, text=True)
if result.returncode != 0:
logging.info('%s', result.stderr)
return False, ''
return True, result.stdout
def query_images(artifact_repository_url: str) -> Optional[List[Dict[str, Any]]]:
"""
Returns a list of JSON objects representing docker images found under
|artifact_repository_url|, or None on error.
Sample JSON object:
{
"createTime": "2022-07-11T20:20:23.577823Z",
"package": "us-docker.pkg.dev/stadia-open-source/amdvlk-public-ci/amdvlk_release_gcc_assertions",
"tags": "",
"updateTime": "2022-07-11T20:20:23.577823Z",
"version": "sha256:e101b6336fa78014e4008df59667dd84616dc8d1b60c2240f3246ab9a1ed6b20"
}
"""
ok, text = _run_cmd(['gcloud', 'artifacts', 'docker', 'images', 'list',
artifact_repository_url, '--format=json', '--quiet'])
if not ok:
return None
return list(json.loads(text))
def find_images_to_gc(images: List[Dict[str, Any]], num_last_to_keep) -> List[Dict[str, Any]]:
"""
Returns a subset of |images| that should be garbage collected. Preserves tagged
images and also the most recent |num_last_to_keep| for each package.
"""
package_to_images = defaultdict(list)
for image in images:
package_to_images[image['package']].append(image)
to_gc = []
for _, images in package_to_images.items():
# Because the time format is ISO 8601, the lexicographic order is also chronological.
images.sort(key=lambda x: x['createTime'])
for image in images[:-num_last_to_keep]:
if not image['tags']:
to_gc.append(image)
return to_gc
def delete_images(images: List[Dict[str, Any]], dry_run: bool) -> None:
"""
Deletes all |images| from the repository. When |dry_run| is True, synthesizes the delete
commands and logs but does not execute them.
"""
for image in images:
image_path = image['package'] + '@' + image['version']
cmd = ['gcloud', 'artifacts', 'docker', 'images', 'delete',
image_path, '--quiet']
if dry_run:
logging.info('Dry run: %s', ' '.join(cmd))
continue
ok, _ = _run_cmd(cmd)
if not ok:
logging.warning('Failed to delete image:\n%s', image)
def main() -> int:
logging.basicConfig(
format='%(levelname)s %(asctime)s %(filename)s:%(lineno)d %(message)s',
level=logging.INFO)
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--dry-run',
action='store_true',
default=False,
help='Do not delete or modify any docker images (default: %(default)s).')
parser.add_argument(
'--keep-last',
default=8,
help='The number of the most recent images to keep for each package (default: %(default)s).')
parser.add_argument(
'--repository-url',
default='us-docker.pkg.dev/stadia-open-source/amdvlk-public-ci',
help='The repository with docker images to garbage collect (default: %(default)s).'
)
args = parser.parse_args()
all_images = query_images(args.repository_url)
if all_images is None:
logging.error('Failed to list docker images under \'%s\'', args.repository_url)
return 1
to_gc = find_images_to_gc(all_images, args.keep_last)
delete_images(to_gc, args.dry_run)
return 0
if __name__ == '__main__':
sys.exit(main())
| true |
d710863243bb183e1be2960d5b8fc0b1602a7756
|
Python
|
Dhirajpatel121/IPL-Predictive-Analytics
|
/IPL/MIvsCSK.py
|
UTF-8
| 7,233 | 2.8125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.simplefilter('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
deliveries = pd.read_csv('C:/Users/SONY/Desktop/IPL/deliveries.csv')
deliveries
# In[3]:
matches = pd.read_csv('C:/Users/SONY/Desktop/IPL/matches.csv')
matches
# In[4]:
#### Records of MI vs CSK (matches dataset)
micsk =matches[np.logical_or(np.logical_and(matches['team1']=='Mumbai Indians',matches['team2']=='Chennai Super Kings'),np.logical_and(matches['team2']=='Mumbai Indians',matches['team1']=='Chennai Super Kings'))]
micsk
# In[5]:
# Head to head MI vs CSK across all season's
sns.set(style='dark')
fig=plt.gcf()
fig.set_size_inches(8,5.2)
sns.countplot(micsk['winner'],order=micsk['winner'].value_counts().index)
plt.text(-0.1,5,str(micsk['winner'].value_counts()['Mumbai Indians']),size=29,color='black')
plt.text(0.9,3,str(micsk['winner'].value_counts()['Chennai Super Kings']),size=29,color='black')
plt.xlabel('Winner',fontsize=15)
plt.ylabel('Count',fontsize=15)
plt.yticks(fontsize=0)
plt.title('MI vs CSK - head to head')
plt.show()
# In[6]:
#H2H previous 2 season's
df_season_record =micsk[micsk['season'] >=2018]
df_season_record_df = df_season_record[['season','winner','id']]
df_season_record_df
# In[7]:
##### Head to head mi vs csk last 2 season's
sns.set(style='dark')
fig=plt.gcf()
fig.set_size_inches(10,8)
sns.countplot(df_season_record_df['winner'],order=df_season_record_df['winner'].value_counts().index)
plt.text(-0.1,2.5,str(df_season_record_df['winner'].value_counts()['Mumbai Indians']),size=29,color='black')
plt.text(0.95,0.5,str(df_season_record_df['winner'].value_counts()['Chennai Super Kings']),size=29,color='black')
plt.xlabel('Winner',fontsize=15)
plt.ylabel('Count',fontsize=15)
plt.yticks(fontsize=0)
plt.title('MI vs CSK - head to head last 2 season')
plt.show()
# # Looking at the previous 2 seasons' record (recent form) and the overall head-to-head record, MI have dominated CSK.
#
# # Hence, according to recent form and this analysis, MI will win today's match.
# In[8]:
#For deliveries dataset
cskmi=deliveries[np.logical_or(np.logical_and(deliveries['batting_team'] == 'Mumbai Indians',deliveries['bowling_team']== 'Chennai Super Kings'),np.logical_and(deliveries['bowling_team']=='Mumbai Indians',deliveries['batting_team']=='Chennai Super Kings'))]
cskmi
# In[9]:
# Previous 2 season's records of deliveries dataset (filtered with the help of match_id)
cskmi_2season = cskmi[cskmi['match_id'] >= 7894]
cskmi_2season
# # De kock
# In[10]:
## Records of Q de kock against CSK in first 10 balls (played only 1 season with MI & 4 matches against CSK)
def dekock(matchid):
df_dekock = cskmi_2season[cskmi_2season.batsman == 'Q de Kock']
x = df_dekock[df_dekock['match_id'] == matchid]
y = x[['match_id','batsman_runs']].head(10)
z = y[y.batsman_runs >= 4 ]
print(z['batsman_runs'].sum())
# In[11]:
# 1st match against csk in 2019;total boundary runs=4
dekock(11151)
# In[12]:
# 2nd match against csk in 2019;total boundary runs=10
dekock(11335)
# In[13]:
# 3rd match against csk in 2019;total boundary runs=8
dekock(11412)
# ## Looking at QdK's last 3 matches against CSK, in two of those matches he scored less than 10 boundary runs in his first 10 balls.
# ## Hence, according to this analysis, today QdK is predicted to score less than 10 boundary runs in his first 10 balls.
# # Dot balls ratio
# In[15]:
# dot balls
def dotballs(bowler_name):
df_bowler = cskmi_2season[cskmi_2season.bowler == bowler_name]
total_dot = df_bowler[df_bowler['total_runs'] == 0]
dots_bowler = total_dot['total_runs'].count()
total_balls=df_bowler['ball'].count() - df_bowler[df_bowler.wide_runs >= 1].count() - df_bowler[df_bowler.noball_runs >= 1].count()
total_balls_df = total_balls['ball']
print((dots_bowler/total_balls_df).round(3)*100)
# In[16]:
# Chahar dot balls ratio
print("Rahul Chahar dot balls in % :")
dotballs('RD Chahar')
# In[17]:
# bumrah dot balls ratio
print("Jasprit Bumrah dot balls in % :")
dotballs('JJ Bumrah')
# In[18]:
# hardik dotball ratio
print("Hardik pandya dot balls in % :")
dotballs('HH Pandya')
# In[19]:
# krunal dot ball ratio
print("Krunal Pandya dot balls in % :")
dotballs('KH Pandya')
# In[20]:
## For boult dot ball ratio
csk = deliveries[deliveries.batting_team == 'Chennai Super Kings']
boult_against_csk = csk[csk.bowler == 'TA Boult']
dotballs = boult_against_csk[boult_against_csk['total_runs'] == 0].count()
final_dotballs = dotballs['total_runs']
total_balls = boult_against_csk['ball'].count() - boult_against_csk[boult_against_csk.wide_runs >= 1].count() - boult_against_csk[boult_against_csk.noball_runs >= 1].count()
dfx = (final_dotballs/total_balls)*100
print("Boult dot balls ratio against csk in % =",dfx['ball'].round())
# ## The two highest dot-ball ratios belong to Rahul Chahar and Krunal.
# ## However, in the current season Chahar has bowled more dot balls and has a better economy than Krunal.
# ## Hence, according to current form and this analysis, Rahul Chahar will have the highest dot-ball ratio among the Mumbai bowlers.
# # BLS
# In[21]:
## BLS
def BLS(bowlername):
record = cskmi_2season[cskmi_2season.bowler == bowlername]
record_wickets = record['dismissal_kind'].count()
avg_wickets = record_wickets/cskmi_2season['match_id'].nunique()
total_dot = record[record['total_runs'] == 0]
avg_dots_bowler = total_dot['total_runs'].count()/cskmi_2season['match_id'].nunique()
total_balls= record['ball'].count() - record[record.wide_runs >= 1].count() - record[record.noball_runs >= 1].count()
total_balls_df = total_balls['ball'] /cskmi_2season['match_id'].nunique()
total_boundaries = record[record.batsman_runs >= 4]
total_boundaries_final =total_boundaries['batsman_runs'].count()
total_boundaries_runs = total_boundaries['batsman_runs'].sum()
final = (avg_wickets + avg_dots_bowler - (total_boundaries_runs/total_boundaries_final))/total_balls_df
print('BLS score =' ,final.round(3))
# In[22]:
print("1. Bumrah")
BLS('JJ Bumrah')
print("2. Rahul chahar")
BLS('RD Chahar')
print("3. Krunal pandya")
BLS('KH Pandya')
print("4. Tahir")
BLS('Imran Tahir')
print("5. Deepak chahar")
BLS('DL Chahar')
print("6. SN Thakur")
BLS('SN Thakur')
print("7. HH Pandya")
BLS('HH Pandya')
print("RA Jadeja")
BLS('RA Jadeja')
# ## The BLS score has been highest for Deepak Chahar.
# ## Hence, according to this analysis, Deepak Chahar will have the highest BLS score.
# In[27]:
## Looking for last 3 matches 4 & 6 in same over
def match(matchid):
df = cskmi_2season[cskmi_2season.match_id == matchid]
dfx = df[df.batsman_runs >= 4]
dataframe = pd.DataFrame(dfx.groupby(['match_id','over','ball','inning']).sum()['batsman_runs'])
print(dataframe)
# In[24]:
match(11335)
# In[25]:
match(11412)
# In[26]:
match(11415)
# ## Looking at the last 3 matches and the current season's (2020) record, on average a 4 and a 6 have been scored in the same over in 5-6 overs per match at Sharjah.
# ## Hence, according to this analysis, 5-6 overs is the answer.
| true |
3541096c6c8edd5bcc12e74e32dadbffe14fcc02
|
Python
|
helsinkithinkcompany/wide
|
/FennicaTrends/serious-spin-master/data/python/countRelativeWeights.py
|
UTF-8
| 1,828 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
import json, sys
from math import pow
# FILE HANDLING #
def writeJsonToFile(json_data, file_path):
try:
with open(file_path, 'w') as outfile:
json.dump(json_data, outfile)
return True
except Exception as e:
print(e)
print('Failed to dump json to file ' + file_path)
return False
def getJsonFromFile(file_path):
try:
with open(file_path) as infile:
json_data = json.load(infile)
return json_data
except Exception as e:
print(e)
print('Failed to get json from file ' + file_path)
return False
if len(sys.argv) < 2:
print("Usage: %s fennica-all.json"%sys.argv[0])
sys.exit()
fennica_all = getJsonFromFile(sys.argv[1])
PATH_TO_FENNICA_ALL_JSON_FILE = './fennica-graph.json'
# DATA HANDLING #
def countMagicValue(this, mean, max):
if int(this) - int(mean) == 0:
return 50
elif int(this) < int(mean):
diff = 1 + (int(mean) - int(this)) / mean
return int(50 - 50 * (1 - 1 / diff))
elif int(this) > int(mean):
diff = 1 + (int(this) - int(mean))/ (max - mean)
return int(50 + 50 * (1 - 1 / diff))
else:
return 50
def getMeanAndMaxOfYear(json_data, year):
sum = 0
count = 0
max = 0
for word in json_data[year]:
count = count + 1
sum = sum + json_data[year][word]
if max < json_data[year][word]:
max = json_data[year][word]
return float(sum)/float(count), float(max)
def changeWordWeightsToRelativeOfMeanByYear(json_data, year):
mean, max = getMeanAndMaxOfYear(json_data, year)
for word in json_data[year]:
json_data[year][word] = countMagicValue(float(json_data[year][word]), mean, max)
def changeWordWeightsToRelative(json_data):
for year in json_data:
changeWordWeightsToRelativeOfMeanByYear(json_data, year)
return json_data
fennica_all_relative = changeWordWeightsToRelative(fennica_all)
writeJsonToFile(fennica_all_relative, 'fennica-graph.json')
| true |
66f5a6d7bef3707c974e3210da2db94f6e393a4a
|
Python
|
schuCS50/CS33a
|
/finalproject/games/cards/models.py
|
UTF-8
| 4,162 | 2.703125 | 3 |
[] |
no_license
|
from django.contrib.auth.models import AbstractUser
from django.db import models
# Extended user class
class User(AbstractUser):
def __str__(self):
return f"User {self.id}: {self.username}"
# Two Player Game extendable
class TwoPlayerGame(models.Model):
player1 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="p1_games")
player2 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="p2_games")
createdTimestamp = models.DateTimeField(auto_now_add=True)
updatedTimestamp = models.DateTimeField(auto_now=True)
winner = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="won_games",
blank=True,
null=True)
class Meta:
abstract = True
#TicTacToe Game
class TicTacToe(TwoPlayerGame):
cell1 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell1",
blank=True,
null=True)
cell2 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell2",
blank=True,
null=True)
cell3 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell3",
blank=True,
null=True)
cell4 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell4",
blank=True,
null=True)
cell5 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell5",
blank=True,
null=True)
cell6 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell6",
blank=True,
null=True)
cell7 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell7",
blank=True,
null=True)
cell8 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell8",
blank=True,
null=True)
cell9 = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="cell9",
blank=True,
null=True)
# Function to return formatted output
def serialize(self):
return {
"player1": self.player1.username,
"player2": self.player2.username,
"createdTimestamp": self.createdTimestamp.strftime(
"%b %d %Y, %I:%M %p"),
"updatedTimestamp": self.updatedTimestamp.strftime(
"%b %d %Y, %I:%M %p"),
"winner": self.winner.username if self.winner else None,
"cells": [self.cell1.username if self.cell1 else None,
self.cell2.username if self.cell2 else None,
self.cell3.username if self.cell3 else None,
self.cell4.username if self.cell4 else None,
self.cell5.username if self.cell5 else None,
self.cell6.username if self.cell6 else None,
self.cell7.username if self.cell7 else None,
self.cell8.username if self.cell8 else None,
self.cell9.username if self.cell9 else None]
}
| true |
20ed11ef0f52d20c8f5abfc8c2e88cd5aa19a6d4
|
Python
|
alaalial/relancer-artifact
|
/relancer-exp/original_notebooks/pavansubhasht_ibm-hr-analytics-attrition-dataset/imbalanceddata-predictivemodelling-by-ibm-dataset.py
|
UTF-8
| 19,857 | 3.390625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# # IBM HR Employee Attrition & Performance.
# ## [Please star/upvote in case you find it helpful.]
# In[ ]:
from IPython.display import Image
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-logo.png")
# ## CONTENTS ::->
# [ **1 ) Exploratory Data Analysis**](#content1)
# [ **2) Correlation b/w Features**](#content2)
# [** 3) Feature Selection**](#content3)
# [** 4) Preparing Dataset**](#content4)
# [ **5) Modelling**](#content5)
#
# Note that this notebook uses traditional ML algorithms. I have another notebook in which I have used an ANN on the same dataset. To check it out please follow the below link-->
#
# https://www.kaggle.com/rajmehra03/an-introduction-to-ann-keras-with-ibm-hr-dataset/
# [ **6) Conclusions**](#content6)
# <a id="content1"></a>
# ## 1 ) Exploratory Data Analysis
# ## 1.1 ) Importing Various Modules
# In[ ]:
# Ignore the warnings
import warnings
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
# data visualisation and manipulation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import seaborn as sns
import missingno as msno
#configure
# sets matplotlib to inline and displays graphs below the corresponding cell.
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
#import the necessary modelling algos.
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
#model selection
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score,precision_score,recall_score,confusion_matrix,roc_curve,roc_auc_score
from sklearn.model_selection import GridSearchCV
from imblearn.over_sampling import SMOTE
#preprocess.
from sklearn.preprocessing import MinMaxScaler,StandardScaler,Imputer,LabelEncoder,OneHotEncoder
# ## 1.2 ) Reading the data from a CSV file
# In[ ]:
df=pd.read_csv(r"../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
# In[ ]:
df.head()
# In[ ]:
df.shape
# In[ ]:
df.columns
# ## 1.3 ) Missing Values Treatment
# In[ ]:
df.info() # no null or 'Nan' values.
# In[ ]:
df.isnull().sum()
# In[ ]:
msno.matrix(df) # just to visualize. one final time.
# ## 1.4 ) The Features and the 'Target'
# In[ ]:
df.columns
# In[ ]:
df.head()
# In all we have 34 features consisting of both the categorical as well as the numerical features. The target variable is the
# 'Attrition' of the employee which can be either a Yes or a No. This is what we have to predict.
# **Hence this is a Binary Classification problem. **
# ## 1.5 ) Univariate Analysis
# In this section I have done the univariate analysis i.e. I have analysed the range or distribution of the values that various features take. To better analyze the results I have plotted various graphs and visualizations wherever necessary. Univariate analysis helps us identify the outliers in the data.
# In[ ]:
df.describe()
# Let us first analyze the various numeric features. To do this we can actually plot a boxplot showing all the numeric features. Also the distplot or a histogram is a reasonable choice in such cases.
# In[ ]:
sns.factorplot(data=df,kind='box',size=10,aspect=3)
# Note that all the features have pretty different scales and so plotting a boxplot is not a good idea. Instead what we can do is plot histograms of various continuously distributed features.
#
# We can also plot a kdeplot showing the distribution of the feature. Below I have plotted a kdeplot for the 'Age' feature.
# Similarly we plot for other numeric features also. Similarly we can also use a distplot from seaborn library which combines most..
# In[ ]:
sns.kdeplot(df['Age'],shade=True,color='#ff4125')
# In[ ]:
sns.distplot(df['Age'])
# Similarly we can do this for all the numerical features. Below I have plotted the subplots for the other features.
# In[ ]:
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
fig,ax = plt.subplots(5,2, figsize=(9,9))
sns.distplot(df['TotalWorkingYears'], ax = ax[0,0])
sns.distplot(df['MonthlyIncome'], ax = ax[0,1])
sns.distplot(df['YearsAtCompany'], ax = ax[1,0])
sns.distplot(df['DistanceFromHome'], ax = ax[1,1])
sns.distplot(df['YearsInCurrentRole'], ax = ax[2,0])
sns.distplot(df['YearsWithCurrManager'], ax = ax[2,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[3,0])
sns.distplot(df['PercentSalaryHike'], ax = ax[3,1])
sns.distplot(df['YearsSinceLastPromotion'], ax = ax[4,0])
sns.distplot(df['TrainingTimesLastYear'], ax = ax[4,1])
plt.tight_layout()
print()
# Let us now analyze the various categorical features. Note that in these cases the best way is to use a count plot to show the relative count of observations of different categories.
# In[ ]:
cat_df=df.select_dtypes(include='object')
# In[ ]:
cat_df.columns
# In[ ]:
def plot_cat(attr,labels=None):
if(attr=='JobRole'):
sns.factorplot(data=df,kind='count',size=5,aspect=3,x=attr)
return
sns.factorplot(data=df,kind='count',size=5,aspect=1.5,x=attr)
# I have made a function that accepts a column name as a string. In our case this string will be the name of the column or attribute which we want to analyze. The function then plots the countplot for that feature, which makes it easier to visualize.
# In[ ]:
plot_cat('Attrition')
# **Note that the number of observations belonging to the 'No' category is way greater than that belonging to 'Yes' category. Hence we have skewed classes and this is a typical example of the 'Imbalanced Classification Problem'. To handle such types of problems we need to use the over-sampling or under-sampling techniques. I shall come back to this point later.**
# **Let us now similarly analyze the other categorical features.**
# In[ ]:
plot_cat('BusinessTravel')
# The above plot clearly shows that most of the people belong to the 'Travel_Rarely' class. This indicates that most of the people did not have a job which asked them for frequent travelling.
# In[ ]:
plot_cat('OverTime')
# In[ ]:
plot_cat('Department')
# In[ ]:
plot_cat('EducationField')
# In[ ]:
plot_cat('Gender')
# Note that males are present in higher number.
# In[ ]:
plot_cat('JobRole')
# ** Similarly we can continue for other categorical features. **
# **Note that the same function can also be used to better analyze the numeric discrete features like 'Education','JobSatisfaction' etc...
# In[ ]:
# just uncomment the following cell.
# In[ ]:
# num_disc=['Education','EnvironmentSatisfaction','JobInvolvement','JobSatisfaction','WorkLifeBalance','RelationshipSatisfaction','PerformanceRating']
# for i in num_disc:
# plot_cat(i)
# similarly we can interpret these graphs.
# <a id="content2"></a>
# ## 2 ) Corelation b/w Features
#
# In[ ]:
#correlation matrix.
cor_mat= df.corr()
mask = np.array(cor_mat)
mask[np.tril_indices_from(mask)] = False
fig=plt.gcf()
fig.set_size_inches(30,12)
sns.heatmap(data=cor_mat,mask=mask,square=True,annot=True,cbar=True) # the call that actually draws the heatmap
# ###### SOME INFERENCES FROM THE ABOVE HEATMAP
#
# 1. Self relation ie of a feature to itself is equal to 1 as expected.
#
# 2. JobLevel is highly related to Age as expected as aged employees will generally tend to occupy higher positions in the company.
#
# 3. MonthlyIncome is very strongly related to JobLevel, as expected, since senior employees will definitely earn more.
#
# 4. PerformanceRating is highly related to PercentSalaryHike which is quite obvious.
#
# 5. Also note that TotalWorkingYears is highly related to JobLevel which is expected as senior employees must have worked for a larger span of time.
#
# 6. YearsWithCurrManager is highly related to YearsAtCompany.
#
# 7. YearsAtCompany is related to YearsInCurrentRole.
#
#
# **Note that we can drop some highly correlated features as they add redundancy to the model, but since the correlation is quite low in general, let us keep all the features for now. In case of highly correlated features we can use something like Principal Component Analysis (PCA) to reduce our feature space.**
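# Purely as an illustration of the PCA idea mentioned above (not part of the original pipeline): the sketch below runs PCA on the numeric columns only, after scaling them; the choice of 10 components is an arbitrary example, not a tuned value.
# In[ ]:
from sklearn.decomposition import PCA
num_df = df.select_dtypes(include=[np.number])        # numeric columns only
scaled_num = StandardScaler().fit_transform(num_df)   # PCA is scale-sensitive, so scale first
pca = PCA(n_components=10)                            # arbitrary illustrative choice
reduced = pca.fit_transform(scaled_num)
print(reduced.shape, pca.explained_variance_ratio_.sum())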
# In[ ]:
df.columns
# <a id="content3"></a>
# ## 3 ) Feature Selection
#
# ## 3.1 ) Plotting the Features against the 'Target' variable.
# #### 3.1.1 ) Age
# Note that Age is a continuous quantity and therefore we can plot it against the Attrition using a boxplot.
# In[ ]:
sns.factorplot(data=df,y='Age',x='Attrition',size=5,aspect=1,kind='box')
# Note that the median as well as the maximum age of the people with 'No' attrition is higher than that of the 'Yes' category. This shows that older people are less likely to leave the organisation, which makes sense as they may have settled in the organisation.
# #### 3.1.2 ) Department
# Note that both Attrition (the target) and the Department are categorical. In such cases a cross-tabulation is the most reasonable way to analyze the trends; it clearly shows the number of observations for each class, which makes it easier to analyze the results.
# In[ ]:
df.Department.value_counts()
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='Department')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Department],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Note that most of the observations correspond to 'No', as we saw previously. About 81 % of the people in HR don't want to leave the organisation and only 19 % want to leave. Similar conclusions can be drawn for other departments too from the above cross-tabulation.
# #### 3.1.3 ) Gender
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.Gender],margins=True,normalize='index') # set normalize=index to view rowwise %.
# About 85 % of females want to stay in the organisation while only 15 % want to leave. Overall, about 83 % of employees want to stay in the organisation, with only about 16 % wanting to leave the company.
# #### 3.1.4 ) Job Level
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobLevel],margins=True,normalize='index') # set normalize=index to view rowwise %.
# People in Joblevel 4 have a very high percent for a 'No' and a low percent for a 'Yes'. Similar inferences can be made for other job levels.
# #### 3.1.5 ) Monthly Income
# In[ ]:
sns.factorplot(data=df,kind='bar',x='Attrition',y='MonthlyIncome')
# Note that the average income for the 'No' class is considerably higher, which is expected: those earning well will generally be less willing to exit the organisation. Similarly, employees who are not earning as well will be more likely to want to change the company.
# #### 3.1.6 ) Job Satisfaction
# In[ ]:
sns.factorplot(data=df,kind='count',x='Attrition',col='JobSatisfaction')
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# This shows an interesting trend: for higher values of job satisfaction (i.e. the more a person is satisfied with his job), a smaller percentage say 'Yes', which is quite obvious, as highly contented workers will obviously be less inclined to leave the organisation.
# #### 3.1.7 ) Environment Satisfaction
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.EnvironmentSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Again we can notice that the relative percentage of 'No' is higher among people with a higher grade of environment satisfaction, which is expected.
# #### 3.1.8 ) Job Involvement
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.JobInvolvement],margins=True,normalize='index') # set normalize=index to view rowwise %.
# #### 3.1.9 ) Work Life Balance
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.WorkLifeBalance],margins=True,normalize='index') # set normalize=index to view rowwise %.
# Again we notice a similar trend, as people with a better work-life balance don't want to leave the organisation.
# #### 3.1.10 ) RelationshipSatisfaction
# In[ ]:
pd.crosstab(columns=[df.Attrition],index=[df.RelationshipSatisfaction],margins=True,normalize='index') # set normalize=index to view rowwise %.
# ###### Notice that I have plotted just some of the important features against our 'Target' variable, i.e. Attrition in our case. Similarly we can plot other features against the 'Target' variable and analyze the trends, i.e. how each feature affects the 'Target' variable.
# ## 3.2 ) Feature Selection
# Feature selection is one of the main steps of the preprocessing phase, as the features which we choose directly affect the model performance. While some of the features seem to be less useful in terms of the context, others seem equally useful. The better the features we use, the better our model will perform. **After all, Garbage in, Garbage out ;)**.
#
# We can also use the Recursive Feature Elimination technique (a wrapper method) to choose the desired number of most important features.
# The Recursive Feature Elimination (or RFE) works by recursively removing attributes and building a model on those attributes that remain.
#
# It uses the model accuracy to identify which attributes (and/or combination of attributes) contribute the most to predicting the target attribute.
#
# We can use it directly from the scikit-learn library by importing the RFE class it provides. But note that since it tries different subsets of features, it is quite computationally expensive, and I shall ignore it here.
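# For reference, here is a rough sketch of RFE applied to just the numeric columns, which is cheap enough to run (the full encoded feature set is only built in section 4); the choice of 10 features and of logistic regression as the estimator are arbitrary illustrative assumptions.
# In[ ]:
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
num_X = df.select_dtypes(include=[np.number])                 # numeric features only, for illustration
target = (df['Attrition'] == 'Yes').astype(int)               # encode the target as 0/1
rfe = RFE(estimator=LogisticRegression(max_iter=1000), n_features_to_select=10)
rfe.fit(num_X, target)
print(num_X.columns[rfe.support_])                            # the 10 features RFE keeps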
# In[ ]:
df.drop(['BusinessTravel','DailyRate','EmployeeCount','EmployeeNumber','HourlyRate','MonthlyRate','NumCompaniesWorked','Over18','StandardHours', 'StockOptionLevel','TrainingTimesLastYear'],axis=1,inplace=True)
#
# <a id="content4"></a>
# ## 4 ) Preparing Dataset
#
# ## 4.1 ) Feature Encoding
# I have used the Label Encoder from the scikit library to encode all the categorical features.
# In[ ]:
def transform(feature):
le=LabelEncoder()
df[feature]=le.fit_transform(df[feature])
print(le.classes_)
# In[ ]:
cat_df=df.select_dtypes(include='object')
cat_df.columns
# In[ ]:
for col in cat_df.columns:
transform(col)
# In[ ]:
df.head() # just to verify.
# ## 4.2 ) Feature Scaling.
# The scikit library provides various types of scalers including MinMax Scaler and the StandardScaler. Below I have used the StandardScaler to scale the data.
# In[ ]:
scaler=StandardScaler()
scaled_df=scaler.fit_transform(df.drop('Attrition',axis=1))
X=scaled_df
Y=df['Attrition'].values  # .as_matrix() was removed in newer pandas versions; .values is equivalent here
# ## 4.3 ) Splitting the data into training and validation sets
# In[ ]:
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.25,random_state=42)
# <a id="content5"></a>
# ## 5 ) Modelling
#
# ## 5.1 ) Handling the Imbalanced dataset
# Note that we have an imbalanced dataset, with the majority of observations being of one type ('No') in our case. In this dataset, for example, about 84 % of observations are 'No' and only 16 % are 'Yes', and hence this is an imbalanced dataset.
#
# To deal with such an imbalanced dataset we have to take certain measures, otherwise the performance of our model can be significantly affected. In this section I have discussed two approaches to handle such datasets.
# ## 5.1.1 ) Oversampling the Minority or Undersampling the Majority Class
#
#
# In an imbalanced dataset the main problem is that the data is highly skewed, i.e. the number of observations of one class is much larger than that of the other. Therefore in this approach we either increase the number of observations corresponding to the minority class (oversampling) or decrease the number of observations for the majority class (undersampling).
#
# Note that in our case the number of observations is already pretty low and so oversampling will be more appropriate.
#
# Below I have used an oversampling technique known as the SMOTE(Synthetic Minority Oversampling Technique) which randomly creates some 'Synthetic' instances of the minority class so that the net observations of both the class get balanced out.
#
# One more thing to take care of is to apply SMOTE only to the training data (i.e. after the train/test split, or within each cross-validation fold), so that synthetic samples do not leak into the evaluation data and give an over-optimistic estimate; the same caution applies to feature selection.
# In[ ]:
oversampler=SMOTE(random_state=42)
x_train_smote, y_train_smote = oversampler.fit_sample(x_train,y_train)
# ## 5.1.2 ) Using the Right Evaluation Metric
# Another important point while dealing with the imbalanced classes is the choice of right evaluation metrics.
#
# Note that accuracy is not a good choice. Because the data is skewed, even an algorithm that always predicts the majority class will achieve a very high accuracy.
# For example, if we have 20 observations of one type and 980 of another, a classifier predicting the majority class at all times will attain an accuracy of 98 % but won't convey any useful information.
#
# Hence in these type of cases we may use other metrics such as -->
#
#
# **'Precision'**-- (true positives)/(true positives+false positives)
#
# **'Recall'**-- (true positives)/(true positives+false negatives)
#
# **'F1 Score'**-- The harmonic mean of 'precision' and 'recall'
#
# **'AUC ROC'**-- the area under the ROC curve; the ROC curve is a plot of 'sensitivity' (recall) against '1 - specificity' (the false positive rate)
#
# **'Confusion Matrix'**-- Plot the entire confusion matrix
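# To make these definitions concrete, here is a tiny self-contained illustration on toy labels (purely synthetic numbers, not taken from this dataset) showing how scikit-learn computes them.
# In[ ]:
from sklearn.metrics import f1_score
toy_true = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]     # imbalanced toy ground truth
toy_pred = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0]     # toy predictions
print(precision_score(toy_true, toy_pred))     # TP / (TP + FP) = 1/2
print(recall_score(toy_true, toy_pred))        # TP / (TP + FN) = 1/2
print(f1_score(toy_true, toy_pred))            # harmonic mean of precision and recall
print(confusion_matrix(toy_true, toy_pred))    # rows: actual class, columns: predicted class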
# ## 5.2 ) Building A Model & Making Predictions
# In this section I have used different models from the scikit library and trained them on the previously oversampled data and then used them for the prediction purposes.
# In[ ]:
def compare(model):
    clf=model
    clf.fit(x_train_smote,y_train_smote)
    pred=clf.predict(x_test)
    # Calculating various metrics; scikit-learn metrics expect (y_true, y_pred) in that order
    acc.append(accuracy_score(y_test,pred))
    prec.append(precision_score(y_test,pred))
    rec.append(recall_score(y_test,pred))
    auroc.append(roc_auc_score(y_test,pred))
# In[ ]:
acc=[]
prec=[]
rec=[]
auroc=[]
models=[SVC(kernel='rbf'),RandomForestClassifier(),GradientBoostingClassifier()]
model_names=['rbfSVM','RandomForestClassifier','GradientBoostingClassifier']
for model in range(len(models)):
compare(models[model])
d={'Modelling Algo':model_names,'Accuracy':acc,'Precision':prec,'Recall':rec,'Area Under ROC Curve':auroc}
met_df=pd.DataFrame(d)
met_df
# ## 5.3 ) Comparing Different Models
# In[ ]:
def comp_models(met_df,metric):
sns.factorplot(data=met_df,x=metric,y='Modelling Algo',size=5,aspect=1.5,kind='bar')
sns.factorplot(data=met_df,y=metric,x='Modelling Algo',size=7,aspect=2,kind='point')
# In[ ]:
comp_models(met_df,'Accuracy')
# In[ ]:
comp_models(met_df,'Precision')
# In[ ]:
comp_models(met_df,'Recall')
# In[ ]:
comp_models(met_df,'Area Under ROC Curve')
# The above data frame and the visualizations summarize the results after training the different models on the given dataset.
# <a id="content6"></a>
# ## 6) Conclusions
#
# ###### Hence we have completed the analysis of the data and also made predictions using the various ML models.
# In[ ]:
# In[ ]:
Image("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/imagesibm/image-hr.jpg")
# In[ ]:
# # THE END.
# ## [Please star/upvote if you found it helpful.]##
# In[ ]:
| true |
1b75c4e46d9e350474eef9c2c62b0a8be7811c3f
|
Python
|
CapAsdour/code-n-stitch
|
/Password_strength_Checker/output.py
|
UTF-8
| 384 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
import re
v=input("Enter the password to check:")
if(len(v)>=8):
    if(bool(re.match(r'((?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*]).{8,30})',v))==True):
        print("Good going, the password is strong.")
    elif(bool(re.match(r'((\d*)([a-z]*)([A-Z]*)([!@#$%^&*]*).{8,30})',v))==True):
        print("Try something stronger!!")
    else:
        print("You have entered an invalid password.")
else:
    print("The password must be at least 8 characters long.")
| true |
8c42b2cb1e91f49b57b8b67eda41cea9289907e8
|
Python
|
wmm1996528/movie
|
/maoyan.py
|
UTF-8
| 3,578 | 2.734375 | 3 |
[] |
no_license
|
import requests
from pyquery import PyQuery as pq
import re
import time
import pymongo
from movie_douban import HotMovie
class mongdbs():
def __init__(self):
self.host = '127.0.0.1'
self.port = 27017
self.dbName = 'maoyan'
self.conn = pymongo.MongoClient(self.host, self.port)
self.db = self.conn[self.dbName]
class maoyan(object):
def __init__(self,keyword):
self.url='http://maoyan.com/query?&kw='+keyword
self.baseurl = 'http://maoyan.com/cinemas?movieId='
self.headers={
'Host':'maoyan.com',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36'
}
res = requests.get(self.url, headers=self.headers)
html = pq(res.text)
self.movieId = re.findall('movieid:(\d+)',html('div.movie-item').eq(0).find('a').attr('data-val'))[0]
self.mongo = mongdbs()
self.session = requests.Session()
self.movieName = keyword
def page(self):
page = 0
cinemas_list = []
while True:
cinemasHtml = requests.get(self.baseurl + str(self.movieId) + '&offset=' + str(page), headers=self.headers)
print(cinemasHtml.url)
if '抱歉,没有找到相关结果' in cinemasHtml.text:
break
cinemasHtml = pq(cinemasHtml.text)
cinemas = cinemasHtml('div.cinema-cell')
for i in range(len(cinemas)):
name = cinemas.eq(i).find('div.cinema-info a').text()
addr = cinemas.eq(i).find('div.cinema-info p').text()
url = cinemas.eq(i).find('div.buy-btn a').attr('href')
data = {
'name':name,
'addr':addr,
'url':'http://maoyan.com'+url
}
print('this is ',data)
self.mongo.db[self.movieName+'cinemas'].insert(data)
cinemas_list.append(data)
time.sleep(4)
page += 12
for i in cinemas_list:
print(i['name'])
url = i['url']
time.sleep(2)
self.search_price(url,i['name'])
def search_price(self,url,name):
req = self.session.get(url,headers=self.headers)
html = pq(req.text)
divs = html('div.show-list')
lists = []
for i in range(len(divs)):
            if self.movieName in divs.eq(i).find('div.movie-info h3').text():  # match the searched movie instead of a hard-coded title
for j in range(len(divs.eq(i).find('table.plist tbody tr'))):
begin = divs.eq(i).find('div.plist-container table.plist tbody tr').eq(j).find(
'td span.begin-time').text()
end = divs.eq(i).find('div.plist-container table.plist tbody tr').eq(j).find(
'td span.end-time').text()
price = divs.eq(i).find('div.plist-container table.plist tbody tr').eq(j).find(
'td span.sell-price span.stonefont').html().encode()
appearance = {
'begin': begin+'开场',
'end': end,
'price': price
}
lists.append(appearance)
print(lists)
self.mongo.db[self.movieName].insert({
'name':name,
'changci':lists
})
hotmovie = HotMovie()
print(hotmovie)
for i in hotmovie:
    print('Crawling: %s' % i)
s = maoyan(i)
s.page()
| true |
72bd060c16cf2e8034334c0643533764f52687d6
|
Python
|
vvilq27/Python_port_OV7675
|
/port/busCheck.py
|
UTF-8
| 1,977 | 2.625 | 3 |
[] |
no_license
|
import time
import serial
from serial import Serial
import os
import re
import numpy as np
from matplotlib import pyplot as plt
a = list()
t = []
pic = [ ['00' for i in range(320)] for j in range(240)]
s = serial.Serial('COM8', 2000000)
while b"\r\n" not in s.readline():
pass
c = 0
while True:
l = s.readline()
y = l.decode("UTF-8").rstrip('\r\n').split(',')
c += 1
if c > 239:
break
a.append(l.decode("UTF-8").rstrip('\r\n').split(','))
# for row in a:
# print(row)
for row in a:
pic[int(row[-1])] = row;
s.close()
# DATA gathered, make PIC
for row in pic:
# pop frame number if frame not "zero"
if len(set(row)) != 1:
row.pop()
# else:
tmp = []
for i in row:
for j in re.findall('..', i):
tmp.append(int(j, 16))
if len(tmp) > 319:
break
# fill missing data cells with '00'
r = 320 - len(tmp)
for i in range(r):
tmp.append('00')
if len(tmp) > 320:
# print(tmp)
# print(len(tmp))
for i in range(len(tmp) - 320):
tmp.pop()
t.append(tmp)
# print(len(t))
# for row in t:
# print(len(row))
# tab = [[0 for i in range(320)] for j in range(240)]
# print(len(pic))
# print(len(pic[0]))
# for i in range(240):
# for j in range(320):
# try:
# tab[i][j] = int(t[i][j])
# except (IndexError, ValueError) as e:
# print('ero {}, {}'.format(i,j))
plt.imshow(np.array(t, dtype='uint8'), interpolation='nearest', cmap='gray')
plt.show()
# img = Image.frombytes(mode='L', size =tuple([50,50]), data= np.array(tab))
# img.save('test.png')
# file = open("data.txt", "w")
# for line in pic:
# lineInt = ''
# for i in line:
# lineInt += '{},'.format(str(i))
# # print(lineInt)
# file.write(lineInt)
# print(pic[0])
# for row in pic:
# for i in row:
# if i == '':
# print('hey')
# continue
# i = bytes(int(i))
# # print(pic[0])
# k = s.readline().decode("UTF-8").rstrip('\r\n').split(',')[-1]
# if int(k) == 100:
# print(k)
# mode L is for 8bit gray scale
| true |
0c466cb695f150472a3e3494053fe474d629708b
|
Python
|
bluecube/heating
|
/db_net/registers.py
|
UTF-8
| 6,678 | 2.796875 | 3 |
[] |
no_license
|
import numpy
import re
import itertools
import struct
import db_net
def group(lst, n):
"""group([0,3,4,10,2,3], 2) => iterator
Group an iterable into an n-tuples iterable. Incomplete tuples
are discarded e.g.
>>> list(group(range(10), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
from http://code.activestate.com/recipes/303060-group-a-list-into-sequential-n-tuples/#c5
"""
return zip(*[itertools.islice(lst, i, None, n) for i in range(n)])
class Type:
TYPES = {
'I': (0x00, struct.Struct('<H'), numpy.int16),
'L': (0x01, struct.Struct('<I'), numpy.int32),
'F': (0x02, struct.Struct('<f'), numpy.float32)
}
MATRIX_MASK = 0x20
TYPE_MASK = 0x03
@classmethod
def _tuple_by_code(cls, code):
for letter, (c, unpacker, dtype) in cls.TYPES.items():
if code & cls.TYPE_MASK == c & cls.TYPE_MASK:
return letter, unpacker, dtype
raise Exception('Invalid type')
def __init__(self, type_code, matrix = False):
if isinstance(type_code, str):
self.code, self.unpacker, self.dtype = self.TYPES[type_code]
else:
letter, self.unpacker, self.dtype = self._tuple_by_code(type_code)
self.code = type_code
self.matrix = matrix
if matrix:
self.code |= self.MATRIX_MASK
if matrix:
self.size = matrix[0] * matrix[1] * self.unpacker.size
else:
self.size = self.unpacker.size
@classmethod
def from_string(cls, string):
matched = re.match(r'^(M?)([A-Z])(?:\[(\d+),(\d+)\])?$', string)
if not matched:
raise Exception("Invalid DBNet type string")
matrix, type_code, rows, columns = matched.groups()
if matrix == 'M':
try:
rows = int(rows)
columns = int(columns)
except (TypeError, ValueError):
raise Exception("Invalid or missing matrix dimensions")
matrix = (rows, columns)
else:
matrix = False
return cls(type_code, matrix)
def __str__(self):
letter, unpacker, dtype = self._tuple_by_code(self.code)
if self.matrix:
return 'M{}[{},{}]'.format(letter, self.matrix[0], self.matrix[1])
else:
return letter
class ReadRequest:
PACKER = struct.Struct('<BBBBHHHH')
def __init__(self):
self.wid = None
self.type = None
self.i0 = None
self.j0 = None
self.rows = None
self.cols = None
self.msg_id = 0x4D
@classmethod
def from_bytes(cls, data):
if data[0] != 0x01:
raise Exception('Read request has invalid start byte')
if (data[1] & Type.MATRIX_MASK) != Type.MATRIX_MASK:
raise Exception('Only matrix reads are supported now')
self = cls()
unused, code, wid_lo, wid_hi, self.i0, self.j0, self.rows, self.cols = \
cls.PACKER.unpack(data)
self.wid = wid_hi << 8 | wid_lo
self.type = Type(code, (self.rows, self.cols))
return self
def __bytes__(self):
wid_lo = self.wid & 0xff
wid_hi = (self.wid >> 8) & 0xff
return self.PACKER.pack(
0x1,
self.type.code,
wid_lo, wid_hi,
self.i0, self.j0, self.rows, self.cols)
def details_to_string(self):
return 'WID = {} ({}), {}x{} items from {}, {}'.format(
self.wid, self.type, self.rows, self.cols, self.i0, self.j0)
def __str__(self):
return 'Read request: ' + self.details_to_string()
class ReadResponse:
def __init__(self):
self.value = None
@classmethod
def from_bytes(cls, data, request):
if data is None:
raise Exception('Error processing the request (data is None).')
if data[0] != 0x81:
raise Exception('Read response has invalid start byte')
self = cls()
self.request = request
if len(data) != request.type.unpacker.size * request.rows * request.cols + 1:
raise Exception('Invalid length of reply')
flat_values = [request.type.unpacker.unpack(bytes(x))[0]
for x in group(data[1:], request.type.unpacker.size)]
self.value = list(group(flat_values, request.cols))
return self
def __str__(self):
return 'Read response: {}\n{}'.format(
self.request.details_to_string(), str(self.value))
class Register:
def __init__(self, connection, wid, data_type, auto_update = False):
self._connection = connection
self._wid = wid
self._type = Type.from_string(data_type)
self._auto_update = auto_update
if not self._type.matrix:
raise Exception('Only matrix registers are supported now.')
shorter_dim = min(*self._type.matrix)
longer_dim = max(*self._type.matrix)
# Maximal number of matrix items per transfer
max_items = (db_net.Packet.PAYLOAD_SIZE_LIMIT - 1) // self._type.unpacker.size
rows_per_batch = max_items // shorter_dim
if rows_per_batch == 0:
raise Exception('The matrix is too big, sorry')
batches = ((x, min(rows_per_batch, longer_dim - x))
for x in range(0, longer_dim, rows_per_batch))
self._batches = []
if self._type.matrix[0] > self._type.matrix[1]:
for start, length in batches:
self._batches.append((start, 0, length, self._type.matrix[1]))
else:
for start, length in batches:
self._batches.append((0, start, self._type.matrix[0], length))
def auto_update(self, val):
self._auto_update = val
def update(self):
self._value = numpy.empty(self._type.matrix, dtype = self._type.dtype)
for i0, j0, rows, cols in self._batches:
rq = ReadRequest()
rq.wid = self._wid
rq.type = self._type
rq.i0 = i0
rq.j0 = j0
rq.rows = rows
rq.cols = cols
resp_msg_id, resp_bytes = self._connection.transfer(rq.msg_id, bytes(rq))
if resp_bytes is None:
raise Exception("Reply: error!")
resp = ReadResponse.from_bytes(resp_bytes, rq)
for i, row in zip(range(i0, i0 + rows), resp.value):
for j, x in zip(range(j0, j0 + cols), row):
self._value[i, j] = x
@property
def value(self):
if self._auto_update:
self.update()
return self._value
| true |
4857fe4164e37c4dd73b20c5d7278b92bac48458
|
Python
|
SchoofsEbert/ASTinPython
|
/AST.py
|
UTF-8
| 586 | 2.921875 | 3 |
[] |
no_license
|
import ast
import astor
class AST:
def __init__(self, filename, transformer):
self.filename = filename
with open(filename, "r") as source:
self.AST = ast.parse(source.read())
self.transformer = transformer
def transform(self):
self.transformer.visit(self.AST)
def print(self):
print(astor.dump_tree(self.AST))
def compile(self, filename):
with open(filename, "w") as output:
output.write(astor.to_source(self.AST))
def execute(self, scope):
exec(astor.to_source(self.AST), scope)
| true |
d1a665d626b2aa17707165e080d4fe699022d763
|
Python
|
shlampley/learning
|
/learn python/fizz_buzz3.py
|
UTF-8
| 1,604 | 3.9375 | 4 |
[] |
no_license
|
number = 0
variables = ""
def fizz_buzz(num, var):
fizzarray = []
# doneTODO: Create an empty array outside the loop to store data
var = variables
num = number
while num < 100:
# Reset var to prevent issues of adding buzz to buzz or buzz to fizzbuz
var = ""
#print(num)
num += 1
# if num % 3 == 0 and num % 5 == 0:
# var = "fizz_buzz"
# continue
# DONE: Depending on which conditions are met set var to either the number of fizz or buzz or both
if num % 3 == 0:
var = "fizz"
if num % 5 == 0:
var += "buzz"
# else statement used in this instance will cause overwrighting of saved fizz data, must use if statement.
if var == "":
var = num
# doneTODO: add var to the end of the array
fizzarray.append(var)
# look up storing as list in an array
return fizzarray
def print_array(arr):
for item in arr:
print(item)
def out_put(fizzarray):
# for idx, x in enumerate(fizzarray):
# #print(idx, x)
for ind, x in enumerate(fizzarray):
if (ind + 1) % 2 == 0:
print("\t" + str(x))
else:
print(x)
#results.append(fizzarray)
#print(fizzarray)
# TODO: if the index is odd no indent
# TODO: if the index is even indent
# while i < fizzarray.len():
# # instead of x you would use:
# fizzarray[i]
# i = i + 1
fizzarray = fizz_buzz(number, variables)
# print_array(fizzarray)
out_put(fizzarray)
| true |
736d96574422b7b00e7b8756628dcf418e473476
|
Python
|
inhyuck222/python-ch2.4
|
/for.py
|
UTF-8
| 990 | 4.375 | 4 |
[] |
no_license
|
# Loops
a = ['cat', 'cow', 'tiger']
for animal in a:
print(animal, end=' ')
else:
print('')
# for loops over composite data types
l = [('루피', 10), ('상디', 20), ('조로', 30)]
for data in l:
    print('Name: %s, Age: %d' % data)
for name, age in l:
    print('Name: {0}, Age: {1}'.format(name, age))
l = [{'name': '루피', 'age': 30}, {'name': '루', 'age': 31}, {'name': '피', 'age': 32}]
for data in l:
    print('Name: %(name)s, Age: %(age)d' % data)
# Sum of 1 to 10
s = 0
for i in range(1, 11):
s += i
else:
print(s)
# break
for i in range(10):
if i > 5:
break
print(i, end=' ')
else:
print('')
print('')
print('----------------')
# continue
for i in range(10):
if i < 5:
continue
print(i, end=' ')
else:
print()
# Multiplication table
for x in range(1, 10):
for y in range(1, 10):
print(str(y) + ' * ' + str(x) + ' = ' + str(x*y).rjust(2), end='\t')
else:
print('')
| true |
f66dd9735cfdcffa7c2070e219d517ff72496676
|
Python
|
queenie0708/Appium-Python
|
/alipay.py
|
UTF-8
| 1,075 | 2.59375 | 3 |
[] |
no_license
|
from appium import webdriver
import threading
from appium.webdriver.common.touch_action import TouchAction
from time import sleep
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '9'
desired_caps['deviceName'] = 'PDP'
desired_caps['appPackage'] = 'com.eg.android.AlipayGphone'
desired_caps['appActivity'] = '.AlipayLogin'
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
sleep(40)
print('time to wake up')
driver.find_element_by_android_uiautomator('text(\"Ant Forest")').click()
driver.implicitly_wait(5)
sleep(8)
def swipeDown(driver, n):
    '''Swipe to scroll down the screen'''
for i in range(n):
driver.swipe(300, 1000, 300, 100)
swipeDown(driver,7)
sleep(5)
print('is more friend there?')
driver.find_element_by_android_uiautomator('text(\"View more friends")').click()
driver.implicitly_wait(5)
friends = driver.find_elements_by_class_name('android.view.View')  # plural finder returns a list of elements
for friend in friends:
friend.click()
sleep(5)
driver.find_element_by_android_uiautomator('text(\"")').click()
driver.quit()
| true |
99636db6bcf420043a9a2c894ebfd7f9fbbb8042
|
Python
|
YOODS/rovi_utils
|
/mesh_aid/samples/degraded.py
|
UTF-8
| 1,987 | 3.265625 | 3 |
[] |
no_license
|
import open3d as o3d
import numpy as np
def degraded_copy_point_cloud(cloud, normal_radius, n_newpc):
"""
与えられた点群データからPoisson表面を再構成し、頂点データからランダムに点を取り出して
新たな点群を作成する.
Parameters
----------
cloud : open3d.geometry.PointCloud
入力点群
normal_radius : float
法線ベクトル計算時の近傍半径(Poisson表面構成では必要なので).既に法線ベクトルが
計算されていれば無視される.
n_newpc : int
新しい点群データに含まれる点の数
"""
if np.asarray(cloud.normals).shape[0] == 0:
cloud.estimate_normals(o3d.geometry.KDTreeSearchParamHybrid(radius=normal_radius, max_nn=30))
    # Create the Poisson surface
mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(cloud, depth=9)
    # Remove faces where the density value is low
mesh.remove_vertices_by_mask(densities < np.quantile(densities, 0.1))
    # Randomly sample points from the mesh vertex data
n_vertic = np.asarray(mesh.vertices).shape[0]
if n_vertic > n_newpc:
indices = np.random.choice(np.arange(n_vertic), size=n_newpc, replace=False)
points = np.asarray(mesh.vertices)[indices]
else:
print("Warning: Mesh vertices is {} (< {}).".format(n_vertic, n_newpc))
points = np.asarray(mesh.vertices)
    # Create the new point cloud
newcl = o3d.geometry.PointCloud()
newcl.points = o3d.utility.Vector3dVector(points)
return newcl
if __name__ == '__main__':
cloud = o3d.io.read_point_cloud("surface.ply")
print("Original Point Cloud")
print(cloud)
print(np.asarray(cloud.normals).shape)
degcl = degraded_copy_point_cloud(cloud, 0.2, 10000)
degcl.paint_uniform_color([1, 0.706, 0])
o3d.visualization.draw_geometries([cloud, degcl])
| true |
94946ebf1eada337cbf53b95fa700d32e8c8d9a6
|
Python
|
HesterXu/Home
|
/Public/api/api_03_天气接口.py
|
UTF-8
| 437 | 2.53125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2018/12/3/16:15
# @Author : Hester Xu
# Email : xuruizhu@yeah.net
# @File : api_03_天气接口.py
# @Software : PyCharm
import requests
weather_url_1 = 'http://t.weather.sojson.com/api/weather/city/101030100'
weather_res_1 = requests.get(weather_url_1)
print(weather_res_1)
print(weather_res_1.text)
# weather_res_2 = requests.get(weather_url_2)
# print(weather_res_2)
# print(weather_res_2.text)
| true |
e3e53969c31c9d312829564901ad8861c6e11f72
|
Python
|
yhshu/OpenKE-Embedding-Service
|
/freebase_embedding_server.py
|
UTF-8
| 6,993 | 2.578125 | 3 |
[] |
no_license
|
import numpy as np
import datetime
from flask import Flask, request, jsonify, json
class FreebaseEmbeddingServer:
dir_path: str
entity_to_id: dict # entity mid -> entity id
relation_to_id: dict
entity_vec: np.memmap
relation_vec: np.memmap
dim: int # embedding dimension for each entity or relation
id_adj_list: dict # adjacency list
id_inverse_adj_list: dict # inverse adjacency list
def __init__(self, freebase_embedding_dir_path):
start_time = datetime.datetime.now()
print("[INFO] Loading OpenKE TransE for Freebase...")
# file paths
self.dir_path = freebase_embedding_dir_path.rstrip("/").rstrip("\\")
entity_emb_filepath = self.dir_path + "/embeddings/dimension_50/transe/entity2vec.bin"
relation_emb_filepath = self.dir_path + "/embeddings/dimension_50/transe/relation2vec.bin"
entity_to_id_filepath = self.dir_path + "/knowledge_graphs/entity2id.txt"
relation_to_id_filepath = self.dir_path + "/knowledge_graphs/relation2id.txt"
triple_to_id_filepath = self.dir_path + "/knowledge_graphs/triple2id.txt"
# initialize variables
self.entity_to_id = dict()
self.relation_to_id = dict()
self.id_adj_list = dict()
self.id_inverse_adj_list = dict()
self.entity_vec = np.memmap(entity_emb_filepath, dtype='float32', mode='r')
self.relation_vec = np.memmap(relation_emb_filepath, dtype='float32', mode='r')
self.dim = 50
# build self.entity_to_id
entity_to_id_file = open(entity_to_id_filepath)
for line in entity_to_id_file.readlines():
line.rstrip("\n")
if "\t" in line:
line_split = line.split("\t")
elif " " in line:
line_split = line.split(" ")
else:
continue
self.entity_to_id[line_split[0]] = line_split[1]
entity_to_id_file.close()
# build self.relation_to_id
relation_to_id_file = open(relation_to_id_filepath)
for line in relation_to_id_file.readlines():
line.rstrip("\n")
if "\t" in line:
line_split = line.split("\t")
elif " " in line:
line_split = line.split(" ")
else:
continue
self.relation_to_id[line_split[0]] = line_split[1]
relation_to_id_file.close()
# build adj_list and inverse_adj_list
triple_to_id_file = open(triple_to_id_filepath)
for line in triple_to_id_file.readlines():
line.rstrip("\n")
if "\t" in line:
line_split = line.split("\t")
elif " " in line:
line_split = line.split(" ")
else:
continue
subject_id = int(line_split[0])
object_id = int(line_split[1])
predicate_id = int(line_split[2])
# for adj list
if not (subject_id in self.id_adj_list.keys()):
self.id_adj_list[subject_id] = []
self.id_adj_list[subject_id].append((subject_id, object_id, predicate_id))
# for inverse adj list
if not (object_id in self.id_inverse_adj_list.keys()):
self.id_inverse_adj_list[object_id] = []
self.id_inverse_adj_list[object_id].append((subject_id, object_id, predicate_id))
triple_to_id_file.close()
print("[INFO] OpenKE TransE for Freebase has been loaded")
print("[INFO] time consumed: " + str(datetime.datetime.now() - start_time))
def get_entity_id_by_mid(self, mid: str) -> int:
return self.entity_to_id[mid]
def get_relation_id_by_relation(self, relation: str) -> int:
return self.relation_to_id[relation]
def get_entity_embedding_by_mid(self, mid: str):
return self.get_entity_embedding_by_eid(int(self.entity_to_id[mid]))
def get_entity_embedding_by_eid(self, idx: int):
return self.entity_vec[self.dim * idx:self.dim * (idx + 1)]
def get_relation_embedding_by_relation(self, relation: str):
return self.get_relation_embedding_by_rid(int(self.relation_to_id[relation]))
def get_relation_embedding_by_rid(self, idx: int):
return self.relation_vec[self.dim * idx:self.dim * (idx + 1)]
def get_adj_list(self, mid: str):
idx = int(self.entity_to_id[mid])
if idx in self.id_adj_list:
return self.id_adj_list[idx]
return None
def get_inverse_adj_list(self, mid: str):
idx = int(self.entity_to_id[mid])
if idx in self.id_inverse_adj_list:
return self.id_inverse_adj_list[idx]
return None
app = Flask(__name__)
service = FreebaseEmbeddingServer("/home2/yhshu/yhshu/workspace/Freebase")
@app.route('/entity_embedding_by_mid/', methods=['POST'])
def entity_embedding_by_mid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_entity_embedding_by_mid(params['mid']).tolist()
return jsonify({'entity_embedding': res})
@app.route('/entity_embedding_by_eid/', methods=['POST'])
def entity_embedding_by_eid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_entity_embedding_by_eid(params['eid']).tolist()
return jsonify({'entity_embedding': res})
@app.route('/relation_embedding_by_relation/', methods=['POST'])
def relation_embedding_by_relation_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_relation_embedding_by_relation(params['relation']).tolist()
return jsonify({'relation_embedding': res})
@app.route('/relation_embedding_by_rid/', methods=['POST'])
def relation_embedding_by_rid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_relation_embedding_by_rid(params['rid']).tolist()
return jsonify({'relation_embedding': res})
@app.route('/adj_list/', methods=['POST'])
def adj_list_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_adj_list(params['mid'])
return jsonify({'adj_list': res})
@app.route('/inverse_adj_list/', methods=['POST'])
def inverse_adj_list_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_inverse_adj_list(params['mid'])
return jsonify({'inverse_adj_list': res})
@app.route('/entity_id_by_mid/', methods=['POST'])
def entity_id_by_mid_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_entity_id_by_mid(params['mid'])
return jsonify({'entity_id': res})
@app.route('/relation_id_by_relation/', methods=['POST'])
def relation_id_by_relation_service():
params = json.loads(request.data.decode("utf-8"))
res = service.get_relation_id_by_relation(params['relation'])
return jsonify({'relation_id': res})
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False, port=8898) # '0.0.0.0' is necessary for visibility
| true |
50dcf5643d28de1a3059967341b2a596ed7b40fa
|
Python
|
rohanaggarwal7997/Studies
|
/Python new/inheritance.py
|
UTF-8
| 318 | 3.578125 | 4 |
[] |
no_license
|
class Parent:
def printlastname(self):
print('Aggarwal')
class Child(Parent): #inherited Parent class
def print_name(self):
print('Rohan')
def printlastname(self): #overwriting parent function
print('Aggar')
bucky=Child()
bucky.print_name()
bucky.printlastname()
| true |
0a60ed50abd1dcb8af906bf377beeed159d4e47f
|
Python
|
thautwarm/gkdtex
|
/gkdtex/wrap.py
|
UTF-8
| 859 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
import warnings
warnings.filterwarnings('ignore', category=SyntaxWarning, message='"is" with a literal')
from gkdtex.parse import *
from gkdtex.lex import *
_parse = mk_parser()
def parse(text: str, filename: str = "unknown"):
tokens = lexer(filename, text)
status, res_or_err = _parse(None, Tokens(tokens))
if status:
return res_or_err
msgs = []
lineno = None
colno = None
filename = None
offset = 0
msg = ""
for each in res_or_err:
i, msg = each
token = tokens[i]
lineno = token.lineno + 1
colno = token.colno
offset = token.offset
filename = token.filename
break
e = SyntaxError(msg)
e.lineno = lineno
e.colno = colno
e.filename = filename
e.text = text[offset - colno:text.find('\n', offset)]
e.offset = colno
raise e
| true |
13c7664efff8eb0ab25d6dd0f8e73e276b631438
|
Python
|
andreiqv/rotate_network
|
/make_data_dump.py
|
UTF-8
| 3,182 | 2.78125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
import sys
from PIL import Image, ImageDraw
import _pickle as pickle
import gzip
import random
import numpy as np
np.set_printoptions(precision=4, suppress=True)
def load_data(in_dir, shape=(540,540,1)):
img_size = shape[0], shape[1]
data = dict()
data['filenames'] = []
data['images'] = []
data['labels'] = []
files = os.listdir(in_dir)
random.shuffle(files)
for file_name in files:
file_path = in_dir + '/' + file_name
img = Image.open(file_path)
if shape[2]==3:
img = img.resize(img_size, Image.ANTIALIAS)
elif shape[2]==1:
img_gray = img.convert('L')
img = img_gray.resize(img_size, Image.ANTIALIAS)
else:
raise Exception('Bad shape[2]')
arr = np.array(img, dtype=np.float32) / 256
name = ''.join(file_name.split('.')[:-1])
angle = name.split('_')[-1]
lable = np.array([float(angle) / 360.0], dtype=np.float64)
if type(lable[0]) != np.float64:
print(lable[0])
print(type(lable[0]))
print('type(lable)!=float')
raise Exception('lable type is not float')
print('{0}: {1:.3f}, {2}' .format(angle, lable[0], file_name))
data['images'].append(arr)
data['labels'].append(lable)
data['filenames'].append(file_name)
return data
#return train, valid, test
def split_data(data, ratio=(6,1,3)):
len_data = len(data['images'])
assert len_data == len(data['labels'])
len_train = len_data * ratio[0] // sum(ratio)
len_valid = len_data * ratio[1] // sum(ratio)
len_test = len_data * ratio[2] // sum(ratio)
print(len_train, len_valid, len_test)
data_train = dict()
data_valid = dict()
data_test = dict()
data_train['images'] = data['images'][ : len_train]
data_train['labels'] = data['labels'][ : len_train]
data_train['filenames'] = data['filenames'][ : len_train]
data_valid['images'] = data['images'][len_train : len_train + len_valid]
data_valid['labels'] = data['labels'][len_train : len_train + len_valid]
data_valid['filenames'] = data['filenames'][len_train : len_train + len_valid]
data_test['images'] = data['images'][len_train + len_valid : ]
data_test['labels'] = data['labels'][len_train + len_valid : ]
data_test['filenames'] = data['filenames'][len_train + len_valid : ]
data_train['size'] = len(data_train['images'])
data_valid['size'] = len(data_valid['images'])
data_test['size'] = len(data_test['images'])
splited_data = {'train': data_train, 'valid': data_valid, 'test': data_test}
return splited_data
def get_data(in_dir, shape, ratio):
data1 = load_data(in_dir, shape=shape)
print(len(data1['images']))
print(len(data1['labels']))
data = split_data(data1, ratio=ratio)
print('train', data['train']['size'])
print('valid', data['valid']['size'])
print('test', data['test']['size'])
return data
if __name__ == '__main__':
    # NOTE: the original call omitted the required `in_dir` argument; 'images' below is a
    # placeholder input directory, not a path from the original project.
    data = get_data(in_dir='images', shape=(540,540,1), ratio=(6,1,3))
    # add_pickle
    dump = pickle.dumps(data)
    print('dump.pickle')
    GZIP = True
    if GZIP:
        with gzip.open('dump.gz', 'wb') as f:
            f.write(dump)
        print('gzip dump was written')
    else:
        with open('dump.pickle', 'wb') as f:
            pickle.dump(dump, f, protocol=4)
        print('dump was written')
| true |
9594b96038df1f0d2c02de4bbf9ca543ed97ab5c
|
Python
|
MiguelAbadia/TIC-Abad-a
|
/Programms Python/ejercicio8.py
|
UTF-8
| 198 | 3.40625 | 3 |
[] |
no_license
|
def ejercicio8():
    n=input("Give me a positive integer")
    if n>0:
        print "The powers are",n,n*n,n*n*n,n*n*n*n
    else:
        print "That is negative"
ejercicio8()
| true |
ad3b213de470c3a58659c325ac83fb9671b5ebf8
|
Python
|
segimanzanares/acs-djangoapi
|
/src/shows/serializers.py
|
UTF-8
| 1,764 | 2.546875 | 3 |
[] |
no_license
|
from rest_framework import serializers
from shows.models import Show, Episode
from django.utils import timezone
import os
class EpisodeSerializer(serializers.ModelSerializer):
class Meta:
model = Episode
fields = ('id', 'show', 'title', 'description', 'cover')
def create(self, validated_data):
"""
Create and return a new `Episode` instance, given the validated data.
"""
instance = Episode.objects.create(**validated_data)
# Save file into the model directory
instance.cover.save(os.path.basename(instance.cover.name), instance.cover, save=True)
return instance
def update(self, instance, validated_data):
"""
Update and return an existing `Show` instance, given the validated data.
"""
cover = validated_data.get('cover', None)
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get('description', instance.description)
if cover:
instance.cover = cover
instance.save()
return instance
class ShowSerializer(serializers.ModelSerializer):
episodes = EpisodeSerializer(many=True, read_only=True)
class Meta:
model = Show
fields = ('id', 'title', 'episodes')
def create(self, validated_data):
"""
Create and return a new `Show` instance, given the validated data.
"""
return Show.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Show` instance, given the validated data.
"""
instance.title = validated_data.get('title', instance.title)
instance.save()
return instance
| true |
2792d988960038d69fa8cf4df7c84be7733a9751
|
Python
|
TangleSpace/swole
|
/swole/core/application.py
|
UTF-8
| 3,849 | 2.78125 | 3 |
[] |
no_license
|
import os
import enum
from typing import Dict
from fastapi import FastAPI
from starlette.responses import FileResponse
import uvicorn
from swole.core.page import Page, HOME_ROUTE
from swole.core.utils import route_to_filename
from swole.widgets import Widget
SWOLE_CACHE = "~/.cache/swole" #: Default directory to use for caching.
class Application():
""" Class representing an application. Application are used to serve
declared pages.
Attributes:
fapi (`fastapi.FastAPI`): FastAPI app.
"""
def __init__(self):
self.files = None
self.fapi = FastAPI()
def assign_orphan_widgets(self):
""" Method finding orphan widgets if any, and assigning it to the Home
page. If the home page does not exist, create it. This allow a very
simple and easy way to use the library.
"""
if HOME_ROUTE not in Page._dict:
# No home page : create one
Page()
home = Page._dict[HOME_ROUTE]
assigned_widgets = set().union(*[page.widgets for page in Page._dict.values()])
for w in Widget._declared:
if w not in assigned_widgets: # Orphan !
home.add(w)
def write(self, folder=SWOLE_CACHE):
""" Method to write the HTML of the application to files, in order to
later serve it.
Arguments:
folder (`str`, optional): Folder where to save HTML files. Defaults
to :const:`~swole.core.application.SWOLE_CACHE`.
"""
os.makedirs(folder, exist_ok=True)
self.files = {} # Route -> HTML file
self.callbacks = {} # Callback ID -> (Page, Ajax)
for route, page in Page._dict.items():
# Write HTML of the page
html_str = page.html().render()
path = os.path.join(folder, "{}.html".format(route_to_filename(route)))
with open(path, 'w') as f:
f.write(html_str)
self.files[route] = path
# Save also callbacks (along with their page)
for aj in page.ajax():
self.callbacks[aj.id] = (page, aj)
def define_routes(self):
""" Method defining the routes in the FastAPI app, to display the right
HTML file.
"""
# Define the pages' routes
        for route, html_file in self.files.items():
            # Bind the current file via a factory function; a plain closure here would be
            # subject to Python's late binding and every route would serve the last file.
            def make_index(path):
                def index():
                    return FileResponse(path)
                return index
            self.fapi.get(route)(make_index(html_file))
# Define the callback route
if len(self.callbacks) != 0:
# Define a dynamic enum to ensure only specific callback ID are valid
cbe = enum.IntEnum('CallbackEnum', {str(c_id): c_id for c_id in self.callbacks.keys()})
@self.fapi.post("/callback/{callback_id}")
def callback(callback_id: cbe, inputs: Dict[str, str]):
page, ajax = self.callbacks[callback_id]
return ajax(page, inputs)
def serve(self, folder=SWOLE_CACHE, host='0.0.0.0', port=8000, log_level='info'):
""" Method to fire up the FastAPI server !
Arguments:
folder (`str`, optional): Folder where to save HTML files. Defaults
to :const:`~swole.core.application.SWOLE_CACHE`.
host (`str`, optional): Run FastAPI on this host. Defaults to
`0.0.0.0`.
port (`int`, optional): Run FastAPI on this port. Defaults to
`8000`.
log_level (`str`, optional): Log level to use for FastAPI. Can be
[`critical`, `error`, `warning`, `info`, `debug`, `trace`].
Defaults to `info`.
"""
self.assign_orphan_widgets()
self.write(folder=folder)
self.define_routes()
uvicorn.run(self.fapi, host=host, port=port, log_level=log_level)
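# A minimal usage sketch (illustrative only, not part of the original module): pages and widgets
# would be declared elsewhere through the swole API imported above; `Application` then writes the
# HTML files and serves them.
if __name__ == "__main__":  # example only
    example_app = Application()
    example_app.serve(port=8000)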
| true |
6adaa62ac1986dcd4d811aeec82cad429178a601
|
Python
|
chen19901225/SimplePyCode
|
/SimpleCode/PY_CookBook/chapter11/1_http_simple_get.py
|
UTF-8
| 189 | 2.6875 | 3 |
[] |
no_license
|
import urllib
url='http://www.baidu.com'
params=dict(name1='value1',name2='value2')
querystring=urllib.urlencode(params)
u=urllib.urlopen(url+'?'+querystring)
resp=u.read()
print resp
| true |
02087c6ead589bf24ddcbcd6f0309fa0df9bf0cd
|
Python
|
andoniabedul/cmd-cryptocurrency-watcher
|
/services/Ticket.py
|
UTF-8
| 2,456 | 2.703125 | 3 |
[
"MIT"
] |
permissive
|
import json
#from helpers.format_response import exchanges as format_response
from helpers.messages import messages as messages
from api.call import exchanges as api_call
class Ticket:
def __init__(self, base_pair, pair):
self.base_pair = base_pair
self.pair = pair
self.exchanges = ['coinbase', 'bitfinex', 'poloniex', 'gemini']
def valid_pair(self, exchange):
with open('./constants/pairs_by_exchange.json') as pairs:
pairs_exchange = json.load(pairs)[exchange]
if self.base_pair in pairs_exchange:
if self.pair in pairs_exchange[self.base_pair]:
return True
return False
def get_url(self, exchange):
with open('./constants/urls.json') as urls:
tickets_url = json.load(urls)
url = tickets_url['tickets'][exchange]
formated_pairs = self.format_pairs(self.base_pair, self.pair, exchange)
return url.format(formated_pairs)
def get_pairs(self):
return '{}{}'.format(self.base_pair, self.pair)
def format_pairs(self, base_pair, pair, exchange):
        # compare strings with '==' ('is' checks object identity and is unreliable for string literals)
        if exchange == 'bitfinex':
            return '{}{}'.format(base_pair, pair)
        if exchange == 'poloniex':
            return '{}_{}'.format(base_pair, pair)
        if exchange == 'coinbase':
            return '{}-{}'.format(base_pair.upper(), pair.upper())
        if exchange == 'gemini':
            return '{}{}'.format(base_pair, pair)
@staticmethod
def get(ticket):
response_list = []
for exchange in ticket.exchanges:
pairs = ticket.format_pairs(ticket.base_pair, ticket.pair, exchange)
if ticket.valid_pair(exchange):
response = ticket.get_data(exchange, pairs)
if response['success']:
formated_response = messages['responses']['success'](response['data'], exchange)
response_list.append(formated_response)
else:
formated_response = messages['responses']['error'](response['error'], exchange)
response_list.append(formated_response)
else:
formated_response = messages['responses']['invalid_pair'](ticket.get_pairs(), exchange)
response_list.append(formated_response)
return response_list
def get_data(self, exchange, pairs):
url = self.get_url(exchange)
response = api_call[exchange](url, pairs)
if not hasattr(response, 'error'):
return {
'success': True,
'data': response
}
else:
return {
'error': response['error']
}
| true |
e3c5999afa33a13d1a3271b55e648f365694c35e
|
Python
|
luckydimdim/grokking
|
/in_place_reversal_of_a_linked_list/reverse_every_k_element_sub_list/main.py
|
UTF-8
| 3,388 | 4.125 | 4 |
[] |
no_license
|
from __future__ import print_function
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def print_list(self):
temp = self
while temp is not None:
print(temp.value, end=" ")
temp = temp.next
print()
def reverse_every_k_elements2(head, k):
'''
Given the head of a LinkedList and a number ‘k’,
reverse every ‘k’ sized sub-list starting from the head.
If, in the end, you are left with a sub-list with
less than ‘k’ elements, reverse it too.
'''
if k <= 1 or head is None:
return head
curr, prev, new_head = head, None, False
while True:
last_node_of_previous_part = prev
last_node_of_sub_list = curr
counter = 0
while counter < k and curr is not None:
next = curr.next
curr.next = prev
prev = curr
curr = next
counter += 1
# connect with the previous part
if last_node_of_previous_part is not None:
last_node_of_previous_part.next = prev
if new_head == False:
new_head = True
head = prev
# connect with the next part
last_node_of_sub_list.next = curr
if curr is None:
break
prev = last_node_of_sub_list
return head
def reverse_every_k_elements3(head, k):
'''
Given the head of a LinkedList and a number ‘k’,
reverse every ‘k’ sized sub-list starting from the head.
If, in the end, you are left with a sub-list with
less than ‘k’ elements, reverse it too.
'''
if k <= 1 or head is None:
return head
counter, prev, curr = 0, None, head
is_new_head = False
while True:
link = prev
tail = curr
counter = 0
while counter < k and curr is not None:
counter += 1
next = curr.next
curr.next = prev
prev = curr
curr = next
if is_new_head == False:
head = prev
is_new_head = True
if link is not None:
link.next = prev
tail.next = curr
if curr is None:
break
prev = tail
return head
def reverse_every_k_elements(head, k):
'''
Given the head of a LinkedList and a number ‘k’,
reverse every ‘k’ sized sub-list starting from the head.
If, in the end, you are left with a sub-list with
less than ‘k’ elements, reverse it too.
'''
if head is None or k <= 0:
return head
prev, curr = None, head
is_new_head = False
while True:
tail_of_first_part = prev
tail_of_second_part = curr
counter = 0
while counter < k and curr is not None:
next = curr.next
curr.next = prev
prev = curr
curr = next
counter += 1
if is_new_head == False:
is_new_head = True
head = prev
if tail_of_first_part is not None:
tail_of_first_part.next = prev
tail_of_second_part.next = curr
if curr is None:
break
prev = tail_of_second_part
return head
def main():
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
head.next.next.next.next.next.next = Node(7)
head.next.next.next.next.next.next.next = Node(8)
print("Nodes of original LinkedList are: ", end='')
head.print_list()
result = reverse_every_k_elements(head, 3)
print("Nodes of reversed LinkedList are: ", end='')
result.print_list()
main()
| true |
252753f358e2106a377fe0abd8311512c351cc0d
|
Python
|
znorm/Euler
|
/006.py
|
UTF-8
| 221 | 3.796875 | 4 |
[] |
no_license
|
sumofsquares = 0
squareofsum = 0
for x in range(1,101):
sumofsquares = sumofsquares + (x**2)
squareofsum = squareofsum + x
squareofsum = squareofsum ** 2
print( squareofsum - sumofsquares)
#25164150
| true |
28ea5a719de789bf35197e427659db6fbe96093a
|
Python
|
AndersonHJB/PyCharm_Coder
|
/Coder_Old/pycharm_daima/爬虫大师班/插件合集/参数转换/headers_app.py
|
UTF-8
| 468 | 3.203125 | 3 |
[] |
no_license
|
import re
def headers_to_dict(data):
    """Parse copied request-header text into the global `headers` dict."""
    global headers
    # Iterate over individual header lines rather than over the characters of the raw string.
    for value in data.splitlines():
        if not value.strip():
            continue
        try:
            hea_data = re.findall(r'(.*?): (.*)', value)[0]
            headers.setdefault(hea_data[0], hea_data[1])
        except IndexError:
            hea_data = value.split(': ', 1)
            if len(hea_data) == 2:
                headers.setdefault(hea_data[0], hea_data[1])
headers = {}
res = input('Please paste the request headers you copied: >>>\n')
headers_to_dict(res)
print(headers)
| true |
9d03c8b7f3b90341d68c9b4d8e4a99f9863befb9
|
Python
|
Eric-cv/QF_Python
|
/day2/input_demo.py
|
UTF-8
| 638 | 4.125 | 4 |
[] |
no_license
|
# Input: input()
#name = input()
#print(name)
#name = input('Please enter your name:') # blocking call
#print(name)
'''
Exercise:
Game: Fishing Master
Prompt for the participating player's username
Prompt for the password
Top up: 500
'''
print('''
*********************
     Fishing Master
*********************
''')
username = input("Please enter the player's username:\n")
password = input('Enter the password:\n')
print('%s, please top up to enter the game\n' % username)
coins = input('Top-up amount:\n') # everything typed via input() is a string
print(type(coins))
coins = int(coins)
print('%s topped up successfully! Current game coins: %d' % (username, coins))
| true |
d41df6088fd195dc8263760aeef440c05b77b30a
|
Python
|
AngieGD/MCTS_Juego_uno
|
/TicTacToe/juegoAlternativa.py
|
UTF-8
| 3,936 | 3.5625 | 4 |
[] |
no_license
|
import numpy as np
import pandas as pd
from os import system
class Table():
def __init__(self):
self.table = [[' ',' ',' '],
[' ',' ',' '],
[' ',' ',' ']]
def getMoves(self):
moves = []
for x in range(3):
for y in range(3):
if self.table[x][y] == ' ':
moves.append((x,y))
return moves
def insertarMarca(self,pos,marca):
        # method to insert a mark
self.table[pos[0]][pos[1]] = marca
    # method that displays the board
def mostrarTablero(self):
salida = ""
for fila in self.table:
salida+= repr(fila)+"\n"
print(salida)
class SuperTable():
tablero = np.zeros((3,3)).astype('object')
def __init__(self):
self.crearTablero()
def crearTablero(self):
for row in range(self.tablero.shape[0]):
for column in range(self.tablero.shape[1]):
self.tablero[row,column] = Table()
def mostrarTablero(self):
salida = ""
matriz = ""
for fila in self.tablero:
for i in range(3):
for tablero in fila:
salida += repr(tablero.table[i])+" "
matriz+=salida+"\n"
salida = ""
matriz+="\n"
print(matriz)
def numeroMovimientos(self):
count = 0
for fila in self.tablero:
for table in fila:
count+=len(table.getMoves())
return count
# helper to map a 1-9 choice to the (row, column) position of a sub-board
def obtenerTablero(opcion, tablero):
i = 0
for row in range(tablero.tablero.shape[0]):
for column in range(tablero.tablero.shape[1]):
if i==opcion-1:
return (row,column)
i+=1
return None
# helper to validate that a move is available on the given board
def validarJugada(pos, tablero):
if pos in tablero.getMoves():
return True
return False
def seleccionarMovimiento(pos, tablero, jugador):
    print("\nBoard: ", pos)
    #tablero.tablero[pos].mostrarTablero()
    print(tablero.tablero[pos].mostrarTablero())
    print('\nAvailable moves: ', tablero.tablero[pos].getMoves())
    coordenada = input('Select the move you want to make (x y): ')
    posicion = coordenada.split(' ')
    posicion = (int(posicion[0]), int(posicion[1]))  # the chosen position
    # validate the chosen cell against the selected sub-board (the original passed the
    # super-board coordinate `pos` here, which is a bug)
    while not validarJugada(posicion, tablero.tablero[pos]):
        print('Please enter a valid move')
        print('\nAvailable moves: ', tablero.tablero[pos].getMoves())
        coordenada = input('Select the move you want to make (x y): ')
        posicion = coordenada.split(' ')
        posicion = (int(posicion[0]), int(posicion[1]))  # the chosen position
    tablero.tablero[pos].insertarMarca(posicion, jugador['marca'])
    return tablero, posicion
# to be implemented
def turnoJugador(jugador):
pass
# to be implemented
def turnoMaquina(maquina):
pass
if __name__ =='__main__':
system('clear')
    # first, create a board
tablero = SuperTable()
tablero.mostrarTablero()
    print('Number of moves available: ',tablero.numeroMovimientos())
jugadores = [{'nombre':'Eduardo','movimientos':[], 'marca':'X','tipo':'normal'},{'nombre':'bot','movimientos':[],'marca':'O','tipo':'IA'}]
    # pick a random player to start
jugador = jugadores[np.random.randint(len(jugadores))]
    opcion = int(input('Select a board (1:9): '))
    # validate the option
    while (opcion-1 <0) or (opcion-1>8):
        print('Please select a valid board')
        opcion = int(input('Select a board (1:9): '))
    # position of the selected board within the super-board
    pos = obtenerTablero(opcion, tablero)
tablero, pos = seleccionarMovimiento(pos, tablero, jugador)
tablero.mostrarTablero()
| true |
52de409311d4836172986571fec8825b316f644d
|
Python
|
mithem/serverly
|
/test_utils.py
|
UTF-8
| 6,154 | 3 | 3 |
[
"MIT"
] |
permissive
|
import os
import pytest
import serverly.utils
from serverly.utils import *
def test_parse_role_hierarchy():
e1 = {
"normal": "normal",
"admin": "normal"
}
e2 = {
"normal": "normal",
"admin": "normal",
"staff": "admin",
"root": "staff",
"god": "staff",
}
e3 = {
"normal": "normal",
"admin": "normal",
"staff": "normal",
"root": {"admin", "staff"},
"god": "root"
}
r1 = parse_role_hierarchy(e1)
r2 = parse_role_hierarchy(e2)
r3 = parse_role_hierarchy(e3)
assert r1 == {"normal": {"normal"}, "admin": {"normal"}}
assert r2 == {"normal": {"normal"}, "admin": {"normal"}, "staff": {
"admin", "normal"}, "root": {"admin", "normal", "staff"}, "god": {"admin", "normal", "staff"}}
assert r3 == {"normal": {"normal"}, "admin": {"normal"},
"staff": {"normal"}, "root": {"admin", "normal", "staff"}, "god": {"admin", "normal", "staff", "root"}}
def test_ranstr():
s = []
for _ in range(10000):
r = ranstr()
assert len(r) == 20
assert not r in s
s.append(r)
def test_guess_response_headers():
c1 = "<html lang='en_US'><h1>Hello World!</h1></html>"
h1 = {"content-type": "text/html"}
assert guess_response_headers(c1) == h1
c2 = "Hello there!"
h2 = {"content-type": "text/plain"}
assert guess_response_headers(c2) == h2
c3 = {"hello": True}
h3 = {"content-type": "application/json"}
assert guess_response_headers(c3) == h3
c4 = {"id": 1, "password": "totallyhashed",
"salt": "totallyrandom", "username": "oh yeah!"}
h4 = {"content-type": "application/json"}
assert guess_response_headers(c4) == h4
c5 = open("test_utils.py", "r")
h5 = {"content-type": "text/x-python"}
assert guess_response_headers(c5) == h5
c5.close()
c6 = open("temporary.notanactualfilenamesomimetypescantguessit", "w+")
h6 = {"content-type": "text/plain"}
assert guess_response_headers(c6) == h6
c6.close()
os.remove("temporary.notanactualfilenamesomimetypescantguessit")
c7 = bytes("hello world", "utf-8")
h7 = {"content-type": "application/octet-stream"}
assert guess_response_headers(c7) == h7
def test_get_server_address():
valid = ("localhost", 8080)
assert get_server_address(("localhost", 8080)) == valid
assert get_server_address("localhost,8080") == valid
assert get_server_address("localhost, 8080") == valid
assert get_server_address("localhost;8080") == valid
assert get_server_address("localhost; 8080") == valid
assert get_server_address("localhost:8080") == valid
assert get_server_address("localhost::8080") == valid
assert get_server_address("localhost|8080") == valid
assert get_server_address("localhost||8080") == valid
def test_get_server_address_2():
valid = ("localhost", 20000)
typy_errory = [True, {"hostname": "localhost", "port": 20000}, 42]
value_errory = [(True, "localhost"), ("whats", "up"), (42, 3.1415926535)]
assert get_server_address((20000, "localhost")) == valid
for i in typy_errory:
with pytest.raises(TypeError):
get_server_address(i)
for i in value_errory:
with pytest.raises(ValueError):
get_server_address(i)
def test_get_server_address_3():
valid = ("localhost", 8080)
with pytest.raises(Exception):
with pytest.warns(UserWarning):
serverly.Server._get_server_address((8080, "local#ost"))
def test_check_relative_path():
falsy_values = ["hello", "whatsupp", ""]
typy_errors = [bytes("hello there", "utf-8"),
open("test_utils.py", "r"), True, 23.7]
goodish_ones = ["/hello", "/hello-world", "/whatss/up"]
for i in falsy_values:
with pytest.raises(ValueError):
check_relative_path(i)
for i in typy_errors:
with pytest.raises(TypeError):
check_relative_path(i)
for i in goodish_ones:
assert check_relative_path(i)
def test_check_relative_file_path():
with pytest.raises(FileNotFoundError):
check_relative_file_path(
"definetelynotafile.definetelynotafile.definetelynotafile!")
bad_ones = [True, open("test_utils.py", "r"), 42]
for i in bad_ones:
with pytest.raises(TypeError):
check_relative_file_path(i)
assert check_relative_file_path("test_utils.py") == "test_utils.py"
def test_get_http_method_type():
false_ones = ["GETT", "PooST", "puUT", "DEL", "del",
"head", "CONNECT", "options", "TRACE", "patch"]
good_ones = {"GET": "get", "PoSt": "post",
"Put": "put", "DelEtE": "delete"}
for i in false_ones:
with pytest.raises(ValueError):
get_http_method_type(i)
for k, v in good_ones.items():
assert get_http_method_type(k) == v
def test_parse_scope_list():
assert parse_scope_list("hello;world;whatsup") == [
"hello", "world", "whatsup"]
assert parse_scope_list("19;") == ['19']
assert parse_scope_list("42;1829;sajki;") == ["42", "1829", "sajki"]
assert parse_scope_list("") == []
def test_get_scope_list():
assert get_scope_list("admin") == "admin"
assert get_scope_list(["admin", "financial"]) == "admin;financial"
assert get_scope_list("") == ""
def test_get_chunked_response():
r = serverly.objects.Response(body="Hello world")
assert get_chunked_response(r) == ["Hello world"]
r.bandwidth = 4
assert get_chunked_response(r) == ["Hell", "o wo", "rld"]
def test_lowercase_dict():
d = {"Hello World": True, "WhatssUpp": "Yoo"}
assert lowercase_dict(d) == {"hello world": True, "whatssupp": "Yoo"}
assert lowercase_dict(d, True) == {"hello world": True, "whatssupp": "yoo"}
def test_get_bytes():
assert get_bytes("hello world") == bytes("hello world", "utf-8")
assert get_bytes(
"hello world", "application/octet-stream") == b"hello world"
assert get_bytes(
{"helele": 42}, "application/octet-stream") == {"helele": 42}
assert get_bytes(True) == True
| true |
e3c75609405865a1b44c1a4c295c56e6027268a9
|
Python
|
coy0725/leetcode
|
/python/405_Convert_a_Number_to_Hexadecimal.py
|
UTF-8
| 848 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
if num == 0:
return '0'
# letter map
mp = '0123456789abcdef'
ans = ''
for _ in range(8):
# get last 4 digits
# num & 1111b
n = num & 15
# hex letter for current 1111
c = mp[n]
ans = c + ans
# num = num / 16
num = num >> 4
#strip leading zeroes
return ans.lstrip('0')
# def toHex(self, num):
# def tohex(val, nbits):
# return hex((val + (1 << nbits)) % (1 << nbits))
# return tohex(num, 32)[2:]
# def toHex(self, num, h=''):
# return (not num or h[7:]) and h or self.toHex(num / 16, '0123456789abcdef'[num % 16] + h)
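# Editor's sanity check (hedged, not part of the original solution): exercising the
# bit-mask approach; expected outputs follow the LeetCode 405 examples.
if __name__ == '__main__':
    s = Solution()
    print(s.toHex(26))   # '1a'
    print(s.toHex(-1))   # 'ffffffff'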
| true |
0c1c2070bd92dca3273bc5db8f336d924f16755a
|
Python
|
lollyxsrinand/ChristmasGame
|
/main.py
|
UTF-8
| 3,190 | 3.4375 | 3 |
[] |
no_license
|
import math
from random import randint
import pygame as pg
from pygame import mixer as mx
""" INITIALISING PYGAME """
pg.init()
""" CREATING SCREEN """
screen = pg.display.set_mode((800, 600))
""" BACKGROUND MUSIC """
# mx.music.load('lofi_background.wav')
# mx.music.set_volume(0.8)
# mx.music.play(-1)
background_music = mx.Sound("lofi_background.wav")
background_music.set_volume(0.8)
background_music.play()
""" TITLE """
pg.display.set_caption("ChristmasGame")
""" CREATING BACKGROUND IMAGE """
background = pg.image.load('bg.jpg')
""" CREATING PLAYER """
playerImg = pg.image.load("player.png")
playerX = 52
playerY = 5
playerX_change = 0
playerY_change = 0
""" CREATING CANDY """
candy = pg.image.load("candy.png")
candyX = randint(0,750)
candyY = randint(0,550)
score = 0
font = pg.font.Font("freesansbold.ttf",32)
over_text = pg.font.Font("freesansbold.ttf",64)
time_left = pg.font.Font("freesansbold.ttf",32)
""" SHOWING SCORE """
def show_score():
score_text = font.render(f"Score : {score}",True, (255,255,255))
screen.blit(score_text,(10,10))
""" SHOWING DEATH SCREEN """
def show_death(score):
won = score>=15
# if not done:win_music()
text = "You Won!" if won else "You lose"
over_text = font.render(text, True, (255,255,255))
screen.blit(over_text, (300,300))
""" UPDATING THE PLAYER POSITION """
def player(x, y):
screen.blit(playerImg,(x, y))
ticks = pg.time.get_ticks()
player_alive = True
""" GAME LOOP """
running = True
while running:
screen.fill((0, 0, 0)) #FILLING BACKGROUND WITH BLACK, CHANGING THIS SPOILS EVERYTHING
screen.blit(background,((0,0)))
screen.blit(candy, (candyX,candyY))
""" COLLISION """
if math.sqrt((candyX-playerX)**2+(candyY-playerY)**2)<=40:
candyX=randint(0,750)
candyY=randint(0,550)
score += 1
    # print(score)
    """ EVENT HANDLING """
for event in pg.event.get():
if player_alive or event.type == pg.QUIT:
if event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
playerY_change=-0.4
if event.key == pg.K_LEFT:
playerX_change = -0.5
if event.key == pg.K_RIGHT:
playerX_change = 0.5
if event.type == pg.KEYUP:
if event.key == pg.K_LEFT or event.key == pg.K_RIGHT:
playerX_change = 0
""" QUITTING GAME ON EXIT BUTTON """
if event.type == pg.QUIT:running = False
""" FAKE GRAVITY """
playerY_change+=0.0009
playerY+=playerY_change
if playerY>=540:playerY=540
if playerY<=5:playerY=5
""" MOVING LEFT OR RIGHT """
playerX+=playerX_change
if playerX<=0:
playerX=0
elif playerX>=736:
playerX=736
    show_score()
    """ CHANGING POSITION OF PLAYER """
player(playerX, playerY)
seconds = (pg.time.get_ticks()-ticks)/1000
if seconds>20:
player_alive = False
show_death(score)
pg.display.update()
| true |
57d5f1d8de021f5a9aee01fb1af5d80bb2bf811d
|
Python
|
ddannenb/sentence-transformers
|
/examples/training_quora_duplicate_questions/application_Information_Retrieval.py
|
UTF-8
| 3,060 | 3.234375 | 3 |
[
"Apache-2.0"
] |
permissive
|
"""
This is an interactive demonstration for information retrieval. We will encode a large corpus with 500k+ questions.
This is done once and the result is stored on disc.
Then, we can enter new questions. The new question is encoded and we perform a brute force cosine similarity search
and retrieve the top 5 questions in the corpus with the highest cosine similarity.
For larger datasets, it can make sense to use an approximate-nearest-neighbour index such as https://github.com/spotify/annoy or https://github.com/facebookresearch/faiss
"""
from sentence_transformers import SentenceTransformer, util
import os
from zipfile import ZipFile
import pickle
import time
model_name = 'distilbert-base-nli-stsb-quora-ranking'
embedding_cache_path = 'quora-embeddings-{}.pkl'.format(model_name.replace('/', '_'))
max_corpus_size = 100000
model = SentenceTransformer(model_name)
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
dataset_path = 'quora-IR-dataset'
if not os.path.exists(dataset_path):
print("Dataset not found. Download")
zip_save_path = 'quora-IR-dataset.zip'
util.http_get(url='https://public.ukp.informatik.tu-darmstadt.de/reimers/sentence-transformers/datasets/quora-IR-dataset.zip', path=zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(dataset_path)
corpus_sentences = []
with open(os.path.join(dataset_path, 'graph/sentences.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, sentence = line.strip().split('\t')
corpus_sentences.append(sentence)
if len(corpus_sentences) >= max_corpus_size:
break
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences'][0:max_corpus_size]
corpus_embeddings = cache_data['embeddings'][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question)
hits = util.information_retrieval(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] #Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time-start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
print("\n\n========\n")
| true |
f07a84f01826b5b7d196bcedeaf3f7cfc1802d30
|
Python
|
WolfireGames/overgrowth
|
/Libraries/freetype-2.12.1/builds/meson/extract_freetype_version.py
|
UTF-8
| 2,997 | 3 | 3 |
[
"FTL",
"GPL-1.0-or-later",
"BSD-3-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"GPL-3.0-only",
"LicenseRef-scancode-unknown",
"Zlib",
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/usr/bin/env python3
#
# Copyright (C) 2020-2022 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
"""Extract the FreeType version numbers from `<freetype/freetype.h>`.
This script parses the header to extract the version number defined there.
By default, the full dotted version number is printed, but `--major`,
`--minor` or `--patch` can be used to only print one of these values
instead.
"""
from __future__ import print_function
import argparse
import os
import re
import sys
# Expected input:
#
# ...
# #define FREETYPE_MAJOR 2
# #define FREETYPE_MINOR 10
# #define FREETYPE_PATCH 2
# ...
RE_MAJOR = re.compile(r"^ \#define \s+ FREETYPE_MAJOR \s+ (.*) $", re.X)
RE_MINOR = re.compile(r"^ \#define \s+ FREETYPE_MINOR \s+ (.*) $", re.X)
RE_PATCH = re.compile(r"^ \#define \s+ FREETYPE_PATCH \s+ (.*) $", re.X)
def parse_freetype_header(header):
major = None
minor = None
patch = None
for line in header.splitlines():
line = line.rstrip()
m = RE_MAJOR.match(line)
if m:
assert major == None, "FREETYPE_MAJOR appears more than once!"
major = m.group(1)
continue
m = RE_MINOR.match(line)
if m:
assert minor == None, "FREETYPE_MINOR appears more than once!"
minor = m.group(1)
continue
m = RE_PATCH.match(line)
if m:
assert patch == None, "FREETYPE_PATCH appears more than once!"
patch = m.group(1)
continue
assert (
major and minor and patch
), "This header is missing one of FREETYPE_MAJOR, FREETYPE_MINOR or FREETYPE_PATCH!"
return (major, minor, patch)
def main():
parser = argparse.ArgumentParser(description=__doc__)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--major",
action="store_true",
help="Only print the major version number.",
)
group.add_argument(
"--minor",
action="store_true",
help="Only print the minor version number.",
)
group.add_argument(
"--patch",
action="store_true",
help="Only print the patch version number.",
)
parser.add_argument(
"input",
metavar="FREETYPE_H",
help="The input freetype.h header to parse.",
)
args = parser.parse_args()
with open(args.input) as f:
header = f.read()
version = parse_freetype_header(header)
if args.major:
print(version[0])
elif args.minor:
print(version[1])
elif args.patch:
print(version[2])
else:
print("%s.%s.%s" % version)
return 0
if __name__ == "__main__":
sys.exit(main())
| true |
b5043ccef979bc25c293b102dbcd993d3c1b5ef5
|
Python
|
Maxpa1n/modules_pytorch
|
/models/MAML-wave/learnmodel.py
|
UTF-8
| 1,275 | 2.796875 | 3 |
[] |
no_license
|
import torch
import torch.nn as nn
from torch.nn import functional as F
class Compute(nn.Module):
def __init__(self, hid_dim):
super(Compute, self).__init__()
self.input_layer = nn.Linear(1, hid_dim)
# self.hid_layer = nn.Linear(hid_dim, hid_dim)
self.output_layer = nn.Linear(hid_dim, 1)
self.relu = nn.ReLU()
def forward(self, X):
hid = self.relu(self.input_layer(X))
# hid = self.relu(self.hid_layer(hid))
output = self.output_layer(hid)
return output
class Learner(nn.Module):
def __init__(self, hid_dim):
super().__init__()
self.com = Compute(hid_dim)
# self.com_temp = Compute(hid_dim)
def forward(self, x, com=None):
        if com is not None:
            # functional forward pass with an explicit parameter list
            # [W_in, b_in, W_out, b_out]; mirrors Compute.forward (linear -> ReLU -> linear)
            x = F.relu(F.linear(x, com[0], com[1]))
            y = F.linear(x, com[2], com[3])
            return y
else:
y = self.com(x)
return y
if __name__ == '__main__':
x = torch.randn(25, 1)
com = Compute(64)
lea = Learner(64)
para_dic = dict()
for key, val in lea.named_parameters():
para_dic[key] = val
print(key, val.grad)
    y = lea(x, list(com.parameters()))  # pass the parameter list, not the module, to use the functional path
print('output shape {}'.format(y.shape))
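# Editor's sketch (hedged, not part of the original file): the functional forward above
# exists so MAML-style "fast weights" can be evaluated without mutating the module.
# Names below (maml_inner_step, inner_lr, the MSE loss) are assumptions for illustration.
def maml_inner_step(learner, x, y_true, inner_lr=0.01):
    params = list(learner.com.parameters())           # [W_in, b_in, W_out, b_out]
    loss = F.mse_loss(learner(x, params), y_true)     # loss under the current weights
    grads = torch.autograd.grad(loss, params, create_graph=True)
    fast_weights = [p - inner_lr * g for p, g in zip(params, grads)]
    return learner(x, fast_weights)                   # prediction with adapted weights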
| true |
8e779cf6391028a37be8cb20c5b01c587ab0362c
|
Python
|
MohammadAsif206/BankAPI-Pzero
|
/ProjectZero/services/account_service.py
|
UTF-8
| 1,366 | 2.765625 | 3 |
[] |
no_license
|
from abc import ABC, abstractmethod
from entities.account import Account
class AccountService(ABC):
# General CRUD functionality
@abstractmethod
def create_account_by_customer_id(self, account: Account, customer_id: int):
pass
@abstractmethod
def retrieve_all_accounts_by_cid(self, customer_id: int) -> list[Account]:
pass
@abstractmethod
    def retrieve_account_by_cid_and_balance_range(self, customer_id: int, lower_limit: float, upper_limit: float) -> list[Account]:
pass
@abstractmethod
def retrieve_account_by_cid_and_aid(self, customer_id: int, account_number: int) ->Account:
pass
@abstractmethod
def update_account_by_cid_and_aid(self, account: Account, customer_id: int, account_number: int) -> Account:
pass
@abstractmethod
def delete_account_by_cid_and_aid(self, customer_id: int, account_number: int) -> bool:
pass
@abstractmethod
def do_trans_on_account_by_cid_and_aid(self, customer_id: int, account_number: int,
withdraw: float, deposit: float) -> Account:
pass
@abstractmethod
def transfer_fund_between_account_of_a_client_by_aids(self, s_account_number: int, r_account_number: int,
                                                          amount: float) -> list[Account]:
pass
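# Editor's sketch (an assumption, not part of the original interface): a minimal
# in-memory service showing how one method of the ABC is meant to be satisfied.
# A real subclass must override every abstract method before it can be instantiated;
# the storage layout below is illustrative only.
class InMemoryAccountService(AccountService):
    def __init__(self):
        self.accounts_by_customer: dict[int, list[Account]] = {}
    def retrieve_all_accounts_by_cid(self, customer_id: int) -> list[Account]:
        # return the customer's accounts, or an empty list if none exist yet
        return self.accounts_by_customer.get(customer_id, [])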
| true |
8def9170ec61069e564024dd50482b1f999e365d
|
Python
|
rosechellejoy/cmsc128-ay2015-16-assign001-py
|
/oraa_pa.py
|
UTF-8
| 7,813 | 3.484375 | 3 |
[] |
no_license
|
"""
Rosechelle Joy C. Oraa
2013-11066
CMSC 128 AB-3L
"""
import sys
"""
numToWords() accepts an input number and outputs its equivalent in words
temp_num : input by the user
"""
def numToWords(temp_num):
if len(temp_num)>7: #if number inputed is greater than 7 digits: invalid
print 'Invalid: input can only have at most 7 digits'
return
str_num = '' #word equivalent of temp_num
length = len(temp_num) #input length
pos =0 #current position
j=0
str_form = ''
for i in range(6, -1, -1): #places input value in an array of 7 elements(for each digit)
if i>length-1: #if current length is less than current max number of digits
str_num=str_num+'0'
else:
while j!=length: #while input is not fully transferred to str_num
str_num=str_num+temp_num[j]
j=j+1
x=str_num #holds input in its 7 digit representation
while pos < 7 :
if pos == 4 and (x[pos-1]!='0' or x[pos-2]!='0' or x[pos-3]!='0') and length>3:
str_form = str_form+'thousand ' #if at 4th pos and 3 previous digits are not == 0
if x[pos]=='0': #if number is 0
if pos == 6 and length == 1:
str_form = str_form + 'zero'
elif pos != 2 and pos != 5: #ones digit
if x[pos]=='1':
str_form = str_form+'one '
elif x[pos]=='2':
str_form=str_form+'two '
elif x[pos]=='3':
str_form=str_form+'three '
elif x[pos] =='4':
str_form=str_form+'four '
elif x[pos]=='5':
str_form=str_form+'five '
elif x[pos]=='6':
str_form=str_form+'six '
elif x[pos]=='7':
str_form=str_form+'seven '
elif x[pos]=='8':
str_form=str_form+'eight '
elif x[pos]=='9':
str_form=str_form+'nine '
if pos == 0:
str_form = str_form+'million '
elif pos == 1 or pos == 4:
str_form = str_form+'hundred '
else: #tens digit
if pos == 2 or pos == 5:
if x[pos]== '1':
pos=pos+1
if x[pos]== '0':
str_form = str_form+'ten '
elif x[pos]== '1':
str_form = str_form+'eleven '
elif x[pos]== '2':
str_form = str_form+'twelve '
elif x[pos]== '3':
str_form = str_form+'thirteen '
elif x[pos]== '4':
str_form = str_form+'fourteen '
elif x[pos]== '5':
str_form = str_form+'fifteen '
elif x[pos]== '6':
str_form = str_form+'sixteen '
elif x[pos]== '7':
str_form = str_form+'seventeen '
elif x[pos]== '8':
str_form = str_form+'eighteen '
elif x[pos]== '9':
str_form = str_form+'nineteen '
elif x[pos]== '2':
str_form = str_form+'twenty '
elif x[pos]== '3':
str_form = str_form+'thirty '
elif x[pos]== '4':
str_form = str_form+'forty '
elif x[pos]== '5':
str_form = str_form+'fifty '
elif x[pos]== '6':
str_form = str_form+'sixty '
elif x[pos]== '7':
str_form = str_form+'seventy '
elif x[pos]== '8':
str_form = str_form+'eighty '
elif x[pos]== '9':
str_form = str_form+'ninety '
if pos == 2 or pos == 5:
pos= pos+1
if x[pos]=='1': #single digit after tens
str_form = str_form+'one '
elif x[pos]=='2':
str_form=str_form+'two '
elif x[pos]=='3':
str_form=str_form+'three '
elif x[pos] =='4':
str_form=str_form+'four '
elif x[pos]=='5':
str_form=str_form+'five '
elif x[pos]=='6':
str_form=str_form+'six '
elif x[pos]=='7':
str_form=str_form+'seven '
elif x[pos]=='8':
str_form=str_form+'eight '
elif x[pos]=='9':
str_form=str_form+'nine '
pos = pos+1 #increment pos
print str_form #print word representation
return
"""
wordsToNum() accepts word(s) then prints its numerical equivalent
word - string input
"""
def wordsToNum(word):
word = word.split() #word- list of words from word
gen_num = 0 #total value
temp = 0 #current integer
mill_num=0 #total million value of word
hund_thou = 0 #total hundred thousand value of word
hund = 0 #total hundred value of word
wLen = len(word)# number of words in word
flag=0 # is equal to 1 if there should be no more thousands
i=0
while i < wLen: #iterates through each word in word(list)
if word[i] == 'one':
temp+=1
elif word[i] == 'two':
temp+=2
elif word[i] == 'three':
temp+=3
elif word[i] == 'four':
temp+=4
elif word[i] == 'five':
temp+=5
elif word[i] == 'six':
temp+=6
elif word[i] == 'seven':
temp+=7
elif word[i] == 'eight':
temp+=8
elif word[i] == 'nine':
temp+=9
elif word[i] == 'ten':
temp += 10
elif word[i] == 'eleven':
temp += 11
elif word[i] == 'twelve':
temp += 12
elif word[i] == 'thirteen':
temp += 13
elif word[i] == 'fourteen':
temp += 14
elif word[i] == 'fifteen':
temp += 15
elif word[i] == 'sixteen':
temp += 16
elif word[i] == 'seventeen':
temp += 17
elif word[i] == 'eighteen':
temp += 18
elif word[i] == 'nineteen':
temp += 19
elif word[i] == 'twenty':
temp += 20
elif word[i] == 'thirty':
temp += 30
elif word[i] == 'forty':
temp += 40
elif word[i] == 'fifty':
temp += 50
elif word[i] == 'sixty':
temp += 60
elif word[i] == 'seventy':
temp += 70
elif word[i] == 'eighty':
temp += 80
elif word[i] == 'ninety':
temp += 90
elif word[i] == 'million': #multiply previous number(temp) to 1000000
mill_num= temp*1000000 #place in mill_num
temp=0
elif word[i] == 'hundred': #multiply value in temp to 100
temp= temp*100
elif word[i] == 'thousand': #multiply hund to 1000 then place in hund_thou
hund_thou = hund*1000
hund=0
temp=0
hund = temp;
i+=1 #increment i then next iteration
gen_num= mill_num+hund_thou+hund #gen_num= accumulated value of millions, hundred thousands, and hundreds
print gen_num #print total number
return
"""
wordsToCurrency() accepts two inputs then generates string in number form with given currency
word - number in words inputed by the user
cur- currency given by the user
"""
def wordsToCurrency(word, cur):
if cur=='USD' or cur=='JPY' or cur == 'PHP': #checks if currency given is valid
sys.stdout.write(cur) #print currency
wordsToNum(word) #print word in its numerical value
else:
print 'Invalid!'
return
"""
numberDelimitered() accepts three inputs then prints the number with a delimiter in the position given by the user
temp - number
delimiter - delimiter given by the user
jumps - # of jumps from the right
"""
def numberDelimitered(temp, delimiter, jumps):
temp= str(temp) #typecast temp to a string
rev='' #will hold temp in reverse
i=0
if len(temp) > 7:
print 'Invalid!: exceeded max no. of digits'
return
for i in range(0, len(temp)): #reverse number input
rev=temp[i]+rev
temp=''
for i in range(0, len(rev)): #iterates through all digits in rev
if jumps== i: #if i == jumps
temp= delimiter+temp #concatenate delimiter with temp
temp= rev[i]+temp #concatenate rev[i] with temp
print temp
return
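# Editor's worked example (not part of the original file): the delimiter is inserted
# exactly once, `jumps` digits from the right, e.g.
#   numberDelimitered('1234567', ',', 3)  prints  1234,567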
"""
prints menu and lets the user choose feature to be used
"""
print 'MENU'
print '[1] Number to Word'
print '[2] Word to Number'
print '[3] Word to Currency'
print '[4] Number Delimitered '
ch = input('choice: ')
if(ch==1): #number to words
temp_num = raw_input('Enter number: ')
numToWords(temp_num);
elif(ch==2): #word to number
word= raw_input("Enter input: ")
wordsToNum(word);
elif(ch==3): #number to currency
word= raw_input("Enter number in words: ")
cur= raw_input("Enter currency: ")
wordsToCurrency(word, cur)
elif(ch==4): #number delimitered
temp = raw_input('Enter number: ')
delimiter = raw_input('Enter delimiter: ')
jumps = input('Enter # of jumps: ')
numberDelimitered(temp, delimiter, jumps);
else:
print 'Invalid!'
| true |
499c8883c63e328da19afada8b240fd244c777d8
|
Python
|
tushargupta14/compositional_semantics
|
/create_fastText_dict2.py
|
UTF-8
| 2,091 | 2.78125 | 3 |
[] |
no_license
|
import json
from collections import defaultdict
def create_dictionaries(path_to_files):
print "Creating Dictionaries"
count = 0
source_fastText_dict = defaultdict(list)
with open(path_to_files+"source_fastText_output.txt","r") as source_file:
for line in source_file :
vector = []
vector = [value for value in line.rstrip().split(" ")]
source_word = vector[0]
source_fastText_dict[source_word] = vector[1:]
count+=1
print count,len(vector)
derived_fastText_dict = defaultdict(list)
count =0
with open(path_to_files+"derived_fastText_output.txt","rb+") as source_file:
for line in source_file :
vector = []
vector = [value for value in line.rstrip().split(" ")]
derived_word = vector[0]
derived_fastText_dict[derived_word] = vector[1:]
count+=1
print count, len(vector)
affix_fastText_dict = defaultdict(list)
count = 0
with open(path_to_files+"affix_fastText_output.txt","rb+") as source_file:
for line in source_file :
vector = []
vector = [value for value in line.rstrip().split(" ")]
affix_word = vector[0]
affix_fastText_dict[affix_word] = vector[1:]
count+=1
print count, len(vector)
with open(path_to_files+"source_fastText_350dim.json","wb+") as f:
json.dump(source_fastText_dict,f)
with open(path_to_files+"derived_fastText_350dim.json","wb+") as f:
json.dump(derived_fastText_dict,f)
if __name__ == "__main__" :
create_dictionaries("/home/du3/13CS30045/affix_final/lazaridou/create_vectors/data_files/")
| true |
a819b7c9b02372e48784b4ddced181e7c151cb7b
|
Python
|
jack-mcivor/afl-predictor
|
/afl/models.py
|
UTF-8
| 4,931 | 3.0625 | 3 |
[] |
no_license
|
from collections import defaultdict
from math import exp, log
import pandas as pd # optional
class Elo:
"""Base class to generate elo ratings
    Includes several optional improvements over the original methodology:
* k decay: use a higher update speed early in the season
* crunch/carryover: shift every team's ratings closer to the mean between seasons
* interstate and home_advantage
* optimised initial ratings
Hyperparameters can be fit with a grid search, eg. sklearn.model_selection.GridSearchCV
Initial ratings can be fit with a logistic regression (equivalent to a static elo) eg. sklearn.linear_model.LogisticRegression
By default assumes a logistic distribution of ratings
"""
def __init__(self, k=30, home_advantage=20, interstate_advantage=5, width=400/log(10), carryover=0.75, k_decay=0.95,
initial_ratings=None, mean_rating=1500, target='home_win_draw_loss'):
self.k = k
self.home_advantage = home_advantage
self.interstate_advantage = interstate_advantage
self.width = width
self.carryover = carryover
self.k_decay = k_decay
self.mean_rating = mean_rating
self.initial_ratings = initial_ratings or {}
self.target = target # home_win_draw_loss, home_points_ratio, home_squashed_margin
def iterate_fixtures(self, fixtures, as_dataframe=True):
"""
Parameters
----------
fixtures : list of dict or pd.DataFrame
Must be ordered. Each record (row) must have (columns): home_team, away_team, round_number, is_interstate, <self.target>
Prefer a list of records as it's much faster
We use the python stdlib math.exp which seems faster in single computation than numpy's version and therefore speeds up parameter fitting
Profile code with lprun:
%load_ext line_profiler
elo = Elo()
%lprun -f elo.iterate_fixtures elo.iterate_fixtures(fxtrain, as_dataframe=True)
"""
# new teams are given self.initial_ratings
self.current_ratings_ = defaultdict(lambda: self.mean_rating, self.initial_ratings)
if isinstance(fixtures, pd.DataFrame):
# A list of records is faster and less prone to errors on update than a DataFrame
fixtures = fixtures.reset_index().to_dict('records')
for fx in fixtures:
home_team = fx['home_team']
away_team = fx['away_team']
home_actual_result = fx[self.target]
round_number = fx['round_number']
is_interstate = fx['is_interstate']
# home_expected_result = self.predict_result(home_team, away_team, is_interstate, round_number)
# -------
home_rating_pre = self.current_ratings_[home_team]
away_rating_pre = self.current_ratings_[away_team]
if round_number == 1:
# Crunch the start of the season
# Warning: this will make an in-place change the current ratings for the end of season
# TODO: don't crunch the first round of training
home_rating_pre = self.carryover*home_rating_pre + (1-self.carryover)*self.mean_rating
away_rating_pre = self.carryover*away_rating_pre + (1-self.carryover)*self.mean_rating
ratings_diff = home_rating_pre - away_rating_pre + self.home_advantage + self.interstate_advantage*is_interstate
home_expected_result = 1.0 / (1 + exp(-ratings_diff/self.width))
# self.update_ratings(home_actual_result, home_expected_result, round_number)
# ------
change_in_home_elo = self.k*self.k_decay**round_number*(home_actual_result - home_expected_result)
home_rating_post = home_rating_pre + change_in_home_elo
away_rating_post = away_rating_pre - change_in_home_elo
# update ratings
self.current_ratings_[home_team] = home_rating_post
self.current_ratings_[away_team] = away_rating_post
fx['home_rating_pre'] = home_rating_pre
fx['away_rating_pre'] = away_rating_pre
fx['home_expected_result'] = home_expected_result # proba
# fx['binary_expected_home_result'] = int(expected_home_result > 0.5) # prob
if as_dataframe:
# return pd.DataFrame(fixtures, columns=['matchid', 'home_expected_result']).set_index('matchid')
return pd.DataFrame(fixtures).set_index('matchid')
return fixtures
def fit(self, X):
# the only thing we really need to store is the *latest* rating (the system is memoryless)
# self.teams_ = ['myteam']
# self.current_ratings_ = {'myteam': 1500}
return X
    def predict_proba(self):
        # unfinished stub: the original referenced an undefined `expected_home_result`;
        # per-fixture probabilities are available from the home_expected_result column
        # produced by iterate_fixtures()
        raise NotImplementedError
    def predict(self):
        raise NotImplementedError
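# Editor's usage sketch (hedged, not part of the original module): run the rating pass
# over an ordered fixture list. 'fixtures.csv' is an assumption; the required columns
# (home_team, away_team, round_number, is_interstate, home_win_draw_loss and a matchid
# index) follow the iterate_fixtures() docstring.
if __name__ == '__main__':
    fixtures = pd.read_csv('fixtures.csv', index_col='matchid')
    elo = Elo(k=30, home_advantage=20)
    rated = elo.iterate_fixtures(fixtures, as_dataframe=True)
    print(rated[['home_team', 'away_team', 'home_expected_result']].head())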
| true |
48f064838cbf993b4e54813f86f3344080368bf9
|
Python
|
noahwang07/python_start
|
/fresher_class.py
|
UTF-8
| 419 | 3.734375 | 4 |
[] |
no_license
|
class Human(object):
def __init__(self, name):
self.name = name
def walk(self):
print (self.name + " is walking")
def get_name(self):
        return self.name
def set_name(self, name):
if len(name) <= 10:
self.name = name
human_a = Human("alan")
print (human_a.name)
human_a.set_name('bob')
print(human_a.name)
xy = Human('noah')
print (xy.name)
xy.walk()
xy.set_name('nova')
xy.walk()
| true |
903f20ef4856582979bf4e1ec40019d250d83726
|
Python
|
owns/pycleverclicker
|
/packages/pymybase/myjson2csv.py
|
UTF-8
| 26,036 | 2.71875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Elias Wood (owns13927@yahoo.com)
2015-04-13
a class for simplifying flattening json (dict) objects - not just top level!
"""
import os
from csv import writer as csv_writer
from csv import QUOTE_MINIMAL as csv_QUOTE_MINIMAL
from csv import QUOTE_ALL as csv_QUOTE_ALL
from csv import QUOTE_NONE as csv_QUOTE_NONE
from json import loads as json_loads
from json import dumps as json_dumps
from decimal import Decimal
from string import Formatter
from datetime import datetime
from myloggingbase import MyLoggingBase
# python2 compatible
try: s = basestring # @UndefinedVariable pylint: disable=invalid-name
except NameError: basestring = str #@ReservedAssignment pylint: disable=invalid-name
else: del s
'''class MyDict(dict):
def __missing__(self,*args,**keys):
return MyDict()'''
class MyJSON2CSV(MyLoggingBase):
"""
a class for simplifying flattening json (dict) objects - not just top level!
to use:
a = MyJSON2CSV('filename.csv')
#a.set_headers('key1','key2.subkey1','key2.subkey2','key2.subkey2.ssk3','key..name')
#a.begin_writing() # alt way to set things...
#a.add_row_number() # can add a row number
for json_obj in json_objects:
a.write_json_object(json_obj)
a.close()
NOTE:
if filename already exists a number is appended,
but there is a race condition...
DOESN'T CHECK TO MAKE SURE ALL KEYS (& SUB KEYS) ARE USED/EXPORTED!
CHANGELOG:
2016-06-22 v1.1.2: Elias Wood
small bug fixes
2016-06-22 v1.1.1: Elias Wood
set default null
also forgot to add formatting for datetime... just returned ''... ops.
2016-06-22 v1.1.0: Elias Wood
added support for datetime
2016-06-22 v1.0.0: Elias Wood
first major version! changed several var/fns to public, renamed, etc
"""
#TODO: add check if we've missing anything (only top lvl ATM)!
#TODO: add ability to handle lists! flatten lists!
__version__ = '1.1.2'
_filename = None
_headers = None
_top_level_headers = None
#_header_formating = None # parallel array with _headers a calc of _headers for what to print...
_csv = None
_file = None
CSV_PARAMS_DEFAULT = dict(delimiter=',',quotechar='"',doublequote=True,
skipinitialspace=True,quoting=csv_QUOTE_MINIMAL)
csv_params = None
missed_headers = None
datatypes = None
_add_row_num = False
_cur_row_num = None
_row_num_header_name = 'autogenerated_row_number'
ENCODING_DEFAULT = 'UTF-8'
encoding = None
DT_FORMAT_DEFAULT = '%Y-%m-%dT%H:%M:%S.%f'
dt_format = None
_formatter = None
_expand_lists = False
_LIST_FLAG = '<LIST_{0}>'
NULL_DEFAULT = ''
null = None
def __init__(self,filename=None,**keys):
"""initialize class.
keys filename: is the file name of the output file (csv)
"""
MyLoggingBase.__init__(self,**keys)
self.csv_params = self.CSV_PARAMS_DEFAULT.copy()
self.encoding = self.ENCODING_DEFAULT
self.dt_format = self.DT_FORMAT_DEFAULT
self.null = self.NULL_DEFAULT
self.missed_headers = set()
self.datatypes = set()
self._cur_row_num = 0
self._formatter = Formatter()
self._formatter.get_field = lambda field_name,args,kwargs: (kwargs[field_name],field_name)
if filename is not None:
self.set_filename(filename)
#===========================================================================
#============================= GETTERS/SETTERS =============================
#===========================================================================
def set_filename(self,filename):
"""set the filename. Returns True if success, False if failed.
NOTE: cannot change the name if the file is already open!"""
if not self.is_open():
self._filename = filename
return True
else: return False
def get_filename(self):
"""returns the set file name. NOTE: if not set, will return None"""
return self._filename
def option_expand_lists_on(self):
"""set flag on whether to flatten lists!"""
self.logger.warning("expanding lists isn\'t a functioning feature yet!")
#self._expand_lists = True
def option_expand_lists_off(self):
"""set flag off whether to flatten lists!"""
self._expand_lists = False
def set_separator(self,s):
"""set the separator for the csv. Must be a one-character string,
defaults to a comma. returns True if set correctly, False otherwise."""
if not isinstance(s,basestring) or len(s)!=1:
self.logger.warning('%r is not a valid separator. must be one-character string.',s)
return False
# set if open
if self.is_open(): return False
else:
self.csv_params['delimiter'] = s
return True
def get_separator(self):
"""returns the current separator, defaults to a comma."""
return self.csv_params['delimiter']
def set_qouting_default(self): self.csv_params['quoting']=csv_QUOTE_MINIMAL
def set_qouting_none(self): self.csv_params['quoting']=csv_QUOTE_NONE
def set_qouting_minimal(self): self.csv_params['quoting']=csv_QUOTE_MINIMAL
def set_qouting_all(self): self.csv_params['quoting']=csv_QUOTE_ALL
def add_row_number(self,b=True):
"""Makes the first column the row number with the title self._row_num_header_name.
Fails if the file is already open.
returns None is not set, returns b (what was passed) if successful."""
if not self.is_open():
self._add_row_num = b
# add/remove to headers if needed
if self._add_row_num:
# check if need to add the row number
if self._headers and self._row_num_header_name not in self._headers:
self.set_headers(*self._headers)
else:
# check if need to remove the row number
if self._headers and self._row_num_header_name in self._headers:
self.set_headers(*self._headers[1:])
return b
return None
def set_headers(self,*headers):
"""set headers for the csv (in order). To refer to a key in a dict
object, for example with {'a':{'a':2,b':4},'b':1,'a.b':7}:
'a.b' --> 4
'b' --> 1
'a' --> '{"a": 2,"b": 4}'
'a..b' --> 7
optionally, {key:'the key in the dict object explained above',
name:'rename the column header'
default:'default value'
key_fn: # function to transform the key value.
Must have 'key': what key to get.
e.g. key_fn(key='abc',value='123',default=None): return value+'abc'
dict_fn: # row dict and default are passed.
e.g. dict_fn(d={},default=None): return d.get('a',0)+d.get('b',0)
eval: # formula to eval. keys must be between {...}
Must have 'keys': finite, iterable list of keys that are used.
'on_error(execption,column,values,equation_w_values)': function to handle exceptions.
if no function is given, 'default' is returned
e.g. column: eval='{a}/{b}',keys=('a','b')
priority: 1: dict_fn
2: key_fn (must have 'key' key)
3. eval (must have 'keys' key!)
4. key
            NOTE: it's recommended to use name when using a custom function.
otherwise, it will be blank!
"""
# process what the headers will be
new_headers = [self._row_num_header_name] \
if self._add_row_num else []
for h in headers:
if isinstance(h, basestring): new_headers.append(h)
elif isinstance(h,dict):
if 'dict_fn' in h:
if callable(h['dict_fn']): pass
else: # dict_fn is not callable!!!
raise BadCustomHeaderObject("the key 'dict_fn' must be"+
' callable, not a %s. %r' % (type(h['dict_fn']),h))
elif 'key_fn' in h:
if callable(h['key_fn']) and 'key' in h and \
isinstance(h.get('key'),basestring): pass
else: # key_fn is not callable or no ;key' key!!!!
raise BadCustomHeaderObject("the key 'key_fn' must be "+
"callable and 'key' must be a string valued key. "+repr(h))
elif 'eval' in h:
if isinstance(h['eval'],basestring) and \
hasattr(h.get('keys'), '__iter__'): h['vformat'] = self._formatter.vformat
else:
raise BadCustomHeaderObject("the key 'eval' must be "+
"a string and 'keys' must be an iterable object. "+repr(h))
elif 'key' not in h or not isinstance(h['key'],basestring):
# at least key has to be provided!
raise BadCustomHeaderObject('you at least need to populate'+
" the 'key' string valued key... "+repr(h))
# at least one passed - add the header!
new_headers.append(h)
else:
raise BadCustomHeaderObject('header object must be a dict or '+
'string base, not a '+str(type(h)))
# convert list to tuple (a tuple is faster!)
self._headers = tuple(new_headers)
# get top level headers so we can see if we miss any
self._top_level_headers = {i.get('name',i.get('key',''))
if isinstance(i,dict)
else (i[:i.find('.')]
if '.' in i else i)
for i in headers}
def get_headers(self):
"""returns the set headers. NOTE: if not set, returns None"""
return self._headers
def headers_set(self):
"""returns True if the headers have been set, False otherwise"""
return self._headers!=None and len(self._headers) != 0
def is_open(self):
"""returns True if the file is open, i.e. writing has started"""
return self._csv != None
''
#===========================================================================
# Helpful file Wrappers
#===========================================================================
def flush(self):
"""Flushes file if open. Returns True if flushed successfully."""
if self._file!=None:
self._file.flush()
return True
else: return False
def close_writing(self):
"""closes if the file is open; same as self.close()."""
if self._file==None: self.logger.warning('nothing to close %s',self._filename)
else:
# {0:04} rows written + headers'.format(0 if self._cur_row_num==None else self._cur_row_num)
self.logger.info('closing csv %s',self._filename)
self._file.close()
self._csv = None
def close(self):
"""see self.close_writing(..."""
self.close_writing()
''
#===========================================================================
# Begin Writing!
#===========================================================================
def begin_writing(self,filename=None,*headers):
"""opens the file, checks for things. the method is called automatically
if the file isn't already open - no need to call explicitly."""
# set filename if passed
if filename!=None: self.set_filename(filename)
# fail if not file to same to!
if self._filename == None:
self.logger.critical('no filename provided!!!')
return False
# if filename is used, generate a new one...
#### if we were using python 3.3, we could do open('','x')...
uf,fe = os.path.splitext(self._filename)
n = ''
while os.path.exists('{}{}{}'.format(uf,n,fe)):
if n=='': n = 1
else: n += 1
# set the new filename if needed
filename = '{}{}{}'.format(uf,n,fe)
if filename != self.get_filename():
if not self.set_filename(filename):
filename = self.get_filename()
# try to open the file
try:f = open(filename,'wb')
except (IOError,WindowsError) as e:
self.logger.critical('failed to open file %s b/c %r',
self.get_filename(),e)
else:
self._file = f
self._csv = self._get_default_csv(self._file)
self._cur_row_num = 0
# write headers if given... prefer what is passed
if not headers:
if not self.headers_set():
self.logger.warning('no headers provided, ...will use keys'+
' from first records for headers! this'+
' means there may be new keys in a lat'+
"er record which won't be written to t"+
'he csv!')
return True
self._write_headers() # write headers!
else: self._write_headers(*headers) # write headers!
# success!
return True
def _write_headers(self,*headers):
"""helper function - writes the headers. If headers are passed, those
are used, else, whatever was already set.
"""
if headers: self.set_headers(*headers)
self.logger.debug('writing headers %s',self.get_headers())
self._csv.writerow(tuple(i.get('name','') if isinstance(i,dict) else i
for i in self.get_headers()))
''
#===========================================================================
# Write things
#===========================================================================
def writerow(self,row):
"""simply writes to the csv directly, adding row number if requested
and formating datatypes the standard way!"""
# open if needed
if not self.is_open(): self.begin_writing()
# add row number if requested!
self._cur_row_num += 1
nrow = [self._cur_row_num] if self._add_row_num else []
# make sure writing it wont break anything - convert to strings!
nrow.extend(self.item_to_str(i) for i in row)
# write the row
self._csv.writerow(nrow)
def write_json_text(self,json_text):
"""converts to json before sending to self.write_json_object(...)"""
return self.write_json_object(json_loads(json_text))
def write_json_object(self,json_obj):
"""write to json object to the csv. see self.set_headers(...) for how
the headers work.
"""
# test is dict (json object - although, lists are valid json objects...
if not isinstance(json_obj,dict):
self.logger.warning('%s object provided. Only a json object '+
'(dict or sub-class of dict) is allowed!')
raise TypeError('MyJSON2CSV.write_json_object: json_obj is '+
str(type(json_obj))+', not a dictionary.')
# open if needed
if not self.is_open(): self.begin_writing()
# write the row!!!
self._csv.writerow(tuple(self.iter_row_from_json(json_obj)))
return True
def iter_row_from_json(self,json_obj):
""" transforms the JSON object to a row using the headers
"""
# set headers if not set before...
if self._headers == None:
self._write_headers(*tuple(self.rec_list_headers(json_obj)))
# print which keys that won't be in csv (only checks top level...)
missed_headers = []
for k in json_obj.iterkeys():
# is there? and not already noted as missing?
if k not in self.missed_headers and \
k not in self._top_level_headers:
missed_headers.append(k)
if missed_headers:
self.logger.warning('the following headers will not be in the csv:'+
'%s',','.join(missed_headers))
self.missed_headers.update(missed_headers)
# add row number if requested!
self._cur_row_num += 1
if self._add_row_num:
json_obj[self._row_num_header_name] = self._cur_row_num
# iter items
for h in self._headers:
yield self.item_to_str(self._get_item(json_obj,h))
''
#===========================================================================
# Find All The Headers
#===========================================================================
def rec_list_headers(self,obj,name='',lvl=0):
"""Go through the dict and list out the keys (going into sub keys if
present. e.g. {'a':{'b':1},'b':2} -- > ['a.b','b']"""
if not isinstance(obj,(dict,list)) or len(obj)==0:
self.logger.critical('trying to get headers for a non list or dict object (or empty obj...)!!!')
raise ValueError("bad value passed to rec_list_headers obj={0} name={1} lvl={2}".format(obj,name,lvl))
# if dict
if isinstance(obj, dict):
# yield all keys
for k in obj:
                # if it is a number or string, or an empty list/dict - yield the key
if self._is_simple_object(obj[k]) or \
isinstance(obj[k],datetime) or len(obj[k])==0:
yield k if name=='' else (name+'.'+k)
# if non-empty list
elif isinstance(obj[k], (list,tuple)):
if self._expand_lists:
#for i in self.rec_list_headers(obj[k], k,lvl+1): yield (name+'.'+i) if name!='' else i
yield k if name=='' else (name+'.'+k)
else: yield k if name=='' else (name+'.'+k)
# if non-empty dict
elif isinstance(obj[k], dict):
                    for i in self.rec_list_headers(obj[k], k, lvl+1): yield i if name=='' else (name+'.'+i)  # prefix nested sub-headers
# IDK what it is... assume it's simple...
else: yield k if name=='' else (name+'.'+k)
# if list (assume each item in the list is the same as the first!)
elif isinstance(obj,(tuple,list)):
if self._is_simple_object(obj[0]) or len(obj[0])==0:
yield self._LIST_FLAG.format(lvl) if name=='' else (name+'.'+self._LIST_FLAG.format(lvl))
# if non-empty list
elif isinstance(obj[0], (list,tuple)):
                for i in self.rec_list_headers(obj[0], self._LIST_FLAG.format(lvl),lvl+1): yield i if name=='' else (name+'.'+i)
# if non-empty dict
elif isinstance(obj[0], dict):
                for i in self.rec_list_headers(obj[0], self._LIST_FLAG.format(lvl),lvl+1): yield i if name=='' else (name+'.'+i)
# IDK what it is... assume it's simple...
            else: yield self._LIST_FLAG.format(lvl) if name=='' else (name+'.'+self._LIST_FLAG.format(lvl))
#===========================================================================
# Static Method
#===========================================================================
@staticmethod
def _is_simple_object(obj):
"""helper function - determine if the type is simple: just write it."""
return (obj==None or isinstance(obj,(basestring,int,long,float,bool,Decimal)))
@staticmethod
def _get_item(obj,loc,default=None,loc_ind=0):
"""get the item out of dict described. by the loc (e.g. 'a.b.c')"""
if obj is None: return None
if isinstance(loc, dict):
# ---------------- dict_fn ----------------
if 'dict_fn' in loc:
try: a = loc['dict_fn'](d=obj,
default=loc.get('default',default))
except Exception as e:
raise CustomColumnFunctionException(repr(loc)+'\n'+repr(e))
else: return a
# ---------------- key_fn -----------------
if 'key_fn' in loc:
try: a = loc['key_fn'](key=loc['key'],
value=MyJSON2CSV._get_item(obj,loc['key'],
loc.get('default',default)),
default=loc.get('default'))
except Exception as e:
raise CustomColumnFunctionException(repr(loc)+'\n'+repr(e))
else: return a
if 'eval' in loc:
vals = {key:MyJSON2CSV._get_item(obj,key)
for key in loc['keys']}
eq = None
try:
eq = loc['vformat'](loc['eval'],tuple(), vals)
val = eval(eq,{},{})
except Exception as e:
if callable(loc.get('on_error')):
return loc['on_error'](e,loc,vals,eq)
else: return default
else: return val
# ---------------- key -----------------
return MyJSON2CSV._get_item(obj,loc['key'],
loc.get('default',default))
else:
ind = loc.find('.',loc_ind)
#print obj,loc,loc_ind,ind
if ind==-1: return obj.get(loc,default)
elif loc.find('.',ind+1) == ind+1:
# there's a .. --> convert to 1 . but keep it
return MyJSON2CSV._get_item(obj,loc[:ind]+loc[ind+1:],default,ind+1)
else:
return MyJSON2CSV._get_item(obj.get(loc[:ind]),loc[ind+1:],default)
#if '.' in loc:
# a = loc.find('.')
# return MyJSON2CSV._get_item(obj.get(loc[:a]),loc[a+1:])
#else: return obj.get(loc,default)
#===========================================================================
# Turn whatever the item it into a string (writable to the csv)
#===========================================================================
def item_to_str(self,value):
"""format whatever the value is into a str in a specific way!
"""
# keep track of all data types! (not used however...)
self.datatypes.add(type(value))
# None
if value == None: return self.null
# Simple (view '_is_simple_object' for details)
elif self._is_simple_object(value): return self.apply_encoding(unicode(value))
# dict
elif isinstance(value, dict):
if value: # non-empty
return self.apply_encoding(json_dumps(value))
#return ','.join(self.apply_encoding(k)+':'+self.apply_encoding(v) for k,v in value.iteritems())
else: return ''
# list
elif isinstance(value,(list,tuple)):
if value: # non-empty
return self.apply_encoding(json_dumps(value)) #','.join(self.apply_encoding(i) for i in value)
else: return ''
# just in case...
elif isinstance(value,datetime):
return value.strftime(self.dt_format)
else:
self.logger.warning('flattening for datatype {} has not been explicitly set... using repr(...)'.format(type(value)))
return repr(value)
def apply_encoding(self,s,encoding=None):
#replaceWith.join(i for i in s if ord(i)<128)
if isinstance(s,basestring): return s.encode(encoding if encoding else self.encoding)
else: return self.item_to_str(s)
#===========================================================================
# Get Default CSV
#===========================================================================
def _get_default_csv(self,open_file):
# quoting=csv.QUOTE_MINIMAL - default
return csv_writer(open_file,**self.csv_params)
#===========================================================================
# get summary info
#===========================================================================
def get_summary_info(self):
a = MyLoggingBase.get_summary_info(self)
a.extend(('file: {}'.format(self._filename),
'rows: {:,}'.format(self._cur_row_num)))
return a
#===============================================================================
# Exception classes
#===============================================================================
class CustomColumnFunctionException(Exception):
""" custom column function (either dict_fn or key_fn)
has raised an error """
class BadCustomHeaderObject(Exception):
""" the header object doesn't have the needed key(s) """
#===============================================================================
# Main
#===============================================================================
if __name__ == '__main__':
try: from tests import test_myjson2csv
except ImportError: print('no test for myjson2csv')
else: test_myjson2csv.run_test()
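# Editor's usage sketch (hedged, not part of the original module): a dotted key, a
# renamed column and an eval-style computed column, following the set_headers()
# docstring. The output filename and the sample record are illustrative only.
def _example_usage():
    out = MyJSON2CSV('example_output.csv')
    out.set_headers('b',
                    'a.b',
                    {'key': 'a', 'name': 'a_as_json'},
                    {'eval': '{b}*2', 'keys': ('b',), 'name': 'b_doubled', 'default': ''})
    out.write_json_object({'a': {'a': 2, 'b': 4}, 'b': 1})
    out.close()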
| true |
034d2bdd2f39fec0236727c740ed0d7803a51c86
|
Python
|
silky/bell-ppls
|
/env/lib/python2.7/site-packages/observations/r/polio_trials.py
|
UTF-8
| 2,671 | 2.53125 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def polio_trials(path):
"""Polio Field Trials Data
The data frame `PolioTrials` gives the results of the 1954 field
trials to test the Salk polio vaccine (named for the developer, Jonas
Salk), conducted by the National Foundation for Infantile Paralysis
(NFIP). It is adapted from data in the article by Francis et al. (1955).
There were actually two clinical trials, corresponding to two
statistical designs (`Experiment`), discussed by Brownlee (1955). The
comparison of designs and results represented a milestone in the
development of randomized clinical trials.
A data frame with 8 observations on the following 6 variables.
`Experiment`
a factor with levels `ObservedControl` `RandomizedControl`
`Group`
a factor with levels `Controls` `Grade2NotInoculated`
`IncompleteVaccinations` `NotInoculated` `Placebo`
`Vaccinated`
`Population`
the size of the population in each group in each experiment
`Paralytic`
the number of cases of paralytic polio observed in that group
  `NonParalytic`
     the number of cases of non-paralytic polio observed in that group
`FalseReports`
the number of cases initially reported as polio, but later
determined not to be polio in that group
Kyle Siegrist, "Virtual Laboratories in Probability and Statistics",
http://www.math.uah.edu/stat/data/Polio.html
Thomas Francis, Robert Korn, et al. (1955). "An Evaluation of the 1954
Poliomyelitis Vaccine Trials", *American Journal of Public Health*, 45,
(50 page supplement with a 63 page appendix).
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `polio_trials.csv`.
Returns:
Tuple of np.ndarray `x_train` with 8 rows and 6 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'polio_trials.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/PolioTrials.csv'
maybe_download_and_extract(path, url,
save_file_name='polio_trials.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
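# Editor's usage sketch (hedged, not part of the original module); the path is illustrative.
#   x_train, metadata = polio_trials('~/data')
#   print(metadata['columns'])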
| true |
d0cfa366f1a98a1e0298f2242e6532c991c47f08
|
Python
|
wothard/neteasefloat
|
/music_hot_cato.py
|
UTF-8
| 2,536 | 2.859375 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# encoding: utf-8
import urllib
import re
import threading
import requests
import Queue
import pygal
province = []
injuries = []
class GetAllcolum(threading.Thread):
    """Fetch every playlist under the given category tag"""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
self.headers = {
'Referer':'http://music.163.com/',
'Host':'music.163.com',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0 Iceweasel/38.3.0',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
def run(self):
while 1:
url = self.queue.get()
self.getallcolum(url)
self.queue.task_done()
def getallcolum(self, url):
req = requests.get(url, headers=self.headers).content
filter_colum_name = re.compile(r'<a title="(.*?)class="msk"></a>')
filter_play_number = re.compile(r'<span class="nb">(.*?)</span>')
result0 = filter_colum_name.findall(req)
result1 = filter_play_number.findall(req)
for i in range(len(result1)):
colum_name = result0[i].split('\"')
colum_name = str(colum_name[0]).decode('string_escape')
colum_name = colum_name.decode('utf-8')
            if '万' in result1[i]:
                # '万' means ten-thousand: expand it so the play count parses as an int
                i = result1[i].replace("万", "0000")
                injuries.append(int(i)/1000)
            else:
                injuries.append(int(result1[i])/1000)
province.append(colum_name[:4])
def main():
all_url = []
queue = Queue.Queue()
    firsturl = raw_input('Enter a music category: ')
firsturl = urllib.quote(firsturl)
first_url = 'http://music.163.com/discover/playlist/?cat=' + firsturl + '&order=hot'
second_url = 'http://music.163.com/discover/playlist/?order=hot&cat=' + firsturl + '&limit=35&offset='
all_url.append(first_url)
for i in range(42):
last_url = second_url + (str((i+1)*35))
all_url.append(last_url)
for url in all_url:
queue.put(url)
for i in range(10):
t = GetAllcolum(queue)
t.setDaemon(True)
t.start()
queue.join()
    # Render a horizontal bar chart (SVG format)
line_chart = pygal.HorizontalBar()
line_chart.title = u"网易云 "
for i in range(len(injuries)):
if injuries[i] > 100:
line_chart.add(province[i], injuries[i])
line_chart.render_to_file('data_file/music_hot_cato.svg')
main()
| true |
d0798bbdffac2cdadf868d3f9fb3123297fa0889
|
Python
|
v1ztep/pizza_TG_bot
|
/cache_menu.py
|
UTF-8
| 2,364 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
import json
import os
from dotenv import load_dotenv
from connect_to_redis_db import get_database_connection
from moltin import get_all_categories
from moltin import get_image
from moltin import get_products_by_category
def get_categories_id(moltin_token, moltin_secret):
categories_id = {}
all_categories = get_all_categories(moltin_token, moltin_secret)
for category in all_categories['data']:
categories_id.update({category['name']: category['id']})
return categories_id
def get_products_by_categories(moltin_token, moltin_secret, categories_id):
page_offset = 0
limit_per_page = 10
category_names = ('Основные', 'Особые', 'Сытные', 'Острые')
products_by_categories = {}
for category in category_names:
products_by_category = get_products_by_category(
moltin_token, moltin_secret,
page_offset, limit_per_page,
category_id=categories_id[category]
)
products_by_category_with_image = get_products_by_category_with_image(
products_by_category, moltin_token, moltin_secret
)
products_by_categories[category] = products_by_category_with_image
return products_by_categories
def get_products_by_category_with_image(
products_by_category, moltin_token, moltin_secret
):
products_by_category_with_image = []
for product in products_by_category['data']:
image_id = product['relationships']['main_image']['data']['id']
image_url = get_image(moltin_token, moltin_secret, image_id)
products_by_category_with_image.append({
'title': product['name'],
'image_url': image_url,
'subtitle': product['description'],
'id': product['id']
})
return products_by_category_with_image
def main():
load_dotenv()
moltin_token = os.environ['ELASTICPATH_CLIENT_ID']
moltin_secret = os.environ['ELASTICPATH_CLIENT_SECRET']
db = get_database_connection()
categories_id = get_categories_id(moltin_token, moltin_secret)
db.set('categories_id', json.dumps(categories_id))
products_by_categories = get_products_by_categories(
moltin_token, moltin_secret, categories_id
)
db.set('products_by_categories', json.dumps(products_by_categories))
if __name__ == '__main__':
main()
| true |
11833cbfcac8db0e35105a4eb15241453f36d8a4
|
Python
|
tianshanghong/huobi-1
|
/huobitrade/datatype.py
|
UTF-8
| 13,344 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/13 0013 16:36
# @Author : Hadrianl
# @File : datatype.py
# @Contact : 137150224@qq.com
import pandas as pd
from .service import HBRestAPI
from .utils import PERIOD, DEPTH, logger
from itertools import chain
__all__ = ['HBMarket', 'HBAccount', 'HBMargin']
_api = HBRestAPI(get_acc=True)
class HBKline:
def __init__(self, symbol):
self.__symbol = symbol
def __getattr__(self, item):
global _api
if item[0] == '_':
args = item[1:].split('_')
if args[0] not in PERIOD:
raise Exception('period not exist.')
else:
reply = _api.get_kline(self.__symbol, args[0], int(args[1]))
klines = pd.DataFrame(reply['data'])
return klines
elif item == 'last':
reply = _api.get_last_1m_kline(self.__symbol)
last_kline = pd.Series(reply['tick'])
return last_kline
elif item == 'last_24_hour':
reply = _api.get_last_24h_kline(self.__symbol)
last_24h = pd.Series(reply['tick'])
return last_24h
else:
raise AttributeError
def __repr__(self):
return f'<{self.__class__} for {self.__symbol}>'
def __str__(self):
return f'<{self.__class__} for {self.__symbol}>'
class HBDepth:
def __init__(self, symbol):
self.__symbol = symbol
def __getattr__(self, item):
global _api
if item in (d for d in DEPTH.values()):
reply = _api.get_last_depth(self.__symbol, item)
bids, asks = reply['tick']['bids'], reply['tick']['asks']
df_bids = pd.DataFrame(bids, columns=['bid', 'bid_qty'])
df_asks = pd.DataFrame(asks, columns=['ask', 'ask_qty'])
depth = pd.concat([df_bids, df_asks], 1)
return depth
else:
raise AttributeError
def __repr__(self):
return f'<{self.__class__} for {self.__symbol}>'
def __str__(self):
return f'<{self.__class__} for {self.__symbol}>'
class HBTicker:
def __init__(self, symbol):
self.__symbol = symbol
def __getattr__(self, item):
global _api
if item == 'last':
reply = _api.get_last_ticker(self.__symbol)
last_ticker = pd.DataFrame(reply['tick']['data'])
return last_ticker
elif 'last' in item:
args = item.split('_')
size = int(args[1])
reply = _api.get_ticker(self.__symbol, size)
ticker_list = [
t for t in chain(*[i['data'] for i in reply['data']])
]
tickers = pd.DataFrame(ticker_list)
return tickers
def __repr__(self):
return f'<{self.__class__} for {self.__symbol}>'
def __str__(self):
return f'<{self.__class__} for {self.__symbol}>'
class HBSymbol:
def __init__(self, name, **kwargs):
self.name = name
self.attr = kwargs
for k, v in kwargs.items():
k = k.replace('-', '_')
setattr(self, k, v)
self.kline = HBKline(self.name)
self.depth = HBDepth(self.name)
self.ticker = HBTicker(self.name)
def __repr__(self):
return f'<Symbol:{self.name}-{self.attr}>'
def __str__(self):
return f'<Symbol:{self.name}-{self.attr}>'
class HBMarket:
"""
    Huobi market data class, for convenient access to market data
"""
def __init__(self):
self.symbols = []
self._update_symbols()
def add_symbol(self, symbol):
setattr(self, symbol.name, symbol)
def _update_symbols(self):
global _api
_symbols = _api.get_symbols()
if _symbols['status'] == 'ok':
            for d in _symbols['data']:  # iterate over the trading-pair definitions
name = d['base-currency'] + d['quote-currency']
self.add_symbol(HBSymbol(name, **d))
self.symbols.append(name)
else:
raise Exception(f'err-code:{_symbols["err-code"]} err-msg:{_symbols["err-msg"]}')
def __repr__(self):
return f'<HBData>:{self.symbols}'
def __str__(self):
return f'<HBData>:{self.symbols}'
def __getattr__(self, item):
global _api
if item == 'all_24h_kline':
return _api.get_all_last_24h_kline()
class HBOrder:
def __init__(self, acc_id):
self.acc_id = acc_id
def send(self, amount, symbol, _type, price=0):
ret = _api.send_order(self.acc_id, amount, symbol, _type, price)
logger.debug(f'send_order_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'send order request failed!--{ret}')
def cancel(self, order_id):
ret = _api.cancel_order(order_id)
logger.debug(f'cancel_order_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'cancel order request failed!--{ret}')
def batchcancel(self, order_ids:list):
ret = _api.batchcancel_order(order_ids)
logger.debug(f'batchcancel_order_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'batchcancel order request failed!--{ret}')
def get_by_id(self, order_id):
oi_ret = _api.get_order_info(order_id, _async=True)
mr_ret = _api.get_order_matchresults(order_id, _async=True)
ret = _api.async_request([oi_ret, mr_ret])
logger.debug(f'get_order_ret:{ret}')
d = dict()
if all(ret):
if ret[0]['status'] == 'ok':
d.update({'order_info': ret[0]['data']})
else:
d.update({'order_info':{}})
if ret[1]['status'] == 'ok':
d.update({'match_result': ret[1]['data']})
else:
d.update({'match_result': {}})
return d
else:
raise Exception(f'get order request failed!--{ret}')
def get_by_symbol(self, symbol, states, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
ret = _api.get_orders_info(symbol, states, types, start_date, end_date, _from, direct, size)
logger.debug(f'get_orders_ret:{ret}')
if ret and ret['status'] == 'ok':
data = ret['data']
df = pd.DataFrame(data).set_index('id')
return df
else:
raise Exception(f'get orders request failed!--{ret}')
def __getitem__(self, item):
return self.get_by_id(item)
class HBTrade:
def __init__(self, acc_id):
self.acc_id = acc_id
def get_by_id(self, order_id):
ret = _api.get_order_matchresults(order_id)
logger.debug(f'trade_ret:{ret}')
if ret and ret['status'] == 'ok':
data = ret['data']
df = pd.DataFrame(data).set_index('id')
return df
else:
raise Exception(f'trade results request failed!--{ret}')
def get_by_symbol(self, symbol, types, start_date=None, end_date=None, _from=None, direct=None, size=None):
ret = _api.get_orders_matchresults(symbol, types, start_date, end_date, _from, direct, size)
logger.debug(f'trade_ret:{ret}')
if ret and ret['status'] == 'ok':
data = ret['data']
df = pd.DataFrame(data).set_index('id')
return df
else:
raise Exception(f'trade results request failed!--{ret}')
def __getitem__(self, item):
return self.get_by_id(item)
class HBAccount:
def __init__(self):
ret = _api.get_accounts()
logger.debug(f'get_order_ret:{ret}')
if ret and ret['status'] == 'ok':
data = ret['data']
self.Detail = pd.DataFrame(data).set_index('id')
else:
raise Exception(f'get accounts request failed!--{ret}')
def __getattr__(self, item):
try:
args = item.split('_')
if int(args[1]) in self.Detail.index.tolist():
if args[0] == 'balance':
bal = HBBalance(args[1])
setattr(self.__class__, item, bal)
return bal
elif args[0] == 'order':
order = HBOrder(args[1])
setattr(self, item, order)
return order
elif args[0] == 'trade':
trade = HBTrade(args[1])
setattr(self, item, trade)
return trade
else:
raise AttributeError
else:
raise AttributeError
except Exception as e:
raise e
def __repr__(self):
return f'<HBAccount>Detail:\n{self.Detail}'
def __str__(self):
return f'<HBAccount>Detail:\n{self.Detail}'
class HBBalance:
def __init__(self, account_id):
self.acc_id = account_id
self.update()
def update(self):
ret = _api.get_balance(self.acc_id)
if ret and ret['status'] == 'ok':
data = ret['data']
self.Id = data['id']
self.Type = data['type']
self.State = data['state']
self.Detail = pd.DataFrame(data['list']).set_index('currency')
else:
raise Exception(f'get balance request failed--{ret}')
def __get__(self, instance, owner):
bal = instance.__dict__.setdefault('balance', {})
bal[self.acc_id] = self
self.update()
return self
def __repr__(self):
return f'<HBBalance>ID:{self.Id} Type:{self.Type} State:{self.State}'
def __str__(self):
return f'<HBBalance>ID:{self.Id} Type:{self.Type} State:{self.State}'
def __getitem__(self, item):
return self.Detail.loc[item]
class HBMargin:
def __init__(self):
...
def transferIn(self, symbol, currency, amount):
ret = _api.exchange_to_margin(symbol, currency, amount)
logger.debug(f'transferIn_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'transferIn request failed!--{ret}')
def transferOut(self, symbol, currency, amount):
ret = _api.exchange_to_margin(symbol, currency, amount)
logger.debug(f'transferOut_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'transferOut request failed!--{ret}')
def applyLoan(self, symbol, currency, amount):
ret = _api.apply_loan(symbol, currency, amount)
logger.debug(f'apply_loan_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'apply_loan request failed!--{ret}')
def repayLoan(self, symbol, currency, amount):
ret = _api.repay_loan(symbol, currency, amount)
logger.debug(f'repay_loan_ret:{ret}')
if ret and ret['status'] == 'ok':
return ret['data']
else:
raise Exception(f'repay_loan request failed!--{ret}')
def getLoan(self, symbol, currency, states=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
ret = _api.get_loan_orders(symbol, currency, states, start_date, end_date, _from, direct, size)
logger.debug(f'get_loan_ret:{ret}')
if ret and ret['status'] == 'ok':
df = pd.DataFrame(ret['data']).set_index('id')
return df
else:
raise Exception(f'get_loan request failed!--{ret}')
def getBalance(self, symbol):
return HBMarginBalance(symbol)
def __getitem__(self, item):
return self.getBalance(item)
class HBMarginBalance:
def __init__(self, symbol):
ret = _api.get_margin_balance(symbol)
logger.debug(f'<保证金结余>信息:{ret}')
if ret and ret['status'] == 'ok':
balance = {}
for d in ret['data']:
data = balance.setdefault(d['id'], {})
data['id'] = d['id']
data['type'] = d['type']
data['state'] = d['state']
data['symbol'] = d['symbol']
data['fl-price'] = d['fl-price']
data['fl-type'] = d['fl-type']
data['risk-rate'] = d['risk-rate']
data['detail'] = pd.DataFrame(d['list']).set_index('currency')
else:
raise Exception(f'get balance request failed--{ret}')
self.__balance = balance
def __repr__(self):
info = []
        for b in self.__balance.values():
info.append(f'<HBMarginBalance: {b["symbol"]}>ID:{b["id"]} Type:{b["type"]} State:{b["state"]} Risk-rate:{b["risk-rate"]}')
info = '\n'.join(info)
return info
def __str__(self):
info = []
        for b in self.__balance.values():
info.append(f'<HBMarginBalance: {b["symbol"]}>ID:{b["id"]} Type:{b["type"]} State:{b["state"]} Risk-rate:{b["risk-rate"]}')
info = '\n'.join(info)
return info
def __getitem__(self, item):
return self.__balance[item]
@property
def balance(self):
return self.__balance
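# Usage sketch (not part of the original file): assumes valid Huobi API credentials are
# configured for HBRestAPI, that '1min' is a key of PERIOD, and that the symbol and
# account id below are illustrative only.
#   market = HBMarket()
#   bars = market.btcusdt.kline._1min_100      # DataFrame of the last 100 one-minute bars
#   latest = market.btcusdt.kline.last         # most recent 1-minute bar as a Series
#   account = HBAccount()
#   balance = account.balance_123456           # HBBalance for account id 123456 (hypothetical id)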
| true |
7ed0f42766d47e5916a002d8ff624bc088c709d7
|
Python
|
cdprf/tools
|
/web/smallcrawl.py
|
UTF-8
| 469 | 3.25 | 3 |
[] |
no_license
|
#!/usr/bin/python
import urllib2
site = raw_input("site : ") # http://www.google.com/ ---> this must be in this form
list = open((raw_input("list with folders : "))) # a textfile , one folder/line
for folder in list :
    folder = folder.strip()  # drop the trailing newline read from the word list
    try :
        url = site+folder
        urllib2.urlopen(url).read()
        msg = "[-] folder " + folder + " exists"
        print msg
    except :
        msg = "[-] folder " + folder + " does not exist"
        print msg
print ""
print "[-] done"
| true |
32214d2c57ba6633f2285e133c22c295f22e4cf7
|
Python
|
Kwabena-Kobiri/CodingProjects
|
/coding.py
|
UTF-8
| 513 | 4.46875 | 4 |
[] |
no_license
|
#INPUT
"""Our input is the fahrenheit temperature from the woman"""
fahrenheit_value = float(input("Please Enter the Fahrenheit temperature: \n"))
#PROCESSING
"""The conversion from fahrenheit to celsius"""
celsius_value = (fahrenheit_value - 32) * (5/9)
print(round(celsius_value, 2))
#OUTPUT
"""Output is the equivalent celsius temperature"""
number = 98.76453
new_number = round(number, 2)
print(new_number)
print('all even numbers from 1 to 10')
print(list(range(2, 11, 2)))
| true |
c6b0b9f01908ef1e9182c2f8b3c211d7f7e10ea2
|
Python
|
tchamberlin/quantum_bg
|
/quantum_nologs.py
|
UTF-8
| 11,799 | 2.90625 | 3 |
[] |
no_license
|
"""Take interval-based images from a webcam"""
from datetime import datetime, timedelta
from pathlib import Path
from pprint import pprint
import argparse
import logging
import math
import operator
import pickle
import random
import subprocess
# logger = logging.getLogger(__name__)
CARDS = [
"ferocious",
"relentless",
"cruel",
"scrappy",
"strategic",
"rational",
"stubborn",
]
def load(path):
with open(path, "rb") as file:
results = pickle.load(file)
return results
def save(results, path):
with open(path, "wb") as file:
pickle.dump(results, file, protocol=5)
class Side:
def __init__(self, ship_die, cards=None, combat_die_rolls=None):
if not (1 <= ship_die <= 6):
raise ValueError(f"ship_die must be between 1 and 6! Got: {ship_die}")
self.ship_die = ship_die
self.combat_die = None
if cards:
invalid_cards = [card for card in cards if card not in CARDS]
if invalid_cards:
raise ValueError(f"Invalid cards: {invalid_cards}")
self.cards = tuple(cards)
else:
self.cards = tuple()
self.predefined_combat_die_rolls = (
combat_die_rolls if combat_die_rolls is not None else []
)
self.combat_die_rolls = []
for combat_die in self.predefined_combat_die_rolls:
if not (1 <= combat_die <= 6):
raise ValueError(
f"All combat_die_rolls must be between 1 and 6! Got: {combat_die}"
)
self.roll_counter = 0
self.combat_log = []
def __repr__(self):
return f"{self.__class__.__name__}(ship_die={self.ship_die}, combat_die={self.combat_die}, cards={self.cards})"
@classmethod
def to_string(cls, ship_die, combat_die, cards):
if combat_die:
string = f"{ship_die}+{combat_die}={ship_die+ combat_die}"
else:
string = f"{ship_die}"
if cards:
string = f"{string} [{', '.join(cards)}]"
return f"{cls.__name__} {string}"
def __str__(self):
return self.to_string(self.ship_die, self.combat_die, self.cards)
def __eq__(self, die):
return hash(self) == hash(die)
def __hash__(self):
return hash((self.ship_die, self.cards, self.combat_die))
def roll(self):
if "rational" in self.cards:
the_roll = 3
elif self.predefined_combat_die_rolls:
the_roll = self.predefined_combat_die_rolls[self.roll_counter]
else:
the_roll = random.randint(1, 6)
self.combat_die = the_roll
self.combat_die_rolls.append(the_roll)
self.roll_counter += 1
if not (1 <= the_roll <= 6):
raise ValueError(f"Combat die must be between 1 and 6! Got: {the_roll}")
def total(self, dice_only=False):
total = self.ship_die + self.combat_die
if dice_only:
return total
if "ferocious" in self.cards:
total -= 1
if "strategic" in self.cards:
total -= 2
return total
def reset(self):
self.combat_die = None
self.roll_counter = 0
self.combat_die_rolls = []
def history(self):
history = []
for attacker, defender, attacker_wins in self.combat_log:
attacker_ship_die, attacker_combat_die, attacker_total = attacker
defender_ship_die, defender_combat_die, defender_total = defender
vstring = "Attacker" if attacker_wins else "Defender"
history.append(
f"{attacker_ship_die}+{attacker_combat_die}={attacker_total} vs. "
f"{defender_ship_die}+{defender_combat_die}={defender_total}: {vstring}"
)
return "\n".join(history)
class Attacker(Side):
def recalc(self, attacker, defender, comparator=operator.le):
attacker_total = attacker.total()
defender_total = defender.total()
attacker_wins = comparator(attacker_total, defender_total)
winner, loser = (attacker, defender) if attacker_wins else (defender, attacker)
self.combat_log.append(
(
(attacker.ship_die, attacker.combat_die, attacker.total()),
(defender.ship_die, defender.combat_die, defender.total()),
attacker == winner,
)
)
return winner, loser
def attack(self, defender):
attacker = self
attacker.roll()
defender.roll()
winner, loser = self.recalc(attacker, defender)
# If the LOSER holds Relentless, they can re-roll
# We assume that the loser will ALWAYS do this, and that the winner
# NEVER will
if "relentless" in loser.cards:
prev_winner = winner
loser.roll()
winner, loser = self.recalc(attacker, defender)
# If the ATTACKER holds Scrappy, they can re-roll
# We assume that, if they lose, they will ALWAYS do this, and that
# they never will if they win
if loser == attacker and "scrappy" in attacker.cards:
prev_winner = winner
loser.roll()
winner, loser = self.recalc(attacker, defender)
# If the LOSER holds Cruel, they can force the WINNER to re-roll
        # We assume that the loser will ALWAYS do this, and that the winner
        # never will
if "cruel" in loser.cards:
prev_winner = winner
winner.roll()
winner, loser = self.recalc(attacker, defender)
# If the DEFENDER holds Stubborn, then they break ties
if loser == defender and "stubborn" in defender.cards:
prev_winner = winner
# So, instead of the attacker winning if it is LESS THAN OR
# EQUAL TO, it only wins if it is strictly LESS THAN the defender
# total
winner, loser = self.recalc(attacker, defender, comparator=operator.lt)
if (
attacker.predefined_combat_die_rolls
and attacker.predefined_combat_die_rolls != attacker.combat_die_rolls
):
raise AssertionError(
"Attacker predefined_combat_die_rolls "
f"{attacker.predefined_combat_die_rolls} don't match actual: "
f"{attacker.combat_die_rolls}"
)
if (
defender.predefined_combat_die_rolls
and defender.predefined_combat_die_rolls != defender.combat_die_rolls
):
raise AssertionError(
"Defender predefined_combat_die_rolls "
f"{defender.predefined_combat_die_rolls} don't match actual: "
f"{defender.combat_die_rolls}"
)
return winner == attacker
class Defender(Side):
pass
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--save", type=Path)
parser.add_argument("-i", "--compare-to", type=Path)
parser.add_argument("-a", "--attacker-cards", nargs="+", choices=CARDS)
parser.add_argument(
"-A", "--attackers", dest="attacker_ship_dice", nargs="+", type=int
)
parser.add_argument("-d", "--defender-cards", nargs="+", choices=CARDS)
parser.add_argument(
"-D", "--defenders", dest="defender_ship_dice", nargs="+", type=int
)
parser.add_argument(
"--rolls",
metavar="ATTACKER_SHIP ATTACKER_ROLL DEFENDER_SHIP DEFENDER_ROLL",
nargs=4,
type=int,
)
parser.add_argument("-n", "--num-trials", type=int, default=1000)
parser.add_argument("-v", "--verbose", action="store_true")
return parser.parse_args()
def init_logging(level):
"""Initialize logging"""
logging.getLogger().setLevel(level)
_logger = logging.getLogger(__name__)
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(message)s"))
def do_iterations(attacker, defender, num_trials):
attacker_win_count = 0
for __ in range(num_trials):
attacker.reset()
defender.reset()
result = attacker.attack(defender)
attacker_win_count += int(result)
return attacker_win_count
def get_results(
num_trials,
attacker_cards=None,
defender_cards=None,
attacker_ship_dice=None,
defender_ship_dice=None,
):
if attacker_ship_dice:
attacker_ship_dice = [*attacker_ship_dice]
else:
attacker_ship_dice = range(1, 7)
if defender_ship_dice:
defender_ship_dice = [*defender_ship_dice]
else:
defender_ship_dice = range(1, 7)
attackers = [Attacker(ship_die=n, cards=attacker_cards) for n in attacker_ship_dice]
defenders = [Defender(ship_die=n, cards=defender_cards) for n in defender_ship_dice]
results = {}
for attacker in attackers:
for defender in defenders:
attacker_win_count = do_iterations(attacker, defender, num_trials)
attacker_win_ratio = attacker_win_count / num_trials
results[(attacker, defender)] = attacker_win_ratio
return results
def do_stats(args):
if args.compare_to:
with open(args.compare_to, "rb") as file:
base_results = pickle.load(file)
else:
base_results = None
results = get_results(
args.num_trials,
attacker_cards=args.attacker_cards,
defender_cards=args.defender_cards,
attacker_ship_dice=args.attacker_ship_dice,
defender_ship_dice=args.defender_ship_dice,
)
for key, attacker_win_ratio in results.items():
attacker, defender = key
if base_results:
attacker_win_ratio_base = base_results[
(attacker.ship_die, defender.ship_die)
]
# diff_from_base = attacker_win_ratio - attacker_win_ratio_base
diff_from_base = (
attacker_win_ratio - attacker_win_ratio_base
) / attacker_win_ratio_base
compare_str = (
f" (vs. {attacker_win_ratio_base:.2%}; {diff_from_base:.2%} "
"diff from base)"
)
else:
compare_str = ""
attacker.reset()
defender.reset()
print(
f"<{attacker}> wins against <{defender}> "
f"{attacker_win_ratio:.2%} of the time (over {args.num_trials} trials){compare_str}"
)
if args.save:
with open(args.save, "wb") as file:
results = {
(key[0].ship_die, key[1].ship_die): value
for key, value in results.items()
}
pickle.dump(results, file)
def do_specific(args):
(
attacker_ship_die,
attacker_combat_die,
defender_ship_die,
defender_combat_die,
) = args.rolls
    # Side has no combat_die kwarg; the fixed roll is passed via combat_die_rolls
    attacker = Attacker(
        ship_die=attacker_ship_die,
        combat_die_rolls=[attacker_combat_die],
        cards=args.attacker_cards,
    )
    defender = Defender(
        ship_die=defender_ship_die,
        combat_die_rolls=[defender_combat_die],
        cards=args.defender_cards,
    )
res = attacker.attack(defender)
print(f"Winner: {attacker if res else defender}")
def main():
args = parse_args()
if args.verbose:
init_logging(logging.DEBUG)
else:
init_logging(logging.INFO)
if args.attacker_cards and args.defender_cards:
shared = set(args.attacker_cards).intersection(set(args.defender_cards))
if shared:
raise ValueError(
f"Attacker and defender cannot have the same card! Shared cards: {shared}"
)
results = {}
if args.rolls:
do_specific(args)
else:
do_stats(args)
if __name__ == "__main__":
main()
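# Example invocations (sketch; flags come from parse_args above, values are illustrative):
#   python quantum_nologs.py -A 3 -D 2 -a relentless -n 10000
#   python quantum_nologs.py --rolls 3 4 2 5 -d stubborn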
| true |
f3d6403dbb1591cd68e7901c7b267a57ff96579c
|
Python
|
rubana13/check
|
/72.py
|
UTF-8
| 183 | 3.734375 | 4 |
[] |
no_license
|
s=input("Enter string:")
count=0
vowels = set("aeiou")
for letter in s:
if letter in vowels:
count+=1
if(count>0):
print("yes")
else:
print("no")
| true |
5bff558f92e8acfaa3bbf499c6f9247afab256e4
|
Python
|
asihacker/python3_bookmark
|
/python笔记/aaa基础内置/python内置模块/signal信号模块/接收信号绑定处理对应的事件.py
|
UTF-8
| 811 | 2.78125 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/6/15 19:42
# @Author : AsiHacker
# @File : 接收信号绑定处理对应的事件.py
# @Software: PyCharm
# @notice : True masters always have the heart of an apprentice.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import signal
import time
def receive_signal(signum, stack):
"""用于接收信号,对signum的值区分信号,实现不同的信号做对应的处理"""
print('接收的signum', signum)
# register the handler for the user-defined signals SIGUSR1 and SIGUSR2
signal.signal(signal.SIGUSR1, receive_signal)
signal.signal(signal.SIGUSR2, receive_signal)
print('My PID: %s' % os.getpid())
# loop forever, waiting for incoming signals
while True:
print('Waiting...')
time.sleep(3)
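# To trigger the handler (sketch): from another shell, send one of the registered
# signals to the PID printed above, e.g. `kill -USR1 <pid>` or `kill -USR2 <pid>`;
# receive_signal then prints the received signal number.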
| true |
991fa70e6785d2aee9cde8bb69ea17f5eef877cb
|
Python
|
swatantragoswami09/Amazon_SDE_Test_Series_solutions
|
/Find whether path exist.py
|
UTF-8
| 798 | 3.203125 | 3 |
[] |
no_license
|
def adjacent(key, arr, n):
    # collect the indices of the in-bounds neighbours (left, right, up, down) of cell `key` in the n x n grid
    adj = []
    row, col = key // n, key % n
if (col - 1) >= 0:
adj.append((row*n) + (col - 1))
if (col + 1) < n:
adj.append((row*n) + (col + 1))
if (row - 1) >= 0:
adj.append((row-1)*n + col)
if (row + 1) < n:
adj.append((row + 1)*n + col)
return adj
def find_path(source, arr, n):
visited = [False] * pow(n, 2)
queue = []
queue.append(source)
while queue:
popped = queue.pop(0)
visited[popped] = True
for i in adjacent(popped, arr, n):
if arr[i] == 2:
return 1
elif arr[i] == 3 and visited[i] == False:
queue.append(i)
return 0
t = int(input())
for i in range(t):
n = int(input())
arr = map(int, input().split())
x = list(arr)
source = x.index(1)
print (find_path(source, x, n))
| true |
a708ef2274312d404d761eba89b4a514502f6863
|
Python
|
bhaktichiplunkar/readingProduct
|
/project1_TextToSpeech.py
|
UTF-8
| 5,223 | 2.609375 | 3 |
[] |
no_license
|
# Import the required module for text to speech conversion
# This module is imported so that we can play the converted audio
from flask import Flask, render_template, request, redirect, jsonify, make_response, flash
from gtts import gTTS
from pygame import mixer
from flask_sqlalchemy import SQLAlchemy
import os
import pyttsx3
import json
from datetime import datetime
converter = pyttsx3.init()
converter.setProperty('rate', 150)
local_server=True
with open("config.json", "r")as c:
params=json.load(c)["params"]
app = Flask(__name__ , template_folder='template')
if local_server:
app.config['SQLALCHEMY_DATABASE_URI'] = params["local_uri"]
else:
app.config['SQLALCHEMY_DATABASE_URI'] = params["production_uri"]
db = SQLAlchemy(app)
#database name:contact
#database column:id,name,email,phone,message,date
class Contact(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
email = db.Column(db.String(120), nullable=False)
phone = db.Column(db.String(12), nullable=False)
message = db.Column(db.String(120), nullable=False)
date = db.Column(db.String(120), nullable=True)
class Content(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(80), nullable=False)
subtitle = db.Column(db.String(120), nullable=False)
word="No Text-to-speek"
@app.route("/")
def root():
return render_template('index.html')
count=0
def increment():
global count
count +=1
return count
def get_filename():
global c
c=increment()
filename = 'sample' + str(c) + '.mp3'
return filename
def del_file():
global c
if c>1:
file_no_del=c-1
file_name_to_del= 'sample' + str(file_no_del) + '.mp3'
os.remove(file_name_to_del)
mixer.init()
@app.route("/upload_data", methods=['GET','POST'])
def upload_data():
if request.method==('POST'):
heading=request.form.get("title")
subheading=request.form.get("subtitle")
if heading and subheading:
entry=Content(title=heading, subtitle =subheading)
db.session.add(entry)
db.session.commit()
return render_template('index.html')
@app.route("/get_word", methods=['GET','POST'])
def get_word():
    word = request.form.get("word")
    dict1 = {}  # defined up front so the fallback return below never hits an undefined name
    if word:
        s = word.split(" ")
i=0
for item in s:
dict1[i]=item
i+=1
audio = gTTS(text=word, lang='en-uk', slow=False)
file=get_filename()
audio.save(file)
mixer.music.load(file)
mixer.music.play()
del_file()
return render_template('showSpan.html',dictnory=dict1)
return render_template('showSpan.html',dictnory=dict1)
# return jsonify({'success':'True'},{'string':word})
# return jsonify({'error':'missing data'})
is_paused = False
@app.route("/pause", methods=['GET', 'POST'])
def pause():
    # track playback state in a separate flag so the view function name is not overwritten
    global is_paused
    is_paused = True
    mixer.music.pause()
    return jsonify({'success': 'paused'})
@app.route("/unpause", methods=['GET', 'POST'])
def unpause():
    global is_paused
    if is_paused:
        mixer.music.unpause()
        is_paused = False
    return jsonify({'success': 'unpaused'})
reset =False
@app.route("/reset", methods=['GET', 'POST'])
def reset():
global reset
reset=True
mixer.music.stop()
return render_template('index.html')
@app.route("/about")
def about():
return render_template('about.html')
#database column:id,name,email,phone,message,date#database
@app.route("/contact",methods=['GET', 'POST'])
def contact():
if request.method==('POST'):
uname = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
if uname and email and phone and message:
entry = Contact(name=uname, email=email, phone=phone, message=message, date=datetime.now())
db.session.add(entry)
db.session.commit()
return render_template('contact.html',params=params)
return render_template('contact.html',params=params)
@app.route("/get_title",methods=['GET','POST'])
def get_title():
if request.method==('POST'):
data = Content.query.all()
return render_template('get_title.html',data=data)
@app.route("/get_content<string:id>",methods=['GET','POST'])
def content(id):
content=Content.query.filter_by(id=id).first()
if (request.method == 'POST'):
global word
word = request.form.get("word")
s=word.split(" ")
dict1={}
i=0
for item in s:
dict1[i]=item
i+=1
audio = gTTS(text=word, lang='en-uk', slow=False)
file=get_filename()
audio.save(file)
mixer.music.load(file)
mixer.music.play()
del_file()
return render_template('showSpan.html',dictnory=dict1)
return render_template('get_content.html',content=content,id='id')
app.run(use_reloader = True,debug=True)
| true |
5d375f832a7743f98cc465e44117731721c25b1d
|
Python
|
tomamic/paradigmi
|
/python/e4_2013_3_tictactoe.py
|
UTF-8
| 2,947 | 3.796875 | 4 |
[] |
no_license
|
#!/usr/bin/env python3
'''
@author Michele Tomaiuolo - http://www.ce.unipr.it/people/tomamic
@license This software is free - http://www.gnu.org/licenses/gpl.html
'''
import sys
class TicTacToe:
NONE = '.'
PLR1 = 'X'
PLR2 = 'O'
DRAW = 'None'
OUT = '!'
def __init__(self, side=3):
self._side = side
self._matrix = [TicTacToe.NONE] * (self._side * self._side)
self.clear()
def clear(self):
for i in range(len(self._matrix)):
self._matrix[i] = TicTacToe.NONE
self._turn = 0
def play_at(self, x: int, y: int):
if self.get(x, y) == TicTacToe.NONE:
i = x + y * self._side
if self._turn % 2 == 0:
self._matrix[i] = TicTacToe.PLR1
else:
self._matrix[i] = TicTacToe.PLR2
self._turn += 1
def get(self, x: int, y: int) -> str:
if 0 <= x < self._side and 0 <= y < self._side:
return self._matrix[x + y * self._side]
else:
return TicTacToe.OUT
def _check_line(self, x: int, y: int, dx: int, dy: int) -> bool:
'''Check a single line, starting at (x, y) and
advancing for `side` steps in direction (dx, dy).
If a single player occupies all cells, he's won.'''
player = self.get(x, y)
if player == TicTacToe.NONE:
return False
for i in range(self._side):
if self.get(x + dx * i, y + dy * i) != player:
return False
return True
def winner(self) -> str:
'''Check all rows, columns and diagonals.
Otherwise, check if the game is tied.'''
for x in range(self._side):
if self._check_line(x, 0, 0, 1):
return self.get(x, 0)
for y in range(self._side):
if self._check_line(0, y, 1, 0):
return self.get(0, y)
if self._check_line(0, 0, 1, 1):
return self.get(0, 0)
if self._check_line(self._side - 1, 0, -1, 1):
return self.get(self._side - 1, 0)
if self._turn == self._side * self._side:
return TicTacToe.DRAW
return TicTacToe.NONE
def side(self) -> int:
return self._side
def __str__(self):
out = '' # Using a StringIO is more efficient
for y in range(self._side):
for x in range(self._side):
out += self._matrix[y * self._side + x]
out += '\n'
return out
def main():
game = TicTacToe(4)
print(game)
x = int(input('x? '))
y = int(input('y? '))
while x >= 0 and y >= 0:
game.play_at(x, y)
print(game)
winner = game.winner()
if winner != TicTacToe.NONE:
print('Game finished.', winner, 'has won!')
game.clear()
print(game)
x = int(input('x? '))
y = int(input('y? '))
if __name__ == '__main__':
main()
| true |
5663622b172a34ee7b3f85d7e33b03b0cdcbea81
|
Python
|
vpolyakov/stepik_tours
|
/tours/templatetags/tour_extras.py
|
UTF-8
| 715 | 3.125 | 3 |
[] |
no_license
|
from django import template
register = template.Library()
@register.filter(name='star_multiply')
def star_multiply(value, arg: str = '★', sep: str = ' ') -> str:
"""
Размножитель символов
:param value: количество символов в пакете
:param arg: повторяемый символ
:param sep: разделитель между символами
:return: пакет символов
"""
return sep.join(arg * int(value))
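# Example (sketch): star_multiply(3) returns '★ ★ ★'; in a template this is used as
# e.g. {{ tour.stars|star_multiply }} (variable name illustrative).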
@register.filter
def tour_min(dictionary, key):
return min([dictionary[i][key] for i in dictionary])
@register.filter
def tour_max(dictionary, key):
return max([dictionary[i][key] for i in dictionary])
| true |
9f67f346ff822444f5b993983f02054d48818e36
|
Python
|
krupalvora/DSA
|
/12merge2sort.py
|
UTF-8
| 438 | 2.859375 | 3 |
[] |
no_license
|
l1=[1,3,5]
l2=[0,2,4]
for i in l2:
l1.append(i)
for i in range(len(l1)):
for j in range(len(l1)):
if l1[i]<l1[j]:
            l1[i],l1[j]=l1[j],l1[i]
print(l1)
#new
def merge(self, nums1, nums2, n, m):
# code here
for i in range(n):
for j in range(m):
if nums1[i]>nums2[j]:
nums2[j],nums1[i]=nums1[i],nums2[j]
nums1.sort()
nums2.sort()
| true |
b2d586ef92b9b1830a1acc067076aa74a494b49f
|
Python
|
chdzq/crawldictwebapp-flask
|
/webapp/exception/error.py
|
UTF-8
| 427 | 2.96875 | 3 |
[] |
no_license
|
# encoding: utf-8
class CustomError(Exception):
def __init__(self, error_code, message, data=None):
self._error_code = error_code
self._message = message
self._data = data
@property
def error_code(self):
return self._error_code
@property
def message(self):
return self._message if self._message else ""
@property
def data(self):
return self._data
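# Usage sketch (error code and payload below are illustrative, not defined by this module):
#   raise CustomError(40001, 'word not found', data={'word': 'example'})
# a handler can then read err.error_code, err.message and err.data.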
| true |
62f5c9824f47aaa427e169319347b165af697910
|
Python
|
gabriellaec/desoft-analise-exercicios
|
/backup/user_226/ch24_2020_09_09_20_46_41_670214.py
|
UTF-8
| 165 | 3.09375 | 3 |
[] |
no_license
|
def calcula_aumento(salario):
    salario = float(salario)
if salario > 1250.00:
return salario * 1.10
elif salario <= 1250.00:
return salario * 1.15
| true |
25e418b61d7101ce16d81f8d1f4df7b409cb7d5a
|
Python
|
4or5trees/azure-iot-starter-kits
|
/seeed/2-image-classifier/modules/image-classifier-python/main.py
|
UTF-8
| 1,455 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
import argparse
import imageclassifier
from flask import Flask, request, jsonify
classifier = None
# Start web server
application = Flask(__name__)
@application.route('/classify', methods=['POST'])
def classify_image():
file = request.files['image']
result = classifier.run_inference_on_image(file)
return jsonify(result)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
default='/model/mobilenet_v2_1.0_224_frozen.pb',
help='Path to frozen GraphDef model'
)
parser.add_argument(
'--label_path',
type=str,
default='/model/imagenet_lsvrc_2015_synsets.txt',
help='Path to labels (node ids) used in the model.'
)
parser.add_argument(
'--label_metadata_path',
type=str,
default='/model/imagenet_metadata.txt',
help='Path to file with node ids -> human readable string.'
)
parser.add_argument(
'--port',
type=int,
default=8080,
help='Port for http server to listen on.'
)
parser.add_argument(
'--num_top_predictions',
type=int,
default=3,
help='Return this many predictions.'
)
FLAGS, unparsed = parser.parse_known_args()
# Create MobileNet image classifier.
classifier = imageclassifier.ImageClassifier(FLAGS)
application.run(host='0.0.0.0', port=FLAGS.port)
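# Example request once the server is running (sketch; host, port and file name are illustrative):
#   curl -X POST -F "image=@test.jpg" http://localhost:8080/classify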
| true |
6de3dc92424c3b7af4f8648ee3182d06b13d002b
|
Python
|
guocheng45/Projects
|
/GTExercises/PyInterface/Regisintfa.py
|
UTF-8
| 2,291 | 2.859375 | 3 |
[] |
no_license
|
#-*- coding: UTF-8 -*-
#————Note: urllib has a clever quirk: if `data` is not supplied the request goes out as GET; as soon as `data` is given, the method becomes POST.
#Requesting the API with POST: the parameters travel in the request body, which is somewhat safer.
import urllib,urllib2
class RegisIntfa:
    def CN_regist_username(self,Uname,gid): #note: the method needs self so it can be called as an instance method
        url = 'http://testapi.ktplay.cn:3011/2/user/account/login_by_nickname' #API endpoint URL
        body={"game_id":4398,"username":'',"password":'123456'} #parameters to send to the API
        body["game_id"]=gid
        body["username"]= Uname
        body=urllib.urlencode(body) #URL-encode the parameters
        request=urllib2.Request(url,body)# build a POST request to the url defined above, with the payload passed as data
        response = urllib2.urlopen(request) # send the request and get the response object
        apicontent = response.read() #read the response body with read()
        print apicontent #print what was read
        ret =apicontent.find('username') #positions start at 0; find() returns -1 when not found
print ret
return ret
# if ret >=0:
# return ret
# else:
# return
def EN_regist_username(self,Uname,gid):
        url = 'http://testapi.ktplay.com:4011/2/user/account/login_by_nickname' #API endpoint URL
        body={"game_id":4401,"username":'',"password":'123456'} #parameters to send to the API
        body["game_id"]=gid
        body["username"]=Uname
        body=urllib.urlencode(body) #URL-encode the parameters
        request=urllib2.Request(url,body)# build a POST request to the url defined above, with the payload passed as data
        response = urllib2.urlopen(request) # send the request and get the response object
        apicontent = response.read() #read the response body with read()
        print apicontent #print what was read
        ret =apicontent.find('username') #positions start at 0; find() returns -1 when not found
print ret
return ret
# names = 'gzc1'
# CN_regist_username(names,4398)
| true |
da275343ea5c5fa4f2653ca45a374f1f47988bac
|
Python
|
XJDKC/University-Code-Archive
|
/Course Design/Data Analysis Based on Big Data Platform/PythonMR/HdfsSort/mapper.py
|
UTF-8
| 179 | 3.015625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import sys
list1=[]
for line in sys.stdin:
line=line.strip()
words=line.split("\n")
list1.append(words[0])
for x in xrange(len(list1)):
print list1[x]
| true |
5d5235d741d8cfac9a4ba49d7cf30f2f8287b452
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03140/s889642214.py
|
UTF-8
| 265 | 3.03125 | 3 |
[] |
no_license
|
n = int(input())
s = [input() for _ in range(3)]
ans = 0
for v in zip(*s):
len_ = len(set(v))
ans += len_ - 1
# if len_ == 3:
# ans += 2
# elif len_ == 2:
# ans += 1
# else:
# # len_ == 1
# ans += 0
print(ans)
| true |
323a794b5b61c1a18106f54387cabdf369ee7d36
|
Python
|
ChiPhanThanh/Python-Basic
|
/Nhap 3 so va in ra thu tu tang dan.py
|
UTF-8
| 465 | 3.203125 | 3 |
[] |
no_license
|
#Read in three numbers a, b, c
a = int(input(" Nhap vao gia tri a="));
b = int(input(" nhap vao gia trị b ="));
c = int(input(" nhap vao gia trị c ="));
if a <= b <= c:
print("%d %d %d" % (a, b, c))
elif a <= c <= b:
print("%d %d %d" % (a, c, b))
elif b <= a <= c:
print("%d %d %d" % (b, a, c))
elif b <= c <= a:
print("%d %d %d" % (b, c, a))
elif c <= a <= b:
print("%d %d %d" % (c, a, b))
else: # c <= b <= a
print("%d %d %d" % (c, b, a))
| true |
0c468cdd2fe649b9babff43109aaa01ddff460e4
|
Python
|
greyhill/pfeebles
|
/bin/mkdb.py
|
UTF-8
| 3,380 | 2.5625 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import sqlite3
import csv
import os
dbconn = sqlite3.connect('pfeebles.sqlite3')
dbconn.text_factory = str
curr = dbconn.cursor()
if not os.path.exists('spells.csv'):
raise ValueError('download spells.csv first!')
spell_col_names = ( \
'name', 'school', 'subschool', 'descriptor',
'spell_level', 'casting_time',
'components', 'costly_components',
'range', 'area', 'effect', 'targets', 'duration',
'dismissible', 'shapeable',
'saving_throw', 'spell_resistence',
'description',
'source',
'full_text',
'verbal', 'somatic', 'material', 'focus', 'divine_focus',
'sor', 'wiz', 'cleric', 'druid', 'ranger', 'bard', 'paladin', 'alchemist',
'summoner', 'witch', 'inquisitor', 'oracle', 'antipaladin', 'magus',
'adept', 'deity',
'sla_level',
'air', 'chaotic', 'cold', 'curse', 'darkness', 'death', 'disease', 'earth',
'electricity', 'emotion', 'evil', 'fear', 'fire', 'force', 'good',
'language_dependent', 'lawful', 'light', 'mind_affecting',
'pain', 'poison', 'shadow', 'sonic', 'water',
'id',
'material_costs',
'bloodline',
'patron',
'mythic_text',
'augmented',
'mythic'
)
feat_col_names = ( \
'id', 'name',
'type', 'description',
'prerequisites', 'prerequisite_feats',
'benefit', 'normal', 'special',
'source',
'fulltext',
'teamwork',
'critical',
'grit',
'style',
'performance',
'racial',
'companion_familiar',
'race_name',
'note',
'goal',
'completion_benefit',
'multiples',
'suggested_traits' )
trait_col_names = ( \
'name',
'type',
'category',
'prerequisites',
'pfs_legal',
'description',
'source',
'version' )
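# setup_table creates the table, reads the CSV and bulk-inserts every row;
# `translator` maps lower-cased CSV header names onto the column names above
# when the two differ (see the traits call at the bottom of this script).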
def setup_table(colnames, csvname, tablename, translator = {}):
curr.execute('''
CREATE TABLE %s
(%s)
''' % (tablename, ', '.join(colnames)))
data_csv = csv.reader(open(csvname, 'r'))
data_header = data_csv.next()
data_rtable = {}
for n, colname in enumerate(data_header):
print colname
if colname.lower() not in translator:
if colname.lower() in colnames:
data_rtable[colname.lower()] = n
else:
if translator[colname.lower()] in colnames:
data_rtable[translator[colname.lower()]] = n
def extract_values(row):
tr = [ row[data_rtable[colname]] for colname in colnames ]
print row[data_rtable['name']]
return tr
curr.executemany('insert into %s (%s) values (%s)' \
% ( tablename,
','.join(colnames),
', '.join(['?' for n in colnames])),
( extract_values(r) for r in data_csv ))
dbconn.commit()
setup_table(spell_col_names, 'spells.csv', 'spells')
setup_table(feat_col_names, 'feats.csv', 'feats')
setup_table(trait_col_names, 'traits.csv', 'traits',
translator = {'trait name':'name', 'prerequisite(s)':'prerequisites', \
'pfs legal':'pfs_legal'})
dbconn.close()
| true |
ee7a8d5f5b461166fe8751df0efd25c50696ecb6
|
Python
|
francisdmnc/Feb2018-PythonWorkshop
|
/Exer 1_Yaya, Francis Dominic S..py
|
UTF-8
| 417 | 4.0625 | 4 |
[] |
no_license
|
print "Each game console cost 22000"
money = input("How much money do you have in your account now? Php ")
y=(money)/22000
print "The number of game consoles with a price of Php 22000 you can buy is ", y
n = (money) - y*22000
print "After buying the consoles, you will have", n,"pesos left on your account"
e = 22000 - n
print "You will need an additional amount of at least", e,"pesos to buy a new console"
| true |
cecce1b055dc9cbe916d84ee791fe17c194b13af
|
Python
|
alluri1/algos_epi
|
/10_heaps/01_merge_sorted_arrays.py
|
UTF-8
| 705 | 3.90625 | 4 |
[] |
no_license
|
"""
Merge k sorted arrays
<3,5,7> <0,6> <0,6,28>
Approach 1 : merge sort
Approach 2 : use a min heap(size<=k) to keep current min elements from each array
Each node in the heap is a tuple (value, array_index, element_index)
: pop min element from min heap to add to output array
: push the element at next index of popped element in the same array
"""
import heapq as hq
def merge_k_sorted_arrays(sorted_arrays):
output = []
# add min/first element from each array into the heap
h = []
for array_index, current_array in enumerate(sorted_arrays):
        if len(current_array) > 0:
            hq.heappush(h, (current_array[0], array_index, 0))
    # repeatedly pop the current minimum and push its successor from the same array
    while h:
        value, array_index, element_index = hq.heappop(h)
        output.append(value)
        next_index = element_index + 1
        if next_index < len(sorted_arrays[array_index]):
            hq.heappush(h, (sorted_arrays[array_index][next_index], array_index, next_index))
    return output
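# Quick check (sketch) using the arrays from the docstring above:
if __name__ == "__main__":
    print(merge_k_sorted_arrays([[3, 5, 7], [0, 6], [0, 6, 28]]))
    # expected output: [0, 0, 3, 5, 6, 6, 7, 28]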
| true |
041020d5afd55d7ba7229ab5b4b32cac146d297a
|
Python
|
jsoma/openrecipes
|
/scrapy_proj/openrecipes/pipelines.py
|
UTF-8
| 2,697 | 2.953125 | 3 |
[] |
no_license
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/0.16/topics/item-pipeline.html
from scrapy.exceptions import DropItem
import hashlib
import bleach
class MakestringsPipeline(object):
"""
This processes all the properties of the RecipeItems, all of which are
lists, and turns them into strings
"""
def process_item(self, item, spider):
if item.get('ingredients', False):
for k, v in item.iteritems():
if k == 'ingredients':
# with ingredients, we want to separate each entry with a
# newline character
item[k] = "\n".join(v)
else:
# otherwise just smash them together with nothing between.
# We expect these to always just be lists with 1 or 0
# elements, so it effectively converts the list into a
# string
item[k] = "".join(v)
# Use Bleach to strip all HTML tags. The tags could be a source
# of code injection, and it's generally not safe to keep them.
# We may consider storing a whitelisted subset in special
# properties for the sake of presentation.
item[k] = bleach.clean(item[k], tags=[], attributes={},
styles=[], strip=True)
return item
else:
# if ingredients is not present, we say this is not a RecipeItem
# and drop it
raise DropItem("Missing ingredients in %s" % item)
class DuplicaterecipePipeline(object):
"""
This tries to avoid grabbing duplicates within the same session.
Note that it does not persist between crawls, so it won't reject duplicates
captured in earlier crawl sessions.
"""
def __init__(self):
# initialize ids_seen to empty
self.ids_seen = set()
def process_item(self, item, spider):
# create a string that's just a concatenation of name & url
base = "%s%s" % (item['name'].encode('utf-8'),
item['url'].encode('utf-8'))
# generate an ID based on that string
hash_id = hashlib.md5(base).hexdigest()
# check if this ID already has been processed
if hash_id in self.ids_seen:
#if so, raise this exception that drops (ignores) this item
raise DropItem("Duplicate name/url: %s" % base)
else:
# otherwise add the has to the list of seen IDs
self.ids_seen.add(hash_id)
return item
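# For reference (sketch): the dedup key above is just the md5 hex digest of the
# concatenated name and url, e.g. hashlib.md5("Pancakes" + "http://example.com/p").hexdigest(),
# so the same recipe scraped twice in one run maps to the same id and gets dropped.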
| true |
806b84ab640f6253a1169df07b85b74fc5514387
|
Python
|
yashagrawal5757/FraudDetection
|
/fraud.py
|
UTF-8
| 18,966 | 2.984375 | 3 |
[
"MIT"
] |
permissive
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation,Dropout
from tensorflow.keras.callbacks import EarlyStopping
from imblearn.under_sampling import CondensedNearestNeighbour
df = pd.read_csv('frauddata.csv')
df.columns
df = df.rename(columns={'nameOrig':'senderid','oldbalanceOrg':'senderoldbal'
,'newbalanceOrig':'sendernewbal','nameDest':'destid',
'oldbalanceDest':'destoldbal','newbalanceDest':'destnewbal'
})
df.info()
summary = df.describe()
df.dtypes # 3 string values.Only 'type' is categorical value
df.isnull().sum() #No NaN values.
sns.countplot(x='type',data=df)
#-----------------------------------------------------------------------
#VISUALIZATION to understand data columns
sns.boxplot(x=df['amount'])
sns.lineplot(x='step',y='amount',data=df)
#EDA--------------------------------------------------------------------
# 16 transfers where amount transacted was 0-> strange
zero_amount = df[df['amount']==0] # FRAUD
sns.countplot(x='isFraud',data=df) #highly imbalanced data
len(df[df['isFraud']==1])
#Total frauds=8213
8213/(df.shape[0]) # 0.12% of cases are fraud
# type of fraud transfers
df[df['isFraud']==1].groupby(by='type')['type'].value_counts()
# There are only 2 types of payment for all fraud cases- CASHOUT AND TRANSFER.
#4116 CASHOUTS AND 4097 TRANSFERS
frauds = df[df.isFraud==1]
sns.countplot(x='type',data=frauds)
#For 4097 cases fraud is committed by first transferring out funds to # another account and subsequently cashing it out. 19 cases involve #direct cash out
notfrauds = df[df.isFraud==0]
# Checking if dest of transfers act as sender for cashouts which is #expected in a transfer first then cash out fraud
fraudtransfer = frauds[frauds.type=='TRANSFER']
fraudcashout = frauds[frauds.type=='CASH_OUT']
fraudtransfer.destid.isin (fraudcashout.senderid).any()
# This is weird that dest of transfer is never a sender in cashout for frauds #no pattern here-> we can drop senderid and destid
#-----------------------------------------------------------------------
#eda on isflaggedfraud
flaggedfraud = df[df.isFlaggedFraud==1]
sns.countplot(x='isFlaggedFraud',data=df)
# 16rows
#only transfer types
# we need to see if there are any patterns for flagging fraud
#is flagged fraud is said to be set according to kaggle's data description when amt>200,000.We need to check this
print("The number of times when isFlaggedfraud is not set,\
despite the amount being transacted was >200,000 = {}".format(
len(df[(df.isFlaggedFraud==0) & (df.amount>200000)])))
#This shows that isflaggedfraud is not a proper right variable
print("Maxm amount transacted when isflagged was not set ={}".format(
max(df[(df.isFlaggedFraud==0) & (df.amount>200000)]['amount'])))
#92445516 not flagged fraud-> bad variable
#Moreover sender and recipient's old and new balance remained same. This is # because maybe these transactions were halted by banks.WE shall check if #there are cases where old and new balance is same, yet not flagged as fraud
df[(df.isFlaggedFraud==0) & (df['type']=='TRANSFER') & (df['destoldbal']== 0) & (df['destnewbal']== 0)]
#4158 rows-> Those are not flagged fraud-> This is not a definite pattern
#Are the people who have been flagged fraud transacted any other time
notflaggedfraud = df[df.isFlaggedFraud==0]
flaggedfraud['senderid'].isin (pd.concat([notflaggedfraud['senderid'],notflaggedfraud['destid']])).any()
# False means no flagged fraud originator ever transacted. It was only one time he transacted
flaggedfraud['destid'].isin (pd.concat([notflaggedfraud['senderid'],notflaggedfraud['destid']]))
#True means a flagged recipient have transacted more than once
#index 2736446 and index 3247297
notflaggedfraud.iloc[2736446]
notflaggedfraud.iloc[3247297]
# This person has conducted a genuine transaction as well-> impt data point
#since there are 16 rows of isflagged and no definite pattern can be found by our eda,we will drop this column
#-----------------------------------------------------------------------
#eda on MERCHANTS
#NOW WE FIND INFO ABOUT rows containing sender or receiver name starting with #M.These are the merchants.we see the type of payment they are involved in #and if they can be involved in any fraud
merchants =df[df.senderid.str.contains('M')]
# merchants are never senders
merchants =df[df.destid.str.contains('M')]
#merchants have always been recipients
merchants['type'].nunique()
merchants['type'].unique()
# all the merchants have been involved in payment type
merchants[merchants['isFraud']==1]
# empty dataframe means merchants are not involved in any fraud. We can safely drop these merchant rows
#--------------------------------------------------------------------------
#some other eda
# check if there are cases where amount sent>senderoldbalance as this should not be possible
df[df.amount>df.senderoldbal]
##4 million rows have been incorrectly calculated
# check if there are cases where amount received>destnewdbalance as this should not be possible
df[df.amount>df.destnewbal]
# again 2.6 million incorrect calculations
#checking occurences where amt>0 but destold and new balance are both 0
df[(df['amount']>0) & (df['destoldbal']==0) &(df['destnewbal'] == 0) &
(df['isFraud']==1)]
4070/len(frauds)
#50% of fraud transactions see old and new dest bal same. We cant impute these values
df[(df['amount']>0) & (df['destoldbal']==0) &(df['destnewbal'] == 0) &
(df['isFraud']==0)]
2313206 /len(notfrauds) # 36% rows
#checking occurences where amt>0 but senderold and new balance are both 0
df[(df['amount']>0) & (df['senderoldbal']==0) &(df['sendernewbal'] == 0) &
(df['isFraud']==1)]
25/len(frauds) #0.3% cases
df[(df['amount']>0) & (df['senderoldbal']==0) &(df['sendernewbal'] == 0) &
(df['isFraud']==0)]
2088944/len(notfrauds)
#32% rows
# for fraud cases, dest balance remains 0 most of the times, but thats not the case for sender in a fraud cases.
#-----------------------------------------------------------------------
#making variables sendererror and desterror to determine error noticed in a transaction
df['sendererror'] = df['senderoldbal']+df['amount']-df['sendernewbal']
df['desterror'] = df['destoldbal']+df['amount']-df['destnewbal']
head = df.head(50)
#making variables from step- hours,day,dayofweek
num_hours = 24
frauds['hour'] = frauds.step % num_hours
notfrauds['hour'] = notfrauds.step % num_hours
df['hour'] = df.step % num_hours
list=[]
for i in range(0,len(frauds)):
step = frauds.iloc[i].step
list.append(np.ceil(step/num_hours))
frauds['day']=list
frauds['dayofweek'] = frauds['day']%7
# result from 0->6 where 0 can be
#any day. if 0 was monday, 1 would be tuesday but if 0 is tue , 1 is wed
plt.hist(x='day',data=frauds)
# no definite pattern based off the day
plt.hist(x='hour',data=frauds)
# no definite pattern based off hour
plt.hist(x='dayofweek',data=frauds)
# no definite pattern based off dayofweek
#-----------------------------------------------------------------------
#visualization
sns.scatterplot(y='amount',x='step',data=df,hue='isFraud')
#heatmap
plt.figure(figsize=(8,5))
sns.heatmap(df.corr(),annot=True)
#-----------------------------------------------------------------------
#DATA PREPROCESSING
df2 = df.copy()
#since frauds are only for type=cashout or transfer acc to analysis
X = df.loc[(df.type == 'TRANSFER') | (df.type == 'CASH_OUT')]
#SINCE TYPE=PAYMENT IS NOT INCLUDED ALL MERCHANTS HAVE #BEEN DROPPED
#Acc to analysis we dont consider flagged fraud for dependent variable
y = X['isFraud']
#DROP COLUMNS NOT NECESSARY
X = X.drop(['senderid', 'destid', 'isFlaggedFraud'], axis = 1)
X = X.drop('isFraud',axis=1)
# remove dep variable from matrix of features
#check proportion of fraud data
sns.countplot(y,data=y) #imbalanced data
y[y==1] #8213 frauds
8213/len(y) # 0.3% fraud cases
#_----------------------------------------------------------------------
#HANDLING MISSING DATA
Xfraud = X.loc[y == 1] #All fraud cases
XnonFraud = X.loc[y == 0] # all non fraud cases
X[(X['amount']!=0) & (X['destoldbal']==0) &(X['destnewbal'] == 0)]
X[(X['amount']!=0) & (X['senderoldbal']==0) & (X['sendernewbal'] == 0)]
1308566/len(X)
#Around 47% of senders have 0 before and after values. Since 47% is a big value we wont impute it.Rather we can replace 0 by -1 #which will give a clear distinction and also it will be good for our model
#lets see how many are fraud in these 47%rows
index = X[(X['amount']!=0) & (X['senderoldbal']==0) & (X['sendernewbal'] == 0) ].index
li=[]
li.append(y.loc[index])
li[0].value_counts()
# only 25 cases are fraud, maxm cases are genuine for above pattern
X.loc[(X.senderoldbal == 0) & (X.sendernewbal == 0) & (X.amount != 0), \
['senderoldbal', 'sendernewbal']] = - 1
X[X['senderoldbal']==-1]
X[X['sendernewbal']==-1].head()
#-----------------------------------------------------------------------
#Categorical Values(ENCODING)
X.dtypes #Type datatype is object
X.loc[X.type == 'TRANSFER','type' ] = 0 #TRANSFER =0
X.loc[X.type == 'CASH_OUT', 'type'] = 1 # CASHOUT =1
X.dtypes #Type datatype is int
#----------------------------------------------------------------------
#TRAIN TEST SPLITTING
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test= train_test_split(X,y,test_size=0.3,random_state=0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#-----------------------------------------------------------------------
#CLASSIFICATION TECHNIQUES
#1)LOGISTIC REGRESSION
from sklearn.linear_model import LogisticRegression
lclassifier = LogisticRegression(random_state = 0)
lclassifier.fit(X_train, y_train)
y_predl = lclassifier.predict(X_test)
from sklearn.metrics import confusion_matrix,r2_score,classification_report
cmL = confusion_matrix(y_test, y_predl)
cmL
r2_score(y_test,y_predl) #46%
report = classification_report(y_test,y_predl)
# as expected logistic regression works bad on imbalanced data
#-------------------------------------------------------------------------
#2)Random Forest
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
rfclassifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
rfclassifier.fit(X_train, y_train)
y_predrf = rfclassifier.predict(X_test)
rfcm = confusion_matrix(y_test, y_predrf)
rfcm
r2_score(y_test,y_predrf) # 81%
report = classification_report(y_test,y_predrf)
# changing the number of estimators
rfclassifier = RandomForestClassifier(n_estimators = 50, criterion = 'entropy', random_state = 0)
rfclassifier.fit(X_train, y_train)
y_predrf = rfclassifier.predict(X_test)
rfcm = confusion_matrix(y_test, y_predrf)
rfcm
r2_score(y_test,y_predrf) # 82%
report = classification_report(y_test,y_predrf)
#precision as well as recall both are good
#-----------------------------------------------------------------------
#implement xgboost
from xgboost import XGBClassifier
xgbclassifier = XGBClassifier()
xgbclassifier.fit(X_train, y_train)
y_predxgb = xgbclassifier.predict(X_test)
cmxgb = confusion_matrix(y_test, y_predxgb)
cmxgb
r2_score(y_test,y_predxgb) # 87%
reportxgb = classification_report(y_test,y_predxgb)
#-----------------------------------------------------------------------
#ANN
#converting dep variable in array for neural nets
y_train = y_train.values
y_test = y_test.values
model = Sequential()
#1st hidden layer
model.add(Dense(units=5,input_dim=10,activation='relu'))
model.add(Dense(units=30,activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=40,activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=1,activation='sigmoid'))
# For a binary classification problem
model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
model.fit(x=X_train,
y=y_train,
epochs=25,
validation_data=(X_test, y_test),
callbacks=[early_stop]
)
#convert the network's sigmoid outputs into class predictions before scoring
y_predann = (model.predict(X_test) > 0.5).astype(int).ravel()
print(r2_score(y_test,y_predann)) #71%
# Mediocre performance
#-----------------------------------------------------------------------
#evaluating kfold and rforest by stratified k fold cross validation
accuracyrf=[]
accuracyxgb=[]
#empty list to store accuracies of all folds
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=5)
skf.get_n_splits(X,y)
for train_index, test_index in skf.split(X, y):
print("TRAIN:", train_index, "TEST:", test_index)
X_train_strat, X_test_strat = X.iloc[train_index], X.iloc[test_index]
y_train_strat, y_test_strat = y.iloc[train_index], y.iloc[test_index]
from sklearn.metrics import accuracy_score
#evaluating rf classifier on 5 folds
rfclassifier.fit(X_train_strat,y_train_strat)
y_predrfstrat = rfclassifier.predict(X_test_strat)
score = accuracy_score(y_test_strat,y_predrfstrat)
accuracyrf.append(score)
np.array(accuracyrf).mean()
# Drop in accuracy for last fold. It means that the model must have started overfitting at some point
"""Feature importance"""
importance = rfclassifier.feature_importances_
feature_importance = pd.DataFrame(importance,columns=['importance'])
feature_importance = feature_importance.sort_values(by=['importance'],ascending=False)
colname=[]
for i in feature_importance.index:
colname.append(X_train_strat.columns[i])
feature_importance['colname']=colname
#step variable has the maximum contribution towards the results
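# Optional visual check of the importances (a sketch; assumes matplotlib is available,
# which the earlier plotting in this script suggests)
import matplotlib.pyplot as plt
plt.barh(feature_importance['colname'], feature_importance['importance'])
plt.xlabel('importance')
plt.title('Random forest feature importances')
plt.show()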
#evaluating xgboost classifier on 5 folds
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=5)
skf.get_n_splits(X,y)
for train_index, test_index in skf.split(X, y):
print("TRAIN:", train_index, "TEST:", test_index)
X_train_strat, X_test_strat = X.iloc[train_index], X.iloc[test_index]
y_train_strat, y_test_strat = y.iloc[train_index], y.iloc[test_index]
xgbclassifier.fit(X_train_strat,y_train_strat)
y_predxgbstrat = xgbclassifier.predict(X_test_strat)
score = accuracy_score(y_test_strat,y_predxgbstrat)
accuracyxgb.append(score)
np.array(accuracyxgb).mean()
#97% accuracy after all the folds-> really good accuracy, No overfitting
#-----------------------------------------------------------------------
#xgboost and random forest performed well as expected for imbalanced
#data. Lets try to balance it and then try again
""" TAKES LOT OF TIME
#Performing CONDENSED NEAREST NEIGBOURING(CNN) undersampling technique
undersample = CondensedNearestNeighbour(n_neighbors=1)
#undersampling only the train set and not test set since doing on both
# the model may perform well, but will do bad on new data which comesimbalanced
X1_train, y1_train = undersample.fit_resample(X_train, y_train)
"""
# we need to resort to random under sub sampling
X_train_df = pd.DataFrame(X_train)
y_train_df = pd.DataFrame(y_train)
fraudlen = len(y_train_df[y_train_df==1].dropna()) #5748 frauds in train set
#fetching indices of frauds
fraudindices = y_train_df==1
fraudindices = fraudindices[fraudindices==1]
fraudindices = fraudindices.dropna().index.values
# fetching indices of genuine transactions
genuineindices = y_train_df==0
genuineindices = genuineindices[genuineindices]
genuineindices = genuineindices.dropna().index.values
#randomly select indices from majority class
rand_genuineindex = np.random.choice(genuineindices, fraudlen, replace = False)
#concatenate fraud and random genuine index to make final training df
index = np.concatenate([fraudindices,rand_genuineindex])
#sampling to be done only on train set
X_train_balanced = X_train_df.iloc[index]
y_train_balanced = y_train_df.iloc[index]
sns.countplot(0,data=y_train_balanced) # balanced data
#-----------------------------------------------------------------------
#applying models now
#1)LOGISTIC REGRESSION
lclassifier.fit(X_train_balanced, y_train_balanced)
#prediction to be done on imbalanced test set
y_predl = lclassifier.predict(X_test)
cmL = confusion_matrix(y_test, y_predl)
cmL
r2_score(y_test,y_predl) #46%
report = classification_report(y_test,y_predl)
#-------------------------------------------------------------------------
#2)Random Forest
rfclassifier1 = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
rfclassifier1.fit(X_train_balanced, y_train_balanced)
y_predrf = rfclassifier1.predict(X_test)
rfcm = confusion_matrix(y_test, y_predrf)
rfcm
r2_score(y_test,y_predrf)
report = classification_report(y_test,y_predrf)
# Recall increased as we wanted but precision dropped badly
# changing the number of estimators
rfclassifier2 = RandomForestClassifier(n_estimators = 50, criterion = 'entropy', random_state = 0)
rfclassifier2.fit(X_train_balanced, y_train_balanced)
y_predrf = rfclassifier2.predict(X_test)
rfcm = confusion_matrix(y_test, y_predrf)
rfcm
r2_score(y_test,y_predrf)
report = classification_report(y_test,y_predrf)
#No improvement
#-----------------------------------------------------------------------
#3) implement xgboost
xgbclassifier.fit(X_train_balanced.values, y_train_balanced.values)
y_predxgb = xgbclassifier.predict(X_test)
cmxgb = confusion_matrix(y_test, y_predxgb)
cmxgb
r2_score(y_test,y_predxgb)
report = classification_report(y_test,y_predxgb)
#no improvement
#-----------------------------------------------------------------------
#After random undersampling, the model performed worse overall. Although recall
#increased, precision and F1 score dropped. Two likely reasons:
#A) We used random undersampling and could not run CNN undersampling, since it takes too long on such a large dataset
#B) Our test set is still imbalanced (deliberately, to mimic real-life data), so weaker scores are expected
# Results: the best performance on the imbalanced dataset came from the XGBoost model. Random forest gave a similar
# (slightly lower) r2 score on the imbalanced data, but model stability was much greater with XGBoost, so it is expected to generalise better to new data
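#-----------------------------------------------------------------------
# Hedged follow-up sketch: r2_score is an unusual metric for classification; ranking
# metrics such as ROC-AUC and PR-AUC are usually more informative on imbalanced data.
# This reuses whichever xgbclassifier fit is currently in scope (at this point, the
# one trained on the balanced subsample).
from sklearn.metrics import roc_auc_score, average_precision_score
proba_xgb = xgbclassifier.predict_proba(X_test)[:, 1]
print('ROC-AUC:', roc_auc_score(y_test, proba_xgb))
print('PR-AUC :', average_precision_score(y_test, proba_xgb))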
| true |
0d9cfa84b034fbb1ad6d97c6d80dba987e082fb1
|
Python
|
daejong123/wb-py-sdk
|
/wonderbits/WBHall.py
|
UTF-8
| 1,162 | 2.921875 | 3 |
[
"MIT"
] |
permissive
|
from .wbits import Wonderbits
def _format_str_type(x):
if isinstance(x, str):
x = str(x).replace('"', '\\"')
x = "\"" + x + "\""
return x
class Hall(Wonderbits):
def __init__(self, index = 1):
Wonderbits.__init__(self)
self.index = index
def register_magnetic(self, cb):
self._register_event('hall{}'.format(self.index), 'magnetic', cb)
def get_magnetic(self):
"""
        Get the magnetic field strength value.
        :rtype: float
"""
command = 'hall{}.get_magnetic()'.format(self.index)
return self._get_command(command)
def calibrate(self, block = None):
"""
        Calibrate the Hall sensor.
        Note: make sure no magnetic object is near the module during calibration,
        otherwise the result will be inaccurate. While calibrating, the module's
        indicator light turns yellow; wait until it turns blue, which means calibration is complete.
        :param block: blocking flag - False: non-blocking, True: blocking
"""
args = []
if block != None:
args.append(str(block))
command = 'hall{}.calibrate({})'.format(self.index, ",".join(args))
self._set_command(command)
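# Hypothetical usage sketch: requires a connected Wonderbits Hall module, so it is
# guarded and only runs when this file is executed directly. The callback signature
# passed to register_magnetic is an illustrative assumption.
if __name__ == '__main__':
    hall = Hall(1)
    print(hall.get_magnetic())          # read the current magnetic field strength
    hall.calibrate(block=True)          # blocking calibration
    hall.register_magnetic(lambda *args: print('magnetic event:', args))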
| true |
434879ed30780b0a41706c65901e81ad8618fab7
|
Python
|
JonasPapmeier/Masterarbeit
|
/statistics.py
|
UTF-8
| 3,856 | 3.046875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 30 17:49:57 2017
@author: brummli
"""
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from dataLoader import dataPrep
"""
Calculates standard statistics
:params:
data: numpy array containing the data of form (examples,timesteps,dims)
:return:
tuple containing (mean,var,min,max)
"""
def totalStats(data):
mean = np.mean(data)
var = np.var(data)
minimum = np.min(data)
maximum = np.max(data)
return (mean,var,minimum,maximum)
"""
Calculates example wise standard statistics
:params:
data: numpy array containing the data of form (examples,timesteps,dims)
:return:
tuple containing (mean,var,min,max)
"""
def exampleStats(data):
if len(data.shape) == 3:
mean = np.mean(data,axis=(1,2))
var = np.var(data,axis=(1,2))
minimum = np.min(data,axis=(1,2))
maximum = np.max(data,axis=(1,2))
elif len(data.shape) == 2:
mean = np.mean(data,axis=(1))
var = np.var(data,axis=(1))
minimum = np.min(data,axis=(1))
maximum = np.max(data,axis=(1))
else:
#Make sure there are values to return so that plotting doesn't produce an error
mean = 0
var = 0
minimum = 0
maximum = 0
return (mean,var,minimum,maximum)
"""
Calculates time wise standard statistics
:params:
data: numpy array containing the data of form (examples,timesteps,dims)
:return:
tuple containing (mean,var,min,max)
"""
def timeStats(data):
if len(data.shape) == 3:
mean = np.mean(data,axis=(0,2))
var = np.var(data,axis=(0,2))
minimum = np.min(data,axis=(0,2))
maximum = np.max(data,axis=(0,2))
else:
#Make sure there are values to return so that plotting doesn't produce an error
mean = 0
var = 0
minimum = 0
maximum = 0
return (mean,var,minimum,maximum)
"""
Calculates feature wise standard statistics
:params:
data: numpy array containing the data of form (examples,timesteps,dims)
:return:
tuple containing (mean,var,min,max)
"""
def featureStats(data):
if len(data.shape) == 3:
mean = np.mean(data,axis=(0,1))
var = np.var(data,axis=(0,1))
minimum = np.min(data,axis=(0,1))
maximum = np.max(data,axis=(0,1))
elif len(data.shape) == 2:
mean = np.mean(data,axis=(0))
var = np.var(data,axis=(0))
minimum = np.min(data,axis=(0))
maximum = np.max(data,axis=(0))
else:
#Make sure there are values to return so that plotting doesn't produce an error
mean = 0
var = 0
minimum = 0
maximum = 0
return (mean,var,minimum,maximum)
def producePlot(func,data,descString,log=True):
processed = func(data)
fig = plt.figure()
if log:
plt.yscale('log')
plt.plot(processed[0],'go-',label='mean')
plt.plot(processed[1],'ro-',label='var')
plt.plot(processed[2],'bo-',label='min')
plt.plot(processed[3],'ko-',label='max')
plt.legend()
plt.savefig('stats/'+descString+'_'+func.__name__+'.png')
plt.close(fig)
def plotWrapper(data,descString,log=True):
producePlot(totalStats,data,descString,log=log)
producePlot(exampleStats,data,descString,log=log)
producePlot(timeStats,data,descString,log=log)
producePlot(featureStats,data,descString,log=log)
if __name__ == '__main__':
loader = dataPrep()
dev_data_x,dev_data_y = loader.prepareDevTestSet(loader.devData)
train_data_x,train_data_y = loader.prepareTrainSet(loader.augment(loader.trainData,0.8,1.2,3,0.8,1.2,3))
fig = plt.figure(1)
plt.boxplot(np.mean(np.mean(train_data_x,axis=1),axis=1))
fig.show()
fig = plt.figure(2)
plt.boxplot(np.mean(train_data_x,axis=1).T)
fig.show()
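    # Hedged addition: plotWrapper is defined above but never invoked here; a call like
    # the following would write the four summary plots (producePlot saves into 'stats/',
    # so that directory is assumed to exist).
    # plotWrapper(train_data_x, 'train_data_x')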
| true |
a3d2feaa05f81889538470b940fcbe565f133cb2
|
Python
|
callmekungfu/daily
|
/interviewcake/hashing-and-hashtables/practices/in_flight_entertainment.py
|
UTF-8
| 1,942 | 4.15625 | 4 |
[
"MIT"
] |
permissive
|
'''
You've built an inflight entertainment system with on-demand movie streaming.
Users on longer flights like to start a second movie right when their first one ends,
but they complain that the plane usually lands before they can see the ending.
So you're building a feature for choosing two movies whose total runtimes will
equal the exact flight length.
Write a function that takes an integer flight_length (in minutes) and a list of
integers movie_lengths (in minutes) and returns a boolean indicating whether there
are two numbers in movie_lengths whose sum equals flight_length.
When building your function:
- Assume your users will watch exactly two movies
- Don't make your users watch the same movie twice
- Optimize for runtime over memory
'''
# My solution
# Build a dictionary by iterating through the array
# Then check every element in the list until one satisfies the requirements
# We can do this in O(n) time, where n is the length of movie_lengths.
def has_two_movies_with_good_runtime(flight_length, movie_lengths):
movie_map = {}
# Build Map
for length in movie_lengths:
movie_map[length] = movie_map[length] + 1 if length in movie_map else 1
# Check for valid cases
    for movie in movie_lengths:
        minutes_needed = flight_length - movie
        if minutes_needed <= 0:
            # a single movie already fills or exceeds the flight, so it can't be part of a valid pair
            continue
        if (minutes_needed in movie_map) and (minutes_needed != movie or movie_map[movie] > 1):
            return True
    return False
# Interview cake solution
def can_two_movies_fill_flight(movie_lengths, flight_length):
# Movie lengths we've seen so far
movie_lengths_seen = set()
for first_movie_length in movie_lengths:
matching_second_movie_length = flight_length - first_movie_length
if matching_second_movie_length in movie_lengths_seen:
return True
movie_lengths_seen.add(first_movie_length)
# We never found a match, so return False
return False
print(has_two_movies_with_good_runtime(40, [20, 20, 40]))
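# A few hypothetical sanity checks for the interview cake variant (note its argument
# order is movie_lengths first, then flight_length):
print(can_two_movies_fill_flight([20, 20, 40], 40))   # True: 20 + 20
print(can_two_movies_fill_flight([20, 40], 40))       # False: would require watching the 20-minute movie twice
print(can_two_movies_fill_flight([10, 30, 50], 60))   # True: 10 + 50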
| true |
0a4367ccd5ad9353f2652b56b73a5ee1a51b4568
|
Python
|
anhbh/cousera
|
/py4e/py_test_time.py
|
UTF-8
| 913 | 3.265625 | 3 |
[] |
no_license
|
import time
stat1=dict()
stat2=dict()
#numbers_sizes = (i*10**exp for exp in range(4, 8, 1) for i in range(1, 11, 5))
numbers_sizes = [10**exp for exp in range(4, 10, 1)]
print(str(numbers_sizes))
for input in numbers_sizes:
# prog 1
start_time=time.time()
cube_numbers=[]
for n in range(0,input):
if n % 2 == 1:
cube_numbers.append(n**3)
process_time=time.time()-start_time
print('Process1 time for', input, '\tis', time.time()-start_time)
stat1[str(input)]=process_time
# prog 2
cube_numbers=[]
start_time=time.time()
cube_numbers = [n**3 for n in range(0,input) if n%2 == 1]
process_time=time.time()-start_time
print('Process2 time for', input, '\tis', time.time()-start_time)
stat2[str(input)]=process_time
for i in numbers_sizes:
    # the dictionaries are keyed by str(input), so convert before looking up
    print('Input:',i,'\t',stat1[str(i)],'\t', stat2[str(i)],'\t', stat2[str(i)]-stat1[str(i)])
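# For steadier measurements one could use the timeit module instead of manual
# time.time() deltas; this is a hedged sketch with an arbitrary input size and
# repetition count.
import timeit
loop_stmt = ("cube_numbers=[]\n"
             "for n in range(0,10**6):\n"
             "    if n % 2 == 1:\n"
             "        cube_numbers.append(n**3)")
comp_stmt = "cube_numbers = [n**3 for n in range(0,10**6) if n%2 == 1]"
print('loop          :', timeit.timeit(loop_stmt, number=3))
print('comprehension :', timeit.timeit(comp_stmt, number=3))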
| true |
85df58ca59424c34c2ffa3747bb0e986112a376f
|
Python
|
VNG-Realisatie/gemma-zaken-demo
|
/src/zac/demo/mijngemeente/management/commands/debug_runconsumer.py
|
UTF-8
| 2,777 | 2.53125 | 3 |
[] |
no_license
|
import datetime
from django.core.management.base import BaseCommand
import pika
from zac.demo.models import SiteConfiguration
class Command(BaseCommand):
"""
Example:
$ ./manage.py runconsumer foo.bar.*'
"""
help = 'Start consumer that connects to an AMQP server to listen for notifications.'
def add_arguments(self, parser):
parser.add_argument(
'filters',
nargs='*',
type=str,
help='Een of meer filters waar berichten voor ontvangen moeten worden.'
)
def handle(self, *args, **options):
# Reading guide:
#
# * A producer is a user application that sends messages (ie. ZRC).
# * An exchange is where messages are sent to (ie. zaken).
# * A queue is a buffer that stores (relevant) messages (ie. zaken with
# zaaktype X).
# * A consumer is a user application that receives messages (ie. this
# demo: Mijn Gemeente).
filters = options.get('filters')
if len(filters) == 0:
            binding_keys = ['#']  # All messages (a list, so the loop below binds it as a single key)
else:
binding_keys = filters
# Grab configuration
config = SiteConfiguration.get_solo()
nc_host = 'localhost' # config.nc_host
nc_port = 5672 # config.nc_port
nc_exchange = 'zaken' # config.nc_exchange
# Set up connection and channel
connection = pika.BlockingConnection(pika.ConnectionParameters(host=nc_host, port=nc_port))
channel = connection.channel()
# Make sure we attach to the correct exchange.
channel.exchange_declare(
exchange=nc_exchange,
exchange_type='topic'
)
# Create a (randomly named) queue just for me
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
for binding_key in binding_keys:
channel.queue_bind(
exchange=nc_exchange,
queue=queue_name,
routing_key=binding_key
)
filters = ', '.join(binding_keys)
self.stdout.write(
f'Starting consumer connected to amqp://{nc_host}:{nc_port}\n'
f'Listening on exchange "{nc_exchange}" with topic: {filters}\n'
f'Quit with CTRL-BREAK.'
)
def callback(ch, method, properties, body):
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%I')
self.stdout.write(f'[{now}] Ontvangen met kenmerk "{method.routing_key}": {body}')
channel.basic_consume(
callback,
queue=queue_name,
no_ack=True
)
try:
channel.start_consuming()
except KeyboardInterrupt:
return
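# Illustrative producer counterpart (a sketch, not part of this management command):
# publishing a single test notification to the same topic exchange gives the consumer
# above something to receive. The routing key and body below are assumptions.
#
# import pika
# connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', port=5672))
# channel = connection.channel()
# channel.exchange_declare(exchange='zaken', exchange_type='topic')
# channel.basic_publish(exchange='zaken', routing_key='zaak.created', body='{"zaak": "demo"}')
# connection.close()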
| true |
ea81dcc5c1d17c05380454bca11d66efcff2c3c2
|
Python
|
shravan002/SIH-2019
|
/sih_fnode_Desktop/SIH_Edge_Node/Weather_download.py
|
UTF-8
| 463 | 2.578125 | 3 |
[] |
no_license
|
#import configparser
import requests
import json

def get_weather(url):
    """Fetch the forecast and return the parsed JSON, or None on error."""
    r = requests.get(url)
    if r.status_code != 200:
        print("Error occurred while fetching weather:", r.status_code)
        return None
    return r.json()

url = "https://api.openweathermap.org/data/2.5/forecast?id=3369157&appid=e141245f76fbb881dfba64a59a75ac71"
weather = get_weather(url)
if weather is not None:
    with open('weather.json', 'w') as f:
        json.dump(weather, f)
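# Hedged sketch: read the saved forecast back and print a few entries. Assumes the
# standard OpenWeatherMap 5-day forecast schema (a 'list' of entries with 'dt_txt'
# and 'main'['temp'], temperatures in Kelvin); adjust if the response differs.
if weather is not None:
    for entry in weather.get('list', [])[:3]:
        print(entry['dt_txt'], entry['main']['temp'])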
| true |
e399d913af98718223c1081fbc9a6c570023a8d8
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02629/s262585495.py
|
UTF-8
| 223 | 2.625 | 3 |
[] |
no_license
|
# Convert N to its base-26 letter name using a-z (1 -> a, 26 -> z, 27 -> aa, ...)
N=int(input())
c=0
while N-26**c>=0:
N-=26**c
c+=1
d=[0]*(c-1)
i=0
for i in range(c):
d.insert(c-1,N%26)
N=(N-d[i])//26
i+=1
e=[]
s=''
for i in range(2*c-1):
e.append(chr(97+d[i]))
s+=e[i]
print(s[c-1:2*c-1])
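# A more direct (hypothetical) rewrite of the same conversion, shown for comparison:
# repeatedly peel off the last letter with (n-1) % 26, then divide by 26.
def to_name(n):
    s = ''
    while n > 0:
        n -= 1
        s = chr(97 + n % 26) + s
        n //= 26
    return s
# equivalent usage: print(to_name(int(input())))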
| true |
4fadf6dfbd50a12290937a0b8b7c4e65bf0962da
|
Python
|
paul-mcnamee/ClipConcatenator
|
/DownloadTwitchClips.py
|
UTF-8
| 17,807 | 2.640625 | 3 |
[] |
no_license
|
import shutil
import glob
import base64
import json
import os
import datetime
import requests as re
import re as regex
import logging
import time
output_directory = 'C:/temp/'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# create a file handler
handler = logging.FileHandler(datetime.date.today().strftime('%Y-%m-%d') + '_downloads' + '.log')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
start_time = datetime.datetime.now()
num_downloaded_clips = 0
with open('twitch_headers.json') as json_data:
headers = json.load(json_data)
headers = headers[0]
def increase_downloaded_clip_count():
global num_downloaded_clips
num_downloaded_clips = num_downloaded_clips + 1
def parse_twitch_clip_url_response(content):
"""
parse the initial url that we get from the twitch API to get the mp4 download links
the first link is the highest or source quality
:param content: text response from the get request to parse through and find the clip download links
:return: url containing the mp4 download link
"""
# Examples:
# https://clips-media-assets.twitch.tv/vod-184480263-offset-8468.mp4
# https://clips-media-assets.twitch.tv/26560534848-offset-21472.mp4
# https://clips-media-assets.twitch.tv/26560534848.mp4
match = regex.findall(r'https\:\/\/clips-media-assets.twitch.tv\/\w*\-*\d+\-*\w*\-*\d*\.mp4', content)
if match.__len__() > 0:
# NOTE: the first one is always the highest quality
logger.info("found clip url: %s", match[0])
return match[0]
else:
return ""
def download(url, file_name):
"""
download the clip to a local folder
:param url: url which contains a clip download link (mp4 in most cases)
:param file_name: file name to generate and output content of the url to
:return: none
"""
os.makedirs(os.path.dirname(file_name), exist_ok=True)
logger.info("downloading %s from %s", file_name, url)
with open(file_name, "wb") as file:
response = re.get(url)
file.write(response.content)
def add_optional_query_params(url, channel, cursor, game_name, language, limit, period, trending):
"""
Not all of the parameters are required, and the behavior is different if some are omitted.
This whole thing is probably not necessary, but it's ok because it works right? Ship it.
:param url:
:param channel:
:param cursor:
:param game_name:
:param language:
:param limit:
:param period:
:param trending:
:return:
"""
new_url = url + "?"
if channel != "":
new_url = new_url + "channel=" + channel + "&"
if cursor != "":
new_url = new_url + "cursor=" + cursor + "&"
if game_name != "":
new_url = new_url + "game=" + game_name + "&"
if language != "":
new_url = new_url + "language=" + language + "&"
if limit != "":
new_url = new_url + "limit=" + limit + "&"
if period != "":
new_url = new_url + "period=" + period + "&"
if trending != "":
new_url = new_url + "trending=" + trending + "&"
return new_url
def delete_clips_from_list(clips, indices_to_delete):
for index in sorted(indices_to_delete, reverse=True):
del clips[index]
return clips
def delete_clips_with_close_times(current_clip, clips_to_check):
"""
Delete the duplicate clips for any given channel if the times are close
Multiple clips might be generated of the same content but they both may show up if they are popular
If there is a duplicate then we will keep the longest duration clip, and delete the shorter one
:param current_clip: clip that we are comparing
:param clips_to_check: list of clips that we will compare against
:return: list of clips without duplicates
"""
tolerance = 30
need_to_delete = False
index_to_delete = clips_to_check.index(current_clip)
indices_to_delete = set()
for index, clip_to_check in enumerate(clips_to_check):
if current_clip['slug'] == clip_to_check['slug']:
continue
if clip_to_check['vod'] is None:
indices_to_delete.add(index)
logger.info("clip_to_check['vod'] is none for %s", clip_to_check)
continue
if current_clip['vod'] is None:
logger.info("current_clip['vod'] is none for %s", current_clip)
indices_to_delete.add(index)
continue
current_clip_offset = current_clip['vod']['offset']
clip_to_check_offset = clip_to_check['vod']['offset']
min_offset = current_clip_offset - tolerance
max_offset = current_clip_offset + tolerance
if (min_offset <= clip_to_check_offset <= max_offset) \
and (clip_to_check['broadcaster']['display_name'] == current_clip['broadcaster']['display_name']):
logger.info("Similar clip offsets found, clip_to_check_offset=%s current_clip_offset=%s",
clip_to_check_offset, current_clip_offset)
if current_clip['views'] > clip_to_check['views']:
logger.info("current_clip['views']=%s clip_to_check['views']=%s deleting %s"
, current_clip['views'], clip_to_check['views'], clip_to_check)
index_to_delete = index
else:
logger.info("current_clip['views']=%s clip_to_check['views']=%s deleting %s"
, current_clip['views'], clip_to_check['views'], current_clip)
index_to_delete = clips_to_check.index(current_clip)
if index_to_delete not in indices_to_delete:
indices_to_delete.add(index_to_delete)
logger.info("indices_to_delete=%s", str(indices_to_delete))
return delete_clips_from_list(clips_to_check, indices_to_delete)
def delete_clips_with_low_views(clips_to_check, min_number_of_views):
"""
There are too many clips to encode, so we only want the really popular ones
therefore, we are removing the clips if the views are under a certain threshold.
:param min_number_of_views: minimum number of views required for clips to be downloaded
:param clips_to_check: clip array to look at to remove clips with low views
:return:
"""
indices_to_delete = set()
for index, clip_to_check in enumerate(clips_to_check):
if clip_to_check['views'] < min_number_of_views:
indices_to_delete.add(index)
return delete_clips_from_list(clips_to_check, indices_to_delete)
def delete_excess_clips(clips):
"""
We want to remove additional clips to minimize the amount of clips we need to download
Check the total length of clips that we will combine
Remove clips with the least number of views until the length is suitable
:param clips: list of clips to evaluate
:return:
"""
indices_to_delete = set()
combined_clip_time_seconds = 0
logger.info("finding excess clips to delete")
# sort clips in order of views
clips = sorted(clips, key=lambda k: k['views'], reverse=True)
# iterate through the list until the max length is reached (10 minutes)
for index, clip in enumerate(clips):
if combined_clip_time_seconds >= 600:
indices_to_delete.add(index)
continue
combined_clip_time_seconds = combined_clip_time_seconds + int(clip['duration'])
logger.info("combined_clip_time_seconds=%s", combined_clip_time_seconds)
logger.info("excess clip indices to delete=%s", str(indices_to_delete))
if combined_clip_time_seconds < 60:
logger.info("Not enough time in clips, returning nothing, combined_clip_time_seconds=%s"
, combined_clip_time_seconds)
clips = []
return delete_clips_from_list(clips, indices_to_delete)
def copy_existing_clip(clip, base_directory, path_to_copy_file, copy_clip_info=True, look_for_encoded_clip=False):
"""
Check if we already downloaded the same clip
Copy the clip to the new location
:param clip:
:param base_directory:
:param path_to_copy_file:
:param copy_clip_info:
:param look_for_encoded_clip:
:return:
"""
clip_exists = False
    # parenthesise the conditional so the slug is kept even when look_for_encoded_clip is False;
    # the original expression collapsed to "" in that case and matched every mp4 on disk
    slug_to_find = clip['slug'] + ("_encoded" if look_for_encoded_clip else "")
    res = [f for f in glob.iglob(base_directory + "/**/*.mp4", recursive=True)
           if slug_to_find in f]
if res.__len__() > 0:
# clip found as a duplicate already downloaded elsewhere
logger.info("Clip %s already exists at %s", str(clip['slug']), str(res[0]))
clip_exists = True
        res2 = [f for f in glob.iglob(os.path.dirname(path_to_copy_file) + "/**/*.mp4", recursive=True) if
                slug_to_find in f]
if not res2.__len__() > 0:
# clip is not copied to the current folder, copy the clip
logger.info("Found already downloaded file at %s copying file to %s", res[0], path_to_copy_file)
shutil.copy2(res[0], path_to_copy_file)
# also copy clip info
if copy_clip_info:
res3 = [f for f in glob.iglob(os.path.dirname(base_directory) + "/**/*.txt", recursive=True) if
str(clip['slug']) in f and 'clipInfo' in f]
if res3.__len__() > 0:
shutil.copy2(res3[0], os.path.dirname(path_to_copy_file))
return clip_exists
def get_clips_from_twitch(channel, cursor, game_name, language, limit, period, trending, category, game=''):
"""
Gets the clips from the twitch api for the given parameters
https://dev.twitch.tv/docs/v5/reference/clips#get-top-clips
:param channel: string Channel name. If this is specified, top clips for only this channel are returned; otherwise, top clips for all channels are returned. If both channel and game are specified, game is ignored.
:param cursor: string Tells the server where to start fetching the next set of results, in a multi-page response.
:param game_name: string Game name. (Game names can be retrieved with the Search Games endpoint.) If this is specified, top clips for only this game are returned; otherwise, top clips for all games are returned. If both channel and game are specified, game is ignored.
:param language: string Comma-separated list of languages, which constrains the languages of videos returned. Examples: es, en,es,th. If no language is specified, all languages are returned. Default: "". Maximum: 28 languages.
:param limit: long Maximum number of most-recent objects to return. Default: 10. Maximum: 100.
:param period: string The window of time to search for clips. Valid values: day, week, month, all. Default: week.
:param trending: boolean If true, the clips returned are ordered by popularity; otherwise, by viewcount. Default: false.
:param category: the type of clips we are getting combining together for the end video -- channel, game, etc.
:return:
"""
url = ''
try:
base_url = "https://api.twitch.tv/kraken/clips/top"
url = add_optional_query_params(base_url, channel, cursor, game_name, language, limit, period, trending)
response = re.get(url, headers=headers)
game_info_was_saved = False
if response.status_code == 200:
clips = response.json()['clips']
for index, clip in enumerate(clips):
logger.info("Attempting to remove duplicate clips from the retrieved list.")
clips = delete_clips_with_close_times(clip, clips)
clips = delete_clips_with_low_views(clips, 200)
clips = delete_excess_clips(clips)
for clip in clips:
clip_response_page = re.get(clip['url']).text
download_url = parse_twitch_clip_url_response(clip_response_page)
if download_url.__len__() > 0:
broadcaster_name = clip['broadcaster']['display_name']
if channel == '' and game_name == '':
broadcaster_name = "all_top_twitch"
elif channel == '' and game_name != '':
# some games have unsafe characters (CS:GO) so we have to do the encoding for names
# https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename
broadcaster_name = base64.urlsafe_b64encode(game_name.encode('ascii'))
output_path = output_directory + datetime.date.today().strftime('%Y-%m-%d') \
+ "/" + category + "/" + str(broadcaster_name) + "/"
os.makedirs(os.path.dirname(output_path), exist_ok=True)
if not game_info_was_saved and game != '':
logger.info("Saving game info for %s", game)
with open(output_path + 'game_info.txt', 'w', encoding='utf-8') as outfile:
json.dump(game, outfile)
game_info_was_saved = True
clip_file_name = output_path + str(clip['views']) + "_" + clip['slug'] + ".mp4"
if not copy_existing_clip(clip, output_directory, clip_file_name):
logger.info("Starting a clip download for %s", str(broadcaster_name))
download(download_url, clip_file_name)
increase_downloaded_clip_count()
logger.info("Dumping clip info for %s", str(broadcaster_name))
with open(output_path + "clipInfo_" + clip['slug'] + '.txt', 'w', encoding='utf-8') as outfile:
json.dump(clip, outfile)
logger.info("Waiting some time before attempting to download the next clip")
time.sleep(2)
else:
logger.info("Download url was empty for clip=%s", clip)
else:
logger.warning("Failed to get a valid response when attempting to retrieve clips"
", response=%s for url=%s", response, url)
    except Exception:
logger.warning("Failed to download a clip for url=%s", url)
def get_popular_games_list(number_of_games):
"""
Generate the list of games from twitch
:return: list of games
"""
url = "https://api.twitch.tv/kraken/games/top?limit=" + str(number_of_games)
response = re.get(url, headers=headers)
if response.status_code == 200:
return response.json()['top']
else:
logger.warning("failed to retrieve top games list with url=%s", url)
return ['']
def get_popular_channel_list():
# https://socialblade.com/twitch/top/100/followers
# TODO: we could use some api call to populate this, though I don't think it would really change all that much...
# would be good to get this list from a text file so we don't have to update this array
# return ['lirik']
# removed: 'riotgames', 'mlg_live', 'mlg', 'dreamhackcs', 'sgdq', 'gamesdonequick', 'faceittv', 'Faceit', 'eleaguetv', 'thenadeshot', 'twitch', 'e3', 'nalcs1', 'starladder5', 'pgl', 'bobross',
return ['syndicate', 'summit1g', 'nightblue3', 'imaqtpie', 'lirik', 'sodapoppin',
'meclipse', 'shroud', 'tsm_bjergsen', 'joshog', 'dyrus', 'gosu', 'castro_1021', 'timthetatman',
'captainsparklez', 'goldglove', 'boxbox', 'speeddemosarchivesda',
'drdisrespectlive', 'nl_kripp', 'trick2g', 'swiftor', 'c9sneaky', 'doublelift',
'sivhd', 'iijeriichoii', 'Voyboy', 'faker', 'izakooo',
'tsm_theoddone', 'pewdiepie', 'cohhcarnage', 'pashabiceps', 'amazhs', 'anomalyxd', 'ungespielt',
'loltyler1', 'trumpsc', 'kinggothalion', 'omgitsfirefoxx',
'nadeshot', 'kittyplays', 'stonedyooda', 'yoda', 'Gronkh', 'GiantWaffle', 'nick28t',
'monstercat', 'gassymexican', 'montanablack88', 'cryaotic', 'reckful', 'a_seagull', 'm0e_tv',
'forsenlol', 'kaypealol', 'sovietwomble', 'ProfessorBroman', 'nickbunyun',
'dansgaming', 'yogscast', 'zeeoon', 'rewinside', 'legendarylea', 'ninja',
'markiplier', 'pokimane', 'froggen', 'aphromoo', 'olofmeister', 'followgrubby', 'bchillz']
def main():
logger.info("Starting Downloader Process.")
cursor = ""
game_name = ""
language = "en"
limit = "30"
period = "day"
trending = ""
# TODO: add these back once we figure out how to encode videos faster, right now it's taking way too long...
#
# logger.info("Getting the top clips from the top channels.")
# channels = get_popular_channel_list()
# for channel in channels:
# get_clips_from_twitch(channel, cursor, game_name, language, limit, period, trending, category='channels')
logger.info("Getting the top clips from the top games.")
channel = ""
games = get_popular_games_list(15)
category = 'games'
for game in games:
get_clips_from_twitch(channel, cursor, game['game']['name'], language, limit, period, trending, category, game)
logger.info("Getting the top clips from all of twitch.")
channel = ""
period = "day"
limit = "30"
category = 'twitch'
get_clips_from_twitch(channel, cursor, game_name, language, limit, period, trending, category)
end_time = datetime.datetime.now()
total_processing_time_sec = (end_time - start_time).total_seconds()
logger.info("Downloaded %s clips in %s seconds", num_downloaded_clips, total_processing_time_sec)
logger.info("FINISHED!!!")
if __name__ == "__main__":
main()
| true |
afee02d6be8dc4d6f871ed615fac59c3c4b3f0f0
|
Python
|
zdravkob98/Fundamentals-with-Python-May-2020
|
/Dictionaries - Exercise/demo.py
|
UTF-8
| 74 | 2.8125 | 3 |
[] |
no_license
|
d = {'a': [10, 14, 16], 'b': [15]}
k = 14
if k in d['a']:
print('yes')
| true |
c78b11ce80d2dc615cc631c72fa39b00cd94ff04
|
Python
|
dingzishidtc/dzPython
|
/调用浏览器.py
|
UTF-8
| 590 | 2.578125 | 3 |
[] |
no_license
|
#encoding=utf8
#!/usr/bin/python3
from selenium import webdriver
import time
driver=webdriver.Chrome()
driver.get(r'https://wenku.baidu.com/view/c5d88d850408763231126edb6f1aff00bed570ea.html')
time.sleep(1)
js = "window.scrollTo(0,3500)"
driver.execute_script(js)
time.sleep(1)
ele=driver.find_element_by_xpath("//p[@class='down-arrow goBtn']")
ele.click()
time.sleep(1)
for i in range(1,11):
js = "window.scrollBy(0,500)"
driver.execute_script(js)
elem=driver.find_element_by_id("pageNo-"+str(i))
    print(elem.text)
| true |
2b3bcaedc3a4613a9f9d82b43f77619389ac608b
|
Python
|
PyPhy/Python
|
/AI/Simple_Perceptron.py
|
UTF-8
| 2,137 | 3.84375 | 4 |
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from numpy import random, exp, dot, array
#%% Neural Network
class Neural_Network():
#%% Sigmoid function
def φ(self, x):
return 1/(1 + exp(-x))
# Sigmoid function's derivative
def dφ(self, x):
return exp(x)/ (1 + exp(x))**2
#%% Let's train our Neuron
def __init__(self, x, y, lr, epochs):
'''
        x: training input (dimensions: parameters * data)
        y: training output (dimensions: parameters * data)
lr: learning rate
epochs: iterations
'''
# same random number
random.seed(1)
        # weights (dimensions: perceptrons * parameters)
self.w = 2* random.random( (1,3) ) - 1
print('Initial weights: ', self.w)
for epoch in range(epochs):
# learning output
Y = self.φ( dot(self.w, x) )
# error = training output - learning output
error = y - Y
# adjustments to minimize the error
adjustments = error* self.dφ(Y)
# adjusted weights
self.w += lr* dot(adjustments, x.T)
print('Trained weights: ', self.w)
#%% I shall give a problem
def think(self, inputs):
return self.φ( dot(self.w, inputs) )
#%% Main file
if __name__ == '__main__':
#%% Train the neuron first
# 3 rows means 3 input types i.e. 3 xi
training_inputs = array([[0, 1, 1, 0],
[0, 1, 0, 1],
[1, 1, 1, 1] ])
# each output correspondces to input 1 row
training_outputs = array([ [0, 1, 1, 0] ])
# object created
NN = Neural_Network(training_inputs, training_outputs, 0.1, 10000)
#%% Now guess the output
Guess_Input = array([ [0],
[0],
[1] ])
print('Guessed output is...')
print( NN.think(Guess_Input))
| true |
7392d7185d47b8e03cd6f6703a70cd2886411a83
|
Python
|
mikeludemann/helperFunctions_Python
|
/src/Set/maxSet.py
|
UTF-8
| 46 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
x = set([71, 12, 3, 18, 2, 21])
print(max(x))
| true |
2ffe545e06630f9a96ba023367bc13c66eb5fdc3
|
Python
|
id774/sandbox
|
/python/numpy/anova.py
|
UTF-8
| 1,128 | 3.25 | 3 |
[] |
no_license
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
data = np.array([[5., 7., 12.],
[6., 5., 10.],
[3., 4., 8.],
[2., 4., 6.]])
s_mean = np.zeros(data.shape)
for i in range(data.shape[1]):
s_mean[:, i] = data[:, i].mean()
print("水準平均 " + str(s_mean))
kouka = s_mean - np.ones(data.shape) * data.mean()
print("水準間偏差(因子の効果) := 水準平均 - 全体平均 " + str(kouka))
Q1 = (kouka * kouka).sum()
print("水準間変動(効果の偏差平方和(SS)) " + str(Q1))
f1 = data.shape[1] - 1
print("自由度 " + str(f1))
V1 = Q1 / f1
print("水準間偏差(効果)の平均平方(MS)(不変分散) " + str(V1))
error = data - s_mean
print("水準内偏差(統計誤差) " + str(error))
Q2 = (error * error).sum()
print("誤差の偏差平方和(SS) " + str(Q2))
f2 = (data.shape[0] - 1) * data.shape[1]
print("自由度(DF) " + str(f2))
V2 = Q2 / f2
print("水準内偏差(誤差)の平均平方(MS)(不変分散) " + str(V2))
F = V1 / V2
print("分散比(F値) " + str(F))
| true |