blob_id (string, length 40) | language (string, 1 class) | repo_name (string, length 5-133) | path (string, length 2-333) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, length 0-67) | license_type (string, 2 classes) | text (string, length 12-5.47M) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
eae4c1adc77a1774f42e73aacef024f969c44f06
|
Python
|
gunzigun/Python-Introductory-100
|
/47.py
|
UTF-8
| 482 | 4.125 | 4 |
[] |
no_license
|
# -*- coding: UTF-8 -*-
"""
Problem: swap the values of two variables.
Program analysis:
"""
def exchange(a,b):
a,b = b,a
return (a,b)
"""
if __name__ == '__main__':
x = 10
y = 20
print 'x = %d,y = %d' % (x,y)
x,y = exchange(x,y)
print 'x = %d,y = %d' % (x,y)
"""
A = int(raw_input("please input A:"))
B = int(raw_input("please input B:"))
print "before exchange: %s, %s" % (A, B)
A,B = B,A #exchange(A,B)
print "after exchange: %s, %s" % (A, B)
| true |
b6bfac492780baa41f0465cbf9317648bd0dfdb0
|
Python
|
kitsuneninetails/python-utils
|
/python_utils/tests/utils/test_utils.py
|
UTF-8
| 279 | 2.8125 | 3 |
[] |
no_license
|
import unittest
def run_unit_test(test_case_name):
suite = unittest.TestLoader().loadTestsFromTestCase(test_case_name)
try:
unittest.TextTestRunner(verbosity=2).run(suite)
except Exception as e:
print('Exception: ' + str(e) + ', ' + str(e.args))
| true |
fa2ba6a4346bd244249fc73775afffe5d14ca949
|
Python
|
Lamppost122/Controlled-assessment-Final
|
/AddTeam.py
|
UTF-8
| 12,279 | 2.796875 | 3 |
[] |
no_license
|
import json
import uuid
import tkinter as tk
from tkinter import font as tkfont
from tkinter import messagebox
from tkinter import ttk
from Gui import *
import Config
class AddTeam :
"""
Methods:
SaveTeam
GetPlayer
updateListboxes
MovePlayer
RemovePlayer
Variables:
allPlayers - Contains an instance of the player file
orderedList - Contains player instances to be loaded into the PlayerList
TeamPlayer - Contains player instances to be loaded into the TeamList
"""
def SaveTeam(self):
"""
Adds the current team to the Team File
Validates the Team Number
Calls the Home Frame
"""
Team = SystemToolKit.readFile(Config.TeamFile)
TeamId = uuid.uuid4()
if Validation.newTeam(self.txtTeamNumber.get()) == True:
Data = {}
for i ,j in enumerate(self.TeamPlayers):
Data[i] = j
Data["Team Number"] = self.txtTeamNumber.get()
Team[str(TeamId)] = Data
with open(Config.TeamFile,"w") as fp:
json.dump(Team,fp)
self.controller.show_frame("Home")
def GetPlayer(self):
"""
Searches for players in the player file by First Name, Last Name and (First Name + Last Name)
If not a duplicate, adds these results to the orderedList class variable
Calls a Listbox Update
"""
data =self.txtPlayer.get()
if Validation.PresentsCheck(data) == True:
data = data.lower()
self.allPlayers = SystemToolKit.readFile(Config.PlayerFile)
for i,j in enumerate(self.allPlayers):
if self.allPlayers[j]["First name"].lower() == data or self.allPlayers[j]["Last name"].lower() == data or self.allPlayers[j]["First name"].lower() + " " + self.allPlayers[j]["Last name"].lower() == data:
if j not in self.orderedList and j not in self.TeamPlayers:
self.orderedList.append(j)
else:
Duplicates = False
self.updateListboxes()
def updateListboxes(self):
"""
Updates the TeamList and PlayerList with the current contents of orderedList and TeamPlayers respectively
"""
self.TeamList.delete(0, tk.END)
self.PlayerList.delete(0, tk.END)
for i in self.orderedList:
text = str(self.allPlayers[i]["First name"]) + " " + str(self.allPlayers[i]["Last name"])
self.PlayerList.insert(tk.END,text)
for j in self.TeamPlayers:
text = str(self.allPlayers[j]["First name"]) + " " + str(self.allPlayers[j]["Last name"])
self.TeamList.insert(tk.END,text)
def MovePlayer(self):
"""
Switches a player instance from orderedList to TeamPlayer
Calls a Listbox Update
"""
if self.PlayerList.index(tk.ANCHOR) < len(self.orderedList):
j = self.orderedList[self.PlayerList.index(tk.ANCHOR)]
self.TeamPlayers.append(j)
self.orderedList.remove(j)
self.updateListboxes()
def RemovePlayer(self):
"""
Switches a player instance from TeamPlayer to orderedList
Calls a Listbox Update
"""
if self.TeamList.index(tk.ANCHOR) < len(self.TeamPlayers):
j = self.TeamPlayers[self.TeamList.index(tk.ANCHOR)]
self.orderedList.append(j)
self.TeamPlayers.remove(j)
self.updateListboxes()
class AddTeamCoach(tk.Frame,AddTeam):
"""
Methods:
__init__
Variables:
controller
orderedList - Contains player instances to be loaded into the PlayerList
TeamPlayer - Contains player instances to be loaded into the TeamList
Title - Title Label Widget
lblPlayerName - Player Name Label Widget
lblPlayer - Player Heading Label Widget
lblTeam - Team Heading Label Widget
txtTeamNumber - Team Number Entry Widget
lblTeamNumber - Team Number Label Widget
txtPlayer - Player Entry Widget
getPlayerButton - Get Player Button Widget
PlayerList - Player List Listbox Widget
TeamList - Team List Listbox Widget
BackButton - Back Button Widget
RemovePlayerButton - Remove Player Button Widget
SaveButton - Save Button Widget
"""
def __init__(self, parent, controller):
"""
Initialises a frame instance of Add Team at Coach Access Level
"""
tk.Frame.__init__(self, parent)
self.controller = controller
self.TeamPlayers = []
self.orderedList = []
""" Widget Declarations """
self.Title = tk.Label(self,text = "Create Team" ,font = controller.title_font)
self.lblPlayerName = tk.Label(self,text = "Player Name: ")
self.lblPlayer = tk.Label(self,text = "Players ")
self.lblTeam = tk.Label(self,text="Team")
self.txtTeamNumber = ttk.Entry(self)
self.lblTeamNumber = tk.Label(self,text="Team Number: ")
self.txtPlayer = ttk.Entry(self)
self.getPlayerButton = tk.Button(self,text = "Get Player",command = self.GetPlayer)
self.PlayerList = tk.Listbox(self)
self.TeamList = tk.Listbox(self)
b = tk.Button(self, text="Move Player",command=self.MovePlayer )
self.BackButton= tk.Button(self, text="Back",command=lambda:SystemToolKit.BackButtonRun(controller))
self.RemovePlayerButton = tk.Button(self,text= "Remove Player",command = self.RemovePlayer)
self.SaveButton = tk.Button(self,text = "Save",command = self.SaveTeam)
""" Widget Stylings """
self.lblPlayerName.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.lblPlayer.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.lblTeam.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.lblTeamNumber.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.getPlayerButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
b.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.BackButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.RemovePlayerButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.SaveButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.Title.config(background="#8ABFD9",fg = "#404040",pady="5")
""" Widget Positions """
self.Title.grid(row = 0,column = 0,columnspan = 3)
self.lblPlayerName.grid(row = 1,column = 0)
self.txtPlayer.grid(row = 1,column = 1 )
self.getPlayerButton.grid(row= 1 , column = 2)
self.lblTeamNumber.grid(row= 1,column = 4)
self.txtTeamNumber.grid(row= 1 ,column =5)
self.lblPlayer.grid(row = 2,column = 1)
self.lblTeam.grid(row= 2,column = 3)
self.PlayerList.grid(row = 3,column = 1)
b.grid(row = 3,column = 2)
self.TeamList.grid(row = 3 ,column = 3 )
self.RemovePlayerButton.grid(row =3,column = 4)
self.SaveButton.grid(row = 3,column = 5)
self.BackButton.grid(row =1,column = 3)
class AddTeamAdmin(tk.Frame,AddTeam):
"""
Methods:
__init__
Variables:
controller
orderedList - Contains player instances to be loaded into the PlayerList
TeamPlayer - Contains player instances to be loaded into the TeamList
Title - Title Label Widget
lblPlayerName - Player Name Label Widget
lblPlayer - Player Heading Label Widget
lblTeam - Team Heading Label Widget
txtTeamNumber - Team Number Entry Widget
lblTeamNumber - Team Number Label Widget
txtPlayer - Player Entry Widget
getPlayerButton - Get Player Button Widget
PlayerList - Player List Listbox Widget
TeamList - Team List Listbox Widget
BackButton - Back Button Widget
RemovePlayerButton - Remove Player Button Widget
SaveButton - Save Button Widget
"""
def __init__(self, parent, controller):
"""
Initialises a frame instance of Add Team at Admin Access Level
"""
tk.Frame.__init__(self, parent)
self.controller = controller
self.TeamPlayers = []
self.orderedList = []
""" Widget Declarations """
self.Title = tk.Label(self,text = "Create Team" ,font = controller.title_font)
self.lblPlayerName = tk.Label(self,text = "Player Name: ")
self.lblPlayer = tk.Label(self,text = "Players ")
self.lblTeam = tk.Label(self,text="Team")
self.txtTeamNumber = ttk.Entry(self)
self.lblTeamNumber = tk.Label(self,text="Team Number: ")
self.txtPlayer = ttk.Entry(self)
self.getPlayerButton = tk.Button(self,text = "Get Player",command = self.GetPlayer)
self.PlayerList = tk.Listbox(self)
self.TeamList = tk.Listbox(self)
b = tk.Button(self, text="Move Player",command=self.MovePlayer )
self.BackButton= tk.Button(self, text="Back",command=lambda:SystemToolKit.BackButtonRun(controller))
self.RemovePlayerButton = tk.Button(self,text= "Remove Player",command = self.RemovePlayer)
self.SaveButton = tk.Button(self,text = "Save",command = self.SaveTeam)
""" Widget Stylings """
self.lblPlayerName.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.lblPlayer.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.lblTeam.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.lblTeamNumber.config(justify="right",fg = "black",background="#8ABFD9",font=("Arial", 10, 'bold'))
self.getPlayerButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
b.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.BackButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.RemovePlayerButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.SaveButton.config(compound="left",background="#307292",relief="flat",font=("Arial", 12, 'bold'),padx=5)
self.Title.config(background="#8ABFD9",fg = "#404040",pady="5")
""" Widget Positions """
self.Title.grid(row = 0,column = 0,columnspan = 3)
self.lblPlayerName.grid(row = 1,column = 0)
self.txtPlayer.grid(row = 1,column = 1 )
self.getPlayerButton.grid(row= 1 , column = 2)
self.lblTeamNumber.grid(row= 1,column = 4)
self.txtTeamNumber.grid(row= 1 ,column =5)
self.lblPlayer.grid(row = 2,column = 1)
self.lblTeam.grid(row= 2,column = 3)
self.PlayerList.grid(row = 3,column = 1)
b.grid(row = 3,column = 2)
self.TeamList.grid(row = 3 ,column = 3 )
self.RemovePlayerButton.grid(row =3,column = 4)
self.SaveButton.grid(row = 3,column = 5)
self.BackButton.grid(row =1,column = 3)
class AddTeamPlayer(tk.Frame,AddTeam):
"""
Methods:
__init__
Variables:
controller
"""
def __init__(self, parent, controller):
"""
Initialises a frame instance of Add Team at Player Access Level
"""
tk.Frame.__init__(self, parent)
self.controller = controller
""" Widget Declarations """
""" Widget Stylings """
""" Widget Positions """
| true |
755e74b8993de8389e4c4e4337b710e438f8520d
|
Python
|
mo1233/floor
|
/les 4/Pe 1.py
|
UTF-8
| 251 | 3.375 | 3 |
[] |
no_license
|
leeftijd = int(input('Geef u leeftijd: '))
Paspoort = (input('bent u in bezit van een Nederlandse Paspoort?: '))
if leeftijd >= 18 and Paspoort =='ja' :
print('Gefeliciteerd je mag stemmen')
else :
print('je mag nog niet stemmen')
| true |
a32ded4ecfe0af9b5dcb304f5a896dc6a9d1981a
|
Python
|
Garretming/cocos2dx-lua-tools
|
/tools/python/cocosstudio/parseText.py
|
UTF-8
| 3,929 | 2.625 | 3 |
[] |
no_license
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
def parse(node,array):
isRichText = False
string = ''
if (array.get('UserData') != None and array.get('UserData') != ''):
UserData = array['UserData'].lower()
if UserData.find('richtext') >= 0:
isRichText = True
if (isRichText):
string += "\t%s = RichText:create();\n" % node
else:
string += "\t%s = Text:create();\n" % node
if (array.get('FontSize') != None):
if (isRichText):
string += "\t%s:setDefaultFontSize(%d);\n" % (node, array['FontSize'])
else:
string += "\t%s:setFontSize(%d);\n" % (node, array['FontSize'])
if (array.get('LabelText') != None):
if (array['LabelText'] == 'Text Label'):
string += "\t%s:setString([[]]);\n" % (node)
else:
string += "\t%s:setString([[%s]]);\n" % (node, array['LabelText'])
if (array.get('FontResource') != None and array['FontResource']['Path'] != ""):
if (isRichText):
string += "\t%s:setDefaultFontName('%s');\n" % (node, array['FontResource']['Path'])
else:
string += "\t%s:setFontName('%s');\n" % (node, array['FontResource']['Path'])
if (array.get('IsCustomSize') != None and array['IsCustomSize']):
string += "\t%s:setTextAreaSize({width = %d, height = %d});\n" %(node, array['Size']['X'], array['Size']['Y'])
if (array.get('HorizontalAlignmentType') != None):
if (array['HorizontalAlignmentType'] == 'HT_Center'):
string += "\t%s:setTextHorizontalAlignment(1);\n" % (node)
elif (array['HorizontalAlignmentType'] == 'HT_Right'):
string += "\t%s:setTextHorizontalAlignment(2);\n" % (node)
if array.get('VerticalAlignmentType') != None and (not isRichText):
if (array['VerticalAlignmentType'] == 'VT_Center'):
string += "\t%s:setTextVerticalAlignment(1);\n" % (node)
elif (array['VerticalAlignmentType'] == 'VT_Bottom'):
string += "\t%s:setTextVerticalAlignment(2);\n" % (node)
if (array.get('OutlineEnabled') != None):
OutlineSize = 1
if (array.get('OutlineSize') != None):
OutlineSize = array['OutlineSize']
OutlineColor = [255,255,255]
if (array.get('OutlineColor')):
OutlineColor[0] = array['OutlineColor'].get('R', 255)
OutlineColor[1] = array['OutlineColor'].get('G', 255)
OutlineColor[2] = array['OutlineColor'].get('B', 255)
if (isRichText):
string += "\t%s:setDefaultOutline({r = %d, g = %d, b = %d, a = 255}, %.2f);\n" % (node, OutlineColor[0], OutlineColor[1], OutlineColor[2], OutlineSize)
else:
string += "\t%s:enableOutline({r = %d, g = %d, b = %d, a = 255}, %.2f);\n" % (node, OutlineColor[0], OutlineColor[1], OutlineColor[2], OutlineSize)
if (array.get('ShadowEnabled') != None):
ShadowOffsetX = 0
ShadowOffsetY = 0
if (array.get('ShadowOffsetX') != None):
ShadowOffsetX = array['ShadowOffsetX']
if (array.get('ShadowOffsetY') != None):
ShadowOffsetY = array['ShadowOffsetY']
ShadowColor = [255,255,255]
if (array.get('ShadowColor') != None) :
ShadowColor[0] = array['ShadowColor'].get('R', 255)
ShadowColor[1] = array['ShadowColor'].get('G', 255)
ShadowColor[2] = array['ShadowColor'].get('B', 255)
if (isRichText):
string += "\t%s:setDefaultShadow({r = %d, g = %d, b = %d, a = 255}, {width = %f, height = %f});\n" % (node, ShadowColor[0], ShadowColor[1], ShadowColor[2], ShadowOffsetX, ShadowOffsetY)
else:
string += "\t%s:enableShadow({r = %d, g = %d, b = %d, a = 255}, {width = %f, height = %f});\n" % (node, ShadowColor[0], ShadowColor[1], ShadowColor[2], ShadowOffsetX, ShadowOffsetY)
return string
| true |
837950768b7508aa6da9895b916fc31821980053
|
Python
|
keesvanginkel/OSdaMage
|
/preproc_functions.py
|
UTF-8
| 4,847 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
"""
Preprocessing functions of OSdaMage 1.0.
Contains the preprocessing functions required for running the OSdaMage model. The functions are called from a Jupyter Notebook 'Preproc_split_OSM.ipynb'
This code is maintained on a GitHub repository: github.com/keesvanginkel/OSdaMage
@author: Elco Koks and Kees van ginkel
"""
import geopandas as gpd
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from shapely.geometry import MultiPolygon
from geopy.distance import vincenty
logging.basicConfig(filename='OSM_extracts.log',level=logging.INFO)
def poly_files_europe(out_path, NUTS_shape,filter_out):
"""
This function will create the .poly files from the Europe shapefile.
.poly files are used to extract data from the openstreetmap files.
This function is adapted from the OSMPoly function in QGIS, and Elco Koks GMTRA model.
Arguments:
*out_path* (string): path to the directory where the .poly files should be written
*NUTS_shape* (string) : path to the NUTS-3 shapefile (CRS=EPSG:3035)
*filter_out* (list of strings): names of NUTS-3 regions not to include in the analysis
Returns:
.poly file for each country in a new dir in the working directory (CRS=WGS:84).
"""
NUTS_poly = gpd.read_file(NUTS_shape)
#Remove regions that are to be filtered out
NUTS_poly = NUTS_poly[~NUTS_poly['NUTS_ID'].isin(filter_out)]
NUTS_poly = NUTS_poly.to_crs(epsg=4326) #Change into the WGS84 = EPSG4326 coordinate system of OSM.
num = 0
# iterate over the counties (rows) in the Europe shapefile
for f in NUTS_poly.iterrows():
f = f[1]
num = num + 1
geom=f.geometry
try:
# this will create a list of the different subpolygons
if geom.geom_type == 'MultiPolygon':
polygons = geom
# the list will be length 1 if it is just one polygon
elif geom.geom_type == 'Polygon':
polygons = [geom]
# define the name of the output file, based on the NUTS_ID
nuts_id = f['NUTS_ID']
# start writing the .poly file
f = open(out_path + "/" + nuts_id +'.poly', 'w')
f.write(nuts_id + "\n")
i = 0
# loop over the different polygons, get their exterior and write the
# coordinates of the ring to the .poly file
for polygon in polygons:
polygon = np.array(polygon.exterior)
j = 0
f.write(str(i) + "\n")
for ring in polygon:
j = j + 1
f.write(" " + str(ring[0]) + " " + str(ring[1]) +"\n")
i = i + 1
# close the ring of one subpolygon if done
f.write("END" +"\n")
# close the file when done
f.write("END" +"\n")
f.close()
except Exception as e:
print("Exception {} for {}" .format(e,f['NUTS_ID']))
def clip_osm_multi(dirs): #called from the Preproc_split_OSM file
""" Clip an area osm file from the larger continent (or planet) file and save it to a new osm.pbf file.
This is much faster compared to clipping the osm.pbf file while extracting through ogr2ogr.
This function uses the osmconvert tool, which can be found at http://wiki.openstreetmap.org/wiki/Osmconvert.
Either add the directory where this executable is located to your environmental variables or just put it in the 'scripts' directory.
Arguments (stored in a list to enable multiprocessing):
*dirs[0] = osm_convert_path* (string): path to the osm_convert executable
*dirs[1] = planet_path* (string): path to the .planet file
*dirs[2] = area_poly* (string): path to the .poly file, made by create_poly_files_europe()
*dirs[3] = area_pbf* (string): output directory
Returns:
*region.osm.pbf* (os.pbf file) : the clipped (output) osm.pbf file
"""
osm_convert_path = dirs[0]
planet_path = dirs[1]
area_poly = dirs[2]
area_pbf = dirs[3]
print('{} started!'.format(area_pbf))
logging.info('{} started!'.format(area_pbf))
try:
if (os.path.exists(area_pbf) is not True):
os.system('{} {} -B={} --complete-ways --hash-memory=500 -o={}'.format(osm_convert_path,planet_path,area_poly,area_pbf))
print('{} finished!'.format(area_pbf))
logging.info('{} finished!'.format(area_pbf))
else:
print('{} already exists'.format(area_pbf))
logging.info('{} already exists'.format(area_pbf))
except Exception as e:
logging.error('{} did not finish because of {}'.format(area_pbf,str(e)))
| true |
dd611ce6333cadf48311b1d94884adcb6e543a3d
|
Python
|
heitorchang/learn-code
|
/battles/challenges/dwarvenTreasure.py
|
UTF-8
| 3,464 | 3.015625 | 3 |
[
"MIT"
] |
permissive
|
def checkinvalidchars(s):
s = s.upper()
return "J" in s or "K" in s or "Q" in s or "V" in s
def compress(s):
return "".join(sorted(set(s)))
commands = "NORTH SOUTH EAST WEST ONE TWO THREE FOUR SIX EIGHT NINE TEN"
tomb_message = "HERE LIES THE EXALTED DWARF KING DWALIN THE FOURTH"
print(set(compress(commands)) - set(compress(tomb_message)))
def dwarvenTreasure(mapInscription):
dists = {'แฉแพแ': 1,
'แแนแฉ': 2,
'แฆแฑแแ': 3,
'แ แฉแขแฑ': 4,
'แแแ': 6,
'แแแทแปแ': 8,
'แพแแพแ': 9,
'แแแพ': 10}
dirs = {'แแซแแ': 1,
'แแฉแขแฆ': 2,
'แนแแแ': 3,
'แพแฉแฑแฆ': 4}
x = 0
y = 0
words = mapInscription.split()
instr = [words[i:i+3] for i in range(0, len(words), 3)]
def parseInstr(instr):
nonlocal x, y
num, step, direc = instr
n = dists[num]
d = dirs[direc]
if d == 1:
y += n
elif d == 2:
x += n
elif d == 3:
y -= n
else:
x -= n
for i in instr:
print(i)
parseInstr(i)
return [x, y]
def genword(w):
words = {'one': 'แฉแพแ',
'two': 'แแนแฉ',
'three': 'แฆแฑแแ',
'four': 'แ แฉแขแฑ',
'six': 'แแแ',
'eight': 'แแแทแปแ',
'nine': 'แพแแพแ',
'ten': 'แแแพ',
'east': 'แแซแแ',
'south': 'แแฉแขแฆ',
'west': 'แนแแแ',
'north': 'แพแฉแฑแฆ',
'step': 'แแแแ',
'steps': 'แแแแแ'}
return words[w]
def gens(s):
return " ".join(map(genword, s.split()))
pairtest(dwarvenTreasure("แฉแพแ แแแแ แแซแแ"), [0, 1],
dwarvenTreasure("แ แฉแขแฑ แแแแแ แแฉแขแฆ แแนแฉ แแแแแ แนแแแ"), [4, -2],
dwarvenTreasure("แแแพ แแแแแ แพแฉแฑแฆ"), [-10, 0],
dwarvenTreasure("แฆแฑแแ แแแแแ แนแแแ แพแแพแ แแแแแ แพแฉแฑแฆ"), [-9, -3],
dwarvenTreasure("แ แฉแขแฑ แแแแแ แพแฉแฑแฆ แแแ แแแแแ แแซแแ แแแพ แแแแแ แแฉแขแฆ"), [6, 6],
dwarvenTreasure("แแนแฉ แแแแแ แแซแแ แฉแพแ แแแแ แแฉแขแฆ แฆแฑแแ แแแแแ แนแแแ แแแทแปแ แแแแแ แพแฉแฑแฆ"), [-7, -1],
dwarvenTreasure("แฆแฑแแ แแแแแ แแฉแขแฆ แแแ แแแแแ แพแฉแฑแฆ"), [-3, 0],
dwarvenTreasure("แแแทแปแ แแแแแ แแซแแ แแนแฉ แแแแแ แพแฉแฑแฆ"), [-2, 8],
dwarvenTreasure("แพแแพแ แแแแแ แพแฉแฑแฆ แฉแพแ แแแแ แนแแแ แฉแพแ แแแแ แแฉแขแฆ"), [-8, -1],
dwarvenTreasure("แแแ แแแแแ แแซแแ แแนแฉ แแแแแ แแฉแขแฆ"), [2, 6],
dwarvenTreasure("แฉแพแ แแแแ แพแฉแฑแฆ"), [-1, 0],
dwarvenTreasure("แ แฉแขแฑ แแแแแ แแซแแ แ แฉแขแฑ แแแแแ แแฉแขแฆ แ แฉแขแฑ แแแแแ แนแแแ"), [4, 0])
| true |
05939965b4b81719bd62d478a43910477044b606
|
Python
|
Youka/botlet
|
/tests/test_utils.py
|
UTF-8
| 1,137 | 3.015625 | 3 |
[
"Apache-2.0"
] |
permissive
|
""" Test internal utilities """
from os import chmod
from os.path import isdir, isfile
from stat import S_IREAD
from unittest import TestCase
from botlet.utils import SafeTemporaryDirectory, SafeQueue
class TestUtils(TestCase):
""" Test suite for utility methods """
def test_tempdir(self):
""" Check a temporary directory with files inside works """
with SafeTemporaryDirectory(prefix='test_') as dir_path:
self.assertTrue(isdir(dir_path))
file_path = dir_path + '/test'
open(file_path, 'w').close()
self.assertTrue(isfile(file_path))
# Test permission to delete read-only files
chmod(file_path, S_IREAD)
self.assertFalse(isdir(dir_path))
def test_queue(self):
""" Fill and empty queue with restrictions """
queue = SafeQueue[int](2)
self.assertIsNone(queue.get())
self.assertTrue(queue.put(1))
self.assertTrue(queue.put(2))
self.assertFalse(queue.put(3))
self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(), 2)
self.assertIsNone(queue.get())
| true |
dd18149260890d47f70cfae065c2712074df7d29
|
Python
|
SSKim76/Python_OpenCV
|
/OpenCV/49_Background_Subtraction.py
|
UTF-8
| 3,547 | 3.0625 | 3 |
[] |
no_license
|
"""
Lesson 49. Removing the background from video
Background subtraction is a major preprocessing step used in many kinds of vision-based applications.
It is used, for example, in programs that count visitors entering and leaving through a fixed camera such as CCTV, or in traffic-survey programs.
In video shot with a fixed camera, the background is usually a static, non-moving image, while the people or vehicles entering and leaving are the moving objects.
Technically, background removal therefore comes down to extracting only the moving parts from the static parts.
If the moving objects cast shadows, extracting the background becomes more complicated.
Algorithms provided by OpenCV:
1. BackgroundSubtractorMOG
A Gaussian-mixture-based background/foreground segmentation algorithm.
It performs background subtraction by applying a mixture of K = 3 or 5 Gaussian distributions to the background pixels.
The mixture weights represent the proportion of time that the pixels characterising the background stay at the same location in the video.
2. BackgroundSubtractorMOG2
A Gaussian-mixture-based background/foreground segmentation algorithm.
It selects an appropriate Gaussian distribution for each pixel.
It removes the background properly even when the scene changes because of lighting conditions.
detectShadows = True : enables shadow detection (default = True); set it to False to disable it.
detectShadows = True : shadows are drawn in gray.
Enabling shadow detection makes processing slightly slower.
3. BackgroundSubtractorGMG (known to give the best background removal in many situations)
An algorithm that combines statistical background image removal with per-pixel Bayesian segmentation.
The GMG algorithm uses the first few frames (usually 120) for background modelling.
It uses Bayesian inference to extract the foreground (moving objects) rather than the background.
Applying an opening operation is known to be a good way to remove noise.
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
import default_import as impDef
def backSubtractionMOG():
cap = cv2.VideoCapture(0)
#cap.set(3, 480)
#cap.set(4, 320)
mog = cv2.bgsegm.createBackgroundSubtractorMOG()
mog2 = cv2.createBackgroundSubtractorMOG2()
while True:
ret, frame = cap.read()
fgmask = mog.apply(frame)
fgmask2 = mog2.apply(frame)
cv2.imshow('MOG', fgmask)
cv2.imshow('MOG2', fgmask2)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
# End of while True:
# End of backSubtractionMOG():
def backSubtractionGMG():
cap = cv2.VideoCapture(0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
while True:
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
cv2.imshow('GMG', fgmask)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
# End of while Ture
cap.release()
cv2.destroyAllWindows()
# End of backSubtractionGMG()
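# Illustrative sketch (not part of the original file): the notes above mention MOG2's
# shadow-detection option and recommend an opening operation to remove noise; this
# variant combines both. The window name and kernel size are assumptions.
def backSubtractionMOG2NoShadow():
    cap = cv2.VideoCapture(0)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # detectShadows=False skips the gray shadow mask and is slightly faster
    mog2 = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
    while True:
        ret, frame = cap.read()
        fgmask = mog2.apply(frame)
        # opening removes small speckle noise from the foreground mask
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        cv2.imshow('MOG2 no shadow', fgmask)
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
# End of backSubtractionMOG2NoShadow()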
backSubtractionGMG()
#backSubtractionMOG()
| true |
527bfead7139ccf94563487642fb91083daf0eb5
|
Python
|
HSabbir/Python-Challange
|
/day 7.py
|
UTF-8
| 180 | 3.390625 | 3 |
[] |
no_license
|
# count how many people the bus system carries per year
daily_carry = 1200000
yearly_carry = 0
for i in range(365):
yearly_carry +=daily_carry
print(yearly_carry)
#print(365 * 1200000)
| true |
5b0b1c5991569e57b7239a7f72474f75c4e6108c
|
Python
|
jscheiber22/lazylister
|
/lazylister/lazylister.py
|
UTF-8
| 1,243 | 3.78125 | 4 |
[
"MIT"
] |
permissive
|
# I always have to write custom code to pull items off of a text document list, so this will be a nice little
# auto list creator "library" to return each individual line of a text document as its own element in a list.
# Written by James Scheiber, 4am 9/19/2020 :)
class Lister:
def __init__(self, filePath = "list.txt"):
self.filePath = filePath
def returnList(self):
list = []
f = None
# Tries to iterate through the file and add each new line to the list variable created above
try:
f = open(self.filePath, 'r')
lines = f.readlines()
for line in lines:
if not line.startswith(' ') and not line.startswith('\n'):
list.append(line)
f.close()
# Will throw in event of file not existing or being able to be found
except FileNotFoundError:
print("File did not exist, check that first.")
except:
print("File is okay but something else is causing problems. :/ sorry bro")
finally:
# Safety shutdown as last thing done
if f != None:
f.close()
# Finally returns the list to the original call
return(list)
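# Illustrative usage (not part of the original file); "list.txt" is the default path assumed above:
#   lines = Lister("list.txt").returnList()
#   print(lines)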
| true |
1c1d1021ef5628ef581a6af729a8c662a917caf3
|
Python
|
mendovitskaya/autotests_stepik_homework
|
/lesson2_list.py
|
UTF-8
| 613 | 2.859375 | 3 |
[] |
no_license
|
from selenium import webdriver
import math
link = "http://suninjuly.github.io/selects2.html"
browser = webdriver.Chrome(executable_path = '/Users/mendovitskaya/environments/selenium_env/Scripts/chromedriver')
browser.get(link)
x_element = browser.find_element_by_id("num1")
x = x_element.text
y_element = browser.find_element_by_id("num2")
y = y_element.text
z = str(int(x) + int(y))
print(z)
dropdown= browser.find_element_by_id("dropdown")
dropdown.click()
value=browser.find_element_by_css_selector("[value='" + z + "']").click()
button = browser.find_element_by_css_selector("button.btn")
button.click()
| true |
1b867d819adf765eb0c1686d2e2506a7a7289174
|
Python
|
jnottin/bookClub
|
/bookClub_project/views.py
|
UTF-8
| 2,939 | 2.734375 | 3 |
[] |
no_license
|
from django.shortcuts import render
from .models import Book
import requests
from random import randint
# from functions import getCover
# import functions as f
# Create your views here.
nyt_apiKey = 'i2gBHvKpbi0ZxGtoRAzX85JBi8iOot7t'
googleBooks_apiKey ='AIzaSyDs_cKUw7nk8l-QIVUVwyPlx1a3aRo_s2Q'
background_colors = ['153, 31, 0,', '153, 221, 255,', '255, 230, 255']
def book_list(request):
rand_color = randint(0, (len(background_colors)-1))
backgroundColor = background_colors[rand_color]
selected_genre = 'hardcover-fiction'
#Get NYT Best Seller genres
response_genres = requests.get('https://api.nytimes.com/svc/books/v3/lists/names.json?&api-key=' + nyt_apiKey)
genres_data = response_genres.json()
genres = genres_data['results']
if request.method == "POST":
selected_genre = request.POST['value']
print(selected_genre)
#Get NYT Best Seller List
response_books = requests.get('https://api.nytimes.com/svc/books/v3/lists.json?list-name=' + selected_genre +'&api-key=' + nyt_apiKey)
books = response_books.json()
books_results = books['results']
# #Get Covers
for book in books_results:
if book['book_details'][0]['primary_isbn10'] != 'None':
isbn = book['book_details'][0]['primary_isbn10']
response_covers = requests.get('https://www.googleapis.com/books/v1/volumes?q=isbn:' + isbn + "&key=" + googleBooks_apiKey)
cover_data = response_covers.json()
#while loop, while cover_data['totalItems'] == 0 keep book['isbns'][i + 1][0]['isbn10']
try:
if cover_data['totalItems'] == 0 or book['book_details'][0]['primary_isbn10'] == 'None':
isbn = book['isbns'][0]['isbn10']
response_covers = requests.get('https://www.googleapis.com/books/v1/volumes?q=isbn:' + isbn + "&key=" + googleBooks_apiKey)
cover_data = response_covers.json()
if cover_data['totalItems'] == 0:
isbn = book['isbns'][1]['isbn10']
response_covers = requests.get('https://www.googleapis.com/books/v1/volumes?q=isbn:' + isbn + "&key=" + googleBooks_apiKey)
cover_data = response_covers.json()
except IndexError:
cover_data = "Missing Cover"
else:
cover_data = "Missing Cover"
book['cover'] = cover_data
return render(request, 'book_list.html', {
'books': books,
'genres': genres,
'selected_genre': selected_genre,
'backgroundColor' : backgroundColor,
})
# TEST API LINK FOR NYT API BOOK
# https://api.nytimes.com/svc/books/v3/lists.json?list-name=hardcover-fiction&api-key=i2gBHvKpbi0ZxGtoRAzX85JBi8iOot7t
# TEST API LINK FOR NYT API BOOK USING NYT ISBN
# https://www.googleapis.com/books/v1/volumes?q=isbn:0399179364&key=AIzaSyDs_cKUw7nk8l-QIVUVwyPlx1a3aRo_s2Q
#List of names like fiction Biography etc
#https://api.nytimes.com/svc/books/v3/lists/names.json?&api-key=i2gBHvKpbi0ZxGtoRAzX85JBi8iOot7t
| true |
c69f1bfb039af6c86956641c4f6f2a5dcb2d438a
|
Python
|
YuzhenWANG/kaggle-KDD
|
/volume_info_pretreat.py
|
UTF-8
| 8,152 | 2.640625 | 3 |
[] |
no_license
|
# rewrite volume info pretreat file
# save all volume info into a good matrix
import numpy as np
import pandas as pd
from datetime import datetime,timedelta
from matplotlib import pyplot
import seaborn
import statsmodels.api as sm
from sklearn.model_selection import KFold
# step 1 : change table 6 raw data into 20 minute time windows and aggregate them by vehicle model
# input: table 6 output: matrix 2088 * 40 (8 vehicle models and each 5 tollgate)
#########################################################################################################
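# Worked example of the row/column mapping used below: a record with vehicle_model = 2
# passing tollgate 3 in direction 0 goes to column 2*5 + tollgate_dict[(3,0)] = 10 + 3 = 13,
# and its row is the index of its 20-minute window counted from 2016-09-19 00:00:00.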
print 'step1 : change table 6 into matrix'
index = pd.date_range('19/9/2016', periods=(4*7+1)*24*3, freq='20T')
print index
info = pd.read_csv('input/volume(table 6)_training.csv')
vehicle_models = info['vehicle_model'].unique()
print vehicle_models
tollgate_id = info['tollgate_id'].unique()
volume_info = np.zeros(((4*7+1)*24*3,len(vehicle_models)*5),dtype=int)
print volume_info.shape
start = '2016-09-19 00:00:00'
FMT = "%Y-%m-%d %H:%M:%S"
start = datetime.strptime(start, FMT)
tollgate_dict = {(1,0):0,(1,1):1,(2,0):2,(3,0):3,(3,1):4}
# read line by line and hash table into relative matrix
fr = open('input/volume(table 6)_training.csv', 'r')
fr.readline()
txt = fr.readlines() # skip the header
fr.close()
for str_line in txt:
str_line = str_line.replace('"', '').split(',')
tollgate_id = str_line[1]
direction = str_line[2]
vehile_model = str_line[3]
pass_time = str_line[0]
pass_time = datetime.strptime(pass_time, "%Y-%m-%d %H:%M:%S")
# calculating time delta
delta = pass_time - start
day_diff = delta.days
hour_diff = delta.seconds//3600
minute_diff = (delta.seconds % 3600) // 60
total_diff_minute = day_diff*24*60 + hour_diff*60 + minute_diff
row_num = total_diff_minute // 20
# calculating column num
column_num = int(vehile_model)*5 + tollgate_dict[(int(tollgate_id),int(direction))]
volume_info[row_num,column_num]+=1
np.savetxt('volume_files/volume_training_set.csv',volume_info,fmt='%d')
print volume_info.shape
# pyplot.figure()
# pyplot.plot(volume_info)
# pyplot.show()
# step2: delete national day 9 days
# separate total volume into 3 part (model1 , model2 , model extra)
#######################################################################################
print 'step 2 : delete national days '
model_1_volume = volume_info[:,5:10]
model_2_volume = volume_info[:,10:15]
model_extra_volume = volume_info[:,0:5] + volume_info[:,15:20] + volume_info[:,20:25] + volume_info[:,25:30] + volume_info[:,30:35] + volume_info[:,35:]
model_1_volume = np.vstack((model_1_volume[:792,:],model_1_volume[-648:,:]))
model_2_volume = np.vstack((model_2_volume[:792,:],model_2_volume[-648:,:]))
model_extra_volume = np.vstack((model_extra_volume[:792,:],model_extra_volume[-648:,:]))
print model_1_volume.shape
print model_2_volume.shape
print model_extra_volume.shape
np.savetxt('volume_files/volume_model1_trainset.csv',model_1_volume,fmt='%d')
np.savetxt('volume_files/volume_model2_trainset.csv',model_2_volume,fmt='%d')
np.savetxt('volume_files/volume_modelextra_trainset.csv',model_extra_volume,fmt='%d')
for i in range(5):
pyplot.figure()
pyplot.plot(model_1_volume[:,i])
pyplot.plot(model_2_volume[:,i])
pyplot.plot(model_extra_volume[:,i])
pyplot.show()
# step3: prepare test set 1
# save testset1 into 3 (7 * 24 *3, 5) matrix
#################################################################################
print 'step 3 prepare test set 1 into 3 matrix '
volume_test_1 = np.zeros((7*24*3,len(vehicle_models)*5),dtype=int)
print volume_test_1.shape
start = '2016-10-18 00:00:00'
FMT = "%Y-%m-%d %H:%M:%S"
start = datetime.strptime(start, FMT)
fr = open('input/volume(table 6)_test1.csv', 'r')
fr.readline()
txt = fr.readlines() # skip the header
fr.close()
for str_line in txt:
str_line = str_line.replace('"', '').split(',')
tollgate_id = str_line[1]
direction = str_line[2]
vehile_model = str_line[3]
pass_time = str_line[0]
pass_time = datetime.strptime(pass_time, "%Y-%m-%d %H:%M:%S")
# calculating time delta
# print str_line
delta = pass_time - start
day_diff = delta.days
hour_diff = delta.seconds//3600
minute_diff = (delta.seconds % 3600) // 60
total_diff_minute = day_diff*24*60 + hour_diff*60 + minute_diff
row_num = total_diff_minute // 20
# calculating column num
column_num = int(vehile_model)*5 + tollgate_dict[(int(tollgate_id),int(direction))]
volume_test_1[row_num,column_num]+=1
model_1_volume_testset_1 = volume_test_1[:,5:10]
model_2_volume_testset_1 = volume_test_1[:,10:15]
model_extra_volume_testset_1 = volume_test_1[:,0:5] + volume_test_1[:,15:20] + volume_test_1[:,20:25] + volume_test_1[:,25:30] + volume_test_1[:,30:35] + volume_test_1[:,35:]
np.savetxt('volume_files/volume_model1_test_set_1.csv',model_1_volume_testset_1,fmt='%d')
np.savetxt('volume_files/volume_model2_test_set_1.csv',model_2_volume_testset_1,fmt='%d')
np.savetxt('volume_files/volume_modelextra_test_set_1.csv',model_extra_volume_testset_1,fmt='%d')
print model_1_volume_testset_1.shape
print model_2_volume_testset_1.shape
print model_extra_volume_testset_1.shape
# pyplot.figure()
# pyplot.plot(volume_test_1)
# pyplot.show()
# step 4: create n fold fies:
# change 20 days data into 5 fold train and CV set and also test set
# generating train test files
#####################################################################
days = range(20)
relative_weed = [2,2,3,4,5,6,7,1,2,3,4,2,2,3,3,4,5,6,7,1]
kf = KFold(n_splits=5,random_state = 5)
fold = 1
for train_index,CV_index in kf.split(days):
print 'fold:' +str(fold)
print train_index,CV_index
file_path = 'volume_files/fold'+str(fold)+'/'
fold = fold + 1
f = open(file_path+'train_CV_set.txt','w')
f.write(str(train_index))
f.write(str(CV_index))
f.close()
print 'save model 1'
# save train total set
train_set_fold = np.empty((0,5),dtype=int)
for i in train_index:
train_set_fold = np.vstack((train_set_fold,model_1_volume[72*i:72*(i+1),:]))
print train_set_fold.shape
np.savetxt(file_path+'model_1_trainset.csv',train_set_fold,'%d')
# save CV total set
CV_set_fold = np.empty((0,5),dtype=int)
for i in CV_index:
CV_set_fold = np.vstack((CV_set_fold,model_1_volume[72*i:72*(i+1),:]))
print CV_set_fold.shape
np.savetxt(file_path+'model_1_CV_set.csv',CV_set_fold,'%d')
# save test set
test_set_fold = model_1_volume_testset_1
print test_set_fold.shape
np.savetxt(file_path+'model_1_testset.csv',test_set_fold,'%d')
print 'save model 2'
# save train total set
train_set_fold = np.empty((0,5),dtype=int)
for i in train_index:
train_set_fold = np.vstack((train_set_fold,model_2_volume[72*i:72*(i+1),:]))
print train_set_fold.shape
np.savetxt(file_path+'model_2_trainset.csv',train_set_fold,'%d')
# save CV total set
CV_set_fold = np.empty((0,5),dtype=int)
for i in CV_index:
CV_set_fold = np.vstack((CV_set_fold,model_2_volume[72*i:72*(i+1),:]))
print CV_set_fold.shape
np.savetxt(file_path+'model_2_CV_set.csv',CV_set_fold,'%d')
# save test set
test_set_fold = model_2_volume_testset_1
print test_set_fold.shape
np.savetxt(file_path+'model_2_testset.csv',test_set_fold,'%d')
print 'save model extra'
# save train total set
train_set_fold = np.empty((0,5),dtype=int)
for i in train_index:
train_set_fold = np.vstack((train_set_fold,model_extra_volume[72*i:72*(i+1),:]))
print train_set_fold.shape
np.savetxt(file_path+'model_extra_trainset.csv',train_set_fold,'%d')
# save CV total set
CV_set_fold = np.empty((0,5),dtype=int)
for i in CV_index:
CV_set_fold = np.vstack((CV_set_fold,model_extra_volume[72*i:72*(i+1),:]))
print CV_set_fold.shape
np.savetxt(file_path+'model_extra_CV_set.csv',CV_set_fold,'%d')
# save test set
test_set_fold = model_extra_volume_testset_1
print test_set_fold.shape
np.savetxt(file_path+'model_extra_testset.csv',test_set_fold,'%d')
| true |
3d0503b3be6fd54a6cd036d0711bc79ed05f2f17
|
Python
|
brandoneng000/LeetCode
|
/medium/921.py
|
UTF-8
| 590 | 3.578125 | 4 |
[] |
no_license
|
class Solution:
def minAddToMakeValid(self, s: str) -> int:
stack = []
for p in s:
if p == "(":
stack.append(p)
elif not stack and p == ")":
stack.append(")")
elif stack[-1] == "(" and p == ")":
stack.pop()
else:
stack.append(p)
return len(stack)
def main():
sol = Solution()
print(sol.minAddToMakeValid("()))"))
print(sol.minAddToMakeValid("())"))
print(sol.minAddToMakeValid("((("))
if __name__ == '__main__':
main()
| true |
0b035fc5168d98069a11bfd330cd03541c5583a5
|
Python
|
t-ye/pysha3
|
/hw5/utils.py
|
UTF-8
| 580 | 3.484375 | 3 |
[
"MIT"
] |
permissive
|
def expcount(base, modulus=None, coeff=1) :
"""
Yield coeff, coeff*base, coeff*(base**2), ...
"""
if modulus != None :
exp = coeff % modulus
else :
exp = coeff
while True :
yield exp
if modulus != None :
exp = (exp * base) % modulus
else :
exp = exp * base
def isqrt(n) :
from math import sqrt
return int(round(sqrt(n)))
def prod(itr, start=1) :
"""
Compute product between all elements of an iterable.
"""
val = start
for el in itr :
val *= el
return val
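# Illustrative usage (not part of the original file):
#   from itertools import islice
#   list(islice(expcount(2, modulus=7), 5))  # -> [1, 2, 4, 1, 2]
#   prod([1, 2, 3, 4])                       # -> 24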
| true |
43ff17579a95811026c9f8e69ee0cf100edfe977
|
Python
|
lengmoXXL/code-repo
|
/2019/9/random_file_image_gen.py
|
UTF-8
| 855 | 3.109375 | 3 |
[] |
no_license
|
import random
from PIL import Image
from io import BytesIO
def generate_random_file(size=random.randint(5000, 10000)):
ret = BytesIO()
for _ in range(size):
ret.write(random.randint(0, 255).to_bytes(1, byteorder='little'))
ret.seek(0)
return ret
def generate_random_picture(height=random.randint(100, 300), width=random.randint(100, 300)):
testImage = Image.new("RGB", (height, width), (255,255,255))
pixel = testImage.load()
for x in range(height):
for y in range(width):
red = random.randrange(0,255)
blue = random.randrange(0,255)
green = random.randrange(0,255)
pixel[x,y]=(red,blue,green)
ret = BytesIO()
testImage.save(ret, format='PNG')
ret.seek(0)
return ret
with open('test.txt', 'wb') as f:
f.write(generate_random_file().read())
| true |
7c6793b531a2b2fe6dad7818f50649b5e4b5c9fa
|
Python
|
Safery/RSX-Tracker
|
/v0.0/RSX_TrackerV2.py
|
UTF-8
| 4,286 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, \
AnnotationBbox
from matplotlib.cbook import get_sample_data
import matplotlib.image as mpimg
import numpy as np
fig, ax = plt.subplots()
class RSX_Mapper():
'''Initiate RSX_Mapper Class'''
def __init__(self, rcord=None, lcord=None):
'''(RSX_Mapper, [float, float], [float, float]) -> NoneType
'''
self.MapName = []
self.rcord = []
self.lcord = []
if ((rcord == None) or (lcord == None)):
cords = get_cord()
self.rcord = cords[0]
self.lcord = cords[1]
return None
else:
self.rcord = rcord[0]
self.lcord = lcord[1]
ax.set_xlim(float(lcord[0]),float(rcord[0]))
ax.set_ylim(float(lcord[1]),float(rcord[1]))
return None
def __str__(self):
'''(RSX_Mapper) -> str
'''
pass
def set_custom_cord(self, rcord, lcord):
'''
'''
ax.set_xlim(float(lcord[0]),float(rcord[0]))
ax.set_ylim(float(lcord[1]),float(rcord[1]))
self.lcord = [float(lcord[0]),float(rcord[0])]
self.rcord = [float(lcord[1]),float(rcord[1])]
plt.draw()
plt.show()
return None
def set_range(self, auto=True, ticks=None):
'''
'''
if (auto == True):
ax.set_xticks([0.000964875])
ax.set_yticks([0.000964875])
else:
ax.set_xticks([float(ticks)])
ax.set_yticks([float(ticks)])
return None
def set_img(self, longi=None, lat=None, MapName='map.png', lcord=None, rcord=None):
'''
'''
if ((longi == None) or (lat == None)):
get_inp = input('Must provide Longitude and Latitude [Lat, Longi]\n >>> ')
longi=get_inp[1]
lat=get_inp[0]
if (MapName in self.MapName):
get_inp = input('Map Image already exists. Continue (y/n)?\n >>> ')
if (get_inp == 'y'):
img = mpimg.imread("img/"+str(MapName))
imagebox = OffsetImage(img, zoom=0.5)
ab = AnnotationBbox(imagebox,
(longi, lat), xybox=(0, 0),
xycoords='data',
boxcoords="offset points",
pad=0, frameon=False)
self.MapName.append("img/"+str(MapName))
ax.add_artist(ab)
plt.draw()
else:
return 'No Map Image added.'
else:
'''img = mpimg.imread("img/"+str(MapName))
imagebox = OffsetImage(img, zoom=0.5)
ab = AnnotationBbox(imagebox, (longi, lat),
xybox=(0, 0),
xycoords='data',
boxcoords="offset points", pad=0, frameon=False)
self.MapName.append("img/"+str(MapName))
ax.add_artist(ab)
plt.draw()
'''
img = mpimg.imread("img/"+str(MapName))
plt.imshow(img, extent = [float(lcord[0]),float(rcord[0]),float(lcord[1]),float(rcord[1])])
plt.show()
def get_cord():
'''
'''
# Gets the top right Coordinate (Longitude and Latitude)
_top_rcord = []
get_input = input('What is the top right Coordinate?\n>>> ')
_top_rcord.append(get_input[:int(str(get_input.find(',')))])
_top_rcord.append(get_input[int(str(get_input.find(',')))+1:])
# Gets the bottom left Coordinate (Longitude and Latitude)
_bottom_rcord = []
get_input = input('What is the bottom left Coordinate?\n>>> ')
_bottom_rcord.append(get_input[:int(str(get_input.find(',')))])
_bottom_rcord.append(get_input[int(str(get_input.find(',')))+1:])
# Sets the axis length
ax.set_xlim(float(_bottom_rcord[0]),float(_top_rcord[0]))
ax.set_ylim(float(_bottom_rcord[1]),float(_top_rcord[1]))
return [_top_rcord, _bottom_rcord]
| true |
21609beae7007d6a4eece82362be724754d7a535
|
Python
|
TrevistoAG/DigitalFestival2019-DeepReinforcementLearning
|
/__main__.py
|
UTF-8
| 2,034 | 2.59375 | 3 |
[] |
no_license
|
import argparse
from agents import *
FLAGS = None
def main(FLAGS):
agent = PPO2(FLAGS)
if FLAGS.train == True:
print('---- TRAIN ----')
agent.train(game=FLAGS.game, state=FLAGS.state,num_e=FLAGS.envs)
if FLAGS.eval == True:
print('---- EVAL ----')
agent.evaluate(game=FLAGS.game, state=FLAGS.state)
if FLAGS.retrain == True:
print('---- RETRAIN ----')
agent.retrain(game=FLAGS.game, state=FLAGS.state,num_e=FLAGS.envs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--game',
type=str,
default='SonicTheHedgehog-Genesis',
help='Select Retro environment'
)
parser.add_argument(
'--state',
nargs='+',
default='GreenHillZone.Act1',
help='Select Retro environment Levels'
)
parser.add_argument(
'--eval',
default=False,
action='store_true',
help='Evaluate after training if set'
)
parser.add_argument(
'--train',
default=False,
action='store_true',
help='Train the algorithm if set'
)
parser.add_argument(
'--retrain',
default=False,
action='store_true',
help='Retrain the algorithm if set'
)
parser.add_argument(
'--render',
default=False,
action='store_true',
help='Render the environment if set'
)
parser.add_argument(
'--logdir',
type=str,
default='./logs',
help='Directory to save the tensorboard logfiles'
)
parser.add_argument(
'--model',
type=str,
default='./green_hill_1.pkl',
help='path and name of model file to evaluate'
)
parser.add_argument(
'--envs',
type=int,
default=1,
help='Amount of environments to train simultaneously'
)
# execute only if run as the entry point into the program
FLAGS, unparsed = parser.parse_known_args()
main(FLAGS)
| true |
fd9b81a23f258ada1d668bdaee68bce64ddb7347
|
Python
|
Shubzedm007/ShowCase
|
/BMI Calculator/BMI calculator.py
|
UTF-8
| 2,242 | 3.25 | 3 |
[] |
no_license
|
print('Welcome to my BMI scanner and workout planner')
print('Are you ready?')
answer = str(input())
if answer == 'yes':
print('lets get some measurements!')
print('what is your height in meters?')
height = float(input())
print('what is your weight in Kg?')
weight = float(input())
chest = ['bench press', 'incline press', 'cable flies']
legs = ['squats', 'leg press', 'lunges', 'deadlifts']
back = ['bent over rows', 'lat pull downs', 'pull-ups', 'reverse flys']
arms = ['barbell curls', 'tricep push-downs', 'hammer curles', 'tricep dips']
BMI = weight / (height ** 2)
print(BMI)
if (3 < BMI < 18.5):
print('you are underweight, but beautiful')
print('A caloric surplus is recommended')
print('how many calories do you intake?')
calorie = int(input())
goalc = str(calorie + 500)
print('aim for ' + goalc + ' calories!')
print('train for 6-8 heavy reps for 5 sets')
print('What muscle group are you planning to workout today?')
plan = str(input())
print('do this')
if plan == 'chest':
print(chest)
if plan == 'arms':
print(arms)
if plan == 'legs':
print(legs)
if plan == 'back':
print(back)
if 18.5 <= BMI <= 25:
print('you are normal and healthy!')
if BMI > 25.1:
print('you are overweight and beautiful')
print('A caloric deficit is recommended')
print('how many calories do you intake? ')
calorie = int(input())
goalc = str(calorie - 500)
print('aim for ' + goalc + ' calories!')
print('train for 8-12 moderate reps for 5 sets')
print('What muscle group are you planning to workout today?')
plan = str(input())
print('do this')
if plan == 'chest':
print(chest)
if plan == 'legs':
print(legs)
if plan == 'back':
print(back)
if plan == 'arms':
print(arms)
if BMI < 1:
print('double check if your measurements are entered correctly')
else:
print('Come back when you are ready!')
| true |
bc896cc30fac89d9aa0efccb14c03f46c3122012
|
Python
|
GiuliaSim/Community_Prediction
|
/csv_from_user_infos.py
|
UTF-8
| 924 | 2.578125 | 3 |
[] |
no_license
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode, avg, col, count
spark = SparkSession \
.builder \
.appName("BigData") \
.config("spark.mongodb.input.uri", "mongodb://127.0.0.1/BigData.user_infos") \
.getOrCreate()
df_user_infos = spark.read.format("com.mongodb.spark.sql.DefaultSource").load()
df = df_user_infos.select("user", "info.interests.all").toDF("user_id", "interests")
df.printSchema()
df = df.rdd \
.filter(lambda x: x.interests is not None) \
.flatMap(lambda x: [(x.user_id, interest[0], interest[1].score) for interest in x.interests.items()])
#.flatMap(lambda x: [(x.user_id, interest, x["score"]) for interest in x.interests])
df.toDF().printSchema()
df.toDF().show(10, False)
filepath = "/home/giulia/Documenti/BigData/Community_Prediction/data_user_interest_score"
df.toDF().write.format("csv").save(filepath)
print('Cartella creata: ' + filepath)
| true |
26652cbfdaaa91eb630398872230f235b4d77827
|
Python
|
Nikkuniku/AtcoderProgramming
|
/ABC/ABC200~ABC299/ABC240/d.py
|
UTF-8
| 360 | 2.78125 | 3 |
[] |
no_license
|
n=int(input())
a=list(map(int,input().split()))
from collections import deque
d=deque()
ans=0
last=-1
for i in range(n):
if last==a[i]:
d[-1][1]+=1
ans+=1
if d[-1][1]==a[i]:
ans-=d[-1][1]
d.pop()
else:
d.append([a[i],1])
ans+=1
last=-1
if d:
last=d[-1][0]
print(ans)
| true |
d93106a57a1e42fdc3cc8e959c66a5d73f8f672f
|
Python
|
Cphayim/learn-py
|
/src/package/decorate/c1.py
|
UTF-8
| 211 | 3.546875 | 4 |
[] |
no_license
|
import time
# Open-closed principle: closed for modification, open for extension
def f1():
print('This is a function')
def print_current_time(func):
print(time.time())
func()
print_current_time(f1)
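# Illustrative sketch (not part of the original file): the same idea expressed with
# decorator syntax, so the timestamp is printed automatically whenever f2 is called.
def print_time_decorator(func):
    def wrapper(*args, **kwargs):
        print(time.time())  # print the current timestamp before delegating to func
        return func(*args, **kwargs)
    return wrapper

@print_time_decorator
def f2():
    print('This is a decorated function')

# f2()  # would print the timestamp followed by the message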
| true |
b74261a22c483427091e8c9fbdf99f1ebc7e5466
|
Python
|
althetinkerer/price-comparison-app
|
/API/app.py
|
UTF-8
| 4,000 | 2.75 | 3 |
[] |
no_license
|
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import re
import requests
import json
# Read from config.json file
with open('config.json') as f:
config = json.load(f)
f.close()
app = Flask(__name__)
CORS(app)
def physicalPrices(upc):
value = {'target': 'Product not found!', 'walgreens': 'Product not found!', 'staples': 'Product not found!', 'walmart': 'Product not found!', 'bestbuy': 'Product not found!'}
# Target, Staples, Walgreens, Walmart API.
r = requests.get("https://api.barcodelookup.com/v2/products?barcode={}&formatted=y&key={}".format(upc, config['barcodelookup_apikey']))
response = r.json()
for i in range(len(response["products"][0]['stores'])):
if response["products"][0]['stores'][i]["store_name"] == "Target":
value["target"] = response["products"][0]['stores'][i]["store_price"]
elif response["products"][0]['stores'][i]["store_name"] == "Walgreens":
value["walgreens"] = response["products"][0]['stores'][i]["store_price"]
elif response["products"][0]['stores'][i]["store_name"] == "Staples":
value["staples"] = response["products"][0]['stores'][i]["store_price"]
elif response["products"][0]['stores'][i]["store_name"] == "Walmart":
value["walmart"] = response["products"][0]['stores'][i]["store_price"]
else:
pass
# Best Buy API
r = requests.get("https://api.upcitemdb.com/prod/trial/lookup?upc={}".format(upc))
response = r.json()
for i in range(len(response['items'][0]['offers'])):
if response['items'][0]['offers'][i]['merchant'] == 'Best Buy':
value['bestbuy'] = str(response['items'][0]['offers'][i]['price'])
else:
pass
return value
def onlinePrices(upc):
value = {'tigerdirect': 'Product not found!'}
# Tigerdirect API
r = requests.get("https://api.upcitemdb.com/prod/trial/lookup?upc={}".format(upc))
response = r.json()
for i in range(len(response['items'][0]['offers'])):
if response['items'][0]['offers'][i]['merchant'] == 'TigerDirect':
value['tigerdirect'] = str(response['items'][0]['offers'][i]['price'])
else:
pass
return value
def productInfo(upc):
value = {}
r = requests.get("https://api.barcodelookup.com/v2/products?barcode={}&formatted=y&key={}".format(upc,config['barcodelookup_apikey']))
response = r.json()
value['productname'] = response["products"][0]['product_name']
value['productimage'] = response["products"][0]['images'][0]
return value
def status():
value = {'target': False, 'walgreens': False, 'staples': False, 'walmart': False, 'bestbuy': False, 'tigerdirect': False}
# API CHECKS - BarcodeLookup
r = requests.get("https://api.barcodelookup.com/v2/products?barcode=190199098428&formatted=y&key={}".format(config['barcodelookup_apikey']))
if r.status_code == 200:
value['target'] = True
value['walgreens'] = True
value['staples'] = True
value['walmart'] = True
# UPCITEMDB API CHECK
r2 = requests.get("https://api.upcitemdb.com/prod/trial/lookup?upc=190199098428")
if r2.status_code == 200:
value['bestbuy'] = True
value['tigerdirect'] = True
return value
@app.route('/getstatus', methods=['GET'])
def statusCall():
return jsonify(status())
@app.route('/getphysicalprice', methods=['GET'])
def physicalPricesCall():
upc = request.args.get("upc")
status = physicalPrices(upc)
return jsonify(status)
@app.route('/getonlineprice', methods=['GET'])
def onlinePricesCall():
upc = request.args.get("upc")
status = onlinePrices(upc)
return jsonify(status)
@app.route('/getproductinfo', methods=['GET'])
def productInfoCall():
upc = request.args.get("upc")
status = productInfo(upc)
return jsonify(status)
| true |
752a3df3e91b6647158afa6d92d16f936db7c2a8
|
Python
|
khs50851/git-crawling
|
/section02_1/section02_1/spiders/class02_1.py
|
UTF-8
| 1,195 | 3.234375 | 3 |
[] |
no_license
|
import scrapy
class Class021Spider(scrapy.Spider):
name = 'test2'
allowed_domains = ['blog.scrapinghub.com']
start_urls = ['https://blog.scrapinghub.com/']
def parse(self, response):
"""
:param : response
:return : Title Text (returns the title text)
"""
# Two approaches (CSS Selector, XPATH)
# get() fetches a single item <-> getall()/extract() fetch everything <-> extract_first() also fetches a single item
# CSS
# ::text extracts only the text
# getall() returns a list
# Output options:
# -o filename.extension, -t file format (json, jsonlines, jl, csv, xml, marshal, pickle)
# for text in response.css('div.post-header > h2 > a::text').getall():
# # return type : one of Request, BaseItem, Dictionary, None
# yield {'title': text}
# XPATH
# text is extracted with the text() function in XPATH
for i, text in enumerate(response.xpath('//div[@class="post-header"]/h2/a/text()').getall(), 1):
yield{
'number': i,
'text': text
}
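# Example run using the output options noted above (the output filename is an assumption):
#   scrapy crawl test2 -o titles.csv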
| true |
3e6a9694d8d10ebd0884779ab2defc8a788be814
|
Python
|
jnlorenzi/rRNA_Evolution_2022
|
/make_ani.py
|
UTF-8
| 2,613 | 3.109375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import os
import re
import sys
import shutil
import argparse
def parseArguments():
# Create argument parser
parser = argparse.ArgumentParser(description = 'This script computes all the pairwise ANIb for each genome in the given repository (sequence must be in fasta format). \n\t # Command line example: python make_ani.py C:/Users/Lorenzi/Documents/Streptomyces/fasta/ C:/Users/Lorenzi/Documents/Streptomyces/anib/')
# Positional mandatory arguments
parser.add_argument("fasta_directory", help="absolute or relative path to the fasta directory", type=str)
parser.add_argument("output_anib", help="absolute or relative path to the outfile anib directory", type=str)
# Print version
parser.add_argument("--version", action="version", version='%(prog)s - Version 1.5 - 31.03.2022')
# Parse arguments
args = parser.parse_args()
return args
def ANIb_calculator(path_to_genome, path_to_output):
""" Computes the ANIb matrix for genomes in the repository.
ANI method follows the basic algorithm:
- Align the genome of organism 1 against that of organism 2, and identify
the matching regions
- Calculate the percentage nucleotide identity of the matching regions, as
an average for all matching regions
Methods differ on: (1) what alignment algorithm is used, and the choice of
parameters (this affects the aligned region boundaries); (2) what the input
is for alignment (typically either fragments of fixed size, or the most
complete assembly available); (3) whether a reciprocal comparison is
necessary or desirable.
ANIb: uses BLASTN to align 1000nt fragments of the input sequences
This script takes as main input a directory containing a set of
correctly-formatted FASTA multiple sequence files. All sequences for a
single organism should be contained in only one sequence file. The names of
these files are used for identification, so it would be advisable to name
them sensibly.
"""
os.system('nice -n 19 ./average_nucleotide_identity.py -i ' + path_to_genome + ' -o ' + path_to_output + ' -m ANIb -v -f')
''' Calculates the ANIb distance matrix for all the species in the collection '''
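# Illustrative sketch (not part of the original pipeline): the docstring above describes ANI
# as the average percentage nucleotide identity over the matching regions of the alignment.
# Given hypothetical (percent_identity, aligned_length) pairs for the 1000 nt fragments that
# found a match, a length-weighted ANI could be computed like this:
def ani_from_fragments(fragment_hits):
    """fragment_hits: iterable of (percent_identity, aligned_length) tuples for matching fragments."""
    total_length = sum(length for _, length in fragment_hits)
    if total_length == 0:
        return 0.0
    return sum(identity * length for identity, length in fragment_hits) / total_length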
def main():
args = parseArguments()
fasta_directory = args.fasta_directory
output_anib = args.output_anib
ANIb_calculator(fasta_directory, output_anib)
if __name__ == "__main__":
main()
| true |
7b3b9eb229506e75c63d345689e77a0345f01f87
|
Python
|
sajinchrisantony/pythonprograms
|
/functionalprogramming/filter.py
|
UTF-8
| 140 | 3.234375 | 3 |
[] |
no_license
|
lst=[2,3,4,5,6]
#even nos
evens=list(filter(lambda num:num%2==0,lst))
print(evens)
odd=list(filter(lambda num:num%2!=0,lst))
print(odd)
| true |
194963bd5509804430a768a920c1cd6840a472a7
|
Python
|
17111748/Falcon-the-Gym-Pro-Assistant-
|
/Signal_Processing/legRaiseAnalysis.py
|
UTF-8
| 6,529 | 2.96875 | 3 |
[] |
no_license
|
import numpy as np
import os
expectedHipAngle = 100
parallel = 180
perpendicular = 90
# bodyParts[0] = Shoulder
# bodyParts[1] = Elbow
# bodyParts[2] = Wrists
# bodyParts[3] = Hip
# bodyParts[4] = DefaultKnee
# bodyParts[5] = OtherKnee
# bodyParts[6] = DefaultAnkle
# bodyParts[7] = OtherAnkle
class LegRaiseResult:
# Feedback
def __init__(self):
self.feedback = []
self.invalid = []
self.check1 = False
self.check2 = False
self.check3 = False
self.check4 = False
def processResult(self):
self.feedback = []
if (self.check1):
tuple = ("Raise Your Legs Higher", os.path.join("audioFiles", "legRaise", "raiseHigh.mp3"))
self.feedback.append(tuple)
elif (self.check2):
tuple = ("Over-Extending", os.path.join("audioFiles", "legRaise", "overExtend.mp3"))
self.feedback.append(tuple)
elif (self.check3):
tuple = ("Knees are Bent", os.path.join("audioFiles", "legRaise", "kneeBent.mp3"))
self.feedback.append(tuple)
else:
tuple = ("Perfect Rep!", os.path.join("audioFiles", "perfect.mp3"))
self.feedback.append(tuple)
if (self.invalid):
self.feedback = []
str = "Invalid Joints Detected:"
            # check each joint independently so that every invalid joint is listed
            if (0 in self.invalid):
                str += " Shoulder,"
            if (1 in self.invalid):
                str += " Elbow,"
            if (2 in self.invalid):
                str += " Wrist,"
            if (3 in self.invalid):
                str += " Hip,"
            if (4 in self.invalid):
                str += " Knee,"
            if (5 in self.invalid):
                str += " Other Knee,"
            if (6 in self.invalid):
                str += " Ankle,"
            if (7 in self.invalid):
                str += " Other Ankle,"
str = str[:-1] + "!"
tuple = (str, os.path.join("audioFiles", "invalid.mp3"))
self.feedback.append(tuple)
def getResult(self):
self.check1 = False
self.check2 = False
self.check3 = False
self.invalid = []
return self.feedback
class LegRaisePostureAnalysis:
def __init__(self):
self.legRaise = LegRaiseResult()
# Helper Functions
def getSlope(self, pos0, pos1):
height = 120
y1 = height - pos1[0]
x1 = pos1[1]
y0 = height - pos0[0]
x0 = pos0[1]
if ((x1 - x0) == 0):
if (y1 != y0):
return float("inf")
else:
return 0
return (y1-y0)/(x1-x0)
def getAngle(self, Point1, MidPoint, Point2):
a = np.array(Point1)
b = np.array(MidPoint)
c = np.array(Point2)
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
def sameSlope(self, slope1, slope2, threshold = 0.1):
return abs(slope1 - slope2) < threshold
def sameAngle(self, angle1, angle2, threshold = 0.1):
return abs(angle1 - angle2) < threshold
def samePos(self, pos0, pos1, threshold = 0):
return abs(pos0 - pos1) <= threshold
def lessThan(self, pos0, pos1, threshold = 0):
return (pos0 - threshold) <= pos1
def greaterThan(self, pos0, pos1, threshold = 0):
return pos0 >= (pos1 - threshold)
# Line 1: Shoulder - Hip
# Line 2: Hip - Knee
# Line 3: Knee - Ankle
def feedbackCalculation(self, bodyParts, default=True):
shoulder = (int(bodyParts[0][0]), int(bodyParts[0][1]))
hip = (int(bodyParts[3][0]), int(bodyParts[3][1]))
knee = (int(bodyParts[4][0]), int(bodyParts[4][1]))
ankle = (int(bodyParts[6][0]), int(bodyParts[6][1]))
if (shoulder[0] == 0 and shoulder[1] == 0):
self.legRaise.invalid.append(0)
if (hip[0] == 0 and hip[1] == 0):
self.legRaise.invalid.append(3)
if (knee[0] == 0 and knee[1] == 0):
self.legRaise.invalid.append(4)
if (ankle[0] == 0 and ankle[1] == 0):
self.legRaise.invalid.append(6)
# line1Slope = self.getSlope(shoulder, hip)
# line2Slope = self.getSlope(hip, knee)
# line3Slope = self.getSlope(knee, ankle)
angleHip = self.getAngle(shoulder, hip, knee)
angleKnee = self.getAngle(hip, knee, ankle)
if not (self.sameAngle(angleHip, expectedHipAngle, 10)):
if(angleHip > expectedHipAngle):
self.legRaise.check1 = True
if not (self.lessThan(perpendicular, angleHip, 3)):
self.legRaise.check2 = True
if not (self.sameAngle(angleKnee, parallel, 15)):
self.legRaise.check3 = True
self.legRaise.processResult()
def getResult(self):
return self.legRaise.getResult()
# NOTE:
# Raise your Legs Higher: since we are tracking the hip and not the butt, a perfect rep would be slanted, about 110 degrees with +/- 5 degrees
#############################################################
# perfect = [(91.5, 33.5), (96.0, 49.0), (96.0, 65.5), (88.0, 55.5), (41.5, 66.5), (46.0, 72.0), (22.0, 70.0), (27.0, 75.0)]
# over = [(93.0, 32.0), (96.5, 47.5), (97.0, 66.0), (83.5, 49.0), (40.5, 36.5), (42.5, 42.5), (21.5, 35.0), (24.5, 37.5)]
# under = [(92.5, 32.5), (96.5, 48.5), (97.0, 66.0), (89.5, 56.0), (49.5, 81.5), (90.0, 59.0), (33.5, 90.5), (0.0, 0.0)]
# kneeBent = [(92.0, 33.0), (96.5, 48.5), (97.0, 65.0), (89.5, 56.5), (45.5, 63.5), (51.0, 70.0), (31.0, 77.0), (35.5, 80.5)]
# invalid = [(0.0, 0.0), (29.5, 80.5), (15.5, 87.5), (49.5, 88.5), (65.0, 127.5), (96.5, 66.5), (84.5, 118.5), (92.0, 48.5)]
# legRaise = LegRaisePostureAnalysis()
# legRaise.feedbackCalculation(invalid)
# result = legRaise.getResult()
# print("Invalid: " + str(result))
# print("\n")
# legRaise.feedbackCalculation(perfect)
# result = legRaise.getResult()
# print("Perfect: " + str(result))
# print("\n")
# legRaise.feedbackCalculation(over)
# result = legRaise.getResult()
# print("Over: " + str(result))
# print("\n")
# legRaise.feedbackCalculation(under)
# result = legRaise.getResult()
# print("Under: " + str(result))
# print("\n")
# legRaise.feedbackCalculation(kneeBent)
# result = legRaise.getResult()
# print("Knee Bent: " + str(result))
| true |
602fc46cb419baa4d5b2cc37c1b54d5172f6c685
|
Python
|
Fumipo-Theta/matdat
|
/matdat/dotdict.py
|
UTF-8
| 1,472 | 3.40625 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
class dotdict(object):
"""ใชใใธใงใฏใใฐใฉใๅ
ใฎ่พๆธ่ฆ็ด ใใใญใใใฃ้ขจใซใขใฏใปในใใใใจใๅฏ่ฝใซใใใฉใใใผใ
DotAccessible( { 'foo' : 42 } ).foo==42
ใกใณใใผใๅธฐ็ด็ใซใฏใใใใใใจใซใใใใฎๆๅใไธๅฑคใชใใธใงใฏใใซใไธใใใ
DotAccessible( { 'lst' : [ { 'foo' : 42 } ] } ).lst[0].foo==42
"""
def __init__(self, obj):
self.obj = obj
def __repr__(self):
return "DotAccessible(%s)" % repr(self.obj)
def __getitem__(self, i):
"""ใชในใใกใณใใผใใฉใใ"""
return self.wrap(self.obj[i])
def __getslice__(self, i, j):
"""ใชในใใกใณใใผใใฉใใ"""
return map(self.wrap, self.obj.__getslice__(i, j))
def __getattr__(self, key):
"""่พๆธใกใณใใผใใใญใใใฃใจใใฆใขใฏใปในๅฏ่ฝใซใใใ
่พๆธใญใผใจๅใๅใฎใใญใใใฃใฏใขใฏใปในไธๅฏใซใชใใ
"""
if isinstance(self.obj, dict):
try:
v = self.obj[key]
except KeyError:
v = self.obj.__getattribute__(key)
else:
v = self.obj.__getattribute__(key)
return self.wrap(v)
def wrap(self, v):
"""่ฆ็ด ใใฉใใใใใใใฎใใซใใผ"""
if isinstance(v, (dict, list, tuple)): # xx add set
return self.__class__(v)
return v
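# Illustrative usage sketch (the nested data below is made up, not from the original project):
#   config = dotdict({'db': {'host': 'localhost', 'ports': [5432]}})
#   config.db.host       # -> 'localhost'
#   config.db.ports[0]   # -> 5432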
| true |
e55a8f002fc22762e4072037f8f82192b28503d1
|
Python
|
trychiOO/python_all
|
/learn/python_all/porint/xiami.py
|
UTF-8
| 1,161 | 3.34375 | 3 |
[] |
no_license
|
import requests
from lxml import etree
url_ = "https://www.xiami.com/artist?spm=a1z1s.2943549.1110925385.4.PnoO3b"
headers_ = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87'
}
page_source = requests.get(url=url_, headers=headers_)
print("ๅๅบ็ปๆ๏ผ\n", page_source.text)
model = etree.HTML(page_source.text)
songs_list = model.xpath('//div[@class="info"]/p[1]/strong/a/text()')
songer = model.xpath("//div[@class='info']/p[2]")
print("ๆญๆฒไธชๆฐ๏ผ{} ๆญๆไธชๆฐ๏ผ{}".format(len(songs_list), len(songer)))
for index, item in enumerate(songs_list):
    # Handle the case where one song has several artists: each artist sits in its own <a> tag,
    # so we collect the multiple <a> tags into a list and process them together.
songer_list = songer[index].xpath(".//a/text()")
    # The artists of each song form a list; join() concatenates every item of the list into a single string.
dealed_songer = ",".join(songer_list)
    # Finally print the result in the desired format
print("{}ใ{}\tใ{}ใ".format(index + 1, item, dealed_songer))
| true |
2d2dd61ce5b96f8e41fe17613414983765b65e60
|
Python
|
agneet42/IoT
|
/Social/user_sensor.py
|
UTF-8
| 1,305 | 2.796875 | 3 |
[] |
no_license
|
import csv
from collections import Counter
f = csv.reader(open('nodes_things.csv','r'))
f2 = csv.reader(open('nodes_things.csv','r'))
f1 = csv.reader(open('nodes_users.csv', 'r'))
arr_t = []
arr_u = []
for rows in f:
value = rows[0]
if(len(value)==3):
arr_t.append(value)
for rows in f1:
arr_u.append(rows[0])
final_arr = []
for i in range(0,len(arr_t)):
value = int(arr_t[i])
for j in range(0,len(arr_u)):
if(value == int(arr_u[j])):
final_arr.append(value)
break
final_arr.sort()
# print((final_arr))
for i in range(0,len(final_arr)):
final_arr[i] = str(final_arr[i])
# print(final_arr)
'''file1 = csv.writer(open("UNodes.csv","a"))
# for elements in final_arr:
# file1.writerow([elements])
for obj in f2:
print(obj)
if(len(obj[0]) > 3):
file1.writerow([obj[0]])'''
a = csv.reader(open('edges_things.csv','r'))
a1 = csv.reader(open('nodes_things.csv','r'))
b = csv.writer(open("TEdges.csv","w"))
final_arr1 = []
for rows in a1:
if(len(rows[0]) > 3):
final_arr1.append(rows[0])
# print(final_arr1)
for rows in a:
check = 0
value = rows[0]
value1 = rows[1]
for i in final_arr:
if(value == i):
check = check + 1
break
for j in final_arr1:
if(value1 == j):
check = check + 1
break
if(check == 2):
b.writerow([value,value1,rows[2]])
| true |
168486ce3ce5d866fd1f4314cb2169079f65ef14
|
Python
|
shloch/CODING_PRACTICE
|
/Python/helloWorldName.py
|
UTF-8
| 591 | 4.34375 | 4 |
[
"MIT"
] |
permissive
|
'''
Define a method hello that returns "Hello, Name!" to a given name, or says Hello, World! if name is not given (or passed as an empty String).
Assuming that name is a String and it checks for user typos to return a name with a first capital letter (Xxxx).
Examples:
hello "john" => "Hello, John!"
hello "aliCE" => "Hello, Alice!"
hello => "Hello, World!" # name not given
hello "" => "Hello, World!" # name is an empty String
'''
def hello(name = ""):
if(len(name) > 0):
return "Hello, " + name.capitalize() + "!"
else:
return "Hello, World!"
| true |
c00b140241ad2535780bdf25afcfcc5910219727
|
Python
|
mdhvkothari/Python-Program
|
/simple/stare.py
|
UTF-8
| 121 | 3.359375 | 3 |
[] |
no_license
|
a = int(input("enter the number :"))
for i in range(0,a):
for j in range(0,i+1):
print "*",
print "\n"
| true |
c3723b17ddbe18287baf0bbfc2bb821fd127fc5b
|
Python
|
grasshopperTrainer/coding_practice
|
/baekjoon/accepted/20055 ์ปจ๋ฒ ์ด์ด ๋ฒจํธ ์์ ๋ก๋ด.py
|
UTF-8
| 2,729 | 2.859375 | 3 |
[] |
no_license
|
from sys import stdin
from collections import deque
class Place:
half_len = None
num_broken = 0
convoyer = deque()
def __init__(self, id, dur):
self.id = id
self.__dur = dur
if self.convoyer:
self.convoyer[-1].__next = self
self.__next = self.convoyer[0]
self.convoyer.append(self)
self.robot = None
def __str__(self):
return f"<Place {self.id, self.__dur, bool(self.robot)}>"
def __repr__(self):
return self.__str__()
@property
def next(self):
return self.__next
@property
def dur(self):
return self.__dur
@dur.setter
def dur(self, v):
self.__dur = v
if self.__dur == 0:
self.__class__.num_broken += 1
self.robot = None
@classmethod
def move_ahead(cls):
cls.convoyer.appendleft(cls.convoyer.pop())
def is_dropping(self):
return bool(self == self.convoyer[self.half_len- 1])
def drop(self):
self.robot.place = None
self.robot = None
class Robot:
def __init__(self, place):
self.place = place
def __str__(self):
return f"<Robot {self.place}>"
def __repr__(self):
return self.__str__()
def move_ahead(self):
if self.place.next.dur and self.place.next.robot is None:
self.place.robot = None
self.place.next.dur -= 1
self.place.next.robot = self
self.place = self.place.next
def solution(N, K, durabilities):
Place.half_len = N
robots = []
for i, j in enumerate(durabilities, 1):
Place(i, j)
turn = 0
while Place.num_broken < K:
turn += 1
# 1
Place.move_ahead()
# 2
new_robots = []
for r in robots:
if r.place.is_dropping():
r.place.drop()
else:
r.move_ahead()
if r.place.is_dropping(): # dropping place
r.place.drop()
else:
new_robots.append(r)
robots = new_robots
# 3
picking = Place.convoyer[0]
if picking.dur and picking.robot is None:
picking.dur -= 1
robot = Robot(picking)
picking.robot = robot
robots.append(robot)
# print(list(Place.convoyer)[:N])
# print(list(reversed(list(Place.convoyer)[N:])))
# print()
# 4
if K <= Place.num_broken:
return turn
N, K = list(map(int, stdin.readline().strip().split(' ')))
durabilities = list(map(int, stdin.readline().strip().split(' ')))
print(solution(N, K, durabilities))
"""
2 4
1 1 1 1
"""
| true |
daacd9a9fae90982d8464d435fffc7c9097aff98
|
Python
|
MinSu-Kim/python_tutorial
|
/sqlalchemy_tutorial/tutorialspoint/sqlalchemy_orm_building_relationship.py
|
UTF-8
| 1,739 | 2.546875 | 3 |
[] |
no_license
|
from sqlalchemy import create_engine, ForeignKey, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
Base = declarative_base()
class Invoice(Base):
__tablename__ = 'invoices'
id = Column(Integer, primary_key=True)
custid = Column(Integer, ForeignKey('customers.id'))
invno = Column(Integer)
amount = Column(Integer)
customer = relationship("Customer", back_populates="invoices")
def __repr__(self):
return "<Invoice(id='{0}', custid='{1}', invno='{2}', amount='{3}', customer='{4}'>".\
format(self.id, self.custid, self.invno, self.amount, self.customer)
class Customer(Base):
__tablename__ = 'customers'
id = Column(Integer, primary_key=True)
name = Column(String(length=20))
address = Column(String(length=60))
email = Column(String(length=60))
    invoices = relationship( # defined starting from the child class
"Invoice",
order_by=Invoice.id,
back_populates="customer")
def __repr__(self):
return "<Customers(id='{0}', name='{1}', address='{2}', email='{3}'>".format(self.id, self.name, self.address,
self.email)
def drop_create_table(engine):
Base.metadata.drop_all(bind=engine, tables=[Invoice.__table__, Customer.__table__])
Base.metadata.create_all(engine)
if __name__ == "__main__":
engine = create_engine('mysql+mysqlconnector://user_coffee:rootroot@localhost/coffee?use_pure=True', echo=True)
session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
    drop_create_table(engine)
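    # Illustrative continuation (not in the original snippet): create a customer with two
    # invoices, let the relationship fill in the foreign keys, then read them back through
    # the back reference. Names and amounts are made up.
    c1 = Customer(name="Gordon", address="Street 15", email="gordon@example.com")
    c1.invoices = [Invoice(invno=10, amount=15000), Invoice(invno=14, amount=3850)]
    session.add(c1)
    session.commit()
    for inv in session.query(Invoice).all():
        print(inv.customer.name, inv.invno, inv.amount)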
| true |
95e1834e9686882983a7f3d2d61a18d650fd6e02
|
Python
|
texnofobix/python-genbadge
|
/genbadge/utils_flake8.py
|
UTF-8
| 6,233 | 2.703125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-genbadge>
#
# License: 3-clause BSD, <https://github.com/smarie/python-genbadge/blob/master/LICENSE>
from __future__ import division
from warnings import warn
import re
from .utils_badge import Badge
try:
# flake8-html is an optional dependency, do not fail too soon if it cant be loaded
import flake8_html
except ImportError as e:
ee = e # save it
class FakeFlake8HtmlImport(object): # noqa
def __getattribute__(self, item):
raise ImportError("Could not import `flake8_html` module, please install it. "
"Note that all dependencies for the flake8 command can be installed with "
"`pip install genbadge[flake8]`. Caught: %r" % ee)
flake8_html = FakeFlake8HtmlImport()
class Flake8Stats(object):
"""
Contains the results from parsing the flake8 report.
The severity levels are defined by flake8-html
"""
def __init__(self,
nb_critical=0, nb_warning=0, nb_info=0
):
# severities 1, 2, 3
self.nb_critical = nb_critical
self.nb_warning = nb_warning
self.nb_info = nb_info
def add(self,
nb, # type: int
code # type: str
):
"""
Add `nb` errors with the same code to the statistics.
"""
severity = flake8_html.plugin.find_severity(code)
if severity == 1:
self.nb_critical += nb
elif severity == 2:
self.nb_warning += nb
elif severity == 3:
self.nb_info += nb
else:
raise ValueError("Unknown severity: %r for code %r" % (severity, code))
@property
def nb_total(self):
return self.nb_critical + self.nb_warning + self.nb_info
def get_color(
flake8_stats # type: Flake8Stats
):
""" Returns the badge color to use depending on the flake8 results """
if flake8_stats.nb_critical > 0:
color = 'red'
elif flake8_stats.nb_warning > 0:
color = 'orange'
elif flake8_stats.nb_info > 0:
color = 'green'
else:
color = 'brightgreen'
return color
def get_flake8_badge(
flake8_stats # type: Flake8Stats
):
# type: (...) -> Badge
"""Return the badge from coverage results """
color = get_color(flake8_stats)
right_txt = "%s C, %s W, %s I" % (flake8_stats.nb_critical, flake8_stats.nb_warning, flake8_stats.nb_info)
return Badge(left_txt="flake8", right_txt=right_txt, color=color)
def get_flake8_stats(flake8_stats_file):
# type: (...) -> Flake8Stats
"""
Reads an index.html file obtained from flake8-html.
"""
if isinstance(flake8_stats_file, str):
# assume a file path
with open(flake8_stats_file) as f:
flake8_stats_txt = f.read()
else:
# assume a stream already
flake8_stats_txt = flake8_stats_file.read()
return parse_flake8_stats(flake8_stats_txt)
RE_TO_MATCH = re.compile(r"([0-9]+)\s+([A-Z0-9]+)\s.*")
def parse_flake8_stats(stats_txt # type: str
):
# type: (...) -> Flake8Stats
stats = Flake8Stats()
for line in stats_txt.splitlines():
match = RE_TO_MATCH.match(line)
if not match:
warn("Line in Flake8 statistics report does not match template and will be ignored: %r" % line)
else:
nb, code = match.groups()
stats.add(int(nb), code)
return stats
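# Illustrative example of the expected input (counts and codes are made up): flake8's
# statistics output has one "<count> <code> <description>" line per violation code, so
#
#   stats = parse_flake8_stats("3     E501 line too long (88 > 79 characters)\n"
#                              "1     F401 'os' imported but unused")
#
# would give stats.nb_total == 4, with the split across critical/warning/info decided
# by flake8_html.plugin.find_severity.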
# def parse_flake8_html(html # type: str
# ):
# #
# """Reads the flake8 html report"""
# soup = bs4.BeautifulSoup(html, "html.parser")
#
# # check title
# title = soup.head.title.get_text()
# assert title == 'flake8 violations', "Invalid flake8 html report found, unexpected title: %s" % title
#
# # get page div
# pagediv = soup.body.find("div", {"id": "page"})
# assert pagediv.h1.get_text() == 'flake8 violations'
#
# results_dct = dict()
# ul_violations = pagediv.ul
# for li in ul_violations.find_all('li'):
# # synthesis
# typ_str, severity_str = li.a.span['class']
# assert typ_str == 'count'
# assert severity_str.startswith('sev-')
# count = int(li.a.span.get_text().strip())
# worst_severity_nb = int(severity_str[4:])
#
# count2, file_name = list(li.stripped_strings)
# assert int(count2) == count
#
# # we need to access the details because the count is not correct
# li_href = li.a['href']
# child_results_dct = parse_child_html(path, li_href)
# for c_severity_nb, c_count in child_results_dct.items():
# try:
# results_dct[c_severity_nb] += c_count
# except KeyError:
# results_dct[c_severity_nb] = c_count
#
# assert worst_severity_nb == min(child_results_dct.keys())
# assert count == sum(child_results_dct.values())
#
# return results_dct
#
# def parse_child_html(path, # type: str
# suffix # type: str
# ):
# with open(path + suffix) as f:
# html_child = f.read()
# soup_child = bs4.BeautifulSoup(html_child, "html.parser")
#
# # check title
# title = soup_child.head.title.get_text()
# assert title.startswith('flake8 violations'), "Invalid flake8 html report found, unexpected title: %s" % title
#
# # get page div
# pagediv = soup_child.body.find("div", {"id": "page"})
# # assert pagediv.h1.get_text() == 'flake8 violations'
#
# results_dct = dict()
# ul_violations = pagediv.ul
# for li in ul_violations.find_all('li', recursive=False):
# code = li.a['data-code'] # F401, etc.
# typ_str, severity_str = li.a.span['class']
# assert typ_str == 'count'
# assert severity_str.startswith('sev-')
# count = int(li.a.span.get_text().strip())
# severity_nb = int(severity_str[4:])
#
# try:
# results_dct[severity_nb] += count
# except KeyError:
# results_dct[severity_nb] = count
#
# return results_dct
| true |
8d3261beaa921b61be8576d1d6f4e6d5d4810cfc
|
Python
|
joeyhuaa/Texas-Hold-Em
|
/poker/Cards.py
|
UTF-8
| 2,709 | 3.90625 | 4 |
[] |
no_license
|
# Card and Deck class definitions
class Card:
# first, the suits and ranks, in order
# note the order is important as it is used to compare
suits = ["c", "d", "h", "s"]
ranks = ["2", "3", "4", "5", "6", "7", "8", "9", "T", "J", "Q", "K", "A"]
# create a card; default is AS
def __init__(self, rank="A", suit="s"):
self.rank = rank
self.suit = suit
# generate a string representing the card
def __str__(self):
return self.rank + self.suit
# compare two cards
def __eq__(self, other_card):
if self.rank == other_card.rank and self.suit == other_card.suit:
return True
else:
return False
# compare suits
def scmp(self, c):
# get the number of the suits
# for each card; the sign of the
# difference is the result
ss = Card.suits.index(self.suit)
cs = Card.suits.index(c.suit)
return ss - cs
# compare ranks
def rcmp(self, c):
# get the number of the ranks
# for each card; the sign of the
# difference is the result
sr = Card.ranks.index(self.rank)
cr = Card.ranks.index(c.rank)
return sr - cr
# compare, taking rank and suit into account
def cmp(self, c):
# test suits
sv = self.scmp(c)
if sv != 0:
return sv
# test ranks; if you get here, whatever
# this returns is it!
return self.rcmp(c)
class Deck:
# create a deck of cards
def __init__(self):
self.cards = [Card(r,s) for s in Card.suits for r in Card.ranks]
# print the cards in the deck separated by blanks
# note we have a leading blank so we strip it
def __str__(self):
s = ''
for c in self.cards:
s = s + ' ' + str(c)
return s.strip()
# get number of cards remaining in the deck
def __len__(self):
return len(self.cards)
# return T/F
def has(self, card):
return self.cards.__contains__(card)
# reconstruct the deck, the shuffle
def shuffle_reset(self):
import random as rng
# reconstruct the full deck
# then shuffle
self.__init__()
rng.shuffle(self.cards)
# shuffle the current deck
def shuffle(self):
import random as rng
rng.shuffle(self.cards)
# deal a card, returns that card
def deal(self, card=None):
if card is None:
return self.cards.pop(0)
else:
return self.cards.remove(card)
# remove a specific card from deck, returns nothing
def remove(self, card):
if self.has(card):
self.cards.remove(card)
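# Illustrative usage sketch (not part of the original module):
#
#   deck = Deck()
#   deck.shuffle()
#   hole_cards = [deck.deal() for _ in range(2)]
#   print(hole_cards[0], len(deck))   # e.g. "Qh 50"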
| true |
6b4bdf802013bed0645a4985fd95af2eab70996a
|
Python
|
riaz4519/python_practice
|
/programiz/_51_dictonary.py
|
UTF-8
| 457 | 4.03125 | 4 |
[] |
no_license
|
#normal
dict1 = {1:'Geeks',2:'for',3:'geeks'}
print(type(dict1))
#with different type
dict2 = {1:"geek",'name':"Fahim",'rol':5,'List':[1,2,3,5,6]}
#printing a dictionary
for x in dict2:
print(dict2[x])
#creating with dict method
dict3 = dict({1:2,2:"faghi"})
#creating pair
dict4 = dict([(1,3),("FAhim",4)])
print(dict4)
#accessing
print(dict2[1])
print(dict2.get(1))
#removing element
del dict2[1]
print(dict2)
print(dict2.pop('name'))
| true |
73523a4d9f45b48b75c2be97a297b425c700ea90
|
Python
|
hiroshikinofu/hoihoi
|
/hogehoge/python/sura2yomeruPython/chap2/chap2-3-1.py
|
UTF-8
| 74 | 3.359375 | 3 |
[] |
no_license
|
text = input('Enter a value: ')
if text.isdigit():
    print('It is a number')
| true |
c06d6c3fbb345237e7eb19c97c911c7be9303753
|
Python
|
AmalLRK/Pyhton-Game
|
/pin.py
|
UTF-8
| 2,084 | 3.203125 | 3 |
[] |
no_license
|
import random
attempts = 0
digit = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
digit_code_v0 = 0
num = 0
def checkPin(guessed_pin) :
global attempts
tab_res = []
initialAttemp()
digit_code = list(digit_code_v0)
mon_tab = []
attempts += 1
print("attempts : ", attempts)
try:
mon_tab = []
n = int(guessed_pin)
for element in guessed_pin:
mon_tab.append(element)
if len(mon_tab) != 4:
tab_res.extend([100, attempts])
return tab_res
except:
tab_res.extend([100, attempts])
return tab_res
if (mon_tab == digit_code):
tab_res.extend([200, attempts])
return tab_res
else:
tabl1i = ["1wi", "1xi", "1yi", "1zi"]
tabl2i = ["2wi", "2xi", "2yi", "2zi"]
tabl1j = ["1wj", "1xj", "1yj", "1zj"]
tabl2j = ["2wj", "2xj", "2yj", "2zj"]
count = 0
count2 = 0
for i in range(0, 4):
if (mon_tab[i] == digit_code[i]):
mon_tab[i] = tabl1i[i]
digit_code[i] = tabl2i[i]
count += 1
else:
continue
for i in range(0, 4):
for j in range(0, 4):
if (mon_tab[i] == digit_code[j]):
if i == j:
continue
else:
mon_tab[i] = tabl1j[i]
digit_code[j] = tabl2j[j]
count2 += 1
else:
continue
if ((count != 0) or (count2 != 0) ):
tab_res.extend([count, count2, attempts])
return tab_res
print(digit_code)
else :
tab_res.extend([300, attempts])
return tab_res
# to initialize the counter
def initialAttemp() :
global attempts, digit_code_v0, num
if (attempts == 10 or attempts == 0) :
attempts = 0
digit_code_v0 = random.sample(digit, 4)
num = (''.join(map(str, digit_code_v0)))
print(digit_code_v0)
| true |
16dbf5d6835f377be804282936f5fcd2c2131783
|
Python
|
SummerBigData/Nick-Kyriacou-Repo
|
/Keras_Practice/Keras_NN.py
|
UTF-8
| 2,112 | 3.296875 | 3 |
[] |
no_license
|
#Purpose: The following code will create a neural network using Keras
#Created by: Nick Kyriacou
#Created on: 7/19/2018
#Importing Packages
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
import struct as st
import gzip
# Reads in MNIST dataset
def read_idx(filename, n=None):
with gzip.open(filename) as f:
zero, dtype, dims = st.unpack('>HBB', f.read(4))
shape = tuple(st.unpack('>I', f.read(4))[0] for d in range(dims))
arr = np.fromstring(f.read(), dtype=np.uint8).reshape(shape)
if not n is None:
arr = arr[:n]
return arr
def y_as_matrix(y,training_sets): #This takes a training_setsx1 vector and makes it into a training_sets x num_classes matrix.
y = np.ravel(y)
y_array = np.zeros((training_sets,num_classes))
for i in range(len(y)):
for j in range(num_classes):
if (y[i] == j):
y_array[i][j] = 1
else:
y_array[i][j] = 0
return(y_array)
# Main Code
num_classes = 10
training_images = read_idx('data/train-images-idx3-ubyte.gz',60000)
training_labels = read_idx('data/train-labels-idx1-ubyte.gz',60000)
testing_images = read_idx('data/t10k-images-idx3-ubyte.gz',10000)
testing_labels = read_idx('data/t10k-labels-idx1-ubyte.gz',10000)
training_images = np.reshape(training_images,(60000,784))
testing_images = np.reshape(testing_images,(10000,784))
#Normalize data as well
training_images = training_images/255.0
testing_images = testing_images/255.0
#Take training labels and make it a (60000,10) matrix
training_labels_mat = y_as_matrix(training_labels,60000)
testing_labels_mat = y_as_matrix(testing_labels,10000)
#Now we should create our model in Keras
model = Sequential()
#Keep adding layers based on how we want to structure our NN
model.add(Dense(60000,input_dim = 784,activation = 'sigmoid'))
model.add(Dense(100,activation = 'sigmoid'))
model.add(Dense(100,activation = 'sigmoid'))
model.add(Dense(10,activation = 'softmax'))
#Next compile it
model.compile(loss = 'categorical_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
model.fit(training_images,training_labels_mat,epochs=200,batch_size = 1000)
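#Evaluate on the held-out test set (illustrative addition; mirrors the metrics chosen in compile above)
loss, accuracy = model.evaluate(testing_images, testing_labels_mat, batch_size=1000)
print('Test loss: %.4f, test accuracy: %.4f' % (loss, accuracy))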
| true |
7431f9d9bed79a5312a034a41b2f64b23a24ec90
|
Python
|
dustinfreeman/IglooKitchener
|
/old/dustin_shadow_example.py
|
UTF-8
| 1,039 | 2.703125 | 3 |
[] |
no_license
|
import viz
from ShadowTheatre import *
ground = viz.add('tut_ground.wrl')
ground2 = viz.add('tut_ground.wrl', pos = (0,0,8), euler = (0,270,0) )
#Add avatar
avatar = viz.add('vcc_female.cfg',pos=(0,0,6),euler=(180,0,0))
avatar.state(5)
SHADOW_RES = 256
#Postion of shadow projector
SHADOW_POS = [0,2,3]
SHADOW_EULER = [0,25,0]
#Controls size of orthographic shadow projector
#Large values mean larger area is covered, but resolution will be diluted
SHADOW_AREA = [5,5]
shadow = ShadowProjector(size=SHADOW_RES,pos=SHADOW_POS,area=SHADOW_AREA, euler = SHADOW_EULER )
#Add avatar as a shadow caster
shadow.addCaster(avatar)
#Add ground as shadow receiver
shadow.addReceiver(ground)
shadow.addReceiver(ground2)
def update_shadows():
#moving shadow in x
SHADOW_POS[0] = -1 + (2.0/100)*(frameCount%100)
#SHADOW_POS[2] = 2 + (2.0/100)*(frameCount%100)
shadow.setPosition(SHADOW_POS)
frameCount = 0
def frame_tick():
global frameCount
frameCount+=1
update_shadows()
#runs each frame
vizact.ontimer(0, frame_tick)
viz.go()
| true |
cf10c8d3dc023e7e264b8b6fe5f8b71005707c4c
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_135/1792.py
|
UTF-8
| 1,255 | 3.140625 | 3 |
[] |
no_license
|
#!/usr/bin/python
with open('A-small-attempt0.in') as f:
n_case = int(f.readline())
for case in range(n_case):
x1= 0
x2=0
x3=0
x4=0
d1=[]
d2=[]
first_row = int(f.readline())
data1 = []
data2 = []
for i in range(4):
data1.append(f.readline())
row = data1[first_row-1]
x1 = int(row.split()[0])
x2 = int(row.split()[1])
x3 = int(row.split()[2])
x4 = int(row.split()[3])
d1=[x1,x2,x3,x4]
# print 'first row:', d1
second_row = int(f.readline())
for i in range(4):
data2.append(f.readline())
row = data2[second_row-1]
x1 = int(row.split()[0])
x2 = int(row.split()[1])
x3 = int(row.split()[2])
x4 = int(row.split()[3])
d2 = [x1,x2,x3,x4]
#print 'second row:',d2
result = list(set(d1).intersection(d2))
# print 'result is', result
if not result:
print 'Case #{0}:'.format(case+1), 'Volunteer cheated!'
elif len(result) > 1:
print 'Case #{0}:'.format(case+1), 'Bad magician!'
else:
print 'Case #{0}:'.format(case+1), result[0]
| true |
25d7397e93a3b8439c215bba517f01f6ba133f4b
|
Python
|
souravt/wordsearchpuzzler
|
/main.py
|
UTF-8
| 10,763 | 2.625 | 3 |
[] |
no_license
|
import random
import string
import numpy as np
import pandas as pd
import pdfkit
import yaml
from PyPDF2 import PdfFileMerger
from time import time
import itertools
gridLen = 12
basepath = "C://Users/sourav/Desktop/book_generation/"
path_wkhtmltopdf = r'C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe'
template_html = '<!DOCTYPE html><html><head><style> body{background-color:#B0DFE5; background-image: url(\'file:\\\\\\C:\\dev\\book\\bg.png\');background-repeat: no-repeat; background-attachment: fixed; background-size: 100% 100%;border: 1px dashed grey;} table{border-spacing: 0;border-collapse: collapse;margin-left:auto; margin-right:auto;}td{ border-bottom: 1px solid black !important; text-align: center; vertical-align: middle; font-size : 18px; padding:10px; height: 3vw; width: 3vw;}th{ border-bottom: 1px solid black !important; text-align: center;}.pageheader{ text-align: center; font-size : 30px;font-weight: bold;}</style></head><body> <p> <p class=\'pageheader\'> TITLE_TO_REPLACE</p> <p> TABLE_TO_REPLACE</p><br/><hr style=\'1px dashed grey\'><p>WORDS_TO_REPLACE</p> <br/></p> <footer style=\'text-align:center\'>Page : PAGE_NO</footer> </body> </html>'
# TODO : Replace '-' with blank space in existing file
# TODO : Puzzle Number
# TODO : Repeatability Logic
# TODO : Fix Page No
# TODO : Apply templates - cover page, fist page, content etc
def fillWordGrid(words, title):
filledInWords = list()
for word in words:
word = word.upper()
word = get_if_reversed(word)
wordLen = len(word)
orientations = ['HORIZONTAL', 'VERTICAL', 'DIAGFORWARD', 'DIAGBACKWARD']
orientation = random.choice(orientations)
count = 0
isOverlapping = True
while isOverlapping:
col, row = getStartPostion(orientation, wordLen)
isOverlapping = checkIfOverlapping(word, row, col, orientation, filledInWords)
count = count + 1
if count > 30:
print("Gridlock Detected!! Retrying!")
return fillWordGrid(words, title)
filledInWords.append([word, orientation, row, col])
print(filledInWords)
solution_array = generateFinalGrid(filledInWords)
problem_array = fillInGibberish(solution_array)
return problem_array, solution_array
def generate_html(words, input_array, title, page_no):
html = generatePageHTML(input_array, words, title, page_no)
return generatePagePDF(html, page_no)
def generatePagePDF(html, page_no):
config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
output_file = basepath + "/pages/" + str(page_no) + ".pdf"
options = {'page-size': 'A5', 'dpi': 400}
pdfkit.from_string(html, output_file, configuration=config, options=options)
return output_file
def htmlToPDF(filePath, outputFileName):
config = pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
output_file = basepath + "/pages/" + str(outputFileName) + ".pdf"
options = {'page-size': 'A5', 'dpi': 400}
with open(filePath) as f:
pdfkit.from_file(f, output_file, configuration=config, options=options)
return output_file
def generate_Content_page(titles, problem_titlepages, solution_titlepages):
html = '<html> <head> <style> body{background-image: url(\'file:///C:\\Users\\sourav\\Desktop\\book_generation\\footer-background.jpg\');background-repeat: no-repeat; background-attachment: fixed; background-size: 100% 100%;} html{border: dashed;} table{border-spacing: 0;border-collapse: collapse;margin-left:auto; margin-right:auto;}th{font-size : 18px; font-weight:bold}td{ border-bottom: 1px solid black !important; text-align: left; vertical-align: middle; font-size : 16px; padding:10px; }th{ border-bottom: 1px solid black !important; text-align: center;}.pageheader{text-align: center;font-size : 30px;font-weight: bold;}</style> </head>'
html = html + '<body ><p style=\'text-align:center;font-weight:bold;font-size : 24px;\'> Problems and Solutions </p> <hr><br>'
html = html + '<table>'
for (probs, sols) in zip(problem_titlepages, solution_titlepages):
html = html + '<tr><td>' + probs[0] + '</td><td>' + str(probs[1]) + '</td><td></td><td>' + str(sols[1]) + '</td></tr>'
html = html + '</table> </body> </html>'
return generatePagePDF(html, "content")
def generatePageHTML(input_array, words, title, page_no):
df = pd.DataFrame(input_array)
df = df.replace('-', '*')
puzzle_html = df.to_html(index=False, header=False)
reshaped_words = np.reshape(words, (-1, 3))
words_df = pd.DataFrame(reshaped_words)
words_html = words_df.to_html(index=False, header=False)
html = template_html.replace("TABLE_TO_REPLACE", puzzle_html)
html = html.replace("TITLE_TO_REPLACE", title)
html = html.replace("WORDS_TO_REPLACE", words_html)
html = html.replace("PAGE_NO", str(page_no))
return html
def generateFinalGrid(filledInWords):
final_array = np.full([gridLen, gridLen], '-')
for eachWord in filledInWords:
row = eachWord[2]
col = eachWord[3]
orientation = eachWord[1]
for c in eachWord[0]:
final_array[row][col] = c
if orientation == 'HORIZONTAL':
col = col + 1
elif orientation == 'VERTICAL':
row = row + 1
elif orientation == 'DIAGFORWARD':
row = row + 1
col = col + 1
elif orientation == 'DIAGBACKWARD':
row = row + 1
col = col - 1
return final_array
def fillInGibberish(final_array):
new_array = np.array(final_array)
for x in range(0, new_array.shape[0]):
for y in range(0, new_array.shape[1]):
if new_array[x, y] == '-':
new_array[x, y] = random.choice(string.ascii_uppercase)
return new_array
def getStartPostion(orientation, wordLen):
try:
if orientation == 'HORIZONTAL':
row = random.randint(0, 11)
col = random.randint(0, (gridLen - wordLen - 1))
elif orientation == 'VERTICAL':
row = random.randint(0, (gridLen - wordLen - 1))
col = random.randint(0, 11)
elif orientation == 'DIAGFORWARD':
row = random.randint(0, (gridLen - wordLen - 1))
col = random.randint(0, (gridLen - wordLen - 1))
elif orientation == 'DIAGBACKWARD':
row = random.randint(0, (gridLen - wordLen - 1))
col = random.randint(wordLen, gridLen - 1)
except:
print(wordLen, orientation)
return col, row
def checkIfOverlapping(word, row, col, orientation, filledInWords):
wordCells = getCells(word, row, col, orientation)
# print("CHECK OVERLAP word:", word)
for filledInWord in filledInWords:
tempWordCells = getCells(filledInWord[0], filledInWord[2], filledInWord[3], filledInWord[1])
for refCell in tempWordCells:
for wordCell in wordCells:
if refCell[1] == wordCell[1] and refCell[2] == wordCell[2]:
return True
# print("CheckIfOverlapping word:", word, "filledInWord[0]:",filledInWord[0], "overlapping:",overlapping)
return False
def getCells(word, row, col, orientation):
cells = list()
for c in word:
cells.append([c, row, col])
if orientation == 'HORIZONTAL':
col = col + 1
elif orientation == 'VERTICAL':
row = row + 1
elif orientation == 'DIAGFORWARD':
row = row + 1
col = col + 1
elif orientation == 'DIAGBACKWARD':
row = row + 1
col = col - 1
return cells
def get_if_reversed(word):
is_reverse = random.randint(0, 1)
if is_reverse:
word = word[::-1]
return word
def printBook(problem_puzzle_files, solution_puzzle_files, content_page):
merger = PdfFileMerger()
tempPath = basepath + "/pages/"
merger.append(open(tempPath + "1.pdf", 'rb'),import_bookmarks=False)
merger.append(open(tempPath + "2.pdf", 'rb'),import_bookmarks=False)
merger.append(open(tempPath + "3.pdf", 'rb'),import_bookmarks=False)
merger.append(open(tempPath + "4.pdf", 'rb'),import_bookmarks=False)
merger.append(open(tempPath + "5.pdf", 'rb'),import_bookmarks=False)
merger.append(open(content_page, 'rb'),import_bookmarks=False)
merger.append(open(tempPath + "problems-cover.pdf", 'rb'),import_bookmarks=False)
for puzzle_file in problem_puzzle_files:
merger.append(open(puzzle_file, 'rb'))
merger.append(open(tempPath + "solutions-cover.pdf", 'rb'),import_bookmarks=False)
for puzzle_file in solution_puzzle_files:
merger.append(open(puzzle_file, 'rb'))
merger.append(open(tempPath + "last.pdf", 'rb'),import_bookmarks=False)
with open(basepath + "books_output/Word Search Puzzle Book"+str(round(time()))+".pdf", "wb") as fout:
merger.write(fout)
def generateBasePages():
tempPath = basepath
htmlToPDF(tempPath + "1.html", 1)
htmlToPDF(tempPath + "2.html", 2)
htmlToPDF(tempPath + "3.html", 3)
htmlToPDF(tempPath + "4.html", 4)
htmlToPDF(tempPath + "5.html", 5)
htmlToPDF(tempPath + "problems-cover.html", "problems-cover")
htmlToPDF(tempPath + "solutions-cover.html", "solutions-cover")
htmlToPDF(tempPath + "last.html", "last")
class WordSearchGenerator:
if __name__ == "__main__":
masterList = []
problem_puzzle_files = list()
solution_puzzle_files = list()
solution_puzzle_files = list()
with open('words.yml') as f:
puzzleSets = yaml.load_all(f, Loader=yaml.FullLoader)
for puzzle in puzzleSets:
puzzle_count = len(puzzle.keys())
page_no = 9
problem_pages = list()
solution_pages = list()
for title, words in puzzle.items():
problem_array, solution_array = fillWordGrid(words, title)
problem_file_path = generate_html(words, problem_array, title, page_no)
solution_file_path = generate_html(words, solution_array, title, (page_no + puzzle_count + 2))
problem_puzzle_files.append(problem_file_path)
solution_puzzle_files.append(solution_file_path)
problem_pages.append([title, page_no])
solution_pages.append([title, page_no + puzzle_count + 2])
page_no = page_no + 1
content_page = generate_Content_page(puzzle.keys(), problem_pages, solution_pages)
bookPages = list()
# generateBasePages()
printBook(problem_puzzle_files, solution_puzzle_files, content_page)
| true |
1721a6bc70ed5accd638fff6ea9ef39a1657de50
|
Python
|
petersonprojects/python-exercises
|
/listExercises/listEx3.py
|
UTF-8
| 111 | 3.34375 | 3 |
[] |
no_license
|
numList = [89,2,17000,105,512,74,7,600]
numList.sort()
print(f'Smallest number in the list is: {numList[0]}')
| true |
838bc96d0059758b21c3bbde455a8f7dfd63f4aa
|
Python
|
aklefebvere/bobs-book-club-api
|
/app/DB.py
|
UTF-8
| 1,315 | 3.53125 | 4 |
[] |
no_license
|
import psycopg2
from dotenv import load_dotenv
import os
load_dotenv()
# DB info
URI = os.getenv("DATABASE_URL")
def book_info(id):
"""
Function to query the Postgres table with given book id to return that
book's price and title
Inputs: id - The book id the user is trying to retrieve info from
Output: JSON of the price and title of a book
"""
# Create the DB connection
conn = psycopg2.connect(URI)
# Create the cursor
cur = conn.cursor()
# Query to retrieve the book's price and title given it's id
query = f"""SELECT
CASE
WHEN price IS NULL THEN price
WHEN price IS NOT NULL THEN ABS(price::FLOAT)
END,
CASE
WHEN title IS NULL THEN 'No title'
WHEN title IS NOT NULL THEN title
END
FROM books
WHERE id = {id}
"""
# Execute the query
cur.execute(query)
# Fetch the returned data from the query
data = cur.fetchone()
# If the book exists then create a JSON object with the data
if data:
book = {
"price": data[0],
"title": data[1]
}
# If the book does not exist then return an empty JSON object
else:
return {}
# Close the connection
conn.close()
# Return the JSON containing the book's price and title
return book
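# Illustrative usage (assumes DATABASE_URL is set in the environment and the books table exists):
#
#   info = book_info(42)
#   print(info.get("title"), info.get("price"))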
| true |
7699815f1011918098c33cc11e68e99ec6001504
|
Python
|
vinnis/python-projects
|
/cards.py
|
UTF-8
| 2,120 | 3.53125 | 4 |
[] |
no_license
|
import random
print ("there are 4 types of deck available to play namely diamond, hearts, club, spades")
nod=input("number of decks you want? ")
naofd=[]
if int(nod)>1:
for i in range(1,int(nod)+1):
naofd.append(input("name of the {} deck you want ".format(i)))
else:
naofd.append(input("name of the {} deck you want ".format(i)))
nop=input("enter number of players who will play ")
player=[]
if int(nop)>1:
for i in range(0,int(nop)):
player.append(input("enter name of the player here "))
else:
print("we cannot play game with 1 player")
print ("name of the {0} players are: {1}".format(len(player),player))
diamond=[]
hearts=[]
club=[]
spades=[]
print ("number of decks inputted are{}".format(nod))
for i in naofd:
print("names are {}".format(i))
#if 'diamond' in naofd:
if i == 'diamond':
word='d'
for i in range(1,14):
word='d'+str(i)
diamond.append(word)
#elif 'club' in naofd:
elif i == 'club':
word='c'
for i in range(1,14):
word='c'+str(i)
club.append(word)
#elif 'spades' in naofd:
elif i == 'spades':
word='s'
for i in range(1,14):
word='s'+str(i)
spades.append(word)
#elif 'hearts' in naofd:
elif i == 'hearts':
word='h'
for i in range(1,14):
word='h'+str(i)
hearts.append(word)
else:
print ('not a valid deck name')
#print ('decks as follows \n',diamond,spades,club,hearts)
total_cards=[]
for i in diamond:
total_cards.append(i)
for i in club:
total_cards.append(i)
for i in spades:
total_cards.append(i)
for i in hearts:
total_cards.append(i)
print ("total cards are {}".format(total_cards))
distribution=len(total_cards)//len(player)
for i in range(0,len(player)):
print ("{} got following cards".format(player[i]))
player[i]=[]
for j in range(0,distribution):
x=random.choice(total_cards)
player[i].append(x)
total_cards.remove(x)
print(player[i][j])
| true |
a2e42a606e66eaf51d5affdca050f11cd1b3a4ff
|
Python
|
SmartFox97/MyLeetCode
|
/#0310/minimum-height-trees.py
|
UTF-8
| 3,593 | 3.40625 | 3 |
[
"MIT"
] |
permissive
|
from collections import deque
from collections import defaultdict
import collections
class Solution:
def findMinHeightTrees(self, n: int, edges) -> list:
        # Reduce the tree to a graph problem; the routine is to build the graph and traverse it.
        # Build the graph as an adjacency list.
        # The adjacency list is a map whose values are lists; a list's size is the node's degree.
if n == 2:
return [0,1]
if n == 1:
return [0]
        adjs = defaultdict(list) # defaultdict is very handy here
        for x in edges: # adjacency-list representation of the graph, basically boilerplate
adjs[x[0]].append(x[1]) # 1:{2}
adjs[x[1]].append(x[0]) # 2:{1}
        # BFS traversal: put the initial elements into the queue;
        # nodes whose adjacency list has size 1 are leaves, so enqueue them.
        queue = deque() # standard idiom
for key, value in adjs.items():
if len(value) == 1:
queue.append(key)
        # BFS: two nested loops
        while(queue): # standard idiom
            size = len(queue) # standard idiom
n = n - size
for _ in range(size):
v = queue.popleft()
                v_adj = adjs[v].pop() # v has only one neighbour left, so popping it also removes it
                adjs[v_adj].remove(v) # remove v from the adjacency list of v's neighbour
if len(adjs[v_adj]) == 1:
queue.append(v_adj)
if n == 1:
return [queue.popleft()]
if n == 2:
return [queue.popleft(), queue.popleft()]
# class Solution(object):
# def findMinHeightTrees(self, n, edges):
# """
# :type n: int
# :type edges: List[List[int]]
# :rtype: List[int]
# """
#         # Idea:
#         # Traverse from the outermost layer inward; the last layer is the result.
#
# if n == 1:
# return [0]
#
#         # Build the adjacency list and the degrees
# adjs = defaultdict(list)
# degrees = [0 for _ in range(n)]
# for (f, t) in edges:
# adjs[f].append(t)
# adjs[t].append(f)
# degrees[f] += 1
# degrees[t] += 1
#
# print(adjs)
# print(degrees)
# # BFS
#         # First layer (the outermost one)
# layer = []
# for ind, val in enumerate(degrees):
# if val == 1:
# layer.append(ind)
# print(layer)
#         # Shrink layer by layer: traverse the current layer and determine the next layer's nodes.
# while layer:
# next_layer = []
# for node in layer:
# for neighbor in adjs[node]:
# degrees[neighbor] -= 1
# if degrees[neighbor] == 1:
# next_layer.append(neighbor)
#             if not next_layer: # the next layer is empty, so the current layer is the last one traversed, i.e. the one we need
# return layer
# layer = next_layer
# class Solution:
# queue = deque()
# def findMinHeightTrees(self, n: int, edges: list) -> list:
# graph = {}
# for k, v in edges:
# if k not in graph.keys():
# graph[k] = []
# graph[k].append(v)
# else:
# graph[k].append(v)
# print(graph)
# # bfs start
# queue += graph
# _result = set({})
# minLength = n
# return list(_result)
if __name__ == "__main__":
n = 4
edges = [[1, 0], [1, 2], [1, 3]]
tests = Solution()
_data = tests.findMinHeightTrees(n, edges)
print(_data)
n = 6
edges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]
tests2 = Solution()
_data = tests2.findMinHeightTrees(n, edges)
print(_data)
| true |
7312f4d72f418a8738e7a7843b35f1ecf6dda933
|
Python
|
rishabhchatterjee/Virtual_Dresser_ComputerVision_OpenCV
|
/superimpose_api.py
|
UTF-8
| 4,549 | 2.65625 | 3 |
[] |
no_license
|
import cv2
import numpy as np
import math
import copy
class Superimposition(object):
def superimpose(self, jewel_image, user_image, angle, bodyx, bodyy, jewel_length, jewellery_type):
if jewellery_type == 'earring':
jewel_image = self.rescale(jewel_image, self.getscalingfactor(jewel_image, jewel_length, 3.0, 505.0))
f = 4.5
dz = 20
elif jewellery_type == 'necklace':
jewel_image = self.rescale(jewel_image, self.getscalingfactor(jewel_image, jewel_length, 25.0, 587.0))
jewel_image = cv2.copyMakeBorder(jewel_image, 10, 0, 0, 0, cv2.BORDER_CONSTANT, value=[255, 255, 255])
f = 1
dz = 0
img = cv2.cvtColor(jewel_image, cv2.COLOR_RGB2BGR)
dst = self.rotate(img, 0, 1, dz, f, angle)
dst = self.contour_centering(dst)
xpos, ypos = self.get_leftmostpoint(dst)
return self.superimpose_centered(dst, user_image, bodyx - xpos, bodyy - ypos)
def rotate(self, img, dx, dy, dz, f, angle):
beta = angle * 1. / 60. # 0.5 & 0.7 &0.8 (left)
beta = beta * 3.14 / 180.
h, w, _ = img.shape
# Projection 2D -> 3D matrix
A1 = np.array([[1, 0, -w / 2.0], [0, 1, -h / 2.0], [0, 0, 1], [0, 0, 1]])
R = np.array([[math.cos(beta), 0, -math.sin(beta), 0],
[0, 1, 0, 0],
[math.sin(beta), 0, math.cos(beta), 0],
[0, 0, 0, 1]])
T = np.array([[1, 0, 0, dx], [0, 1, 0, dy], [0, 0, 1, dz], [0, 0, 0, 1]])
# 3D -> 2D matrix
A2 = [[f, 0, w / 2, 0], [0, f, h / 2, 0], [0, 0, 1, 0]]
# Final transformation matrix
trans = (np.matmul(A2, np.matmul(T, np.matmul(R, A1))))
dst = cv2.warpPerspective(img, trans, (w, h))
return dst
def contour_centering(self, dst):
imgray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
for i in range(imgray.shape[0]):
for j in range(imgray.shape[1]):
if imgray[i][j] > 245:
imgray[i][j] = 0
im2, contours, hierarchy = cv2.findContours(imgray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
maxarea = 0
for cnt in contours:
if cv2.contourArea(cnt) > maxarea:
maxcnt = cnt
maxarea = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(maxcnt)
dst = dst[y:y + h, x:x + w]
return dst
def superimpose_centered(self, dst, user_image, xpos, ypos):
s_img = dst
s_img_2 = cv2.cvtColor(s_img, cv2.COLOR_RGB2GRAY)
l_img = user_image
l_img = cv2.cvtColor(l_img, cv2.COLOR_RGB2BGR)
l_img_2 = copy.deepcopy(l_img)
x_offset = xpos
y_offset = ypos
l_img_2[y_offset:y_offset + s_img.shape[0], x_offset:x_offset + s_img.shape[1]] = s_img
for i in range(y_offset, y_offset + s_img.shape[0]):
for j in range(x_offset, x_offset + s_img.shape[1]):
if s_img_2[i - y_offset][j - x_offset] < 10 or s_img_2[i - y_offset][j - x_offset] > 245:
l_img_2[i][j] = l_img[i][j]
return l_img_2
def get_leftmostpoint(self, jewel_image):
jewel_gray = cv2.cvtColor(jewel_image, cv2.COLOR_RGB2GRAY)
for i in range(jewel_gray.shape[0]):
for j in range(jewel_gray.shape[1]):
if jewel_gray[i][j] > 10 and jewel_gray[i][j] < 255:
return j, i
def rescale(self, image, k):
return cv2.resize(image, None, fx=k, fy=k, interpolation=cv2.INTER_CUBIC)
def getscalingfactor(self, jewel_image, length, model_length, model_pixel):
jewel_gray = cv2.cvtColor(jewel_image, cv2.COLOR_RGB2GRAY)
up = 0
down = len(jewel_image)
flag = False
for i in range(5, jewel_gray.shape[0]):
for j in range(5, jewel_gray.shape[1]):
if not (jewel_gray[i][j] < 10 or jewel_gray[i][j] > 245):
up = i
flag = True
break
if flag:
break
flag = False
for i in range(jewel_gray.shape[0] - 5, 0, -1):
for j in range(5, jewel_gray.shape[1]):
if not (jewel_gray[i][j] < 10 or jewel_gray[i][j] > 245):
down = i
flag = True
break
if flag:
break
scalingfactor = (model_pixel * length) / (model_length * (down - up))
return scalingfactor
| true |
cbf1708b5a64a01c4426122e5f958f52d22f31c3
|
Python
|
wuxu1019/leetcode_sophia
|
/medium/twopointer/test_3_Longest_Substring_Without_Repeating_Characters.py
|
UTF-8
| 2,038 | 3.84375 | 4 |
[] |
no_license
|
"""
Given a string, find the length of the longest substring without repeating characters.
Examples:
Given "abcabcbb", the answer is "abc", which the length is 3.
Given "bbbbb", the answer is "b", with the length of 1.
Given "pwwkew", the answer is "wke", with the length of 3. Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
"""
import collections
class Solution(object):
def lengthOfLongestSubstring_1(self, s):
"""
:type s: str
:rtype: int
"""
record = set()
j = i = 0
maxlth = 0
while i < len(s) and j < len(s):
if s[j] not in record:
record.add(s[j])
j += 1
maxlth = max(maxlth, j - i)
else:
record.remove(s[i])
i += 1
return maxlth
def lengthOfLongestSubstring_2(self, s):
"""
:type s: str
:rtype: int
"""
record = collections.Counter()
i = 0
maxlth = 0
for j in range(len(s)):
record[s[j]] += 1
while 2 in record.values():
record[s[i]] -= 1
i += 1
maxlth = max(maxlth, j - i + 1)
return maxlth
def lengthOfLongestSubstring_3(self, s):
"""
:type s: str
:rtype: int
"""
pos = {}
i = 0
ans = 0
for j in range(len(s)):
if s[j] in pos and i <= pos[s[j]]:
i = pos[s[j]] + 1
else:
ans = max(ans, j - i + 1)
pos[s[j]] = j
return ans
def lengthOfLongestSubstring_bitmap(self, s):
"""
:type s: str
:rtype: int
"""
pos = [-1] * 256
i = 0
ans = 0
for j in range(len(s)):
p = ord(s[j])
if pos[p] >= 0 and i <= pos[p]:
i = pos[p] + 1
else:
ans = max(ans, j - i + 1)
pos[p] = j
return ans
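if __name__ == "__main__":
    # Quick check against the examples in the problem statement above
    # (illustrative driver, not part of the original solution file).
    s = Solution()
    for case, expected in [("abcabcbb", 3), ("bbbbb", 1), ("pwwkew", 3)]:
        print(case, s.lengthOfLongestSubstring_1(case), "expected", expected)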
| true |
250739c88952e82bd76b8eaf28f4441078b00701
|
Python
|
jinpan/pkb2015
|
/v1/tryhard.py
|
UTF-8
| 4,185 | 3 | 3 |
[] |
no_license
|
import re
class Tryhard:
"""
This class handles looking over all the previous actions in a hand
to identify special moves (like 3-bets).
"""
@staticmethod
def study(game,historypak):
"""
Updates game move history given a historypak.
Also determine whether a player has folded.
"""
g = game # For easy typing
g.history = map(lambda s: s.split(':'),historypak)
for m in g.history:
# Check if current player has folded
if not g.p[g.seat2ind[g.action_on]].isIn:
g.historystr += '-' # Skip player
g.action_on = (g.action_on+1) % 3
if m[0] == 'POST':
g.historystr += 'p'
g.action_on = (g.action_on+1) % 3
elif m[0] == 'RAISE' or m[0] == 'BET':
g.historystr += 'r'
g.action_on = (g.action_on+1) % 3
elif m[0] == 'CALL':
g.historystr += 'c'
g.action_on = (g.action_on+1) % 3
elif m[0] == 'CHECK':
g.historystr += 'k'
g.action_on = (g.action_on+1) % 3
elif m[0] == 'FOLD':
g.historystr += 'f'
g.p[g.seat2ind[g.action_on]].isIn = False
g.action_on = (g.action_on+1) % 3
else: # New cards
g.historystr += 'N'
g.action_on = 1 # Set back to SB
print 'HAND: ' + str(g.hands_idx)
print 'HISTORY: ' + str(g.history)
print 'HISTORYSTR: ' + g.historystr
@staticmethod
def assimilate(game):
"""
Looks at the previous game moves and identifies advanced poker
techniques.
"""
g = game
# TODO: Use regex rules here on game.historystr
# Split by N
split_hist = game.historystr.split('N')
# Do all of this for preflop
if len(split_hist) == 1:
h = split_hist[0]
# Remove the H
h = h[1:]
            # Simple actions
            # (the position just after each matched pattern, mod 3, picks the acting seat)
            for m in re.finditer('k', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'CHECK'
            for m in re.finditer('c', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'CALL'
            for m in re.finditer('r', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'RAISE'
            # Complex actions
            for m in re.finditer('ppf', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'F_BTN'
            for m in re.finditer('ppc', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'C_BTN'
            for m in re.finditer('ppr', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'R_BTN'
            # Bets
            for m in re.finditer('r.?r', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = '3_BET'
            for m in re.finditer('r.?r.?f', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'F_3_BET'
            for m in re.finditer('r.?r.?r', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = '4_BET'
            for m in re.finditer('r.?r.?r.?f', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'F_4_BET'
            for m in re.finditer('r.?r.?r.?r', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = '5_BET'
            for m in re.finditer('r.?r.?r.?r.?f', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'F_5_BET'
# Post flop
if len(split_hist) >= 2:
h = split_hist[-1]
            # Simple actions
            for m in re.finditer('k', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'CHECK'
            for m in re.finditer('c', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'CALL'
            for m in re.finditer('r', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'RAISE'
            # Complex actions
            for m in re.finditer('r{1}', h[:2]):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'DONK_BET'
            for m in re.finditer('(r{1}).?f', h[:2]):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'F_DONK_BET'
            for m in re.finditer('(kr)|(k-r)|(kkr)', h[:3]):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'C_BET'
            for m in re.finditer('((kr)|(k-r)|(kkr)).?f', h[:3]):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = 'F_C_BET'
            for m in re.finditer('r.?r', h):
                moveind = g.seat2ind[m.end() % 3]
                g.p[moveind].last_move = '2_RAISE'
print [x.last_move for x in g.p]
@staticmethod
def retrospect(game,historypak):
pass
| true |
6370de506b1d6a0a7090a173c4ba4ec4bc881bec
|
Python
|
Bhaveshsadhwani/Test
|
/ASSESSMENT/Que 23.py
|
UTF-8
| 218 | 3.046875 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 17:33:01 2017
@author: User
"""
fo=open("Demo.txt","a")
str=raw_input("Enter the string:")
fo.write(str)
fo.close()
f1=open("Demo.txt","r")
a=f1.readlines()
print a
| true |
d76ca3ab942212216366940792a802ef66d8378a
|
Python
|
testTemtProj/OLD_PROJECT
|
/get/2013/site/getmyad/other/mongostat-analyzer.py
|
UTF-8
| 1,220 | 2.90625 | 3 |
[] |
no_license
|
# encoding: utf-8
# The script analyzes a file with mongostat logs and determines how long the database was idle
# (fewer than 10 requests per second). At the moment, one request to show a GetMyAd ad should
# produce exactly one request to mongodb, so this is effectively an indicator that GetMyAd is working.
import sys
if len(sys.argv) < 2:
print 'Analyze mongostat log. Usage: mongostat-analyzer.py logfilename [--verbose]'
exit()
try:
log = open(sys.argv[1])
except IOError:
print 'Error opening log file!'
exit()
try:
verbose = (sys.argv[2] == '--verbose')
except:
verbose = False
downtime = 0
total = 0
for row in log:
if row[0] <> ' ':
continue
total += 1
val = [x for x in row.split(' ') if x]
if int(val[1]) < 5:
if verbose:
print row,
downtime += 1
print 'Total minutes: ', total / 60
print 'Downtime minutes: ', downtime / 60
print 'Uptime rate: ', 100 - (float(downtime) / total) * 100.0
| true |
c9a20f2c1ddf7abc9fbf4896f0ee03c424d3cacc
|
Python
|
marlanbar/academic-projects
|
/ith-tp1/src/scripter.py
|
UTF-8
| 1,252 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
'''
Object that makes it possible to build the Praat script
'''
class Scripter():
def __init__(self, difonosFolder):
self._script = ""
self._cantidad = 1
self._difFold = difonosFolder
def agregarDifono(self, difono):
difFold = self._difFold
script = self._script
script += 'Read from file: "{}/{}.wav"\n'.format(difFold,difono)
script += 'selectObject: "Sound {}"\n'.format(difono)
script += 'Rename: "difono{}"\n'.format(self._cantidad)
self._cantidad +=1
self._script = script
def concatenar(self, salida):
if self._cantidad == 1:
return
script = self._script
script += 'selectObject:"Sound difono1"\n'
for i in xrange(2,self._cantidad):
script += 'plusObject:"Sound difono{}"\n'.format(str(i))
script += 'Concatenate recoverably\n'
script += 'selectObject: "Sound chain"\n'
script += 'Save as WAV file: "{}"\n'.format(salida)
self._script = script
def escribirScript(self, archivo):
f = open(archivo,'w')
f.write(self._script)
f.close()
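# Illustrative usage sketch (the folder, diphone names and file names are made up):
#
#   sc = Scripter("difonos")
#   sc.agregarDifono("ka")
#   sc.agregarDifono("as")
#   sc.concatenar("salida.wav")
#   sc.escribirScript("script.praat")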
| true |
e59a92e76b7ecb42cff7b5bd2e6e45ad59e5dc1f
|
Python
|
rebecca0323/Predicting-Migraines-IAIF
|
/utils.py
|
UTF-8
| 1,000 | 3.109375 | 3 |
[
"MIT"
] |
permissive
|
import numpy as np
import pandas as pd
def load_and_preprocess_data():
""" Load data from total_diary_migrain.csv file
and return the data matrix and labels
Returns
-------
features - np.ndarray
Migraine data matrix
features_list - list
List of Features in our migraine data matrix
labels - np.ndarray
Truth labels
"""
data = pd.read_csv('total_diary_migraine.csv', header=0)
features = pd.DataFrame(data)
features['no_headache_day'].fillna('N', inplace=True)
features['migraine'].fillna(0, inplace=True)
features['headache_day'] = features['headache_day'].map({'Y':0, 'N':1})
labels = np.array(features['migraine'])
features = features.drop(['number', 'patient', 'ID', 'no_headache_day', 'migraine'], axis = 1)
features_list = list(features.columns)
features = np.array(features)
features[np.isnan(features)] = 0
return (features, features_list, labels)
| true |
ac3c83eaaf8bf84b7c0d5aa127b0b7c2ff35bb9c
|
Python
|
yasinshaw/leetcode
|
/test/n1_test.py
|
UTF-8
| 310 | 2.734375 | 3 |
[] |
no_license
|
import unittest
from n1 import *
solution = Solution()
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(solution.twoSum([1, 2, 3, 4], 5), [1, 2])
self.assertEqual(solution.twoSum([1, 3, 5, 6], 6), [0, 2])
if __name__ == '__main__':
unittest.main()
| true |
5c8ccaf37a1cf0db3c6ca247d2dbf79ead698b33
|
Python
|
emmaremy/pill-images
|
/code/image_segmentation.py
|
UTF-8
| 4,331 | 2.9375 | 3 |
[] |
no_license
|
import numpy as np
import cv2
import scipy.spatial.distance as sp_dist
""" threshold_seg: creates image segmentations using simple thresholding
techniques along with morphological operations.
params:
img - a numpy array containing a 3-channel image
hsv_space - a boolean indicating if the image should be converted to hsv
space. If False, the image will instead be converted to grayscale.
returns:
a numpy array containing the segmentation / binary mask
"""
def threshold_seg(img, hsv_space=True):
#img = cv2.imread(img_file_name)
    print(img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
if hsv_space:
ret, mask = cv2.threshold(hsv[:,:,0], 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
else:
ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
smoothed = morph_ops(mask)
#_, contours, hierarchy = cv2.findContours(smoothed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#smoothed = cv2.cvtColor(smoothed, cv2.COLOR_GRAY2BGR)
#cv2.drawContours(smoothed, contours, -1, (0, 255, 0), 5)
# cv2.imwrite('contours.jpg', smoothed)
return smoothed
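# Usage sketch for threshold_seg (file names are hypothetical):
#   img = cv2.imread('pill.jpg')
#   mask = threshold_seg(img, hsv_space=True)
#   cv2.imwrite('mask.jpg', mask)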
""" morph_ops: performs a series of morphological operations on the passed
mask to reduce noise and smooth out the boundary lines.
params:
mask - numpy array containing a binary mask
returns:
a numpy array containing a (hopefully smoothed and de-noised) binary mask
"""
def morph_ops(mask):
little_kernel = np.ones((3, 3), np.uint8)
kernel = np.ones((5, 5), np.uint8)
big_kernel = np.ones((9, 9), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, little_kernel, iterations=1)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, big_kernel, iterations=5)
smoothed = cv2.blur(mask, (19, 19))
ret, smoothed = cv2.threshold(smoothed, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return smoothed
""" distance_to_avg_seg: creates image segmentations by thresholding on the
distance from the average pixel value in the image along with
morphological operations to remove noise and smooth out the mask
params:
img - a numpy array containing a 3-channel image
hsv_space - a boolean indicating if the image should be converted to hsv
space. If False, the image will remain in RGB space.
returns:
a numpy array containing the segmentation / binary mask
"""
def distance_to_avg_seg(img, hsv_space=False):
if hsv_space:
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
col_avgs = np.average(img, axis=1)
avg_color = np.average(col_avgs, axis=0)
img_minus_avg = np.abs(img - avg_color)
img_minus_avg = np.array(img_minus_avg, dtype=np.uint8)
dists = cv2.cvtColor(img_minus_avg, cv2.COLOR_BGR2GRAY)
# Took out +cv2.THRESH_OTSU
dret, thresh = cv2.threshold(dists, 127, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
thresh = morph_ops(thresh)
return thresh
""" distance_to_median_seg: creates image segmentations by thresholding on the
distance from the median pixel value in the image along with
morphological operations to remove noise and smooth out the mask
params:
img - a numpy array containing a 3-channel (RGB) image
hsv_space - a boolean indicating if the image should be converted to hsv
space. If False, the image will remain in RGB space.
returns:
a numpy array containing the segmentation / binary mask
"""
def distance_to_median_seg(img, hsv_space=False):
if hsv_space:
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
pixels = np.reshape(img, (-1, 3))
avg_color = np.median(pixels, axis=0)
img_minus_avg = np.abs(img - avg_color)
#img_minus_avg /= np.linalg.norm(img_minus_avg)
#img_minus_avg *= 255
img_minus_avg = np.array(img_minus_avg, dtype=np.uint8)
dists = cv2.cvtColor(img_minus_avg, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(dists, 0, 255, cv2.THRESH_BINARY)
thresh = morph_ops(thresh)
return thresh
#cv2.imwrite('median.jpg', thresh)
"""
new_img, contours = cv2.findContours(gray, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
print 'found', len(contours), 'contours'
white = (255, 255, 255)
display = np.zeros((img.shape[0], img.shape[1], 3), dtype='uint8')
for j in range(len(contours)):
cv2.drawContours(display, contours, j, white, -1)
"""
| true |
37df5f7a78ae7cd3b874b8acbc4cec6d80c32fef
|
Python
|
PacktPublishing/Hands-On-Deep-Learning-Architectures-with-Python
|
/Chapter02/first_dfn_keras.py
|
UTF-8
| 1,528 | 3.3125 | 3 |
[
"MIT"
] |
permissive
|
# importing the Sequential method in Keras
import keras
from keras.models import Sequential
# Importing the Dense layer which creates a layer of Deep Feedforward Network
from keras.layers import Dense, Activation, Flatten, Dropout
# getting the data as we did earlier
fashionObj = keras.datasets.fashion_mnist
(trainX, trainY), (testX, testY) = fashionObj.load_data()
print('train data x shape: ', trainX.shape)
print('test data x shape:', testX.shape)
print('train data y shape: ', trainY.shape)
print('test data y shape: ', testY.shape)
# Now we can directly jump to building model, we build in Sequential manner as discussed in Chapter 1
model = Sequential()
# the first layer we will use is to flatten the 2-d image input from (28,28) to 784
model.add(Flatten(input_shape = (28, 28)))
# adding first hidden layer with 512 units
model.add(Dense(512))
#adding activation to the output
model.add(Activation('relu'))
#using Dropout for Regularization
model.add(Dropout(0.2))
# adding our final output layer
model.add(Dense(10))
#softmax activation at the end
model.add(Activation('softmax'))
# normalising input data before feeding
trainX = trainX / 255
testX = testX / 255
# compiling model with optimizer and loss
model.compile(optimizer= 'Adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
# training the model
model.fit(trainX, trainY, epochs = 5, batch_size = 64)
# evaluating the model on test data
evalu = model.evaluate(testX, testY)
print('Test Set average Accuracy: ', evalu[1])
| true |
a0f804fe78b0398c7b18ab2ba7fca66c85cd2e5d
|
Python
|
mildrock/weekend
|
/ๅทฅไฝ/main2.py
|
UTF-8
| 597 | 2.65625 | 3 |
[] |
no_license
|
from openpyxl import load_workbook
def getDataFromExcel(index,obj):
row = list(sheet.rows)[index]
obj["entity"] = row[0].value
obj["entityCheck"] = row[1].value
obj["vendor"] = row[2].value
obj["vendorCheck"] = row[3].value
obj["invoice_number"] = row[4].value
obj["beTax"] = row[5].value
obj["Total"] = row[6].value
print(obj)
def processData():
    pass
if __name__ == '__main__':
wb = load_workbook('invoice.xlsx')
sheet = wb.get_sheet_by_name('template')
max = sheet.max_row
for i in range(1,max):
obj = {}
getDataFromExcel(i,obj)
| true |
0409ae707c1c6f2425b0ffcb185e623eda6fbadc
|
Python
|
sasqwatch/SmokeyJab
|
/framework/modules/hostsfilemod.py
|
UTF-8
| 1,322 | 2.5625 | 3 |
[] |
no_license
|
try:
from framework.main import ModuleBase
except ImportError:
pass
class HostsFile(ModuleBase):
@property
def tags(self):
return ['IntrusionSet5']
@property
def needs_root(self):
return True
@property
def relative_delay(self):
# On a scale of 1 (least) to 100 (most) likely to get caught
return 85
@property
def absolute_duration(self):
return 60 * 60 # 1 hour
def do_run(self):
import time
hostname = '${HOSTNAME}'
ip_addr = '${IP_ADDR}'
with open('/etc/hosts', 'a+') as f:
f.seek(0)
data = f.read()
f.write('\n{1}\t{0} # {2}\n'.format(hostname, ip_addr, self._banner))
self.hec_logger('Added a new host to /etc/hosts', hostname=hostname, ip_addr=ip_addr)
time.sleep(self.absolute_duration)
with open('/etc/hosts', 'a+') as f:
f.truncate(len(data))
self.hec_logger('Removed entry from hosts file', hostname=hostname, ip_addr=ip_addr)
def run(self):
self.start()
try:
self.do_run()
except Exception as e:
self.hec_logger('Uncaught exception within module, exiting module gracefully', error=str(e),
severity='error')
self.finish()
| true |
e6f0906c4a6ccb44aeef965613bbebb57b331c7f
|
Python
|
Matthias84/panpy
|
/pan.py
|
UTF-8
| 12,306 | 2.78125 | 3 |
[] |
no_license
|
import argparse
from datetime import datetime, timedelta
from enum import Enum
import xml.etree.ElementTree as ET
from pathlib import Path
from printy import printy
import re
import sys
"""
PAN.py - Read & analyse XML files of Persönlicher Arbeitszeit-Nachweis (PAN)
PAN is a JAVA desktop software to log your working times.
Data is stored in a custom XML file format.
#TODO:
- pan.py check (current, month/year, xmlfilename, path)
- pan.py show (current, month/year, xmlfilename, path)
- pan.py email-lock-import imapserver imapaccount imappassword imapfolder - manual sync with all
- pan.py pdf (current, month/year, xmlfilename, path)- generate PDF with original PAN layout
- pan.py plot (current, month/year, xmlfilename, path)
- Error checking on time formats and logic ... asumptions
"""
def prRed(skk): print("\033[91m{}\033[00m" .format(skk))
def prGreen(skk): print("\033[92m{}\033[00m" .format(skk))
def prYellow(skk): print("\033[93m{}\033[00m" .format(skk))
def prLightPurple(skk): print("\033[94m{}\033[00m" .format(skk))
def prPurple(skk): print("\033[95m{}\033[00m" .format(skk))
def prCyan(skk): print("\033[96m{}\033[00m" .format(skk))
def prLightGray(skk): print("\033[97m{}\033[00m" .format(skk))
def prBlack(skk): print("\033[98m{}\033[00m" .format(skk))
def prBold(skk): print("\033[01m{}\033[00m" .format(skk))
def prItalic(skk): print("\33[3m{}\033[00m" .format(skk))
FMT = '%H:%M'
class DayType(Enum):
work = 1
weekend = 2
vacation = 3
holiday = 4
illness = 5
overtime_free = 6
business_trip = 7
unpaid_free = 8
def __str__(self):
mapping={'work': 'Arbeitstag',
'weekend':'Wochenende',
'vacation':'Urlaub',
'holiday': 'Feiertag',
'illness': 'Krankschreibung',
                 'overtime_free': 'Überstundenausgleich',
'business_trip': 'Dienstreise',
'unpaid_free': 'Freistellung'}
ret = mapping[self.name]
return ret
class WorkDay(object):
def __init__(self, daytype, description, timeblocks):
self.daytype = daytype
self.description = description
self.timeblocks=timeblocks
def check(self, num):
fails = 0
worktime = self.getWorkingTime()
pausetime = self.getPauseTime()
if len(self.timeblocks)>0:
# rule max. worktime day
if worktime > timedelta(hours=10):
                prRed('{:02d}. max. Arbeitszeit überschritten ({} > 10hrs)'.format(num, worktime))
fails += 1
# rule min. pausetime day
if worktime <= timedelta(hours=9):
if pausetime < timedelta(minutes=30):
prRed('{:02d}. min. Pausenzeit unterschritten ({} < 30mins)'.format(num, pausetime))
fails += 1
else:
if pausetime < timedelta(minutes=45):
prRed('{:02d}. min. Pausenzeit unterschritten ({} < 45mins)'.format(num, pausetime))
fails += 1
# Check servicetimes
serviceBegin = datetime.strptime('09:00', FMT)
serviceEnd = datetime.strptime('15:00', FMT)
amBegin = self.timeblocks[0][0]
pmEnd = self.timeblocks[-1][1]
if not ((amBegin <= serviceBegin) and (pmEnd >= serviceEnd)):
prCyan('! {:02d}. Servicezeit potentiell nicht eingehalten ({} - {})'.format(num, amBegin.strftime(FMT), pmEnd.strftime(FMT)))
# rule max. homeoffice
hotime = self.getHomeofficeTime()
if (hotime > timedelta(hours=8)):
                prRed('! {:02d}. max. Heimarbeit überschritten ({} <= 8hrs)'.format(num, worktime))
fails += 1
return fails
def getWorkingTime(self):
worktime = timedelta(hours=0)
for block in self.timeblocks:
worktime += block[1] - block[0]
return worktime
def getPauseTime(self):
pausetime = timedelta(hours=0)
if len(self.timeblocks) > 0:
pausetime = self.timeblocks[1][0]-self.timeblocks[0][1]
if len(self.timeblocks) > 2:
pausetime += self.timeblocks[2][0]-self.timeblocks[1][1]
if len(self.timeblocks) == 4:
pausetime += self.timeblocks[3][0]-self.timeblocks[2][1]
return pausetime
def getHomeofficeTime(self):
if self.description:
if self.description.lower().find('homeoffice') != -1:
# Check if provide a percentage e.g. '0.5 Homeoffice')
perc = re.match('\d+\.\d+', self.description)
if perc:
perc = float(perc.group(0))
hotime = self.getWorkingTime() * perc
else:
hotime = self.getWorkingTime()
return hotime
else:
return timedelta(hours=0)
else:
return timedelta(hours=0)
def __str__(self):
return "{0} {1}".format(str(self.daytype),str(self.timeblocks))
class WorkMonth(object):
def __init__(self, year, month,workdays):
self.year = year
self.monthNum = month
self.workdays = workdays
def check(self):
"""Check rules on worktime month, day and print errors"""
fails = 0
worktime_month = timedelta(hours=0)
worktime_homeoffice = timedelta(hours=0)
for num in self.workdays:
day = self.workdays[num]
if day.daytype == DayType.work:
fails += day.check(num)
worktime = day.getWorkingTime()
worktime_month += worktime
hotime = day.getHomeofficeTime()
worktime_homeoffice += hotime
if (worktime_homeoffice > timedelta(days=10)):
                    prRed('! {:02d}. max. mtl. Heimarbeit überschritten ({} <= 10days)'.format(num, worktime))
fails += 1
print('----------------')
if fails == 0:
            prGreen('Keine Verstöße erkannt')
else:
            prRed('{0} Verstöße erkannt'.format(fails))
def printSummary(self):
"""Summary report on screen"""
weekWorkHours = None
dayDelta = None
for num in self.workdays:
day = self.workdays[num]
if day.daytype == DayType.weekend:
if weekWorkHours:
hours = weekWorkHours.total_seconds() // 3600
mins = weekWorkHours.seconds // 60 % 60
printy('------{}hrs-----'.format(hours), 'y')
weekWorkHours = None
dayDelta = None
printy('{:02d}. (WE)'.format(num), 'w')
elif day.daytype == DayType.holiday:
printy('{:02d}. (Urlaub)'.format(num), 'c')
dayDelta = timedelta(hours=8)
elif day.daytype == DayType.illness:
printy('{:02d}. (Krank)'.format(num), 'c')
dayDelta = timedelta(hours=8)
elif day.daytype == DayType.overtime_free:
                printy('{:02d}. (Überstundenausgleich)'.format(num), 'c')
dayDelta = timedelta(hours=8)
elif day.daytype == DayType.business_trip:
printy('{:02d}. (Dienstreise)'.format(num), 'c')
dayDelta = timedelta(hours=8)
elif day.daytype == DayType.work:
dayDelta = day.getWorkingTime()
workhours = dayDelta.seconds // 3600
workrestminutes = dayDelta.seconds // 60 % 60
absday = datetime.strptime('{}.{}.{}'.format(num, self.monthNum, self.year),'%d.%m.%Y')
today = datetime.today()
pauseDelta = day.getPauseTime()
pausehours = pauseDelta.seconds // 3600
pauserestminutes = pauseDelta.seconds // 60 % 60
if absday == today:
printy('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'wH')
elif absday > today:
# future days
if len(day.timeblocks) == 0:
printy('{:02d}. ?'.format(num), 'g')
else:
printy('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'g')
else:
# past days
if dayDelta > timedelta(hours=8):
printy('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n>')
elif dayDelta < timedelta(hours=8):
printy('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'r>')
else:
printy('{:02d}. {}:{}hrs (Pause: {}:{})'.format(num, workhours, workrestminutes, pausehours, pauserestminutes), 'n')
if weekWorkHours == None:
weekWorkHours = dayDelta
else:
if dayDelta:
weekWorkHours = weekWorkHours + dayDelta
def __str__(self):
ret = ""
for daynumber in self.workdays:
ret+=('{0}. {1}\n'.format(daynumber, self.workdays[daynumber]))
return ret
class PAN(object):
def __init__(self):
parser = argparse.ArgumentParser(
description='Read & analyse XML files of Persรถnlicher Arbeitszeit-Nachweis (PAN)',
usage='''pan.py <command> [<args>]
Supported commands are
check Check schedule validity for work rules
''')
parser.add_argument('command', help='Subcommand to run')
parser.add_argument('--panconf', help='absolute filepath to pan.xml configfile', required=False)
parser.add_argument('--xmlmonth', help='absolute filepath to pan_....xml monthfile', required=False)
args = parser.parse_args()
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
if hasattr(args, 'panconf'):
getattr(self, args.command)(confFilename=args.panconf)
if hasattr(args, 'xmlmonth'):
getattr(self, args.command)(monthXMLFilename=args.xmlmonth)
else:
getattr(self, args.command)()
def check(self, confFilename=None, monthXMLFilename=None):
        print('\n-----Prüfung-----')
if monthXMLFilename is None:
settings = self.__getPanSettings(confFilename)
if settings is not None:
print('{}'.format(settings['fullname']))
else:
print('{}'.format(monthXMLFilename))
xml = self.__openMonthXMLFile(monthXMLFilename)
month = self.__getMonth(xml)
month.check()
def show(self, confFilename=None, monthXMLFilename=None):
print('\n----------')
if monthXMLFilename is None:
settings = self.__getPanSettings(confFilename)
if settings is not None:
print('{}'.format(settings['fullname']))
else:
print('{}'.format(monthXMLFilename))
xml = self.__openMonthXMLFile(monthXMLFilename)
month = self.__getMonth(xml)
month.printSummary()
def __getPanSettings(self, confFilename = None):
if confFilename is None:
confFilename = Path.home() / 'pan.xml'
try:
confTree = ET.parse(confFilename)
properties = confTree.getroot()
settings = {}
for entry in properties.findall('entry'):
key = entry.get('key')
if key == 'verzeichnis':
settings['schedulepath'] = Path(entry.text) / '\pan'
elif key == 'username':
settings['fullname'] = entry.text
elif key == 'abteilung':
settings['department'] = entry.text
elif key == 'uid':
settings['userlogin'] = entry.text
except FileNotFoundError:
print('No pan.xml config file found: {0}\nPlease start PAN application for a first run.'.format(confFilename))
settings = None
return settings
def __openMonthXMLFile(self, filename):
tree = ET.parse(filename)
return tree.getroot()
def __getMonth(self,xml):
"""Parse PAN XML month file and get internal representation of day items"""
#TODO: Monat, Jahr, SollStunden, Urlaub,ZeitdiffAkt, ZeitdiffVor, erweitert
dayTypeMapping = {'Arbeitstag': DayType.work,
'Wochenende': DayType.weekend,
'Urlaub': DayType.vacation,
'Feiertag': DayType.holiday,
'Krankheit': DayType.illness,
                          'Überstunden genommen': DayType.overtime_free,
'Dienstreise': DayType.business_trip,
'Freistellung': DayType.unpaid_free}
workdays = {}
monthNum = int(xml.find('Monat').text)
yearNum = int(xml.find('Jahr').text)
if xml.find('Erweitert').text == 'true':
extendedFormat = True
else:
extendedFormat = False
for panday in xml.findall('Tag'):
# parse
numday = int(panday.find('Datum').text)
daytype = panday.find('TagesTyp').text
description = panday.find('Bemerkung').text
morning = panday.find('Vormittag').text
afternoon = panday.find('Nachmittag').text
if extendedFormat:
third = panday.find('Dritte').text
fourth = panday.find('Vierte').text
else:
third = None
fourth = None
# convert
daytype = dayTypeMapping[daytype]
morning = self. _parsePANTimeRange(morning)
afternoon = self. _parsePANTimeRange(afternoon)
third = self. _parsePANTimeRange(third)
fourth = self. _parsePANTimeRange(fourth)
timeblocks = [morning, afternoon, third, fourth]
timeblocks = list(filter(None, timeblocks))
# save
day = WorkDay(daytype, description, timeblocks)
workdays[numday] = day
month = WorkMonth(yearNum,monthNum,workdays)
return month
def _parsePANTimeRange(self,strDayRange):
#deconstruct '09:00 - 12:30'
try:
begin, end = strDayRange.split(' - ')
begin = datetime.strptime(begin, FMT)
end = datetime.strptime(end, FMT)
return begin, end
except AttributeError:
return None
if __name__ == '__main__':
PAN()
| true |
f36b8447b65e34e2657e16abdfa6f0d6bfec1556
|
Python
|
miniii222/PythonAlgorithm-Interview
|
/๋ฌธ์์ด/Problem01_valid-palindrome(my).py
|
UTF-8
| 398 | 3.609375 | 4 |
[] |
no_license
|
#https://leetcode.com/problems/valid-palindrome/
#best
def isPalindrome(self, s: str) -> bool:
s = s.lower()
my = ""
for ss in s :
        # isalnum() -> keeps only letters and digits
if ss.isalnum() :
my += ss
    return my == my[::-1]  # reversing a string via slicing is a very fast approach!
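# Example: for s = "A man, a plan, a canal: Panama" the cleaned string is
# "amanaplanacanalpanama", which reads the same reversed, so the result is True.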
| true |
9647c6a14b3a937384a2a8546913e981634f2223
|
Python
|
naveenk2k/Project-Euler
|
/P125_PalindromicSums.py
|
UTF-8
| 1,145 | 4.375 | 4 |
[] |
no_license
|
'''
QUESTION: The palindromic number 595 is interesting because it can be written as the sum of consecutive squares: 6^2 + 7^2 + 8^2 + 9^2 + 10^2 + 11^2 + 12^2.
There are exactly eleven palindromes below one-thousand that can be written as consecutive square sums, and the sum of these palindromes is 4164. Note that 1 = 0^2 + 1^2 has not been included as this problem is concerned with the squares of positive integers.
Find the sum of all the numbers less than 10^8 that are both palindromic and can be written as the sum of consecutive squares.
ANSWER: 2906969179 (~0.6s)
'''
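# Sanity check of the example from the statement:
# 6^2 + 7^2 + 8^2 + 9^2 + 10^2 + 11^2 + 12^2 = 36+49+64+81+100+121+144 = 595, a palindrome.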
limit = 100000000
sqrtLimit = round(limit ** 0.5)
sum = 0
a = set()
def isPalindrome(n):
return str(n) == str(n)[::-1]
'''Brute force method:
Outer loop sets the max 'n' and inner loop generates all values 1^2 + 2^2 + ... + n^2 and at each step checks for palindromicity also that the number isn't repeated
'''
for i in range(1, sqrtLimit):
num = i * i
for j in range(i + 1, sqrtLimit):
num += j * j
if num > limit:
break
if isPalindrome(num) and num not in a:
sum += num
a.add(num)
print(sum)
| true |
8556e0a03d660092512c5556fbe0ac5cdb91cc94
|
Python
|
swordey/vitrab
|
/core/strategies/DEMA.py
|
UTF-8
| 5,219 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
#! DEMA.py
# Double Exponential Moving Average Strategy
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource
from core.strategies.indicators.DEMA import DEMA
from core.strategies.indicators.SMA import SMA
from core.strategies.base_strategy import BaseStrategy
class DEMAConfig:
def __init__(self):
self.weight = 21
self.down = -0.025
self.up = 0.025
class DEMATrend:
def __init__(self, direction=None):
self.direction = direction
self.duration = 0
self.persisted = False
self.adviced = False
class DEMAStrategy(BaseStrategy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.currentTrend = None
self.age = 0
self.trend = DEMATrend("undefined")
self.config = DEMAConfig()
self.dema = DEMA(self.config.weight)
self.sma = SMA(self.config.weight)
self.signalsDataSource = {}
for ticker in self.tickers:
self.signalsDataSource[ticker] = ColumnDataSource()
self.reset_column_data_sources()
self.init_signals()
# Abstract methods
def init_signals(self):
for ticker in self.tickers:
self.signals[ticker] = pd.DataFrame(dict(Date=[],
signal=[],
DEMA=[],
SMA=[],
positions=[]))
self.signals[ticker].set_index("Date", inplace=True)
def calc_signals(self, history):
for ticker in self.tickers:
self.dema.update(history.loc[ticker]["Close"].iloc[-1])
self.sma.update(history.loc[ticker]["Close"].iloc[-1])
signal = 0
position = 0
resDEMA = self.dema.result
resSMA = self.sma.result
price = history.loc[ticker]["Close"].iloc[-1]
diff = resSMA - resDEMA
if diff > self.config.up:
                if self.currentTrend != 'up':
self.currentTrend = 'up'
position = 1
signal = 1
else:
position = 0
signal = 0
elif diff < self.config.down:
                if self.currentTrend != 'down':
self.currentTrend = 'down'
position = -1
signal = 0
else:
position = 0
signal = 0
else:
position = 0
signal = 0
self.signals[ticker].loc[history.loc[ticker].iloc[-1].name] = 0
self.signals[ticker]["signal"] = signal
self.signals[ticker]["DEMA"] = self.dema.result
self.signals[ticker]["SMA"] = self.sma.result
self.signals[ticker]["positions"] = position
def init_plot(self, plot_area):
for ticker in self.tickers:
self.sma_visu = plot_area.select_one({'name': ticker + '_sma'})
if not self.sma_visu:
self.sma_visu = plot_area.line(x='Date',
y='SMA',
source=self.signalsDataSource[ticker],
legend_label=ticker + " SMA",
line_color="blue",
name=ticker+'_sma')
else:
self.signalsDataSource[ticker] = self.sma_visu.data_source
self.dema_visu = plot_area.select_one({'name': ticker + '_dema'})
if not self.dema_visu:
self.dema_visu = plot_area.line(x='Date',
y='DEMA',
source=self.signalsDataSource[ticker],
legend_label=ticker + " DEMA",
line_color="green",
name=ticker+'_dema')
def plot(self):
for ticker in self.tickers:
if ticker not in self.signals:
continue
signal_data = dict(
Date=[self.signals[ticker].iloc[-1].name],
signal=[self.signals[ticker].iloc[-1].signal],
SMA=[self.signals[ticker].iloc[-1].SMA],
DEMA=[self.signals[ticker].iloc[-1].DEMA],
positions=[self.signals[ticker].iloc[-1].positions]
)
self.signalsDataSource[ticker].stream(signal_data)
def __del__(self):
self.reset_column_data_sources()
def reset_column_data_sources(self):
for ticker in self.tickers:
self.signalsDataSource[ticker].data = dict(Date=[],
signal=[],
SMA=[],
DEMA=[],
positions=[])
| true |
c3534b0552a218c86c689d8ea6698915888f9954
|
Python
|
prusovae/TDM
|
/test/unit/datasource/model_test.py
|
UTF-8
| 625 | 2.546875 | 3 |
[] |
no_license
|
import unittest
from mock import Mock
from datasource.model import IQDataSource
class TestIQDataSource(unittest.TestCase):
def test_get_list_of_table_names_works(self):
mock_iq_gateway = Mock()
        my_iq_ds = IQDataSource(name='Тестовый источник', gateway=mock_iq_gateway)
data = ['d_entity', 'd_currency', 'd_account']
mock_iq_gateway.get_list_of_table_names.return_value = data
self.assertListEqual(
list1=['d_entity', 'd_currency', 'd_account'],
list2=my_iq_ds.get_list_of_table_names())
if __name__ == '__main__':
unittest.main()
| true |
056224b119eb9610f1bff1da6bbeb940abeb42b4
|
Python
|
gomezvillegasdaniel/ecommerce-api
|
/src/tests/unit/test_product.py
|
UTF-8
| 1,360 | 2.65625 | 3 |
[] |
no_license
|
from tests.base_test import BaseTest
from models.product_model import ProductModel
class ProductTest(BaseTest):
def setUp(self):
super().setUp()
with self.app_context():
self.product = ProductModel('Product X', 'PieceXYZ123', 100, 75.95)
self.product.save_to_db()
def test_create_product(self):
with self.app_context():
self.product.save_to_db()
product = ProductModel.find_by_id(self.product.id)
self.assertEqual(product.name, 'Product X')
def test_fail_create_product(self):
with self.assertRaises(BaseException):
ProductModel()
def test_update_product(self):
with self.app_context():
self.product.price = 90.55
self.product.save_to_db()
product = ProductModel.find_by_id(self.product.id)
self.assertEqual(product.price, 90.55)
def test_fail_update_product(self):
with self.app_context():
with self.assertRaises(BaseException):
self.product = None
self.product.save_to_db()
def test_delete_product(self):
with self.app_context():
self.product.delete_from_db()
product = ProductModel.find_by_id(self.product.id)
self.assertIsNone(product)
| true |
28928a460be9c26bcda770d21c37e364fa040b74
|
Python
|
zarar-shah/Chapter-3-Assignment
|
/3.31.py
|
UTF-8
| 440 | 3.8125 | 4 |
[] |
no_license
|
print("Name: Zarar Ali Shah")
print("Roll #: 18B-075-CS (A)")
print("Assignment")
print("\n Exercise 3.31\n")
radius = 8
x = float(input("Please enter the x-coordinate: "))
y = float(input("Please enter the y-coordinate: "))
import math
a = math.sqrt((x*x)+(y*y)) < radius
if a ==True:
print("\nAre these coordinates within the dart? Yes It Is In!")
else:
print("\nAre these coordinates within tha dart? No It Is Not In!")
| true |
de005ac7f1068d3c2d474db2ce13833dc748e9ec
|
Python
|
miraflynn/Pie3-MiniProject-LineFollower
|
/receive_data.py
|
UTF-8
| 2,314 | 3.578125 | 4 |
[] |
no_license
|
# ******************************************************************
# * *
# * *
# * Example Python program that receives data from an Arduino *
# * *
# * *
# ******************************************************************
import serial
#
# Note 1: This python script was designed to run with Python 3.
#
# Note 2: The script uses "pyserial" which must be installed. If you have
# previously installed the "serial" package, it must be uninstalled
# first.
#
# Note 3: While this script is running you can not re-program the Arduino.
# Before downloading a new Arduino sketch, you must exit this
# script first.
#
#
# Set the name of the serial port. Determine the name as follows:
# 1) From Arduino's "Tools" menu, select "Port"
# 2) It will show you which Port is used to connect to the Arduino
#
# For Windows computers, the name is formatted like: "COM6"
# For Apple computers, the name is formatted like: "/dev/tty.usbmodemfa141"
#
arduinoComPort = "COM6"
#
# Set the baud rate
# NOTE1: The baudRate for the sending and receiving programs must be the same!
# NOTE2: For faster communication, set the baudRate to 115200 below
# and check that the arduino sketch you are using is updated as well.
#
baudRate = 9600
#
# open the serial port
#
serialPort = serial.Serial(arduinoComPort, baudRate, timeout=1)
f = open("data/datafile.csv", "w")
#
# main loop to read data from the Arduino, then display it
#
while True:
#
# ask for a line of data from the serial port, the ".decode()" converts the
# data from an "array of bytes", to a string
#
lineOfData = serialPort.readline().decode()
#
# check if data was received
#
if len(lineOfData) > 0:
#
# data was received, convert it into 4 integers
#
f.write(lineOfData)
print(lineOfData[:-1])
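    # Parsing sketch for the commented-out prints below (assumes the Arduino
    # sends four comma-separated integers per line):
    #   a, b, c, d = [int(v) for v in lineOfData.split(',')]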
# #
# # print the results
# #
# print("a = " + str(a), end="")
# print(", b = " + str(b), end="")
# print(", c = " + str(c), end="")
# print(", d = " + str(d))
| true |
f0cd5013fa92e64ebee7b8b032ac23730fe4d1c2
|
Python
|
KevinGevers/NetComputing
|
/Manager/manager.py
|
UTF-8
| 4,502 | 2.828125 | 3 |
[] |
no_license
|
import pika
import json
import datetime
from threading import Lock, Event, _start_new_thread
#TODO: Currently the same lock is used for locking access to parking_lot and reservations. Make 2 locks??
POOL_TIME = 10 #Seconds
RESERVATION_DURATION = 60 * 2
'''
This Class is the ParkingLot Manager
ParkingLots at the Manager's location update the Manager with their availability status.
The manager joins all the ParkingLots to obtain the total availability at the location.
The manager is also in charge of making reservations through a REST interface. See: 'manager_app.py'
Reservations are removed when expired.
The manager also holds all the data inside the class as there is currently no external database.
'''
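# Example of the JSON status message a ParkingLot is expected to publish on the queue
# (field names inferred from handler() and get_status(); the values are made up):
#   {"id": "lot-1", "total": 50, "taken": 30, "available": 20}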
class Manager:
data_lock = Lock()
thread_event = Event()
location = {
'longitude': 0.0,
'latitude': 0.0
}
def __init__(self):
print('New manager!')
queue_connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
self.channel = queue_connection.channel()
self.channel.queue_declare(queue='hello')
self.channel.basic_consume(self.handler, queue='hello', no_ack=True)
self.parking_lots = {}
self.reservations = {}
def handler(self, ch, method, properties, body):
msg = body.decode('utf-8')
plot_data = json.loads(msg)
print(' \n\n[x] Received parking status.')
with self.data_lock:
parking_id = plot_data['id']
if parking_id in self.parking_lots:
self.parking_lots.update()
self.parking_lots[parking_id] = plot_data
return
def start(self):
_start_new_thread(self.listen_queue, ())
_start_new_thread(self.reservation_cleaner, ())
def listen_queue(self):
print(' [*] Waiting for messages.')
self.channel.start_consuming()
def set_location(self, lon, lat):
self.location['longitude'] = lon
self.location['latitude'] = lat
def get_status(self):
status = {
'total' : 0,
'reserved' : len(self.reservations),
'taken' : 0,
'available' : 0
}
for (p_id, data) in self.parking_lots.items():
status['total'] += data['total']
status['taken'] += data['taken']
status['available'] += data['available']
status['available'] -= status['reserved']
return status
def get_available(self):
with self.data_lock:
return self.get_status()['available']
def make_reservation(self, client_id):
# Return null of no spaces left
if self.get_available() <= 0:
return None
# If already has a reservation return it.
if client_id in self.reservations:
return self.get_reservation(client_id)
# Otherwise, make reservation
with self.data_lock:
now = datetime.datetime.now()
expiration = now + datetime.timedelta(seconds=RESERVATION_DURATION)
self.reservations[client_id] = expiration
r = {
'client_id': client_id,
'start_time': now,
'end_time': expiration
}
self.reservations[client_id] = r
return r
def delete_reservation(self, client_id):
print('Delete:' + client_id)
with self.data_lock:
r = self.reservations.pop(client_id, None)
print(r)
return {
'result' : not (r == None),
'reservation' : r
}
def get_reservation(self, client_id):
with self.data_lock:
return self.reservations[client_id]
# This method deletes expired reservations
def reservation_cleaner(self):
while(not self.thread_event.wait(POOL_TIME)):
with self.data_lock:
keys = []
for key, item in self.reservations.items():
print(key + ': ' + str(datetime.datetime.now()) + ' ' + str(item['end_time']))
if datetime.datetime.now() >= item['end_time']:
keys.append(key)
for key in keys:
print('Client: ' + key + ' reservation expired')
print(self.delete_reservation(key))
if __name__ == '__main__':
manager = Manager()
manager.start()
while True:
pass
| true |
d93dff47152e9cee3480e880d52579f56122edda
|
Python
|
urbanskii/UdemyPythonCourse
|
/secao04/se04E19.py
|
UTF-8
| 467 | 4.25 | 4 |
[] |
no_license
|
"""
19 - Read a volume value in litres and display it converted to cubic metres (m³). The
conversion formula is: M = L/1000, where L is the volume in litres and M is the volume in cubic metres.
"""
def main():
volume_litros = float(input('Digite um valor de volume em litros: '))
volume_m_cubicos = volume_litros/1000
    print(f'O valor de volume em litros convertido para metros cubicos M³: {volume_m_cubicos}')
if __name__ == '__main__':
main()
| true |
90e7588c9d11bc5156f5416ada05cef9aeac3107
|
Python
|
Tyboon/MOCAD
|
/A2DI/part2/TP7/code-stub/py/polka/classification/binary.py
|
UTF-8
| 11,606 | 2.71875 | 3 |
[] |
no_license
|
"""
Online learner for binary classification.
"""
import sys, os
from numpy import zeros, dot, inf, sqrt, resize, identity, double, array, matrix, append
from numpy.linalg import norm
import time
from polka.common.datasource import BinarySource as Source
from polka.common.alphabet import Alphabet, POS_LAB, NEG_LAB
from polka.common.prediction import BinaryPrediction as Prediction
from polka.common.learner import OnlineLearner
from polka.common.function import ltqnorm
import codecs
class Binary( object ):
""" Abstract linear classifier (in primal form) for binary problems."""
def __init__( self, feature_alphabet=None, bias=False ):
self._feature_alphabet = feature_alphabet
if self._feature_alphabet == None:
self._feature_alphabet = Alphabet()
self._weights = None
self._init_model()
self._bias = bias
return
def _init_model(self):
""" initialize weights to 0 """
m = self._feature_alphabet.size()
self._weights = zeros( m, 'd' )
return
def set_alphabet(self, feature_alphabet ):
""" set alphabet and (re-)initialize weights
accordingly"""
m = feature_alphabet.size()
assert m >= 1 or not feature_alphabet.locked(), "Feature alphabet has %s size." %m
self._feature_alphabet = feature_alphabet
self._init_model()
return
def get_alphabet(self):
return self._feature_alphabet
def get_model(self):
""" return current model """
return self._get_model()
def _get_model(self):
w = self._weights
return w
def set_model( self, weight_dict ):
""" set model weights from dictionaries"""
self._set_weights( weight_dict )
return
def _set_weights( self, weight_dict ):
""" set model weight vectors from weight dictionary"""
assert isinstance(weight_dict,dict)
weights = self._weights
feat_alpha = self._feature_alphabet
for f in weight_dict:
fidx = feat_alpha[f]
weights[fidx] = weight_dict[f]
return
def learn( self, train_sample, epochs ):
raise NotImplementedError
def _get_train_stream( self, data ):
""" returns training instances stream from data file name or
data source"""
feature_alphabet = self._feature_alphabet
if isinstance(data,str):
stream = Source(data, feature_alphabet=feature_alphabet, alphabet_lock=False, alphabet_pop=False, bias=self._bias)
elif isinstance(data,Source):
stream = data
elif callable(data):
stream = Source(data, feature_alphabet=feature_alphabet, alphabet_lock=False, alphabet_pop=False, bias=self._bias)
else:
raise Exception("Error: data is either string for file name or ClassificationSource!")
# set alphabet from data
self.set_alphabet( stream.get_alphabet() )
return stream
def _get_test_stream( self, data ):
""" returns test instances stream from data file name or
data source"""
if isinstance(data,str):
stream = Source( data, alphabet_lock=True,\
alphabet_pop=False, bias=self._bias )
elif isinstance(data,Source):
stream = data
else:
raise Exception("Error: data is either string for file name or ClassificationSource!")
# use model alphabet
stream.set_alphabet( self.get_alphabet() )
return stream
def resize_weights(self, instance):
if len(self._weights) != self._feature_alphabet.size():
self._weights.resize(self._feature_alphabet.size())
return
def update( self, instance, prediction, rate=1.0 ):
raise NotImplementedError
def _decode( self, instance, weights ):
""" return prediction in {-1, 1}
for current instance based on linear combination of given
weight parameters """
fv = instance.get_fv()
score = dot( weights, fv )
return Prediction( score )
def decode( self, instance ):
""" return prediction for instance given current model"""
if not self._feature_alphabet.locked():
self.resize_weights(instance)
return self._decode( instance, self.get_model() )
def predict( self, instance ):
""" return prediction (scored labels) for instance """
ws = self._get_model()
prediction = self._decode( instance, ws )
return prediction
def classify( self, instance ):
""" return highest-scoring outcome along with its score for
instance according to current weight vectors."""
ws = self._get_model()
prediction = self._decode( instance, ws )
return prediction.get_pred() # (label,score)
def test( self, test_sample, sink ):
""" evaluate classifier on test sample """
start_time = time.time()
sink.set_labels = [POS_LAB,NEG_LAB]
# read in data
stream = self._get_test_stream( test_sample )
print >> sys.stderr, "-"*100
print >> sys.stderr, "Testing...",
# make predictions on test sample
for inst in stream:
true_label = inst.get_target_label()
pred_label, score = self.classify( inst )
# store label pair
sink.update( true_label, pred_label, score )
stream.close()
elapsed_time = time.time()-start_time
print >> sys.stderr, "done in %s sec." %(round(elapsed_time,3))
return
class PerceptronBinary( Binary ):
""" Linear classifier with online learning and perceptron update
(in primal form)"""
def __init__( self, bias=False ):
Binary.__init__(self, bias=bias )
self._learner = OnlineLearner(self)
return
def learn( self, data, epochs=1, start_iter=0, write_every_iter=False, modelpath=None, forget_first=False):
instances = self._get_train_stream( data )
self._learner.learn( instances, epochs, start_iter, write_every_iter, modelpath, forget_first )
return
def update( self, instance, prediction ):
""" perceptron update rule:
w = w + y * x
"""
w = self._weights
fv = instance.get_fv()
t_lab = int(instance.get_target_label())
p_lab = int(prediction.get_label())
error = (p_lab != t_lab)
# ...
if (error) :
self._weights = w + dot(t_lab, fv)
return error
class PerceptronBinaryAvg( PerceptronBinary ):
""" Linear classifier with online learning and perceptron update
(in primal form)"""
def __init__( self, bias=False ):
Binary.__init__(self, bias=bias )
self._learner = OnlineLearner(self)
return
def _init_model(self):
""" initialize weights to 0 """
m = self._feature_alphabet.size()
self.weightC = zeros( m, 'd' )
self._weights = zeros( m, 'd' )
return
def resize_weights(self, instance):
if len(self._weights) != self._feature_alphabet.size():
self._weights.resize(self._feature_alphabet.size())
self.weightC.resize(self._feature_alphabet.size())
return
def _get_model(self):
w = self.weightC
return w
def update( self, instance, prediction ):
""" perceptron update rule:
w = w + y * x
"""
fv = instance.get_fv()
t_lab = int(instance.get_target_label())
p_lab = int(prediction.get_label())
error = (p_lab != t_lab)
# ...
if (error) :
w = self.weightC + dot(t_lab, fv)
else :
w = self.weightC
self._weights = self._weights + w
return error
class PABinary( PerceptronBinary ):
""" Passive Aggressive (PA) classifier in primal form. PA has a
margin-based update rule: each update yields at least a margin of
one (see defails below). Specifically, we implement PA-I rule for
the binary setting (see Crammer et. al 2006)."""
def __init__( self, bias=False, C=inf ):
PerceptronBinary.__init__( self, bias=bias )
self._C = C # aggressiveness parameter
return
def update( self, instance, prediction ):
"""
w = w + t * y * x
where: t = min {C, loss / ||x||**2}
loss = 0 if margin >= 1.0
1.0 - margin o.w.
margin = y (w . x)
"""
w = self._weights
fv = instance.get_fv()
t_lab = int(instance.get_target_label())
score = prediction.get_score()
loss = 0.0
# ...
margin = dot(t_lab, dot(w, fv))
if margin < 1.0 :
loss = 1.0 - margin
t = min(self._C, (loss/ pow(norm(fv),2)))
self._weights = w + dot(t, dot(t_lab, fv))
return loss
# TODO
class PABinaryAvg( PerceptronBinary ):
""" Passive Aggressive (PA) classifier in primal form. PA has a
margin-based update rule: each update yields at least a margin of
one (see defails below). Specifically, we implement PA-I rule for
the binary setting (see Crammer et. al 2006)."""
def __init__( self, bias=False, C=inf ):
PerceptronBinary.__init__( self, bias=bias )
self._C = C # aggressiveness parameter
return
def _init_model(self):
""" initialize weights to 0 """
m = self._feature_alphabet.size()
self.weightC = zeros( m, 'd' )
self._weights = zeros( m, 'd' )
return
def resize_weights(self, instance):
if len(self._weights) != self._feature_alphabet.size():
self._weights.resize(self._feature_alphabet.size())
self.weightC.resize(self._feature_alphabet.size())
return
def update( self, instance, prediction ):
"""
w = w + t * y * x
where: t = min {C, loss / ||x||**2}
loss = 0 if margin >= 1.0
1.0 - margin o.w.
margin = y (w . x)
"""
w = self._weights
fv = instance.get_fv()
t_lab = int(instance.get_target_label())
score = prediction.get_score()
loss = 0.0
# ...
margin = dot(t_lab, dot(w, fv))
if margin < 1.0 :
loss = 1.0 - margin
t = min(self._C, (loss/ pow(norm(fv),2)))
self._weights = w + dot(t, dot(t_lab, fv))
return loss
if __name__ == "__main__":
import sys
import optparse
from polka.common.result_sink import ClassificationSink
parser = optparse.OptionParser()
parser.add_option("-u", "--update", \
choices=['perc', 'pa', 'percAvg', 'paAvg'], \
default='pa', \
help="'perc' (perceptron), 'pa' (passive-aggressive)")
parser.add_option("-b", "--bias", \
action="store_true", \
default=False, \
help="use biases (default: False)")
parser.add_option("-C", "--aggressiveness", \
action="store", \
default=inf, \
type=float, \
help="aggressiveness parameter for PA (default: inf)")
parser.add_option("-d", "--train", \
action="store", \
default='', \
help="read training data from file")
parser.add_option("-t", "--test", \
action="store", \
default='', \
help="read test data from file")
parser.add_option("-i", "--iterations", \
action="store",\
default=10, \
type=int, \
help="number of iterations (default: 10)")
parser.add_option("-o", "--output", \
action="store", \
default='', \
help="output predicted labels in file")
(options, args) = parser.parse_args()
# check options
update = options.update
train = options.train
test = options.test
output = options.output
if not train:
sys.exit("Please provide train data file (-d).")
if not test:
sys.exit("Please provide test data file (-t).")
print >> sys.stderr, "Binary Classification model: %s" %update
C = options.aggressiveness
bias = options.bias
# init model
if options.update == 'perc':
classifier = PerceptronBinary( bias=bias )
elif options.update == 'pa':
classifier = PABinary( bias=bias, C=C )
elif options.update == 'percAvg':
classifier = PerceptronBinaryAvg( bias=bias )
elif options.update == 'paAvg':
classifier = PABinaryAvg( bias=bias )
if train:
print >> sys.stderr, "Training on data in '%s'." %train
classifier.learn( train, epochs=options.iterations )
print >> sys.stderr, "done."
if test:
sink = ClassificationSink()
classifier.test( test, sink )
sink.print_report()
if output:
sink.print_prediction(output)
| true |
7566fafe15bb6cfe5cbbe6e46567d86afef05740
|
Python
|
Penaz91/Glitch_Heaven
|
/Game/components/triggerableItem.py
|
UTF-8
| 773 | 2.6875 | 3 |
[
"MIT"
] |
permissive
|
# Triggerable Item Component
# Part of the Glitch_Heaven Project
# Copyright 2015-2016 Penaz <penazarea@altervista.org>
import pygame
class triggerableItem(pygame.sprite.Sprite):
def __init__(self, location, pwd, inactive, active, *groups):
super(triggerableItem, self).__init__(*groups)
self.inactive = inactive
self.active = active
self.image = self.inactive
self.used = False
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = location[0], location[1]
self.password = pwd
def activate(self):
if not self.used:
self.image = self.active
self.used = True
def deactivate(self):
if self.used:
self.image = self.inactive
self.used = False
| true |
3127d121f6f06faa85a09de26f6879220b6fd00d
|
Python
|
paarubhatt/Assignments
|
/Fibonacci series.py
|
UTF-8
| 634 | 4.53125 | 5 |
[] |
no_license
|
#Recursive function to display fibonacci series upto 8 terms
def Fibonacci(n):
#To check given term is negative number
if n < 0:
print("Invalid Input")
#To check given term is 0 ,returns 0
elif n == 0:
return 0
# To check given term is either 1 or 2 because series for 1 or 2 terms will be 0 1
elif n == 1 or n == 2:
return 1
#Return a series until term value exceeds
else:
return Fibonacci(n-1)+Fibonacci(n-2)
#initialized term value
term = 8
#For loop prints the fibonacci series upto 8 terms
for i in range(term):
print(Fibonacci(i))
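# Expected output for term = 8 (one value per line): 0 1 1 2 3 5 8 13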
| true |
6c4469b32c5989eaa85c56569be09f3c6a0ae1a8
|
Python
|
onur-kantar/Gold-Price-Analyzer
|
/Code/Data Scraping/Main.py
|
UTF-8
| 3,892 | 3.171875 | 3 |
[] |
no_license
|
import pandas as pd
import Preprocessing as pre
import Scrape
import Analysis
import WriteToCSV as wcsv
import Augmenter as aug
# Tweet Scraping ------------------------------------
url = 'https://twitter.com/search?f=live&q=(%23XAUUSD)%20lang%3Aen%20-filter%3Areplies&src=typed_query'
minTweetSize = 100
tweets = Scrape.ScrapeTweet(url, minTweetSize)
#-------------------------------------------------
# Cleaning Step -------------------------------
tweets = pre.tweetClean(tweets, 'medium')
#-------------------------------------------------
# Labeling Step ------------------------------
classes = []
classes = tweets.apply(Analysis.tweetAnalysis)
#-------------------------------------------------
# Writing to the .csv File -----------------
dict_val = {
'tweet' : tweets,
'target' : classes
}
df_tweets = pd.DataFrame(dict_val)
wcsv.writeRow('tweets.csv', dict_val)
#-------------------------------------------------
# Sampling ---------------------------------------
# Class count
count_class_1, count_class_0 = df_tweets.target.value_counts()
# Divide by class
df_class_0 = df_tweets[df_tweets['target'] == 0]
df_class_1 = df_tweets[df_tweets['target'] == 1]
# Random Under-Sampling
df_class_1_under = df_class_1.sample(count_class_0)
df_under = pd.concat([df_class_1_under, df_class_0], axis=0)
wcsv.writeRow('twitter_under_sampling.csv', df_under)
# Random Over-Sampling
df_class_0_over = df_class_0.sample(count_class_1, replace=True)
df_over = pd.concat([df_class_0_over, df_class_1], axis=0)
wcsv.writeRow('twitter_over_sampling.csv', df_over)
# Augmenter
df_augment = aug.augments(df_tweets['tweet'], df_tweets['target'])
wcsv.writeRow('augment_twitter.csv', df_augment)
#-------------------------------------------------
# =============================================================================
# Numerical Data Scraping ----------------------------
url = input("Verilerini Toplamak İstediğiniz Yatırım Aracının " +
            "Investing'deki Geçmiş Verileri Sayfasının URL'sini Girin. " +
            "Örn: https://www.investing.com/currencies/xau-usd-historical-data: ")
startDateName = input("Başlangıç Tarihi Girin. Örn: 7/13/2018: ")
endDateName = input("Bitiş Tarihi Girin. Örn: 7/13/2020: ")
numerical = Scrape.ScrapeNumerical(url, startDateName, endDateName)
#-------------------------------------------------
print(numerical)
# Cleaning Step -------------------------------
numerical = pre.numericalClean(numerical)
#-------------------------------------------------
# Labeling Step ------------------------------
classes = []
classes = Analysis.numericalAnalysis(numerical)
#-------------------------------------------------
# Writing to the .csv File -----------------
numerical['target'] = classes
wcsv.writeRow('tweetas.csv', numerical)
#-------------------------------------------------
# Sampling ---------------------------------------
# Class count
count_class_1, count_class_0 = numerical.target.value_counts()
# Divide by class
df_class_0 = numerical[numerical['target'] == 0]
df_class_1 = numerical[numerical['target'] == 1]
# Random Under-Sampling
df_class_1_under = df_class_1.sample(count_class_0)
df_under = pd.concat([df_class_1_under, df_class_0], axis=0)
wcsv.writeRow('numerical_under_sampling.csv', df_under)
# Random Over-Sampling
df_class_0_over = df_class_0.sample(count_class_1, replace=True)
df_over = pd.concat([df_class_0_over, df_class_1], axis=0)
wcsv.writeRow('numerical_over_sampling.csv', df_over)
#-------------------------------------------------
| true |
2a6040f3a3f1015f461429326407f04719979046
|
Python
|
Tyyrhjelm/cse210-student-jumper
|
/jumper/game/New folder (3)/cse210-student-hilo-master (2)[1337]/cse210-student-hilo-master/cse210-student-hilo-master/hilo/game/DisplayGuy.py
|
UTF-8
| 1,263 | 3.40625 | 3 |
[] |
no_license
|
from game.WordBank import word_bank
class board:
def __init__(self) -> None:
self.fails = 0
self.blanks = []
self.word = word_bank.get_word()
i = 0
while i < len(self.word):
self.blanks.append('*')
i += 1
def guy(self):
print(self.blanks)
def parachute(self):
if self.fails < 1:
print( " ___")
if self.fails < 2:
print('/ \ ')
if self.fails < 3:
print('\___/')
print(' \|/')
print(' o')
print(' /I\ ')
print(' / \ ')
else:
print(' x')
print(' /I\ ')
print(' / \ ')
print('You died')
def check_guess(self, letter):
is_here = False
for i in range(len(self.word)):
if letter == self.word[i]:
self.blanks[i] = letter
is_here = True
if is_here != True:
self.fails += 1
def stayin_alive(self):
if self.fails > 2:
return False
else:
return True
def check_win(self):
if '*' not in self.blanks:
return True
else:
return False
| true |
3d129869622c0420dcf703be6f618f7d7d12a7f9
|
Python
|
shahkeval0101/Intelligent-Interview-System
|
/program/mcq.py
|
UTF-8
| 1,754 | 3.484375 | 3 |
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 15:12:15 2020
@author: keval
"""
def coding_mcq():
print("Now you will have your Mutltiple choice question test")
print("Each question will have 4 answers and you have to select one ")
print("Here are your questions")
count = 0
ans1 = input("With what data structure can a priority queue be implemented?\n"\
"a) Array\n"\
"b) List\n"\
"c) Heap\n"\
"d) Tree\n")
if ans1.lower() == "d":
count+=1
ans2 = input("Architecture of database is viewed as?\n"\
"a) two level\n"\
"b) four level\n"\
"c) three level\n"\
"d) one level\n")
if ans2.lower() == "c":
count+=1
ans3 = input(" _____ is used to find and fix bugs in the Java programs.\n"\
"a) JVM\n"\
"b) JRE\n"\
"c) JDK\n"\
"d) JDB\n")
if ans3.lower() == "d":
count+=1
ans4 = input(" Study the following program:\n"\
"x = ['xy', 'yz']\n"\
"for i in a: \n" \
"\ti.upper()\n" \
"print(a)\n"\
"options\n"\
"a) ['xy', 'yz']\n"\
"b) ['XY', 'YZ']\n"\
"c) [None, None]\n"\
"d) [None of these]\n"
)
if ans4.lower() == "a":
count+=1
ans5 = input("Which of the following requires a device driver?\n"\
"a) Register\n"\
"b) Cache\n"\
"c) Main memory\n"\
"d) Disk\nd"
)
if ans5.lower() == "d":
count+=1
print(count)
return count
| true |
f1a7853542ce32d95d9ce63ccc018c45920b9d61
|
Python
|
tastelessjolt/aarohi
|
/src/utils.py
|
UTF-8
| 370 | 2.75 | 3 |
[] |
no_license
|
import os
import numpy as np
def convertOggToWav(filespath):
for path, dirs, files in os.walk(filespath):
for file in files:
print(file)
if ( file[len(file)-3:] == "ogg" ):
os.system("oggdec -o " + filespath + "/" + file[:-4] + ".wav " + filespath + "/" + file)
def binary_from_int16(num):
inum = bin(np.uint32(num))
return [int(b) for b in inum[2:]]
| true |
c8ca024ebd5bcbf8322cfd2bb8e886912ffe27ac
|
Python
|
alfisiemcse/pythonproject
|
/filehandlingpart2.py
|
UTF-8
| 322 | 2.609375 | 3 |
[] |
no_license
|
k = dict()
i = 0
l = []
f = open("alfismacho.txt")
for x in f.readlines():
if x[:-1] == "!":
if len(l) == 0:
continue
else:
k[i]= l
i = i+1
l=[]
else:
l.append(x[:-1])
k[i]=l
for i in k.values():
print(i)
| true |
be8e2034214c422bf9be242911b01ad877e8a23b
|
Python
|
JasonQVan/Self-Taught-Assignments
|
/Inheritance.py
|
UTF-8
| 417 | 3.890625 | 4 |
[] |
no_license
|
class Shape():
def what_am_i(self):
print("I am a shape")
class Rectangle(Shape):
def __init__(self, w, l):
self.width = w
self.length = l
def calculate_perimeter(self):
return (self.width*2 + self.length*2)
class Square(Shape):
def __init__(self, s):
self.s1 = s
def calculate_perimeter(self):
return (self.s1 *4)
rect = Rectangle(2,3)
squ = Square(4)
rect.what_am_i()
squ.what_am_i()
| true |
226f2f0b40cdc0cfa2c704bf9a90505cc915cd38
|
Python
|
asuprem/imag-s
|
/testing/goof.py
|
UTF-8
| 6,028 | 2.65625 | 3 |
[] |
no_license
|
import sys
import operator
import time
from neo4j.v1 import GraphDatabase
import pdb
from synset_explorer import SynsetExplorer
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import WordNetError
from itertools import product
#import approximate_utils
uri = "bolt://localhost:7687"
driver = GraphDatabase.driver(uri, auth=("neo4j", "scientia"))
def extractRelations(query_file_name):
query_file = open(query_file_name,'r')
nouns = {}
predicates = {}
for line in query_file:
line = line.strip().split(',')
if line[1] == 'n':
nouns[int(line[0])] = line[2]
if line[1] == 'r':
predicates[int(line[0])] = (line[2],int(line[3]), int(line[4]))
query_file.close()
relations = []
for entry in predicates:
relations.append((nouns[predicates[entry][1]],predicates[entry][0],nouns[predicates[entry][2]]))
return relations
def clauseJoin(matchClause,conditionClause,returnClause):
return matchClause+' '+conditionClause+' '+returnClause
def synset_cleaned(neo4j_result):
return [item.values()[0]['synset'].encode("utf-8") for item in neo4j_result]
def sessionRun(clause):
with driver.session() as session:
result = session.run(clause)
return result
def subject_relations_approximates(subjects,objects):
matchClause = 'match (s:ssagObject)-[:SUBJ]->(r:ssagRelation)-[:OBJ]->(o:ssagObject)'
conditionClause = 'where s.synset in '+str(subjects) + ' and o.synset in '+str(objects)
returnClause = 'return r'
return sessionRun(clauseJoin(matchClause,conditionClause,returnClause))
def object_relations_approximates(objects,subjects):
matchClause = 'match (s:osagObject)-[:SUBJ]->(r:osagRelation)-[:OBJ]->(o:osagObject)'
conditionClause = 'where o.synset in '+str(objects) + ' and s.synset in '+str(subjects)
returnClause = 'return r'
return sessionRun(clauseJoin(matchClause,conditionClause,returnClause))
def oldsubject_relations_approximates(subjects):
matchClause = 'match (n:aggregateObject)-[:SUBJ]->(r:aggregateRelation)'
conditionClause = 'where n.synset in '+str(subjects)
returnClause = 'return r'
return sessionRun(clauseJoin(matchClause,conditionClause,returnClause))
def oldobject_relations_approximates(objects):
matchClause = 'match (r:aggregateRelation)-[:OBJ]->(o:aggregateObject)'
conditionClause = 'where o.synset in '+str(objects)
returnClause = 'return r'
return sessionRun(clauseJoin(matchClause,conditionClause,returnClause))
def unique_intersection(aggregate_relation_object,aggregate_relation_subject):
aggregate_relations = aggregate_relation_subject+aggregate_relation_object
aggregate_relations = set([item for item in aggregate_relations if (item in aggregate_relation_subject and item in aggregate_relation_object)])
return aggregate_relations
def main():
objectFamilies = SynsetExplorer('../ExtractedData/objects.db')
relationFamilies = SynsetExplorer('../ExtractedData/relations.db')
#query_file_name = sys.argv[1]
while 1:
query_file_name = raw_input("Query file: ")
#Get the relations and nouns
relations = extractRelations(query_file_name)
#----------------------------------------------------------------------------#
# USE the relation component approximates to generate relation approximates
queryApproximates={}
for relation in relations:
#Get the explored synsets
subjectFamily = objectFamilies.explore(relation[0])
objectFamily = objectFamilies.explore(relation[2])
predicateFamily = relationFamilies.explore(relation[1])
#pdb.set_trace()
start=time.time()
#Get the cleaned up relations (i.e. without u'sdfdf' -> 'sdfdf')
pdb.set_trace()
aggregate_relation_subject = synset_cleaned(subject_relations_approximates(subjectFamily.getFullRanking(),objectFamily.getFullRanking()))
aggregate_relation_object = synset_cleaned(object_relations_approximates(objectFamily.getFullRanking(),subjectFamily.getFullRanking()))
#Get the unique relations and the predicate relations and convert to synset format (for lch similarity)
#pdb.set_trace()
'''
aggregateSynsets = toSynset(unique_intersection(aggregate_relation_object,aggregate_relation_subject))
#Get relationship ranks compared to the predicate family
relationRanks = rankRelations(aggregateSynsets,predicateFamily)
#pdb.set_trace()
# Mabe combine with hypo ranks????
# We generate relations using base, first:
queryApproximates[relation] = generateRelations(subjectFamily.getFullRanking(), relationRanks, objectFamily.getFullRanking())
print 'Finished getting relations in ' + str(time.time()-start)
print '---------------------------------------------\n\n'
#we have query approximates, and relations
# we need to get images with the approximates in them.
image_collection={}
query_collection = {}
for query in relations:
image_collection[query]={}
for approximate in queryApproximates[query]:
image_collection[query][approximate] = image_ids(approximate)
for ids in image_collection[query][approximate]:
if ids not in query_collection:
query_collection[ids] = {}
if query not in query_collection[ids]:
query_collection[ids][query]=[]
query_collection[ids][query].append(approximate)
print 'Finished getting ' + str(query) + ' in '+ str(time.time()-start)
for entry in query_collection:
if len(query_collection[entry])>1:
print entry, query_collection[entry]
pdb.set_trace()
'''
if __name__ == "__main__":
main()
| true |
26fb484792b53187336c1161f17ec1e25c8ff42e
|
Python
|
seravb/IndoorCyclingApp
|
/examples/cairo_gtk.py
|
UTF-8
| 1,765 | 3.25 | 3 |
[] |
no_license
|
#! /usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk, gobject, cairo
# Create a GTK+ widget on which we will draw using Cairo
class Screen(gtk.DrawingArea):
# Draw in response to an expose-event
__gsignals__ = { "expose-event": "override" }
# Handle the expose-event by drawing
def do_expose_event(self, event):
# Create the cairo context
cr = self.window.cairo_create()
# Restrict Cairo to the exposed area; avoid extra work
cr.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
cr.clip()
self.draw(cr, *self.window.get_size())
def draw(self, cr, width, height):
# Fill the background with gray
cr.set_source_rgb(0.5, 0.5, 0.5)
cr.rectangle(0, 0, width, height)
cr.fill()
# GTK mumbo-jumbo to show the widget in a window and quit when it's closed
def run(Widget):
window = gtk.Window()
window.connect("delete-event", gtk.main_quit)
widget = Widget()
widget.show()
window.add(widget)
window.present()
gtk.main()
## Do all your testing in Shapes ##
class Shapes(Screen):
def draw(self, cr, width, height):
## This will draw using a mask.
cr.scale(width,height) #Without this line the mask does not seem to work!
self.linear = cairo.LinearGradient(0, 0, 1, 1)
self.linear.add_color_stop_rgb(0, 0, 0.3, 0.8)
self.linear.add_color_stop_rgb(1, 0, 0.8, 0.3)
self.radial = cairo.RadialGradient(0.5, 0.5, 0.25, 0.5, 0.5, 0.5)
self.radial.add_color_stop_rgba(0, 0, 0, 0, 1)
self.radial.add_color_stop_rgba(0.5, 0, 0, 0, 0)
cr.set_source(self.linear)
cr.mask(self.radial)
run(Shapes)
| true |
531d89a4a6f370a7f46787f4b3d650b08ba77d32
|
Python
|
reikamoon/CS_1.3_Core_Data_Structures
|
/Code/double_linked_lists.py
|
UTF-8
| 2,746 | 3.953125 | 4 |
[
"MIT"
] |
permissive
|
#!python
class ListNode:
def __init__(self, data):
#stores data
self.data = data
#stores reference for the next item
self.next = None
#store reference for previous item
self.previous = None
return
def has_value(self, value):
if self.data == value:
return True
else:
return False
class DoubleLinkedList:
def __init__(self):
self.head = None
self.tail = None
return
def list_length(self):
count = 0
current_node = self.head
while current_node is not None:
#Increase counter by one
count = count + 1
#Jump to the linked node
current_node = current_node.next
return count
def output_list(self):
current_node = self.head
while current_node is not None:
print(current_node.data)
#Jump to the linked Node
current_node = current_node.next
return
def unordered_search(self, value):
#Define Current Node
current_node = self.head
#Define the position
node_id = 1
#Define the list of results
results = []
while current_node is not None:
if current_node.has_value(value):
results.append(node_id)
#jump to the linked node
current_node = current_node.next
node_id = node_id + 1
return results
def add_list_item(self, item):
"add an item at the end of the list"
if isinstance(item, ListNode):
if self.head is None:
self.head = item
item.previous = None
item.next = None
self.tail = item
else:
self.tail.next = item
item.previous = self.tail
self.tail = item
def remove_list_item(self, item_id):
"remove a list item by its id"
current_id = 1
current_node = self.head
while current_node is not None:
previous_node = current_node.previous
next_node = current_node.next
            if current_id == item_id:
                if previous_node is not None:
                    previous_node.next = next_node
                else:
                    self.head = next_node
                if next_node is not None:
                    next_node.previous = previous_node
                else:
                    # keep the tail reference valid when the last node is removed
                    self.tail = previous_node
                return
#Next Iteration
current_node = next_node
current_id = current_id + 1
return
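# A minimal usage sketch (not part of the original exercise): build a short
# list, print it, and remove the middle node.
if __name__ == "__main__":
    dll = DoubleLinkedList()
    for value in (10, 20, 30):
        dll.add_list_item(ListNode(value))
    dll.output_list()         # prints 10, 20, 30
    dll.remove_list_item(2)   # drop the node holding 20
    print(dll.list_length())  # 2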
| true |
b2431adcbc9c3c4b37f1302220f112928b5b5ee8
|
Python
|
snarkfog/python_hillel
|
/lesson_04/n_sqrt.py
|
UTF-8
| 815 | 4.15625 | 4 |
[] |
no_license
|
"""
1. Given an integer N, print all squares of natural numbers
that do not exceed N, in ascending order.
Examples:
50 1 4 9 16 25 36 49
10 1 4 9
9 1 4 9
4 1 4
1 1
100 1 4 9 16 25 36 49 64 81 100
99 1 4 9 16 25 36 49 64 81
"""
n = abs(int(input("Enter a number: ")))
natural_number = 1  # first natural number
square_number = 0  # square of a natural number
if n == 0:
print("ะงะธัะปะพ ะดะพะปะถะฝะพ ะฑััั ะฑะพะปััะต ะฝะพะปั.")
else:
    while natural_number ** 2 <= n:
square_number = natural_number ** 2
print(square_number, end=" ")
natural_number += 1
| true |
ebe0b9bb5e033133f9cfa24b8643628d0ac988dc
|
Python
|
justin-tt/ctci-6e
|
/ch1/string_builder2.py
|
UTF-8
| 1,346 | 3.78125 | 4 |
[] |
no_license
|
# https://stackoverflow.com/questions/476772/python-string-join-performance
def join_strings(string_list):
string_accumulator = ""
for string in string_list:
string_accumulator += string
# using + creates new strings for string_accumulator
# every single pass, a lot of memory allocation and
# copy operations involved.
return string_accumulator
def join_strings2(string_list):
'''
Using the string builder method.
'''
string_accumulator = []
asdf = 'aasdfasdfasfd'
for string in string_list:
for c in string:
string_accumulator.append(c)
# appending to a resizable array is probably faster
# than recreating the string every time
# stackover claims that concatenation is done
# on one pass, copying the string only once
# when using the .join() method.
return "".join(string_accumulator)
assert(join_strings(["a", "b"]) == "ab")
assert(join_strings2(["a", "b"]) == "ab")
with open('words_alpha.txt') as file:
word_list = file.read().splitlines()
file.close()
import time
t1 = time.time()
join_strings(word_list)
t2 = time.time()
print(t2 - t1)
t3 = time.time()
join_strings2(word_list)
t4 = time.time()
print(t4 - t3)
# in python2, the 2nd method seems to consistently outperform the first
# python -m cProfile stringbuilder2.py
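# For comparison (an illustrative addition, not part of the original benchmark):
# the idiomatic approach passes the list straight to str.join, avoiding the
# explicit accumulator loop entirely.
def join_strings3(string_list):
    return "".join(string_list)

assert(join_strings3(["a", "b"]) == "ab")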
| true |
5ef17163c455480dc6832a677e81435e7924f041
|
Python
|
marmichcash/Python-Script-Programming
|
/files_numbers_sum_average.py
|
UTF-8
| 1,061 | 4.46875 | 4 |
[] |
no_license
|
# This program reads all the values in a file and
# calculates the average and sum of all values read.
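# numbers.txt is expected to hold one numeric value per line, for example:
#   10.5
#   20
#   7.25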
def main():
# Define variables for calculation
num_sum = 0.0
num_avg = 0.0
running_total = 0.0
count = 0
# Open numbers.txt in read mode
num_file = open("numbers.txt", "r")
print("Numbers in File\n---------------")
# Read all lines from numbers.txt
for line in num_file:
# Convert line to float for calculations
value = float(line)
# Format and output line to user
print(f"{value:,.2f}")
# Add value to running total
running_total += value
# Keep count of number of lines
count += 1
# Close file
num_file.close()
# Transfer running total to sum for calculations
num_sum = running_total
# Divide sum by # of lines
num_avg = num_sum / count
# Output sum and average to user
print(f"\nFile SUM: {num_sum:,.2f} " +
f"\nFile AVERAGE: {num_avg:,.2f}")
# Call main function
main()
| true |
f94dca11decde4e7487fcad9cbeff506170613af
|
Python
|
jaehyek/axidraw-xy
|
/modules/utils.py
|
UTF-8
| 4,058 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from numpy import array
from numpy import row_stack
def get_bounding_box(xy):
mi = xy.min(axis=0).squeeze()
ma = xy.max(axis=0).squeeze()
xd = ma[0]-mi[0]
yd = ma[1]-mi[1]
return mi, ma, xd, yd
def print_values(mi, ma, xd, yd):
print(('x: min {:0.08f} max {:0.08f} d {:0.08f}'.format(mi[0], ma[0], xd)))
print(('y: min {:0.08f} max {:0.08f} d {:0.08f}'.format(mi[1], ma[1], yd)))
def do_scale(xy):
_,_,xd,yd = get_bounding_box(xy)
xy /= max(xd,yd)
def fit(vertices):
from modules.ddd import get_mid_2d as get_mid
vertices -= get_mid(vertices)
do_scale(vertices)
vertices[:,:] += array([[0.5]*2])
def get_paths_from_n_files(
pattern,
skip=0,
steps=1,
stride=1,
spatial_sort = True,
spatial_concat = False,
spatial_concat_eps = 1.e-9
):
from glob import glob
from modules.ioOBJ import load_2d as load
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
vertices = []
lines = []
vnum = 0
files = sorted(glob(pattern))
for fn in files[skip:steps:stride]:
print(fn)
data = load(fn)
v = data['vertices']
l = data['lines']
vn = len(v)
vertices.append(v)
lines.append(array(l, 'int')+vnum)
vnum += vn
vertices = row_stack(vertices)
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
paths = [row_stack(vertices[li,:]) for li in lines]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
return paths
def get_paths_from_file(
fn,
spatial_sort = True,
spatial_concat = False,
spatial_concat_eps = 1.e-9
):
from modules.ioOBJ import load_2d as load
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
data = load(fn)
vertices = data['vertices']
lines = data['lines']
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
paths = [row_stack(vertices[l,:]) for l in lines]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
return paths
def get_tris_from_file(
fn,
spatial_sort = True,
spatial_concat = False,
spatial_concat_eps = 1.0e-9
):
from modules.ioOBJ import load_2d as load
from modules.ddd import get_distinct_edges_from_tris
from modules.ddd import spatial_sort_2d as sort
from modules.ddd import spatial_concat_2d as concat
data = load(fn)
vertices = data['vertices']
fit(vertices)
print('scaled size:')
print_values(*get_bounding_box(vertices))
edges = get_distinct_edges_from_tris(data['faces'])
paths = [row_stack(p) for p in vertices[edges,:]]
paths = sort(paths) if spatial_sort else paths
paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
return paths
# TODO: do we need this?
# def get_edges_from_file(
# fn,
# spatial_sort = True,
# spatial_concat = False,
# spatial_concat_eps = 1.0e-9
# ):
# from modules.ioOBJ import load_2d as load
# from modules.ddd import spatial_sort_2d as sort
# from modules.ddd import spatial_concat_2d as concat
#
# data = load(fn)
# vertices = data['vertices']
#
# fit(vertices)
# print('scaled size:')
# print_values(*get_bounding_box(vertices))
#
# edges = data['edges']
# paths = [row_stack(p) for p in vertices[edges,:]]
#
# paths = sort(paths) if spatial_sort else paths
# paths = concat(paths, spatial_concat_eps) if spatial_concat else paths
# return paths
# TODO: implement draw_dots.py in root. test this.
# def get_dots_from_file(
# fn,
# spatial_sort = True,
# ):
# from ioOBJ import load_2d as load
# from ddd import spatial_sort_dots_2d as sort
#
# data = load(fn)
# vertices = data['vertices']
#
# fit(vertices)
# dots = vertices
# print('scaled size:')
# print_values(*get_bounding_box(vertices))
#
# dots = sort(dots) if spatial_sort else dots
# return dots
| true |
9323c8ee27eb83836291031846da70fb313dab94
|
Python
|
SammyAgrawal/python-references
|
/india_class_stuff/web_scraping/scrapy1.py
|
UTF-8
| 337 | 2.59375 | 3 |
[] |
no_license
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
html=urlopen("http://www.niitahmedabad.com/")
gn=BeautifulSoup(html.read(),"lxml");
print(gn.h1)
#print(gn.get_text())
print(gn.title)
print(gn.head)
print(gn.title.text)
print(gn.table)
print(gn.table.get_text())
print(gn.p.text)
print(gn.body.text)
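# A common next step (illustrative, not in the original script): collect every
# link on the page. find_all returns all matching tags; get("href") is used
# because the attribute may be absent on some anchors.
for link in gn.find_all("a"):
    print(link.get("href"))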
| true |
5627756297e1c417fd6daa49594b666a08b115a3
|
Python
|
imphatic/fair_credit
|
/src/api/fair_credit.py
|
UTF-8
| 7,782 | 3.15625 | 3 |
[] |
no_license
|
from src import app, db
from sqlalchemy import exc
from src.models import Transactions, CreditLines
from datetime import datetime
from time import time
from src.api.exceptions import CreditLimitExceededError
class FairCredit:
def __init__(self, apr, credit_line_id=None):
self.apr = apr
self.credit_line_id = credit_line_id
self.credit_limit = None
pass
@staticmethod
def get_credit_lines():
"""
Get all credit lines
:return: dict of credit lines
"""
credit_lines = CreditLines.query.order_by(CreditLines.name).all()
return [row.to_dict() for row in credit_lines]
@staticmethod
def new_credit_line(name, credit_limit):
"""
Create a new line of credit
:param name: used only as a label
:param credit_limit: the max amount that can be drawn
:return:
"""
credit_line = CreditLines(name, credit_limit)
db.session.add(credit_line)
try:
db.session.commit()
return {'id': credit_line.id}
except exc.SQLAlchemyError:
raise
@staticmethod
def get_transaction(transaction_id):
"""
Get an existing transaction
:param transaction_id: transaction id to retrieve
:return: dict keyed with database columns
"""
transaction = Transactions.query.get(transaction_id)
return transaction.to_dict() if transaction is not None else None
def new_transaction(self, transaction_type, amount, date_time=None):
"""
Create a new transaction.
:param transaction_type: 1 = debit, 2 = credit, 3 = interest payment
:param amount: amount of the transaction
:param date_time: date the transaction occurred
:return: empty dict or errors
"""
transaction_type = int(transaction_type)
amount = float(amount)
balance = self.get_balance()
try:
if transaction_type == 1:
balance -= amount
if abs(balance) > self.get_credit_limit():
raise CreditLimitExceededError(amount, self.get_credit_limit())
elif transaction_type == 2:
balance += amount
elif transaction_type == 3:
balance = self.balance_after_interest_payment(amount)
date_time = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S') if date_time is None else date_time
transaction = Transactions(self.credit_line_id, transaction_type, amount, balance, date_time)
db.session.add(transaction)
db.session.commit()
return {}
except exc.SQLAlchemyError:
raise
except CreditLimitExceededError:
raise
def edit_transaction(self, transaction_id, updates):
"""
Edit a transaction
:param transaction_id: id of the transaction to edit
:param updates: dictionary keyed with database columns with changes as values
:return: empty dict or errors
"""
try:
transaction = Transactions.query.get(transaction_id)
if 'type' in updates:
transaction.type = int(updates['type'])
if 'amount' in updates:
transaction.amount = float(updates['amount'])
if 'balance' in updates:
transaction.balance = float(updates['balance'])
if 'date_time' in updates:
transaction.date_time = updates['date_time']
db.session.commit()
return {}
except exc.SQLAlchemyError:
raise
@staticmethod
def delete_transaction(transaction_id):
"""
Remove a transaction
:param transaction_id: id of the transaction to delete
:return: empty dict or errors
"""
try:
transaction = Transactions.query.get(transaction_id)
db.session.delete(transaction)
db.session.commit()
return {}
except exc.SQLAlchemyError:
raise
def balance_after_interest_payment(self, amount):
"""
returns the balance after an interest payment is made
if the interest payment made is less than the interest owed then the difference is added to the balance
:param amount: amount of interest paid
:return: balance
"""
interest_owed = self.get_interest()
balance = self.get_balance()
balance += interest_owed - amount
return balance
def get_balance(self):
"""
get the current balance
:return: balance
"""
# get the most recent transaction
transaction = Transactions.query\
.filter(Transactions.credit_line_id == self.credit_line_id)\
.order_by(Transactions.date_time.desc()).first()
if transaction is None:
balance = 0
else:
balance = transaction.balance
return float(balance)
def get_interest(self):
"""
Get the current interest owed
:return: interest
"""
# Find the last interest payment (if it exists)
last_interest_payment = Transactions.query\
.filter(Transactions.credit_line_id == self.credit_line_id, Transactions.type == 3)\
.order_by(Transactions.date_time.desc()).first()
if last_interest_payment is None:
# get all transactions
transactions = Transactions.query\
.filter(Transactions.credit_line_id == self.credit_line_id)\
.order_by(Transactions.date_time).all()
else:
# get all transactions past the latest interest payment
transactions = Transactions.query\
.filter(Transactions.credit_line_id == self.credit_line_id, Transactions.date_time >= last_interest_payment.date_time)\
.order_by(Transactions.date_time).all()
interest = 0.0
apr_per_day = self.apr/365
now = datetime.today()
if len(transactions):
for i, transaction in enumerate(transactions):
next_transaction = transactions[i + 1] if i + 1 < len(transactions) else None
day1 = transaction.date_time
day2 = next_transaction.date_time if next_transaction else now
                delta = day2 - day1  # day2 is the later timestamp, so the day count is positive
interest += (delta.days * apr_per_day) * transaction.balance
return float(interest)
def get_credit_limit(self):
"""
get the credit limit of the current instance
:return: the credit limit
"""
if not self.credit_limit:
credit_line = CreditLines.query.get(self.credit_line_id)
self.credit_limit = credit_line.credit_limit
return self.credit_limit
@staticmethod
def get_ledger(credit_line_id, date_start, date_end):
"""
Get transactions
:param credit_line_id: of the ledger you wish to retrieve
:param date_start: beginning date range to include in transaction list
:param date_end: ending date range to include in transactions list
:return: dict of transactions
"""
date_start += ' 00:00:00'
        date_end += ' 23:59:59'
ledger = Transactions.query.filter(Transactions.credit_line_id == credit_line_id,
Transactions.date_time >= date_start,
Transactions.date_time <= date_end)\
.order_by(Transactions.date_time.desc()).all()
return [row.to_dict() for row in ledger]
| true |
0b9221e105ae8092e1b60051bff2e1288f57c793
|
Python
|
gasparRuben01/TeoriaAlgoritmos1
|
/TP1/recorridos en grafos/pqueue.py
|
UTF-8
| 655 | 3.171875 | 3 |
[] |
no_license
|
from heap import *
class NQueue(object):
"""docstring for NQueue."""
def __init__(self, key, value):
super(NQueue, self).__init__()
self.key = key
self.value = value
def __cmp__(self, other):
return self.key - other.key
class PQueue(object):
"""docstring for PQueue."""
def __init__(self):
super(PQueue, self).__init__()
self.heap = Heap()
def push(self, key, value):
self.heap.push(NQueue(key, value))
def pop(self):
return self.heap.pop().value
def top(self):
return self.heap.top().value
def empty(self):
return self.heap.empty()
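# Illustrative usage (assuming the custom Heap orders entries by NQueue key):
#   pq = PQueue()
#   pq.push(2, "second")
#   pq.push(1, "first")
#   value = pq.top()   # the entry whose key sorts first
#   value = pq.pop()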
| true |
98bf591eb63d559101937e008394e0d9980e3aa9
|
Python
|
pedrohcms/reconhecimento_facial
|
/cadastro.py
|
UTF-8
| 2,806 | 2.90625 | 3 |
[] |
no_license
|
#coding: utf8
import tkinter as tk
import os
import images
from db_interaction.User import User
from intelligence import train_neural_network
import shutil
def Cadastro():
new_user = False
def process_name(name):
name = name.strip()
name = name.upper()
name = name.replace(' ','_')
return name
def foto_Click():
name = campo.get()
label = process_name(name)
folder = 'users'
if not os.path.isdir(folder):
os.mkdir(folder)
folder = './users/'+name
folder = os.path.join('users', label)
if not os.path.isdir(folder):
os.mkdir(folder) #Create the directory that stores the user's images
images.take_pictures(label) #Invokes the function that take pictures and save user's imagem
user = User() # Object of class User
user.insert(name, var.get(), label)
conf = tk.Label(janela, text="USUARIO CADASTRADO COM SUCESSO", bg="green")
conf.pack(side='top', fill='x')
print(campo.get())
        nonlocal new_user  # new_user lives in the enclosing Cadastro() scope
new_user = True
if len(os.listdir(folder)) == 0:
print('Error registering user: '+ name+', try again')
os.rmdir(folder) #We removed the folder that was created if the user does not register correctly
else:
print('User already registered! ')
def on_close():
        nonlocal new_user
if new_user == True:
if os.path.isdir('backup'):
shutil.rmtree('backup')
train_neural_network()
janela.destroy()
janela = tk.Tk()
janela.title("Sistema de Cadastro") #titulo janela
# Organizaรงรฃo da janela lxA+E+T
janela.geometry("400x500+500+100")
tk.Label(janela, text="Sistema de Cadastro").pack()
    #=====================FIELD========================
campo = tk.Entry(janela, width=60)
campo.place(x=15, y=200)
lb = tk.Label(janela, text="Para realizar Cadastro, insira seu nome, sua prioridade e tire a foto ")
lb.place(x=20, y=150)
    #=====================/FIELD=======================
    #=====================BUTTON========================
bt = tk.Button(janela, width = 50, text = "Tirar a foto", command = foto_Click)
bt.place(x=20, y=300)
    #=====================/BUTTON=======================
    #=====================OPTIONS=======================
    # priority: 1 = anyone, 2 = leaders, 3 = minister
var = tk.StringVar()
var.set("1")
pri = tk.OptionMenu(janela, var, "1","2","3")
pri.place(x=170, y=250)
    #====================/OPTIONS=======================
janela.protocol("WM_DELETE_WINDOW", on_close)
janela.mainloop()
| true |
f18a4142a0500083accfaf7b169837def44248ed
|
Python
|
nguyendo24/intro-to-ml-with-kubeflow-examples
|
/ch04/code/Lightweight Pipeline.py
|
UTF-8
| 2,703 | 3.4375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# # Setup
# In[1]:
get_ipython().system('pip install kfp --upgrade --user')
import kfp
from kfp import compiler
import kfp.dsl as dsl
import kfp.notebook
import kfp.components as comp
# Simple function that just add two numbers:
# In[2]:
#Define a Python function
def add(a: float, b: float) -> float:
'''Calculates sum of two arguments'''
return a + b
# Convert the function to a pipeline operation
# In[3]:
add_op = comp.func_to_container_op(add)
# A bit more advanced function which demonstrates how to use imports, helper functions and produce multiple outputs.
# In[4]:
from typing import NamedTuple
def my_divmod(
dividend: float, divisor: float
) -> NamedTuple('MyDivmodOutput', [('quotient', float), ('remainder', float)]):
'''Divides two numbers and calculate the quotient and remainder'''
#Imports inside a component function:
import numpy as np
#This function demonstrates how to use nested functions inside a component function:
def divmod_helper(dividend, divisor):
return np.divmod(dividend, divisor)
(quotient, remainder) = divmod_helper(dividend, divisor)
from collections import namedtuple
divmod_output = namedtuple('MyDivmodOutput', ['quotient', 'remainder'])
return divmod_output(quotient, remainder)
# Test running the python function directly
# In[5]:
my_divmod(100, 7)
# Convert the function to a pipeline operation
# In[6]:
divmod_op = comp.func_to_container_op(
my_divmod, base_image='tensorflow/tensorflow:1.14.0-py3')
# Define the pipeline
# Pipeline function has to be decorated with the @dsl.pipeline decorator
# In[7]:
@dsl.pipeline(
name='Calculation pipeline',
description='A toy pipeline that performs arithmetic calculations.')
def calc_pipeline(
a='a',
b='7',
c='17',
):
#Passing pipeline parameter and a constant value as operation arguments
add_task = add_op(a, 4) # Returns a dsl.ContainerOp class instance.
#Passing a task output reference as operation arguments
#For an operation with a single return value, the output reference can be accessed using `task.output` or `task.outputs['output_name']` syntax
divmod_task = divmod_op(add_task.output, b)
#For an operation with a multiple return values, the output references can be accessed using `task.outputs['output_name']` syntax
result_task = add_op(divmod_task.outputs['quotient'], c)
# Submit the pipeline for execution
# In[8]:
client = kfp.Client()
#Specify pipeline argument values
arguments = {'a': '7', 'b': '8'}
#Submit a pipeline run
client.create_run_from_pipeline_func(calc_pipeline, arguments=arguments)
# In[ ]:
| true |
ca3f90e455be52f5510d938bda4cf55334ec0f8f
|
Python
|
cyrilq/BudgetSprint_2
|
/rating_task51/sample.py
|
UTF-8
| 255 | 2.828125 | 3 |
[] |
no_license
|
import matplotlib.pyplot as plt
import json
data = {}
with open('result.json') as data_file:
data = json.load(data_file)
print(data)
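# result.json is expected to be a flat mapping of label -> numeric value,
# e.g. {"a": 3, "b": 7}, since the values feed plt.bar directly.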
plt.bar(range(len(data)), data.values(), align='center')
plt.xticks(range(len(data)), data.keys())
plt.show()
| true |
6a9d667a041ca52e697e1976e684bacdc3c78ea9
|
Python
|
buddly27/nomenclator-nuke
|
/source/nomenclator/widget/error_widget.py
|
UTF-8
| 6,109 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from nomenclator.vendor.Qt import QtWidgets, QtCore
class ErrorManagerWidget(QtWidgets.QFrame):
"""Widget used to display error messages."""
def __init__(self, parent=None):
"""Initiate the widget."""
super(ErrorManagerWidget, self).__init__(parent)
self._setup_ui()
self._connect_signals()
self._errors = []
def set_values(self, context):
"""Initialize values."""
errors = []
included = set()
if context.error is not None:
errors.append(context.error)
included.add(context.error["message"])
for _context in context.outputs:
if not _context.error:
continue
# Ignore duplicated error messages.
if _context.error["message"] in included:
continue
errors.append(_context.error)
included.add(_context.error["message"])
# Ignore if error didn't change
if errors == self._errors:
return
self.clear()
for error in errors:
widget = ErrorWidget(error)
self._main_layout.addWidget(widget)
self.setVisible(self._main_layout.count() > 0)
self._errors = errors
def clear(self):
"""Clear all error displayed."""
for index in reversed(range(self._main_layout.count())):
self._main_layout.itemAt(index).widget().deleteLater()
def _setup_ui(self):
"""Initialize user interface."""
self._main_layout = QtWidgets.QVBoxLayout(self)
self._main_layout.setContentsMargins(0, 0, 0, 0)
self._main_layout.setSpacing(0)
self.setVisible(False)
def _connect_signals(self):
"""Initialize signals connection."""
class ErrorWidget(QtWidgets.QFrame):
"""Widget used to display one message."""
def __init__(self, error, parent=None):
"""Initiate the widget."""
super(ErrorWidget, self).__init__(parent)
self._setup_ui(error)
self._connect_signals()
def _setup_ui(self, error):
"""Initialize user interface."""
self.setObjectName("error-widget")
self.setSizePolicy(
QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Maximum
)
)
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
self._header = HeaderMessage(error["message"], self)
main_layout.addWidget(self._header)
self._detail = DetailsMessage(error["details"], self)
main_layout.addWidget(self._detail)
def _connect_signals(self):
"""Initialize signals connection."""
self._header.request_details.connect(self._detail.display)
class HeaderMessage(QtWidgets.QFrame):
"""Widget used to display a header message."""
#: :term:`Qt Signal` emitted when detail message is requested.
request_details = QtCore.Signal(bool)
def __init__(self, message, parent=None):
"""Initiate the widget."""
super(HeaderMessage, self).__init__(parent)
self._setup_ui(message)
self._connect_signals()
def _setup_ui(self, message):
"""Initialize user interface."""
self.setObjectName("error-header-widget")
self.setSizePolicy(
QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Maximum
)
)
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(10, 10, 10, 10)
main_layout.setSpacing(5)
label = QtWidgets.QLabel("ERROR", self)
label.setStyleSheet("font:bold")
label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
label.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.TextSelectableByMouse
)
label.setMaximumWidth(80)
label.setMinimumWidth(80)
main_layout.addWidget(label)
message_lbl = QtWidgets.QLabel(message, self)
message_lbl.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
message_lbl.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.TextSelectableByMouse
)
main_layout.addWidget(message_lbl)
spacer = QtWidgets.QSpacerItem(
0, 0, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Minimum
)
self._button_more = QtWidgets.QToolButton(self)
self._button_more.setText("display more")
main_layout.addItem(spacer)
main_layout.addWidget(self._button_more)
def _connect_signals(self):
"""Initialize signals connection."""
self._button_more.clicked.connect(self._toggle_display)
def _toggle_display(self):
"""Toggle the display of the message."""
values = ["display more", "display less"]
new_value = self._button_more.text() == values[0]
self._button_more.setText(values[int(new_value)])
self.request_details.emit(new_value)
class DetailsMessage(QtWidgets.QFrame):
"""Widget used to display a details message."""
def __init__(self, message, parent=None):
"""Initiate the widget."""
super(DetailsMessage, self).__init__(parent)
self._setup_ui(message)
def _setup_ui(self, message):
"""Initialize user interface."""
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(90, 10, 10, 10)
message_lbl = QtWidgets.QLabel(message, self)
message_lbl.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
message_lbl.setTextInteractionFlags(
QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.TextSelectableByMouse
)
message_lbl.setWordWrap(True)
main_layout.addWidget(message_lbl)
self.setVisible(False)
def display(self, value):
"""indicate whether the widget should be visible"""
self.setVisible(value)
| true |
966e5b3b3a4a5a1b331f069ac30256987b580b10
|
Python
|
SuzyWu2014/coding-practice
|
/python practice/Hashtable/e_202_happy_number.py
|
UTF-8
| 923 | 4.40625 | 4 |
[] |
no_license
|
# 202. Happy Number
# Write an algorithm to determine if a number is "happy".
# A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
# Example: 19 is a happy number
# 1^2 + 9^2 = 82
# 8^2 + 2^2 = 68
# 6^2 + 8^2 = 100
# 1^2 + 0^2 + 0^2 = 1
class Solution(object):
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
numSet = set()
while n != 1 and n not in numSet:
numSet.add(n)
sum_n = 0
while n:
digit = n % 10
sum_n += digit * digit
                n //= 10  # integer division keeps n an int in Python 3
n = sum_n
return n == 1
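# Quick check (illustrative): 19 is happy, 2 is not.
if __name__ == "__main__":
    print(Solution().isHappy(19))  # True
    print(Solution().isHappy(2))   # False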
| true |
0f860c1da1e90b68fc687b1f46331920c2dc0660
|
Python
|
mstern98/topylogic-git
|
/pysrc/test.py
|
UTF-8
| 1,623 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
from topylogic import *
import topylogic
'''
s = stack()
s.push(1)
s.push("hello")
s.push((2,3,4,2))
print(s.get("a"))
print(s.pop())
print(s.pop())
print(s.pop())
a = AVLTree()
a.insert(1, 2)
a.insert("hi", 3)
print(a.find(3))
print(a.find(0))
a.preorder(s)
print(s.pop())
print(s.pop())
a.destroy()
s.destroy()
'''
def v_alt(i, a, b, c, d, e):
m = mod_vertex_request(a.vertices.find(1, dtype=topylogic.VERTEX_TYPE), v_fun, [2,3])
g.submit_request(topylogic.MOD_VERTEX, m)
print("v_alt ", i, b, " ", c, " g_ ", a.state_count, " ", a.max_state_changes, " ", a.max_loop)
return b, c, d, e
def v_fun(i, a, b1, b2, c, d):
m = mod_vertex_request(a.vertices.find(1, dtype=topylogic.VERTEX_TYPE), v_alt, [2,3])
g.submit_request(topylogic.MOD_VERTEX, m)
print("v_fund ", i, a, b1, b2, c, d)
b2[0] += 1
c[0] += 2
return b1, b2, c, d
def e_fun(i, a, b, c, d):
return True, (1,2)
g = graph(max_state_changes=8, max_loop=5, context=topylogic.SWITCH)
v1 = vertex(g, 0, v_fun, [1, {1:2, "s":[1, 2]}])
v2 = vertex(g, 1, v_fun, [2, {1:2, "s":[1, 2]}])
v3 = vertex(g, 2, v_alt, [1, 2])
e1 = edge(v1, v2, e_fun, (0, 0))
e2 = edge(v2, v1, e_fun, (0, 2))
e3 = edge(v3, v1, e_fun, (2, 2))
e4 = edge(v1, v3, e_fun, (10, 10))
vr1 = vertex_result("A", [0])
vr2 = vertex_result("B", [0])
vr3 = vertex_result("C", [10])
print(g.get_vertices())
'''
g.set_starting_vertices([0])
g.run([vr1])
#print("hmm")
#print(g.vertices.find(0, dtype=topylogic.VERTEX_TYPE))
#g.run([vr1, vr2, vr3])
g.destroy()
#print(g.vertices)
'''
| true |
b4f66a97e658d9f546c46d51f1df52fd8fc09c16
|
Python
|
SilvesSun/learn-algorithm-in-python
|
/双指针/345_反转字符串中的元音字母.py
|
UTF-8
| 532 | 3.1875 | 3 |
[] |
no_license
|
class Solution:
def reverseVowels(self, s: str) -> str:
vowels = 'ioeauIOEAU'
n = len(s)
s1 = list(s)
i, j = 0, n - 1
while i < j:
while i < n and not s1[i] in vowels:
i += 1
while j > 0 and not s1[j] in vowels:
j -= 1
if i < j:
s1[i], s1[j] = s1[j], s1[i]
i += 1
j -= 1
return ''.join(s1)
if __name__ == '__main__':
print(Solution().reverseVowels('hello'))
| true |
f62f00ffd24e1e55799d761372931264d8754d8c
|
Python
|
yyq1609/Python_road
|
/120 flask组件/1.flask_cache/test.py
|
UTF-8
| 481 | 2.546875 | 3 |
[] |
no_license
|
import datetime
from flask import Flask
from flask_cache import Cache  # the flask.ext.* import hook was removed in newer Flask releases
cache = Cache(config={'CACHE_TYPE': 'simple'})
app = Flask(__name__)
cache.init_app(app)
@app.route('/')
def hello():
return "hello, world!"
@app.route('/t')
@cache.cached(timeout=60 * 30)
def cached_page():
time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return "hello, world, what's your name, thank you!!... localtime: " + time
if __name__ == '__main__':
app.run()
| true |
f1003539d5e3102b7f5b1521b0f2a72c6d7a1b58
|
Python
|
TALASOSUSAN/bankaccount
|
/__init__.py/account.py
|
UTF-8
| 5,402 | 3.171875 | 3 |
[] |
no_license
|
from datetime import datetime


class BankAccount:
    bank = "KCB"

    def __init__(self, first_name, last_name, phone_number, bank="KCB"):
        self.first_name = first_name
        self.last_name = last_name
        self.balance = 0
        self.phone_number = phone_number
        self.bank = bank
        self.loan = 0
        self.deposits = []
        self.withdrawals = []

    @staticmethod
    def get_formatted_time(time):
        return time.strftime("%b %d %Y, %H:%M:%S")

    def get_current_time(self):
        return self.get_formatted_time(datetime.now())

    def account_name(self):
        return "{} account for {} {}".format(self.bank, self.first_name, self.last_name)

    def get_loan(self, amount):
        try:
            amount + 1
        except TypeError:
            print("You can only enter a digit value")
            return
        if amount <= 0:
            print("A loan cannot be offered at the moment")
        else:
            self.loan = amount
            print("You have successfully received a loan of {}".format(amount))

    def deposit(self, amount):
        try:
            amount + 1
        except TypeError:
            print("You can only enter a digit value")
            return
        if amount <= 0:
            print("You cannot deposit a negative amount")
        else:
            self.balance += amount
            time = datetime.now()
            self.deposits.append({"time": time, "amount": amount})
            print("You have deposited {} to {} on {}".format(
                amount, self.account_name(), self.get_formatted_time(time)))

    def get_balance(self):
        return "{} balance is {}".format(self.account_name(), self.balance)

    def withdraw(self, amount):
        try:
            amount + 1
        except TypeError:
            print("You can only enter a digit value")
            return
        if amount <= 0:
            print("You cannot withdraw zero or a negative amount")
        elif amount > self.balance:
            print("You don't have enough balance to make this request")
        else:
            self.balance -= amount
            time = datetime.now()
            self.withdrawals.append({"time": time, "amount": amount})
            print("You have withdrawn {} from {} on {}".format(
                amount, self.account_name(), self.get_formatted_time(time)))

    def deposit_statement(self):
        for deposit in self.deposits:
            print("You deposited {} on {}. Your new balance is {}".format(
                deposit["amount"], self.get_formatted_time(deposit["time"]), self.balance))

    def withdraw_statement(self):
        for withdrawal in self.withdrawals:
            print("You have successfully withdrawn {} on {}".format(
                withdrawal["amount"], self.get_formatted_time(withdrawal["time"])))

    def pay_loan(self, amount):
        try:
            amount + 1
        except TypeError:
            print("You can only enter a digit value")
            return
        if amount <= 0:
            print("You have insufficient balance to repay the loan")
        elif self.loan == 0:
            print("You don't have a loan at the moment")
        elif amount > self.loan:
            print("Your loan is {}, an amount less or equal is required".format(self.loan))
        else:
            self.loan -= amount
            print("You have repaid {}. Your outstanding loan is {}".format(amount, self.loan))


# The mobile-money methods below had no class header in the original file;
# the subclass name is an assumption made so the code can run.
class MobileMoneyAccount(BankAccount):
    def __init__(self, first_name, last_name, phone_number, service_provider):
        self.service_provider = service_provider
        self.airtime = []
        self.bills = []
        self.money = []
        self.received = []
        super().__init__(first_name, last_name, phone_number)

    # Reconstructed from statements that sat inside __init__ in the original.
    def buy_airtime(self, amount):
        try:
            amount - 1
        except TypeError:
            print("Please enter the amount in figures")
            return
        if amount > self.balance:
            print("You have insufficient balance. Your balance is {}".format(self.balance))
        else:
            self.balance -= amount
            time = datetime.now()
            self.airtime.append({"time": time, "amount": amount})
            print("You have bought airtime worth {} on {}".format(
                amount, self.get_formatted_time(time)))

    def paybills(self, amount):
        try:
            amount - 1
        except TypeError:
            print("Please enter the amount in figures")
            return
        if amount > self.balance:
            print("You have insufficient balance. Your balance is {}".format(self.balance))
        else:
            self.balance -= amount
            time = datetime.now()
            self.bills.append({"time": time, "amount": amount})
            print("You have paid bills worth {} on {}. Your balance is {}".format(
                amount, self.get_formatted_time(time), self.balance))

    def send_money(self, amount):
        try:
            amount - 1
        except TypeError:
            print("Please enter the amount in figures")
            return
        if amount > self.balance:
            print("Failed. Insufficient funds in your account. Your balance is {}".format(self.balance))
        else:
            self.balance -= amount
            time = datetime.now()
            self.money.append({"time": time, "amount": amount})
            print("Confirmed. You have sent {} on {}. Your balance is {}".format(
                amount, self.get_formatted_time(time), self.balance))

    # The original receive_money took no amount; a parameter is assumed here.
    def receive_money(self, amount):
        try:
            amount - 1
        except TypeError:
            print("Please enter the amount in figures")
            return
        self.balance += amount
        time = datetime.now()
        self.received.append({"time": time, "amount": amount})
        print("You have received {} on {}. Your balance is {}".format(
            amount, self.get_formatted_time(time), self.balance))
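# Illustrative usage (the names, numbers and provider are example values only):
if __name__ == "__main__":
    account = MobileMoneyAccount("Susan", "Talaso", "0700000000", "Safaricom")
    account.deposit(1000)
    account.buy_airtime(100)
    account.send_money(250)
    print(account.get_balance())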
| true |
8da8ab560e8df2a0d1fea46cf9a857ed969effd5
|
Python
|
roy860328/fuzzy_system
|
/fuzzy_system.py
|
UTF-8
| 3,635 | 3.25 | 3 |
[] |
no_license
|
#-1 == Small, 0 == Medium, 1 == Large
class Fuzzifier(object):
def __init__(self):
self.d1scale, self.d1mu = 0, 0
self.d2scale, self.d2mu = 0, 0
self.d3scale, self.d3mu = 0, 0
def run(self, straight, right, left):
self.straightFuzzifier(straight)
self.rightFuzzifier(right)
self.leftFuzzifier(left)
steeringWheel = self.defuzzifier()
return steeringWheel
def straightFuzzifier(self, straight):
#Small
if straight <= 5:
scale = -1
mu = 1
elif 5 < straight and straight <= 15:
scale = -1
mu = (-1*straight/15 + 4/3)
#Medium
elif 15 < straight and straight <= 25:
scale = 0
mu = 0
#Large
else:#elif 25 < straight:
scale = 1
mu = 1
self.d1scale, self.d1mu = scale, mu
def rightFuzzifier(self, right):
#Small
if right <= 10:
scale = -1
mu = 1
elif 10 < right and right <= 12:
scale = -1
mu = (-2*right/5 + 4.2)
# Medium
elif 12 < right and right <= 15:
scale = 0
mu = (2*right/25 - 0.6)
elif 15 < right and right <= 20:
scale = 0
mu = (-1*right/8 + 7/2)
#Large
elif 20 < right and right <= 30:
scale = 1
mu = (1*right/16 - 1)
else:#elif 30 < right:
scale = 1
mu = 1
self.d2scale, self.d2mu = scale, mu
def leftFuzzifier(self, left):
# Small
if left <= 10:
scale = -1
mu = 1
elif 10 < left and left <= 12:
scale = -1
mu = (-2 * left / 5 + 4.2)
# Medium
elif 12 < left and left <= 20:
scale = 0
mu = (2 * left / 25 - 0.6)
elif 20 < left and left <= 35:
scale = 0
mu = (-1 * left / 8 + 7 / 2)
# Large
elif 35 < left and left <= 40:
scale = 1
mu = (1 * left / 16 - 1)
else:#elif 45 < left:
scale = 1
mu = 1
self.d3scale, self.d3mu = scale, mu
def defuzzifier(self):
steeringWheel = 0
#left large distance
if self.d3scale == 1:
steeringWheel = -1*self.d3mu*50
#right large distance
elif self.d2scale == 1:
steeringWheel = 1*self.d2mu*50
#left medium distance
elif self.d3scale == 0:
steeringWheel = -1*self.d3mu*5 - 40
#right medium distance
elif self.d2scale == 0:
steeringWheel = 1*self.d2mu*5 + 40
#straight large or medium distance and left small distance
elif (self.d1scale == 1 or self.d1scale == 0) and self.d3scale == -1:
if self.d1mu == 1:
steeringWheel = 10
else:
steeringWheel = -1*self.d1mu*20 + 40
# straight large or medium distance and right small distance
elif (self.d1scale == 1 or self.d1scale == 0) and self.d2scale == -1:
if self.d1mu == 1:
steeringWheel = -10
else:
steeringWheel = 1*self.d1mu*20 - 40
return steeringWheel
fuzzifier = Fuzzifier()
def fuzzy_System_Return_Angle(straight, right, left):
if straight > 300:
straight = 0
if right > 300:
right = 0
if left > 300:
left = 0
steeringWheel = fuzzifier.run(straight, right, left)
# print(steeringWheel)
# steeringWheel = -7
return steeringWheel
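# Illustrative call (example distances only): straight, right and left sensor
# readings; the wrapper above clamps any reading over 300 down to 0.
if __name__ == "__main__":
    print(fuzzy_System_Return_Angle(20, 15, 12))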
| true |
59a07aab8990bc572c5e0e75e6dd8554f4ecc6b0
|
Python
|
lectricas/ml-intro
|
/hw2.py
|
UTF-8
| 2,910 | 2.8125 | 3 |
[] |
no_license
|
from sklearn.datasets import make_blobs, make_moons, make_swiss_roll
import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib
import copy
import cv2
from collections import deque
from sklearn.neighbors import KDTree
def visualize_clasters(X, labels):
unique_labels = np.unique(labels)
unique_colors = np.random.random((len(unique_labels), 3))
colors = [unique_colors[l] for l in labels]
plt.figure(figsize=(9, 9))
plt.scatter(X[:, 0], X[:, 1], c=colors)
plt.show()
def clusters_statistics(flatten_image, cluster_colors, cluster_labels):
fig, axes = plt.subplots(3, 2, figsize=(12, 16))
for remove_color in range(3):
axes_pair = axes[remove_color]
first_color = 0 if remove_color != 0 else 2
second_color = 1 if remove_color != 1 else 2
axes_pair[0].scatter([p[first_color] for p in flatten_image], [p[second_color] for p in flatten_image],
c=flatten_image, marker='.')
axes_pair[1].scatter([p[first_color] for p in flatten_image], [p[second_color] for p in flatten_image],
c=[cluster_colors[c] for c in cluster_labels], marker='.')
for a in axes_pair:
a.set_xlim(0, 1)
a.set_ylim(0, 1)
plt.show()
class DBScan:
def __init__(self, eps=0.5, min_samples=5, leaf_size=40, metric="euclidean"):
self.leaf_size = leaf_size
self.min_samples = min_samples
self.metric = metric
self.eps = eps
def fit_predict(self, X, y=None):
tree = KDTree(X, leaf_size=self.leaf_size, metric=self.metric)
nearest = tree.query_radius(X, self.eps, return_distance=False)
labels = np.full((X.shape[0],), -1, dtype=int)
for label_index, label in enumerate(labels):
if nearest[label_index].shape[0] >= self.min_samples:
self.mark(nearest, label_index, labels, -1)
unique_labels = np.unique(labels)
new_labels = np.zeros_like(labels)
for i in range(unique_labels.size):
new_labels[labels == unique_labels[i]] = i
return new_labels
def mark(self, all_nearest, current_point_index, all_labels, current_label):
if current_label != -1:
all_labels[current_point_index] = current_label
else:
current_label = current_point_index + 1
for n in all_nearest[current_point_index]:
if all_labels[n] == -1:
self.mark(all_nearest, n, all_labels, current_label)
X_1, true_labels = make_blobs(400, 2, centers=[[0, 0], [-4, 0], [3.5, 3.5], [3.5, -2.0]])
# visualize_clasters(X_1, true_labels)
# X_1 = np.array([[1, 1], [1.5, 1.5], [8.1, 8.1], [9, 9], [1.2, 1.2], [8, 8]])
dbscan = DBScan(eps=0.2, min_samples=5)
X_2, true_labels = make_moons(400, noise=0.075)
labels_1 = dbscan.fit_predict(X_2)
visualize_clasters(X_2, labels_1)
| true |