text
stringlengths 0
1.05M
| meta
dict |
---|---|
# 1.1 -- unpacking sequences into separate variables
p = (4, 5)
x, y = p
print(x)
print(y)
# Any iterable of matching length can be unpacked, including mixed types.
data = [ 'ACME', 50, 91.1, (2012, 12, 21) ]
name, shares, price, date = data
print(name)
print(date)
# Nested structures unpack with matching nested targets.
name, shares, price, (year, mon, day) = data
print(name)
print(year)
print(mon)
print(day)
# Strings unpack character by character (length must match exactly).
s = 'Hello'
a, b, c, d, e = s
print(a)
print(b)
print(e)
# Use a throwaway name (conventionally _) for fields you don't need.
data = [ 'ACME', 50, 91.1, (2012, 12, 21) ]
_, shares, price, _ = data
print(shares)
print(price)
# 1.2 * star expression
def drop_first_last(grades):
    """Return the mean of *grades* with the first and last entries removed."""
    _first, *inner, _last = grades
    return sum(inner) / len(inner)
record = ('Dave', 'dave@example.com', '773-555-1212', '847-555-1212')
# A starred target soaks up the variable-length tail as a list.
name, email, *phone_numbers = record
print(email)
print(phone_numbers)
print(type(phone_numbers))
record = ('Dave', 'dave@example.com')
# The starred target may also match zero items (yielding an empty list).
name, email, *phone_numbers = record
print(email)
print(phone_numbers)
print(type(phone_numbers))
# NOTE: at most one starred target is allowed per assignment.
sales_record = [10, 8, 7, 1, 9, 5, 10, 3]
*trailing_qtrs, current_qtr = sales_record
trailing_avg = sum(trailing_qtrs) / len(trailing_qtrs)
print(trailing_qtrs)
print(current_qtr)
print(trailing_avg)
# Tagged tuples of varying length, dispatched on the first element below.
records = [
    ('foo', 1, 2),
    ('bar', 'hello'),
    ('foo', 3, 4),
]
def do_foo(x, y):
    """Handle a 'foo' record by echoing its two payload values."""
    print('foo', x, y)
def do_bar(s):
    """Handle a 'bar' record by echoing its single payload value."""
    print('bar', s)
# Dispatch each record to its handler based on the leading tag.
for tag, *args in records:
    if tag == 'foo':
        do_foo(*args)
    elif tag == 'bar':
        do_bar(*args)
# Star in the middle: keep the first and last fields by name, bag the rest.
line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false'
uname, *fields, homedir, sh = line.split(':')
print(uname)
print(fields)
print(homedir)
print(sh)
# Throwaway starred targets plus nested unpacking: just name and year.
record = ('ACME', 50, 123.45, (12, 18, 2012))
name, *_, (*_, year) = record
print(name)
print(year)
# Head/tail split of a list.
items = [1, 10, 7, 4, 5, 9]
head, *tail = items
print(head)
print(tail)
def sum(items):
    """Recursively sum a sequence via head/tail star-unpacking.

    NOTE: this cookbook demo intentionally shadows the builtin ``sum``.
    Generalized to return 0 for an empty sequence (matching the builtin)
    instead of raising ValueError from the failed unpacking.
    """
    if not items:
        return 0
    head, *tail = items
    # Ternary binds loosely: (head + sum(tail)) if tail else head.
    return head + sum(tail) if tail else head
# Calls the recursive sum defined above (which shadows the builtin).
print(sum(items))
# 1.3 -- keeping the last N items
from collections import deque
def search(lines, pattern, history=5):
    """Yield (matching_line, previous_lines) for each line containing *pattern*.

    ``previous_lines`` is a deque holding at most *history* prior lines.
    Note the yielded deque is the live object; it keeps updating as the
    generator is advanced.
    """
    recent = deque(maxlen=history)
    for current in lines:
        if pattern in current:
            yield current, recent
        recent.append(current)
# Example use on a file
if __name__ == '__main__':
    # Print each matching line preceded by its (up to 5) context lines.
    with open('./somefile.txt') as f:
        for line, prevlines in search(f, 'python', 5):
            for pline in prevlines:
                print(pline, end='')
            print(line, end='')
            print('-' * 20)
# A bounded deque silently discards the oldest item once full.
q = deque(maxlen=3)
q.append(1)
q.append(2)
q.append(3)
print(q)
q.append(4)  # pushes 1 out
print(q)
q.append(5)  # pushes 2 out
print(q)
# An unbounded deque gives O(1) appends and pops at either end.
q = deque()
q.append(1)
q.append(2)
q.append(3)
print(q)
q.appendleft(4)
print(q)
q.pop()
print(q)
q.popleft()
# 1.4 -- finding the N largest/smallest items
import heapq
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
print(heapq.nlargest(3, nums)) # Prints [42, 37, 23]
print(heapq.nsmallest(3, nums)) # Prints [-4, 1, 2]
portfolio = [
    {'name': 'IBM', 'shares': 100, 'price': 91.1},
    {'name': 'AAPL', 'shares': 50, 'price': 543.22},
    {'name': 'FB', 'shares': 200, 'price': 21.09},
    {'name': 'HPQ', 'shares': 35, 'price': 31.75},
    {'name': 'YHOO', 'shares': 45, 'price': 16.35},
    {'name': 'ACME', 'shares': 75, 'price': 115.65}
]
# key= selects the field used to rank the dicts.
cheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price'])
expensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])
print(cheap)
print(expensive)
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
heap = list(nums)
print(heap)
# heapify reorders in place so heap[0] is always the smallest item.
heapq.heapify(heap)
print(heap)
| {
"repo_name": "cragwen/hello-world",
"path": "py/cookbook/p1.py",
"copies": "1",
"size": "3296",
"license": "unlicense",
"hash": -1141708750737350800,
"line_mean": 18.619047619,
"line_max": 69,
"alpha_frac": 0.5998179612,
"autogenerated": false,
"ratio": 2.547140649149923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36469586103499224,
"avg_score": null,
"num_lines": null
} |
#1 1pt) What is the symbol "=" used for?
# = is called assignment operator, it is used to put a value into a variable.
# For example, if you put Myname = Daniel Kwon. Myname will be the variable
# and the value will be Daniel Kwon.
#(1)
#2 3pts) Write a technical definition for 'function'
# A function is a named sequence of statements that performs a computation.
# So when you define a function, you determine the name and the sequence of
# statements.
#(3)
#3 1pt) What does the keyword "return" do?
# "return" sends the function's result back to the calling code.
# "return" doesn't simply end the definition. It is instead the point at
# which the function returns the result to the caller.
#(1)
#4 5pts) We know 5 basic data types. Write the name for each one and provide two examples of each below
# 1: "int" - integer: a whole number
# example 1 : 0
# example 2 : -34
# 2: "float" - floating point value : a number with a fractional part
# example 1 : 0.0
# example 2 : 8.0
# 3: "bool" - boolean: tells whether a value is true or false
# example 1 : bool(0.0) is false
# example 2 : bool("Daniel Kwon") is true
# 4: "str" - "string" of letters
# example 1 : "Hello, Computer"
# example 2 : "I wish i get a good grade"
# 5: tuple - a sequence of python objects
# example 1 : ("Daniel Kwon", 17, "students")
# example 2 : ("Steven", 16, "lol")
#(5)
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
# A function definition names the function and gives its body; a function
# call executes that body and receives the return value.
#(2)
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1: Input : raw data
# 2: Process : data processing
# 3: Output : information
#(3)
#Part 2: Programming (25 points)
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
import math
#1 pt for header line (1)
#3 pt for correct formula (1)
#1 pt for return value (1)
#1 pt for parameter name (1)
#1 pt for function name (1)
def circle_area(a):
    """Return the diameter of a circle whose area is *a*.

    Per the assignment hint: radius is the square root of the area divided
    by pi, and the diameter is twice the radius.

    BUG FIX: the original computed a**2 / pi (squaring the area) instead of
    sqrt(a / pi) -- the rubric's "correct formula" deduction points at this.
    """
    radius = math.sqrt(a / math.pi)
    return radius * 2
#1pt for header line (0)
#1pt for parameter names (0)
#1pt for return value (0)
#1pt for correct output format (0)
#3pt for correct use of format function (0)
#1pt header line (1)
#1pt getting input (1)
#1pt converting input (1)
#1pt for calling output function (0)
#2pt for correct diameter formula (0.5)
#1pt for variable names(1)
def main():
    """Prompt for three circle areas, then print each diameter and the total."""
    # raw_input: this file is Python 2 source.
    c1 = raw_input("Area of C1: ")
    c2 = raw_input("Area of C2: ")
    c3 = raw_input("Area of C3: ")
    # Convert the text input to int and compute each circle's diameter.
    c1a = circle_area(int(c1))
    c2a = circle_area(int(c2))
    c3a = circle_area(int(c3))
    t = c1a + c2a + c3a
    # Assemble the whole report table as one string, then print it.
    out = "Circle" + " " + "Diameter" + "\n" + "c1" + " " + str(c1a) + "\n" + "c2" + " " + str(c2a) + "\n" + "c3" + " " + str(c3a) + "\n" + "Totals" + " " + str(t)
    print out
#1pt for calling main (1)
main()
#Hint: Radius is the square root of the area divided by pi
#1pt explanatory comments (0)
#1pt code format (0.5)
#Total=26
| {
"repo_name": "daniel2504-cmis/daniel2504-cmis-cs2",
"path": "cs2quiz1.py",
"copies": "1",
"size": "3131",
"license": "cc0-1.0",
"hash": 1421084007931847700,
"line_mean": 28.819047619,
"line_max": 173,
"alpha_frac": 0.664324497,
"autogenerated": false,
"ratio": 2.8258122743682312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3990136771368231,
"avg_score": null,
"num_lines": null
} |
# 1.1 Tennis
# Programmed by Rachel J Morris
import pygame, sys, math
from pygame.locals import *
pygame.init()
fpsClock = pygame.time.Clock()  # used to cap the main loop at 30 FPS
window = pygame.display.set_mode( ( 1024, 768 ) )
pygame.display.set_caption( "Tennis" )
bgColor = pygame.Color( 135, 215, 105 )
txtColor = pygame.Color( 0, 0, 0 )
fontObj = pygame.font.Font( "content/cnr.otf", 20 )
# Match state (reinitialised by Reset()).
columnCount = 0  # column index for the next win-marker sprite
setCount = 0
riggsScore = 0   # index into tennisScores
kingScore = 0    # index into tennisScores
tennisScores = [ 0, 15, 30, 40, "game" ]
chanceOfKingWinning = 0.55
chanceOfMatch = -1  # sentinel: -1 means no points recorded yet
images = {
    "King" : pygame.image.load( "content/king.png" ),
    "Riggs" : pygame.image.load( "content/riggs.png" ),
    "Reset" : pygame.image.load( "content/reset.png" )
}
# Populated by Reset().
people = []
buttons = []
matchText = {}
winImages = []
def Reset():
    """Reset every piece of match state back to a fresh first set.

    BUG FIX: all ``global`` declarations are hoisted to the top of the
    function.  The original used the names first (``del people[:]`` etc.)
    and declared them global afterwards, which is a SyntaxError on
    Python 3 ("name used prior to global declaration").
    """
    global columnCount
    global setCount
    global riggsScore
    global kingScore
    global tennisScores
    global chanceOfKingWinning
    global chanceOfMatch
    global people
    global buttons
    global matchText
    # Clear the containers in place so references held elsewhere stay valid.
    del people[:]
    del buttons[:]
    del winImages[:]
    matchText.clear()
    print( "Reset" )
    columnCount = 0
    setCount = 0
    riggsScore = 0
    kingScore = 0
    tennisScores = [ 0, 15, 30, 40, "game" ]
    chanceOfKingWinning = 0.55
    chanceOfMatch = -1
    # Rebind the containers to their initial contents.
    people = [
        { "name" : "King", "image" : images["King"], "x" : 0, "y" : 50, "w" : 64, "h" : 96 },
        { "name" : "Riggs", "image" : images["Riggs"], "x" : 0, "y" : 130, "w" : 64, "h" : 96 },
    ]
    buttons = [
        { "name" : "Reset", "image" : images["Reset"], "x" : 800, "y" : 50, "w" : 150, "h" : 50 },
    ]
    matchText = {
        "set" : { "label" : fontObj.render( "Set 1", False, txtColor ), "pos" : ( 0, 0 ) },
        "kingscore" : { "label" : fontObj.render( "King: 0", False, txtColor), "pos" : ( 100, 0 ) },
        "riggsscore" : { "label" : fontObj.render( "Riggs: 0", False, txtColor), "pos" : ( 300, 0 ) },
        "chance" : { "label" : fontObj.render( "Chance: -", False, txtColor), "pos" : ( 500, 0 ) },
    }
def IsClicked( mouseX, mouseY, obj ):
    """Return True when (mouseX, mouseY) lies inside obj's bounding box.

    *obj* is a dict with "x", "y", "w", "h"; edges are inclusive.
    """
    within_x = obj["x"] <= mouseX <= obj["x"] + obj["w"]
    within_y = obj["y"] <= mouseY <= obj["y"] + obj["h"]
    return within_x and within_y
def AddWinner( winner ):
    """Record a point for *winner* ("King" or "Riggs"): queue a marker
    sprite, advance the score, and update the probability readout."""
    global riggsScore
    global kingScore
    global setCount
    global columnCount
    # Grid position for this point's marker image.
    x = columnCount * 70 + 100
    y = setCount * 200 + 80
    print( "Round", columnCount, "Set", setCount )
    newImage = { "image" : images[ winner ], "pos" : ( x, y ) }
    if ( winner == "Riggs" ):
        # At 40-40 (both indices at 3) the opponent is knocked back one
        # step instead of the winner advancing (deuce-style scoring).
        if ( riggsScore == 3 and kingScore == 3 ):
            kingScore -= 1
        else:
            riggsScore += 1
    elif ( winner == "King" ):
        if ( riggsScore == 3 and kingScore == 3 ):
            riggsScore -= 1
        else:
            kingScore += 1
    matchText[ "riggsscore" ]["label"] = fontObj.render( "Riggs: " + str( tennisScores[ riggsScore ] ), False, txtColor )
    matchText[ "kingscore" ]["label"] = fontObj.render( "King: " + str( tennisScores[ kingScore ] ), False, txtColor )
    global chanceOfMatch
    global chanceOfKingWinning
    # Running product of per-point probabilities; -1 marks "first point".
    if ( chanceOfMatch == -1 ):
        if ( winner == "Riggs" ):
            chanceOfMatch = (1 - chanceOfKingWinning)
        elif ( winner == "King" ):
            chanceOfMatch = chanceOfKingWinning
    else:
        if ( winner == "Riggs" ):
            chanceOfMatch = chanceOfMatch * (1 - chanceOfKingWinning)
        else:
            chanceOfMatch = chanceOfMatch * chanceOfKingWinning
    matchText[ "chance" ]["label"] = fontObj.render( "Chance: " + str( chanceOfMatch * 100 ) + "%", False, txtColor )
    winImages.append( newImage )
def ClickPerson( mouseX, mouseY ):
    """Dispatch a mouse click.

    Returns True when a player sprite was hit (a point was scored),
    False when the Reset button was hit.  A click on empty space falls
    through and implicitly returns None (falsy, like False).
    """
    for person in people:
        if ( IsClicked( mouseX, mouseY, person ) ):
            print( person["name"], " wins" )
            AddWinner( person["name"] )
            return True
    for button in buttons:
        if ( IsClicked( mouseX, mouseY, button ) ):
            Reset()
            return False
def NextSet():
    """Advance to the next set: bump the set counter, zero the scores,
    move the player sprites down a row, and refresh the labels."""
    print( "Next set" )
    global riggsScore
    global kingScore
    global setCount
    global columnCount
    # BUG FIX: chanceOfMatch was missing from the global declarations, so
    # the reset at the bottom only created a dead local variable and the
    # running probability was never cleared between sets.
    global chanceOfMatch
    setCount += 1
    kingScore = 0
    riggsScore = 0
    columnCount = 0
    # Shift both player sprites down to the next set's row.
    people[0]["y"] += 200
    people[1]["y"] += 200
    # Fresh set label plus repositioned/reset score labels.
    matchText["set" + str(setCount)] = { "label" : fontObj.render( "Set " + str( setCount+1 ), False, txtColor ), "pos" : ( 0, setCount * 200 ) }
    matchText["riggsscore"]["pos"] = ( 100, setCount * 200 )
    matchText["kingscore"]["pos"] = ( 300, setCount * 200 )
    matchText[ "riggsscore" ]["label"] = fontObj.render( "Riggs: " + str( tennisScores[ riggsScore ] ), False, txtColor )
    matchText[ "kingscore" ]["label"] = fontObj.render( "King: " + str( tennisScores[ kingScore ] ), False, txtColor )
    chanceOfMatch = -1
Reset()  # build the initial game state before entering the event loop
while True:
    window.fill( bgColor )
    for event in pygame.event.get():
        if ( event.type == QUIT ):
            pygame.quit()
            sys.exit()
        elif ( event.type == MOUSEBUTTONDOWN ):
            mouseX, mouseY = event.pos
            # A truthy return means a player was clicked and scored.
            if ( ClickPerson( mouseX, mouseY ) ):
                columnCount += 1
            if ( tennisScores[ riggsScore ] == "game" or tennisScores[ kingScore ] == "game" ):
                # Next set
                NextSet()
    # Redraw every sprite and label each frame.
    for person in people:
        window.blit( person["image"], ( person["x"], person["y"] ) )
    for winner in winImages:
        window.blit( winner["image"], winner["pos"] )
    for key, text in matchText.items():
        window.blit( text["label"], text["pos"] )
    for button in buttons:
        window.blit( button["image"], ( button["x"], button["y"] ) )
    pygame.display.update()
    fpsClock.tick( 30 )
| {
"repo_name": "Rachels-Courses/CS210-Discrete-Structures",
"path": "Resources/Program Illustrations/1.1/1.1 Tennis/main.py",
"copies": "1",
"size": "5879",
"license": "mit",
"hash": -7675673751382739000,
"line_mean": 28.103960396,
"line_max": 145,
"alpha_frac": 0.5441401599,
"autogenerated": false,
"ratio": 3.266111111111111,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9068541683514082,
"avg_score": 0.04834191749940581,
"num_lines": 202
} |
## 1.1 Types
from deap import base, creator
# Single-objective minimisation (negative weight) with list-based individuals.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
## 1.2 Initialization
import random
from deap import tools
IND_SIZE = 10  # number of genes per individual
toolbox = base.Toolbox()
# attribute -> one random float; individual -> IND_SIZE attributes;
# population -> a list of individuals.
toolbox.register("attribute", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attribute, n=IND_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
## 1.3 Operators
def evaluate(individual):
    """Fitness of *individual*: the sum of its genes, as a 1-tuple
    (DEAP expects fitness values to be tuples)."""
    total = 0
    for gene in individual:
        total += gene
    return (total,)
# Variation and selection operators plus the evaluation function.
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.1)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluate)
## 1.4 Algorithms
def main():
    """Run a simple generational evolutionary loop and return the final
    population."""
    pop = toolbox.population(n=50)
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40  # crossover prob, mutation prob, generations
    # Evaluate the entire population
    fitnesses = map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    for g in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals.  BUG FIX: materialise into a list;
        # on Python 3 ``map`` returns a lazy iterator, so the slicing below
        # (offspring[::2]) and ``pop[:] = offspring`` would raise TypeError /
        # silently empty the population.
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # Invalidate fitnesses of modified children.
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # The population is entirely replaced by the offspring
        pop[:] = offspring
    return pop


if __name__ == "__main__":
    main()
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "docs/code/tutorials/part_1/1_where_to_start.py",
"copies": "2",
"size": "2146",
"license": "mit",
"hash": -5856502628096785000,
"line_mean": 28.8055555556,
"line_max": 74,
"alpha_frac": 0.6547064306,
"autogenerated": false,
"ratio": 3.564784053156146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5219490483756146,
"avg_score": null,
"num_lines": null
} |
# 12/01/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o Updated for wx namespace. Not tested though.
#
# 12/17/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o Removed wx prefix from class name,
# updated reverse renamer
#
"""
sorry no documentation...
Christopher J. Fama
"""
import wx
import wx.html as html
class PyClickableHtmlWindow(html.HtmlWindow):
    """
    Class for a wxHtmlWindow which responds to clicks on links by opening a
    browser pointed at that link, and to shift-clicks by copying the link
    to the clipboard.
    """
    def __init__(self,parent,ID,**kw):
        # Direct base-class call instead of the long-deprecated builtin
        # apply(); behaviour is identical.
        html.HtmlWindow.__init__(self, parent, ID, **kw)

    def OnLinkClicked(self,link):
        self.link = wx.TextDataObject(link.GetHref())
        if link.GetEvent().ShiftDown():
            # Shift-click: copy the URL to the system clipboard.
            if wx.TheClipboard.Open():
                wx.TheClipboard.SetData(self.link)
                wx.TheClipboard.Close()
            else:
                dlg = wx.MessageDialog(self,"Couldn't open clipboard!\n",wx.OK)
                wx.Bell()
                dlg.ShowModal()
                dlg.Destroy()
        else:
            # Plain click: open the link in the default browser.
            if 0: # Chris's original code... (dead branch; also references
                  # sys/os, which this module never imports)
                if sys.platform not in ["windows",'nt'] :
                    #TODO: A MORE APPROPRIATE COMMAND LINE FOR Linux
                    #[or rather, non-Windows platforms... as of writing,
                    #this MEANS Linux, until wxPython for wxMac comes along...]
                    command = "/usr/bin/netscape"
                else:
                    command = "start"
                command = "%s \"%s\"" % (command,
                                         self.link.GetText ())
                os.system (command)
            else: # My alternative
                import webbrowser
                webbrowser.open(link.GetHref())
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/ClickableHtmlWindow.py",
"copies": "1",
"size": "1895",
"license": "mit",
"hash": 2924706960970223600,
"line_mean": 31.2456140351,
"line_max": 80,
"alpha_frac": 0.5287598945,
"autogenerated": false,
"ratio": 3.89917695473251,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9838148532305622,
"avg_score": 0.017957663385377614,
"num_lines": 57
} |
# 120206_Demonstration.py
# In class exercises for Feb 6th
# Chad Hobbs
# ------------- Slicing Strings -------------------
# name = "Chad Hobbs" # this is a string
# print(name[0]) # putting brackets and a valid number gets a piece of the string out
# print(name[1]) # the string slice starts at 0 and goes up to the total characters minus 1
# print(name[0:4]) # using a colon reports a section of the string, the ending needs to be the total characters of the string
# print(len(name)) # len reports the length of the string
# print(name[0:11]) # this is a full slice of the string
# print(name[2:11]) # the slice can start anywhere that is less than the end length
# ------------- Working with Lists ------------------
##name = input("Name (First Last): ")
##result = name.split(" ")
##print(result[1],result[0],sep=",")
##
##
##
##
##index = name.find(" ")
##first_name = name[0:index]
##last_name = name[index+1:] # or end the brackets with len(name) or leave blank
##print(last_name,first_name,sep=",")
# --------------- Capitalizing parts of strings -----------------------
name = input("Enter your name: ")
new_name = [] # empty list
# Copy the string into a list of characters so it can be mutated.
for ch in name:
    new_name.append(ch)
# NOTE(review): subtracting 32 upper-cases only an ASCII lowercase letter;
# an already-uppercase or non-letter first character produces garbage, and
# empty input raises IndexError -- confirm inputs are lowercase names.
new_name[0] = chr(ord(new_name[0]) - 32) # ord grabs the ordinal number of a character, chr returns the character based on ordinal number
print("".join(new_name))
| {
"repo_name": "itsallvoodoo/csci-school",
"path": "CSCI220/Week 05/120206_Demonstration.py",
"copies": "1",
"size": "1411",
"license": "apache-2.0",
"hash": 5265542333350671000,
"line_mean": 27.3958333333,
"line_max": 137,
"alpha_frac": 0.6109142452,
"autogenerated": false,
"ratio": 3.281395348837209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9360904835728225,
"avg_score": 0.006280951661796698,
"num_lines": 48
} |
# 120208_Demonstration.py
# In class exercises for Feb 8th
# Chad Hobbs
from graphics import *
import random, time
# -----------printing with lists---------------
##n = eval(input("n: "))
##
##win = GraphWin("Example",300,300)
##
##ball_list = []
##for i in range(n):
## x = random.randint(0,299)
## y = random.randint(0,299)
## circle = Circle(Point(x,y),10)
## circle.draw(win)
## ball_list.append(circle)
##
#### time.sleep(1)
##
##
##for j in range(1000):
##
## # for i in range(n):
## for ball in ball_list:
## dx = random.randint(-10,10)
## dy = random.randint(-10,10)
## # ball_list[i].move(dx,dy)
## ball.move(dx,dy)
## --------------commands with lists-------------------------
# Build a list of 10 random ints in [0, 10].
num_list = []
for i in range(10):
    num_list.append(random.randint(0,10))
print(num_list)
print("Directly")
# Iterate the elements themselves.
for num in num_list:
    print(num)
print("Using index:")
# Equivalent iteration by index.
for i in range(len(num_list)):
    print(num_list[i])
# num_list.remove(10) removes the first number 10 from the list, if it exists
# num_list.insert(3,"Paul") will insert Paul after the first 3 is found
# NOTE(review): the first board cell holds the digit '0' -- presumably the
# letter 'O' was intended for tic-tac-toe; confirm with the author.
board = [['0','',''],['','X',''],['X','','']]
# location[i][j] is the (x, y) centre of board cell (i, j) in window coords.
location = [[[15,75],[45,75],[75,75]],[[15,45],[45,45],[75,45]],[[15,15],[45,15],[75,15]]]
win = GraphWin('Tic-Tac-Toe',400,400)
win.setCoords(0,0,90,90)
# Two vertical and two horizontal grid lines split the window into 9 cells.
Line(Point(30,0),Point(30,90)).draw(win)
Line(Point(60,0),Point(60,90)).draw(win)
Line(Point(0,30),Point(90,30)).draw(win)
Line(Point(0,60),Point(90,60)).draw(win)
for i in range(len(board)): # Each row
    for j in range(len(board[i])):
        x = location[i][j][0]
        y = location[i][j][1]
        text = Text(Point(x,y),board[i][j])
        text.draw(win)
win.getMouse()
win.close()
| {
"repo_name": "itsallvoodoo/csci-school",
"path": "CSCI220/Week 05/120208_Demonstration.py",
"copies": "1",
"size": "1807",
"license": "apache-2.0",
"hash": -2311275641276566500,
"line_mean": 20.8734177215,
"line_max": 90,
"alpha_frac": 0.5406751522,
"autogenerated": false,
"ratio": 2.596264367816092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36369395200160914,
"avg_score": null,
"num_lines": null
} |
# 12/02/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o Updated for 2.5 compatability.
#
"""
FancyText -- methods for rendering XML specified text
This module exports four main methods::
def GetExtent(str, dc=None, enclose=True)
def GetFullExtent(str, dc=None, enclose=True)
def RenderToBitmap(str, background=None, enclose=True)
def RenderToDC(str, dc, x, y, enclose=True)
In all cases, 'str' is an XML string. Note that start and end tags are
only required if *enclose* is set to False. In this case the text
should be wrapped in FancyText tags.
In addition, the module exports one class::
class StaticFancyText(self, window, id, text, background, ...)
This class works similar to StaticText except it interprets its text
as FancyText.
The text can support superscripts and subscripts, text in different
sizes, colors, styles, weights and families. It also supports a
limited set of symbols, currently *times*, *infinity*, *angle* as well
as greek letters in both upper case (*Alpha* *Beta*... *Omega*) and
lower case (*alpha* *beta*... *omega*).
>>> frame = wx.Frame(wx.NULL, -1, "FancyText demo", wx.DefaultPosition)
>>> sft = StaticFancyText(frame, -1, testText, wx.Brush("light grey", wx.SOLID))
>>> frame.SetClientSize(sft.GetSize())
>>> didit = frame.Show()
>>> from guitest import PauseTests; PauseTests()
"""
# Copyright 2001-2003 Timothy Hochberg
# Use as you see fit. No warantees, I cannot be held responsible, etc.
import copy
import math
import sys
import wx
import xml.parsers.expat
__all__ = "GetExtent", "GetFullExtent", "RenderToBitmap", "RenderToDC", "StaticFancyText"
if sys.platform == "win32":
_greekEncoding = str(wx.FONTENCODING_CP1253)
else:
_greekEncoding = str(wx.FONTENCODING_ISO8859_7)
_families = {"fixed" : wx.FIXED, "default" : wx.DEFAULT, "decorative" : wx.DECORATIVE, "roman" : wx.ROMAN,
"script" : wx.SCRIPT, "swiss" : wx.SWISS, "modern" : wx.MODERN}
_styles = {"normal" : wx.NORMAL, "slant" : wx.SLANT, "italic" : wx.ITALIC}
_weights = {"normal" : wx.NORMAL, "light" : wx.LIGHT, "bold" : wx.BOLD}
# The next three classes: Renderer, SizeRenderer and DCRenderer are
# what you will need to override to extend the XML language. All of
# the font stuff as well as the subscript and superscript stuff are in
# Renderer.
_greek_letters = ("alpha", "beta", "gamma", "delta", "epsilon", "zeta",
"eta", "theta", "iota", "kappa", "lambda", "mu", "nu",
"xi", "omnikron", "pi", "rho", "altsigma", "sigma", "tau", "upsilon",
"phi", "chi", "psi", "omega")
def iround(number):
    """Return *number* rounded to the nearest integer, as an int
    (same rounding as the built-in round())."""
    nearest = round(number)
    return int(nearest)
def iceil(number):
    """Return the ceiling of *number* as an int."""
    ceiling = math.ceil(number)
    return int(ceiling)
class Renderer:
    """Class for rendering XML marked up text.

    See the module docstring for a description of the markup.

    This class must be subclassed and the methods that do the drawing
    overridden for a particular output device.
    """
    # Class-wide rendering defaults; size and encoding are filled in
    # lazily from wx on first instantiation.
    defaultSize = None
    defaultFamily = wx.DEFAULT
    defaultStyle = wx.NORMAL
    defaultWeight = wx.NORMAL
    defaultEncoding = None
    defaultColor = "black"

    def __init__(self, dc=None, x=0, y=None):
        if dc == None:
            dc = wx.MemoryDC()
        self.dc = dc
        self.offsets = [0]   # stack of vertical offsets (sub/superscripts)
        self.fonts = [{}]    # stack of font-attribute dicts
        self.width = self.height = 0
        self.x = x
        self.minY = self.maxY = self._y = y
        if Renderer.defaultSize is None:
            Renderer.defaultSize = wx.NORMAL_FONT.GetPointSize()
        if Renderer.defaultEncoding is None:
            Renderer.defaultEncoding = wx.Font_GetDefaultEncoding()

    def getY(self):
        # Baseline defaults to the height of "M" when not supplied.
        if self._y is None:
            self.minY = self.maxY = self._y = self.dc.GetTextExtent("M")[1]
        return self._y

    def setY(self, value):
        # NOTE(review): assigns the undefined name `y` rather than `value`,
        # so setting the y property raises NameError at runtime.  Left
        # byte-identical in this documentation-only pass.
        self._y = y

    y = property(getY, setY)

    def startElement(self, name, attrs):
        # Dispatch <tag attrs> to self.start_tag(attrs).
        method = "start_" + name
        if not hasattr(self, method):
            raise ValueError("XML tag '%s' not supported" % name)
        getattr(self, method)(attrs)

    def endElement(self, name):
        # Dispatch </tag> to self.end_tag() if defined; a tag with only a
        # start handler is fine, a fully unknown tag is an error.
        methname = "end_" + name
        if hasattr(self, methname):
            getattr(self, methname)()
        elif hasattr(self, "start_" + name):
            pass
        else:
            raise ValueError("XML tag '%s' not supported" % methname)

    def characterData(self, data):
        """Render (or measure) a run of text, honouring embedded newlines."""
        self.dc.SetFont(self.getCurrentFont())
        for i, chunk in enumerate(data.split('\n')):
            if i:
                # Newline: return to x=0 and advance the baseline.
                # NOTE(review): `self.mayY` looks like a typo for `self.maxY`,
                # and the `self.y = ...` assignment goes through the broken
                # setY above -- multi-line data likely never worked.
                self.x = 0
                self.y = self.mayY = self.maxY + self.dc.GetTextExtent("M")[1]
            if chunk:
                width, height, descent, extl = self.dc.GetFullTextExtent(chunk)
                self.renderCharacterData(data, iround(self.x), iround(self.y + self.offsets[-1] - height + descent))
            else:
                width = height = descent = extl = 0
            self.updateDims(width, height, descent, extl)

    def updateDims(self, width, height, descent, externalLeading):
        # Advance the pen position and grow the overall bounding box.
        self.x += width
        self.width = max(self.x, self.width)
        self.minY = min(self.minY, self.y+self.offsets[-1]-height+descent)
        self.maxY = max(self.maxY, self.y+self.offsets[-1]+descent)
        self.height = self.maxY - self.minY

    def start_FancyText(self, attrs):
        # Root element: nothing to render.
        pass
    start_wxFancyText = start_FancyText # For backward compatibility

    def start_font(self, attrs):
        # Validate/convert the attributes, then push a new font dict that
        # merges the parent's attributes with these.
        for key, value in attrs.items():
            if key == "size":
                value = int(value)
            elif key == "family":
                value = _families[value]
            elif key == "style":
                value = _styles[value]
            elif key == "weight":
                value = _weights[value]
            elif key == "encoding":
                value = int(value)
            elif key == "color":
                pass
            else:
                raise ValueError("unknown font attribute '%s'" % key)
            attrs[key] = value
        font = copy.copy(self.fonts[-1])
        font.update(attrs)
        self.fonts.append(font)

    def end_font(self):
        self.fonts.pop()

    def start_sub(self, attrs):
        # Subscript: shift the baseline down half an "M" and shrink 20%.
        if attrs.keys():
            raise ValueError("<sub> does not take attributes")
        font = self.getCurrentFont()
        self.offsets.append(self.offsets[-1] + self.dc.GetFullTextExtent("M", font)[1]*0.5)
        self.start_font({"size" : font.GetPointSize() * 0.8})

    def end_sub(self):
        self.fonts.pop()
        self.offsets.pop()

    def start_sup(self, attrs):
        # Superscript: shift the baseline up and shrink the font 20%.
        if attrs.keys():
            raise ValueError("<sup> does not take attributes")
        font = self.getCurrentFont()
        self.offsets.append(self.offsets[-1] - self.dc.GetFullTextExtent("M", font)[1]*0.3)
        self.start_font({"size" : font.GetPointSize() * 0.8})

    def end_sup(self):
        self.fonts.pop()
        self.offsets.pop()

    def getCurrentFont(self):
        # Build a wx.Font from the top of the stack, falling back to the
        # class defaults for any unset attribute.
        font = self.fonts[-1]
        return wx.Font(font.get("size", self.defaultSize),
                       font.get("family", self.defaultFamily),
                       font.get("style", self.defaultStyle),
                       font.get("weight",self.defaultWeight),
                       False, "",
                       font.get("encoding", self.defaultEncoding))

    def getCurrentColor(self):
        font = self.fonts[-1]
        return wx.TheColourDatabase.FindColour(font.get("color", self.defaultColor))

    def getCurrentPen(self):
        return wx.Pen(self.getCurrentColor(), 1, wx.SOLID)

    def renderCharacterData(self, data, x, y):
        # Subclasses draw (or merely measure) the text here.
        raise NotImplementedError()
def _addGreek():
    """Install start_/end_ handler methods on Renderer for every greek
    letter tag (e.g. <alpha/>, <Alpha/>)."""
    alpha = 0xE1  # code of lower-case alpha in the greek code page
    Alpha = 0xC1  # code of upper-case Alpha
    def end(self):
        pass
    for i, name in enumerate(_greek_letters):
        # The default argument binds this iteration's character code
        # (avoids the late-binding closure pitfall).
        def start(self, attrs, code=chr(alpha+i)):
            self.start_font({"encoding" : _greekEncoding})
            self.characterData(code)
            self.end_font()
        setattr(Renderer, "start_%s" % name, start)
        setattr(Renderer, "end_%s" % name, end)
        if name == "altsigma":
            continue # There is no capital for altsigma
        def start(self, attrs, code=chr(Alpha+i)):
            self.start_font({"encoding" : _greekEncoding})
            self.characterData(code)
            self.end_font()
        setattr(Renderer, "start_%s" % name.capitalize(), start)
        setattr(Renderer, "end_%s" % name.capitalize(), end)
# Run once at import time to populate Renderer.
_addGreek()
class SizeRenderer(Renderer):
    """Processes text as if rendering it, but just computes the size."""

    def __init__(self, dc=None):
        # Anchor at the origin; only the accumulated width/height matter.
        Renderer.__init__(self, dc, 0, 0)

    def renderCharacterData(self, data, x, y):
        # Measuring only -- nothing is drawn.
        pass

    def start_angle(self, attrs):
        # An angle symbol occupies the same box as an "M".
        self.characterData("M")

    def start_times(self, attrs):
        # A times sign occupies the same box as an "M".
        # (The original defined start_times twice with identical bodies;
        # the dead duplicate has been removed.)
        self.characterData("M")

    def start_in(self, attrs):
        self.characterData("M")

    def start_infinity(self, attrs):
        # Mirror DCRenderer.start_infinity's size computation.
        width, height = self.dc.GetTextExtent("M")
        width = max(width, 10)
        height = max(height, width / 2)
        self.updateDims(width, height, 0, 0)
class DCRenderer(Renderer):
    """Renders text to a wxPython device context DC."""

    def renderCharacterData(self, data, x, y):
        self.dc.SetTextForeground(self.getCurrentColor())
        self.dc.DrawText(data, x, y)

    def start_angle(self, attrs):
        # Draw an angle symbol: a horizontal base plus a rising diagonal.
        self.dc.SetFont(self.getCurrentFont())
        self.dc.SetPen(self.getCurrentPen())
        width, height, descent, leading = self.dc.GetFullTextExtent("M")
        y = self.y + self.offsets[-1]
        self.dc.DrawLine(iround(self.x), iround(y), iround( self.x+width), iround(y))
        self.dc.DrawLine(iround(self.x), iround(y), iround(self.x+width), iround(y-width))
        self.updateDims(width, height, descent, leading)

    def start_infinity(self, attrs):
        # Draw an infinity sign as two adjacent circles.
        self.dc.SetFont(self.getCurrentFont())
        self.dc.SetPen(self.getCurrentPen())
        width, height, descent, leading = self.dc.GetFullTextExtent("M")
        width = max(width, 10)
        height = max(height, width / 2)
        self.dc.SetPen(wx.Pen(self.getCurrentColor(), max(1, width/10)))
        self.dc.SetBrush(wx.TRANSPARENT_BRUSH)
        y = self.y + self.offsets[-1]
        r = iround( 0.95 * width / 4)
        xc = (2*self.x + width) / 2
        yc = iround(y-1.5*r)
        self.dc.DrawCircle(xc - r, yc, r)
        self.dc.DrawCircle(xc + r, yc, r)
        self.updateDims(width, height, 0, 0)

    def start_times(self, attrs):
        # Draw a multiplication sign as two crossing diagonals.
        self.dc.SetFont(self.getCurrentFont())
        self.dc.SetPen(self.getCurrentPen())
        width, height, descent, leading = self.dc.GetFullTextExtent("M")
        y = self.y + self.offsets[-1]
        width *= 0.8
        width = iround(width+.5)
        self.dc.SetPen(wx.Pen(self.getCurrentColor(), 1))
        self.dc.DrawLine(iround(self.x), iround(y-width), iround(self.x+width-1), iround(y-1))
        self.dc.DrawLine(iround(self.x), iround(y-2), iround(self.x+width-1), iround(y-width-1))
        self.updateDims(width, height, 0, 0)
def RenderToRenderer(str, renderer, enclose=True):
    """Parse the XML in *str* and feed its events to *renderer*.

    When *enclose* is true the text is wrapped in a <FancyText> root
    element first.  Parse errors are re-raised as ValueError.
    """
    try:
        if enclose:
            str = '<?xml version="1.0"?><FancyText>%s</FancyText>' % str
        p = xml.parsers.expat.ParserCreate()
        p.returns_unicode = 0
        p.StartElementHandler = renderer.startElement
        p.EndElementHandler = renderer.endElement
        p.CharacterDataHandler = renderer.characterData
        p.Parse(str, 1)
    # Python 2 except syntax -- this module predates Python 3.
    except xml.parsers.expat.error, err:
        raise ValueError('error parsing text text "%s": %s' % (str, err))
# Public interface
def GetExtent(str, dc=None, enclose=True):
    "Return the extent of str"
    measurer = SizeRenderer(dc)
    RenderToRenderer(str, measurer, enclose)
    # Round up to whole pixels.
    return iceil(measurer.width), iceil(measurer.height)
def GetFullExtent(str, dc=None, enclose=True):
    """Return (width, height, baseline offset) of the rendered str,
    rounded up to whole pixels."""
    measurer = SizeRenderer(dc)
    RenderToRenderer(str, measurer, enclose)
    return iceil(measurer.width), iceil(measurer.height), -iceil(measurer.minY)
def RenderToBitmap(str, background=None, enclose=1):
    "Return str rendered on a minimum size bitmap"
    dc = wx.MemoryDC()
    # Chicken and egg problem, we need a bitmap in the DC in order to
    # measure how big the bitmap should be...
    dc.SelectObject(wx.EmptyBitmap(1,1))
    width, height, dy = GetFullExtent(str, dc, enclose)
    bmp = wx.EmptyBitmap(width, height)
    dc.SelectObject(bmp)
    if background is None:
        dc.SetBackground(wx.WHITE_BRUSH)
    else:
        dc.SetBackground(background)
    dc.Clear()
    renderer = DCRenderer(dc, y=dy)
    dc.BeginDrawing()
    RenderToRenderer(str, renderer, enclose)
    dc.EndDrawing()
    dc.SelectObject(wx.NullBitmap)
    if background is None:
        # No explicit background: mask out the background colour so the
        # resulting bitmap is effectively transparent.
        img = wx.ImageFromBitmap(bmp)
        bg = dc.GetBackground().GetColour()
        img.SetMaskColour(bg.Red(), bg.Green(), bg.Blue())
        bmp = img.ConvertToBitmap()
    return bmp
def RenderToDC(str, dc, x, y, enclose=1):
    "Render str onto a wxDC at (x,y)"
    # Measure first so the baseline offset (dy) can be applied.
    width, height, dy = GetFullExtent(str, dc)
    renderer = DCRenderer(dc, x, y+dy)
    RenderToRenderer(str, renderer, enclose)
class StaticFancyText(wx.StaticBitmap):
    """StaticText-alike widget that renders its label as FancyText by
    pre-rendering it to a bitmap."""
    def __init__(self, window, id, text, *args, **kargs):
        args = list(args)
        kargs.setdefault('name', 'staticFancyText')
        # The background brush may arrive as a keyword, as the first
        # positional argument, or default to the parent's background.
        if 'background' in kargs:
            background = kargs.pop('background')
        elif args:
            background = args.pop(0)
        else:
            background = wx.Brush(window.GetBackgroundColour(), wx.SOLID)
        bmp = RenderToBitmap(text, background)
        wx.StaticBitmap.__init__(self, window, id, bmp, *args, **kargs)
# Old names for backward compatibiliry
getExtent = GetExtent
renderToBitmap = RenderToBitmap
renderToDC = RenderToDC
# Test Driver
def test():
testText = \
"""<font weight="bold" size="16">FancyText</font> -- <font style="italic" size="16">methods for rendering XML specified text</font>
<font family="swiss" size="12">
This module exports four main methods::
<font family="fixed" style="slant">
def GetExtent(str, dc=None, enclose=True)
def GetFullExtent(str, dc=None, enclose=True)
def RenderToBitmap(str, background=None, enclose=True)
def RenderToDC(str, dc, x, y, enclose=True)
</font>
In all cases, 'str' is an XML string. Note that start and end tags
are only required if *enclose* is set to False. In this case the
text should be wrapped in FancyText tags.
In addition, the module exports one class::
<font family="fixed" style="slant">
class StaticFancyText(self, window, id, text, background, ...)
</font>
This class works similar to StaticText except it interprets its text
as FancyText.
The text can support<sup>superscripts</sup> and <sub>subscripts</sub>, text
in different <font size="20">sizes</font>, <font color="blue">colors</font>, <font style="italic">styles</font>, <font weight="bold">weights</font> and
<font family="script">families</font>. It also supports a limited set of symbols,
currently <times/>, <infinity/>, <angle/> as well as greek letters in both
upper case (<Alpha/><Beta/>...<Omega/>) and lower case (<alpha/><beta/>...<omega/>).
We can use doctest/guitest to display this string in all its marked up glory.
<font family="fixed">
>>> frame = wx.Frame(wx.NULL, -1, "FancyText demo", wx.DefaultPosition)
>>> sft = StaticFancyText(frame, -1, __doc__, wx.Brush("light grey", wx.SOLID))
>>> frame.SetClientSize(sft.GetSize())
>>> didit = frame.Show()
>>> from guitest import PauseTests; PauseTests()
</font></font>
The End"""
app = wx.PySimpleApp()
box = wx.BoxSizer(wx.VERTICAL)
frame = wx.Frame(None, -1, "FancyText demo", wx.DefaultPosition)
frame.SetBackgroundColour("light grey")
sft = StaticFancyText(frame, -1, testText)
box.Add(sft, 1, wx.EXPAND)
frame.SetSizer(box)
frame.SetAutoLayout(True)
box.Fit(frame)
box.SetSizeHints(frame)
frame.Show()
app.MainLoop()
# Run the interactive demo when executed directly as a script.
if __name__ == "__main__":
    test()
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/fancytext.py",
"copies": "1",
"size": "16689",
"license": "mit",
"hash": 1510189997676168700,
"line_mean": 34.1233766234,
"line_max": 151,
"alpha_frac": 0.6023728204,
"autogenerated": false,
"ratio": 3.5186590765338392,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9582122336240748,
"avg_score": 0.007781912138618327,
"num_lines": 462
} |
# 120222_Demonstration.py
# In class exercise creating a tic tac toe board
# <Chad Hobbs>
#Global Commands
from graphics import *
#Functions
def create_board(): # Opens a new window and draws our board
    """Create an empty 3x3 board and a 600x600 window with grid lines."""
    grid = [['', '', ''], ['', '', ''], ['', '', '']]
    window = GraphWin("Tic-Tac-Toe", 600, 600)
    window.setCoords(0, 0, 30, 30)
    # Two vertical then two horizontal lines cut the 30x30 space into 9 cells.
    for offset in (10, 20):
        Line(Point(offset, 0), Point(offset, 30)).draw(window)
    for offset in (10, 20):
        Line(Point(0, offset), Point(30, offset)).draw(window)
    return window, grid
def check_winner(board):
    """Return 'X' or 'O' if that player owns the whole top row, else None.

    Only the top row is checked in this in-class demo version.
    """
    #if board[0][0] == board [0][1] and board [0]][1] == board[0][2]:
    row1 = "".join(board[0]) #puts the top line together
    if row1 == 'XXX': # checks for an X winner on top line
        return 'X'
    # Bug fix: this previously tested 'row2', which is never defined in
    # this function and raised NameError; the O check belongs to row1.
    if row1 == 'OOO': # checks for an O winner on top line
        return 'O'
    return None # returns a null argument in the event of a tie
def main(): # Main program
    """Open the board window, then wait for one click before closing."""
    window, board = create_board()
    window.getMouse()
    window.close()

main() # Opens the actual program
| {
"repo_name": "itsallvoodoo/csci-school",
"path": "CSCI220/Week 07 - FEB27-02/120222_Demonstration.py",
"copies": "1",
"size": "1091",
"license": "apache-2.0",
"hash": 8189549489425517000,
"line_mean": 25.275,
"line_max": 69,
"alpha_frac": 0.583868011,
"autogenerated": false,
"ratio": 2.9486486486486485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8796713330403001,
"avg_score": 0.047160665849129355,
"num_lines": 40
} |
# 120229_Demonstration.py
# In class demonstration of Tic Tac Toe
# <Chad Hobbs>
from graphics import *
def create_board():
    """Create an empty 3x3 board and a 300x300 window with grid lines drawn."""
    grid = [['', '', ''], ['', '', ''], ['', '', '']]
    window = GraphWin("Tic Tac Toe", 300, 300)
    # NOTE(review): coordinates are inverted (origin at top-right input) —
    # presumably so clicks map to row/col arithmetic in take_turn; verify.
    window.setCoords(30, 30, 0, 0)
    # Two vertical then two horizontal lines cut the 30x30 space into 9 cells.
    for offset in (10, 20):
        Line(Point(offset, 0), Point(offset, 30)).draw(window)
    for offset in (10, 20):
        Line(Point(0, offset), Point(30, offset)).draw(window)
    return window, grid
def get_column(board, i):
    """Return column *i* of the 3x3 board joined into one string."""
    return ''.join(board[row][i] for row in range(3))
def check_winner(board):
    """Return 'X' or 'O' if either player has three in a row, else None.

    Checks the three rows, then the three columns, then the main and
    anti-diagonals — the same priority order as the original unrolled
    version, which repeated the identical test 16 times.
    """
    lines = ["".join(board[r]) for r in range(3)]                     # rows
    lines += ["".join(board[r][c] for r in range(3)) for c in range(3)]  # cols
    lines.append(board[0][0] + board[1][1] + board[2][2])             # diagonal
    lines.append(board[2][0] + board[1][1] + board[0][2])             # anti-diagonal
    for line in lines:
        if line == 'XXX':
            return 'X'
        if line == 'OOO':
            return 'O'
    return None
def take_turn(win, board, who):
    """Read one mouse click, draw *who*'s mark in that cell, record it."""
    click = win.getMouse()
    # Each cell is 10 units wide/tall, so integer division maps the click.
    col = int(click.getX() // 10)
    row = int(click.getY() // 10)
    # Centre the mark within the clicked cell.
    Text(Point(col * 10 + 5, row * 10 + 5), who).draw(win)
    board[row][col] = who
    return board
def main():
    """Play up to 9 alternating turns, announcing a winner if one appears."""
    win, board = create_board()
    for turn in range(9):
        who = 'X' if turn % 2 == 0 else 'O'  # X moves on even turns
        # Bug fix: the original computed 'who' but never called take_turn,
        # so the board stayed empty and the winner check could not trigger.
        board = take_turn(win, board, who)
        winner = check_winner(board)
        if winner is not None:
            print(winner)
            break
    win.getMouse()
    win.close()

main()
| {
"repo_name": "itsallvoodoo/csci-school",
"path": "CSCI220/Week 08 - MAR05-09/120229_Demonstration.py",
"copies": "1",
"size": "2177",
"license": "apache-2.0",
"hash": 8388968225780661000,
"line_mean": 20.6770833333,
"line_max": 82,
"alpha_frac": 0.4735875057,
"autogenerated": false,
"ratio": 2.965940054495913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8844833589573874,
"avg_score": 0.018938794124407746,
"num_lines": 96
} |
# 12/03/2017
from string import ascii_lowercase, ascii_uppercase
def make_table(mirrors):
    """Build the 15x15 character grid for the mirror-maze cipher.

    Row 0 holds letters a-m, row 14 holds N-Z; rows 1-13 each start with
    one of A-M, contain a 13-character mirror row, and end with one of n-z.
    """
    grid = [''] * 15
    grid[0] = ' ' + ascii_lowercase[:13] + ' '
    grid[14] = ' ' + ascii_uppercase[13:] + ' '
    for row in range(1, 14):
        grid[row] = ascii_uppercase[row - 1] + mirrors[row - 1] + ascii_lowercase[row + 12]
    return grid
def get_letter(c, table):
    """Trace the beam for border letter *c* through the mirror grid and
    return the border letter where it exits."""
    #get starting position / velocities
    if c in ascii_uppercase[0:13] or c in ascii_lowercase[13:]:
        # Left edge (A-M) or right edge (n-z): beam travels horizontally.
        x = 1 if c in ascii_uppercase else 13
        y = 1 + (ascii_uppercase.index(c) if c in ascii_uppercase else ascii_lowercase[13:].index(c))
        dx = 1 if c in ascii_uppercase else -1
        dy = 0
    else:
        # Top edge (a-m) or bottom edge (N-Z): beam travels vertically.
        x = 1 + (ascii_uppercase[13:].index(c) if c in ascii_uppercase else ascii_lowercase.index(c))
        y = 1 if c in ascii_lowercase else 13
        dx = 0
        dy = 1 if c in ascii_lowercase else -1
    #go through the grid
    while(x in range(1, 14) and y in range(1, 14)):
        if table[y][x] == '/':
            # '/' mirror reflects (dx, dy) -> (-dy, -dx).
            tmp = 1 if dy == -1 else -1 if dy == 1 else 0
            dy = 1 if dx == -1 else -1 if dx == 1 else 0
            dx = tmp
        elif table[y][x] == '\\':
            # '\' mirror reflects (dx, dy) -> (dy, dx).
            tmp = 1 if dy == 1 else -1 if dy == -1 else 0
            dy = 1 if dx == 1 else -1 if dx == -1 else 0
            dx = tmp
        x += dx
        y += dy
    # Loop exits when the beam steps onto the border: return that letter.
    return table[y][x]
def decode(key, target):
    """Map every character of *target* through the mirror grid built from *key*."""
    table = make_table(key)
    return ''.join(get_letter(ch, table) for ch in target)
# A 13x13 grid of spaces: with no mirrors every beam passes straight through.
empty_mirrors = [' '*13]*13
challenge_mirrors = [' \\\\ /\ ',
' \\',
' / ',
' \\ \\',
' \\ ',
' / / ',
'\\ / \\ ',
' \ ',
'\\/ ',
'/ ',
' \\ ',
' \\/ ',
' / / ']
| {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/intermediate/i269.py",
"copies": "2",
"size": "2005",
"license": "mit",
"hash": 3245000988777836500,
"line_mean": 34.1754385965,
"line_max": 101,
"alpha_frac": 0.4119700748,
"autogenerated": false,
"ratio": 3.57397504456328,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9960497798592511,
"avg_score": 0.005089464154153742,
"num_lines": 57
} |
# 120328_Demonstration.py
# In class demonstrations
# <Chad Hobbs>
import sys
# ---------------------------Sorts --------------------------
# ----- Bubble Sort ------
def bubbleSort(array):
    """In-place bubble sort; reports the comparison count via display().

    Returns None — the input list is sorted in place.
    """
    times = 0              # number of comparisons performed
    swapHappend = True
    # NOTE(review): the flag is reset inside the *outer* for-loop, so the
    # while-loop only repeats the full pass sequence when the final
    # (i == 1) pass still swapped.  This can cost one redundant sweep but
    # the result is correct.
    while swapHappend:
        for i in range(len(array)-1,0,-1):
            swapHappend = False
            for j in range(0,i):
                times = times + 1
                if array[j] > array[j+1]:
                    # Tuple swap of adjacent out-of-order elements.
                    array[j],array[j+1] = array[j+1],array[j]
                    swapHappend = True
    display(times,array)
    return
# ------------------------- Insertion Sort ------------------------
def InsertionSort(array):
    """In-place insertion sort; reports the shift count via display()."""
    shifts = 0
    for j in range(len(array)):
        key = array[j]
        i = j - 1
        # Slide larger elements one slot right until key's spot is found.
        while i >= 0 and array[i] > key:
            array[i + 1] = array[i]
            i = i - 1
            shifts = shifts + 1
        array[i + 1] = key
    display(shifts, array)
    return
##def PrintArray(array):
## for x in range(len(array)):
## print(str(array[x]) + " ",end="")
## print()
##
##def TestIntegerArray():
## iarr = [10,3,8,1,99,5,-1]
## PrintArray(iarr)
## InsertionSort(iarr)
## PrintArray(iarr)
##
##def TestStringArray():
## sarr = ["Delhi","Sydney","California","Singapore"]
## PrintArray(sarr)
## InsertionSort(sarr)
## PrintArray(sarr)
##
##if __name__ == "__main__":
## TestIntegerArray()
## TestStringArray()
# ------------------------- Selection Sort ------------------------
##def SelectionSort(array):
##
##
##
## return times, array
# -------------------------- Main Program -------------------------
def display(iterations, array):
    """Print a sort's result: the array followed by its iteration count."""
    print("After sort:-")
    print(array)
    message = "It takes {} iterations to accomplish the task".format(iterations)
    print(message)
    return
def main():
    """Run both sorts on identical sample data, printing before/after."""
    sample = [1, 7, 4, 9, 4, 7, 2, 3, 0, 8]
    print("Before sort:-")
    print(sample)
    bubbleSort(sample)
    # Fresh, unsorted copy so the second sort does comparable work.
    sample = [1, 7, 4, 9, 4, 7, 2, 3, 0, 8]
    print("Before sort:-")
    print(sample)
    InsertionSort(sample)

main()
| {
"repo_name": "itsallvoodoo/csci-school",
"path": "CSCI220/Week 11 - MAR26-30/120328_Demonstration.py",
"copies": "1",
"size": "2284",
"license": "apache-2.0",
"hash": 894293942618564700,
"line_mean": 15.303030303,
"line_max": 68,
"alpha_frac": 0.4343257443,
"autogenerated": false,
"ratio": 3.3294460641399417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9161554012485282,
"avg_score": 0.020443559190931826,
"num_lines": 132
} |
# 12/07/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 Compatibility changes
#
import wx
from wx.lib.evtmgr import eventManager
class FoldOutWindow(wx.PopupWindow):
    """Popup strip of bitmap buttons used by FoldOutMenu.

    Hides itself after a button click or when the pointer leaves its
    on-screen rectangle.
    """
    def __init__(self,parent,style=0):
        wx.PopupWindow.__init__(self,parent,style)
        self.SetAutoLayout(True)
        self.sizer=wx.BoxSizer(wx.HORIZONTAL)
        self.SetSizer(self.sizer, deleteOld=False)
        self.handlers={}        # button id -> callback
        self.InitColors()
        self.inWindow=False     # True while the pointer is inside us
        self.Bind(wx.EVT_ENTER_WINDOW, self.evEnter)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.evLeave)
    def InitColors(self):
        """Match the popup's background to the system window colour."""
        faceClr = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
        self.SetBackgroundColour(faceClr)
    def AddButton(self,bitmap,handler=None):
        """Append a bitmap button; *handler* (if given) runs on click.

        Returns the new button's id.
        """
        id=wx.NewId()
        btn=wx.BitmapButton(self,id,bitmap)
        self.sizer.Add(btn, 1, wx.ALIGN_CENTER|wx.ALL|wx.EXPAND, 2)
        self.Bind(wx.EVT_BUTTON, self.OnBtnClick, btn)
        self.sizer.Fit(self)
        self.Layout()
        if handler:
            self.handlers[id]=handler
        return id
    def Popup(self):
        """Show the popup if it is not already visible."""
        if not self.IsShown():
            self.Show()
    def OnBtnClick(self,event):
        """Dispatch to the registered handler, then hide the popup."""
        id=event.GetEventObject().GetId()
        # Bug fix: dict.has_key() was removed in Python 3; 'in' works on
        # both Python 2 and 3.
        if id in self.handlers:
            self.handlers[id](event)
        self.Hide()
        self.inWindow=False
        event.Skip()
    def evEnter(self,event):
        self.inWindow=True
        self.rect=self.GetRect()    # screen rect, consulted by evLeave
        event.Skip()
    def evLeave(self,event):
        # Leave events also fire when entering a child button, so only
        # hide when the pointer has truly left our screen rectangle.
        if self.inWindow:
            if not self.rect.Inside(self.ClientToScreen(event.GetPosition())):
                self.Hide()
        event.Skip()
class FoldOutMenu(wx.BitmapButton):
    """Bitmap button that pops up a FoldOutWindow of extra buttons when clicked."""
    def __init__(self,parent,id,bitmap,pos = wx.DefaultPosition,
                 size = wx.DefaultSize, style = wx.BU_AUTODRAW,
                 validator = wx.DefaultValidator, name = "button"):
        wx.BitmapButton.__init__(self, parent, id, bitmap, pos, size, style,
                                 validator, name)
        self.parent=parent
        self.parent.Bind(wx.EVT_BUTTON, self.click, self)
        self.popwin=FoldOutWindow(self.parent)
    def AddButton(self,bitmap,handler=None):
        """Add a button to the popup window; returns the new button's id."""
        return self.popwin.AddButton(bitmap,handler=handler)
    def click(self,event):
        # Place the popup immediately to the right of this button,
        # vertically centred on it.
        pos=self.GetPosition()
        sz=self.GetSize()
        pos.x=pos.x+sz.width
        pos.y=pos.y+sz.height/2
        self.popwin.Position(pos,sz)
        self.popwin.Popup()
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/foldmenu.py",
"copies": "1",
"size": "2615",
"license": "mit",
"hash": -611627684909905700,
"line_mean": 27.3820224719,
"line_max": 78,
"alpha_frac": 0.5797323136,
"autogenerated": false,
"ratio": 3.5724043715846996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46521366851846996,
"avg_score": null,
"num_lines": null
} |
# 12/09/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
# o I'm a little nervous about some of it though.
#
# 12/20/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxTreeModel -> TreeModel
# o wxMVCTree -> MVCTree
# o wxMVCTreeEvent -> MVCTreeEvent
# o wxMVCTreeNotifyEvent -> MVCTreeNotifyEvent
#
"""
MVCTree is a control which handles hierarchical data. It is constructed
in model-view-controller architecture, so the display of that data, and
the content of the data can be changed greatly without affecting the other parts.
MVCTree actually is even more configurable than MVC normally implies, because
almost every aspect of it is pluggable:
* MVCTree - Overall controller, and the window that actually gets placed in the GUI.
* Painter - Paints the control. The 'view' part of MVC.
* NodePainter - Paints just the nodes
* LinePainter - Paints just the lines between the nodes
* TextConverter - Figures out what text to print for each node
* Editor - Edits the contents of a node, if the model is editable.
* LayoutEngine - Determines initial placement of nodes
* Transform - Adjusts positions of nodes for movement or special effects.
* TreeModel - Contains the data which the rest of the control acts on. The 'model' part of MVC.
Author/Maintainer - Bryn Keller <xoltar@starship.python.net>
.. note::
    This module is *not* supported in any way. Use it however you
    wish, but be warned that dealing with any consequences is
    entirely up to you.
--Robin
"""
#------------------------------------------------------------------------
import os
import sys
import traceback
import warnings
import wx
#------------------------------------------------------------------------
warningmsg = r"""\
################################################\
# This module is not supported in any way! |
# |
# See cource code for wx.lib.mvctree for more |
# information. |
################################################/
"""
warnings.warn(warningmsg, DeprecationWarning, stacklevel=2)
#------------------------------------------------------------------------
class MVCTreeNode:
    """
    Used internally by MVCTree to manage its data. Contains information about
    screen placement, the actual data associated with it, and more. These are
    the nodes passed to all the other helper parts to do their work with.
    """
    def __init__(self, data=None, parent = None, kids = None, x = 0, y = 0):
        # Bug fix: x and y were accepted but unconditionally reset to 0;
        # honour the caller's values (defaults keep the old behaviour).
        self.x = x
        self.y = y
        self.projx = 0      # projected (on-screen) coordinates,
        self.projy = 0      # filled in later by a Transform
        self.parent = parent
        self.kids = kids
        if self.kids is None:
            self.kids = []
        self.data = data
        self.expanded = False
        self.selected = False
        self.built = False
        self.scale = 0
    def GetChildren(self):
        return self.kids
    def GetParent(self):
        return self.parent
    def Remove(self, node):
        """Detach *node* from this node's children (no-op if absent)."""
        try:
            self.kids.remove(node)
        except ValueError:
            # Narrowed from a bare except: list.remove only raises
            # ValueError when the element is missing.
            pass
    def Add(self, node):
        """Append *node* as a child and re-parent it here."""
        self.kids.append(node)
        node.SetParent(self)
    def SetParent(self, parent):
        # Detach from any previous parent before re-parenting.
        if self.parent and not (self.parent is parent):
            self.parent.Remove(self)
        self.parent = parent
    def __str__(self):
        return "Node: " + str(self.data) + " (" + str(self.x) + ", " + str(self.y) + ")"
    def __repr__(self):
        return str(self.data)
    def GetTreeString(self, tabs=0):
        """Return an indented, multi-line dump of this subtree."""
        s = tabs * '\t' + str(self) + '\n'
        for kid in self.kids:
            s = s + kid.GetTreeString(tabs + 1)
        return s
class Editor:
    """Interface for in-place node editors used by MVCTree."""
    def __init__(self, tree):
        self.tree = tree
    def Edit(self, node):
        # Begin editing *node*; concrete editors create the edit widget.
        raise NotImplementedError
    def EndEdit(self, node, commit):
        # Finish editing; *commit* says whether to keep the new value.
        raise NotImplementedError
    def CanEdit(self, node):
        # Return True if this editor can handle *node*.
        raise NotImplementedError
class LayoutEngine:
    """
    Interface for layout engines.

    A layout engine assigns x/y coordinates to every visible node.
    """
    def __init__(self, tree):
        self.tree = tree
    def Layout(self, node):
        # Lay out the subtree rooted at *node*.
        raise NotImplementedError
    def GetNodeList(self):
        # Return the flat list of nodes positioned by the last Layout().
        raise NotImplementedError
class Transform:
    """
    Transform interface.

    Maps laid-out node coordinates (x, y) to projected on-screen
    coordinates (projx, projy).
    """
    def __init__(self, tree):
        self.tree = tree
    def Transform(self, node, offset, rotation):
        """
        This method should only change the projx and projy attributes of
        the node. These represent the position of the node as it should
        be drawn on screen. Adjusting the x and y attributes can and
        should cause havoc.
        """
        raise NotImplementedError
    def GetSize(self):
        """
        Returns the size of the entire tree as laid out and transformed
        as a tuple
        """
        raise NotImplementedError
class Painter:
    """
    This is the interface that MVCTree expects from painters. All painters should
    be Painter subclasses.

    Owns the colours, pens, brushes and font used for drawing.
    NOTE(review): __init__ only sets the colour attributes; the matching
    pens/brushes are created in the Set*Colour methods, so the Get*Pen /
    Get*Brush accessors raise AttributeError until the corresponding
    setter has been called.  OnMouse relies on self.rectangles and
    self.knobs, which concrete painters populate while painting.
    """
    def __init__(self, tree):
        self.tree = tree
        # wx.NamedColour is the wx-2.x spelling, kept for compatibility.
        self.textcolor = wx.NamedColour("BLACK")
        self.bgcolor = wx.NamedColour("WHITE")
        self.fgcolor = wx.NamedColour("BLUE")
        self.linecolor = wx.NamedColour("GREY")
        self.font = wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False)
        self.bmp = None   # double-buffer bitmap, managed by subclasses
    def GetFont(self):
        return self.font
    def SetFont(self, font):
        # A new font invalidates the layout, so trigger a redraw.
        self.font = font
        self.tree.Refresh()
    def GetBuffer(self):
        return self.bmp
    def ClearBuffer(self):
        self.bmp = None
    def Paint(self, dc, node, doubleBuffered=1, paintBackground=1):
        # Subclasses draw the subtree rooted at *node* onto *dc*.
        raise NotImplementedError
    def GetTextColour(self):
        return self.textcolor
    def SetTextColour(self, color):
        self.textcolor = color
        self.textbrush = wx.Brush(color)
        self.textpen = wx.Pen(color, 1, wx.SOLID)
    def GetBackgroundColour(self):
        return self.bgcolor
    def SetBackgroundColour(self, color):
        self.bgcolor = color
        self.bgbrush = wx.Brush(color)
        self.bgpen = wx.Pen(color, 1, wx.SOLID)
    def GetForegroundColour(self):
        return self.fgcolor
    def SetForegroundColour(self, color):
        self.fgcolor = color
        self.fgbrush = wx.Brush(color)
        self.fgpen = wx.Pen(color, 1, wx.SOLID)
    def GetLineColour(self):
        return self.linecolor
    def SetLineColour(self, color):
        self.linecolor = color
        self.linebrush = wx.Brush(color)
        self.linepen = wx.Pen( color, 1, wx.SOLID)
    def GetForegroundPen(self):
        return self.fgpen
    def GetBackgroundPen(self):
        return self.bgpen
    def GetTextPen(self):
        return self.textpen
    def GetForegroundBrush(self):
        return self.fgbrush
    def GetBackgroundBrush(self):
        return self.bgbrush
    def GetTextBrush(self):
        return self.textbrush
    def GetLinePen(self):
        return self.linepen
    def GetLineBrush(self):
        return self.linebrush
    def OnMouse(self, evt):
        """Hit-test clicks against the painted node rectangles and knobs."""
        if evt.LeftDClick():
            # Double-click: open the in-place editor for the hit node.
            x, y = self.tree.CalcUnscrolledPosition(evt.GetX(), evt.GetY())
            for item in self.rectangles:
                if item[1].Contains((x,y)):
                    self.tree.Edit(item[0].data)
                    self.tree.OnNodeClick(item[0], evt)
                    return
        elif evt.ButtonDown():
            # Single click: select a node, or toggle an expand/collapse knob.
            x, y = self.tree.CalcUnscrolledPosition(evt.GetX(), evt.GetY())
            for item in self.rectangles:
                if item[1].Contains((x, y)):
                    self.tree.OnNodeClick(item[0], evt)
                    return
            for item in self.knobs:
                if item[1].Contains((x, y)):
                    self.tree.OnKnobClick(item[0])
                    return
        evt.Skip()
class TreeModel:
    """
    Interface for tree models

    The AddChild/RemoveNode/InsertChild base implementations notify the
    attached tree (if any) so the view can refresh; subclasses should
    call them after updating their own storage.
    """
    def GetRoot(self):
        raise NotImplementedError
    def SetRoot(self, root):
        raise NotImplementedError
    def GetChildCount(self, node):
        raise NotImplementedError
    def GetChildAt(self, node, index):
        raise NotImplementedError
    def GetParent(self, node):
        raise NotImplementedError
    def AddChild(self, parent, child):
        # Notify the view, if one has attached itself as self.tree.
        if hasattr(self, 'tree') and self.tree:
            self.tree.NodeAdded(parent, child)
    def RemoveNode(self, child):
        if hasattr(self, 'tree') and self.tree:
            self.tree.NodeRemoved(child)
    def InsertChild(self, parent, child, index):
        if hasattr(self, 'tree') and self.tree:
            self.tree.NodeInserted(parent, child, index)
    def IsLeaf(self, node):
        raise NotImplementedError
    def IsEditable(self, node):
        return False
    def SetEditable(self, node):
        return False
class NodePainter:
    """
    This is the interface expected of a nodepainter.

    A node painter draws a single node on behalf of its owning Painter.
    """
    def __init__(self, painter):
        self.painter = painter
    def Paint(self, node, dc, location = None):
        """
        location should be provided only to draw in an unusual position
        (not the node's normal position), otherwise the node's projected x and y
        coordinates will be used.
        """
        raise NotImplementedError
class LinePainter:
    """
    The linepainter interface.

    Draws the connector between a parent node and one child.
    """
    def __init__(self, painter):
        self.painter = painter
    def Paint(self, parent, child, dc):
        raise NotImplementedError
class TextConverter:
    """
    TextConverter interface.

    Subclasses turn an MVCTreeNode into the string drawn for it.
    """
    def __init__(self, painter):
        self.painter = painter
    def Convert(self, node):
        """
        Should return a string. The node argument will be an
        MVCTreeNode.
        """
        # Bug fix: 'self' was missing from the signature, so calling the
        # unimplemented base method raised TypeError (wrong arg count)
        # instead of the intended NotImplementedError.
        raise NotImplementedError
class BasicTreeModel(TreeModel):
    """
    A very simple treemodel implementation, but flexible enough for many needs.

    Parent/child relationships are stored in two dicts keyed by the
    (hashable) node objects themselves.
    """
    def __init__(self):
        self.children = {}   # node -> list of child nodes
        self.parents = {}    # node -> parent node
        self.root = None
    def GetRoot(self):
        return self.root
    def SetRoot(self, root):
        self.root = root
    def GetChildCount(self, node):
        # Bug fix: dict.has_key() was removed in Python 3; 'in' works on
        # both Python 2 and 3 (same change throughout this class).
        if node in self.children:
            return len(self.children[node])
        else:
            return 0
    def GetChildAt(self, node, index):
        return self.children[node][index]
    def GetParent(self, node):
        return self.parents[node]
    def AddChild(self, parent, child):
        """Append *child* under *parent* and notify the view."""
        self.parents[child]=parent
        if parent not in self.children:
            self.children[parent]=[]
        self.children[parent].append(child)
        TreeModel.AddChild(self, parent, child)
        return child
    def RemoveNode(self, node):
        """Detach *node* from its parent and notify the view."""
        parent = self.parents[node]
        del self.parents[node]
        self.children[parent].remove(node)
        TreeModel.RemoveNode(self, node)
    def InsertChild(self, parent, child, index):
        """Insert *child* at *index* under *parent* and notify the view."""
        self.parents[child]=parent
        if parent not in self.children:
            self.children[parent]=[]
        # Bug fix: list.insert takes (index, object); the arguments were
        # swapped, which raised TypeError for any non-integer child.
        self.children[parent].insert(index, child)
        TreeModel.InsertChild(self, parent, child, index)
        return child
    def IsLeaf(self, node):
        # A node with no recorded children is a leaf.
        return node not in self.children
    def IsEditable(self, node):
        return False
    def SetEditable(self, node, bool):
        return False
class FileEditor(Editor):
    """In-place renaming editor for FileWrapper nodes (a floating wx.TextCtrl)."""
    def Edit(self, node):
        """Pop up a text control over *node*'s on-screen rectangle."""
        treenode = self.tree.nodemap[node]
        # Bug fix: 'wxTextCtrl' is the pre-namespace wxPython spelling and
        # is undefined in this module; wx.TextCtrl is what was intended.
        self.editcomp = wx.TextCtrl(self.tree, -1)
        for rect in self.tree.painter.rectangles:
            if rect[0] == treenode:
                self.editcomp.SetPosition((rect[1][0], rect[1][1]))
                break
        self.editcomp.SetValue(node.fileName)
        self.editcomp.SetSelection(0, len(node.fileName))
        self.editcomp.SetFocus()
        self.treenode = treenode
        # self.editcomp.Bind(wx.EVT_KEY_DOWN, self._key)
        self.editcomp.Bind(wx.EVT_KEY_UP, self._key)
        self.editcomp.Bind(wx.EVT_LEFT_DOWN, self._mdown)
        self.editcomp.CaptureMouse()
    def CanEdit(self, node):
        # Only filesystem nodes can be renamed.
        return isinstance(node, FileWrapper)
    def EndEdit(self, commit):
        """Tear the editor down; on commit, rename the file on disk."""
        if not self.tree._EditEnding(self.treenode.data):
            return
        if commit:
            node = self.treenode.data
            try:
                os.rename(node.path + os.sep + node.fileName, node.path + os.sep + self.editcomp.GetValue())
                node.fileName = self.editcomp.GetValue()
            except OSError:
                # Narrowed from a bare except: rename can legitimately
                # fail (permissions, name clash) — report, keep old name.
                traceback.print_exc()
        self.editcomp.ReleaseMouse()
        self.editcomp.Destroy()
        del self.editcomp
        self.tree.Refresh()
    def _key(self, evt):
        # Return commits the rename; Escape cancels it.
        if evt.GetKeyCode() == wx.WXK_RETURN:
            self.EndEdit(True)
        elif evt.GetKeyCode() == wx.WXK_ESCAPE:
            self.EndEdit(False)
        else:
            evt.Skip()
    def _mdown(self, evt):
        # A click outside the edit control cancels the edit.
        if evt.IsButton():
            x, y = evt.GetPosition()
            w, h = self.editcomp.GetSize()
            if x < 0 or y < 0 or x > w or y > h:
                self.EndEdit(False)
class FileWrapper:
    """
    Node class for FSTreeModel.

    A lightweight (directory, name) pair whose display text is the name.
    """
    def __init__(self, path, fileName):
        self.path = path          # directory containing the entry
        self.fileName = fileName  # entry name within that directory
    def __str__(self):
        return self.fileName
class FSTreeModel(BasicTreeModel):
    """
    This treemodel models the filesystem starting from a given path.

    The whole directory tree is scanned eagerly in __init__; see
    LateFSTreeModel for a lazy variant.
    """
    def __init__(self, path):
        BasicTreeModel.__init__(self)
        fw = FileWrapper(path, path.split(os.sep)[-1])
        self._Build(path, fw)
        self.SetRoot(fw)
        self._editable = True
    def _Build(self, path, fileWrapper):
        # Recursively wrap every entry below *path* as a FileWrapper child.
        for name in os.listdir(path):
            fw = FileWrapper(path, name)
            self.AddChild(fileWrapper, fw)
            childName = path + os.sep + name
            if os.path.isdir(childName):
                self._Build(childName, fw)
    def IsEditable(self, node):
        return self._editable
    def SetEditable(self, node, bool):
        # Note: editability is model-wide here, not per-node.
        self._editable = bool
class LateFSTreeModel(FSTreeModel):
    """
    This treemodel models the filesystem starting from a given path.
    It retrieves the directory list as requested: each directory is read
    lazily on the first GetChildCount() call for its node.
    """
    def __init__(self, path):
        BasicTreeModel.__init__(self)
        name = path.split(os.sep)[-1]
        pathpart = path[:-len(name)]
        fw = FileWrapper(pathpart, name)
        self._Build(path, fw)
        self.SetRoot(fw)
        self._editable = True
        # Discard the eagerly-built level: children are re-read lazily.
        self.children = {}
        self.parents = {}
    def _Build(self, path, parent):
        # Populate just one directory level below *parent*.
        ppath = parent.path + os.sep + parent.fileName
        if not os.path.isdir(ppath):
            return
        for name in os.listdir(ppath):
            fw = FileWrapper(ppath, name)
            self.AddChild(parent, fw)
    def GetChildCount(self, node):
        # Bug fix: dict.has_key() was removed in Python 3; 'in' works on
        # both Python 2 and 3.
        if node in self.children:
            return FSTreeModel.GetChildCount(self, node)
        else:
            self._Build(node.path, node)
            return FSTreeModel.GetChildCount(self, node)
    def IsLeaf(self, node):
        # Only directories can have children.
        return not os.path.isdir(node.path + os.sep + node.fileName)
class StrTextConverter(TextConverter):
    """Default converter: a node is displayed as str(node.data)."""
    def Convert(self, node):
        return str(node.data)
class NullTransform(Transform):
    """Transform that applies only the scroll offset — no real projection.

    Note: GetSize is only meaningful after Transform has run once, since
    self.size is computed there.
    """
    def GetSize(self):
        return tuple(self.size)
    def Transform(self, node, offset, rotation):
        self.size = [0, 0]
        for laid_out in self.tree.GetLayoutEngine().GetNodeList():
            laid_out.projx = laid_out.x + offset[0]
            laid_out.projy = laid_out.y + offset[1]
            # Track the furthest extents to report as the overall size.
            self.size[0] = max(self.size[0], laid_out.projx)
            self.size[1] = max(self.size[1], laid_out.projy)
class Rect(object):
    """Minimal mutable rectangle with index access and containment tests."""

    _FIELDS = ('x', 'y', 'width', 'height')

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def __getitem__(self, index):
        # Expose (x, y, width, height) positionally.
        return (self.x, self.y, self.width, self.height)[index]

    def __setitem__(self, index, value):
        setattr(self, self._FIELDS[index], value)

    def Contains(self, other):
        """True if *other* (a Rect or an (x, y) tuple) lies wholly inside self."""
        if type(other) == type(()):
            # Treat a bare point as a zero-size rectangle.
            other = Rect(other[0], other[1], 0, 0)
        return (other.x >= self.x
                and other.y >= self.y
                and other.x + other.width <= self.x + self.width
                and other.y + other.height <= self.y + self.height)

    def __str__(self):
        return "Rect: " + str([self.x, self.y, self.width, self.height])
class TreeLayout(LayoutEngine):
    """Classic indented-outline layout: one node per row, children indented."""
    def SetHeight(self, num):
        self.NODE_HEIGHT = num
    def __init__(self, tree):
        LayoutEngine.__init__(self, tree)
        self.NODE_STEP = 20     # horizontal indent per tree level
        self.NODE_HEIGHT = 20   # vertical distance between rows
        self.nodelist = []
    def Layout(self, node):
        """Assign x/y to every expanded node reachable from *node*."""
        self.nodelist = []
        # Row height tracks the current font size.
        self.NODE_HEIGHT = self.tree.GetFont().GetPointSize() * 2
        self.layoutwalk(node)
    def GetNodeList(self):
        return self.nodelist
    def layoutwalk(self, node):
        # Pre-order walk; self.lastY carries the y of the previous row so
        # each visited node lands one NODE_HEIGHT below it.
        if node == self.tree.currentRoot:
            node.level = 1
            self.lastY = (-self.NODE_HEIGHT)
        node.x = self.NODE_STEP * node.level
        node.y = self.lastY + self.NODE_HEIGHT
        self.lastY = node.y
        self.nodelist.append(node)
        if node.expanded:
            for kid in node.kids:
                kid.level = node.level + 1
                self.layoutwalk(kid)
class TreePainter(Painter):
    """
    The default painter class. Uses double-buffering, delegates the painting of nodes and
    lines to helper classes deriving from NodePainter and LinePainter.
    """
    def __init__(self, tree, nodePainter = None, linePainter = None, textConverter = None):
        Painter.__init__(self, tree)
        # Default helpers are created lazily here if none were supplied.
        if not nodePainter:
            nodePainter = TreeNodePainter(self)
        self.nodePainter = nodePainter
        if not linePainter:
            linePainter = TreeLinePainter(self)
        self.linePainter = linePainter
        if not textConverter:
            textConverter = StrTextConverter(self)
        self.textConverter = textConverter
        self.charWidths = []   # cached widths of 0..24 'D' characters
    def Paint(self, dc, node, doubleBuffered=1, paintBackground=1):
        """Paint the subtree rooted at *node*, optionally double-buffered."""
        # Cache text metrics on first use.
        if not self.charWidths:
            self.charWidths = []
            for i in range(25):
                self.charWidths.append(dc.GetTextExtent("D")[0] * i)
            self.charHeight = dc.GetTextExtent("D")[1]
        # (Re)build pens/brushes from the current colour settings.
        self.textpen = wx.Pen(self.GetTextColour(), 1, wx.SOLID)
        self.fgpen = wx.Pen(self.GetForegroundColour(), 1, wx.SOLID)
        self.bgpen = wx.Pen(self.GetBackgroundColour(), 1, wx.SOLID)
        self.linepen = wx.Pen(self.GetLineColour(), 1, wx.SOLID)
        self.dashpen = wx.Pen(self.GetLineColour(), 1, wx.DOT)
        self.textbrush = wx.Brush(self.GetTextColour(), wx.SOLID)
        self.fgbrush = wx.Brush(self.GetForegroundColour(), wx.SOLID)
        self.bgbrush = wx.Brush(self.GetBackgroundColour(), wx.SOLID)
        self.linebrush = wx.Pen(self.GetLineColour(), 1, wx.SOLID)
        treesize = self.tree.GetSize()
        size = self.tree.transform.GetSize()
        # Pad so the drawing area at least fills the visible control.
        size = (max(treesize.width, size[0]+50), max(treesize.height, size[1]+50))
        dc.BeginDrawing()
        if doubleBuffered:
            # Render into a memory bitmap, then blit the visible part.
            mem_dc = wx.MemoryDC()
            if not self.GetBuffer():
                self.knobs = []
                self.rectangles = []
                self.bmp = wx.EmptyBitmap(size[0], size[1])
                mem_dc.SelectObject(self.GetBuffer())
                mem_dc.SetPen(self.GetBackgroundPen())
                mem_dc.SetBrush(self.GetBackgroundBrush())
                mem_dc.DrawRectangle(0, 0, size[0], size[1])
                mem_dc.SetFont(self.tree.GetFont())
                self.paintWalk(node, mem_dc)
            else:
                mem_dc.SelectObject(self.GetBuffer())
            xstart, ystart = self.tree.CalcUnscrolledPosition(0,0)
            size = self.tree.GetClientSizeTuple()
            dc.Blit(xstart, ystart, size[0], size[1], mem_dc, xstart, ystart)
        else:
            if node == self.tree.currentRoot:
                self.knobs = []
                self.rectangles = []
            dc.SetPen(self.GetBackgroundPen())
            dc.SetBrush(self.GetBackgroundBrush())
            dc.SetFont(self.tree.GetFont())
            if paintBackground:
                dc.DrawRectangle(0, 0, size[0], size[1])
            if node:
                #Call with not paintBackground because if we are told not to paint the
                #whole background, we have to paint in parts to undo selection coloring.
                pb = paintBackground
                self.paintWalk(node, dc, not pb)
        dc.EndDrawing()
    def GetDashPen(self):
        return self.dashpen
    def SetLinePen(self, pen):
        Painter.SetLinePen(self, pen)
        self.dashpen = wx.Pen(pen.GetColour(), 1, wx.DOT)
    def paintWalk(self, node, dc, paintRects=0):
        """Recursively paint connector lines, nodes, and expand knobs."""
        self.linePainter.Paint(node.parent, node, dc)
        self.nodePainter.Paint(node, dc, drawRects = paintRects)
        if node.expanded:
            for kid in node.kids:
                if not self.paintWalk(kid, dc, paintRects):
                    return False
            for kid in node.kids:
                # Draw the +/- knob to the left of each expandable child.
                px = (kid.projx - self.tree.layout.NODE_STEP) + 5
                py = kid.projy + kid.height/2
                if (not self.tree.model.IsLeaf(kid.data)) or ((kid.expanded or self.tree._assumeChildren) and len(kid.kids)):
                    dc.SetPen(self.linepen)
                    dc.SetBrush(self.bgbrush)
                    dc.DrawRectangle(px -4, py-4, 9, 9)
                    self.knobs.append( (kid, Rect(px -4, py -4, 9, 9)) )
                    dc.SetPen(self.textpen)
                    if not kid.expanded:
                        # Collapsed: add the '+' cross inside the knob box.
                        dc.DrawLine(px, py -2, px, py + 3)
                        dc.DrawLine(px -2, py, px + 3, py)
        if node == self.tree.currentRoot:
            # The root gets its own knob to the left of its label.
            px = (node.projx - self.tree.layout.NODE_STEP) + 5
            py = node.projy + node.height/2
            dc.SetPen(self.linepen)
            dc.SetBrush(self.bgbrush)
            dc.DrawRectangle(px -4, py-4, 9, 9)
            self.knobs.append( (node, Rect(px -4, py -4, 9, 9)) )
            dc.SetPen(self.textpen)
            if not node.expanded:
                dc.DrawLine(px, py -2, px, py + 3)
                dc.DrawLine(px -2, py, px + 3, py)
        return True
    def OnMouse(self, evt):
        Painter.OnMouse(self, evt)
class TreeNodePainter(NodePainter):
    """Paints a node as its converted text, highlighted when selected."""
    def Paint(self, node, dc, location = None, drawRects = 0):
        text = self.painter.textConverter.Convert(node)
        extent = dc.GetTextExtent(text)
        # Record the measured size on the node for layout/hit-testing.
        node.width = extent[0]
        node.height = extent[1]
        if node.selected:
            # Selected: foreground-coloured box with white text.
            dc.SetPen(self.painter.GetLinePen())
            dc.SetBrush(self.painter.GetForegroundBrush())
            dc.SetTextForeground(wx.NamedColour("WHITE"))
            dc.DrawRectangle(node.projx -1, node.projy -1, node.width + 3, node.height + 3)
        else:
            if drawRects:
                # Repaint the background rect to erase stale selection colour.
                dc.SetBrush(self.painter.GetBackgroundBrush())
                dc.SetPen(self.painter.GetBackgroundPen())
                dc.DrawRectangle(node.projx -1, node.projy -1, node.width + 3, node.height + 3)
            dc.SetTextForeground(self.painter.GetTextColour())
        dc.DrawText(text, node.projx, node.projy)
        # Register the drawn rect so the painter can hit-test clicks.
        self.painter.rectangles.append((node, Rect(node.projx, node.projy, node.width, node.height)))
class TreeLinePainter(LinePainter):
    """Draws dotted elbow connectors between parent and child nodes."""
    def Paint(self, parent, child, dc):
        dc.SetPen(self.painter.GetDashPen())
        px = py = cx = cy = 0
        if parent is None or child == self.painter.tree.currentRoot:
            # Root row: just a short horizontal stub from the knob area.
            px = (child.projx - self.painter.tree.layout.NODE_STEP) + 5
            py = child.projy + self.painter.tree.layout.NODE_HEIGHT/2 -2
            cx = child.projx
            cy = py
            dc.DrawLine(px, py, cx, cy)
        else:
            # Elbow: down from under the parent, then across to the child.
            px = parent.projx + 5
            py = parent.projy + parent.height
            cx = child.projx -5
            cy = child.projy + self.painter.tree.layout.NODE_HEIGHT/2 -3
            dc.DrawLine(px, py, px, cy)
            dc.DrawLine(px, cy, cx, cy)
#>> Event defs
wxEVT_MVCTREE_BEGIN_EDIT = wx.NewEventType() #Start editing. Vetoable.
wxEVT_MVCTREE_END_EDIT = wx.NewEventType() #Stop editing. Vetoable.
wxEVT_MVCTREE_DELETE_ITEM = wx.NewEventType() #Item removed from model.
wxEVT_MVCTREE_ITEM_EXPANDED = wx.NewEventType() #Item has been expanded.
wxEVT_MVCTREE_ITEM_EXPANDING = wx.NewEventType() #Item about to expand. Vetoable.
wxEVT_MVCTREE_ITEM_COLLAPSED = wx.NewEventType() #Item has been collapsed.
wxEVT_MVCTREE_ITEM_COLLAPSING = wx.NewEventType() #Item about to collapse. Vetoable.
wxEVT_MVCTREE_SEL_CHANGED = wx.NewEventType() #Selection has changed.
wxEVT_MVCTREE_SEL_CHANGING = wx.NewEventType() #Vetoable.
wxEVT_MVCTREE_KEY_DOWN = wx.NewEventType() #Key pressed while the tree has focus.
wxEVT_MVCTREE_ADD_ITEM = wx.NewEventType() #Item added to model.
# Binder objects for the event types above, for use with wx.Window.Bind.
EVT_MVCTREE_SEL_CHANGED = wx.PyEventBinder(wxEVT_MVCTREE_SEL_CHANGED, 1)
EVT_MVCTREE_SEL_CHANGING = wx.PyEventBinder(wxEVT_MVCTREE_SEL_CHANGING, 1)
EVT_MVCTREE_ITEM_EXPANDED = wx.PyEventBinder(wxEVT_MVCTREE_ITEM_EXPANDED, 1)
EVT_MVCTREE_ITEM_EXPANDING = wx.PyEventBinder(wxEVT_MVCTREE_ITEM_EXPANDING, 1)
EVT_MVCTREE_ITEM_COLLAPSED = wx.PyEventBinder(wxEVT_MVCTREE_ITEM_COLLAPSED, 1)
EVT_MVCTREE_ITEM_COLLAPSING = wx.PyEventBinder(wxEVT_MVCTREE_ITEM_COLLAPSING, 1)
EVT_MVCTREE_ADD_ITEM = wx.PyEventBinder(wxEVT_MVCTREE_ADD_ITEM, 1)
EVT_MVCTREE_DELETE_ITEM = wx.PyEventBinder(wxEVT_MVCTREE_DELETE_ITEM, 1)
EVT_MVCTREE_KEY_DOWN = wx.PyEventBinder(wxEVT_MVCTREE_KEY_DOWN, 1)
class MVCTreeEvent(wx.PyCommandEvent):
    """Command event carrying the tree node(s), and optionally the key
    event, that an MVCTree notification refers to."""
    def __init__(self, type, id, node = None, nodes = None, keyEvent = None, **kwargs):
        # Direct base-class call instead of the deprecated apply() builtin.
        wx.PyCommandEvent.__init__(self, type, id, **kwargs)
        self.node = node
        self.nodes = nodes
        self.keyEvent = keyEvent
    def GetNode(self):
        return self.node
    def GetNodes(self):
        return self.nodes
    def GetKeyEvent(self):
        # wx-style accessor name, consistent with GetNode/GetNodes.
        return self.keyEvent
    # Backward-compatible alias: the original accessor was lower-cased.
    getKeyEvent = GetKeyEvent
class MVCTreeNotifyEvent(MVCTreeEvent):
    """MVCTreeEvent variant wrapping a wx.NotifyEvent so that handlers can
    veto the pending change (via getNotifyEvent().Veto()/IsAllowed())."""
    def __init__(self, type, id, node = None, nodes = None, **kwargs):
        # Direct base-class call instead of the deprecated apply() builtin.
        MVCTreeEvent.__init__(self, type, id, node, nodes, **kwargs)
        self.notify = wx.NotifyEvent(type, id)
    def getNotifyEvent(self):
        return self.notify
class MVCTree(wx.ScrolledWindow):
    """
    The main mvc tree class.

    Combines a tree model with pluggable layout, transform and painter
    objects, and fires the wxEVT_MVCTREE_* events defined above.
    """
    def __init__(self, parent, id, model = None, layout = None, transform = None,
                 painter = None, *args, **kwargs):
        # Direct base-class call instead of the deprecated apply() builtin.
        wx.ScrolledWindow.__init__(self, parent, id, **kwargs)
        self.nodemap = {}           # model data object -> MVCTreeNode
        self._multiselect = False
        self._selections = []       # selected model data objects
        self._assumeChildren = False
        self._scrollx = False
        self._scrolly = False
        self.doubleBuffered = False
        self._lastPhysicalSize = self.GetSize()
        self._editors = []
        # Fix: _currentEditor was never initialised, so EndEdit() could
        # raise AttributeError before the first Edit() call.
        self._currentEditor = None
        if not model:
            model = BasicTreeModel()
            model.SetRoot("Root")
        self.SetModel(model)
        if not layout:
            layout = TreeLayout(self)
        self.layout = layout
        if not transform:
            transform = NullTransform(self)
        self.transform = transform
        if not painter:
            painter = TreePainter(self)
        self.painter = painter
        self.SetFont(wx.Font(9, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False))
        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self.doubleBuffered = True
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
    def Refresh(self):
        # Invalidate the painter's off-screen buffer before repainting.
        if self.doubleBuffered:
            self.painter.ClearBuffer()
        wx.ScrolledWindow.Refresh(self, False)
    def GetPainter(self):
        return self.painter
    def GetLayoutEngine(self):
        return self.layout
    def GetTransform(self):
        return self.transform
    def __repr__(self):
        return "<MVCTree instance at %s>" % str(hex(id(self)))
    def __str__(self):
        return self.__repr__()
    def NodeAdded(self, parent, child):
        # Model callback: a node was appended; broadcast and repaint.
        e = MVCTreeEvent(wxEVT_MVCTREE_ADD_ITEM, self.GetId(), node = child, nodes = [parent, child])
        self.GetEventHandler().ProcessEvent(e)
        self.painter.ClearBuffer()
    def NodeInserted(self, parent, child, index):
        # Model callback: a node was inserted at index (index itself is
        # not needed for the notification).
        e = MVCTreeEvent(wxEVT_MVCTREE_ADD_ITEM, self.GetId(), node = child, nodes = [parent, child])
        self.GetEventHandler().ProcessEvent(e)
        self.painter.ClearBuffer()
    def NodeRemoved(self, node):
        # Fix: the original referenced the undefined names 'child' and
        # 'parent' here, raising NameError whenever a node was removed.
        e = MVCTreeEvent(wxEVT_MVCTREE_DELETE_ITEM, self.GetId(), node = node, nodes = [node])
        self.GetEventHandler().ProcessEvent(e)
        self.painter.ClearBuffer()
    def OnKeyDown(self, evt):
        e = MVCTreeEvent(wxEVT_MVCTREE_KEY_DOWN, self.GetId(), keyEvent = evt)
        self.GetEventHandler().ProcessEvent(e)
    def SetFont(self, font):
        self.painter.SetFont(font)
        dc = wx.ClientDC(self)
        dc.SetFont(font)
        self.layout.SetHeight(dc.GetTextExtent("")[1] + 18)
        self.painter.ClearBuffer()
    def GetFont(self):
        return self.painter.GetFont()
    def AddEditor(self, editor):
        self._editors.append(editor)
    def RemoveEditor(self, editor):
        self._editors.remove(editor)
    def OnMouse(self, evt):
        self.painter.OnMouse(evt)
    def OnNodeClick(self, node, mouseEvent):
        # Ctrl-clicking an already-selected node in multiselect mode
        # toggles it out of the selection.
        if node.selected and (self.IsMultiSelect() and mouseEvent.ControlDown()):
            self.RemoveFromSelection(node.data)
        else:
            self.AddToSelection(node.data, mouseEvent.ControlDown(), mouseEvent.ShiftDown())
    def OnKnobClick(self, node):
        self.SetExpanded(node.data, not node.expanded)
    def GetDisplayText(self, node):
        treenode = self.nodemap[node]
        return self.painter.textConverter.Convert(treenode)
    def IsDoubleBuffered(self):
        return self.doubleBuffered
    def SetDoubleBuffered(self, bool):
        """
        By default MVCTree is double-buffered.
        """
        self.doubleBuffered = bool
    def GetModel(self):
        return self.model
    def SetModel(self, model):
        """
        Completely change the data to be displayed.
        """
        self.model = model
        model.tree = self
        self.laidOut = 0
        self.transformed = 0
        self._selections = []
        self.layoutRoot = MVCTreeNode()
        self.layoutRoot.data = self.model.GetRoot()
        self.layoutRoot.expanded = True
        self.LoadChildren(self.layoutRoot)
        self.currentRoot = self.layoutRoot
        self.offset = [0,0]
        self.rotation = 0
        self._scrollset = None
        self.Refresh()
    def GetCurrentRoot(self):
        return self.currentRoot
    def LoadChildren(self, layoutNode):
        # Build MVCTreeNode wrappers for the model children of layoutNode,
        # recursing unless children are merely assumed to exist.
        if layoutNode.built:
            return
        else:
            self.nodemap[layoutNode.data]=layoutNode
            for i in range(self.GetModel().GetChildCount(layoutNode.data)):
                p = MVCTreeNode("RAW", layoutNode, [])
                layoutNode.Add(p)
                p.data = self.GetModel().GetChildAt(layoutNode.data, i)
                self.nodemap[p.data]=p
            layoutNode.built = True
            if not self._assumeChildren:
                for kid in layoutNode.kids:
                    self.LoadChildren(kid)
    def OnEraseBackground(self, evt):
        # Intentionally empty: painting is double-buffered, so erasing the
        # background here would only cause flicker.
        pass
    def OnSize(self, evt):
        size = self.GetSize()
        self.center = (size.width/2, size.height/2)
        if self._lastPhysicalSize.width < size.width or self._lastPhysicalSize.height < size.height:
            self.painter.ClearBuffer()
        self._lastPhysicalSize = size
    def GetSelection(self):
        "Returns a tuple of selected nodes."
        return tuple(self._selections)
    def SetSelection(self, nodeTuple):
        # Replace the entire selection with nodeTuple; vetoable.
        if not isinstance(nodeTuple, tuple):
            nodeTuple = (nodeTuple,)
        e = MVCTreeNotifyEvent(wxEVT_MVCTREE_SEL_CHANGING, self.GetId(), nodeTuple[0], nodes = nodeTuple)
        self.GetEventHandler().ProcessEvent(e)
        if not e.notify.IsAllowed():
            return
        for node in nodeTuple:
            treenode = self.nodemap[node]
            treenode.selected = True
        for node in self._selections:
            treenode = self.nodemap[node]
            # Fix: the original set 'node.selected' (the model data object)
            # here, leaving the old tree nodes visibly selected.
            treenode.selected = False
        self._selections = list(nodeTuple)
        e = MVCTreeEvent(wxEVT_MVCTREE_SEL_CHANGED, self.GetId(), nodeTuple[0], nodes = nodeTuple)
        self.GetEventHandler().ProcessEvent(e)
    def IsMultiSelect(self):
        return self._multiselect
    def SetMultiSelect(self, bool):
        self._multiselect = bool
    def IsSelected(self, node):
        return self.nodemap[node].selected
    def Edit(self, node):
        # Start an edit on node using the first editor that accepts it;
        # vetoable via wxEVT_MVCTREE_BEGIN_EDIT.
        if not self.model.IsEditable(node):
            return
        for ed in self._editors:
            if ed.CanEdit(node):
                e = MVCTreeNotifyEvent(wxEVT_MVCTREE_BEGIN_EDIT, self.GetId(), node)
                self.GetEventHandler().ProcessEvent(e)
                if not e.notify.IsAllowed():
                    return
                ed.Edit(node)
                self._currentEditor = ed
                break
    def EndEdit(self):
        if self._currentEditor:
            # Fix: the original referenced the method without calling it,
            # so the editor was never actually closed.
            self._currentEditor.EndEdit()
            self._currentEditor = None
    def _EditEnding(self, node):
        # Editor callback: allow handlers to veto the end of an edit.
        e = MVCTreeNotifyEvent(wxEVT_MVCTREE_END_EDIT, self.GetId(), node)
        self.GetEventHandler().ProcessEvent(e)
        if not e.notify.IsAllowed():
            return False
        self._currentEditor = None
        return True
    def SetExpanded(self, node, bool):
        # Expand or collapse node; both directions are vetoable.  Children
        # are lazily loaded on first expansion.
        treenode = self.nodemap[node]
        if bool:
            e = MVCTreeNotifyEvent(wxEVT_MVCTREE_ITEM_EXPANDING, self.GetId(), node)
            self.GetEventHandler().ProcessEvent(e)
            if not e.notify.IsAllowed():
                return
            if not treenode.built:
                self.LoadChildren(treenode)
        else:
            e = MVCTreeNotifyEvent(wxEVT_MVCTREE_ITEM_COLLAPSING, self.GetId(), node)
            self.GetEventHandler().ProcessEvent(e)
            if not e.notify.IsAllowed():
                return
        treenode.expanded = bool
        e = None
        if treenode.expanded:
            e = MVCTreeEvent(wxEVT_MVCTREE_ITEM_EXPANDED, self.GetId(), node)
        else:
            e = MVCTreeEvent(wxEVT_MVCTREE_ITEM_COLLAPSED, self.GetId(), node)
        self.GetEventHandler().ProcessEvent(e)
        self.layout.Layout(self.currentRoot)
        self.transform.Transform(self.currentRoot, self.offset, self.rotation)
        self.Refresh()
    def IsExpanded(self, node):
        return self.nodemap[node].expanded
    def AddToSelection(self, nodeOrTuple, enableMulti = True, shiftMulti = False):
        # Add node(s) to the selection; vetoable.  In multiselect mode,
        # shiftMulti selects the range of siblings between the previous
        # selection anchor and the clicked node.
        nodeTuple = nodeOrTuple
        if not isinstance(nodeOrTuple, tuple):
            nodeTuple = (nodeOrTuple,)
        e = MVCTreeNotifyEvent(wxEVT_MVCTREE_SEL_CHANGING, self.GetId(), nodeTuple[0], nodes = nodeTuple)
        self.GetEventHandler().ProcessEvent(e)
        if not e.notify.IsAllowed():
            return
        changeparents = []
        if not (self.IsMultiSelect() and (enableMulti or shiftMulti)):
            # Single-select: clear the previous selection first.
            for node in self._selections:
                treenode = self.nodemap[node]
                treenode.selected = False
                changeparents.append(treenode)
            node = nodeTuple[0]
            self._selections = [node]
            treenode = self.nodemap[node]
            changeparents.append(treenode)
            treenode.selected = True
        else:
            if shiftMulti:
                for node in nodeTuple:
                    treenode = self.nodemap[node]
                    oldtreenode = self.nodemap[self._selections[0]]
                    if treenode.parent == oldtreenode.parent:
                        # Select every sibling between (and including) the
                        # anchor and the clicked node.
                        found = 0
                        for kid in oldtreenode.parent.kids:
                            if kid == treenode or kid == oldtreenode:
                                found = not found
                                kid.selected = True
                                self._selections.append(kid.data)
                                changeparents.append(kid)
                            elif found:
                                kid.selected = True
                                self._selections.append(kid.data)
                                changeparents.append(kid)
            else:
                for node in nodeTuple:
                    try:
                        self._selections.index(node)
                    except ValueError:
                        self._selections.append(node)
                        treenode = self.nodemap[node]
                        treenode.selected = True
                        changeparents.append(treenode)
        e = MVCTreeEvent(wxEVT_MVCTREE_SEL_CHANGED, self.GetId(), nodeTuple[0], nodes = nodeTuple)
        self.GetEventHandler().ProcessEvent(e)
        # Repaint only the nodes whose selection state changed.
        dc = wx.ClientDC(self)
        self.PrepareDC(dc)
        for node in changeparents:
            if node:
                self.painter.Paint(dc, node, doubleBuffered = 0, paintBackground = 0)
        self.painter.ClearBuffer()
    def RemoveFromSelection(self, nodeTuple):
        if not isinstance(nodeTuple, tuple):
            nodeTuple = (nodeTuple,)
        changeparents = []
        for node in nodeTuple:
            self._selections.remove(node)
            treenode = self.nodemap[node]
            changeparents.append(treenode)
            treenode.selected = False
        e = MVCTreeEvent(wxEVT_MVCTREE_SEL_CHANGED, self.GetId(), node, nodes = nodeTuple)
        self.GetEventHandler().ProcessEvent(e)
        dc = wx.ClientDC(self)
        self.PrepareDC(dc)
        for node in changeparents:
            if node:
                self.painter.Paint(dc, node, doubleBuffered = 0, paintBackground = 0)
        self.painter.ClearBuffer()
    def GetBackgroundColour(self):
        if hasattr(self, 'painter') and self.painter:
            return self.painter.GetBackgroundColour()
        else:
            return wx.Window.GetBackgroundColour(self)
    def SetBackgroundColour(self, color):
        if hasattr(self, 'painter') and self.painter:
            self.painter.SetBackgroundColour(color)
        else:
            wx.Window.SetBackgroundColour(self, color)
    def GetForegroundColour(self):
        if hasattr(self, 'painter') and self.painter:
            return self.painter.GetForegroundColour()
        else:
            # Fix: the original fell back to GetBackgroundColour here.
            return wx.Window.GetForegroundColour(self)
    def SetForegroundColour(self, color):
        if hasattr(self, 'painter') and self.painter:
            self.painter.SetForegroundColour(color)
        else:
            # Fix: the original fell back to SetBackgroundColour here.
            wx.Window.SetForegroundColour(self, color)
    def SetAssumeChildren(self, bool):
        self._assumeChildren = bool
    def GetAssumeChildren(self):
        return self._assumeChildren
    def OnPaint(self, evt):
        """
        Ensures that the tree has been laid out and transformed, then calls the painter
        to paint the control.
        """
        try:
            self.EnableScrolling(False, False)
            if not self.laidOut:
                self.layout.Layout(self.currentRoot)
                self.laidOut = True
                self.transformed = False
            if not self.transformed:
                self.transform.Transform(self.currentRoot, self.offset, self.rotation)
                self.transformed = True
            # Pad the virtual size so the tree never touches the edges.
            tsize = list(self.transform.GetSize())
            tsize[0] = tsize[0] + 50
            tsize[1] = tsize[1] + 50
            w, h = self.GetSize()
            if tsize[0] > w or tsize[1] > h:
                if not hasattr(self, '_oldsize') or (tsize[0] > self._oldsize[0] or tsize[1] > self._oldsize[1]):
                    self._oldsize = tsize
                    oldstart = self.GetViewStart()
                    self._lastPhysicalSize = self.GetSize()
                    self.SetScrollbars(10, 10, tsize[0]/10, tsize[1]/10)
                    self.Scroll(oldstart[0], oldstart[1])
            dc = wx.PaintDC(self)
            self.PrepareDC(dc)
            dc.SetFont(self.GetFont())
            self.painter.Paint(dc, self.currentRoot, self.doubleBuffered)
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # are not swallowed while repainting.
            traceback.print_exc()
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/mvctree.py",
"copies": "1",
"size": "41690",
"license": "mit",
"hash": -7215666787507909000,
"line_mean": 34.252173913,
"line_max": 125,
"alpha_frac": 0.5684576637,
"autogenerated": false,
"ratio": 3.85376224810501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.492221991180501,
"avg_score": null,
"num_lines": null
} |
# 12/09/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o Updated for wx namespace
#
# 12/18/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxScrolledMessageDialog -> ScrolledMessageDialog
#
import re
import wx
class Layoutf(wx.LayoutConstraints):
    """
    The class Layoutf(wxLayoutConstraints) presents a simplification
    of the wxLayoutConstraints syntax. The name Layoutf is chosen
    because of the similarity with C's printf function.
    Quick Example::
        lc = Layoutf('t=t#1;l=r10#2;r!100;h%h50#1', (self, self.panel))
    is equivalent to::
        lc = wx.LayoutContraints()
        lc.top.SameAs(self, wx.Top)
        lc.left.SameAs(self.panel, wx.Right, 10)
        lc.right.Absolute(100)
        lc.height.PercentOf(self, wx.Height, 50)
    Usage:
        You can give a constraint string to the Layoutf constructor,
    or use the 'pack' method. The following are equivalent::
        lc = Layoutf('t=t#1;l=r#2;r!100;h%h50#1', (self, self.panel))
    and::
        lc = Layoutf()
        lc.pack('t=t#1;l=r#2;r!100;h%h50#1', (self, self.panel))
        Besides 'pack' there's also 'debug_pack' which does not set
    constraints, but prints traditional wxLayoutConstraint calls to
    stdout.
        The calls to the Layoutf constructor and pack methods have
    the following argument list:
        `(constraint_string, objects_tuple)`
    Constraint String syntax:
        Constraint directives are separated by semi-colons. You
    generally (always?) need four directives to completely describe a
    subwindow's location.
        A single directive has either of the following forms:
        1. <own attribute><compare operation>[numerical argument]
           for example ``r!100`` -> lc.right.Absolute(100) )
           and ``w*`` -> lc.width.AsIs()
        2. <own attribute><compare operation>[numerical argument]
           #<compare object nr.>
           for example ``t_10#2`` -> lc.top.Below(<second obj>, 10)
        3. <own attribute><compare operation><compare attribute>
           [numerical argument]#<compare object nr.>
           for example ``w%h50#2`` -> lc.width.PercentOf(<second obj>, wx.Height, 50) and ``t=b#1`` -> lc.top.SameAs(<first obj>, wx.Bottom)
        Which one you need is defined by the <compare operation>
    type. The following take type 1 (no object to compare with):
        * '!': 'Absolute', '?': 'Unconstrained', '*': 'AsIs'
        These take type 2 (need to be compared with another object)
        * '<': 'LeftOf', '>': 'RightOf', '^': 'Above', '_': 'Below'
        These take type 3 (need to be compared to another object
    attribute)
        * '=': 'SameAs', '%': 'PercentOf'
        For all types, the <own attribute> letter can be any of
        * 't': 'top', 'l': 'left', 'b': 'bottom',
        * 'r': 'right', 'h': 'height', 'w': 'width',
        * 'x': 'centreX', 'y': 'centreY'
        If the operation takes an (optional) numerical argument, place it
    in [numerical argument].  For type 3 directives, the <compare
    attribute> letter can be any of
        * 't': 'wxTop', 'l': 'wxLeft', 'b': 'wx.Bottom'
        * 'r': 'wxRight', 'h': 'wxHeight', 'w': 'wx.Width',
        * 'x': 'wxCentreX', 'y': 'wx.CentreY'
        Note that these are the same letters as used for <own attribute>,
    so you'll only need to remember one set. Finally, the object
    whose attribute is referred to, is specified by #<compare object
    nr>, where <compare object nr> is the 1-based (stupid, I know,
    but I've gotten used to it) index of the object in the
    objects_tuple argument.
    Bugs:
        Not entirely happy about the logic in the order of arguments
    after the <compare operation> character.
        Not all wxLayoutConstraint methods are included in the
    syntax. However, the type 3 directives are generally the most
    used. Further excuse: wxWindows layout constraints are at the
    time of this writing not documented.
    """
    # Maps <own attribute> letters to wx.LayoutConstraints attributes.
    attr_d = { 't': 'top', 'l': 'left', 'b': 'bottom',
               'r': 'right', 'h': 'height', 'w': 'width',
               'x': 'centreX', 'y': 'centreY' }
    # Maps <compare operation> characters to constraint method names.
    op_d = { '=': 'SameAs', '%': 'PercentOf', '<': 'LeftOf',
             '>': 'RightOf', '^': 'Above', '_': 'Below',
             '!': 'Absolute', '?': 'Unconstrained', '*': 'AsIs' }
    # Maps <compare attribute> letters to wx edge/dimension constant names.
    cmp_d = { 't': 'wx.Top', 'l': 'wx.Left', 'b': 'wx.Bottom',
              'r': 'wx.Right', 'h': 'wx.Height', 'w': 'wx.Width',
              'x': 'wx.CentreX', 'y': 'wx.CentreY' }
    # Type 1 directive: attribute, operation (!?*), optional number.
    rexp1 = re.compile('^\s*([tlrbhwxy])\s*([!\?\*])\s*(\d*)\s*$')
    # Type 2/3 directive: attribute, operation, optional compare
    # attribute, optional number, '#' and a 1-based object index.
    rexp2 = re.compile('^\s*([tlrbhwxy])\s*([=%<>^_])\s*([tlrbhwxy]?)\s*(\d*)\s*#(\d+)\s*$')
    def __init__(self,pstr=None,winlist=None):
        wx.LayoutConstraints.__init__(self)
        if pstr:
            self.pack(pstr,winlist)
    def pack(self, pstr, winlist):
        # Parse each ';'-separated directive of pstr and apply the
        # corresponding constraint method against the windows in winlist.
        pstr = pstr.lower()
        for item in pstr.split(';'):
            m = self.rexp1.match(item)
            if m:
                # Type 1: no compare object involved.
                g = list(m.groups())
                attr = getattr(self, self.attr_d[g[0]])
                func = getattr(attr, self.op_d[g[1]])
                if g[1] == '!':
                    # Absolute is the only type-1 op taking a number.
                    func(int(g[2]))
                else:
                    func()
                continue
            m = self.rexp2.match(item)
            if not m: raise ValueError
            g = list(m.groups())
            attr = getattr(self, self.attr_d[g[0]])
            func = getattr(attr, self.op_d[g[1]])
            if g[3]: g[3] = int(g[3])
            else: g[3] = None;
            # Convert the 1-based object index to 0-based.
            g[4] = int(g[4]) - 1
            if g[1] in '<>^_':
                # Type 2: compare object only, optional margin.
                if g[3]: func(winlist[g[4]], g[3])
                else: func(winlist[g[4]])
            else:
                # Type 3: compare object plus edge/dimension constant.
                cmp = eval(self.cmp_d[g[2]])
                if g[3]: func(winlist[g[4]], cmp, g[3])
                else: func(winlist[g[4]], cmp)
    def debug_pack(self, pstr, winlist):
        # Same parse as pack(), but prints the equivalent traditional
        # wxLayoutConstraints calls instead of applying them.
        pstr = pstr.lower()
        for item in pstr.split(';'):
            m = self.rexp1.match(item)
            if m:
                g = list(m.groups())
                attr = getattr(self, self.attr_d[g[0]])
                func = getattr(attr, self.op_d[g[1]])
                if g[1] == '!':
                    print "%s.%s.%s(%s)" % \
                      ('self',self.attr_d[g[0]],self.op_d[g[1]],g[2])
                else:
                    print "%s.%s.%s()" % \
                      ('self',self.attr_d[g[0]],self.op_d[g[1]])
                continue
            m = self.rexp2.match(item)
            if not m: raise ValueError
            g = list(m.groups())
            if g[3]: g[3] = int(g[3])
            else: g[3] = 0;
            g[4] = int(g[4]) - 1
            if g[1] in '<>^_':
                if g[3]: print "%s.%s.%s(%s,%d)" % \
                   ('self',self.attr_d[g[0]],self.op_d[g[1]],winlist[g[4]],
                    g[3])
                else: print "%s.%s.%s(%s)" % \
                     ('self',self.attr_d[g[0]],self.op_d[g[1]],winlist[g[4]])
            else:
                if g[3]: print "%s.%s.%s(%s,%s,%d)" % \
                   ('self',self.attr_d[g[0]],self.op_d[g[1]],winlist[g[4]],
                    self.cmp_d[g[2]],g[3])
                else: print "%s.%s.%s(%s,%s)" % \
                     ('self',self.attr_d[g[0]],self.op_d[g[1]],winlist[g[4]],
                      self.cmp_d[g[2]])
if __name__=='__main__':
    # Interactive demo: four coloured panels laid out entirely with
    # Layoutf constraint strings, plus buttons wired to close/about.
    class TestLayoutf(wx.Frame):
        def __init__(self, parent):
            wx.Frame.__init__(self, parent, -1, 'Test Layout Constraints',
                              wx.DefaultPosition, (500, 300))
            self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
            self.SetAutoLayout(True)
            self.panelA = wx.Window(self, -1, style=wx.SIMPLE_BORDER)
            self.panelA.SetBackgroundColour(wx.BLUE)
            self.panelA.SetConstraints(Layoutf('t=t10#1;l=l10#1;b=b10#1;r%r50#1',(self,)))
            self.panelB = wx.Window(self, -1, style=wx.SIMPLE_BORDER)
            self.panelB.SetBackgroundColour(wx.RED)
            self.panelB.SetConstraints(Layoutf('t=t10#1;r=r10#1;b%b30#1;l>10#2', (self,self.panelA)))
            self.panelC = wx.Window(self, -1, style=wx.SIMPLE_BORDER)
            self.panelC.SetBackgroundColour(wx.WHITE)
            self.panelC.SetConstraints(Layoutf('t_10#3;r=r10#1;b=b10#1;l>10#2', (self,self.panelA,self.panelB)))
            b = wx.Button(self.panelA, -1, ' About: ')
            b.SetConstraints(Layoutf('X=X#1;Y=Y#1;h*;w%w50#1', (self.panelA,)))
            self.Bind(wx.EVT_BUTTON, self.OnAbout, b)
            b = wx.Button(self.panelB, 100, ' Panel B ')
            b.SetConstraints(Layoutf('t=t2#1;r=r4#1;h*;w*', (self.panelB,)))
            self.panelD = wx.Window(self.panelC, -1, style=wx.SIMPLE_BORDER)
            self.panelD.SetBackgroundColour(wx.GREEN)
            self.panelD.SetConstraints(Layoutf('b%h50#1;r%w50#1;h=h#2;w=w#2', (self.panelC, b)))
            b = wx.Button(self.panelC, -1, ' Panel C ')
            b.SetConstraints(Layoutf('t_#1;l>#1;h*;w*', (self.panelD,)))
            self.Bind(wx.EVT_BUTTON, self.OnButton, b)
            wx.StaticText(self.panelD, -1, "Panel D", (4, 4)).SetBackgroundColour(wx.GREEN)
        def OnButton(self, event):
            self.Close(True)
        def OnAbout(self, event):
            # Show the Layoutf class docstring in a scrolled dialog.
            import wx.lib.dialogs
            msg = wx.lib.dialogs.ScrolledMessageDialog(self, Layoutf.__doc__, "about")
            msg.ShowModal()
            msg.Destroy()
        def OnCloseWindow(self, event):
            self.Destroy()
    class TestApp(wx.App):
        def OnInit(self):
            frame = TestLayoutf(None)
            frame.Show(1)
            self.SetTopWindow(frame)
            return 1
    app = TestApp(0)
    app.MainLoop()
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/layoutf.py",
"copies": "1",
"size": "9885",
"license": "mit",
"hash": 4953203760174129000,
"line_mean": 34.6111111111,
"line_max": 136,
"alpha_frac": 0.5193727871,
"autogenerated": false,
"ratio": 3.252714708785785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.922477546340144,
"avg_score": 0.009462406496868884,
"num_lines": 270
} |
# 12/14/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
#
def RestOfLine(sx, width, data, bool):
    """Return the visible tail of ``data`` starting at column ``sx`` (at
    most ``width`` characters) as a list of (text, selected) pairs.

    An empty line viewed from column 0 still yields one empty segment,
    while a start column at or past the end of the data yields none.
    """
    if sx >= len(data):
        return [('', bool)] if sx == 0 else []
    return [(data[sx:sx + width], bool)]
def Selection(SelectBegin,SelectEnd, sx, width, line, data):
    # Split the visible part of `data` (columns sx..sx+width of row `line`)
    # into (text, selected) pieces according to the selection spanning
    # SelectBegin..SelectEnd -- each a (row, col) pair, in either order.
    if SelectEnd is None or SelectBegin is None:
        # No active selection: the whole visible text is unselected.
        return RestOfLine(sx, width, data, False)
    (bRow, bCol) = SelectBegin
    (eRow, eCol) = SelectEnd
    if (eRow < bRow):
        # Normalise so (bRow, bCol) is on the earlier row.
        (bRow, bCol) = SelectEnd
        (eRow, eCol) = SelectBegin
    if (line < bRow or eRow < line):
        # This row lies entirely outside the selection.
        return RestOfLine(sx, width, data, False)
    if (bRow < line and line < eRow):
        # This row lies strictly inside the selection: fully selected.
        return RestOfLine(sx, width, data, True)
    if (bRow == eRow) and (eCol < bCol):
        # Single-row selection made right-to-left: swap the columns.
        (bCol, eCol) = (eCol, bCol)
    # selection either starts or ends on this line
    end = min(sx+width, len(data))
    if (bRow < line):
        # Selection started on an earlier row: selected from column 0.
        bCol = 0
    if (line < eRow):
        # Selection continues past this row: selected to the window end.
        eCol = end
    pieces = []
    if (sx < bCol):
        if bCol <= end:
            # Unselected prefix before the selection starts.
            pieces += [(data[sx:bCol], False)]
        else:
            # Selection starts beyond the visible window: all unselected.
            return [(data[sx:end], False)]
    # The selected middle, clipped to the visible window.
    pieces += [(data[max(bCol,sx):min(eCol,end)], True)]
    if (eCol < end):
        # Unselected suffix after the selection ends.
        pieces += [(data[eCol:end], False)]
    return pieces
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/editor/selection.py",
"copies": "1",
"size": "1351",
"license": "mit",
"hash": -8716312372957072000,
"line_mean": 28.7045454545,
"line_max": 60,
"alpha_frac": 0.5388601036,
"autogenerated": false,
"ratio": 3.141860465116279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4180720568716279,
"avg_score": null,
"num_lines": null
} |
"""121. Best Time to Buy and Sell Stock
https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
Say you have an array for which the i^th element is the price of a given
stock on day i.
If you were only permitted to complete at most one transaction (i.e., buy one
and sell one share of the stock), design an algorithm to find the maximum
profit.
Note that you cannot sell a stock before you buy one.
Example 1:
Input: [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit =
6-1 = 5.
Not 7-1 = 6, as selling price needs to be larger than buying price.
Example 2:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
from typing import List
class Solution:
    def max_profit(self, prices: List[int]) -> int:
        """Best single-transaction stock profit.

        Sweep the prices once, remembering the cheapest price seen so far
        and the best gain achievable by selling at the current price.
        Returns 0 when no profitable transaction exists (or no prices).
        """
        cheapest = float('inf')
        best = 0
        for price in prices:
            if price < cheapest:
                cheapest = price
            elif price - cheapest > best:
                best = price - cheapest
        return best
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/best_time_to_buy_and_sell_stock.py",
"copies": "1",
"size": "1144",
"license": "mit",
"hash": 7348793416793554000,
"line_mean": 25,
"line_max": 77,
"alpha_frac": 0.6145104895,
"autogenerated": false,
"ratio": 3.1342465753424658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42487570648424655,
"avg_score": null,
"num_lines": null
} |
# # 12 .1 Create Rectangle and Square classes with a method called
# # calculate_perimeter that calculates the perimeter of the shapes they
# # represent. Create Rectangle and Square objects and call the method
# # on both of them.
# #
# class Shape():
# def __init__(self, l, w):
# self._length = l
# self._width = w
#
# def calculate_perimeter(self):
# return (self._length + self._width)*2
#
# # s = Shape(1,2)
# # print(s.calculate_perimeter())
#
# class Square(Shape):
# def __init__(self, l):
# self._length = l
# self._width = l
#
# class Rectangle(Shape):
# pass
#
#
# s = Square(1)
# r = Rectangle(2,3)
#
# print(s.calculate_perimeter())
# print(r.calculate_perimeter())
#
#
# # 12.2 Define a method in your Square class called change_size that allows you to
# # pass in a number that increases or decreases (if the number is negative)
# # each side of a Square object by that number.
#
# class Shape():
# def __init__(self, l, w):
# self._length = l
# self._width = w
#
# def calculate_perimeter(self):
# return (self._length + self._width)*2
#
# # s = Shape(1,2)
# # print(s.calculate_perimeter())
#
# class Square(Shape):
# def __init__(self, l):
# self._length = l
# self._width = l
# def change_size(self,delta):
# self._length = self._length + delta
# self._width = self._width + delta
#
# class Rectangle(Shape):
# pass
#
#
# s = Square(1)
# print(s.calculate_perimeter())
# s.change_size(-.1)
# print(s.calculate_perimeter())
#
# # 12.3 Create a class called Shape. Define a method in it called what_am_i that
# prints "I am a shape" when called. Change your Square and Rectangle classes
# from the previous challenges to inherit from Shape, create Square and
# Rectangle objects, and call the new method on both of them.
# class Shape():
# def __init__(self, l, w):
# self._length = l
# self._width = w
#
# def calculate_perimeter(self):
# return (self._length + self._width)*2
#
# def what_am_i(self):
# print("I am a shape")
#
# # s = Shape(1,2)
# # print(s.calculate_perimeter())
#
# class Square(Shape):
# def __init__(self, l):
# self._length = l
# self._width = l
# def change_size(self,delta):
# self._length = self._length + delta
# self._width = self._width + delta
# def what_am_i(self):
# print("I am a square")
#
# class Rectangle(Shape):
# def what_am_i(self):
# print("I am a rectangle")
#
# r = Rectangle(1,2)
# r.what_am_i()
# s = Square(1)
# s.what_am_i()
# 12.4 Create a class called Horse and a class called Rider.
# Use composition to model a horse that has a rider.
# class Horse():
# def __init__(self, name, breed, rider):
# self.name = name
# self.breed = breed
# self.rider = rider
#
# class Rider():
# def __init__(self, name):
# self.name = name
#
# jocke = Rider('Joakim von Anka')
# horsey = Horse('Polly', 'Kentucky Thoroughbred', jocke)
#
# print(horsey.rider.name)
| {
"repo_name": "Frikeer/LearnPython",
"path": "exc12/exc12.py",
"copies": "1",
"size": "3102",
"license": "unlicense",
"hash": 5269613313305685000,
"line_mean": 24.85,
"line_max": 83,
"alpha_frac": 0.5918762089,
"autogenerated": false,
"ratio": 2.9154135338345863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9006285580907254,
"avg_score": 0.0002008323654665118,
"num_lines": 120
} |
# 1.2.2 Interval1D client: takes an integer N from the command line, reads
# pairs of floats from standard input as 1D intervals, and counts the
# number of pairs of intervals that intersect.
from math import hypot, atan2, fabs
from random import random
import sys
def main(argv=None):
    '''
    Read pairs of floats from standard input, build Interval1D objects
    from them, and count how many pairs of intervals intersect.

    INPUT: List of arguments from the command line (expects exactly one
           integer option; note the value is parsed but otherwise unused)
    RETURNS: Exit code to be passed to sys.exit():
        -1: Invalid input
        0: Script completed successfully
    '''
    if argv is None:
        argv = sys.argv
    options = argv[1:]
    if (len(options) != 1):
        print('Error - expected single integer option, got {}'.format(options))
        return -1
    # N is validated here but not used below -- TODO confirm intent.
    N = int(argv[1])
    # One line of whitespace-separated floats: consecutive pairs form
    # the (left, right) endpoints of the intervals.
    line = sys.stdin.readline()
    line = line.split(' ')
    values = [float(word) for word in line]
    print('Read {}'.format(values))
    ptr = 0
    intervals = list()
    while ptr < len(values):
        intervals.append(Interval1D(values[ptr], values[ptr+1]))
        ptr += 2
    print('Read {}'.format(intervals))
    # Compare every unordered pair of intervals exactly once.
    pair_count = 0
    aptr = 0
    bptr = 0
    while aptr < len(intervals):
        bptr = aptr + 1
        while bptr < len(intervals):
            # print('debug: a = {}, b = {}'.format(aptr, bptr))
            if intervals[aptr].intersects(intervals[bptr]):
                print('Incrementing pair_count, a = {}, b = {}'.format(intervals[aptr], intervals[bptr]))
                pair_count += 1
            bptr += 1
        aptr += 1
    print('Pair count is {}'.format(pair_count))
    return 0
class Interval1D:
    '''
    Class representing a closed 1D interval [left, right]
    '''
    def __init__(self, left, right):
        '''
        Create a new interval; right must be strictly greater than left
        '''
        assert right > left, 'Error - right has to be greater than left'
        self.left = left
        self.right = right

    def __repr__(self):
        description = 'Interval1D: left = {}, right = {}'.format(self.left, self.right)
        return description

    def length(self):
        '''Return the length of the interval'''
        return self.right - self.left

    def contains(self, x):
        '''Return True if x lies within the closed interval'''
        contained = (x <= self.right) and (x >= self.left)
        return contained

    def intersects(self, interval):
        '''Return True if the two closed intervals overlap.

        Fix: the original only tested whether one of interval's endpoints
        fell inside self, which wrongly returned False when interval
        completely contained self (e.g. [2,3] vs [1,10]).  Two closed
        intervals overlap iff each one starts no later than the other ends.
        '''
        inter = (interval.left <= self.right) and (interval.right >= self.left)
        print('Checking intersect: self {} interval {}, result {}'.format(self, interval, inter))
        return inter
def float_equal(a, b, epsilon = 1e-6):
    '''
    Compare two floats for approximate equality
    INPUT: floats: a, b
           float: epsilon tolerance limit
    RETURN: True when |a - b| is strictly below epsilon
    '''
    return fabs(a - b) < epsilon
if __name__ == '__main__':
    # Unit tests
    # Basic sanity checks for Interval1D before handing off to main(),
    # which then reads intervals from standard input.
    test_interval = Interval1D(1.5, 3.8)
    assert float_equal(test_interval.left, 1.5)
    assert float_equal(test_interval.right, 3.8)
    assert float_equal(test_interval.length(), 3.8 - 1.5)
    # Boundary points are included (closed interval).
    assert test_interval.contains(1.5)
    assert test_interval.contains(3.8)
    assert test_interval.contains(2.0)
    assert not test_interval.contains(1.0)
    assert not test_interval.contains(-1.0)
    assert not test_interval.contains(100.0)
    assert test_interval.intersects(Interval1D(2.0, 3.0))
    assert test_interval.intersects(Interval1D(2.0, 4.0))
    assert test_interval.intersects(Interval1D(1.0, 2.0))
    sys.exit(main())
| {
"repo_name": "timgasser/algorithms_4ed",
"path": "ch1_fundamentals/ex1.2.2.py",
"copies": "1",
"size": "3633",
"license": "mit",
"hash": 2161476008267052800,
"line_mean": 29.5378151261,
"line_max": 105,
"alpha_frac": 0.5953757225,
"autogenerated": false,
"ratio": 3.8121720881427072,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9739215155414762,
"avg_score": 0.03366653104558905,
"num_lines": 119
} |
# 12/20/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxPyInformationalMessagesFrame -> PyInformationalMessagesFrame
# o dummy_wxPyInformationalMessagesFrame -> dummy_PyInformationalMessagesFrame
#
"""
infoframe.py
Released under wxWindows license etc.
This is a fairly rudimentary, but slightly fancier than
wxPyOnDemandOutputWindow (on which it's based; thanks Robin), version
of the same sort of thing: a file-like class called
wxInformationalMessagesFrame. This window also has a status bar with a
couple of buttons for controlling the echoing of all output to a file
with a randomly-chosen filename...
The class behaves similarly to wxPyOnDemandOutputWindow in that (at
least by default) the frame does not appear until written to, but is
somewhat different in that, either under programmatic (the
DisableOutput method) or user (the frame's close button, it's status
bar's "Dismiss" button, or a "Disable output" item of some menu,
perhaps of some other frame), the frame will be destroyed, an
associated file closed, and writing to it will then do nothing. This
can be reversed: either under programmatic (the EnableOutput method)
or user (an "Enable output" item of some menu), a new frame will be
opened, and an associated file (with a "randomly" selected filename)
opened for writing [to which all subsequent displayed messages will be
echoed].
Please note that, like wxPyOnDemandOutputWindow, the instance is not
itself a subclass of wxWindow: when the window is open (and ONLY
then), it's "frame" attribute is the actual instance of wFrame...
Typical usage::
from wx.lib.infoframe import *
... # ... modify your wxApp as follows:
class myApp(wxApp):
outputWindowClass = PyInformationalMessagesFrame
# ...
If you're running on Linux, you'll also have to supply an argument 1 to your
constructor of myApp to redirect stdout/stderr to this window (it's done
automatically for you on Windows).
If you don't want to redirect stdout/stderr, but use the class directly: do
it this way::
InformationalMessagesFrame = PyInformationalMessagesFrame( \
options_from_progname, # (default = "")
txt), # (default = "informational messages")
#^^^^ early in the program
# ...
InformationalMessagesFrame(list_of_items)
# where list_of_items:
#
# comma-separated list of items to display.
# Note that these will never be separated by spaces as they may
# be when used in the Python 'print' command
The latter statement, of course, may be repeated arbitrarily often.
The window will not appear until it is written to, and it may be
manually closed by the user, after which it will reappear again until
written to... Also note that all output is echoed to a file with a
randomly-generated name [see the mktemp module in the standard
library], in the directory given as the 'dir' keyword argument to the
InformationalMessagesFrame constructor [which has a default value of
'.'), or set via the method SetOutputDirectory... This file will be
closed with the window--a new one will be created [by default] upon
each subsequent reopening.
Please also note the methods EnableOutput and DisableOutput, and the
possible arguments for the constructor in the code below... (* TO DO:
explain this here...*) Neither of these methods need be used at all,
and in this case the frame will only be displayed once it has been
written to, like wxPyOnDemandOutputWindow.
The former, EnableOutput, displays the frame with an introductory
message, opens a random file to which future displayed output also
goes (unless the nofile attribute is present), and sets the __debug__
variable of each module to 1 (unless the no __debug__ attribute is
present]. This is so that you can say, in any module whatsoever::
if __debug__:
InformationalMessagesFrame("... with lots of %<Character> constructs"
% TUPLE)
without worrying about the overhead of evaluating the arguments, and
calling the wxInformationalMessagesFrame instance, in the case where
debugging is not turned on. (This won't happen if the instance has an
attribute no__debug__; you can arrange this by sub-classing...)
"Debug mode" can also be turned on by selecting the item-"Enable
output" from the "Debug" menu [the default--see the optional arguments
to the SetOtherMenuBar method] of a frame which has been either passed
appropriately to the constructor of the wxInformationalMessagesFrame
(see the code), or set via the SetOtherMenuBar method thereof. This
also writes an empty string to the instance, meaning that the frame
will open (unless DisableOutput has been called) with an appropriate
introductory message (which will vary according to whether the
instance/class has the "no __debug__" attribute). I have found this to
be an extremely useful tool, in lieu of a full wxPython debugger...
Following this, the menu item is also disabled, and an item "Disable
output" (again, by default) is enabled. Note that these need not be
done: e.g., you don't NEED to have a menu with appropriate items; in
this case simply do not call the SetOtherMenuBar method or use the
othermenubar keyword argument of the class instance constructor.
The DisableOutput method does the reverse of this; it closes the
window (and associated file), and sets the __debug__ variable of each
module whose name begins with a capital letter {this happens to be the
author's personal practice; all my python module start with capital
letters} to 0. It also enables/disabled the appropriate menu items,
if this was done previously (or SetOtherMenuBar has been called...).
Note too that after a call to DisableOutput, nothing further will be
done upon subsequent write()'s, until the EnableOutput method is
called, either explicitly or by the menu selection above...
Finally, note that the file-like method close() destroys the window
(and closes any associated file) and there is a file-like method
write() which displays it's argument.
All (well, most) of this is made clear by the example code at the end
of this file, which is run if the file is run by itself; otherwise,
see the appropriate "stub" file in the wxPython demo.
"""
import os
import sys
import tempfile
import wx
class _MyStatusBar(wx.StatusBar):
    """Three-field status bar with two embedded buttons.

    Field 0 shows status text; field 1 holds a "Dismiss" button and
    field 2 a toggling "Close File"/"Open New File" button.  Button
    presses are delegated to the ``callbacks`` list, which is expected
    to contain [dismiss, close_file, open_new_file] callables.
    """
    def __init__(self, parent, callbacks=None, useopenbutton=0):
        wx.StatusBar.__init__(self, parent, -1, style=wx.TAB_TRAVERSAL)
        self.SetFieldsCount(3)
        self.SetStatusText("",0)
        self.button1 = wx.Button(self, -1, "Dismiss", style=wx.TAB_TRAVERSAL)
        self.Bind(wx.EVT_BUTTON, self.OnButton1, self.button1)
        # Second button starts as "Close File" or "Open New File"
        # depending on whether output is currently echoed to a file.
        if not useopenbutton:
            self.button2 = wx.Button(self, -1, "Close File", style=wx.TAB_TRAVERSAL)
        else:
            self.button2 = wx.Button(self, -1, "Open New File", style=wx.TAB_TRAVERSAL)
        self.Bind(wx.EVT_BUTTON, self.OnButton2, self.button2)
        self.useopenbutton = useopenbutton
        self.callbacks = callbacks
        # figure out how tall to make the status bar
        dc = wx.ClientDC(self)
        dc.SetFont(self.GetFont())
        (w,h) = dc.GetTextExtent('X')
        h = int(h * 1.8)
        self.SetSize((100, h))
        self.OnSize("dummy")
        self.Bind(wx.EVT_SIZE, self.OnSize)

    # reposition things...
    def OnSize(self, event):
        """Re-fit both buttons inside status fields 1 and 2."""
        self.CalculateSizes()
        rect = self.GetFieldRect(1)
        self.button1.SetPosition((rect.x+5, rect.y+2))
        self.button1.SetSize((rect.width-10, rect.height-4))
        rect = self.GetFieldRect(2)
        self.button2.SetPosition((rect.x+5, rect.y+2))
        self.button2.SetSize((rect.width-10, rect.height-4))

    # widths........
    def CalculateSizes(self):
        """Size fields 1 and 2 to their button labels; field 0 stretches (-1)."""
        dc = wx.ClientDC(self.button1)
        dc.SetFont(self.button1.GetFont())
        (w1,h) = dc.GetTextExtent(self.button1.GetLabel())
        dc = wx.ClientDC(self.button2)
        dc.SetFont(self.button2.GetFont())
        (w2,h) = dc.GetTextExtent(self.button2.GetLabel())
        self.SetStatusWidths([-1,w1+15,w2+15])

    def OnButton1(self,event):
        # "Dismiss" -> first callback (DisableOutput).
        self.callbacks[0] ()

    def OnButton2(self,event):
        # Toggle between open-file and close-file behavior; only relabel
        # when the invoked callback reports success (truthy return).
        if self.useopenbutton and self.callbacks[2] ():
            self.button2.SetLabel ("Close File")
        elif self.callbacks[1] ():
            self.button2.SetLabel ("Open New File")
        self.useopenbutton = 1 - self.useopenbutton
        self.OnSize("")
        self.button2.Refresh(True)
        self.Refresh()
class PyInformationalMessagesFrame(object):
    """File-like output window with optional echo-to-file and menu hooks.

    Instances behave like writable file objects (write/flush/close); the
    wx.Frame is created lazily on the first write() and destroyed when
    the user closes it or DisableOutput() is called.  Unless the
    instance carries a 'nofile' attribute, everything written is also
    echoed to a "randomly" named temp file in the directory configured
    via SetOutputDirectory().
    """

    def __init__(self,
                 progname="",
                 text="informational messages",
                 dir='.',
                 menuname="Debug",
                 enableitem="Enable output",
                 disableitem="Disable output",
                 othermenubar=None):
        # Optionally wire up enable/disable items on another frame's menu.
        self.SetOtherMenuBar(othermenubar,
                             menuname=menuname,
                             enableitem=enableitem,
                             disableitem=disableitem)
        if hasattr(self,"othermenu") and self.othermenu is not None:
            # Initial menu state: "Disable output" greyed out,
            # "Enable output" selectable.
            i = self.othermenu.FindMenuItem(self.menuname,self.disableitem)
            self.othermenu.Enable(i,0)
            i = self.othermenu.FindMenuItem(self.menuname,self.enableitem)
            self.othermenu.Enable(i,1)
        self.frame = None  # created lazily by write()
        self.title = "%s %s" % (progname,text)
        self.parent = None # use the SetParent method if desired...
        self.softspace = 1 # of rather limited use
        if dir:
            self.SetOutputDirectory(dir)

    def SetParent(self, parent):
        """Set the wx parent used when the frame is (re)created."""
        self.parent = parent

    def SetOtherMenuBar(self,
                        othermenu,
                        menuname="Debug",
                        enableitem="Enable output",
                        disableitem="Disable output"):
        """Remember another frame's menu (bar) holding the enable/disable items."""
        self.othermenu = othermenu
        self.menuname = menuname
        self.enableitem = enableitem
        self.disableitem = disableitem

    # Class-level default: no echo file open yet.
    f = None

    def write(self, string):
        """File-like write: show 'string', creating the frame/echo file on demand."""
        if not wx.Thread_IsMain():
            # Aquire the GUI mutex before making GUI calls. Mutex is released
            # when locker is deleted at the end of this function.
            #
            # TODO: This should be updated to use wx.CallAfter similarly to how
            # PyOnDemandOutputWindow.write was so it is not necessary
            # to get the gui mutex
            locker = wx.MutexGuiLocker()
        if self.Enabled:
            if self.f:
                self.f.write(string)
                self.f.flush()
            # move == 1 -> keep the text control scrolled to the end,
            # unless the user has moved the caret away from the end.
            move = 1
            if (hasattr(self,"text")
                and self.text is not None
                and self.text.GetInsertionPoint() != self.text.GetLastPosition()):
                move = 0
            if not self.frame:
                # Lazily build the frame, text control and status bar.
                self.frame = wx.Frame(self.parent, -1, self.title, size=(450, 300),
                                      style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
                self.text = wx.TextCtrl(self.frame, -1, "",
                                        style = wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_RICH)
                self.frame.sb = _MyStatusBar(self.frame,
                                             callbacks=[self.DisableOutput,
                                                        self.CloseFile,
                                                        self.OpenNewFile],
                                             useopenbutton=hasattr(self,
                                                                   "nofile"))
                self.frame.SetStatusBar(self.frame.sb)
                self.frame.Show(True)
                self.frame.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
                if hasattr(self,"nofile"):
                    self.text.AppendText(
                        "Please close this window (or select the "
                        "'Dismiss' button below) when desired. By "
                        "default all messages written to this window "
                        "will NOT be written to a file--you "
                        "may change this by selecting 'Open New File' "
                        "below, allowing you to select a "
                        "new file...\n\n")
                else:
                    # Echo to a "randomly" named file in self.dir.
                    tempfile.tempdir = self.dir
                    filename = os.path.abspath(tempfile.mktemp ())
                    self.text.AppendText(
                        "Please close this window (or select the "
                        "'Dismiss' button below) when desired. By "
                        "default all messages written to this window "
                        "will also be written to the file '%s'--you "
                        "may close this file by selecting 'Close "
                        "File' below, whereupon this button will be "
                        "replaced with one allowing you to select a "
                        "new file...\n\n" % filename)
                    try:
                        self.f = open(filename, 'w')
                        self.frame.sb.SetStatusText("File '%s' opened..."
                                                    % filename,
                                                    0)
                    except EnvironmentError:
                        self.frame.sb.SetStatusText("File creation failed "
                                                    "(filename '%s')..."
                                                    % filename,
                                                    0)
            self.text.AppendText(string)
            if move:
                self.text.ShowPosition(self.text.GetLastPosition())
            if not hasattr(self,"no__debug__"):
                # Turn on every loaded module's __debug__ flag so
                # "if __debug__:" guarded messages start flowing.
                for m in sys.modules.values():
                    if m is not None:# and m.__dict__.has_key("__debug__"):
                        m.__dict__["__debug__"] = 1
            if hasattr(self,"othermenu") and self.othermenu is not None:
                # Output is now visible: allow "Disable", grey out "Enable".
                i = self.othermenu.FindMenuItem(self.menuname,self.disableitem)
                self.othermenu.Enable(i,1)
                i = self.othermenu.FindMenuItem(self.menuname,self.enableitem)
                self.othermenu.Enable(i,0)

    # Output is enabled by default; DisableOutput() sets this to 0.
    Enabled = 1

    def OnCloseWindow(self, event, exiting=0):
        """Close handler: close the echo file, flip menu items, destroy the frame."""
        if self.f:
            self.f.close()
            self.f = None
        if (hasattr(self,"othermenu") and self.othermenu is not None
            and self.frame is not None
            and not exiting):
            i = self.othermenu.FindMenuItem(self.menuname,self.disableitem)
            self.othermenu.Enable(i,0)
            i = self.othermenu.FindMenuItem(self.menuname,self.enableitem)
            self.othermenu.Enable(i,1)
        if not hasattr(self,"no__debug__"):
            # Mirror of write(): turn module __debug__ flags back off.
            for m in sys.modules.values():
                if m is not None:# and m.__dict__.has_key("__debug__"):
                    m.__dict__["__debug__"] = 0
        if self.frame is not None: # typically True, but, e.g., allows
                                   # DisableOutput method (which calls this
                                   # one) to be called when the frame is not
                                   # actually open, so that it is always safe
                                   # to call this method...
            frame = self.frame
            self.frame = self.text = None
            frame.Destroy()
        # NOTE(review): leaves Enabled == 1 so the frame reappears on the
        # next write(); DisableOutput() sets Enabled = 0 after calling this.
        self.Enabled = 1

    def EnableOutput(self,
                     event=None,# event must be the first optional argument...
                     othermenubar=None,
                     menuname="Debug",
                     enableitem="Enable output",
                     disableitem="Disable output"):
        """Re-enable output; the empty write() reopens the frame/echo file."""
        if othermenubar is not None:
            self.SetOtherMenuBar(othermenubar,
                                 menuname=menuname,
                                 enableitem=enableitem,
                                 disableitem=disableitem)
        self.Enabled = 1
        if self.f:
            self.f.close()
            self.f = None
        self.write("")

    def CloseFile(self):
        """Close the current echo file (if any); returns 1 for the status bar."""
        if self.f:
            if self.frame:
                self.frame.sb.SetStatusText("File '%s' closed..."
                                            % os.path.abspath(self.f.name),
                                            0)
            self.f.close ()
            self.f = None
        else:
            if self.frame:
                self.frame.sb.SetStatusText("")
        if self.frame:
            self.frame.sb.Refresh()
        return 1

    def OpenNewFile(self):
        """Prompt for a new echo file; returns 1 on success, 0 on cancel/error."""
        self.CloseFile()
        dlg = wx.FileDialog(self.frame,
                            "Choose a new log file", self.dir,"","*",
                            wx.SAVE | wx.OVERWRITE_PROMPT)
        if dlg.ShowModal() == wx.ID_CANCEL:
            dlg.Destroy()
            return 0
        else:
            try:
                self.f = open(os.path.abspath(dlg.GetPath()),'w')
            except EnvironmentError:
                dlg.Destroy()
                return 0
            dlg.Destroy()
            if self.frame:
                self.frame.sb.SetStatusText("File '%s' opened..."
                                            % os.path.abspath(self.f.name),
                                            0)
                if hasattr(self,"nofile"):
                    # Switch from the "no file" status bar to the normal one.
                    self.frame.sb = _MyStatusBar(self.frame,
                                                 callbacks=[self.DisableOutput,
                                                            self.CloseFile,
                                                            self.OpenNewFile])
                    self.frame.SetStatusBar(self.frame.sb)
            if hasattr(self,"nofile"):
                delattr(self,"nofile")
            return 1

    def DisableOutput(self,
                      event=None,# event must be the first optional argument...
                      exiting=0):
        """Announce shutdown, close the window/file, and stop all output."""
        self.write("<InformationalMessagesFrame>.DisableOutput()\n")
        if hasattr(self,"frame") \
           and self.frame is not None:
            self.OnCloseWindow("Dummy",exiting=exiting)
        self.Enabled = 0

    def close(self):
        """File-like close(): same as DisableOutput()."""
        self.DisableOutput()

    def flush(self):
        """File-like flush(): move the caret to the end and let wx repaint."""
        if self.text:
            self.text.SetInsertionPointEnd()
        wx.Yield()

    def __call__(self,* args):
        """Display each argument in turn (no separators added)."""
        for s in args:
            self.write (str (s))

    def SetOutputDirectory(self,dir):
        """Set (as an absolute path) the directory for future echo files."""
        self.dir = os.path.abspath(dir)
        ## sys.__stderr__.write("Directory: os.path.abspath(%s) = %s\n"
        ##                      % (dir,self.dir))
class Dummy_PyInformationalMessagesFrame(object):
    """No-op stand-in for PyInformationalMessagesFrame.

    Presents the same file-like/control interface (write, flush, close,
    __call__, EnableOutput, DisableOutput, SetParent) but discards all
    output -- useful to silence the message window without changing
    caller code.

    Fix: the original defined ``__call__`` twice; the second definition
    silently overrode the first, so the duplicate has been removed.
    """
    def __init__(self, progname=""):
        # File-like objects are expected to carry a softspace attribute.
        self.softspace = 1
    def __call__(self, *args):
        pass
    def write(self, s):
        pass
    def flush(self):
        pass
    def close(self):
        pass
    def EnableOutput(self):
        pass
    def DisableOutput(self, exiting=0):
        pass
    def SetParent(self, wX):
        pass
| {
"repo_name": "ktan2020/legacy-automation",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/infoframe.py",
"copies": "1",
"size": "19730",
"license": "mit",
"hash": 1134730862160436700,
"line_mean": 38.1016260163,
"line_max": 110,
"alpha_frac": 0.558337557,
"autogenerated": false,
"ratio": 4.325805744354308,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008725416538684715,
"num_lines": 492
} |
# 122. Best Time to Buy and Sell Stock II - LeetCode
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/description/
class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int

        Greedy single pass: buy just before the price starts rising and
        sell just before it starts falling; any still-held share is sold
        on the final day.
        """
        profit = 0
        if len(prices) == 0:
            return profit
        buy_price = -1  # -1 is the sentinel for "not holding a share"
        for day in range(1, len(prices)):
            holding = buy_price > -1
            if holding and prices[day] < prices[day - 1]:
                # Price is about to drop: sell at yesterday's price.
                profit += prices[day - 1] - buy_price
                buy_price = -1
            elif not holding and prices[day] > prices[day - 1]:
                # Price is rising: buy at yesterday's price.
                buy_price = prices[day - 1]
        if buy_price > -1:
            # Still holding at the end: sell at the last price.
            profit += prices[-1] - buy_price
        return profit
# Manual smoke tests (expected output: 9, then 2 on the following line).
s = Solution()
print(s.maxProfit([1,5,10,10,5,1]))
print(s.maxProfit([2,1,2,0,1])) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/122_best-time-to-buy-and-sell-stock-ii.py",
"copies": "1",
"size": "1024",
"license": "mit",
"hash": 2951638824239572500,
"line_mean": 33.1666666667,
"line_max": 79,
"alpha_frac": 0.5166015625,
"autogenerated": false,
"ratio": 3.250793650793651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4267395213293651,
"avg_score": null,
"num_lines": null
} |
# 122. Best Time to Buy and Sell Stock II
#
# Say you have an array for which the i-th element is the price of a
# stock on day i.
#
# Design an algorithm to find the maximum profit. You may complete as
# many buys and sells as you like, but you must hold at most one share
# at a time.
# It seems like this has to be a dynamic programming problem, but it's
# marked as easy. So I'm probably missing some sort of greedy strategy.
# Investigate!
# [ 1, 10, 2, 12, 3, 14, 4, 16]
# 0 1 2 3 4 5 6 7
# 0 9 9 19 19 30 30 42
# 1 0 10 10 21 21 33
# 2 10 10 21 21 33
# 3 0 11 11 23
# 4 11 11 23
# 5 0 12
# 6 12
# [ 1, 2, 4, 2, 5, 7, 2, 4, 9, 0]
# 0 1 2 3 4 5 6 7 8 9
# 0 1 3 3 6 8 8 10 15 15
# 1 2 2 5 7 7 9 14 14
# 2 0 3 5 5 7 12 12
# 3 3 5 5 7 12 12
# 4 2 2 4 9 9
# 5 0 2 7 7
# 6 2 7 7
# 7 5 5
# 8 0
# Well, it turns out that the following properties of sequences of
# numbers can be taken advantage of to create an optimal method that
# does not require dynamic programming.
#
# * A sequence of numbers is composed of consecutive subsequences that
# are either monotonically increasing or monotonically decreasing.
# The subsequences alternate between increasing and decreasing when
# considered to share end points.
# * Extend increasing subsequences as far as possible in each direction
# maximizes the difference: Given a_i <= ... <= a_j <= ... <= a_k <=
# ... <= a_n, (a_n - a_i) >= (a_k - a_j) because a_n >= a_k and a_j >=
# a_i.
# * The difference of a decreasing sequence is <= 0, so it is best to
# not buy or sell.
# * Choosing from a decreasing sequence following an increasing sequence
# cannot increase the difference of the increasing sequence: Given a_i
# <= ... <= a_j >= ... >= a_k, (a_j - a_i) >= (a_k - a_i).
# * Similarly, choosing a starting point in a decreasing sequence
# followed by an increasing sequence cannot increase the difference of
# the increasing sequence: Given a_i >= ... >= a_j <= ... <= a_k, (a_k
# - a_j) >= (a_k - a_i).
# * In an increasing, decreasing, increasing scenario, it is always best
# to treat both increasing sequences separately. Given a_i <= ... <=
# a_j >= ... >= a_k <= ... <= a_n, (a_j - a_i) + (a_n - a_k) >= (a_n -
# a_i) because a_j >= a_k.
#
# Together with induction, these properties cover all possible
# sequences of numbers and show that the maximum differences are
# obtained from the maximal increasing subsequences which can be found
# with a linear scan.
# In a strategy, a buy is -1, a sell is 1, and a hold is 0.
# Thus, the profit is the dot product of the strategy vector
# with the prices vector.
def gen_strategies(length: int):
    """Yield every buy/sell/hold vector of the given length.

    Each strategy is a list whose entries are -1 (buy), 1 (sell) or
    0 (hold); all 3**length combinations are produced by counting
    through the positions odometer-style (0 -> -1 -> 1 -> carry).

    Bug fix: the original yielded the *same* list object every time and
    kept mutating it, so callers that stored the yielded strategies
    (e.g. ``list(gen_strategies(n))``) saw only the final state.
    Yielding a copy keeps lazy iteration working exactly as before
    while making stored results correct.
    """
    strategy = [0] * length
    yield list(strategy)
    idx = 0
    while idx < length:
        # Increment to the next strategy
        if strategy[idx] == 0:
            strategy[idx] = -1
            idx = 0
            yield list(strategy)
        elif strategy[idx] == -1:
            strategy[idx] = 1
            idx = 0
            yield list(strategy)
        else:
            assert strategy[idx] == 1
            # Carry into the next position.
            strategy[idx] = 0
            idx += 1
def is_valid_strategy(strategy):
    """A valid strategy comes in pairs of buys and sells."""
    # Running sum of actions must stay in [-1, 0]: never sell what we
    # don't hold, never hold more than one share.
    open_position = 0
    for action in strategy:
        open_position += action
        if not -1 <= open_position <= 0:
            return False
    return True
def profit(prices, strategy):
    """Net cash flow of applying the buy/sell/hold vector to the prices
    (dot product of the two sequences, indexed by price position)."""
    total = 0
    for i in range(len(prices)):
        total += prices[i] * strategy[i]
    return total
def max_profit_dynprg(prices, idx_beg=None, idx_end=None, table=None):
    """Maximum total profit within prices[idx_beg..idx_end] (inclusive),
    via memoized recursion over all ways to split the interval.

    ``table`` caches results keyed by (idx_beg, idx_end) and is shared
    across the recursion.  Out-of-range or degenerate intervals yield 0.
    """
    if idx_beg is None:
        idx_beg = 0
    if idx_end is None:
        idx_end = len(prices) - 1
    if table is None:
        table = {}
    # Ensure sensical arguments. If not, return 0.
    if idx_beg >= idx_end or idx_beg < 0 or idx_end >= len(prices):
        return 0
    # Look up the existing maximum profit for this range of indices
    max_prft = table.get((idx_beg, idx_end), None)
    # Compute the max profit if it hasn't been computed before
    if max_prft is None:
        prfts = [
            # No buys, no sells
            0,
            # Buy at beginning, sell at end
            prices[idx_end] - prices[idx_beg],
        ]
        length = idx_end - idx_beg + 1
        if length >= 3:
            # Single intervals of length 1 less
            prfts.append(max_profit_dynprg(
                prices, idx_beg, idx_end - 1, table))
            prfts.append(max_profit_dynprg(
                prices, idx_beg + 1, idx_end, table))
        if length >= 4:
            # Single interval of length 2 less
            prfts.append(max_profit_dynprg(
                prices, idx_beg + 1, idx_end - 1, table))
            # All pairs of intervals
            for idx_mid in range(idx_beg + 1, idx_end - 1):
                prfts.append(
                    max_profit_dynprg(prices, idx_beg, idx_mid, table)
                    +
                    max_profit_dynprg(prices, idx_mid + 1, idx_end, table)
                )
        max_prft = max(prfts)
        table[idx_beg, idx_end] = max_prft
    #print(f'{idx_beg}-{idx_end}: {max_prft}')
    return max_prft
def max_profit_dynprg_nonrec(prices):
    """Bottom-up (non-recursive) version of max_profit_dynprg.

    Fills table[(i, j)] with the best profit attainable within
    prices[i..j], growing interval length from 2 up to len(prices),
    so every smaller interval is already available when needed.
    """
    if len(prices) < 2:
        return 0
    table = {}
    for length in range(2, len(prices) + 1):
        #print(f'length: {length}')
        for idx_beg in range(0, len(prices) - length + 1):
            idx_end = idx_beg + length - 1
            prfts = [
                # No buys, no sells
                0,
                # Buy at beginning, sell at end
                prices[idx_end] - prices[idx_beg],
            ]
            if length >= 3:
                # Single intervals of length 1 less
                prfts.append(table[idx_beg, idx_end - 1])
                prfts.append(table[idx_beg + 1, idx_end])
            if length >= 4:
                # All pairs of adjoining, smaller intervals
                for idx_mid in range(idx_beg + 1, idx_end - 1):
                    prfts.append(table[idx_beg, idx_mid] +
                                 table[idx_mid + 1, idx_end])
            max_prft = max(prfts)
            table[idx_beg, idx_end] = max_prft
            #print(f'{idx_beg}-{idx_end}: {max_prft}')
    return table[0, len(prices) - 1]
class Solution:
    """LeetCode 122 solvers, from brute force to the optimal greedy scan.

    Fix: the original annotated parameters as ``List[int]`` but the file
    never imports ``typing.List``, so defining the class raised
    NameError.  The builtin ``list`` is used for the annotations instead
    (same runtime interface for all callers).
    """

    def maxProfit_1(self, prices: list) -> int:
        """Brute force: enumerate every valid buy/sell strategy vector."""
        max_profit = 0
        for strategy in gen_strategies(len(prices)):
            if not is_valid_strategy(strategy):
                continue
            # Compute the profit of this strategy
            prft = profit(prices, strategy)
            if max_profit is None or prft > max_profit:
                max_profit = prft
        return max_profit

    def maxProfit_2(self, prices: list) -> int:
        """Recursive search over all (buy, sell) index pairs."""
        prices_length = len(prices)
        if prices_length < 2:
            return 0
        max_profit = 0
        for idx1 in range(prices_length - 1):
            price_buy = prices[idx1]
            for idx2 in range(idx1 + 1, prices_length):
                price_sell = prices[idx2]
                if price_sell <= price_buy:
                    continue
                prft = price_sell - price_buy
                if prices_length - idx2 > 2:
                    # Add the best result from trades strictly after this sell.
                    prft += self.maxProfit_2(prices[idx2 + 1:])
                if prft > max_profit:
                    max_profit = prft
        return max_profit

    def maxProfit_3(self, prices: list) -> int:
        """Memoized recursive dynamic programming."""
        return max_profit_dynprg(prices)

    def maxProfit_4(self, prices: list) -> int:
        """Bottom-up (non-recursive) dynamic programming."""
        return max_profit_dynprg_nonrec(prices)

    def maxProfit_5(self, prices: list) -> int:
        """Optimal O(n) greedy: sum every day-over-day price increase."""
        if len(prices) < 2:
            return 0
        prft = 0
        prices_iter = iter(prices)
        prev_price = next(prices_iter)
        for curr_price in prices_iter:
            if curr_price > prev_price:
                prft += curr_price - prev_price
            prev_price = curr_price
        return prft

    # The greedy scan is the solution actually submitted.
    maxProfit = maxProfit_5
| {
"repo_name": "afbarnard/glowing-broccoli",
"path": "lc/000122.py",
"copies": "1",
"size": "8280",
"license": "mit",
"hash": 3307837099479888400,
"line_mean": 35.8,
"line_max": 74,
"alpha_frac": 0.5350241546,
"autogenerated": false,
"ratio": 3.420074349442379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4455098504042379,
"avg_score": null,
"num_lines": null
} |
# [1, 2, 3] => [1, 4, 9]
# 배열로 받는거 제곱
def get_square_list(number_list):
    """Return a new list with every element of number_list squared."""
    return [value ** 2 for value in number_list]
get_square_list([1, 2, 3])
# Square every number passed as a positional argument (variadic version)
def get_square_list(*args):
    """Return a list with each positional argument squared.

    Shadows the list-taking version defined above (same name, variadic).
    """
    return [value ** 2 for value in args]
get_square_list(1, 2, 3)
get_square_list(1, 2, 3, 4)
# [1, 2, 3] => [2, 3, 4]
# Do we really have to keep writing a new function every time?! So tedious... -_-;;
# def get_increment_list()
# lambda, map
def square(x):
    """Return x squared (named-function version for use with map)."""
    return x ** 2
list(map(square, [1, 2, 3]))
# Can the above be refactored a little more?
# Try it with a lambda
list(map(lambda x: x ** 2, [4, 5, 7,9]))
# How do we apply a condition?
# filter
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
list(filter(lambda x: x > 5, numbers))
# map() returns a lazy iterator in Python 3; consuming it prints each value.
a = map(lambda x: x ** 8, [1, 2, 3, 4, 5])
for i in a:
    print(i)
import time
def sleeping_numbers(x):
    """Return x squared after sleeping one second (simulates slow work)."""
    time.sleep(1)
    return x ** 2
#map(sleeping_numbers, )
# Map => every element => new list
# Filter => every element => new list keeping only the elements that test True
# Reduce
# the one that shrinks the sequence
# the one that leaves a single value
# Python3 => moved into functools, so an import is required
# What does that mean?
import functools
functools.reduce(lambda x,y: x + y, [10, 20, 30, 40])
# So something like this works
def sum(x, y):
    """Add two values, printing the pair so reduce's steps are visible.

    NOTE: shadows the built-in sum(); kept for the reduce() demo.
    """
    print((x, y))
    total = x + y
    return total
functools.reduce(sum, [10, 20, 30, 40])
# A function that picks the maximum value out of a list
# This is not Pythonic!
def max(numbers):
    """Return the largest element of a non-empty list via a linear scan.

    NOTE: shadows the built-in max(); kept for the tutorial comparison
    with the reduce()-based version below.
    """
    best = numbers[0]
    for candidate in numbers[1:]:
        if candidate > best:
            best = candidate
    return best
max([1, 9, 2, 3, 7, 11, 4])
# [value-if-true] if [condition] else [value-if-false]
functools.reduce(lambda x,y: x if x > y else y, [1, 9, 2, 3, 7, 11, 4, 15, 20])
# Lambda demo => square only the numeric elements and build a new list
awesome_list = [1, 2, "안수찬", {}, 4, 5]
#list(filter(lambda x: isinstance(x, int), awesome_list))
list(map(
    lambda x: x ** 2,
    filter(
        lambda x: isinstance(x, int),
        awesome_list
    )
))
# List Comprehension
[i ** 2 for i in range(10)]
list(map(lambda x: x ** 2, range(10)))
[i ** 2 for i in range(9 + 1) if i < 5]
awsome_list = [i for i in range(0, 9+1)]
filter(lambda x: x>5, awsome_list)
# NOTE: rebinds the name 'square', shadowing the function defined above.
square = map(
    lambda x: x**2,
    filter(lambda x: x>5, awsome_list)
)
| {
"repo_name": "LeoHeo/FastCampus-Python-Django",
"path": "Week-2/06_lambda_map_reduce.py",
"copies": "1",
"size": "2472",
"license": "mit",
"hash": 6694271618836870000,
"line_mean": 17.3898305085,
"line_max": 79,
"alpha_frac": 0.5640552995,
"autogenerated": false,
"ratio": 1.8281381634372367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.2892193462937237,
"avg_score": null,
"num_lines": null
} |
# |1|2|3|4|5|6|7|8|
# 1|a| | | | | | | | |
# 2|b| | | | | | | | |
# 3|c| | | | | | | | |
# 4|d| | | | | | | | |
# 5|e| | | | | | | | |
# 6|f| | | | | | | | |
# 7|g| | | | | | | | |
# 8|h| | | | | | | | |
# 9|i| | | | | | | | |
import logging
import logging.handlers
import traceback
from helpMenu import parser
from helpers import *
from minimax import minimax, available_moves
from alphaBetaMinimax import alphaBetaMinimax, available_moves, count
from timeit import default_timer as timer
from sef import *
# Module-level logger; all game prompts and traces go to the console.
LOGGER = logging.getLogger("Animal_checker")
LOGGER.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
# fh = logging.FileHandler("./animal_checker2.log", mode='w')
# fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
# fh.setFormatter(formatter)
# add the handlers to LOGGER
LOGGER.addHandler(ch)
# LOGGER.addHandler(fh)
# Starting squares (column digit + row letter, e.g. '4i') for each
# player's pieces; keys match Animal.get_symbol() plus 'DEN'.
INITIAL_LOCATIONS = {
    'player1':{
        'M': '6i',
        'E': '2i',
        'T': '5h',
        # 'T': '3c',
        'W': '3h',
        'DEN': '4i',
    },
    'player2': {
        'M': '2a',
        'E': '6a',
        'T': '3b',
        'W': '5b',
        'DEN': '4a',
    }
}
def get_user_input():
    """
    T down -> Tiger move down
    M up -> mouse move up
    E left -> go left
    W right -> wolf go right
    """
    # Loops until a parsable command is entered.  Returns a tuple of
    # (animal_name, direction), or ("undo", None) for an undo request.
    while 1:
        # The first letter is enough to identify the animal...
        animals_set = {
            "t": "tiger",
            "m": "mouse",
            "e": "elephant",
            "w": "wolf",
        }
        # ...and the direction.
        directions = {
            "u": "up",
            "d": "down",
            "l": "left",
            "r": "right",
        }
        try:
            LOGGER.debug("Your Turn Make a Move: ( i.e. ~ T down ) ")
            _input = raw_input()  # Python 2 input primitive
            # print _input.lower().strip().split(" ")[3:]
            _animal, _direction = _input.lower().strip().split(" ")[:2]
            LOGGER.debug("User Input: %s move %s" % (_animal, _direction))
            if _animal[0] in animals_set.keys() and _direction[0] in directions.keys():
                return animals_set[_animal[0]], directions[_direction[0]]
            elif _animal == "undo":
                return "undo", None
            else:
                # print _animal in animals_set.keys() and _direction in directions.keys()
                # Anything unrecognized is reported below and we re-prompt.
                raise InvalidUserInputError
        except Exception, e:  # Python 2 except syntax; log and re-prompt
            print traceback.format_exc(e)
class Animal(object):
    """A game piece on the board.

    Tracks its owner, current square and which piece types it may
    capture; subclasses extend the ``_capturable`` list.  The rich
    comparisons ``>``/``<`` encode "can capture" between pieces.
    """
    def __init__(self, _type, owner=None, verbose=False):
        self._type = _type
        self._capturable = ['den']  # every animal may enter the enemy den
        self._verbose = verbose
        self.location = None  # alphanumeric square, e.g. '5h'
        self._row_col_location = None  # (row, col) form of location
        self.owner = owner
        self.is_dead = False
        self.neighbor = []  # cached reachable (row, col) squares

    def __repr__(self):
        # Uppercase symbol for player1, lowercase for player2.
        return ' %s ' % (self._type[0].upper() if self.owner.lower() == 'player1' else self._type[0].lower())

    def __lt__(self, other_animal):
        # "self < other" means the other animal can capture this piece.
        if hasattr(self, '_type'):
            return other_animal.can_capture(self)
        else:
            # Plain board cell (no _type): nothing to protect.
            return True
        # return other_animal.can_capture(str(self))

    def __gt__(self, other_animal):
        # "self > other" means this animal can capture the other.
        if hasattr(other_animal, '_type'):
            # print "other_animal", other_animal, self._verbose
            return self.can_capture(other_animal)
        else:
            # print self.can_capture(other_animal)
            # Target square holds no animal: always free to move onto.
            return True

    def get_initial_location(self):
        """Look up and set this piece's starting square for its owner."""
        self.location = INITIAL_LOCATIONS[self.owner][self.get_symbol()]
        return self.location

    def can_move_to(self, new_location, new_row=None, new_col=None, _board=None):
        ''' returns (status, (new_row, new_col)) '''
        try:
            if self.is_dead:
                raise DeadAnimalException("\nDear %s, \n Sorry I'm dead.\n \t ~love your dead %s" % (self.owner, self._type))
            # print "new_location", new_location, new_row, new_col
            cur_row, cur_col = get_xy_coordinates(self.location)
            if not (new_row and new_col):
                # print "%s - %s" % (new_row, new_col)
                # print new_location
                new_row, new_col = get_xy_coordinates(new_location)
            # left, up , right , down
            # print cur_row, cur_col
            # print new_row, new_col
            _neighbor = get_neighbor(cur_row, cur_col)
            self.neighbor = []
            # Keep only on-board neighbor squares whose occupant this
            # animal can capture (empty squares always pass the > test).
            [self.neighbor.append(item) for item in _neighbor.values() if item and self > _board[item[0] - 1][item[1] - 1]]
            # print _neighbor, self.neighbor
            # LOGGER.debug("Actual Available Moves = %s" % [get_alpha_numeric_coordinates(*x) +" -> "+str(x) for x in self.neighbor])
            # LOGGER.debug( "(%s)[%s] can move to %s ? = %s" % (self,self.owner,(new_row, new_col),(new_row, new_col) in self.neighbor))
            return ((new_row, new_col) in self.neighbor, (new_row, new_col))
        except Exception, e:  # any failure means "cannot move there"
            # print str(traceback.format_exception_only(type(e), e)[0])
            print traceback.format_exc(str(e))
            return (False, (0, 0))

    def can_capture(self, other_animal):
        """True when the other piece's type is capturable and enemy-owned."""
        res = True if other_animal._type.lower() in self._capturable and \
            other_animal.owner != self.owner else False
        verbose_status = ''
        if self._verbose:
            if res:
                verbose_status = 'The [%s](%s)' % (self._type, self.owner[0::1]) + \
                    ' can capture this [%s](%s)' % (other_animal._type, other_animal.owner[0::1])
            else:
                verbose_status = 'The [%s](%s)' % (self._type, self.owner[0::1]) + \
                    ' can NOT capture this [%s](%s)' % (other_animal._type, other_animal.owner[0::1])
            print verbose_status
        return res

    def _move(self, direction):
        ''' Syntatic Sugar function to be able to move in any direction without entering the coordinates'''
        _nb = get_neighbor(*get_xy_coordinates(self.location))
        print '[%s](%s) attempt to move to %s' % (self._type, self.owner, direction)
        if _nb[direction]:
            return get_alpha_numeric_coordinates(*_nb[direction])
        else:
            print '[%s](%s) can\'t move %s' % (self._type, self.owner, direction)
            raise OutOfBoardException("[%s](%s) Attempted to move outside the board..." % (self._type, self.owner))

    def is_captured(self):
        """Mark this piece as captured/removed from play."""
        self.is_dead = True

    def get_symbol(self):
        """Single uppercase letter used as the INITIAL_LOCATIONS key."""
        return self._type[0].upper()

    def get_location(self):
        return self.location

    def distance_from(self, other_animal):
        """Manhattan distance to another (living) animal."""
        if other_animal.is_dead:
            # NOTE(review): this *returns* the exception rather than
            # raising it -- looks like a bug; left untouched here.
            return DeadAnimalException(" [%s](%s) is trying to find the Ghost of this dead [%s](%s) ..." % (self._type, self.owner, other_animal._type, other_animal.owner))
        return abs(self._row_col_location[0] - \
            other_animal._row_col_location[0]) + \
            abs(self._row_col_location[1] - \
            other_animal._row_col_location[1])
class Den(Animal):
    """The den piece: a capture target that itself captures nothing."""
    def __init__(self, owner=None, verbose=False):
        super(Den, self).__init__(self.__class__.__name__, owner=owner,
                                  verbose=verbose)
        self._capturable = []  # a den can never capture anything

    def __repr__(self):
        return '%s' % ('DEN' if self.owner.lower() == 'player1' else 'den')

    def is_captured(self):
        # Capturing the enemy den is the win condition.
        print "Den Captured"
        self.is_dead = True
class Wolf(Animal):
    """Wolf: may capture mice, other wolves, and the den."""
    def __init__(self, owner=None, verbose=False):
        super(Wolf, self).__init__(self.__class__.__name__, owner=owner,
                                   verbose=verbose)
        self._capturable += ['mouse', 'wolf']
class Mouse(Animal):
    """Mouse: may capture other mice, elephants, and the den."""
    def __init__(self, owner=None, verbose=False):
        super(Mouse, self).__init__(self.__class__.__name__, owner=owner,
                                    verbose=verbose)
        self._capturable += ['mouse', 'elephant']
class Tiger(Animal):
    """Tiger: may capture mice, other tigers, wolves, and the den."""
    def __init__(self, owner=None, verbose=False):
        super(Tiger, self).__init__(self.__class__.__name__, owner=owner,
                                    verbose=verbose)
        self._capturable += ['mouse', 'tiger', 'wolf']
class Elephant(Animal):
    """Elephant: may capture tigers, wolves, other elephants, and the den."""
    def __init__(self, owner=None, verbose=False):
        super(Elephant, self).__init__(self.__class__.__name__, owner=owner,
                                       verbose=verbose)
        self._capturable += ['tiger', 'wolf', 'elephant']
class Player(object):
    """Owns one of each animal plus a den; iterable over its piece names."""
    def __init__(self, name='player1', location=None, verbose=False):
        self.name = name
        self.tiger = Tiger(verbose=verbose, owner=self.name)
        self.wolf = Wolf(verbose=verbose, owner=self.name)
        self.mouse = Mouse(verbose=verbose, owner=self.name)
        self.elephant = Elephant(verbose=verbose, owner=self.name)
        self.den = Den(verbose=verbose, owner=self.name)

    def __getitem__(self, key):
        """ return a animal by the key Name
        """
        return self.__dict__[key]

    def __iter__(self):
        # Iterate the movable pieces' attribute names (skip name and den).
        return iter([item for item in self.__dict__ if item not in ['name', 'den']])
class AnimalChecker(object):
"""docstring for AnimalChecker"""
def __init__(self, rows, cols, starting_player=1):
self.rows = rows
self.cols = cols
self._board = []
self._y_arr = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
self.players = []
self.setup()
self.plys = 0
self.starting_player = starting_player if starting_player in [1,2] else 1
self.is_gameover = False
self.last_move = move_tracker(self)
def setup(self):
self._board = self.build_board()
self.players = self.init_players()
for _player in self.players:
self._move_animal(_player.tiger,
_player.tiger.get_initial_location())
self._move_animal(_player.mouse,
_player.mouse.get_initial_location())
self._move_animal(_player.wolf,
_player.wolf.get_initial_location())
self._move_animal(_player.elephant,
_player.elephant.get_initial_location())
self._set_den(_player)
def init_players(self):
Player1 = Player('player1', verbose=False)
Player2 = Player('player2', verbose=False)
return [Player1, Player2]
def _set_den(self, player):
# get den location for this player
player.den.location = INITIAL_LOCATIONS[player.name]['DEN'] # change location
player.den._row_col_location = get_xy_coordinates(player.den.location)
# self._add_to_board(*get_xy_coordinates(player.den.location),
# content=player.den) # remote animal from previous location on board
def _find_whose_turn(self):
# print (self.plys + self.starting_player - 1) % 2
return self.players[(self.plys + self.starting_player - 1) % 2 ].name
def get_players(self):
return self.players
def get_current_game_state(self):
return self
def build_board(self):
_board = []
for row in xrange(0, self.rows):
_board.append([])
for col in xrange(0, self.cols):
_board[row].append(' ')
return _board
def display_board(self, _board=[], raw=False, no_print=False):
if not _board:
_board = self._board
current_player = self._find_whose_turn()
board_str = ""
if not raw:
board_str = "\n[==============( %s Turn - total ply = %s )==============]\n\n" % (current_player, self.plys)
for row_index in xrange(0, len(_board)):
row = _board[row_index]
curren_col = ''
if not raw:
if(row_index == 0): # just to display the first header line
curren_col += ' |'
for col_index in xrange(0, len(_board[row_index])):
curren_col += ' ' + str(col_index + 1) + ' |'
curren_col += '\n'
curren_col += '' + self._y_arr[row_index] + "->|"
else:
curren_col = '|'
# print Den
# make a den accessible to own animals
for index, col in enumerate(row):
if (row_index + 1, index + 1) == get_xy_coordinates(INITIAL_LOCATIONS['player1']["DEN"]):
if isinstance(col, Animal):
curren_col += '*' + str(col).strip() + '*|'
else:
curren_col += 'DEN|'
elif (row_index + 1, index +1) == get_xy_coordinates(INITIAL_LOCATIONS['player2']["DEN"]):
if isinstance(col, Animal):
curren_col += '*' + str(col).strip() + '*|'
else:
curren_col += 'den|'
else:
curren_col += '' + str(col) + '|'
board_str += '%s\n' % curren_col
if not raw:
board_str += "\n[==================================================]\n"
# LOGGER.info(board_str)
if not no_print:
print board_str
return board_str
def get_item_at(self, row, col):
return self._board[row - 1][col - 1]
def _add_to_board(self, row, col, content):
try:
# print "adding to board {%s,%s} = %s(%s)" % (row, col, content, content.owner)
self._board[row - 1][col - 1] = content
except IndexError:
raise InvalidMoveException
def _check_winner_state(self):
currently_on_tile = self._board[0][3]
if isinstance(currently_on_tile, Animal): # check one of the player in on the wining tile
if currently_on_tile.owner == 'player1':
print "WE HAVE A WINNER !!", currently_on_tile.owner.upper(), "WON !!"
self.is_gameover = True
return True
currently_on_tile = self._board[8][3]
if isinstance(currently_on_tile, Animal): # check one of the player in on the wining tile
if currently_on_tile.owner == 'player2':
print "WE HAVE A WINNER !!", currently_on_tile.owner.upper(), "WON !!"
self.is_gameover = True
return True
for player in self.players:
opponent = [p for p in self.players if player != p]
if all(player[animal].is_dead == True for animal in player):
print "WE HAVE A WINNER !!", opponent[0].name.upper(), "WON !!"
return True
return False
def _move_animal(self, animal, where):
''' each animal keep track of it's last position '''
try:
self._add_to_board(*get_xy_coordinates(animal.location),
content=' ') # remote animal from previous location on board
animal.location = where # change location
animal._row_col_location = get_xy_coordinates(where) # change location
self._add_to_board(*get_xy_coordinates(where),
content=animal) # add animal to new location
except Exception, e:
print traceback.format_exc()
raise e
def move(self, animal, direction):
'''Syntactic sugar to conveniently use directions instead of coordinates'''
try:
if animal.owner is self._find_whose_turn():
new_location = animal._move(direction)
if new_location:
return self._move(animal, new_location)
# else:
# LOGGER.warning("Waiting on %s to play ..." % self._find_whose_turn())
except OutOfBoardException, e:
print traceback.format_exc(e)
def ai_move(self, who, new_row=None, new_col=None, sef=0, player=None):
return self._move(player[who], None, new_row, new_col)
def _move(self, who, new_location=None, new_row=None, new_col=None):
try:
if self.is_gameover == True:
print ("Game Over")
return True
if not new_location:
# print who, new_location, new_row, new_col
new_location = get_alpha_numeric_coordinates(new_row, new_col)
cur_player = self._find_whose_turn()
if who.owner is not cur_player:
print "Waiting on %s to play ..." % cur_player
# self.display_board()
return False
old_location = who.location
status, (row, col) = who.can_move_to(new_location, new_row, new_col, self._board)
# print status
if not status:
print "[%s](%s) can't move to %s" % (who._type, who.owner, new_location)
self.display_board()
return False
# print who, ">", self.get_item_at(row, col)
animal_on_tile = self.get_item_at(row, col)
if isinstance(animal_on_tile, Animal): # tile not empty
res = who > animal_on_tile
# print res, who.owner is not animal_on_tile.owner, who.owner, animal_on_tile.owner
if (res and (who.owner is not animal_on_tile.owner)):
print 'BOOYA! [%s](%s) Captured [%s](%s)' % (who._type, who.owner, animal_on_tile._type, animal_on_tile.owner)
animal_on_tile.is_captured()
# self._move_animal(who, new_location)
if new_location:
self._move_animal(who, new_location)
else:
self._move_animal(who, get_alpha_numeric_coordinates(new_row, new_col))
else:
if isinstance(animal_on_tile, Den):
# print "tring to go in the den"
# self._move_animal(who, new_location)
if new_location:
self._move_animal(who, new_location)
else:
self._move_animal(who, get_alpha_numeric_coordinates(new_row, new_col))
else:
print "can't do this - %s , %s" % (res, who.owner is not animal_on_tile.owner)
return False
else: # tile was empty ... just move up there if you can
if new_location:
self._move_animal(who, new_location)
else:
self._move_animal(who, get_alpha_numeric_coordinates(new_row, new_col))
self.plys +=1
self.last_move.update(who, old_location, new_location, animal_on_tile)
# self.display_board()
# check Winning state
# self._check_winner_state(who, new_location);
return True
except Exception, e:
print traceback.format_exc(e)
self.display_board()
return False
# raise e
def undo(self):
''' undo a move using the move_tracker plugin '''
# l_move = self.last_move
self.last_move.revert()
# LOGGER.warning(l_move)
if self.is_gameover == True:
self.is_gameover = False
if __name__ == '__main__':
    import sys
    # Reference diagram of the initial layout: player2's pieces (lowercase)
    # at the top, player1's (uppercase) at the bottom.
    print """
     | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
  a->|   |   | m |*e*| w |   |   |
  b->|   |   |   | t |   |   |   |
  c->|   |   |   |   |   |   |   |
  d->|   |   |   |   |   |   |   |
  e->|   |   |   |   |   |   |   |
  f->|   |   |   |   |   |   |   |
  g->|   | E |   |   | T |   |   |
  h->|   |   | W |   |   |   |   |
  i->|   |   |   |DEN|   | M |   |
    """
    # Scripted opening moves for the AI side, replayed until ply 10.
    opening_book = {
        "offense": [
            ('mouse', 'down'),
            ('elephant', 'down'),
            ('elephant', 'down'),
            ('wolf', 'right'),
            ('elephant', 'left'),
            ('tiger', 'down'),
            ('mouse', 'down'),
            ('elephant', 'left'),
            ('wolf', 'down'),
            ('mouse', 'down'),
        ],
        "defense": [
            # ('elephant', 'down'),
            # ('elephant', 'down'),
            # ('elephant', 'left'),
            # ('elephant', 'left'),
            # ('mouse', 'down'),
            # ('tiger', 'right'),
            # ('elephant', 'left'),
            # ('tiger', 'down'),
            # ('wolf', 'down'),
            # ('mouse', 'down'),
        ],
    }
    try:
        print tigerAscii
        # Command-line args: at least a first-player number is required.
        if len(sys.argv) < 3:
            parser.print_help()
            sys.exit()
        args = parser.parse_args()
        startingP = int(args.FirstPlayer[0])
        game = AnimalChecker(rows=9, cols=7, starting_player=startingP)
        game.setup()
        p1, p2 = game.get_players()
        player = {
            "1": p1,
            "2": p2,
        }
        _next=0
        # Main loop: human plays player1 interactively; player2 follows the
        # opening book for 10 plies, then switches to alpha-beta search.
        while True:
            game.display_board()
            print "PLAYER Turn = ", game._find_whose_turn()
            if game._find_whose_turn() == 'player1':
                ani, loc = get_user_input()
                if ani=="undo":
                    game.undo()
                    continue
                status = game.move(p1[ani], loc)
                # Re-prompt until a legal move is entered.
                while not status:
                    ani, loc = get_user_input()
                    status = game.move(p1[ani], loc)
                game._check_winner_state();
                if game.is_gameover:
                    break;
            else:
                if game.plys < 10 :
                    # Opening-book phase: skip over book moves that are illegal
                    # in the current position.
                    ani, direction = opening_book['offense'][(_next)%len(opening_book['offense'])]
                    while not game.move(p2[ani], direction):
                        _next+=1
                        ani, direction = opening_book['offense'][(_next)%len(opening_book['offense'])]
                    _next+=1
                else:
                    start = timer()
                    # Alpha-beta to depth 4 (comment notes: 2 or 4 work, not 3).
                    _, bestMove = alphaBetaMinimax(game, -1000, 1000, 0, 4, p2, None) # 2 or 4 ( not 3) for depth
                    end = timer()
                    game.ai_move(player=p2, *bestMove)
                    print " Total time %s" % (end - start)
                    alphaBetaMinimax.count = 0
                game._check_winner_state()
                if game.is_gameover:
                    break;
        game.display_board()
    except InvalidMoveException:
        print "Invalid Move, please try again"
| {
"repo_name": "Mimieam/CS540_AI",
"path": "project1/AnimalChecker.py",
"copies": "1",
"size": "23177",
"license": "mit",
"hash": 2082468114857117700,
"line_mean": 36.1426282051,
"line_max": 172,
"alpha_frac": 0.4978211158,
"autogenerated": false,
"ratio": 3.7000319284802043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4697853044280204,
"avg_score": null,
"num_lines": null
} |
"""123
Revision ID: d5480b6ea221
Revises: None
Create Date: 2016-05-09 13:31:11.192108
"""
# revision identifiers, used by Alembic.
revision = 'd5480b6ea221'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `categorys` and `articles` tables.

    `categorys` must be created first because `articles.category_id`
    references it. (The column name `titile` is a historical typo kept
    for schema compatibility with existing databases.)
    """
    op.create_table(
        'categorys',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
    )
    op.create_table(
        'articles',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('titile', sa.String(length=64), nullable=True),
        sa.Column('body', sa.Text(), nullable=True),
        sa.Column('create_time', sa.DATETIME(), nullable=True),
        sa.Column('category_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['category_id'], ['categorys.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('titile'),
    )
def downgrade():
    """Drop `articles` first (it carries the FK), then `categorys`."""
    op.drop_table('articles')
    op.drop_table('categorys')
| {
"repo_name": "Millyn/Flask_blog",
"path": "migrations/versions/d5480b6ea221_123.py",
"copies": "1",
"size": "1192",
"license": "apache-2.0",
"hash": 5905180780422510000,
"line_mean": 27.380952381,
"line_max": 65,
"alpha_frac": 0.6619127517,
"autogenerated": false,
"ratio": 3.4853801169590644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9595389000466668,
"avg_score": 0.010380773638479375,
"num_lines": 42
} |
# 125. Valid Palindrome
#
# Given a string, determine if it is a palindrome,
# considering only alphanumeric characters and ignoring cases.
#
# For example,
# "A man, a plan, a canal: Panama" is a palindrome.
# "race a car" is not a palindrome.
#
# Note:
# Have you consider that the string might be empty?
# This is a good question to ask during an interview.
#
# For the purpose of this problem, we define empty string as valid palindrome.
class Solution(object):
    """LeetCode 125: is a string a palindrome when only alphanumeric
    characters are considered and case is ignored?

    NOTE: the four variants below deliberately share one name (LeetCode
    style); only the LAST definition is bound on the class at runtime.
    """

    def isPalindrome(self, s):
        """
        :type s: str
        :rtype: bool
        """
        # Keep only lowercase alphanumerics, then compare with the reversal.
        s = ''.join(e for e in s.lower() if e.isalnum())
        return s == s[::-1]

    # http://blog.csdn.net/aliceyangxi1987/article/details/50372724
    # Variant without isalnum(): keep characters by explicit ranges.
    def isPalindrome(self, s):
        new = []
        for e in s.lower():
            # bugfix: was '0' <= e < '9', which wrongly dropped the digit '9'
            if '0' <= e <= '9' or 'a' <= e <= 'z':
                new.append(e)
        return new == new[::-1]

    # Variant without s[::-1]: two-pointer scan from both ends.
    def isPalindrome(self, s):
        s = ''.join(e for e in s.lower() if e.isalnum())
        # // keeps the index an int on both Python 2 and 3 (same result in Py2).
        for i in range(0, len(s) // 2):
            if s[i] != s[len(s) - 1 - i]:
                return False
        return True

    # Variant using filter(); join() makes it work on both Python 2 and 3,
    # where filter() returns an iterator instead of a str.
    def isPalindrome(self, s):
        st = ''.join(filter(str.isalnum, s)).lower()
        return st == st[::-1]
if __name__ == "__main__":
    # Manual smoke checks (Python 2 prints) plus two regression asserts.
    print Solution().isPalindrome("")
    print Solution().isPalindrome("abba")
    assert Solution().isPalindrome("A man, a plan, a canal: Panama") is True
    assert Solution().isPalindrome("race a car") is False
| {
"repo_name": "gengwg/leetcode",
"path": "125_valid_palindrome.py",
"copies": "1",
"size": "1541",
"license": "apache-2.0",
"hash": -9198968882678752000,
"line_mean": 27.0181818182,
"line_max": 78,
"alpha_frac": 0.5691109669,
"autogenerated": false,
"ratio": 3.2373949579831933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43065059248831933,
"avg_score": null,
"num_lines": null
} |
"""128. Longest Consecutive Sequence
https://leetcode.com/problems/longest-consecutive-sequence/
Given an unsorted array of integers, find the length of the longest
consecutive elements sequence.
Your algorithm should run in O(n) complexity.
Example:
Input: [100, 4, 200, 1, 3, 2]
Output: 4
Explanation: The longest consecutive elements sequence is [1, 2, 3, 4].
Therefore its length is 4.
"""
from typing import List
class Solution:
    """Three solutions to LeetCode 128 (longest consecutive sequence)."""

    def longest_consecutive_1(self, nums: List[int]) -> int:
        """O(N): set scan, counting only from the head of each run."""
        values = set(nums)
        best = 0
        for start in values:
            if start - 1 in values:
                continue  # not the smallest element of its run
            current, length = start, 1
            while current + 1 in values:
                current += 1
                length += 1
            best = max(best, length)
        return best

    def longest_consecutive_2(self, nums: List[int]) -> int:
        """O(N*lgN): sort in place, then walk adjacent pairs, skipping dups."""
        if not nums:
            return 0
        nums.sort()  # in place, matching the original's side effect
        best = run = 1
        for prev, cur in zip(nums, nums[1:]):
            if cur == prev + 1:
                run += 1
                best = max(best, run)
            elif cur != prev:
                run = 1
        return best

    def longest_consecutive_3(self, nums: List[int]) -> int:
        """Hash map of run lengths keyed by the borders of each run."""
        border_len = {}  # num -> length of the consecutive run touching it
        best = 0
        for num in nums:
            if num in border_len:
                continue
            left = border_len.get(num - 1, 0)
            right = border_len.get(num + 1, 0)
            total = left + right + 1
            best = max(best, total)
            border_len[num] = total
            # Key tip: only the two outer borders matter for future merges.
            border_len[num - left] = total
            border_len[num + right] = total
        return best
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/longest_consecutive_sequence.py",
"copies": "1",
"size": "2182",
"license": "mit",
"hash": -4404200000999182300,
"line_mean": 27.6973684211,
"line_max": 78,
"alpha_frac": 0.5061898212,
"autogenerated": false,
"ratio": 3.812937062937063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9819126884137063,
"avg_score": 0,
"num_lines": 76
} |
# 12/9/16, made this AMR specific, eliminating the need to parse srl vectors.
#
# DaisyLu Vectors 10/8/2015
#
# Read in the original conll format files and the list of items represented by each feature.
# Create MySQLite database with everything necessary to generate vectors for specific models,
# in Lua, including:
# Word, Pred, PredContext(3), marker(3) <-- first task, check with LUA generated SQLITE db which
# will have differing indices for odd words, but not enough
# to matter - ? Then check the time for LUA vector creation
# (should more processing be done here to speed up LUA?)x
# Word, Pred, PredContext(5), marker(5)
# Word, Pred, PredContext-Pred(5), marker(5)
# Word, Pred, PredContext-Pred(5), Position, Caps, Suffix
# Word, Pred, PredContext-Pred(5), Position, Caps, Suffix, patch, deprel, POS, PPOS
#
#
# Will extend this to new word reps, and to AMR feature generation, so be ready.
#
# Similar to SRLDepPaper.py:
# --------------------------
#
# It reads in CONLL format and generates a new CONLL format that includes path information
#
# It generates a word list for random training that will only contain words from the training
# dataset, with a fraction removed in order to train the UNKNOWN vector. This prevents using
# untrained, random words during testing.
#
# It can be used to read in CONLL result files from the original contest, or from Daisy, and can
# calculate the F1 score for them.
#
# It can separate the sense and role scores in many ways since the raw comparison data is stored
# in internal data structures. It can rank systems by role and sense scores.
#
# It can read in the output from the official conll2009 perl script so that F1 can be compared.
#
# It can generate Latex tables of the results - PANDAS is better for this, though.
#
# For the sense calculation, it evaluates senses for verbs and creates the list of preds that
# should be tested (versus preds that have the same value always in the test set)
#
# can create heatmaps, and plots of results for various feature combinations, used to generate
# comparative plots in dependency paper.
#
#
import sys
##reload(sys)
##sys.setdefaultencoding('utf-8')
import operator
import re
from pprint import pprint
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import platform
import sqlite3
class WordRepsFileLocations:
    """Process-wide locations of the word-representation files.

    All state is class-level; `init` overrides the default word-list path.
    """
    # '../data/senna_embeddings.txt'
    pWordList = '../data/senna_words.lst'  # vocabulary: one token per line
    pWordRepsList = 'dummy.lst'            # embedding file placeholder
    width = 302                            # embedding vector width

    @staticmethod
    def init(pathToWordList):
        """Point the word list at a different file."""
        WordRepsFileLocations.pWordList = pathToWordList

    @staticmethod
    def pathToWordList():
        return WordRepsFileLocations.pWordList

    @staticmethod
    def pathToWordRepsList():
        return WordRepsFileLocations.pWordRepsList

    @staticmethod
    def wordRepsWidth():
        return WordRepsFileLocations.width
def DBToDF(dbFn, tables= None):
    """Load SQLite tables into a dict of pandas DataFrames keyed by name.

    When `tables` is None every table in the database is loaded.
    """
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///' + dbFn, echo=False)
    names = tables if tables else engine.table_names()
    return {name: pd.read_sql_table(name, engine) for name in names}
def getTargetsFromModels(mi):
    """For each enabled model in `mi`, read its vector DB and return the
    {index: token} mapping of the model's declared target feature."""
    features = {}
    for tag in mi.keys():
        features[tag] = {}
        if mi[tag]['id'] <= 0:
            continue  # disabled model: leave an empty entry
        dbfn = '../data/' + mi[tag]['db']
        frames = DBToDF(dbfn, tables=['Tokens', 'GeneralArch'])
        arch = frames['GeneralArch']
        target = arch[arch['key'] == 'target']['value'].tolist()[0]
        toks = frames['Tokens']
        subset = toks[toks['type'] == target]
        features[tag][target] = dict(zip(subset['ix'].tolist(),
                                         subset['token'].tolist()))
    return features
def parseFile(fn, sents):
    """Append token-list sentences from a CoNLL-style file to `sents`.

    Sentences are separated by blank lines. A trailing sentence that is
    not followed by a blank line is (as before) not appended.
    """
    current = []
    with open(fn) as handle:
        for raw in handle:
            fields = raw.split()
            if fields:
                current.append(fields)
            else:
                sents.append(current)
                current = []
def parseWord(word, tokens):
    """Map a raw corpus token to its index in `tokens` (word -> index dict).

    Handles AMR-corpus punctuation artifacts, pure numbers, and mixed
    digit/letter forms by trying digit-normalized templates in priority
    order before falling back to UNKNOWN.
    """
    corpus_fixups = {'@-@': '-', '@/@': '/', '."': '.', '",': ',', '".': '.',
                     '@:@': ':', '@-': '-', ')?': '?', '!!': '!', '??': '?',
                     '"?': '?', '):': ')'}
    word = corpus_fixups.get(word, word)
    lc = word.lower()
    if lc in tokens:
        return tokens[lc]
    # Pure numbers (optionally signed, with . or , separators) map to '0'.
    if re.match(r"^\-?[\.\,\d]+$", lc):
        return tokens['0']
    # Digit-normalization templates, tried in the original priority order.
    # A candidate is used only when it exists in the vocabulary; otherwise
    # the next pattern is tried (matching the original fall-through).
    candidates = [
        (r'^\'\d+$', lambda m: "UNKNOWN"),
        (r'^\d+(-)\d+$', lambda m: "00"),
        (r'^\d+(\D+)$', lambda m: "0" + m.group(1)),
        (r'^(\D+)\d+$', lambda m: m.group(1) + "0"),
        (r'^\d+(\D+)\d+$', lambda m: "0" + m.group(1) + "0"),
    ]
    for pattern, template in candidates:
        m = re.search(pattern, lc)
        if m:
            t = template(m)
            if t in tokens:
                return tokens[t]
    return tokens["UNKNOWN"]
def strListToCSV(a):
    """Join a 1-D or 2-D list of strings into a CSV string.

    2-D input: each row is comma-joined and terminated with '#'.
    1-D input: the list is mutated in place (unicode items are re-encoded)
    before joining.
    NOTE(review): Python-2 only — relies on `unicode` and on str.decode();
    the chained encode/decode calls look fragile and deserve a closer look.
    """
    st = u''
    if isinstance(a[0], list): # 2d
        for row in a:
            st += ','.join([x.decode("utf8").encode('utf-8').strip() for x in row]) + '#'
    else:
        for i in range(len(a)):
            if isinstance(a[i], unicode):
                a[i] = a[i].encode('ascii', 'ignore').strip().encode('utf8')
        st += u','.join([x for x in a])
    return st
def intCSVToList(a):
    """Parse a comma-separated string of integers into a list of ints."""
    return [int(field) for field in a.split(',')]
def floatCSVToList(a):
    """Parse a '#'-separated string of comma-separated float rows.

    Empty segments (e.g. after a trailing '#') are skipped.
    """
    rows = []
    for segment in a.split('#'):
        if segment:
            rows.append([float(field) for field in segment.split(',')])
    return rows
def listToCSV(a):
    """Serialize a 1-D or 2-D list of numbers to CSV; 2-D rows end in '#'."""
    if isinstance(a[0], list):  # 2-D
        return u''.join(','.join(str(x) for x in row) + '#' for row in a)
    return u'' + ','.join(str(x) for x in a)
def strToList(st):
    """Parse a CSV string back into a list (inverse of listToCSV).

    A '#' marks a 2-D payload: '#'-separated rows of comma-separated ints
    (stray NULs/commas are stripped). Otherwise a flat int list is returned.
    """
    if (st.find('#') > -1):
        twoD = []
        for row in st.strip('\0,#').split('#'):
            z = row.strip('\0,').split(',')
            twoD.append([int(a) for a in z])
        return twoD
    else:
        # bugfix: previously called str.rstrip (the builtin type) instead of
        # st.rstrip (the argument), which blew up at runtime.
        return [int(a) for a in st.rstrip('\0').split(',')]
def AddToAll(a, addend):
    """Return a copy of a 1-D or 2-D numeric list with `addend` added to
    every element (used for 0-based <-> 1-based Lua index shifts)."""
    if isinstance(a[0], list):  # 2-D
        return [[value + addend for value in row] for row in a]
    return [value + addend for value in a]
def getFDef(fn):
    """Load a vocabulary file (one token per line).

    Returns {'tokens': ordered token list, 't2i': token -> index}; for
    duplicate tokens the last occurrence wins, as before.
    """
    # fix: close the file deterministically instead of leaking the handle
    with open(fn) as handle:
        tokens = [line.strip() for line in handle]
    return {'tokens': tokens,
            't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getDBPredicateCount(db):
    """Number of rows in the Predicates table (`db` is a DB cursor)."""
    db.execute("SELECT COUNT(*) FROM Predicates")
    row = db.fetchone()
    return row[0]
def getDBPredicateVector(db, pnum):
    """Fetch predicate `pnum` plus its feature vector, shifting all stored
    (Lua, 1-based) indices down to 0-based Python indices."""
    db.execute("select ix, wfi, pfi, stTarget, ref_si, ref_pi from Predicates WHERE ix = ?", (pnum, ))
    _, _, pfi, stTarget, ref_si, ref_pi = db.fetchone()
    # Stored as Lua 1-based indices; convert to 0-based.
    ref_si -= 1
    ref_pi -= 1
    targetIndices = AddToAll(strToList(stTarget), -1)
    db.execute("select ix, stVector FROM PredicateFeatures WHERE ix = ?", (pfi, ))
    pfi, stVector = db.fetchone()
    vectorIndices = AddToAll(strToList(stVector), -1)
    return targetIndices, vectorIndices, ref_si, ref_pi
def openDB(fn):
    """Open a SQLite connection to `fn`, returning byte-string text values."""
    print 'opening ', fn
    conn = sqlite3.connect(fn)
    # Return text columns as str rather than unicode.
    conn.text_factory = str # this is new...
    return conn
def initializeAMRVectorDatabase(fn):
    """(Re)create the AMR vector-DB schema in `fn` and return the connection.

    All tables are dropped and recreated; the schema stores feature token
    lists, per-sentence feature CSVs and the word-feature architecture so
    the Lua side can assemble training vectors from a programmable recipe.
    """
    db = openDB(fn)
    c = db.cursor()
    # Modified tables so that file size is smaller, moves more work into Lua, but recipe is programmable
    c.execute( 'DROP TABLE IF EXISTS GeneralArch' )
    c.execute( 'CREATE TABLE IF NOT EXISTS GeneralArch( ix int, key text, value text, PRIMARY KEY (ix))' )
    c.execute( 'DROP TABLE IF EXISTS DBFeatures' )
    c.execute( 'CREATE TABLE IF NOT EXISTS DBFeatures( type text, ix int, CSV text, PRIMARY KEY (ix, type))' )
    c.execute( 'DROP TABLE IF EXISTS Tokens' )
    c.execute( 'CREATE TABLE IF NOT EXISTS Tokens( type text, ix int, token text, PRIMARY KEY (type, ix) )' )
    c.execute( 'DROP TABLE IF EXISTS Items' )
    c.execute( 'CREATE TABLE IF NOT EXISTS Items( ix int, type text, sentIX int, sentenceLen int, pWordIX int, targetCSV text,' + \
               ' PRIMARY KEY (ix, type))' )
    c.execute( 'DROP TABLE IF EXISTS Sentences' )
    c.execute( 'CREATE TABLE IF NOT EXISTS Sentences( ix int, type text, fType text, fCSV text, PRIMARY KEY (ix, type, fType))' )
    c.execute( 'DROP TABLE IF EXISTS WDFArch' )
    c.execute( 'CREATE TABLE IF NOT EXISTS WDFArch( ix int, filename text, size int, width int, learningRate float, name text, clone text, ' + \
               ' ptrName text, fType text, offset int, PRIMARY KEY (ix))' )
    db.commit()
    return db
def summarizeDataFrame(inDF, groupCol, excludeVal=None, displayCols=[], countCol=None):
    """Frequency summary of `groupCol`: per-value counts plus cumulative
    sum/percentage columns, sorted by descending count.

    Non-object (non-string) group columns yield an empty frame; rows whose
    group value equals `excludeVal` are dropped first.
    """
    if str(inDF[groupCol].dtype) != 'object':
        summary = inDF[0:0]  # same columns, no rows
    else:
        summary = inDF[inDF[groupCol] != excludeVal].copy()
    if not countCol:
        countCol = 'count'
    summary[countCol] = 1
    summary = summary.groupby(groupCol).count().sort_values(by=['count'], ascending=[0])
    total = summary[countCol].sum()
    summary['cum_sum'] = summary[countCol].cumsum()
    summary['cum_perc'] = 100 * summary.cum_sum / total
    summary['perc'] = 100 * summary[countCol] / total
    summary['rank'] = range(len(summary.index))
    if displayCols:
        return summary[[countCol, 'cum_sum', 'cum_perc', 'rank'] + displayCols]
    return summary[[countCol, 'perc', 'cum_sum', 'cum_perc']]
def summarizeDataFrameMultiCols(inDF, groupCols, excludeVal=None, displayCols=[], countCol=None):
    """Pooled frequency summary over several columns.

    Values from all `groupCols` (minus NaNs and `excludeVal`) are stacked
    into one 'labels' series, then counted, sorted and accumulated.
    """
    pooled = []
    for col in groupCols:
        pooled += inDF[inDF[col].astype(str) != excludeVal][col].dropna().tolist()
    if not countCol:
        countCol = 'count'
    summary = pd.DataFrame()
    summary['labels'] = pd.Series(pooled, name='labels')
    summary[countCol] = 1
    summary = summary.groupby('labels').count().sort_values(by=['count'], ascending=[0])
    summary['cum_sum'] = summary[countCol].cumsum()
    summary['cum_perc'] = 100 * summary.cum_sum / summary[countCol].sum()
    summary['rank'] = range(len(summary.index))
    return summary
def summarizeDataFrameMultiColPairs(inDF, firstCols, secondCols ):
    """Count label pairs formatted as 'first(second'.

    Values are pooled column-by-column from `firstCols` and `secondCols`
    and paired positionally, so both pools must line up one-to-one.
    """
    lhs = []
    rhs = []
    for col in firstCols:
        lhs += inDF[col].dropna().tolist()
    for col in secondCols:
        rhs += inDF[col].dropna().tolist()
    combined = [left + '(' + rhs[i] for i, left in enumerate(lhs)]
    summary = pd.DataFrame()
    summary['labels'] = pd.Series(combined, name='labels')
    countCol = 'count'
    summary[countCol] = 1
    summary = summary.groupby('labels').count().sort_values(by=['count'], ascending=[0])
    summary['cum_sum'] = summary[countCol].cumsum()
    summary['cum_perc'] = 100 * summary.cum_sum / summary[countCol].sum()
    summary['rank'] = range(len(summary.index))
    return summary
def capsTag(w):
    """Classify a word's capitalisation: nocaps / initcap / allcaps / hascap
    (checked in that order, so a single capital letter is 'initcap')."""
    uppers = [1 if ch.isupper() else 0 for ch in w]
    total = sum(uppers)
    if total == 0:
        return 'nocaps'
    if total == 1 and uppers[0] == 1:
        return 'initcap'
    if total == len(w):
        return 'allcaps'
    return 'hascap'
def getIndex(w, feature, defaultToken='UNKNOWN'):
    """Index of `w` in feature['t2i']; empty/NaN values map to 'O', and
    out-of-vocabulary tokens fall back to `defaultToken`'s index."""
    if (w == '') or pd.isnull(w):
        w = 'O'
    t2i = feature['t2i']
    return t2i[w] if w in t2i else t2i[defaultToken]
def getdistSGFeatureInfo(vectors):
    """Pool the 'distSG' column across all sentence types into a token list
    (last entry duplicated twice as UNKNOWN/PADDING stand-ins) plus t2i.

    Returns an empty feature dict if any frame lacks a 'distSG' column.
    """
    f = {'tokens': [], 't2i': {}}
    pooled = []
    for sType in vectors:
        if 'distSG' not in vectors[sType]:
            return f
        pooled += vectors[sType]['distSG'].tolist()
    # These duplicated entries should really be UNK/PAD vectors (see original note).
    pooled.append(pooled[-1])
    pooled.append(pooled[-1])
    f['tokens'] = pooled
    f['t2i'] = {tok: ix for ix, tok in enumerate(pooled)}
    return f
def getWordsFeatureInfo(dtVectors, randomWords, randomWordsCutoffPercent):
    """Build the word vocabulary: either the fixed SENNA word list, or the
    training data's most frequent words (head of the distribution up to
    `randomWordsCutoffPercent`) plus UNKNOWN/PADDING."""
    if not randomWords:
        return getFDef(WordRepsFileLocations.pathToWordList())
    sdf = summarizeDataFrame(dtVectors, 'words', excludeVal='O',
                             displayCols=['relSrc', 'ar0_arg', 'ar1_arg', 'ar2_arg'])
    kept = sdf[sdf['cum_perc'] <= randomWordsCutoffPercent].index.tolist()
    tokens = kept + ['UNKNOWN', 'PADDING']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getCapsFeatureInfo():
    """Fixed five-token capitalisation vocabulary (matches capsTag output)."""
    tokens = ['PADDING', 'allcaps', 'hascap', 'initcap', 'nocaps']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getDistanceFeatureInfo(maxD):
    """Signed-distance vocabulary: string tokens '-maxD' .. 'maxD'."""
    tokens = ['%d' % d for d in range(-maxD, maxD + 1)]
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getSuffixFeatureInfo(dtVectors, suffixCutoffPercent):
    """Vocabulary of frequent two-character lowercase suffixes
    (+ UNKNOWN/PADDING).

    Side effect (kept from the original): writes a 'suffix' column into
    `dtVectors`.
    """
    dtVectors['suffix'] = [w.lower()[-2:] for w in dtVectors['words'].tolist()]
    sdf = summarizeDataFrame(dtVectors, 'suffix')
    kept = sdf[sdf['cum_perc'] <= suffixCutoffPercent].index.tolist()
    tokens = kept + ['UNKNOWN', 'PADDING']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getConceptFeatureInfo(dtVectors, cutoffPercent):
    """Vocabulary of frequent txBIOES concept tags: 'O' first, then the
    head of the distribution up to `cutoffPercent`, then UNKNOWN/PADDING."""
    sdf = summarizeDataFrame(dtVectors, 'txBIOES', excludeVal='O')
    kept = sdf[sdf['cum_perc'] <= cutoffPercent].index.tolist()
    tokens = ['O'] + kept + ['UNKNOWN', 'PADDING']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getNERFeatureInfo(dtVectors):
    """Vocabulary of all observed NER labels: 'O' first, then every label,
    then UNKNOWN/PADDING (no frequency cutoff)."""
    sdf = summarizeDataFrame(dtVectors, 'NERLabel', excludeVal='O')
    tokens = ['O'] + sdf.index.tolist() + ['UNKNOWN', 'PADDING']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getArgsFeatureInfo(dtVectors, cutoffPercent):
    """Vocabulary of frequent core-argument labels pooled over ar0..ar3:
    'O' first, head of the distribution, then UNKNOWN."""
    argCols = ['ar0_arg', 'ar1_arg', 'ar2_arg', 'ar3_arg']
    sdf = summarizeDataFrameMultiCols(dtVectors, argCols, excludeVal='O')
    kept = sdf[sdf['cum_perc'] <= cutoffPercent].index.tolist()
    tokens = ['O'] + kept + ['UNKNOWN']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getNargsFeatureInfo(dtVectors, cutoffPercent):
    """Vocabulary of frequent non-core argument labels pooled over
    nar0..nar3: 'O' first, head of the distribution, then UNKNOWN."""
    nargList = [ 'nar0_lbl', 'nar1_lbl', 'nar2_lbl', 'nar3_lbl' ]
    sdf = summarizeDataFrameMultiCols(dtVectors, nargList,
                                        excludeVal='O' )
    # Diagnostic dump of the label distribution (Python 2 print).
    print sdf
    f={}
    f['tokens'] = ['O'] + sdf[sdf['cum_perc']<=cutoffPercent].index.tolist() + ['UNKNOWN']
    f['t2i'] = dict( [(x, y) for y, x in enumerate(f['tokens'])] )
    return f
def getAttrsFeatureInfo(dtVectors, cutoffPercent):
    """Vocabulary of frequent attribute label(value pairs (formatted as
    'label(value' by summarizeDataFrameMultiColPairs): 'O' first, head of
    the distribution, then UNKNOWN."""
    attrList1 = [ 'attr0_lbl', 'attr1_lbl', 'attr2_lbl', 'attr3_lbl' ]
    attrList2 = [ 'attr0_val', 'attr1_val', 'attr2_val', 'attr3_val' ]
    sdf = summarizeDataFrameMultiColPairs(dtVectors, attrList1, attrList2 )
    # Diagnostic dump of the pair distribution (Python 2 print).
    print sdf
    f={}
    f['tokens'] = ['O'] + sdf[sdf['cum_perc']<=cutoffPercent].index.tolist() + ['UNKNOWN']
    f['t2i'] = dict( [(x, y) for y, x in enumerate(f['tokens'])] )
    return f
def getUnqualifiedAttrsFeatureInfo(dtVectors, cutoffPercent):
    """Vocabulary of frequent attribute labels alone (no values), pooled
    over attr0..attr3: 'O' first, head of the distribution, then UNKNOWN."""
    sdf = summarizeDataFrameMultiCols(dtVectors, [ 'attr0_lbl', 'attr1_lbl', 'attr2_lbl', 'attr3_lbl' ] )
    # Diagnostic dump of the label distribution (Python 2 print).
    print sdf
    f={}
    f['tokens'] = ['O'] + sdf[sdf['cum_perc']<=cutoffPercent].index.tolist() + ['UNKNOWN']
    f['t2i'] = dict( [(x, y) for y, x in enumerate(f['tokens'])] )
    return f
def getNCATFeatureInfo(dtVectors):
    """Vocabulary of name categories covering 97% of the mass:
    'O' first, then the head of the distribution, then UNKNOWN/PADDING."""
    sdf = summarizeDataFrame(dtVectors, 'nameCategory', excludeVal='O',
                             displayCols=['WCAT0', 'WCAT1', 'WCAT2', 'words'])
    kept = sdf[sdf['cum_perc'] <= 97.0].index.tolist()
    tokens = ['O'] + kept + ['UNKNOWN', 'PADDING']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def getWCATFeatureInfo(dtVectors):
    """Vocabulary of word categories pooled from columns WCAT0..WCAT7,
    keeping the head of the distribution (76%), plus UNKNOWN/PADDING."""
    stacked = pd.DataFrame()
    for i in range(8):
        col = dtVectors[['WCAT%d' % i]]
        col.columns = ['WCAT']
        stacked = stacked.append(col)
    sdf = summarizeDataFrame(stacked, 'WCAT', excludeVal='O', displayCols=[])
    kept = sdf[sdf['cum_perc'] <= 76.0].index.tolist()
    tokens = ['O'] + kept + ['UNKNOWN', 'PADDING']
    return {'tokens': tokens, 't2i': {tok: ix for ix, tok in enumerate(tokens)}}
def checkFeatures(dtVectors, features):
    """Diagnostic: print feature vocabulary sizes and list the words that
    round-trip to UNKNOWN through parseWord (i.e. out-of-vocabulary)."""
    for key in features.keys():
        print key, len(features[key]['tokens'])
    sdf = summarizeDataFrame(dtVectors, 'words', excludeVal='O', displayCols=[ 'relSrc', 'ar0_arg', 'ar1_arg', 'ar2_arg' ])
    wordList = sdf.index.tolist()
    # Map each word to an index and back; words landing on UNKNOWN are OOV.
    translatedWords = [parseWord(w, features['words']['t2i']) for w in wordList]
    backToWords = [features['words']['tokens'][ix] for ix in translatedWords]
    sdf['translatedWords'] = translatedWords
    sdf['backToWords'] = backToWords
    unknownWords = sdf[sdf['backToWords']=='UNKNOWN']
    print unknownWords.head(100)
def createAMRL0Vectors(inFn, dbFn, L0CutoffPercent, keepSense, sTypes= ['test','training','dev'], vectors=None, featuresDB=None, maxSents=None, useNER=True):
    """Build the L0 (concept-tagging) vector database at `dbFn`.

    Loads pickled vectors from `inFn` (or uses `vectors` directly), derives
    or re-reads the feature vocabularies, writes tokens/architecture rows,
    then emits per-sentence feature CSVs (words/caps/suffix/L0 and
    optionally NER, all shifted to 1-based Lua indices).

    Returns {sType: DataFrame} mapping with per-word representation indices
    for downstream DAMR construction.

    NOTE(review): the mutable default `sTypes=[...]` is only ever read here,
    so it is harmless, but it is the classic shared-default trap.
    """
    wordDF={}
    if inFn:
        vectors = pickle.load( open( inFn ) )
    vectors = preProcessVectors(vectors, sTypes, keepSense)
    for sType in sTypes:
        # lazy way to correct initialization of NERLabel from '' to 'O'.
        vectors[sType][ vectors[sType]['NERLabel']=='']['NERLabel'] = 'O' # lazy way to correct initialization to '', can remove
    db = initializeAMRVectorDatabase(dbFn)
    # ================================
    # read features from the training database, or generate them?
    if featuresDB:
        _, features, _ = readAMRVectorDatabase(featuresDB)
    else:
        features = getAMRFeatures(vectors, L0CutoffPercent)
    print 'add feature lists to db'
    featureNames = ['suffix', 'caps', 'words', 'L0']
    if useNER:
        featureNames += ['ner']
    # Token tables are written 1-based for the Lua consumer.
    for f in featureNames:
        for i,t in enumerate(features[f]['tokens']):
            db.execute("INSERT INTO Tokens (type, ix, token) VALUES (?, ?, ?)", (f, i+1, t))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)", ( 1, 'network', 'BDLSTM' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)", ( 2, 'output', 'viterbi' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)", ( 3, 'target', 'L0' ))
    cmd = "INSERT INTO WDFArch (ix, name, filename, size, width, learningRate, clone, ptrName, fType, offset) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    db.execute(cmd, ( 1, 'words', WordRepsFileLocations.pathToWordRepsList(), len(features['words']['tokens']), WordRepsFileLocations.wordRepsWidth(), 2.0, None, 'wordP', 'words', 0 ) )
    db.execute(cmd, ( 2, 'caps', None, len(features['caps']['tokens']), 5, 0.6, None, 'wordP', 'caps', 0 ) )
    db.execute(cmd, ( 3, 'suffix', None, len(features['suffix']['tokens']), 5, 0.6, None, 'wordP', 'suffix', 0 ) )
    if (useNER):
        db.execute(cmd, ( 4, 'ner', None, len(features['ner']['tokens']), 5, 0.6, None, 'wordP', 'ner', 0 ) )
    for sType in sTypes:
        df = vectors[sType].dropna(subset = ['words']).copy()
        fullWordList = df['words'].tolist()
        BIOESList = df['txBIOES'].tolist()
        # Per-word feature indices (0-based here; shifted +1 when stored).
        df['suffIX'] = [getIndex(w.lower()[-2:], features['suffix'], defaultToken='UNKNOWN') for w in fullWordList]
        df['capsIX'] = [getIndex(capsTag(w), features['caps'], defaultToken='UNKNOWN') for w in fullWordList]
        df['wRepIX'] = [parseWord(w, features['words']['t2i']) for w in fullWordList]
        df['L0IX'] = [getIndex(b, features['L0'], defaultToken='UNKNOWN' ) for b in BIOESList]
        if useNER:
            df['NERIX'] = [getIndex(n, features['ner'], defaultToken='UNKNOWN' ) for n in df['NERLabel'].tolist()]
        # merge wRepIX into vectors, for DAMR construction
        wordDF[sType] = df[['sentIX','wordIX', 'words', 'wRepIX']]
        maxIX = int(df[['sentIX']].values.max())
        if maxSents:
            maxIX= maxSents
        for sentIX in range(maxIX+1):
            if not sentIX % 100:
                print 'creating vectors ', sType, sentIX, maxIX
            z = df[ df['sentIX'] == sentIX]
            if z.shape[0]>0:
                sentLength = len(z['words'].tolist())
                tokensCSV = ','.join(z['words'].tolist())
                # AddToAll(..., 1): convert to Lua's 1-based indexing.
                suffixCSV = listToCSV(AddToAll(z['suffIX'].tolist(), 1))
                capsCSV = listToCSV(AddToAll(z['capsIX'].tolist(), 1))
                wordsCSV = listToCSV(AddToAll(z['wRepIX'].tolist(), 1))
                L0CSV = listToCSV(AddToAll(z['L0IX'].tolist(), 1))
                if useNER:
                    nerCSV = listToCSV(AddToAll(z['NERIX'].tolist(), 1))
                cmd = "INSERT INTO Sentences (ix, type, fType, fCSV) VALUES (?, ?, ?, ?)"
                db.execute(cmd, ( sentIX+1, sType, "words", wordsCSV ))
                db.execute(cmd, ( sentIX+1, sType, "caps", capsCSV ))
                db.execute(cmd, ( sentIX+1, sType, "suffix", suffixCSV ))
                db.execute(cmd, ( sentIX+1, sType, "tokens", tokensCSV ))
                db.execute(cmd, ( sentIX+1, sType, "L0", L0CSV ))
                if (useNER):
                    db.execute(cmd, ( sentIX+1, sType, "ner", nerCSV ))
                predIX = sentIX
                db.execute("INSERT INTO Items (ix, type, sentIX, sentenceLen, targetCSV) VALUES (?, ?, ?, ?, ?)",
                           ( predIX+1, sType, sentIX+1, sentLength, L0CSV ))
    db.commit()
    db.close()
    return wordDF
def preProcessVectors(vectors, sTypes, keepSense):
    """Normalize the raw vector DataFrames in-place before vectorization.

    Two passes over each split named in sTypes:
      1. (only if keepSense) rewrite 'S_txPred' tags in the 'txBIOES'
         column to 'S_txPred-01' / 'S_txPred-02' when the row's 'sense'
         is '01' or '02', so those two senses become distinct targets.
      2. split the tab-separated 'WKCategory' column into up to eight new
         columns 'WCAT0'..'WCAT7' (missing slots stay NaN).

    vectors   -- dict mapping split name -> pandas DataFrame; mutated in place
    sTypes    -- iterable of split names to process (e.g. ['test','training','dev'])
    keepSense -- when truthy, keep the 01/02 sense distinction on S_txPred tags
    Returns the same (mutated) vectors dict.
    """
    if (keepSense):
        for sType in sTypes:
            temp = vectors[sType]['txBIOES'].tolist()
            sense = vectors[sType]['sense'].tolist()
            print len(temp), len(sense)
            for i, t in enumerate(temp):
                if t=='S_txPred':
                    if (sense[i] in ['01','02']): # NEW, only distinguish between 01 and 02.
                        t = t + '-' + sense[i]
                        temp[i]=t
            # NOTE(review): assigning pd.Series(temp) aligns by positional
            # index; assumes the DataFrame has a default RangeIndex -- confirm.
            vectors[sType]['txBIOES'] = pd.Series(temp)
    # split WKCategory by /t to create the WCAT0-3
    for sType in sTypes:
        temp = vectors[sType]['WKCategory'].tolist()
        wcat = []
        # pre-fill eight per-row slots with NaN (the "missing" marker)
        for i in range(8):
            wcat.append( [np.NaN] * len(temp) )
        for ti, toks in enumerate(temp):
            if not pd.isnull(toks):
                for i,t in enumerate(toks.split('\t')):
                    # keep only non-empty pieces; silently drop pieces past slot 7
                    if len(t) and (i<len(wcat)):
                        wcat[i][ti] = t
        for i in range(len(wcat)):
            vectors[sType]['WCAT%d'%i] = pd.Series(wcat[i])
    return vectors
def getAMRFeatures(vectors, L0CutoffPercent):
randomWords = False
randomWordsCutoffPercent = 99.5
suffixCutoffPercent = 95
print 'get Feature Lists'
if ('dev' in vectors) and ('training' in vectors):
devTestVectors = vectors['training'].append(vectors['dev'])
else:
devTestVectors = vectors['test']
devTestL0Vectors = devTestVectors.copy().dropna(subset = ['words']) # Level 0 only
pd.set_option('display.width', 10000)
pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', 200)
features = {}
features['distSG'] = getdistSGFeatureInfo(vectors)
features['words'] = getWordsFeatureInfo(devTestL0Vectors, randomWords, randomWordsCutoffPercent)
features['suffix'] = getSuffixFeatureInfo(devTestL0Vectors, suffixCutoffPercent)
features['caps'] = getCapsFeatureInfo()
features['distance'] = getDistanceFeatureInfo(10)
features['L0'] = getConceptFeatureInfo(devTestL0Vectors, L0CutoffPercent)
features['ner'] = getNERFeatureInfo(devTestL0Vectors)
features['args'] = getArgsFeatureInfo(devTestL0Vectors, 100.0)
features['nargs'] = getNargsFeatureInfo(devTestL0Vectors, 99.0)
features['attr'] = getAttrsFeatureInfo(devTestL0Vectors, 100.0)
features['ncat'] = getNCATFeatureInfo(devTestL0Vectors)
features['wcat'] = getWCATFeatureInfo(devTestL0Vectors)
return features
def getForcedProb(width, forcedIndex):
    """Return a length-`width` probability vector with 0.99 at
    `forcedIndex` and the remaining 0.01 spread evenly over the other
    width-1 positions."""
    peak = 0.99
    rest = (1.0 - peak) / (width - 1)
    return [peak if ix == forcedIndex else rest for ix in range(width)]
def createAMRL0NcatVectors(inFn, dbFn, L0CutoffPercent, keepSense, sTypes= ['test','training','dev'],
                           vectors=None, featuresDB=None, maxSents=None, padVectorSG=None,
                           L0OnlyFromFeaturesDB=False, useDistSG=False):
    """Write the name-category (ncat) training database.

    Loads (or receives) the vector DataFrames, selects/derives the feature
    dictionaries, then writes Tokens, GeneralArch, WDFArch, Sentences and
    Items tables into a fresh sqlite DB at dbFn.  Target of the network
    described here is 'ncat'.

    inFn      -- pickle file of vectors dict; if falsy, `vectors` must be given
    dbFn      -- output sqlite database filename
    featuresDB-- optional existing DB to read feature lists from
    maxSents  -- optional cap on the number of sentences written per split
    padVectorSG / L0OnlyFromFeaturesDB / useDistSG -- see use-case comments below
    NOTE(review): mutable default for sTypes is shared across calls; it is
    only iterated here, so harmless, but worth confirming nothing mutates it.
    """
    if inFn:
        vectors = pickle.load( open( inFn ) )
        vectors = preProcessVectors(vectors, sTypes, keepSense)
    # There are three use cases:
    #
    # 0) create all features based on statistics of the data
    # 1) read all features from a database ( for running data forward through a generated model that does not use distSG )
    # 1s) read all features from a database, but use the DistSG as input ( for running data forward through a generated model that does not use distSG )
    # 2) useDistSG=True, read only L0 feature from a database, generate the others.
    #    use the distSG column as input.
    if L0OnlyFromFeaturesDB: # Used to form hard decisions from SG during training
        _, vectorFeatures, _ = readAMRVectorDatabase(featuresDB)
        features = getAMRFeatures(vectors, L0CutoffPercent)
        features['L0'] = vectorFeatures['L0']
    elif featuresDB:
        _, features, _ = readAMRVectorDatabase(featuresDB)
    else:
        features = getAMRFeatures(vectors, L0CutoffPercent)
    db = initializeAMRVectorDatabase(dbFn)
    if useDistSG:
        features['distSG'] = getAMRFeatures(vectors, L0CutoffPercent)['distSG']
        # A token longer than one char means the SG input is a distributed
        # (probability CSV) representation rather than a hard label index.
        if len(features['distSG']['tokens'][0]) > 1:
            print 'SG feature is distributed and is %d by %d wide' % (len(features['distSG']['tokens']), features['distSG']['tokens'][0].count(',')+ 1)
            # set the last vector to be the Padding vector, torch is coded to use this
            if not padVectorSG:
                SGWidth = len( features['distSG']['tokens'][0].split(',') )
                padVectorSG = listToCSV(getForcedProb(SGWidth, 0))
            features['distSG']['tokens'].append(padVectorSG)
            features['distSG']['t2i']['PADDING'] = len(features['distSG']['tokens'])-1
            #features['distSG']['t2i']['UNKNOWN'] = len(features['distSG']['tokens'])-1     # fix added 6/4/17 during MNLI processing...
            SGFeature = 'distSG'
            SGWidth = len( features['distSG']['tokens'][0].split(',') )
            SGColumn = 'distSG'
            SGSource = 'DB:distSG'  # this tells daisyluTorch to preload table from this DB
            print 'storing distSG feature to DB'
            for i,t in enumerate(features['distSG']['tokens']):
                db.execute("INSERT INTO DBFeatures (type, ix, CSV) VALUES (?, ?, ?)", ('distSG', i+1, t))
            SGLrate = 0.0
        else:
            print 'Using hard decision from SG'
            # Map each hard SG index back to its L0 token string (1-based -> 0-based).
            for ss in ['training', 'dev', 'test']:
                iList = vectors[ss]['distSG'].tolist()
                rList = [features['L0']['tokens'][int(x)-1] for x in iList]
                vectors[ss]['distSG_Prob'] = rList
            SGFeature ='L0'
            SGWidth = 10
            SGColumn = 'distSG_Prob'
            SGSource = None
            SGLrate = 1.0
    else:
        SGFeature ='L0'
        SGWidth = 10
        SGColumn = 'txBIOES'
        SGSource = None
        SGLrate = 1.0
    print(features.keys())
    print(features['ncat']['tokens'])
    print 'add feature lists to NCat db'
    # Persist each feature's token list (1-based ix for the lua consumer).
    for f in ['suffix', 'caps', 'words', 'L0', 'wcat', 'ncat']:
        print 'storing %s feature to DB' % f
        for i,t in enumerate(features[f]['tokens']):
            db.execute("INSERT INTO Tokens (type, ix, token) VALUES (?, ?, ?)", (f, i+1, t))
    # Network architecture description consumed by the torch side.
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 1, 'network',    'BDLSTM' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 2, 'output',     'viterbi' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 3, 'target',     'ncat' ))
    cmd = "INSERT INTO WDFArch (ix, name, filename, size, width, learningRate, clone, ptrName, fType, offset) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    db.execute(cmd, ( 1, 'words',   WordRepsFileLocations.pathToWordRepsList(),  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, None, 'wordP', 'words', 0 ))
    db.execute(cmd, ( 2, 'caps',    None,  len(features['caps']['tokens']),    5,   0.6, None, 'wordP', 'caps', 0 ))
    db.execute(cmd, ( 3, 'suffix',  None,  len(features['suffix']['tokens']),  5,   0.6, None, 'wordP', 'suffix', 0 ))
    db.execute(cmd, ( 4, SGFeature, SGSource,  len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, None, 'wordP', SGFeature, 0 ))
    # wcat1..wcat7 clone the 'wcat' embedding (clone column = 'wcat').
    db.execute(cmd, ( 5, 'wcat',    None,  len(features['wcat']['tokens']),    5,   0.6, None, 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 6, 'wcat1',   None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 7, 'wcat2',   None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 8, 'wcat3',   None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 9, 'wcat4',   None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 10, 'wcat5',  None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 11, 'wcat6',  None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    db.execute(cmd, ( 12, 'wcat7',  None,  len(features['wcat']['tokens']),    5,   0.6, 'wcat', 'wordP', 'wcat', 0 ))
    for sType in sTypes:   # add ood
        predIX=0
        df = vectors[sType].dropna(subset = ['words']).copy()
        fullWordList  = df['words'].tolist()
        BIOESList     = df[SGColumn].tolist()     # cascaded from L0 nn
        fullWcat0List  = df['WCAT0'].tolist()
        fullWcat1List  = df['WCAT1'].tolist()
        fullWcat2List  = df['WCAT2'].tolist()
        fullWcat3List  = df['WCAT3'].tolist()
        fullWcat4List  = df['WCAT4'].tolist()
        fullWcat5List  = df['WCAT5'].tolist()
        fullWcat6List  = df['WCAT6'].tolist()
        fullWcat7List  = df['WCAT7'].tolist()
        fullNcatList  = df['nameCategory'].tolist()
        #----------
        # Convert raw tokens to feature indices (0-based here; +1 applied
        # at CSV time for the lua side).
        df['suffIX']  = [getIndex(w.lower()[-2:], features['suffix'], defaultToken='UNKNOWN') for w in fullWordList]
        df['capsIX']  = [getIndex(capsTag(w), features['caps'], defaultToken='UNKNOWN') for w in fullWordList]
        df['wordIX']  = [parseWord(w, features['words']['t2i']) for w in fullWordList]
        df['L0IX']    = [getIndex(b, features[SGFeature],  defaultToken='UNKNOWN' ) for b in BIOESList]
        df['ncat']    = [getIndex(a, features['ncat'], defaultToken='UNKNOWN') for a in fullNcatList]
        df['wcat0']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat0List]
        df['wcat1']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat1List]
        df['wcat2']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat2List]
        df['wcat3']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat3List]
        df['wcat4']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat4List]
        df['wcat5']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat5List]
        df['wcat6']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat6List]
        df['wcat7']    = [getIndex(a, features['wcat'], defaultToken='UNKNOWN') for a in fullWcat7List]
        maxIX = int(df[['sentIX']].values.max())
        if maxSents:
            maxIX= maxSents
        for sentIX in range(maxIX+1):
            z = df[ df['sentIX'] == sentIX]
            if z.shape[0]>0:
                sentLength = len(z['words'].tolist())
                #BIOES      = z['txBIOES'].tolist()
                wordsCSV   = listToCSV(AddToAll(z['wordIX'].tolist(), 1))
                L0CSV      = listToCSV(AddToAll(z['L0IX'].tolist(),   1))
                suffixCSV  = listToCSV(AddToAll(z['suffIX'].tolist(), 1))
                capsCSV    = listToCSV(AddToAll(z['capsIX'].tolist(), 1))
                ncatCSV    = listToCSV(AddToAll(z['ncat'].tolist(), 1))
                wcat0CSV    = listToCSV(AddToAll(z['wcat0'].tolist(), 1))
                wcat1CSV    = listToCSV(AddToAll(z['wcat1'].tolist(), 1))
                wcat2CSV    = listToCSV(AddToAll(z['wcat2'].tolist(), 1))
                wcat3CSV    = listToCSV(AddToAll(z['wcat3'].tolist(), 1))
                wcat4CSV    = listToCSV(AddToAll(z['wcat4'].tolist(), 1))
                wcat5CSV    = listToCSV(AddToAll(z['wcat5'].tolist(), 1))
                wcat6CSV    = listToCSV(AddToAll(z['wcat6'].tolist(), 1))
                wcat7CSV    = listToCSV(AddToAll(z['wcat7'].tolist(), 1))
                cmd = "INSERT INTO Sentences (ix, type, fType, fCSV) VALUES (?, ?, ?, ?)"
                db.execute(cmd, ( sentIX+1, sType, "words",     wordsCSV ))
                db.execute(cmd, ( sentIX+1, sType, SGFeature,   L0CSV ))
                db.execute(cmd, ( sentIX+1, sType, "caps",      capsCSV ))
                db.execute(cmd, ( sentIX+1, sType, "suffix",    suffixCSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat",      wcat0CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat1",     wcat1CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat2",     wcat2CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat3",     wcat3CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat4",     wcat4CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat5",     wcat5CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat6",     wcat6CSV ))
                db.execute(cmd, ( sentIX+1, sType, "wcat7",     wcat7CSV ))
                predIX = sentIX
                # NOTE(review): 'i' below is left over from the token-storage
                # loop above (not a per-word index), so pWordIX gets a constant,
                # likely unintended value here -- compare with the Arg/Narg
                # variants where i is the predicate word index.  Confirm intent.
                db.execute("INSERT INTO Items (ix, type, sentIX, sentenceLen, targetCSV) VALUES (?, ?, ?, ?, ?)",
                           ( predIX+1, sType, sentIX+1, sentLength, ncatCSV ))
                db.commit()
    db.commit()
    db.close()
    return wordDF
def createAMRL0ArgVectors(inFn, dbFn, L0CutoffPercent, keepSense, sTypes= ['test','training','dev'],
                          vectors=None, featuresDB=None, maxSents=None, padVectorSG=None,
                          L0OnlyFromFeaturesDB=False, useDistSG=False):
    """Write the argument-labeling (args) training database.

    Same overall shape as createAMRL0NcatVectors: choose/derive feature
    dictionaries, then populate Tokens, GeneralArch, WDFArch, Sentences and
    Items in a fresh sqlite DB at dbFn.  One Items row is written per
    predicate word; its targetCSV places each argument label (ar0..ar3) at
    that argument's word position in the sentence.
    """
    if inFn:
        vectors = pickle.load( open( inFn ) )
        vectors = preProcessVectors(vectors, sTypes, keepSense)
    # There are three use cases:
    #
    # 0) create all features based on statistics of the data
    # 1) read all features from a database ( for running data forward through a generated model )
    # 2) useDistSG=True, read only L0 feature from a database, generate the others.
    #    use the distSG column as input.
    if L0OnlyFromFeaturesDB: # Used to form hard decisions from SG during training
        _, vectorFeatures, _ = readAMRVectorDatabase(featuresDB)
        features = getAMRFeatures(vectors, L0CutoffPercent)
        features['L0'] = vectorFeatures['L0']
    elif featuresDB:
        _, features, _ = readAMRVectorDatabase(featuresDB)
    else:
        features = getAMRFeatures(vectors, L0CutoffPercent)
    db = initializeAMRVectorDatabase(dbFn)
    if useDistSG:
        features['distSG'] = getAMRFeatures(vectors, L0CutoffPercent)['distSG']
        # A token longer than one char means the SG input is distributed
        # (probability CSV) rather than a hard label index.
        if len(features['distSG']['tokens'][0]) > 1:
            print 'SG feature is distributed and is %d by %d wide' % (len(features['distSG']['tokens']), features['distSG']['tokens'][0].count(',')+ 1)
            # set the last vector to be the Padding vector, torch is coded to use this
            if not padVectorSG:
                SGWidth = len( features['distSG']['tokens'][0].split(',') )
                padVectorSG = listToCSV(getForcedProb(SGWidth, 0))
            features['distSG']['tokens'].append(padVectorSG)
            features['distSG']['t2i']['PADDING'] = len(features['distSG']['tokens'])-1
            SGFeature = 'distSG'
            SGWidth = len( features['distSG']['tokens'][0].split(',') )
            SGColumn = 'distSG'
            SGSource = 'DB:distSG'  # this tells daisyluTorch to preload table from this DB
            print 'storing distSG feature to DB'
            for i,t in enumerate(features['distSG']['tokens']):
                db.execute("INSERT INTO DBFeatures (type, ix, CSV) VALUES (?, ?, ?)", ('distSG', i+1, t))
            SGLrate = 0.0
        else:
            print 'Using hard decision from SG'
            # Map each hard SG index back to its L0 token string (1-based -> 0-based).
            for ss in ['training', 'dev', 'test']:
                iList = vectors[ss]['distSG'].tolist()
                rList = [features['L0']['tokens'][int(x)-1] for x in iList]
                vectors[ss]['distSG_Prob'] = rList
            SGFeature ='L0'
            SGWidth = 10
            SGColumn = 'distSG_Prob'
            SGSource = None
            SGLrate = 1.0
    else:
        SGFeature ='L0'
        SGWidth = 10
        SGColumn = 'txBIOES'
        SGSource = None
        SGLrate = 1.0
    print(features.keys())
    print(features['args']['tokens'])
    print 'add feature lists to Args db'
    # Persist each feature's token list (1-based ix for the lua consumer).
    for f in ['suffix', 'caps', 'words', 'L0', 'args', 'distance']:
        print 'storing %s feature to DB' % f
        for i,t in enumerate(features[f]['tokens']):
            db.execute("INSERT INTO Tokens (type, ix, token) VALUES (?, ?, ?)", (f, i+1, t))
    # Network architecture description consumed by the torch side.
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 1, 'network',    'BDLSTM' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 2, 'output',     'viterbi' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 3, 'target',     'args' ))
    cmd = "INSERT INTO WDFArch (ix, name, filename, size, width, learningRate, clone, ptrName, fType, offset) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    db.execute(cmd, ( 1, 'words',   WordRepsFileLocations.pathToWordRepsList(),  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, None, 'wordP', 'words', 0 ))
    db.execute(cmd, ( 2, 'caps',    None,  len(features['caps']['tokens']),    5,   0.6, None, 'wordP', 'caps', 0 ) )
    db.execute(cmd, ( 3, 'suffix',  None,  len(features['suffix']['tokens']),  5,   0.6, None, 'wordP', 'suffix', 0 ) )
    # Pred/ctxP* clone the word embedding around the predicate position
    # (offsets -2..+2); L0Pred/L0ctxP* do the same for the SG feature.
    db.execute(cmd, ( 4, 'Pred',    None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 0 ))
    db.execute(cmd, ( 5, 'ctxP1',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', -2 ))
    db.execute(cmd, ( 6, 'ctxP2',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', -1 ))
    db.execute(cmd, ( 7, 'ctxP3',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 0 ))
    db.execute(cmd, ( 8, 'ctxP4',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 1 ))
    db.execute(cmd, ( 9, 'ctxP5',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 2 ))
    db.execute(cmd, ( 10, SGFeature, SGSource,  len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, None,     'wordP', SGFeature, 0 ))
    db.execute(cmd, ( 11, 'L0Pred',  None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 0 ))
    db.execute(cmd, ( 12, 'L0ctxP1', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, -2 ))
    db.execute(cmd, ( 13, 'L0ctxP2', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, -1 ))
    db.execute(cmd, ( 14, 'L0ctxP3', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 0 ))
    db.execute(cmd, ( 15, 'L0ctxP4', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 1 ))
    db.execute(cmd, ( 16, 'L0ctxP5', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 2 ))
    db.execute(cmd, ( 17, 'regionMark', None,  len(features['distance']['tokens']),  5,   0.6, None, 'deltaP', 'distance', 0 ))
    for sType in sTypes:   # add ood
        predIX=0
        df = vectors[sType].dropna(subset = ['words']).copy()
        fullWordList  = df['words'].tolist()
        BIOESList     = df[SGColumn].tolist()
        fullArg0List  = df['ar0_arg'].tolist()
        fullArg1List  = df['ar1_arg'].tolist()
        fullArg2List  = df['ar2_arg'].tolist()
        fullArg3List  = df['ar3_arg'].tolist()
        # Convert raw tokens to feature indices (0-based here; +1 at CSV time).
        df['suffIX']  = [getIndex(w.lower()[-2:], features['suffix'], defaultToken='UNKNOWN') for w in fullWordList]
        df['capsIX']  = [getIndex(capsTag(w), features['caps'], defaultToken='UNKNOWN') for w in fullWordList]
        df['wordIX']  = [parseWord(w, features['words']['t2i']) for w in fullWordList]
        df['L0IX']    = [getIndex(b, features[SGFeature],defaultToken='UNKNOWN' ) for b in BIOESList]
        df['arg0IX']  = [getIndex(a, features['args'], defaultToken='UNKNOWN') for a in fullArg0List]
        df['arg1IX']  = [getIndex(a, features['args'], defaultToken='UNKNOWN') for a in fullArg1List]
        df['arg2IX']  = [getIndex(a, features['args'], defaultToken='UNKNOWN') for a in fullArg2List]
        df['arg3IX']  = [getIndex(a, features['args'], defaultToken='UNKNOWN') for a in fullArg3List]
        maxIX = int(df[['sentIX']].values.max())
        if maxSents:
            maxIX= maxSents
        for sentIX in range(maxIX+1):
            z = df[ df['sentIX'] == sentIX]
            if z.shape[0]>0:
                sentLength = len(z['words'].tolist())
                BIOES      = z['txBIOES'].tolist()
                wordsCSV   = listToCSV(AddToAll(z['wordIX'].tolist(), 1))
                L0CSV      = listToCSV(AddToAll(z['L0IX'].tolist(),   1))
                suffixCSV  = listToCSV(AddToAll(z['suffIX'].tolist(), 1))
                capsCSV    = listToCSV(AddToAll(z['capsIX'].tolist(), 1))
                cmd = "INSERT INTO Sentences (ix, type, fType, fCSV) VALUES (?, ?, ?, ?)"
                db.execute(cmd, ( sentIX+1, sType, "words",     wordsCSV ))
                db.execute(cmd, ( sentIX+1, sType, SGFeature,   L0CSV ))
                db.execute(cmd, ( sentIX+1, sType, "caps",      capsCSV ))
                db.execute(cmd, ( sentIX+1, sType, "suffix",    suffixCSV ))
                # what to set
                arg0List = z['arg0IX'].tolist()
                arg1List = z['arg1IX'].tolist()
                arg2List = z['arg2IX'].tolist()
                arg3List = z['arg3IX'].tolist()
                arg0Loc  = z['ar0_ix'].tolist()
                arg1Loc  = z['ar1_ix'].tolist()
                arg2Loc  = z['ar2_ix'].tolist()
                arg3Loc  = z['ar3_ix'].tolist()
                # One Items row per predicate word i in this sentence.
                for i,_ in enumerate(arg0Loc):
                    if ('txNonPred' in BIOES[i]) or ('txNamed' in BIOES[i]) or ('O' == BIOES[i]) : # this is how we figure out if ARGS come out of this node
                        continue
                    # the targetList contains the args in their proper positions
                    # Need to check what concepts are being trained here  - ;)
                    #
                    targetList = [0] * sentLength   # where 0 is the assumed null tag....
                    # Place each present argument's label index at its word
                    # location; out-of-range locations are silently dropped.
                    if not pd.isnull(arg0Loc[i]):
                        if int(arg0Loc[i]) < len(targetList):
                            targetList[ int(arg0Loc[i]) ] = arg0List[i]
                    if not pd.isnull(arg1Loc[i]):
                        if int(arg1Loc[i]) < len(targetList):
                            targetList[ int(arg1Loc[i]) ] = arg1List[i]
                    if not pd.isnull(arg2Loc[i]):
                        if int(arg2Loc[i]) < len(targetList):
                            targetList[ int(arg2Loc[i]) ] = arg2List[i]
                    if not pd.isnull(arg3Loc[i]):
                        if int(arg3Loc[i]) < len(targetList):
                            targetList[ int(arg3Loc[i]) ] = arg3List[i]
                    #if (targetNotNull):
                    targetCSV  = listToCSV(AddToAll(targetList, 1))
                    db.execute("INSERT INTO Items (ix, type, sentIX, pWordIX, sentenceLen, targetCSV) VALUES (?, ?, ?, ?, ?, ?)",
                               ( predIX+1, sType, sentIX+1, i+1, sentLength, targetCSV ))
                    db.commit()
                    predIX += 1
    db.commit()
    db.close()
def createAMRL0NargVectors(inFn, dbFn, L0CutoffPercent, keepSense, sTypes= ['test','training','dev'],
                           vectors=None, featuresDB=None, maxSents=None, padVectorSG=None,
                           L0OnlyFromFeaturesDB=False, useDistSG=False):
    """Write the non-core-argument (nargs) training database.

    Structure parallels createAMRL0ArgVectors, but targets the nar0..nar3
    label/location columns and skips only 'O' and txNamed nodes (txNonPred
    nodes DO get Items rows here, unlike the args variant).
    """
    if inFn:
        vectors = pickle.load( open( inFn ) )
        vectors = preProcessVectors(vectors, sTypes, keepSense)
    # There are three use cases:
    #
    # 0) create all features based on statistics of the data
    # 1) read all features from a database ( for running data forward through a generated model )
    # 2) useDistSG=True, read only L0 feature from a database, generate the others.
    #    use the distSG column as input.
    if L0OnlyFromFeaturesDB: # Used to form hard decisions from SG during training
        _, vectorFeatures, _ = readAMRVectorDatabase(featuresDB)
        features = getAMRFeatures(vectors, L0CutoffPercent)
        features['L0'] = vectorFeatures['L0']
    elif featuresDB:
        _, features, _ = readAMRVectorDatabase(featuresDB)
    else:
        features = getAMRFeatures(vectors, L0CutoffPercent)
    db = initializeAMRVectorDatabase(dbFn)
    if useDistSG:
        features['distSG'] = getAMRFeatures(vectors, L0CutoffPercent)['distSG']
        # A token longer than one char means the SG input is distributed.
        if len(features['distSG']['tokens'][0]) > 1:
            print 'SG feature is distributed and is %d by %d wide' % (len(features['distSG']['tokens']), features['distSG']['tokens'][0].count(',')+ 1)
            # set the last vector to be the Padding vector, torch is coded to use this
            if not padVectorSG:
                SGWidth = len( features['distSG']['tokens'][0].split(',') )
                padVectorSG = listToCSV(getForcedProb(SGWidth, 0))
            features['distSG']['tokens'].append(padVectorSG)
            features['distSG']['t2i']['PADDING'] = len(features['distSG']['tokens'])-1
            SGFeature = 'distSG'
            SGWidth = len( features['distSG']['tokens'][0].split(',') )
            SGColumn = 'distSG'
            SGSource = 'DB:distSG'  # this tells daisyluTorch to preload table from this DB
            print 'storing distSG feature to DB'
            for i,t in enumerate(features['distSG']['tokens']):
                db.execute("INSERT INTO DBFeatures (type, ix, CSV) VALUES (?, ?, ?)", ('distSG', i+1, t))
            SGLrate = 0.0
        else:
            print 'Using hard decision from SG'
            # Map each hard SG index back to its L0 token string (1-based -> 0-based).
            for ss in ['training', 'dev', 'test']:
                iList = vectors[ss]['distSG'].tolist()
                rList = [features['L0']['tokens'][int(x)-1] for x in iList]
                vectors[ss]['distSG_Prob'] = rList
            SGFeature ='L0'
            SGWidth = 10
            SGColumn = 'distSG_Prob'
            SGSource = None
            SGLrate = 1.0
    else:
        SGFeature ='L0'
        SGWidth = 10
        SGColumn = 'txBIOES'
        SGSource = None
        SGLrate = 1.0
    print(features.keys())
    print(features['nargs']['tokens'])
    print 'add feature lists to Nargs db'
    # Persist each feature's token list (1-based ix for the lua consumer).
    for f in ['suffix', 'caps', 'words', 'L0', 'nargs', 'distance']:
        print 'storing %s feature to DB' % f
        for i,t in enumerate(features[f]['tokens']):
            db.execute("INSERT INTO Tokens (type, ix, token) VALUES (?, ?, ?)", (f, i+1, t))
    # Network architecture description consumed by the torch side.
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 1, 'network',    'BDLSTM' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 2, 'output',     'viterbi' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 3, 'target',     'nargs' ))
    cmd = "INSERT INTO WDFArch (ix, name, filename, size, width, learningRate, clone, ptrName, fType, offset) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    db.execute(cmd, ( 1, 'words',   WordRepsFileLocations.pathToWordRepsList(),  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, None, 'wordP', 'words', 0 ))
    db.execute(cmd, ( 2, 'caps',    None,  len(features['caps']['tokens']),    5,   0.6, None, 'wordP', 'caps', 0 ) )
    db.execute(cmd, ( 3, 'suffix',  None,  len(features['suffix']['tokens']),  5,   0.6, None, 'wordP', 'suffix', 0 ) )
    # Pred/ctxP* clone the word embedding around the predicate position
    # (offsets -2..+2); L0Pred/L0ctxP* do the same for the SG feature.
    db.execute(cmd, ( 4, 'Pred',    None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 0 ))
    db.execute(cmd, ( 5, 'ctxP1',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', -2 ))
    db.execute(cmd, ( 6, 'ctxP2',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', -1 ))
    db.execute(cmd, ( 7, 'ctxP3',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 0 ))
    db.execute(cmd, ( 8, 'ctxP4',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 1 ))
    db.execute(cmd, ( 9, 'ctxP5',   None,  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, "words", 'itemP', 'words', 2 ))
    db.execute(cmd, ( 10, SGFeature, SGSource,  len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, None,     'wordP', SGFeature, 0 ))
    db.execute(cmd, ( 11, 'L0Pred',  None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 0 ))
    db.execute(cmd, ( 12, 'L0ctxP1', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, -2 ))
    db.execute(cmd, ( 13, 'L0ctxP2', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, -1 ))
    db.execute(cmd, ( 14, 'L0ctxP3', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 0 ))
    db.execute(cmd, ( 15, 'L0ctxP4', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 1 ))
    db.execute(cmd, ( 16, 'L0ctxP5', None,      len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, SGFeature,   'itemP', SGFeature, 2 ))
    db.execute(cmd, ( 17, 'regionMark', None,  len(features['distance']['tokens']),  5,   0.6, None, 'deltaP', 'distance', 0 ))
    for sType in sTypes:
        predIX=0
        df = vectors[sType].dropna(subset = ['words']).copy()
        fullWordList  = df['words'].tolist()
        BIOESList     = df[SGColumn].tolist()
        fullNarg0List  = df['nar0_lbl'].tolist()
        fullNarg1List  = df['nar1_lbl'].tolist()
        fullNarg2List  = df['nar2_lbl'].tolist()
        fullNarg3List  = df['nar3_lbl'].tolist()
        # Convert raw tokens to feature indices (0-based here; +1 at CSV time).
        df['suffIX']  = [getIndex(w.lower()[-2:], features['suffix'], defaultToken='UNKNOWN') for w in fullWordList]
        df['capsIX']  = [getIndex(capsTag(w), features['caps'], defaultToken='UNKNOWN') for w in fullWordList]
        df['wordIX']  = [parseWord(w, features['words']['t2i']) for w in fullWordList]
        df['L0IX']    = [getIndex(b, features[SGFeature],  defaultToken='UNKNOWN' ) for b in BIOESList]
        df['narg0IX']  = [getIndex(a, features['nargs'], defaultToken='UNKNOWN') for a in fullNarg0List]
        df['narg1IX']  = [getIndex(a, features['nargs'], defaultToken='UNKNOWN') for a in fullNarg1List]
        df['narg2IX']  = [getIndex(a, features['nargs'], defaultToken='UNKNOWN') for a in fullNarg2List]
        df['narg3IX']  = [getIndex(a, features['nargs'], defaultToken='UNKNOWN') for a in fullNarg3List]
        maxIX = int(df[['sentIX']].values.max())
        if maxSents:
            maxIX= maxSents
        for sentIX in range(maxIX+1):
            z = df[ df['sentIX'] == sentIX]
            if z.shape[0]>0:
                sentLength = len(z['words'].tolist())
                BIOES      = z['txBIOES'].tolist()
                wordsCSV   = listToCSV(AddToAll(z['wordIX'].tolist(), 1))
                L0CSV      = listToCSV(AddToAll(z['L0IX'].tolist(),   1))
                suffixCSV  = listToCSV(AddToAll(z['suffIX'].tolist(), 1))
                capsCSV    = listToCSV(AddToAll(z['capsIX'].tolist(), 1))
                cmd = "INSERT INTO Sentences (ix, type, fType, fCSV) VALUES (?, ?, ?, ?)"
                db.execute(cmd, ( sentIX+1, sType, "words",     wordsCSV ))
                db.execute(cmd, ( sentIX+1, sType, SGFeature,   L0CSV ))
                db.execute(cmd, ( sentIX+1, sType, "caps",      capsCSV ))
                db.execute(cmd, ( sentIX+1, sType, "suffix",    suffixCSV ))
                # what to set
                narg0List = z['narg0IX'].tolist()
                narg1List = z['narg1IX'].tolist()
                narg2List = z['narg2IX'].tolist()
                narg3List = z['narg3IX'].tolist()
                narg0Loc  = z['nar0_ix'].tolist()
                narg1Loc  = z['nar1_ix'].tolist()
                narg2Loc  = z['nar2_ix'].tolist()
                narg3Loc  = z['nar3_ix'].tolist()
                # One Items row per eligible word i in this sentence.
                for i,_ in enumerate(narg0Loc):
                    if ('O' == BIOES[i]) or ('txNamed' in BIOES[i]):
                        continue
                    # the targetList contains the args in their proper positions
                    targetList = [0] * sentLength
                    # Place each present narg label index at its word location;
                    # out-of-range locations are silently dropped.
                    if not pd.isnull(narg0Loc[i]):
                        if int(narg0Loc[i]) < len(targetList):
                            targetList[ int(narg0Loc[i]) ] = narg0List[i]
                    if not pd.isnull(narg1Loc[i]):
                        if int(narg1Loc[i]) < len(targetList):
                            targetList[ int(narg1Loc[i]) ] = narg1List[i]
                    if not pd.isnull(narg2Loc[i]):
                        if int(narg2Loc[i]) < len(targetList):
                            targetList[ int(narg2Loc[i]) ] = narg2List[i]
                    if not pd.isnull(narg3Loc[i]):
                        if int(narg3Loc[i]) < len(targetList):
                            targetList[ int(narg3Loc[i]) ] = narg3List[i]
                    targetCSV  = listToCSV(AddToAll(targetList, 1))
                    db.execute("INSERT INTO Items (ix, type, sentIX, pWordIX, sentenceLen, targetCSV) VALUES (?, ?, ?, ?, ?, ?)",
                               ( predIX+1, sType, sentIX+1, i+1, sentLength, targetCSV ))
                    db.commit()
                    predIX += 1
    db.commit()
    db.close()
def createAMRL0AttrVectors(inFn, dbFn, L0CutoffPercent, keepSense, sTypes= ['test','training','dev'],
                           vectors=None, featuresDB=None, maxSents=None, padVectorSG=None,
                           L0OnlyFromFeaturesDB=False, useDistSG=False):
    """Write the attribute-labeling (attr) training database.

    Same skeleton as the other createAMRL0*Vectors writers, but the attr
    token list is overridden to a small fixed set (O/polarity/TOP/quant/
    UNKNOWN) and one Items row is written per sentence whose target marks,
    per word, the attr0 label (only attrList[0] is consulted below).
    """
    if inFn:
        vectors = pickle.load( open( inFn ) )
        vectors = preProcessVectors(vectors, sTypes, keepSense)
    # There are three use cases:
    #
    # 0) create all features based on statistics of the data
    # 1) read all features from a database ( for running data forward through a generated model )
    # 2) useDistSG=True, read only L0 feature from a database, generate the others.
    #    use the distSG column as input.
    if L0OnlyFromFeaturesDB: # Used to form hard decisions from SG during training
        _, vectorFeatures, _ = readAMRVectorDatabase(featuresDB)
        features = getAMRFeatures(vectors, L0CutoffPercent)
        features['L0'] = vectorFeatures['L0']
    elif featuresDB:
        _, features, _ = readAMRVectorDatabase(featuresDB)
    else:
        features = getAMRFeatures(vectors, L0CutoffPercent)
    db = initializeAMRVectorDatabase(dbFn)
    if useDistSG:
        features['distSG'] = getAMRFeatures(vectors, L0CutoffPercent)['distSG']
        # A token longer than one char means the SG input is distributed.
        if len(features['distSG']['tokens'][0]) > 1:
            print 'SG feature is distributed and is %d by %d wide' % (len(features['distSG']['tokens']), features['distSG']['tokens'][0].count(',')+ 1)
            # set the last vector to be the Padding vector, torch is coded to use this
            if not padVectorSG:
                SGWidth = len( features['distSG']['tokens'][0].split(',') )
                padVectorSG = listToCSV(getForcedProb(SGWidth, 0))
            features['distSG']['tokens'].append(padVectorSG)
            features['distSG']['t2i']['PADDING'] = len(features['distSG']['tokens'])-1
            SGFeature = 'distSG'
            SGWidth = len( features['distSG']['tokens'][0].split(',') )
            SGColumn = 'distSG'
            SGSource = 'DB:distSG'  # this tells daisyluTorch to preload table from this DB
            print 'storing distSG feature to DB'
            for i,t in enumerate(features['distSG']['tokens']):
                db.execute("INSERT INTO DBFeatures (type, ix, CSV) VALUES (?, ?, ?)", ('distSG', i+1, t))
            SGLrate = 0.0
        else:
            print 'Using hard decision from SG'
            # Map each hard SG index back to its L0 token string (1-based -> 0-based).
            for ss in ['training', 'dev', 'test']:
                iList = vectors[ss]['distSG'].tolist()
                rList = [features['L0']['tokens'][int(x)-1] for x in iList]
                vectors[ss]['distSG_Prob'] = rList
            SGFeature ='L0'
            SGWidth = 10
            SGColumn = 'distSG_Prob'
            SGSource = None
            SGLrate = 1.0
    else:
        SGFeature ='L0'
        SGWidth = 10
        SGColumn = 'txBIOES'
        SGSource = None
        SGLrate = 1.0
    """
    Do just the top unqualified labels for now
             polarity
             TOP
             quant
    These could be added later
            mode(interrogative          709     8796   13.060134      5
            mode(imperative             328    15719   23.339272     21
    """
    # Override the statistically-derived attr feature with a fixed label set.
    features['attr']['tokens'] =  ['O'] + ['polarity','TOP','quant'] + ['UNKNOWN']
    features['attr']['t2i']    = dict( [(x, y) for y, x in enumerate(features['attr']['tokens'])] )
    print(features.keys())
    print(features['attr']['tokens'])
    print 'add feature lists to Attr db'
    # Persist each feature's token list (1-based ix for the lua consumer).
    for f in ['suffix', 'caps', 'words', 'L0', 'attr', 'distance']:
        print 'storing %s feature to DB' % f
        for i,t in enumerate(features[f]['tokens']):
            db.execute("INSERT INTO Tokens (type, ix, token) VALUES (?, ?, ?)", (f, i+1, t))
    # Network architecture description consumed by the torch side.
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 1, 'network',    'BDLSTM' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 2, 'output',     'viterbi' ))
    db.execute("INSERT INTO GeneralArch (ix, key, value) VALUES (?, ?, ?)",  ( 3, 'target',     'attr' ))
    cmd = "INSERT INTO WDFArch (ix, name, filename, size, width, learningRate, clone, ptrName, fType, offset) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    db.execute(cmd, ( 1, 'words',   WordRepsFileLocations.pathToWordRepsList(),  len(features['words']['tokens']),   WordRepsFileLocations.wordRepsWidth(),  2.0, None, 'wordP', 'words', 0 ))
    db.execute(cmd, ( 2, 'caps',    None,  len(features['caps']['tokens']),    5,   0.6, None, 'wordP', 'caps', 0 ))
    db.execute(cmd, ( 3, 'suffix',  None,  len(features['suffix']['tokens']),  5,   0.6, None, 'wordP', 'suffix', 0 ))
    db.execute(cmd, ( 4, SGFeature, SGSource,  len(features[SGFeature]['tokens']),      SGWidth,  SGLrate, None, 'wordP', SGFeature, 0 ))
    for sType in sTypes:   # add ood
        predIX=0
        df = vectors[sType].dropna(subset = ['words']).copy()
        fullWordList  = df['words'].tolist()
        BIOESList     = df[SGColumn].tolist()
        fullAttr0List  = df['attr0_lbl'].tolist()
        fullAttr1List  = df['attr1_lbl'].tolist()
        fullAttr2List  = df['attr2_lbl'].tolist()
        fullAttr3List  = df['attr3_lbl'].tolist()
        #----------
        # Convert raw tokens to feature indices (0-based here; +1 at CSV time).
        df['suffIX']  = [getIndex(w.lower()[-2:], features['suffix'], defaultToken='UNKNOWN') for w in fullWordList]
        df['capsIX']  = [getIndex(capsTag(w), features['caps'], defaultToken='UNKNOWN') for w in fullWordList]
        df['wordIX']  = [parseWord(w, features['words']['t2i']) for w in fullWordList]
        df['L0IX']    = [getIndex(b, features[SGFeature],  defaultToken='UNKNOWN' ) for b in BIOESList]
        df['attr0IX']  = [getIndex(a, features['attr'], defaultToken='UNKNOWN') for a in fullAttr0List]
        df['attr1IX']  = [getIndex(a, features['attr'], defaultToken='UNKNOWN') for a in fullAttr1List]
        df['attr2IX']  = [getIndex(a, features['attr'], defaultToken='UNKNOWN') for a in fullAttr2List]
        df['attr3IX']  = [getIndex(a, features['attr'], defaultToken='UNKNOWN') for a in fullAttr3List]
        maxIX = int(df[['sentIX']].values.max())
        if maxSents:
            maxIX= maxSents
        for sentIX in range(maxIX+1):
            z = df[ df['sentIX'] == sentIX]
            if z.shape[0]>0:
                sentLength = len(z['words'].tolist())
                BIOES      = z['txBIOES'].tolist()
                wordsCSV   = listToCSV(AddToAll(z['wordIX'].tolist(), 1))
                L0CSV      = listToCSV(AddToAll(z['L0IX'].tolist(),   1))
                suffixCSV  = listToCSV(AddToAll(z['suffIX'].tolist(), 1))
                capsCSV    = listToCSV(AddToAll(z['capsIX'].tolist(), 1))
                cmd = "INSERT INTO Sentences (ix, type, fType, fCSV) VALUES (?, ?, ?, ?)"
                db.execute(cmd, ( sentIX+1, sType, "words",     wordsCSV ))
                db.execute(cmd, ( sentIX+1, sType, SGFeature,   L0CSV ))
                db.execute(cmd, ( sentIX+1, sType, "caps",      capsCSV ))
                db.execute(cmd, ( sentIX+1, sType, "suffix",    suffixCSV ))
                # what to set
                # NOTE(review): [[ ]]*4 aliases the same list 4 times, but all
                # four slots are immediately reassigned below, so it's harmless.
                attrList=[ [] ] * 4
                attrList[0] = z['attr0IX'].tolist()
                attrList[1] = z['attr1IX'].tolist()
                attrList[2] = z['attr2IX'].tolist()
                attrList[3] = z['attr3IX'].tolist()
                targetList = [0] * sentLength
                for w in range(sentLength):
                    #if any of the attr lists contains a token in the features dict,
                    #set it as the target for this word.
                    # NOTE(review): despite the comment, only attrList[0] is
                    # consulted -- attr1..attr3 are collected but unused here.
                    if not ( pd.isnull(BIOES[w])):
                        if attrList[0][w]>0 :
                            targetList[ w ] = attrList[0][w]
                targetCSV  = listToCSV(AddToAll(targetList, 1))
                # NOTE(review): 'i' below is stale (left over from the
                # token-storage loop near the top of this function), so pWordIX
                # gets a constant, likely unintended value -- confirm intent.
                db.execute("INSERT INTO Items (ix, type, sentIX, pWordIX, sentenceLen, targetCSV) VALUES (?, ?, ?, ?, ?, ?)",
                           ( predIX+1, sType, sentIX+1, i+1, sentLength, targetCSV ))
                db.commit()
                predIX += 1
    db.commit()
    db.close()
def readAMRVectorDatabase(dbFn):
    """Read an AMR vector database into memory.

    Loads four tables from the SQLite file dbFn:
      - Sentences (type 'test'): per-sentence feature CSVs; 'tokens' and
        'words' rows are collected separately.
      - Items (type 'test'): one row per prediction item; each row is
        augmented with its sentence's word CSV (and token CSV when present).
      - Tokens: feature vocabularies, returned as
        features[type] = {'tokens': [...], 't2i': {token: 0-based index}}.
      - GeneralArch: key/value pairs describing the network architecture.

    Returns (items DataFrame, features dict, generalArch dict).
    """
    db = openDB(dbFn)
    cur = db.cursor()
    sType = 'test'
    cmd = "SELECT * FROM Sentences WHERE (type = '%s')" % (sType)
    cur.execute(cmd)
    keys = [z[0] for z in cur.description]
    sinfo = {}  # sentence ix -> 'tokens' CSV
    winfo = {}  # sentence ix -> 'words' CSV
    for row in cur:
        d = dict(zip(keys, row))
        if d['fType'] == 'tokens':
            sinfo[d['ix']] = d['fCSV']
        if d['fType'] == 'words':
            winfo[d['ix']] = d['fCSV']
    cmd = "SELECT * FROM Items WHERE (type = '%s')" % (sType)
    cur.execute(cmd)
    keys = [z[0] for z in cur.description]
    rows = []
    for row in cur:
        d = dict(zip(keys, row))
        sentIX = d['sentIX']
        d['words'] = winfo[sentIX]
        # Not every sentence has a 'tokens' row; only attach when present.
        if sentIX in sinfo:
            d['wordTokens'] = sinfo[sentIX]
        rows.append(d)
    cmd = "SELECT * FROM Tokens"
    print(cmd)
    cur.execute(cmd)
    keys = [z[0] for z in cur.description]
    features = {}
    for row in cur:
        d = dict(zip(keys, row))
        tp = d['type']
        token = d['token']
        ix = d['ix'] - 1  # adjusting for lua one based arrays
        if not tp in features:
            features[tp] = {'tokens': [], 't2i': {}}
        features[tp]['tokens'].append(token)
        features[tp]['t2i'][token] = ix
        # BUG FIX: the original "assert('Error in read tokens from db')"
        # asserted a non-empty string, which is always true and could never
        # fire.  Assert the real invariant: token indices must arrive dense
        # and in order per feature type.
        assert ix == len(features[tp]['tokens']) - 1, \
            'Error in read tokens from db'
    cmd = "SELECT * FROM GeneralArch"
    print(cmd)
    cur.execute(cmd)
    keys = [z[0] for z in cur.description]
    generalArch = {}
    for row in cur:
        d = dict(zip(keys, row))
        generalArch[d['key']] = d['value']
    db.close()
    return pd.DataFrame(rows), features, generalArch
def readAMRResultsDatabase(dbFn, sType='test'):
    """Read the Items rows of type sType from a results database.

    Returns the items as a pandas DataFrame.  When a Parameters table is
    present, its 'weightString' row is queried but the weights are not
    decoded (decoding is disabled; x stays None).
    """
    db = openDB(dbFn)
    cur = db.cursor()
    # Schema reference for the writer of this database:
    #   db:execute( 'CREATE TABLE IF NOT EXISTS Sentences( ix int, type text, targetVector text, PRIMARY KEY (ix, type))' )
    cmd = "SELECT * FROM Items WHERE (type = '%s') ORDER BY ix;" % (sType)
    print(cmd)
    cur.execute(cmd)
    columns = [col[0] for col in cur.description]
    rows = [dict(zip(columns, record)) for record in cur]
    # Probe sqlite_master so a missing Parameters table is tolerated.
    cmd = "SELECT count(*) FROM sqlite_master WHERE type='table' AND name='%s';" % ('Parameters')
    cur.execute(cmd)
    if cur.fetchall()[0][0]:
        cmd = "SELECT * FROM Parameters WHERE (type = '%s')" % ('weightString')
        print(cmd)
        cur.execute(cmd)
        # Weight decoding intentionally left disabled.
        x = None
    db.close()
    return pd.DataFrame(rows)
def getComparisonDFrames(dbfn, dbrfn, pVector2d=False):
    """Build a token-level comparison of targets vs. network results.

    dbfn      -- vector database (inputs/targets), read via readAMRVectorDatabase
    dbrfn     -- results database (predictions), read via readAMRResultsDatabase
    pVector2d -- if True, keep the whole log-prob string only for the row's
                 predicate word (pWordIX); otherwise one '#'-separated slice
                 per word.

    Returns (token-pair DataFrame, vector df, results df, features, genArch).
    """
    # from vectors and results, compute a merged comparison [pandas dataframe]
    df, features, genArch = readAMRVectorDatabase(dbfn)
    targetTokenType = genArch['target']
    dfr = readAMRResultsDatabase(dbrfn)
    result = pd.merge(df, dfr, on='ix')
    #
    # merge df with dfr based on 'ix'
    # ix is the itemIX
    # sentIX and pWordIX come from df
    # make sure this still works on AMRL0, though
    #
    tokPairs = []
    confusion = {}  # confusion[result][target] = count; built but not returned
    for _,c_row in result.iterrows():
        sentIX = c_row['sentIX']
        pWordIX = c_row['pWordIX']
        wstring = c_row['words']
        wi = wstring.split(',')
        # word indices are 1-based (lua convention used elsewhere in this file)
        wordTokens = [features['words']['tokens'][int(i)-1] for i in wi]
        tv = intCSVToList(c_row['targetCSV'])
        rv = intCSVToList(c_row['resultVector'])
        pVectors = c_row['logProbVector'].split('#')
        # NOTE(review): words beyond position 100 are skipped here -- confirm intended
        length = min(100,len(tv) )
        for i in range(length):
            ftv = features[targetTokenType]['tokens'][ tv[i]-1 ]
            frv = features[targetTokenType]['tokens'][ rv[i]-1 ]
            if pVector2d:
                if (pWordIX-1)==i:
                    pVector = c_row['logProbVector']
                else:
                    pVector=None
            else:
                pVector = pVectors[i]
            tokPairs.append( {'sentIX':sentIX, 'pWordIX':pWordIX, 'wordIX':i, 'word':wordTokens[i],'target':ftv, 'result':frv, 'pVector':pVector} )
            if not frv in confusion:
                confusion[frv] = {}
            if not ftv in confusion[frv]:
                confusion[frv][ftv] = 0
            confusion[frv][ftv] += 1
    tp = pd.DataFrame(tokPairs)
    return tp, df, dfr, features, genArch
def plotHeatmaps(tp, genArch=None):
    """Save three target/result confusion figures under the figures folder.

    tp      -- token-pair DataFrame from getComparisonDFrames(), with at
               least 'target', 'result', 'sentIX' and 'wordIX' columns.
    genArch -- unused; kept for interface compatibility.

    Writes heatmap_1.png (top-10 most-confused targets excluding the most
    frequent one), heatmap_2.png (top-10 including it) and heatmap_3.png
    (all tags), then shows the figures.

    BUG FIXES vs. the original:
      - DataFrame.sort(...) (removed from pandas) -> sort_values(...).
      - final sns.plt.show() dropped: seaborn no longer exposes 'plt',
        and plt.show() is already called.
    """
    import seaborn as sns; sns.set()
    direc = getSystemPath('figures')
    prefix = 'heatmap'

    def _error_ranked_targets():
        # Target tags ordered by descending mis-prediction count.
        errors = tp[tp['target'] != tp['result']]
        ranked = errors.groupby(['target'], as_index=False).count()
        return ranked.sort_values('sentIX', ascending=False)['target'].tolist()

    def _save_confusion_figure(keep_targets, title, fig_nbr):
        # Count (target, result) pairs, optionally restricted to
        # keep_targets, and save one numbered figure.
        counts = tp.groupby(['target', 'result'], as_index=False).count()
        if keep_targets is not None:
            counts = counts[counts['target'].isin(keep_targets)]
        # NOTE(review): the pivoted matrix is computed but never drawn
        # (no sns.heatmap call), exactly as in the original -- confirm
        # whether sns.heatmap(pivoted) was intended here.
        pivoted = counts.pivot("result", "target", "wordIX")
        plt.figure(figsize=(18, 10))
        plt.yticks(rotation=0)
        plt.xticks(rotation=90)
        plt.title(title)
        plt.tight_layout()
        plt.savefig('%s/%s_%d.png' % (direc, prefix, fig_nbr))

    ranked = _error_ranked_targets()
    _save_confusion_figure(ranked[1:11], 'Top 10, excluding O, most confused Target Tags', 1)
    _save_confusion_figure(ranked[:10], 'Top 10 most confused Target Tags', 2)
    _save_confusion_figure(None, 'Confusion Matrix for all Tags', 3)
    plt.show()
if __name__ == '__main__':
    # Running this module directly just exits with status 10.
    exit(10)
| {
"repo_name": "BillFoland/daisyluAMR",
"path": "system/daisylu_vectors.py",
"copies": "1",
"size": "74777",
"license": "mit",
"hash": -4332465398764724700,
"line_mean": 46.088790932,
"line_max": 198,
"alpha_frac": 0.5389625152,
"autogenerated": false,
"ratio": 3.42056630529253,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.445952882049253,
"avg_score": null,
"num_lines": null
} |
''' 12-extract_magnitude_long_periods.py
===============================================
AIM: Prepare cumulative plots (THIS SCRIPT IS with STRAY LIGHT)
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : complicated name files depending on the case (handled by 13-<...>.py)
CMD: python 12-extract_magnitude_long_periods.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
	   * <orbit_id>_misc/   --> storage of data
* all_figures/ --> comparison figures
REMARKS: THIS SCRIPT IS with STRAY LIGHT
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
import time
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
###########################################################################
###########################################################################
### PARAMETERS
# orbit_id of the precomputed orbit/flux data set
orbit_id = 301
# orbit apogee / perigee (inputs to altitude2period below)
apogee=700
perigee=700
# First minute in data set
minute_ini = 0
# Last minute to look for (one year of minutes)
minute_end = 1440*365
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Minimum consecutive observable time for plots [minutes]
threshold_obs_time = 78
# Take a minimum observation time per orbit and Minimum observable time per orbit (NON-CONSECUTIVE) [minutes]
min_obs_per_orbit = True
threshold_obs_time_per_orbit = 78
# Time to acquire a target [minutes]
t_acquisition = 3
# Maximum visible magnitude
mag_max = 12.
# File name for the output file
output_fname = 'mag_over_year_%d_mag_%02.1f' % (threshold_obs_time, mag_max)
extension = '.dat'
# Show preview ?
show = True
# Include SAA ?
SAA = False
# File name that contains the orbit used and the percentage of the sky unviewable because of SL
# (the 'cumultative' spelling is kept: downstream scripts expect this exact file name)
namefile = 'cumultative_SL_forbidden_%d_mag_%02.1f.dat' % (threshold_obs_time, mag_max)
# Factor in the SL post treatment correction ?
SL_post_treat = True
# Factor in mirror efficiency for the equivalent star magnitude ?
mirror_correction = True
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee,perigee)
###########################################################################
### INITIALISATION
# BUG FIX: sys.stdout is used below, but 'sys' was never imported at the
# top of this file (it may only leak in through the star imports from
# resources.*); import it explicitly so the script does not depend on that.
import sys
file_flux = 'flux_'
# (typo fixes in user messages: MAGNITIUDE -> MAGNITUDE, exists -> exist)
print('ORBIT ID:\t\t%d\nTHRESHOLD OBS TIME:\t%d+%d min' % (orbit_id,threshold_obs_time,t_acquisition))
if min_obs_per_orbit:
    print('obs time per orbit\t%d min' % threshold_obs_time_per_orbit)
print('MAGNITUDE:\t\t%02.1f\nN/S max:\t\t%d ppm\nIncluding SAA?\t\t%g' % (mag_max,param.ppm_threshold,SAA))
# changes the threshold by adding the acquisition time:
threshold_obs_time += t_acquisition
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if not os.path.isdir(folder_figures):
    print('\tError: figure folder %s does not exist.' % (folder_figures))
    exit()
sys.stdout.write("Loading list of computed orbits...\t\t")
sys.stdout.flush()
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
# one slot per minute, over-allocated by two orbits; -1 marks "unused"
list_minutes = -1. * np.ones( ( np.shape(orbits)[0] + 2 ) * period )
id_min = 0
times = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
for ii, orbit_current in enumerate(orbits[:,0]):
    t_ini, t_end, a_ini, a_end = fast_orbit2times(times,orbit_current,orbit_id)
    for minute in range(a_ini, a_end+1):
        list_minutes[id_min] = int(minute)
        id_min += 1
list_minutes = list_minutes[list_minutes > -1]
# apply conditions
list_minutes = list_minutes[list_minutes >= minute_ini]
list_minutes = list_minutes[list_minutes <= minute_end]
print('Done.')
## Prepare the RA/dec grid (cell centres)
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
# np.float was removed from numpy; the builtin float is equivalent here
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
visibility = np.zeros(np.shape(ra_grid))
visibility_save = np.zeros([np.shape(ra_grid)[0], np.shape(ra_grid)[1], int(period+2)])
workspace = np.zeros(np.shape(ra_grid))
data = np.zeros(np.shape(ra_grid))
if min_obs_per_orbit: data_orbit = np.zeros(np.shape(ra_grid))
numberofminutes = minute_end+1 - minute_ini
minutes_orbit_iditude = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
maximum_sl_flux = mag2flux(mag_max)
if mirror_correction: maximum_sl_flux *= param.mirror_efficiency
if SAA:
    SAA_data = np.loadtxt('resources/SAA_table_%d.dat' % orbit_id, delimiter=',')
    SAA_data = SAA_data[SAA_data[:,0]>= minute_ini]
    SAA_data = SAA_data[SAA_data[:,0]<= minute_end]
if os.path.isfile(folder_misc+namefile):
    os.remove(folder_misc+namefile)
f = open(folder_misc+namefile,'w')
###########################################################################
### LOAD AND COMPUTE LARGEST OBSERVATION PERIOD
# Walk every minute of the year, marking which grid cells are visible
# (stray light below maximum_sl_flux) and accumulating consecutive
# visibility runs into 'data' (or 'data_orbit' in per-orbit mode).
start = time.time()
lp = -1
previous_part = -1
sl_rejection_orbit = 0.
shutdown_time = 0.
previous_period_rel = 1.
orbit_previous = 0.
do_print = False
load = True
try:
    for minute in range(minute_ini,minute_end+1):
        minute = int(minute)
        if SAA and fast_SAA(SAA_data, minute): SAA_at_minute = True
        else: SAA_at_minute = False
        orbit_current = fast_minute2orbit(minutes_orbit_iditude, minute, orbit_id)
        junk, period_rel, atc_ini, junk = fast_orbit2times(minutes_orbit_iditude, orbit_current, orbit_id)
        if orbit_current > lp:
            # First minute of a new orbit: log the SL rejection percentage
            # of the finished orbit and reset the per-orbit accumulators.
            lp = orbit_current
            message = "Loading stray light data orbit %d on %d...\t" % (lp, minutes_orbit_iditude[-1,0])
            sys.stdout.write( '\r'*len(message) )
            sys.stdout.write(message)
            sys.stdout.flush()
            if load: print >> f, orbit_previous, sl_rejection_orbit/float(previous_period_rel-shutdown_time+1)*100.
            shutdown_time = 0
            sl_rejection_orbit = 0.
            sl_rejection_orbit_save = 0.
            previous_period_rel = period_rel
            orbit_previous = orbit_current
            if min_obs_per_orbit:
                # Bank cells that met the per-orbit threshold, then restart.
                data[ (data_orbit>threshold_obs_time_per_orbit-1)] += \
                    data_orbit[(data_orbit>threshold_obs_time_per_orbit-1)]
                data_orbit = np.zeros(np.shape(ra_grid))
        try:
            ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
            load=True
            # Apply the flux correction (SL post-treatment removal)
            if SL_post_treat: S_sl *= (1.0 - param.SL_post_treat_reduction)
            nb_targets = np.size(S_sl)
        except IOError:
            # if there is nothing then well, do nothing ie we copy the past values
            # in which orbit are we ?
            # get the previous orbit computed and copy the stray light data of this orbit :
            load = False
            minute_replacement = minute - atc_ini# + at_ini
        # populate the visbility matrix
        if SAA_at_minute:
            # Inside the SAA the instrument is shut down: nothing visible.
            visibility = np.zeros(np.shape(ra_grid))
            shutdown_time += 1
        elif load:
            sl_rejection_minute = 0.
            visibility_save[...,minute-atc_ini] = 0
            for ra_, dec_, sl in zip(ra,dec,S_sl):
                if sl > maximum_sl_flux:
                    sl_rejection_minute += 1.
                    continue
                id_ra = find_nearest(ras,ra_)
                id_dec = find_nearest(decs,dec_)
                visibility[id_dec,id_ra] = 1
                visibility_save[id_dec,id_ra,minute-atc_ini] = 1
            sl_rejection_orbit += sl_rejection_minute/nb_targets
        else: visibility = visibility_save[...,minute_replacement]
        if minute == minute_ini: workspace=visibility.copy()
        else :
            # if there is an interruption then, reset the value in workspace
            # but before saves the value if it is larger than "threshold_obs_time" minutes
            if min_obs_per_orbit:
                data_orbit[ (workspace>threshold_obs_time-1) & (visibility < 1) ] += \
                    workspace[(workspace>threshold_obs_time-1)&(visibility< 1)]
            else:
                data[ (workspace>threshold_obs_time-1) & (visibility < 1) ] += \
                    workspace[(workspace>threshold_obs_time-1)&(visibility< 1)]
            workspace[visibility < 1] = 0
            # if the point existed already, then add one minute
            workspace[visibility > 0] += 1
        # reset visibility without taking a chance of a wrong something
        del visibility
        visibility = np.zeros(np.shape(ra_grid))
except KeyboardInterrupt: print hilite('\nWARNING! USER STOPPED LOADING AT MINUTE %d' % minute,False,False)
# Check that we did not left anything behind (in a try structure to avoid weird things...)
# NOTE(review): the two boolean masks below differ, so the two selections
# can have different sizes; the except then swallows the ValueError and the
# final visibility runs may be silently dropped -- confirm intended.
try:
    data[ (workspace>threshold_obs_time-1) ] += \
        workspace[(workspace>threshold_obs_time-1)&(visibility< 1)]
except ValueError: pass
del workspace
end = time.time()
elapsed_time = round((end-start)/60.,1)
sys.stdout.write( '\r'*len(message) )
sys.stdout.flush()
# Final report: save the accumulated visibility map and optionally preview it.
print
print "Loaded stray light data\tTime needed: %2.2f min" % elapsed_time
if SAA: note = '_SAA'
else: note = ''
np.savetxt(folder_misc+output_fname+note+extension,data)
print "Data saved in %s%s" % (folder_misc,output_fname+note+extension)
if not show : exit()
plt.figure()
ax = plt.subplot(111)
# RA is shifted from [0, 2pi) to [-180, 180) degrees for the plot
extent = (-np.pi,np.pi,-np.pi/2.,np.pi/2.)
CS = ax.contour((ra_grid-np.pi)*180. / np.pi,dec_grid*180. / np.pi,data,colors='k',extent=extent)
CS = ax.contourf((ra_grid-np.pi)*180. / np.pi,dec_grid*180. / np.pi,data,cmap=plt.cm.jet,extent=extent)
plt.xlim([-180, 180])
plt.ylim([-90, 90])
plt.colorbar(CS)
ax.grid(True)
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\delta$')
plt.title('PREVIEW OF THE DATA [MINUTES]')
plt.show()
f.close()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "12_extract_magnitude_long_periods.py",
"copies": "1",
"size": "9561",
"license": "bsd-3-clause",
"hash": -8532696114189522000,
"line_mean": 28.878125,
"line_max": 117,
"alpha_frac": 0.6631105533,
"autogenerated": false,
"ratio": 2.9158279963403477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8936901457049233,
"avg_score": 0.028407418518222823,
"num_lines": 320
} |
# Example card description dict handled by MagicCard below:
# 12:{'name':'m3','self_servant':2,'self_blood':0,'enermy_servant':0,'enermy_blood':0,'time':2,'img':'m3.jpg','show_img':'m3.jpg','card_type':1}
import pygame
import threading
import time
import sys
reload(sys)
# Python-2-only hack: reload(sys) restores the setdefaultencoding attribute
# that site.py deletes at startup, so utf-8 can be forced as the default
# string encoding.  Not portable to Python 3.
sys.setdefaultencoding('utf-8')
class MagicCard:
    """A spell card: applies blood/servant effects when played.

    Constructed from a card-description dict such as
        {'name':'m3', 'self_servant':2, 'self_blood':0, 'enermy_servant':0,
         'enermy_blood':0, 'time':2, 'img':'m3.jpg', 'show_img':'m3.jpg',
         'card_type':1}
    (the 'enermy' spelling is kept to match the existing card data keys).
    """
    def __init__(self, dict):
        # NOTE: the parameter name 'dict' shadows the builtin; kept for
        # backward compatibility with existing callers.
        self.self_servant_effect = dict['self_servant']      # healing for own servants
        self.self_blood_effect = dict['self_blood']          # healing for own hero
        self.enermy_servant_effect = dict['enermy_servant']  # damage to enemy servants
        self.enermy_blood_effect = dict['enermy_blood']      # damage to enemy hero
        self.show_img = pygame.image.load(dict['show_img'])
        self.create_time = dict['time']
        self.use_time = ''  # timestamp set by use_card()
        self.screen = ''    # unused here; kept for compatibility
        self.type = ''      # 0/1 = side that played the card; set by use_card()
    def load_card(self, current_time, screen):
        """Draw the card's effect image for one second after it was used.

        Row 145 is the top (enemy) board row, row 300 the bottom (own)
        row; which effect goes where depends on self.type.
        """
        if current_time - self.use_time < 1:
            if self.type == 0:
                if self.enermy_servant_effect > 0:
                    m7effect_img = pygame.image.load('m7effect.png')
                    screen.blit(m7effect_img, (0, 145))
                elif self.self_servant_effect > 0:
                    m2effect_img = pygame.image.load('m2effect.png')
                    screen.blit(m2effect_img, (0, 300))
            elif self.type == 1:
                if self.enermy_servant_effect > 0:
                    m7effect_img = pygame.image.load('m7effect.png')
                    screen.blit(m7effect_img, (0, 300))
                elif self.self_servant_effect > 0:
                    m2effect_img = pygame.image.load('m2effect.png')
                    screen.blit(m2effect_img, (0, 145))
    def use_card(self, self_blood, enermy_blood, self_servant_list, enermy_servant_list, type):
        """Apply the card's effects.

        Heals the caster and their servants, damages the enemy hero, and
        removes enemy servants whose blood_decrease() reports death.
        Returns (new self blood, new enemy blood, self servants, enemy
        servants); the servant lists are mutated in place.
        """
        self.use_time = time.time()
        new_self_blood = self_blood + self.self_blood_effect
        new_enermy_blood = enermy_blood - self.enermy_blood_effect
        new_self_servant_list = self_servant_list
        new_enermy_servant_list = enermy_servant_list
        for servant in new_self_servant_list:
            servant.blood_increase(self.self_servant_effect)
        # BUG FIX: the original collected victims with list.index(), which
        # returns the FIRST matching position and therefore deletes the
        # wrong entries when the list contains the same servant object more
        # than once.  enumerate() records each position exactly once.
        dead_list = [pos for pos, servant in enumerate(new_enermy_servant_list)
                     if servant.blood_decrease(self.enermy_servant_effect, self.use_time)]
        # Delete from the back so the remaining indices stay valid.
        for pos in sorted(dead_list, reverse=True):
            del new_enermy_servant_list[pos]
        self.type = type
        return new_self_blood, new_enermy_blood, new_self_servant_list, new_enermy_servant_list
| {
"repo_name": "diyalujiaf/yewan-s-",
"path": "cardgame/magic_card.py",
"copies": "1",
"size": "2494",
"license": "mit",
"hash": 1923527192178204200,
"line_mean": 38.2258064516,
"line_max": 144,
"alpha_frac": 0.5753809142,
"autogenerated": false,
"ratio": 3.2097812097812097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42851621239812093,
"avg_score": null,
"num_lines": null
} |
# 1305, 9 Jan 2018 (NZDT)
#
# Nevil Brownlee, U Auckland
# From a simple original version by Joe Hildebrand
# ElementTree doesn't have nsmap
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
#from lxml import etree as ET
import getopt, sys, re
import word_properties as wp
indent = 4
warn_nbr = 0
current_file = None
verbose = False # set by -v
warn_limit = 40 # set by -w nnn
new_file = False # set by -n
trace = False # set by -t
bad_namespaces = []
def help_msg(msg):
    """Print usage information, optionally prefixed by msg, then exit."""
    banner = "Nevil's SVG checker"
    if msg:
        banner += ": %s" % msg
    print(banner)
    print("\n   ./check.py [options] input-svg-file(s)\n")
    print("options:")
    print("   -n   write .new.svg file, stripping out anything\n not allowed in SVG 1.2 RFC")
    print("   -w nn   stop after nn warning messages\n")
    exit()
# Command-line parsing: option switches set the module-level flags declared
# above; the remaining arguments are the SVG files to check.
try:
    options, rem_args = getopt.getopt(sys.argv[1:], "hntvw:")
except getopt.GetoptError:
    help_msg("Unknown option")
filter = None  # NOTE(review): unused, and shadows the builtin 'filter'
for o,v in options:
    if o == "-w":
        warn_limit = int(v)
    elif o == "-v":
        verbose = True
    elif o == "-h":
        help_msg(None)
    elif o == "-n":
        new_file = True
    elif o == "-t":
        trace = True
if len(rem_args) == 0:
    help_msg("No input file(s) specified!")
def warn(msg, depth):
    """Print a numbered, depth-indented warning; abort at warn_limit."""
    global indent, warn_nbr, warn_limit, current_file
    warn_nbr += 1
    pad = ' ' * (depth * indent)
    print("{:5d} {}{}".format(warn_nbr, pad, msg))
    if warn_nbr == warn_limit:
        print("warning limit (%d) reached for %s <<" % (
            warn_nbr, current_file))
        exit()
#def check_some_props(attr, val, depth): # For [style] properties
# Not needed Jan 2018 versionq
# props_to_check = wp.property_lists[attr]
# new_val = ''; ok = True
# style_props = val.rstrip(';').split(';')
# print("style_props = %s" % style_props)
# for prop in style_props:
# print("prop = %s" % prop)
# p, v = prop.split(':')
# v = v.strip() # May have leading blank
# if p in props_to_check:
# #allowed_vals = wp.properties[p]
# #print("$csp p=%s, allowed_vals=%s." % (p, allowed_vals))
# allowed = value_ok(v, p, depth)
# if not allowed:
# warn("['%s' attribute: value %s not valid for '%s']" % (
# attr,v, p), depth)
# ok = False
# else:
# new_val += ';' + prop
# return (ok, new_val)
def value_ok(v, obj, depth): # Is value v OK for attrib/property obj?
    # Returns (T/F/int, val that matched)
    """Test whether value v is acceptable for attribute/property obj.

    obj may be a property name (looked up in word_properties), a basic
    type name, a literal string, or a tuple of any of these.  Returns an
    (ok, matched_value) pair; an unknown attribute yields (False, None),
    and value sets that cannot be checked yield (True, None).
    """
    #print("V++ value_ok(%s, %s, %s) type(v) = %s, type(obj)=%s" % (
    #    v, obj, depth, type(v), type(obj)))
    if obj in wp.properties:
        values = wp.properties[obj]
    elif obj in wp.basic_types:
        values = wp.basic_types[obj]
    elif isinstance(obj, str):
        return (v == obj, v)
    else: # Unknown attribute
        return (False, None)
    #print("2++ values = %s <%s>" % ((values,), type(values)))
    if len(values) == 0: # Empty values tuple, can't check
        return (True, None)
    elif isinstance(values, str): # values is a string
        if values[0] == '<':
            #print("4++ values=%s, v=%s" % (values, v))
            ok_v, matched_v = value_ok(v, values, depth)
            #print("5++ ok_v = %s, matched_v = %s" % (ok_v, matched_v))
            return (ok_v, matched_v)
        # NOTE(review): values[0] is a single character, so the '+g' and
        # '+h' comparisons below can never be true -- probably values[:2]
        # was intended; confirm against the word_properties data.
        if values[0] == '+g': # Any integer or real
            n = re.match(r'\d+\.\d+$', v)
            rv = None
            if n:
                rv = n.group()
            return (True, rv)
        if values[0] == '+h': # [100,900] in hundreds
            n = re.match(r'\d00$', v)
            rv = None
            if n:
                rv = n.group()
            return (True, rv)
        if values == v:
            # NOTE(review): leftover debug print; fires on every exact match.
            print("4++ values=%s, v=%s." % (values, v))
            return (True, values)
        if values[0] == "[":
            # NOTE(review): check_some_props is commented out above, so
            # reaching this branch raises NameError -- confirm whether
            # bracketed value lists can still occur.
            some_ok, matched_val = check_some_props(values, v, depth)
            return (some_ok, matched_val)
        #if values == '#': # RGB value
        #    lv = v.lower()
        #    if lv[0] == '#': #rrggbb hex
        #        if len(lv) == 7:
        #            return (lv[3:5] == lv[1:3] and lv[5:7] == lv[1:3], None)
        #        if len(lv) == 4:
        #            return (lv[2] == lv[1] and lv[3] == lv[1], None)
        #        return (False, None)
        #    elif lv.find('rgb') == 0: # integers
        #        rgb = re.search(r'\((\d+),(\d+),(\d+)\)', lv)
        #        if rgb:
        #            return ((rgb.group(2) == rgb.group(1) and
        #                     rgb.group(3) == rgb.group(1)), None)
        #        return (False, None)
    #print("6++ values tuple = %s" % (values,))
    for val in values: # values is a tuple
        ok_v, matched_v = value_ok(v, val, depth)
        #print("7++ ok_v = %s, matched_v = %s" % (ok_v, matched_v))
        if ok_v:
            return (True, matched_v)
    #print("8++ values=%s, (%s) <<<" % ((values,), type(values)))
    return (True, None) # Can't check it, so it's OK
def strip_prefix(element):
    """Strip a leading '{namespace}' from an element or attribute name.

    Returns (bare_name, ns_ok); ns_ok is False when the namespace is not
    one of the recognised URLs, and unknown namespaces are recorded once
    each in the global bad_namespaces list.
    """
    global bad_namespaces
    if element[0] != '{':
        return element, True
    rbp = element.rfind('}')  # index of rightmost '}'
    if rbp < 0:
        return element, True
    ns = element[1:rbp]
    ns_ok = ns in wp.xmlns_urls
    if not ns_ok and ns not in bad_namespaces:
        bad_namespaces.append(ns)
    return element[rbp + 1:], ns_ok
def check(el, depth):
    """Recursively validate element el at recursion depth.

    Strips disallowed attributes in place, recurses into allowed
    children, and removes children that may not appear inside this
    element.  Returns False when el is in an unknown namespace (caller
    should drop it) or the warning limit was reached; True otherwise.
    """
    global new_file, trace
    if trace:
        print("T1: %s tag = %s (depth=%d <%s>)" % (
            ' '*(depth*indent), el.tag, depth, type(depth)))
    if warn_nbr >= warn_limit:
        return False
    element, ns_ok = strip_prefix(el.tag) # name of element
    # ElementTree prefixes elements with default namespace in braces
    #print("element=%s, ns_ok=%s" % (element, ns_ok))
    if not ns_ok:
        return False # Remove this el
    if verbose:
        print("%selement % s: %s" % (' '*(depth*indent), element, el.attrib))
    attrs_to_remove = [] # Can't remove them inside the iteration!
    attrs_to_set = []
    for attrib, val in el.attrib.items():
        # (attrib,val) tuples for each attribute
        attr, ns_ok = strip_prefix(attrib)
        if trace:
            print("%s attrib %s = %s (ns_ok = %s), val = %s" % (
                ' ' * (depth*(indent+1)), attr, val, ns_ok, val))
        if attrib in wp.elements: # Is it an element?
            warn("element '%s' not allowed as attribute" % element, depth )
            attrs_to_remove.append(attrib)
        else:
            atr_ok, matched_val = value_ok(val, attr, depth)
            #print("$1-- val=%s, attr=%s -> atr_ok=%s, matched_val=%s" % (
            #    val, attr, atr_ok, matched_val))
            if not atr_ok:
                warn("value '%s' not allowed for attribute %s" % (val, attrib),
                     depth)
                attrs_to_remove.append(attrib)
            if matched_val != val and attrib == 'font-family':
                # Special case!
                # NOTE(review): a value containing 'sans-serif' matches both
                # substring tests below, appending two replacements; the
                # second el.set() wins, forcing 'serif' -- confirm intended.
                if val.find('sans') >= 0:
                    attrs_to_set.append( (attrib, 'sans-serif') )
                if val.find('serif') >= 0:
                    attrs_to_set.append( (attrib, 'serif') )
            #print("%s is %s, matched_val %s" % (attr, atr_ok, matched_val))
    for atr in attrs_to_remove:
        el.attrib.pop(atr)
    for ats in attrs_to_set:
        el.set(ats[0], ats[1])
    children_to_remove = []
    for child in el: # Children of this svg element
        ch_el, el_ok = strip_prefix(child.tag) # name of element
        #print("$$ el=%s, child=%s, el_ok=%s, child.tag=%s, %s" % (
        #    el, ch_el, el_ok, child.tag, type(child)))
        # Check for not-allowed elements
        # NOTE(review): membership is tested with ch_el but the table is
        # indexed with element (the parent) -- verify this asymmetry is
        # deliberate (it looks like element_children[element] lists the
        # children allowed inside the parent).
        if ch_el in wp.element_children:
            allowed_children = wp.element_children[element]
        else: # not in wp.element_children
            allowed_children = []
        if not ch_el in allowed_children:
            msg = "'%s' may not appear in a '%s'" % (ch_el, element)
            warn(msg, depth)
            children_to_remove.append(child)
        else:
            ch_ok = check(child, depth+1) # OK, check this child
            #print("@2@ check(depth %d) returned %s" % (depth, ch_ok))
    #print("@3@ children_to_remove = %s" % children_to_remove)
    for child in children_to_remove:
        el.remove(child)
    return True # OK
def remove_namespace(doc, namespace):
    """Strip '{namespace}' prefixes from every element tag under doc.

    BUG FIX: the original body started with a stray
    'return True # OKace):' line (a mangled def line), so everything
    below it was dead code and no namespace was ever removed.  Also uses
    Element.iter() instead of getiterator(), which was deprecated and
    removed in Python 3.9; the per-tag debug prints are dropped.

    Adapted from http://stackoverflow.com/questions/18159221/
    remove-namespace-and-prefix-from-xml-in-python-using-lxml
    """
    ns = u'{%s}' % namespace
    nsl = len(ns)
    for elem in doc.iter():
        if elem.tag.startswith(ns):
            elem.tag = elem.tag[nsl:]
def checkFile(fn, options):
    """Validate one SVG file; optionally write a stripped .new.svg copy.

    options is only echoed in the progress message.  Returns the number
    of warnings issued (the module-global warn_nbr).
    """
    global current_file, warn_nbr, root
    current_file = fn
    print("Starting %s%s" % (fn, options))
    tree = ET.parse(fn)
    root = tree.getroot()
    #print("root.attrib=%s, test -> %d" % (root.attrib, "xmlns" in root.attrib))
    #   # attrib list doesn't have includes "xmlns", even though it's there
    #print("root.tag=%s" % root.tag)
    # True when the root tag carries no {namespace} prefix at all
    no_ns = root.tag.find("{") < 0
    #print("no_ns = %s" % no_ns)
    ET.register_namespace("", "http://www.w3.org/2000/svg")
    #   Stops tree.write() from prefixing above with "ns0"
    check(root, 0)
    if trace and len(bad_namespaces) != 0:
        print("bad_namespaces = %s" % bad_namespaces)
    if new_file:
        sp = fn.rfind('.svg')
        if sp+3 != len(fn)-1: # Indeces of last chars
            print("filename doesn't end in '.svg' (%d, %d)" % (sp, len(fn)))
        else:
            if no_ns:
                # Ensure the cleaned file declares the SVG namespace
                root.attrib["xmlns"] = "http://www.w3.org/2000/svg"
            for ns in bad_namespaces:
                remove_namespace(root, ns)
            new_fn = fn.replace(".svg", ".new.svg")
            print("writing to %s" % (new_fn))
            tree.write(new_fn)
    return warn_nbr
if __name__ == "__main__":
options = ''
if len(sys.argv) > 2:
options = " %s" % ' '.join(sys.argv[1:-1])
for arg in rem_args:
warn_nbr = 0
n_warnings = checkFile(arg, options)
print("%d warnings for %s" % (n_warnings, arg))
if len(rem_args) == 1:
exit(n_warnings)
| {
"repo_name": "nevil-brownlee/check_svg",
"path": "check-svg.py",
"copies": "1",
"size": "10646",
"license": "mit",
"hash": -1276686827572183000,
"line_mean": 35.3344709898,
"line_max": 104,
"alpha_frac": 0.5166259628,
"autogenerated": false,
"ratio": 3.232918311569997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9224535292570006,
"avg_score": 0.00500179635999824,
"num_lines": 293
} |
# 133. Single Number
#
# Given a non-empty array of integers, every element appears twice
# except for one. Find that single one.
#
# Note: Your algorithm should have a linear runtime complexity. Could
# you implement it without using extra memory? [No, not that I can
# figure out.]
def multiscan(nums):
    """Find the element that appears once, all others appearing twice.

    Repeatedly tests whether nums[0] has a partner within the still-active
    prefix of the list.  If it has none it is the unique element.  When a
    partner is found, both copies are overwritten with values taken from
    the end of the active region and the active length shrinks by two.
    Mutates nums in place; O(n^2) time, O(1) extra space.
    """
    active = len(nums)
    while active > 0:
        candidate = nums[0]
        for j in range(1, active):
            if nums[j] == candidate:
                # Fill the two vacated slots from the active tail.
                nums[j] = nums[active - 1]
                nums[0] = nums[active - 2]
                active -= 2
                break
        else:
            # No partner found: candidate is the single number.
            return candidate
def sortscan(nums):
    """Find the element that appears once, all others appearing twice.

    Sorts a copy of the input so duplicates sit side by side, then walks
    the pairs; the first position whose neighbour differs holds the
    unique element.  O(n log n) time, O(n) extra space.
    """
    ordered = sorted(nums)
    i = 0
    while i + 1 < len(ordered):
        if ordered[i] == ordered[i + 1]:
            i += 2  # matched pair; skip both
        else:
            return ordered[i]
    return ordered[i]
class Solution:
    """Four implementations of LeetCode 136 (Single Number).

    singleNumber is aliased to the sort-and-scan variant (_4).

    BUG FIXES vs. the original:
      - Annotations used List[int] without importing typing.List, which
        raises NameError when the class body executes; the builtin list
        is used instead.
      - singleNumber_2's hand-rolled end-of-loop test skipped the final
        index with 'continue' when the unique element sat there (and on
        one-element input), returning None; restated with any().
    """

    def singleNumber_1(self, nums: list) -> int:
        """Hash-map counting: O(n) time, O(n) extra space."""
        counts = {}
        for n in nums:
            counts[n] = counts.get(n, 0) + 1
        for n, count in counts.items():
            if count == 1:
                return n

    def singleNumber_2(self, nums: list) -> int:
        """Brute-force pairwise scan: O(n^2) time, O(1) extra space."""
        for idx1, n1 in enumerate(nums):
            # Unique element: no other position holds an equal value.
            if not any(idx1 != idx2 and n1 == n2 for idx2, n2 in enumerate(nums)):
                return n1

    def singleNumber_3(self, nums: list) -> int:
        """Delegate to the in-place pairing scan (see multiscan)."""
        return multiscan(nums)

    def singleNumber_4(self, nums: list) -> int:
        """Delegate to the sort-then-pair scan (see sortscan)."""
        return sortscan(nums)

    # Default implementation.
    singleNumber = singleNumber_4
| {
"repo_name": "afbarnard/glowing-broccoli",
"path": "lc/000136.py",
"copies": "1",
"size": "2033",
"license": "mit",
"hash": 8331102719575565000,
"line_mean": 27.6338028169,
"line_max": 80,
"alpha_frac": 0.5159862273,
"autogenerated": false,
"ratio": 3.716636197440585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47326224247405846,
"avg_score": null,
"num_lines": null
} |
# 13.5 pts of 15 for terminology
# 23 pts of 25 for programming
#Part 1: Terminology (15 points)
#1 1pt) What is the symbol "=" used for?
# assigning values, function calls to a variable
# 1 pt right answer
#
#2 3pts) Write a technical definition for 'function'
# A function is a named sequence of statements that perform some calculation and may return an output
# 3 pts right answer
#
#3 1pt) What does the keyword "return" do?
# returns some form of output from a function
# 1 pt right answer
#
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1: boolean: True, False
# 2: string: "ASDASD", "sooosad"
# 3: float: 1.23, 14264.80124
# 4: integer: 314, 0
# 5: tuple: True, "SDA", 12.456, 87
# 4pts, missed parenthesis on tuple
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
# a function definition defines what the function does and a function call calls the function to do what it was defined to do.
# the main difference between the two is the definition has a ":" after it while the function call does not
# a function must me defined before it can be called
# 1.5 pts, mostly right answer, missed function name
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1: Input: user inputs something
# 2: Processing/computation: computer does something with the input
# 3: Output: computer returns some form of output
# 3pts right answer
#Part 2: Programming (25 points)
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
# Hint: Radius is the square root of the area divided by pi
import math
#1 pt for header line 1 pt correct
#3 pt for correct formula 3 pt correct
#1 pt for return value 1 pt correct
#1 pt for parameter name 0 pt put x instead of area
#1 pt for function name 1 pt correct
def diameterfromarea(x):
    # Invert A = pi * r^2:  r = sqrt(A / pi), and the diameter is 2 * r.
    radius = math.sqrt(x / math.pi)
    return 2 * radius
#1pt for header line 1 pt correct
#1pt for parameter names 1 pt correct
#1pt for return value 1 pt correct
#1pt for correct output format 1 pt correct
#3pt for correct use of format function 3 pts correct
def output(c1, c2, c3, total):
out = """
Circle Diameter
c1 {}
c2 {}
c3 {}
Totals {}
""".format(c1, c2, c3, total)
return out
#1pt header line 1 pt correct
#1pt getting input 1 pt got input
#1pt converting input 1 pt converted input
#1pt for calling output function 1 pt called output
#2pt for correct diameter formula 2 pts correct
#1pt for variable names 0 pt used single letter variable names
def main():
    """Ask the user for three circle areas, compute each diameter and
    their sum, then print the formatted report."""
    #Input Section
    # NOTE: raw_input marks this as a Python 2 script.
    a = float(raw_input("Area of C1: "))
    b = float(raw_input("Area of C2: "))
    c = float(raw_input("Area of C3: "))
    #Processings
    c1 = diameterfromarea(a)
    c2 = diameterfromarea(b)
    c3 = diameterfromarea(c)
    total = c1 + c2 + c3
    #Output Section
    res = output(c1, c2, c3, total)
    print res
#1pt for calling main 1 pt main called
# Script entry point: runs immediately when the file is executed.
main()
#1pt explanatory comments 1 pt added explanatory comments
#1pt code format 1 pt code format correct
#1pt script runs without errors 1 pt script runs no errors
| {
"repo_name": "walter2645-cmis/walter2645-cmis-cs2",
"path": "cs2quiz1.py",
"copies": "1",
"size": "3328",
"license": "cc0-1.0",
"hash": 2672031770761448000,
"line_mean": 33.6666666667,
"line_max": 126,
"alpha_frac": 0.7142427885,
"autogenerated": false,
"ratio": 3.389002036659878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4603244825159878,
"avg_score": null,
"num_lines": null
} |
# 137. Single Number II
#
# Given an array of integers, every element appears three times except for one,
# which appears exactly once. Find that single one.
#
# Note:
# Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
class Solution(object):
    """LeetCode 137: every element appears three times except one; find it."""

    def singleNumberCount(self, nums):
        """
        Count occurrences in a dictionary, then return the number seen once.
        (Formerly a duplicate 'singleNumber' definition that was silently
        overridden by the method below; renamed so both remain callable.)
        :type nums: List[int]
        :rtype: int
        """
        counts = {}  # renamed from 'dict' to stop shadowing the builtin
        for num in nums:
            counts[num] = counts.get(num, 0) + 1
        # Debug 'print dict' removed: it polluted stdout and used Py2 syntax.
        for num in counts:
            if counts[num] == 1:
                return num

    def singleNumber(self, nums):
        """
        Track pending counts and delete an entry once it reaches three,
        so only the single number survives in the dictionary.
        :type nums: List[int]
        :rtype: int
        """
        resultDict = {}
        for i in nums:
            if i in resultDict:  # direct membership test, not .keys()
                if resultDict[i] == 2:
                    del resultDict[i]  # third occurrence: drop the triple
                else:
                    resultDict[i] += 1
            else:
                resultDict[i] = 1
        return list(resultDict.keys())[0]
if __name__ == '__main__':
    # Expect 2: the only value appearing exactly once (Python 2 print).
    print Solution().singleNumber([1, 1, 1, 2, 3, 3, 3])
| {
"repo_name": "gengwg/leetcode",
"path": "137_single_number_ii.py",
"copies": "1",
"size": "1181",
"license": "apache-2.0",
"hash": -2004322425491656000,
"line_mean": 25.2444444444,
"line_max": 108,
"alpha_frac": 0.5156646909,
"autogenerated": false,
"ratio": 4.232974910394265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5248639601294265,
"avg_score": null,
"num_lines": null
} |
# 138. Copy List with Random Pointer
#
# A linked list is given such that each node contains an additional random pointer
# which could point to any node in the list or null.
#
# Return a deep copy of the list.
# Definition for singly-linked list with a random pointer.
class RandomListNode(object):
    """Singly-linked list node that also carries a 'random' pointer."""
    def __init__(self, x):
        # The node's value; both outgoing links start unset.
        self.label = x
        self.next = self.random = None
class Solution(object):
    def copyRandomList(self, head):
        """Deep-copy a linked list whose nodes carry an extra random pointer.

        Reference: http://www.cnblogs.com/zuoyuan/p/3745126.html
        Three passes, O(1) extra space besides the copy itself:
        1. Interleave: insert after every original node a duplicate with the
           same label, e.g. 1->2 becomes 1->1'->2->2'.
        2. Map the random pointers: for an original node tmp, the copy's
           random is tmp.next.random = tmp.random.next.
        3. Split the interleaved list back into the original and the copy.
        :type head: RandomListNode
        :rtype: RandomListNode
        """
        if head is None:
            return None
        # add a new node after every old node
        tmp = head
        while tmp:
            newNode = RandomListNode(tmp.label)
            newNode.next = tmp.next
            tmp.next = newNode
            tmp = tmp.next.next
        # fix random
        tmp = head
        while tmp:
            if tmp.random:
                tmp.next.random = tmp.random.next
            tmp = tmp.next.next  # only loop over old lists
        # separate two lists
        newhead = head.next
        pold = head
        pnew = newhead
        while pnew.next:
            pold.next = pnew.next
            pold = pold.next
            pnew.next = pold.next
            pnew = pnew.next
        pold.next = None  # restore the original list's terminator
        pnew.next = None
        return newhead
| {
"repo_name": "gengwg/leetcode",
"path": "138_copy_list_with_random_pointer.py",
"copies": "1",
"size": "2060",
"license": "apache-2.0",
"hash": -9079333589129421000,
"line_mean": 25.625,
"line_max": 82,
"alpha_frac": 0.5874413146,
"autogenerated": false,
"ratio": 2.4377682403433476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35252095549433476,
"avg_score": null,
"num_lines": null
} |
# 1396. Design Underground System - LeetCode
# https://leetcode.com/problems/design-underground-system/
class UndergroundSystem:
    """Tracks customer journeys and average travel time between stations."""

    def __init__(self):
        # (startStation, endStation) -> (trip count, accumulated travel time)
        self.time_sheet = dict()
        # customer id -> (station checked in at, check-in time)
        self.checkin_cache = dict()

    def checkIn(self, id: int, stationName: str, t: int) -> None:
        # A second check-in while a journey is open is ignored.
        if not self.checkin_cache.get(id):
            self.checkin_cache[id] = (stationName, t)

    def checkOut(self, id: int, stationName: str, t: int) -> None:
        record = self.checkin_cache.get(id)
        if not record:
            return
        startStation, startTime = record
        del self.checkin_cache[id]
        key = (startStation, stationName)
        count, total = self.time_sheet.get(key) or (0, 0)
        self.time_sheet[key] = (count + 1, total + t - startTime)

    def getAverageTime(self, startStation: str, endStation: str) -> float:
        count, total = self.time_sheet.get((startStation, endStation))
        return total / count
# Your UndergroundSystem object will be instantiated and called as such:
# obj = UndergroundSystem()
# obj.checkIn(id,stationName,t)
# obj.checkOut(id,stationName,t)
# param_3 = obj.getAverageTime(startStation,endStation)
# Manual check: journeys A->B take 3 and 2 time units -> average 2.5.
obj = UndergroundSystem()
obj.checkIn(1, "A", 1)
obj.checkIn(2, "A", 1)
obj.checkOut(2, "B", 4)
obj.checkOut(1, "B", 3)
print(obj.getAverageTime("A", "B"))
| {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/1396_design-underground-system.py",
"copies": "1",
"size": "1558",
"license": "mit",
"hash": 5499505929967784000,
"line_mean": 36.0952380952,
"line_max": 94,
"alpha_frac": 0.648267009,
"autogenerated": false,
"ratio": 3.3008474576271185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9442510203443908,
"avg_score": 0.0013208526366421103,
"num_lines": 42
} |
"""139. Word Break
https://leetcode.com/problems/word-break/description/
Given a non-empty string s and a dictionary wordDict containing a list of
non-empty words, determine if s can be segmented into a space-separated
sequence of one or more dictionary words.
Note:
The same word in the dictionary may be reused multiple times in the
segmentation.
You may assume the dictionary does not contain duplicate words.
Example 1:
Input: s = "leetcode", wordDict = ["leet", "code"]
Output: true
Explanation: Return true because "leetcode" can be segmented as "leet
code".
Example 2:
Input: s = "applepenapple", wordDict = ["apple", "pen"]
Output: true
Explanation: Return true because "applepenapple" can be segmented as "apple
pen apple".
Note that you are allowed to reuse a dictionary word.
Example 3:
Input: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"]
Output: false
"""
from typing import List
class Solution:
    """LeetCode 139: decide whether s can be segmented into dictionary words."""

    def word_break_1(self, s: str, word_dict: List[str]) -> bool:
        """Backtracking over word prefixes, memoizing unmatchable suffixes.

        :param s: string to segment
        :param word_dict: list of allowed words (reusable)
        :return: True if s splits into a sequence of dictionary words
        """
        unmatched = set()  # suffixes already proven not segmentable

        def backtrack(string: str) -> bool:
            if string in unmatched:
                return False
            for word in word_dict:
                if string == word:
                    return True
                if string.startswith(word) and backtrack(string[len(word):]):
                    return True
            unmatched.add(string)
            return False

        return backtrack(s)

    def word_break_2(self, s: str, word_dict: List[str]) -> bool:
        """Backtracking over split points, memoizing both outcomes.

        :param s: string to segment
        :param word_dict: list of allowed words (reusable)
        :return: True if s splits into a sequence of dictionary words
        """
        words = set(word_dict)  # O(1) membership instead of list scans
        visited = {}  # suffix -> segmentable?

        def backtrack(cur_str: str) -> bool:
            if cur_str in visited:
                return visited[cur_str]
            if cur_str in words:
                return True
            for i in range(1, len(cur_str)):
                # Bug fix: removed a stray no-op `i += 1` that sat inside
                # this loop (reassigning the for-variable has no effect).
                if cur_str[:i] in words and backtrack(cur_str[i:]):
                    visited[cur_str] = True
                    return True
            visited[cur_str] = False
            return False

        return backtrack(s)
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/word_break.py",
"copies": "1",
"size": "2055",
"license": "mit",
"hash": -1025950237266193500,
"line_mean": 25.0126582278,
"line_max": 75,
"alpha_frac": 0.5907542579,
"autogenerated": false,
"ratio": 3.914285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 79
} |
# 139. Word Break
#
# Given a non-empty string s and a dictionary wordDict containing a list of non-empty words,
# determine if s can be segmented into a space-separated sequence of one or more dictionary words.
# You may assume the dictionary does not contain duplicate words.
#
# For example, given
# s = "leetcode",
# dict = ["leet", "code"].
#
# Return true because "leetcode" can be segmented as "leet code".
class Solution(object):
    def wordBreak(self, s, wordDict):
        """Return True if s can be split into a sequence of dictionary words.

        Bottom-up DP (http://www.cnblogs.com/zuoyuan/p/3760660.html):
        breakable[i] records whether the prefix s[:i] is segmentable.
        :type s: str
        :type wordDict: List[str]
        :rtype: bool
        """
        # breakable[0] is True so the very first word can anchor the DP.
        breakable = [True] + [False] * len(s)
        for end in range(1, len(s) + 1):
            for start in range(end):
                if breakable[start] and s[start:end] in wordDict:
                    breakable[end] = True
                    break
        return breakable[-1]
if __name__ == '__main__':
    # Python 2 prints; expect True and True ("ca" + "rs" covers "cars").
    print Solution().wordBreak("leetcode", ["leet", "code"])
    print Solution().wordBreak("cars", ["car", "ca", "rs"])
| {
"repo_name": "gengwg/leetcode",
"path": "139_word_break.py",
"copies": "1",
"size": "1201",
"license": "apache-2.0",
"hash": 45148379034663280,
"line_mean": 32.3611111111,
"line_max": 98,
"alpha_frac": 0.5795170691,
"autogenerated": false,
"ratio": 3.3361111111111112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44156281802111114,
"avg_score": null,
"num_lines": null
} |
"""13C(methyl) - H to C CPMG
Measures methyl carbon chemical exchange recorded on site-specifically
13CH3-labeled proteins in a highly deuterated background. Magnetization is
initally anti-phase and is read out as in-phase. Because of the P-element only
even ncyc should be recorded. The calculation uses a 12x12 basis set:
[Cx(a), Cy(a), Cz(a), 2HxCz(a), 2HyCz(a), 2HzCz(a),
Cx(b), Cy(b), Cz(b), 2HxCz(b), 2HyCz(b), 2HzCz(b)]
Off resonance effects are taken into account. The calculation is designed
explicitly for analyzing the Lewis Kay pulse sequence:
HtoC_CH3_exchange_*00_lek_ILV
Journal of Biomolecular NMR (2007) 38, 79-88
"""
import numpy as np
from numpy import linalg as la
from chemex.experiments.cpmg.base_cpmg import ProfileCPMG2
# Experiment-specific detail merged into the base profile: the 'taub'
# delay (seconds) with its default value.
_EXP_DETAILS = {"taub": {"default": 1.99e-3, "type": float}}
class ProfileCPMGCH3H2C(ProfileCPMG2):
    """13C(methyl) H-to-C CPMG profile.

    Measures methyl-carbon chemical exchange in 13CH3-labeled proteins
    (see the module docstring). Magnetization starts anti-phase (the
    equilibrium term is "2izsz") and is detected in-phase on state a
    ("iz_a"). Reference: Journal of Biomolecular NMR (2007) 38, 79-88.
    """
    # Merge the experiment-specific 'taub' detail into the base table.
    EXP_DETAILS = dict(**ProfileCPMG2.EXP_DETAILS, **_EXP_DETAILS)
    SPIN_SYSTEM = "ixyzsz"
    CONSTRAINTS = "nh"
    def __init__(self, name, data, exp_details, model):
        super().__init__(name, data, exp_details, model)
        self.taub = self.exp_details["taub"]
        # Set the row vector for detection
        self.detect = self.liouv.detect["iz_a"]
        # Set the delays in the experiments
        self.delays += [self.taub]
        # Set the varying parameters by default
        for name, full_name in self.map_names.items():
            if name.startswith(("dw", "r2_i_a")):
                self.params[full_name].set(vary=True)
    def _calculate_unscaled_profile(self, params_local, **kwargs):
        """Simulate the unscaled CPMG intensity for every ncyc in the data.

        Builds delay/pulse propagators from the current parameters,
        assembles the sequence (90 - cpmg - P-element - cpmg - 90) per
        number of cycles, and returns the intensities as an array.
        """
        self.liouv.update(params_local)
        # Calculation of the propagators corresponding to all the delays
        delays = dict(zip(self.delays, self.liouv.delays(self.delays)))
        d_neg = delays[self.t_neg]
        d_eq = delays[self.time_eq]
        d_taub = delays[self.taub]
        # Calculation of the propagators corresponding to all the pulses
        pulses = self.liouv.pulses_90_180_i()
        p90 = np.array([pulses[name] for name in ["90px", "90py", "90mx", "90my"]])
        p180 = np.array([pulses[name] for name in ["180px", "180py", "180mx", "180my"]])
        p180_s = self.liouv.perfect180["sx"]
        # Calculate starting magnetization vector
        mag0 = self.liouv.compute_mag_eq(params_local, term="2izsz")
        # P-element sandwiched between taub delays (Palmer-style block).
        palmer = d_taub @ p90[0] @ p180_s @ p90[0] @ d_taub
        # Calculating the cpmg trains
        cp1 = {0: self.liouv.identity}
        cp2 = {0: self.liouv.identity}
        for ncyc in set(self.data["ncycs"][~self.reference]):
            tau_cp = delays[self.tau_cps[ncyc]]
            echo = tau_cp @ p180[[1, 0]] @ tau_cp
            cp_trains = la.matrix_power(echo, int(ncyc))
            cp1[ncyc] = cp_trains[0] @ d_neg
            cp2[ncyc] = d_neg @ cp_trains[1]
        profile = [
            self.liouv.collapse(
                self.detect
                @ d_eq
                @ p90[1]
                @ cp2[ncyc]
                @ palmer
                @ cp1[ncyc]
                @ p90[0]
                @ mag0
            )
            for ncyc in self.data["ncycs"]
        ]
        return np.asarray(profile)
| {
"repo_name": "gbouvignies/chemex",
"path": "chemex/experiments/cpmg/ch3_h2c.py",
"copies": "1",
"size": "3248",
"license": "bsd-3-clause",
"hash": 6305267732005172000,
"line_mean": 32.8333333333,
"line_max": 88,
"alpha_frac": 0.5969827586,
"autogenerated": false,
"ratio": 3.041198501872659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41381812604726587,
"avg_score": null,
"num_lines": null
} |
"""13CO - Pure Anti-phase Carbonyl 13C CPMG
Analyzes carbonyl chemical exchange that is maintained as anti-phase
magnetization throughout the CPMG block. This results in lower intrinsic
relaxation rates and therefore better sensitivity. The calculations use a 12x12,
2-spin exchange matrix:
[ COx(a), COy(a), COz(a), 2COxNz(a), 2COyNz(a), 2COzNz(a),
COx(b), COy(b), COz(b), 2COxNz(b), 2COyNz(b), 2COzNz(b)]
Notes
-----
Because of the length of the shaped pulses used during the CPMG blocks, off-
resonance effects are taken into account only for the 90-degree pulses that
create COxNz before the CPMG and COzNz after the CPMG. The calculation is
designed explicitly for analyzing the Kay laboratory pulse sequence:
CO_CPMG_SCFilter_x00_dfh1
And can be run with or without sidechain CO inversion via the Inv_CO flag for
uniformly 13C-labeled proteins.
Reference
---------
Journal of Biomolecular NMR (2008) 42, 35-47
"""
import numpy as np
from numpy import linalg as la
from chemex.experiments.cpmg.base_cpmg import ProfileCPMG2
# Experiment-specific details merged into the base profile:
# 'sidechain' toggles the sidechain-CO inversion element (parsed from a
# string), 'taucc' is a delay (seconds) used to build the flip block.
_EXP_DETAILS = {
    "sidechain": {"type": str, "default": "False"},
    "taucc": {"type": float, "default": 9.09e-3},
}
class ProfileCPMGCOAP(ProfileCPMG2):
    """13CO pure anti-phase carbonyl CPMG profile.

    Magnetization stays anti-phase (equilibrium term "2izsz", detection
    "2izsz_a") throughout the CPMG block (see the module docstring).
    Sidechain CO inversion is toggled via the 'sidechain' detail.
    Reference: Journal of Biomolecular NMR (2008) 42, 35-47.
    """
    # Merge the experiment-specific 'sidechain'/'taucc' details in.
    EXP_DETAILS = dict(**ProfileCPMG2.EXP_DETAILS, **_EXP_DETAILS)
    SPIN_SYSTEM = "ixyzsz"
    CONSTRAINTS = "hn_ap"
    def __init__(self, name, data, exp_details, model):
        super().__init__(name, data, exp_details, model)
        self.taucc = self.exp_details["taucc"]
        self.sidechain = self.get_bool(self.exp_details["sidechain"])
        # Set the row vector for detection
        self.detect = self.liouv.detect["2izsz_a"]
        # Set the delays in the experiments
        self.delays += [self.taucc]
        # Set the varying parameters by default
        for name, full_name in self.map_names.items():
            if name.startswith(("dw", "r2_i_a")):
                self.params[full_name].set(vary=True)
    def _calculate_unscaled_profile(self, params_local, **kwargs):
        """Simulate the unscaled CPMG intensity for every ncyc in the data.

        Builds delay/pulse propagators from the current parameters,
        assembles the sequence (90 - cpmg - flip block - cpmg - 90) per
        number of cycles, and returns the intensities as an array.
        """
        self.liouv.update(params_local)
        # Calculation of the propagators corresponding to all the delays
        delays = dict(zip(self.delays, self.liouv.delays(self.delays)))
        d_neg = delays[self.t_neg]
        d_eq = delays[self.time_eq]
        d_taucc = delays[self.taucc]
        # Calculation of the propagators corresponding to all the pulses
        pulses = self.liouv.pulses_90_180_i()
        p90 = np.array([pulses[name] for name in ["90px", "90py", "90mx", "90my"]])
        p180 = np.array([pulses[name] for name in ["180px", "180py", "180mx", "180my"]])
        p180pmy = 0.5 * (p180[1] + p180[3])  # +/- phase cycling
        # Calculate starting magnetization vector
        mag0 = self.liouv.compute_mag_eq(params_local, term="2izsz")
        # Calculate the flip block
        if self.sidechain:
            p_flip = p180pmy
        else:
            p_flip = p90[3] @ d_taucc @ p180pmy @ d_taucc @ p90[1]
        # Calculating the cpmg trains
        cp = {0: self.liouv.identity}
        for ncyc in set(self.data["ncycs"][~self.reference]):
            tau_cp = delays[self.tau_cps[ncyc]]
            echo = tau_cp @ p180[[1, 0]] @ tau_cp
            cp_train = la.matrix_power(echo, int(ncyc))
            cp[ncyc] = d_neg @ cp_train @ d_neg
        profile = [
            self.liouv.collapse(
                self.detect
                @ d_eq
                @ p90[1]
                @ cp[ncyc]
                @ p_flip
                @ cp[ncyc]
                @ p90[1]
                @ mag0
            )
            for ncyc in self.data["ncycs"]
        ]
        return np.asarray(profile)
| {
"repo_name": "gbouvignies/chemex",
"path": "chemex/experiments/cpmg/co_ap.py",
"copies": "1",
"size": "3724",
"license": "bsd-3-clause",
"hash": 3661196107404293000,
"line_mean": 31.6666666667,
"line_max": 88,
"alpha_frac": 0.608216971,
"autogenerated": false,
"ratio": 3.1479289940828403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42561459650828404,
"avg_score": null,
"num_lines": null
} |
# 1-3: Given two strings, write a method to decide if one is a permutation
# of the other
# Permutation: an ordered set that uses the same elements as another
# Example: cat act
# Brute force: for each letter in word1, see if that letter exists in word2
# If yes, remove that letter from the word2. If you get to the end of word1 and
# there are 0 or more letters left, return True
# Worst case: n^2
# Hash
# For each letter in word1, hash. Add 1 at hash for each letter
# For each letter in word2, hash. If zero, return false. Otherwise, subtract 1
class PermutationChecker:
    """Checks two strings to see if one is a permutation of the second.

    Letters are compared case-insensitively, spaces are ignored, and any
    other character makes the check fail. Counting uses a 26-slot table
    indexed by letter.
    """

    def __init__(self):
        # One occurrence counter per letter A-Z.
        self.character_map = [0] * 26

    def __hash_character(self, c):
        """Map c to its A-Z slot (0-25); -1 for a space (ignored);
        None for any other character (invalid)."""
        char_num = ord(c.upper())
        if char_num == ord(' '):
            return -1
        if ord('A') <= char_num <= ord('Z'):
            return char_num - ord('A')
        # Sentinel changed from False to None: slot 0 ('A') compares
        # equal to False under ==, so False was an unsafe sentinel.
        return None

    def __add_to_map(self, c):
        """Count character c; return False if c is not a letter/space."""
        char_num = self.__hash_character(c)
        if char_num is None:
            return False
        if char_num != -1:  # `is not -1` replaced: identity on ints is unreliable
            self.character_map[char_num] += 1
        return True

    def __subtract_from_map(self, c):
        """Un-count character c; return False if c is invalid or appears
        more often in str2 than in str1 (count would go negative)."""
        char_num = self.__hash_character(c)
        if char_num is None:
            return False
        if char_num != -1:
            if self.character_map[char_num] == 0:
                return False
            self.character_map[char_num] -= 1
        return True

    def isPerm(self, str1, str2):
        """Return True if str2 is a permutation of str1 (spaces ignored)."""
        if len(str2) > len(str1):
            return False
        # Bug fix: reset the counters so repeated calls (especially after
        # a failed check) do not inherit stale state from earlier calls.
        self.character_map = [0] * 26
        for char in str1:
            if not self.__add_to_map(char):
                return False
        for char in str2:
            if not self.__subtract_from_map(char):
                return False
        # Bug fix: every count must return to zero; previously a shorter
        # str2 (e.g. 'cat' vs 'ca') was wrongly reported as a permutation.
        return all(count == 0 for count in self.character_map)
# Quick manual checks of the permutation test.
a = PermutationChecker()
print(a.isPerm('cat', 'act'))  # expect True
print(a.isPerm('tack', 'kack'))  # expect False ('kack' has two k's)
| {
"repo_name": "dmart914/CTCI",
"path": "01-arrays-and-strings/permutation.py",
"copies": "1",
"size": "2235",
"license": "apache-2.0",
"hash": 8652293307203110000,
"line_mean": 29.2027027027,
"line_max": 79,
"alpha_frac": 0.5494407159,
"autogenerated": false,
"ratio": 3.9557522123893807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005405405405405405,
"num_lines": 74
} |
# 13 octobre 2017
# astro_v3.py
from pylab import *
import os
def B3V_eq(x):
    """Ordinate of the B3V line at abscissa *x* in a u-g vs g-r diagram.

    :param x: g-r value at which to evaluate the B3V line
    :return: the corresponding u-g value on the B3V line
    """
    slope = 0.9909
    intercept = -0.8901
    return slope * x + intercept
def lignes(filename, n_c1, n_c2):
    """Yield, for each data row of *filename*, two '|'-separated columns.

    The file is expected to start with a header ending at a line that
    begins with "--"; the line immediately after that separator is also
    skipped. Columns are numbered from 1; spaces are stripped and an
    empty cell is yielded as None.

    :param filename: file holding the stars' data
    :param n_c1: number of the first column to extract (c1)
    :param n_c2: number of the second column to extract (c2)
    :return: nothing directly, this is a generator yielding (c1, c2)
    """
    data = open(filename, 'r')
    line = data.readline()
    # Skip the header: everything up to the "--..." separator line.
    while line[0:2] != "--":
        line = data.readline()
    line = data.readline()
    while line != "":
        c1 = ""
        c2 = ""
        n_colonne = 1
        for char in line:
            if char == "|":
                n_colonne += 1
            if n_colonne == n_c1:
                if char != " " and char != "|":
                    c1 += char
            elif n_colonne == n_c2:
                if char != " " and char != "|":
                    c2 += char
            # Past the last wanted column: the rest of the row is unused.
            if n_colonne > max([n_c1, n_c2]):
                break
        if c1 == "":
            c1 = None
        if c2 == "":
            c2 = None
        yield c1, c2
        line = data.readline()
    data.close()
def recupere_magnitudes(filename, n_g_r, n_u_g):
    """Read the g-r and u-g colour columns for every star in *filename*.

    :param filename: catalogue file with the stars' data
    :param n_g_r: column number of g-r in the input file
    :param n_u_g: column number of u-g in the input file
    :return: two lists (g-r values, u-g values); missing cells stay None
    """
    colonne_u_g = []
    colonne_g_r = []
    for g_r, u_g in lignes(filename, n_g_r, n_u_g):
        colonne_u_g.append(None if u_g is None else float(u_g))
        colonne_g_r.append(None if g_r is None else float(g_r))
    return colonne_g_r, colonne_u_g
def find_hot_stars(input_file, output_file, output_folder=None, n_g_r=6, n_u_g=5):
    """Copy to *output_file* only the stars at or below the B3V line.

    A star is kept when u-g <= B3V_eq(g-r). The header of the input
    catalogue is copied through unchanged.

    :param input_file: catalogue file with the input stars' data
    :param output_file: file that will contain only the hot stars
    :param output_folder: working directory (created if missing); both
        file names are taken relative to it when given
    :param n_g_r: column number of g-r in the input file
    :param n_u_g: column number of u-g in the input file
    :return: None; writes the filtered catalogue to *output_file*
    """
    if output_folder is not None:
        # Escape spaces/parentheses by hand for the shell command below.
        # NOTE(review): splicing paths into os.system is injection-prone;
        # os.makedirs / subprocess.run would be safer — confirm first.
        output_folder_for_terminal = ""
        for char in output_folder:
            if char == " ":
                output_folder_for_terminal += "\ "
            elif char == "(":
                output_folder_for_terminal += "\("
            elif char == ")":
                output_folder_for_terminal += "\)"
            else:
                output_folder_for_terminal += char
        if not os.path.exists(output_folder):
            os.system("mkdir " + output_folder_for_terminal)
        input_file = output_folder + "/" + input_file
        output_file = output_folder + "/" + output_file
    data = open(input_file, 'r')
    nfile = open(output_file, "w")
    nfile.write("HOT STARS\n")
    line = data.readline()
    # Copy the header verbatim up to and including the "--" separator.
    while line[0:2] != "--":
        nfile.write(line)
        line = data.readline()
    nfile.write(line)
    line = data.readline()
    i = 0
    while line != "":
        i += 1
        if i % 10000 == 0:
            print("avancement : ", i)
        u_g = ""
        g_r = ""
        n_colonne = 1
        # Same '|'-separated column extraction as in lignes().
        for char in line:
            if char == "|":
                n_colonne += 1
            if n_colonne == n_u_g:
                if char != " " and char != "|":
                    u_g += char
            elif n_colonne == n_g_r:
                if char != " " and char != "|":
                    g_r += char
            if n_colonne > max([n_u_g, n_g_r]):
                break
        # Hot-star criterion: the point lies on or below the B3V line.
        if u_g != "" and g_r != "" and float(u_g) <= B3V_eq(float(g_r)):
            nfile.write(line)
        line = data.readline()
    data.close()
    nfile.close()
def fichier_reg(input_file, output_file, output_folder=None, n_alpha=3, n_delta=4):
    """Write a DS9 .reg region file with a 5" circle on each hot star.

    :param input_file: file with the hot stars' data
    :param output_file: .reg file to produce
    :param n_alpha: column holding the star's alpha coordinate
    :param n_delta: column holding the star's delta coordinate
    :param output_folder: working directory (created if missing); both
        file names are taken relative to it when given
    :return: None
    """
    if output_folder is not None:
        # Escape spaces/parentheses by hand for the shell command below.
        # NOTE(review): prefer os.makedirs over os.system("mkdir ...").
        output_folder_for_terminal = ""
        for char in output_folder:
            if char == " ":
                output_folder_for_terminal += "\ "
            elif char == "(":
                output_folder_for_terminal += "\("
            elif char == ")":
                output_folder_for_terminal += "\)"
            else:
                output_folder_for_terminal += char
        if not os.path.exists(output_folder):
            os.system("mkdir " + output_folder_for_terminal)
        input_file = output_folder + "/" + input_file
        output_file = output_folder + "/" + output_file
    nfile = open(output_file, "w")
    # Standard DS9 region-file preamble, then one circle per star (fk5).
    nfile.write('# Region file format: DS9 version 4.1\n')
    nfile.write(
        'global color=green dashlist=8 3 width=1 font=\"helvetica 10 normal roman\" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n')
    nfile.write('fk5')
    for alpha, delta in lignes(input_file, n_alpha, n_delta):
        nfile.write("\n")
        nfile.write('circle(' + alpha + ',' + delta + ',5\")')
    nfile.close()
def trace_graphique(titre, data_filename, SP_filename="SP.txt", n_g_r_data=6, n_u_g_data=5, n_g_r_SP=4, n_u_g_SP=3,
                    hot_stars_filename=None):
    """Show the u-g vs g-r diagram with the main sequence and B3V line.

    :param titre: title for the plot
    :param data_filename: catalogue file with the input stars' data
    :param SP_filename: file with points of the main sequence
    :param n_g_r_data: g-r column number in data_filename
    :param n_u_g_data: u-g column number in data_filename
    :param n_g_r_SP: g-r column number in SP_filename
    :param n_u_g_SP: u-g column number in SP_filename
    :param hot_stars_filename: optional file with only the hot stars,
        plotted in a different colour
    :return: None; displays the figure interactively
    """
    # fetch the data
    g_r_data, u_g_data = recupere_magnitudes(data_filename, n_g_r_data, n_u_g_data)
    g_r_SP, u_g_SP = recupere_magnitudes(SP_filename, n_g_r_SP, n_u_g_SP)
    # configure the plot (u-g axis is inverted, astronomy convention here)
    plt.xlabel('g-r')
    plt.ylabel('u-g')
    plt.gca().invert_yaxis()
    # plot u-g vs g-r for our data
    plt.plot(g_r_data, u_g_data, '.', c='red', label='Étoiles')
    if hot_stars_filename != None:
        g_r_hot_stars, u_g_hot_stars = recupere_magnitudes(hot_stars_filename, n_g_r_data, n_u_g_data)
        plt.plot(g_r_hot_stars, u_g_hot_stars, '.', c='blue', label='Étoiles chaudes')
    # plot the B3V line over the observed g-r range
    m = min([x for x in g_r_data if x != None])
    M = max([y for y in g_r_data if y != None])
    x = np.linspace(m, M, 100)
    plt.plot(x, B3V_eq(x), c='orange', label='Ligne B3V')
    # plot the main sequence
    plt.plot(g_r_SP, u_g_SP, c='black', label='Séquence principale')
    # set the title and show everything
    title(titre)
    plt.legend()
    plt.show()
def get_sky_picture(region_name, output_file, x_size, y_size, output_folder=None, coordinate_system="J2000",
                    survey="DSS2-red", ra="", dec=""):
    """Download a FITS sky image of *region_name* from the ESO DSS archive.

    Builds the archive URL from the region name (or explicit ra/dec) and
    shells out to wget. NOTE(review): assembling shell commands by string
    concatenation is injection-prone; subprocess.run with an argument
    list would be safer — confirm before changing.

    :param region_name: name of the region to image
    :param output_file: FITS file to write
    :param x_size, y_size: requested image size (archive units)
    :param output_folder: working directory (created if missing)
    :param coordinate_system: equinox passed to the archive
    :param survey: sky survey to query
    :param ra, dec: optional explicit coordinates
    :return: None
    """
    # Escape spaces/parentheses in the output path for the shell.
    output_file_for_terminal = ""
    for char in output_file:
        if char == " ":
            output_file_for_terminal += "\ "
        elif char == "(":
            output_file_for_terminal += "\("
        elif char == ")":
            output_file_for_terminal += "\)"
        else:
            output_file_for_terminal += char
    if output_folder is not None:
        output_folder_for_terminal = ""
        for char in output_folder:
            if char == " ":
                output_folder_for_terminal += "\ "
            elif char == "(":
                output_folder_for_terminal += "\("
            elif char == ")":
                output_folder_for_terminal += "\)"
            else:
                output_folder_for_terminal += char
        if not os.path.exists(output_folder):
            os.system("mkdir " + output_folder_for_terminal)
        output_file_for_terminal = output_folder_for_terminal + "/" + output_file_for_terminal
    # Spaces become '+' in the URL, '\ ' for the shell.
    region_name_for_link = ""
    region_name_for_terminal = ""
    for char in region_name:
        if char == " ":
            region_name_for_link += "+"
            region_name_for_terminal += "\ "
        else:
            region_name_for_link += char
            region_name_for_terminal += char
    os.system(
        "wget 'archive.eso.org/dss/dss/image?ra=" + ra + "&dec=" + dec + "&equinox=" + coordinate_system + "&name="
        + region_name_for_link + "&x=" + str(x_size) + "&y=" + str(y_size) + "&Sky-Survey=" + survey
        + "&mime-type=download-fits&statsmode=WEBFORM' -O " + output_file_for_terminal)
def recup_catalogue(region_name, output_file, cone_size, output_folder=None, size_unit='arcmin'):
    """Download a VizieR (II/341) cone-search catalogue around *region_name*.

    Writes a '|'-separated table with positions and magnitude columns to
    *output_file* via wget. NOTE(review): same os.system shell-string
    caveat as get_sky_picture.

    :param region_name: name of the region to query
    :param output_file: file receiving the catalogue
    :param cone_size: radius of the cone search
    :param output_folder: working directory (created if missing)
    :param size_unit: unit of *cone_size* (default arcmin)
    :return: None
    """
    # Escape spaces/parentheses in the output path for the shell.
    output_file_for_terminal = ""
    for char in output_file:
        if char == " ":
            output_file_for_terminal += "\ "
        elif char == "(":
            output_file_for_terminal += "\("
        elif char == ")":
            output_file_for_terminal += "\)"
        else:
            output_file_for_terminal += char
    if output_folder is not None:
        output_folder_for_terminal = ""
        for char in output_folder:
            if char == " ":
                output_folder_for_terminal += "\ "
            elif char == "(":
                output_folder_for_terminal += "\("
            elif char == ")":
                output_folder_for_terminal += "\)"
            else:
                output_folder_for_terminal += char
        if not os.path.exists(output_folder):
            os.system("mkdir " + output_folder_for_terminal)
        output_file_for_terminal = output_folder_for_terminal + "/" + output_file_for_terminal
    # Spaces become '+' in the URL, '\ ' for the shell.
    region_name_for_link = ""
    region_name_for_terminal = ""
    for char in region_name:
        if char == " ":
            region_name_for_link += "+"
            region_name_for_terminal += "\ "
        else:
            region_name_for_link += char
            region_name_for_terminal += char
    os.system(
        "wget '" + 'http://vizier.u-strasbg.fr/viz-bin/asu-tsv/VizieR?-source=II/341/&-oc.form=dec&-out.max=unlimited&-c='
        + region_name_for_link + '&-c.eq=J2000&-c.r=' + str(cone_size) + '&-c.u=' + size_unit
        + '&-c.geom=r&-out=RAJ2000&-out=DEJ2000&-out=u-g&-out=g-r2&-out=umag&-out=e_umag&-out=gmag&-out=e_gmag&-out=r2mag&-out=e_r2mag&-out=Hamag&-out=e_Hamag&-out=rmag&-out=e_rmag&-out=imag&-out=e_imag&-out.add=_Glon,_Glat&-oc.form=dec&-out.form=|+-Separated-Values'
        + "' -O " + output_file_for_terminal)
def save_plot(output_file, input_file, titre, SP_filename="SP.txt", output_folder=None, n_g_r_data=6, n_u_g_data=5, n_g_r_SP=4, n_u_g_SP=3,
              input_file_hot_stars=None):
    """Save the u-g vs g-r diagram (main sequence + B3V line) to a file.

    Same figure as trace_graphique, but written to *output_file* with
    savefig instead of being shown.

    :param output_file: image file to write
    :param input_file: catalogue file with the input stars' data
    :param titre: title for the plot
    :param SP_filename: file with points of the main sequence
    :param output_folder: working directory (created if missing); input
        catalogues (except SP) and the output file are relative to it
    :param n_g_r_data: g-r column number in input_file
    :param n_u_g_data: u-g column number in input_file
    :param n_g_r_SP: g-r column number in SP_filename
    :param n_u_g_SP: u-g column number in SP_filename
    :param input_file_hot_stars: optional file with only the hot stars,
        plotted in a different colour
    :return: None
    """
    if output_folder is not None:
        # Escape spaces/parentheses by hand for the shell command below.
        # NOTE(review): prefer os.makedirs over os.system("mkdir ...").
        output_folder_for_terminal = ""
        for char in output_folder:
            if char == " ":
                output_folder_for_terminal += "\ "
            elif char == "(":
                output_folder_for_terminal += "\("
            elif char == ")":
                output_folder_for_terminal += "\)"
            else:
                output_folder_for_terminal += char
        if not os.path.exists(output_folder):
            os.system("mkdir " + output_folder_for_terminal)
        input_file = output_folder + "/" + input_file
        if input_file_hot_stars is not None:
            input_file_hot_stars = output_folder + "/" + input_file_hot_stars
        output_file = output_folder + "/" + output_file
    # fetch the data
    g_r_data, u_g_data = recupere_magnitudes(input_file, n_g_r_data, n_u_g_data)
    g_r_SP, u_g_SP = recupere_magnitudes(SP_filename, n_g_r_SP, n_u_g_SP)
    # configure the plot (u-g axis is inverted)
    plt.xlabel('g-r')
    plt.ylabel('u-g')
    plt.gca().invert_yaxis()
    # plot u-g vs g-r for our data
    plt.plot(g_r_data, u_g_data, '.', c='red', label='Etoiles')
    if input_file_hot_stars != None:
        g_r_hot_stars, u_g_hot_stars = recupere_magnitudes(input_file_hot_stars, n_g_r_data, n_u_g_data)
        plt.plot(g_r_hot_stars, u_g_hot_stars, '.', c='blue', label='Etoiles chaudes')
    # plot the B3V line over the observed g-r range
    m = min([x for x in g_r_data if x != None])
    M = max([y for y in g_r_data if y != None])
    x = np.linspace(m, M, 100)
    plt.plot(x, B3V_eq(x), c='orange', label='Ligne B3V')
    # plot the main sequence
    plt.plot(g_r_SP, u_g_SP, c='black', label='Séquence principale')
    # set the title and save the figure
    title(titre)
    plt.legend()
    plt.savefig(output_file)
def analyser_region(region_name, cone_size):
    """Run the full hot-star analysis pipeline for one sky region.

    Downloads the catalogue and a sky picture for the region, selects the hot
    stars, builds a ds9 region file, saves the colour-colour plot, and renders
    the sky picture with the hot stars marked using ds9.

    :param region_name: name of the region as understood by the catalogue
                        services (e.g. "RCW 49")
    :param cone_size: cone-search radius in arcmin
    :return: None; all outputs are written into a folder named
             "<region>_(<cone_size> arcmin)"
    """
    # File-system friendly region name (spaces replaced by underscores).
    region_name_for_filenames = region_name.replace(" ", "_")
    output_folder = region_name_for_filenames + " (" + str(cone_size) + " arcmin)"
    # NOTE: the original also built a shell-escaped copy of output_folder
    # here, but it was never used (the ds9 call below runs from inside the
    # folder via os.chdir), so that dead code has been removed.
    output_file_data = region_name_for_filenames + ".data.txt"
    output_file_hot_stars_data = region_name_for_filenames + ".hot_stars_data.txt"
    output_file_reg = region_name_for_filenames + ".reg"
    output_file_fits = region_name_for_filenames + ".fits"
    output_file_plot = region_name_for_filenames + ".plot.png"
    output_file_sky_picture = region_name_for_filenames + ".sky_picture.png"
    # Pipeline: catalogue -> sky image -> hot-star selection -> ds9 region
    # file -> colour-colour plot.
    recup_catalogue(region_name, output_file_data, cone_size, output_folder)
    get_sky_picture(region_name, output_file_fits, 2 * cone_size, 2 * cone_size, output_folder)
    find_hot_stars(output_file_data, output_file_hot_stars_data, output_folder)
    fichier_reg(output_file_hot_stars_data, output_file_reg, output_folder)
    save_plot(output_file_plot, output_file_data, region_name + " (cone search : " + str(cone_size) + " arcmin)", output_folder=output_folder, input_file_hot_stars=output_file_hot_stars_data)
    # Drive ds9 from inside the output folder so the relative file names
    # resolve; restore the previous working directory afterwards.
    oldpwd = os.getcwd()
    os.chdir(output_folder)
    os.system("ds9 " + output_file_fits + " -regions " + output_file_reg + " -saveimage " + output_file_sky_picture + " -exit")
    os.chdir(oldpwd)
# Example run: analyse the RCW 49 region with a 10 arcmin cone search.
analyser_region("RCW 49", 10)
| {
"repo_name": "anthonygi13/Recherche_etoiles_chaudes",
"path": "astro_v3.py",
"copies": "1",
"size": "16991",
"license": "apache-2.0",
"hash": -5304523769552843000,
"line_mean": 37.4321266968,
"line_max": 267,
"alpha_frac": 0.5871548831,
"autogenerated": false,
"ratio": 3.0751267197682837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9135046343971998,
"avg_score": 0.005447051779257345,
"num_lines": 442
} |
''' 13-plot_long_periods.py
===============================================
AIM: Prepare cumulative plots (THIS SCRIPT IS with STRAY LIGHT)
INPUT: files: - <orbit_id>_misc/ : files from 12-<...>.py or 11-<...>.py
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ : <height>_<threshold_obs_time>_<max_mag><_SAA?>.png/pdf/eps
CMD: python 13-plot_long_periods.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- BaseMap --> http://matplotlib.org/basemap/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: see typeplot to know which map to plot
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.cm as cm
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
###########################################################################
### PARAMETERS
# NOTE(review): this is a Python 2 script (print statements); it will not run
# unmodified under Python 3.
# orbit_id
orbit_id = 301
# Show plots ?
show = True
# Save the picture ?
save = True
# max_mag
max_mag = 12.
# Plot a few stars as well ?
stars= False
targets=False
# Fancy plots ?
fancy = True
# Scale of the plot il log form ?
log_plot = False
# SAA ?
SAA = True
threshold_obs_time = 50
# what to plot ?
# mag -> magnitude
# raw -> using raws maps data (unused)
# anything else: without magnitude
typeplot = 'mag'
## Do not touch
if SAA: note = '_SAA'
else: note = ''
## end do not touch
# File name for the input data file (WITHOUT EXTENSION)
input_fname_wo_mag = 'TEST-data_%d%s' % (threshold_obs_time, note)
input_fname_wi_mag = 'mag_over_year_%d_mag_%02.1f%s' % (threshold_obs_time, max_mag, note)
input_fname_raw = 'data_raw_%d%s' % (threshold_obs_time, note)
# Extension (example: .dat)
ext = '.dat'
###########################################################################
### INITIALISATION
# Pick the input data file according to the requested map type.
if typeplot == 'mag': input_fname = input_fname_wi_mag
elif typeplot == 'raw': input_fname = input_fname_raw
else: input_fname = input_fname_wo_mag
input_fname += ext
print 'Loading %s' % input_fname
if fancy: figures.set_fancy()
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
## Prepare grid
# Cell-centred RA/dec grid covering the whole sky, resolution from param.
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
# NOTE(review): np.float was removed in NumPy 1.24; np.float64 (or float)
# would be needed on modern NumPy.
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
visibility = np.zeros(np.shape(ra_grid))
workspace = np.zeros(np.shape(ra_grid))
data = np.zeros(np.shape(ra_grid))
if stars:
    # Coordinates (degrees), label offsets and names for a few bright stars.
    ra_stars=[101.2833, 95.9875, 213.9167, 219.9, 279.2333, 78.6333, 114.8250, 88.7917]
    dec_stars=[-16.7161, -52.6956, 19.1822, -60.8339, 38.7836, -8.2014, 5.2250, 7.4069]
    y_offset=[0.5e6,0.5e6,-0.8e6,0.5e6,0.5e6,0.5e6,-0.8e6,0.5e6]
    labels = ['Sirius','Canopus','Arcturus',r'$\alpha\mathrm{Centauri}$','Vega','Rigel','Procyon','Betelgeuse']
# NOTE(review): when targets is True the second load overrides the first one.
if targets: ra_tar, dec_tar = np.loadtxt('resources/targets.dat', unpack=True)
if targets: ra_tar, dec_tar, magn = np.loadtxt('resources/defined-exo.csv', delimiter=';', unpack=True)
############################################################################
### LOADS AND PLOTS
data = np.loadtxt(folder_misc+input_fname)
sky = np.size(data)
seeable_points = 0.
# Count cells with strictly positive observation time, as a sky percentage.
for i in range(0,np.shape(data)[0]):
    seeable_points += np.size(data[i,data[i,:]>0])
seeable_points = seeable_points / sky * 100.
# mask point in the map for which observation time is zero
#data[data<1] = np.nan
fig = plt.figure()
# Mollweide all-sky projection centred on longitude 180.
m = Basemap(projection='moll',lon_0=180,resolution='c')
extent = (-np.pi,np.pi,-np.pi/2.,np.pi/2.)
if log_plot:
    # Logarithmic colour scale: 100 filled levels, 10 colourbar ticks and
    # 5 labelled contour lines, all formatted without decimals.
    from matplotlib.colors import LogNorm
    max_level = np.ceil(np.max(data/60.))
    levels = np.logspace(np.log10(1), np.log10(max_level),100)
    levels_lines_cb = np.logspace(np.log10(1), np.log10(max_level),10, endpoint=True)
    levels_lines = np.linspace(1, max_level,5)
    fmt={}
    for l in levels_lines:
        fmt[l] = '%3.0f' % l
    fmt_cb={}
    for l in levels_lines_cb:
        fmt_cb[l] = '%3.0f' % l
else:
    levels_lines = 10
    levels = 100
# Convert the grid from radians to degrees for Basemap's latlon mode.
ra_grid *= const.RAD
#ra_grid -= 180.
#ra_grid = ra_grid - 180 #= (ra_grid-np.pi) #*180. / np.pi
dec_grid *= const.RAD
# Contour lines on top, filled contours underneath (data is in minutes,
# divided by 60 to plot hours).
CS1=m.contour( ra_grid,dec_grid,data/60.,levels_lines,colors='k',latlon=True)
if log_plot:
    CS = m.contourf( ra_grid ,dec_grid,data/60.,levels,norm=LogNorm(), cmap=plt.cm.jet,latlon=True)
else:
    CS = m.contourf( ra_grid ,dec_grid,data/60.,levels, cmap=plt.cm.jet,latlon=True)
#m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-60.,90.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,360.,30.))
if stars:
    x,y = m(ra_stars, dec_stars)
    m.plot(x,y, 'w*', markersize=10)
    for label, xpt, ypt, y_offset in zip(labels, x, y,y_offset):
        plt.text(xpt, ypt+y_offset, label, color='white', size='x-small', ha='center', weight='black') # #93a4ed
# Manually label the meridians along the equator (Mollweide has no
# automatic meridian labels).
ra__ = np.arange(0., 360., 30.)
#print ra__
x, y = m(ra__,ra__*0)
for x,y,ra in zip(x,y,ra__):
    plt.text(x, y, figures.format_degree(ra), color='black', ha='center', weight='black', size='small') ##93c6ed
if targets:
    # NOTE(review): the first conversion is immediately overwritten by the
    # second call — presumably a leftover from an earlier data format.
    x,y = m(ra_tar*180./np.pi, dec_tar*180./np.pi)
    x,y = m(ra_tar, dec_tar)
    m.scatter(x,y, c='white', edgecolor='k', marker='+', s=20,zorder=10, lw=0.5)
if log_plot:
    plt.clabel(CS1, inline=1, fontsize=10, fmt=fmt)
    cbar = plt.colorbar(CS,ticks=levels_lines_cb, orientation='horizontal',shrink=.8, format='%.f', spacing='proportional')
else:
    cbar = plt.colorbar(CS, orientation='horizontal',shrink=.8)
# Nudge the colourbar upwards so it does not overlap the map.
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = cbar.ax.get_position().bounds
cbar.ax.set_position([ll, bb+0.1, ww, hh])
cbar.set_label(r'$\mathrm{Observation\ time\ [Hours]}$')
# Save plot
if save:
    # Rebuild the file-name suffix from the active options.
    if SAA: note='_SAA'
    else: note=''
    if log_plot: note = '%s_log' % note
    if typeplot == 'mag': fname = '%d_%d_mag_%3.1f%s' % (orbit_id, threshold_obs_time, max_mag, note)
    elif typeplot == 'raw': fname = '%d_%d%s' % (orbit_id, threshold_obs_time, note)
    else: fname = '%d_%d%s' % (orbit_id, threshold_obs_time, note)
    figures.savefig(folder_figures+fname, fig, fancy)
    print 'saved as %s' % folder_figures+fname
if show: plt.show()
print 'Percentage of the sky which is visible: %3.1f%%' % seeable_points
| {
"repo_name": "kuntzer/SALSA-public",
"path": "13_plot_long_periods.py",
"copies": "1",
"size": "6716",
"license": "bsd-3-clause",
"hash": -8277964516791386000,
"line_mean": 28.0735930736,
"line_max": 120,
"alpha_frac": 0.6360929124,
"autogenerated": false,
"ratio": 2.600077429345722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37361703417457215,
"avg_score": null,
"num_lines": null
} |
# 14.02.2007
# last revision: 20.03.2008
#
# Declarative problem description file for SfePy: each top-level assignment
# below is picked up by name (material_*, field_*, variable_*, region_*,
# ebc_*, integral_*, equations, solver_*, options, fe) by the solver.
#!
#! Poisson Equation
#! ================
#$ \centerline{Example input file, \today}
#! Mesh
#! ----
filename_mesh = 'database/simple.mesh'
#! Materials
#! ---------
#$ Here we define just a constant coefficient $c$ of the Poisson equation.
#$ The 'here' mode says that. Other possible mode is 'function', for
#$ material coefficients computed/obtained at runtime.
material_2 = {
    'name' : 'coef',
    'mode' : 'here',
    'region' : 'Omega',
    'val' : 1.0,
}
#! Fields
#! ------
#! A field is used mainly to define the approximation on a (sub)domain, i.e. to
#$ define the discrete spaces $V_h$, where we seek the solution.
#!
#! The Poisson equation can be used to compute e.g. a temperature distribution,
#! so let us call our field 'temperature'. On a region called 'Omega'
#! (see below) it will be approximated using P1 finite elements.
field_1 = {
    'name' : 'temperature',
    'dim' : (1,1),
    'flags' : (),
    'domain' : 'Omega',
    'bases' : {'Omega' : '3_4_P1'}
}
#! Variables
#! ---------
#! One field can be used to generate discrete degrees of freedom (DOFs) of
#! several variables. Here the unknown variable (the temperature) is called
#! 't', its associated DOF name is 't.0' --- this will be referred to
#! in the Dirichlet boundary section (ebc). The corresponding test variable of
#! the weak formulation is called 's'. Notice that the 'dual' item of a test
#! variable must specify the unknown it corresponds to.
variable_1 = {
    'name' : 't',
    'kind' : 'unknown field',
    'field' : 'temperature',
    'order' : 0, # order in the global vector of unknowns
}
variable_2 = {
    'name' : 's',
    'kind' : 'test field',
    'field' : 'temperature',
    'dual' : 't',
}
#! Regions
#! -------
region_1000 = {
    'name' : 'Omega',
    'select' : 'elements of group 6',
}
region_03 = {
    'name' : 'Gamma_Left',
    'select' : 'nodes in (x < 0.00001)',
}
region_4 = {
    'name' : 'Gamma_Right',
    'select' : 'nodes in (x > 0.099999)',
}
#! Boundary Conditions
#! -------------------
#! Essential (Dirichlet) boundary conditions can be specified as follows:
ebc_1 = {
    'name' : 't1',
    'region' : 'Gamma_Left',
    'dofs' : {'t.0' : 2.0},
}
ebc_2 = {
    'name' : 't2',
    'region' : 'Gamma_Right',
    'dofs' : {'t.0' : -2.0},
}
#! Equations
#! ---------
#$ The weak formulation of the Poisson equation is:
#$ \begin{center}
#$ Find $t \in V$, such that
#$ $\int_{\Omega} c\ \nabla t : \nabla s = f, \quad \forall s \in V_0$.
#$ \end{center}
#$ The equation below directly corresponds to the discrete version of the
#$ above, namely:
#$ \begin{center}
#$ Find $\bm{t} \in V_h$, such that
#$ $\bm{s}^T (\int_{\Omega_h} c\ \bm{G}^T G) \bm{t} = 0, \quad \forall \bm{s}
#$ \in V_{h0}$,
#$ \end{center}
#$ where $\nabla u \approx \bm{G} \bm{u}$. Below we use $f = 0$ (Laplace
#$ equation).
#! We also define an integral here: 'gauss_o1_d3' says that we wish to use
#! quadrature of the first order in three space dimensions.
integral_1 = {
    'name' : 'i1',
    'kind' : 'v',
    'quadrature' : 'gauss_o1_d3',
}
equations = {
    'Temperature' : """dw_laplace.i1.Omega( coef.val, s, t ) = 0"""
}
#! Linear solver parameters
#! ---------------------------
#! Just use umfpack.
solver_0 = {
    'name' : 'ls',
    'kind' : 'ls.umfpack',
}
#! Nonlinear solver parameters
#! ---------------------------
#! Even linear problems are solved by a nonlinear solver (KISS rule) - only one
#! iteration is needed and the final residual is obtained for free.
solver_1 = {
    'name' : 'newton',
    'kind' : 'nls.newton',
    'i_max' : 1,
    'eps_a' : 1e-10,
    'eps_r' : 1.0,
    'macheps' : 1e-16,
    'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
    'ls_red' : 0.1,
    'ls_red_warp' : 0.001,
    'ls_on' : 1.1,
    'ls_min' : 1e-5,
    'check' : 0,
    'delta' : 1e-6,
    'is_plot' : False,
    'matrix' : 'internal', # 'external' or 'internal'
    'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
#! Options
#! -------
#! Use them for anything you like... Here we show how to tell which solvers
#! should be used - reference solvers by their names.
options = {
    'nls' : 'newton',
    'ls' : 'ls',
}
#! FE assembling parameters
#! ------------------------
#! 'chunk_size' determines maximum number of elements to assemble in one C
#! function call. Higher values mean faster assembling, but also more memory
#! usage.
fe = {
    'chunk_size' : 1000
}
| {
"repo_name": "certik/sfepy",
"path": "input/poisson.py",
"copies": "1",
"size": "4538",
"license": "bsd-3-clause",
"hash": 624042417552123000,
"line_mean": 25.0804597701,
"line_max": 79,
"alpha_frac": 0.572939621,
"autogenerated": false,
"ratio": 2.875792141951838,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8783633444423575,
"avg_score": 0.0330196637056524,
"num_lines": 174
} |
# 1418. Display Table of Food Orders in a Restaurant - LeetCode
# https://leetcode.com/problems/display-table-of-food-orders-in-a-restaurant/
from typing import List
class Solution:
    def displayTable(self, orders: List[List[str]]) -> List[List[str]]:
        """Build the restaurant display table.

        Each order is [customerName, tableNumber, foodItem].  The first row
        of the result is the header: "Table" followed by all food items in
        alphabetical order.  Each following row lists one table (tables in
        increasing numeric order) and, as strings, how many of each item
        that table ordered.
        """
        # counts[table_number] maps food name -> quantity ordered.
        counts = {}
        foods = set()
        for _customer, table, food in orders:
            foods.add(food)
            per_table = counts.setdefault(table, {})
            per_table[food] = per_table.get(food, 0) + 1
        header_foods = sorted(foods)
        display = [["Table"] + header_foods]
        # Table numbers are strings; sort them numerically but keep the
        # original string form in the output.
        for table in sorted(counts, key=int):
            row = [table] + [str(counts[table].get(food, 0)) for food in header_foods]
            display.append(row)
        return display
# Smoke test with the example from the problem statement.
orders = [["David","3","Ceviche"],["Corina","10","Beef Burrito"],["David","3","Fried Chicken"],["Carla","5","Water"],["Carla","5","Ceviche"],["Rous","3","Ceviche"]]
sl = Solution()
ret = sl.displayTable(orders)
print(ret)
| {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/1418_display-table-of-food-orders-in-a-restaurant.py",
"copies": "1",
"size": "1351",
"license": "mit",
"hash": -1901851463914809900,
"line_mean": 37.6,
"line_max": 164,
"alpha_frac": 0.5632864545,
"autogenerated": false,
"ratio": 3.5646437994722957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46279302539722955,
"avg_score": null,
"num_lines": null
} |
#14.1 Add a square_list
# class Shape():
# def __init__(self, l, w):
# self._length = l
# self._width = w
#
# def calculate_perimeter(self):
# return (self._length + self._width)*2
#
# class Square(Shape):
# square_list = []
#
# def __init__(self, l):
# self._length = l
# self._width = l
# self.square_list.append(self._length)
#
#
# sq1 = Square(12)
# sq2 = Square(4)
#
# print(Square.square_list)
# 14.2
class Shape():
    """A rectangle described by its length and width."""

    def __init__(self, l, w):
        # Leading underscores mark the dimensions as internal attributes.
        self._length = l
        self._width = w

    def calculate_perimeter(self):
        """Return the perimeter: twice the sum of length and width."""
        return 2 * (self._length + self._width)
class Square(Shape):
    """A square: a Shape whose sides are all equal.

    Every side length ever constructed is recorded in the class-level
    ``square_list``, shared by all instances (exercise 14.1).
    """

    square_list = []

    def __init__(self, l):
        # A square is a rectangle with equal sides.
        super().__init__(l, l)
        Square.square_list.append(self._length)

    def __repr__(self):
        return '{} by {}'.format(self._length, self._length)
# sq1 = Square(4)
# print(sq1)
#14.3
def isTheSame(obj1, obj2):
    """Return True when obj1 and obj2 are instances of exactly the same class.

    Uses an identity check on the types, so a subclass instance does not
    compare equal to an instance of its parent class.
    """
    # The original if/return True/return False dance collapses to the
    # boolean expression itself.
    return type(obj1) is type(obj2)
# Both arguments are Square instances, so this prints True.
sq1 = Square(2)
sq2 = Square(4)
print(isTheSame(sq1, sq2))
| {
"repo_name": "Frikeer/LearnPython",
"path": "exc14/exc14.py",
"copies": "1",
"size": "1099",
"license": "unlicense",
"hash": 6761107140029584000,
"line_mean": 17.9482758621,
"line_max": 60,
"alpha_frac": 0.5432211101,
"autogenerated": false,
"ratio": 2.8694516971279374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8875960756507266,
"avg_score": 0.0073424101441342815,
"num_lines": 58
} |
# 141. Linked List Cycle - LeetCode
# https://leetcode.com/problems/linked-list-cycle/description/
# Definition for singly-linked list.
class ListNode(object):
    # Minimal singly-linked list node: a payload plus a successor pointer.
    def __init__(self, x):
        self.val = x      # payload stored in this node
        self.next = None  # next node in the list, or None at the tail
# TLE:
# class Solution(object):
# def hasCycle(self, head):
# """
# :type head: ListNode
# :rtype: bool
# """
# ptr = head
# total_call = 0
# while True:
# if ptr is None:
# break
# call_count = 0
# working_ptr = head
# while call_count < total_call:
# if working_ptr == ptr:
# return True
# else:
# call_count += 1
# working_ptr = working_ptr.next
# ptr = ptr.next
# total_call += 1
# return False
class Solution(object):
    def hasCycle(self, head):
        """Detect a cycle using Floyd's tortoise-and-hare pointers.

        :type head: ListNode
        :rtype: bool
        """
        if head is None or head.next is None:
            return False
        # The tortoise advances one node per step, the hare two; they can
        # only meet again if the list loops back on itself.
        tortoise, hare = head, head.next
        while tortoise != hare:
            if hare.next is None or hare.next.next is None or tortoise.next is None:
                # The hare reached the tail: the list terminates, no cycle.
                return False
            tortoise = tortoise.next
            hare = hare.next.next
        return True
# Ad-hoc checks: empty list, single node, a short chain, then a forced cycle.
s = Solution()
l = None
print(s.hasCycle(l))
l = ListNode(0)
print(s.hasCycle(l))
l.next = ListNode(1)
l.next.next = ListNode(2)
l.next.next.next = ListNode(3)
print(s.hasCycle(l))
l.next.next.next.next = l.next
print(s.hasCycle(l)) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/141_linked-list-cycle.py",
"copies": "1",
"size": "1611",
"license": "mit",
"hash": 3138583598956296000,
"line_mean": 25,
"line_max": 80,
"alpha_frac": 0.5009310987,
"autogenerated": false,
"ratio": 3.5098039215686274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4510735020268627,
"avg_score": null,
"num_lines": null
} |
"""141. Linked List Cycle
https://leetcode.com/problems/linked-list-cycle/
Given a linked list, determine if it has a cycle in it.
To represent a cycle in the given linked list, we use an integer pos which
represents the position (0-indexed) in the linked list where tail connects to.
If pos is -1, then there is no cycle in the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list,
where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: true
Explanation: There is a cycle in the linked list,
where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: false
Explanation: There is no cycle in the linked list.
Follow up:
Can you solve it using O(1) (i.e. constant) memory?
"""
from common.list_node import ListNode
class Solution(object):
    def has_cycle(self, head: ListNode) -> bool:
        """Floyd's cycle detection.

        A fast pointer advancing two nodes per step eventually catches a
        slow pointer advancing one node per step if and only if the list
        contains a loop.
        """
        if not head or not head.next:
            return False
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                return True
        # The fast pointer fell off the tail: the list terminates.
        return False
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/linked_list_cycle.py",
"copies": "1",
"size": "1260",
"license": "mit",
"hash": 7742093496961098000,
"line_mean": 21.1052631579,
"line_max": 78,
"alpha_frac": 0.6285714286,
"autogenerated": false,
"ratio": 3.452054794520548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9580626223120547,
"avg_score": 0,
"num_lines": 57
} |
# 141. Linked List Cycle
#
# Given a singly-linked list, determine if it has a cycle.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
def has_cycle_set(head):
    """Cycle detection via a visited set.

    Walks the list remembering every node seen; revisiting a node proves a
    cycle.  O(n) time, O(n) extra space.

    Returns True if a cycle exists, False otherwise.
    """
    seen = set()
    node = head
    while node is not None:
        if node in seen:
            return True
        seen.add(node)
        node = node.next
    # Bug fix: the original fell off the end and implicitly returned None;
    # make the no-cycle result an explicit False.
    return False
def has_cycle_two_ptrs1(head):
    """Two-pointer cycle check.

    The fast pointer moves every step, the slow one every other step; the
    pointers are compared only on the even steps, once both have left the
    head.  Returns True iff the list contains a cycle.
    """
    slow = head
    fast = head
    step = 0
    while fast is not None:
        # Compare only on even steps past the start, when the slow pointer
        # has genuinely diverged from the fast one.
        if step > 0 and step % 2 == 0 and slow == fast:
            return True
        fast = fast.next
        if step % 2 == 0:
            slow = slow.next
        step += 1
    return False
def has_cycle_two_ptrs2(head):
    """Tortoise-and-hare cycle check with a movement flag.

    The fast pointer moves two nodes per iteration, the slow one a single
    node; the pointers are compared only after the fast pointer has
    completed at least one double step.  Returns True iff a cycle exists.
    """
    slow = head
    fast = head
    moved = False
    while fast is not None:
        # Before any movement both pointers sit on head, so the comparison
        # is only meaningful once the fast pointer has advanced twice.
        if moved and slow == fast:
            return True
        slow = slow.next
        fast = fast.next
        if fast is not None:
            fast = fast.next
            moved = True
    return False
class Solution:
    """LeetCode-style wrapper exposing the three implementations above.

    Bug fix: ``ListNode`` is only described in a comment in this file, so
    evaluating it in the annotations raised NameError when the class body
    was created; the annotations are now string (forward-reference) form.
    """

    def hasCycle1(self, head: "ListNode") -> bool:
        """Cycle check backed by a visited set (O(n) extra space)."""
        return has_cycle_set(head)

    def hasCycle2(self, head: "ListNode") -> bool:
        """Cycle check using the even-step two-pointer scheme."""
        return has_cycle_two_ptrs1(head)

    def hasCycle3(self, head: "ListNode") -> bool:
        """Cycle check using the flag-guarded tortoise-and-hare scheme."""
        return has_cycle_two_ptrs2(head)

    # LeetCode calls hasCycle; default to the set-based implementation.
    hasCycle = hasCycle1
| {
"repo_name": "afbarnard/glowing-broccoli",
"path": "lc/000141.py",
"copies": "1",
"size": "1621",
"license": "mit",
"hash": -5942605714990883000,
"line_mean": 23.1940298507,
"line_max": 69,
"alpha_frac": 0.5712523134,
"autogenerated": false,
"ratio": 3.531590413943355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46028427273433553,
"avg_score": null,
"num_lines": null
} |
"""142. Linked List Cycle II
https://leetcode.com/problems/linked-list-cycle-ii/
Given a linked list, return the node where the cycle begins.
If there is no cycle, return null.
To represent a cycle in the given linked list, we use an integer pos which
represents the position (0-indexed) in the linked list where tail connects to.
If pos is -1, then there is no cycle in the linked list.
Note: Do not modify the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: tail connects to node index 1
Explanation: There is a cycle in the linked list, where tail connects to the
second node.
Example 2:
Input: head = [1,2], pos = 0
Output: tail connects to node index 0
Explanation: There is a cycle in the linked list, where tail connects to the
first node.
Example 3:
Input: head = [1], pos = -1
Output: no cycle
Explanation: There is no cycle in the linked list.
Follow-up:
Can you solve it without using extra space?
"""
from typing import Optional
from common.list_node import ListNode
class Solution(object):
    def detect_cycle_1(self, head: ListNode) -> Optional[ListNode]:
        """
        Use extra space to record node.
        :param head: head node of the (possibly cyclic) list
        :return: the cycle entry node, or None if there is no cycle
        """
        if not head:
            return None
        checked = {}
        # Dummy node in front of head so the loop can advance first and
        # test afterwards.
        p = ListNode(0)
        p.next = head
        while p:
            p = p.next
            if not p:
                return None
            # The first node encountered twice is the cycle's entry point.
            if p in checked:
                return p
            checked[p] = 1

    def detect_cycle_2(self, head: ListNode) -> Optional[ListNode]:
        """
        Without using extra space.
        :param head: head node of the (possibly cyclic) list
        :return: the cycle entry node, or None if there is no cycle
        """
        if not head:
            return None
        p1, p2 = head, head
        cycle_len = 0
        meet_times = 0
        # Run fast/slow pointers until they have met twice; the number of
        # slow steps between the two meetings equals the cycle length.
        while p2:
            if meet_times == 1:
                cycle_len += 1
            p1 = p1.next
            p2 = p2.next
            if not p2 or not p2.next:
                return None
            p2 = p2.next
            if p1 == p2:
                meet_times += 1
                if meet_times == 2:
                    break
        # A node on the cycle returns to itself after cycle_len steps; the
        # first such candidate starting from head is the cycle entry.
        while head:
            entry = head
            for i in range(cycle_len):
                entry = entry.next
            if entry == head:
                return entry
            else:
                head = head.next

    def detect_cycle_3(self, head: ListNode) -> Optional[ListNode]:
        """
        Good method of using math.
        :param head: head node of the (possibly cyclic) list
        :return: the cycle entry node, or None if there is no cycle
        """
        if not head:
            return None
        p1 = p2 = head
        # Standard Floyd phase 1: find a meeting point inside the cycle.
        while p2:
            p1 = p1.next
            p2 = p2.next
            if not p2 or not p2.next:
                return None
            p2 = p2.next
            if p1 == p2:
                break
        # Phase 2: the distance from head to the entry equals the distance
        # from the meeting point to the entry, so advancing both one step
        # at a time makes them meet exactly at the entry.
        if p2 == head:
            return head
        while p2 != head:
            head = head.next
            p2 = p2.next
        return head
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/linked_list_cycle_ii.py",
"copies": "1",
"size": "2923",
"license": "mit",
"hash": -1472553331056441000,
"line_mean": 23.3583333333,
"line_max": 78,
"alpha_frac": 0.5237769415,
"autogenerated": false,
"ratio": 3.923489932885906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49472668743859055,
"avg_score": null,
"num_lines": null
} |
# 1439. Find the Kth Smallest Sum of a Matrix With Sorted Rows
# Maintain the first 200 smallest sums and merge the rows one by one.
# O(40 * 200 * log(200))
from heapq import heappush, heappop
from collections import defaultdict
class Solution:
    # NOTE(review): `List` is not imported in this file; the LeetCode
    # runtime presumably injects it — confirm before running standalone.
    def kthSmallest(self, mat: List[List[int]], k: int) -> int:
        """Return the k-th smallest sum formed by picking one element from
        every row of ``mat`` (each row is sorted ascending).

        Rows are folded in one at a time, keeping only the 200 smallest
        partial sums — sufficient because the problem bounds k by 200.
        """
        m = len(mat)
        n = len(mat[0])  # NOTE(review): n is computed but never used.
        def merge(first200, row):
            # Merge two sorted sequences of sums with a min-heap over
            # (sum, index-into-first200, index-into-row) triples; inHeap
            # prevents pushing the same index pair twice.
            h = []
            inHeap = defaultdict(bool)
            heappush(h, (first200[0] + row[0], 0, 0))
            inHeap[(0, 0)] = True
            next200 = []
            while h:
                s, f, r = heappop(h)
                next200.append(s)
                if f + 1 < len(first200) and (not inHeap[(f + 1, r)]):
                    heappush(h, (first200[f + 1] + row[r], f + 1, r))
                    inHeap[(f + 1, r)] = True
                if r + 1 < len(row) and (not inHeap[(f, r + 1)]):
                    heappush(h, (first200[f] + row[r + 1], f, r + 1))
                    inHeap[(f, r + 1)] = True
                # Only the 200 smallest merged sums are ever needed.
                if len(next200) == 200:
                    break
            return next200
        first200 = mat[0]
        for i in range(1, m):
            first200 = merge(first200, mat[i])
        return first200[k - 1]
| {
"repo_name": "digiter/Arena",
"path": "1439-find-the-kth-smallest-sum-of-a-matrix-with-sorted-rows.py",
"copies": "1",
"size": "1254",
"license": "mit",
"hash": -2489632573334207500,
"line_mean": 32.8918918919,
"line_max": 70,
"alpha_frac": 0.4704944179,
"autogenerated": false,
"ratio": 3.3619302949061662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43324247128061666,
"avg_score": null,
"num_lines": null
} |
# 146 - LRU Cache (Medium)
# https://leetcode.com/problems/lru-cache/
# Implement a LRU (Least Recently Used) cache. It has an initialization (with as
# capacity) and two operations, get and set. Any time an element is retrieved,
# set or updated, it has become the most recently used. If there's a lot of elements
# exceeding the LRU capacity, then start removing the least recently used ones.
# If you feel OrderedDict is like cheating, you can use a dict + deque :-)
# So the order is kept while inserting keys on the deque, but values are on the dict.
from collections import OrderedDict
class LRUCache(object):
    """LRU cache backed by an OrderedDict.

    Ordering invariant: the most recently used key sits at the *end* of the
    OrderedDict, the least recently used at the front.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.dic = OrderedDict()
        self.cap = capacity

    def get(self, key):
        """
        :rtype: int

        Return the cached value and mark the key as most recently used,
        or -1 when the key is absent.
        """
        try:
            val = self.dic[key]
        except KeyError:  # narrowed from a bare except
            return -1
        # Re-insert so the key moves to the most-recently-used end.
        del self.dic[key]
        self.dic[key] = val
        return val

    def set(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: nothing

        Insert or update a key, evicting the least recently used entry
        when the capacity is exceeded.
        """
        # Drop any existing entry so the re-insert lands at the MRU end.
        self.dic.pop(key, None)
        self.dic[key] = value
        if len(self.dic) > self.cap:
            # Bug fix: the original did `keys = self.dic.keys();
            # del self.dic[keys[0]]`, which crashes on Python 3 where
            # keys() returns a non-subscriptable view.  popitem(last=False)
            # removes the LRU entry directly.
            self.dic.popitem(last=False)
# Using Dictionary + Deque
from collections import deque
class LRUCache(object):
    """LRU cache built from a dict (storage) plus a deque (recency order).

    The deque holds keys ordered from least recently used (left end) to
    most recently used (right end).
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.deq = deque([])
        self.dic = {}
        self.cap = capacity

    def get(self, key):
        """
        :rtype: int

        Return the value for key (refreshing its recency) or -1 if absent.
        """
        # Explicit membership test instead of the original bare except,
        # which silently swallowed every exception type.
        if key not in self.dic:
            return -1
        # Move the key to the most-recently-used end.
        self.deq.remove(key)
        self.deq.append(key)
        return self.dic[key]

    def set(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: nothing

        Insert or update a key, evicting the LRU entry on overflow.
        """
        if key in self.dic:
            # Updating an existing key refreshes its recency.
            self.deq.remove(key)
        self.deq.append(key)
        self.dic[key] = value
        # If the cap is exceeded, erase the least recently used entry.
        if len(self.dic) > self.cap:
            evicted = self.deq.popleft()
            del self.dic[evicted]
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/02_Medium/lc_146.py",
"copies": "1",
"size": "2603",
"license": "mit",
"hash": 5871490891473516000,
"line_mean": 25.8453608247,
"line_max": 85,
"alpha_frac": 0.5255474453,
"autogenerated": false,
"ratio": 4.0482115085536545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012080921680232514,
"num_lines": 97
} |
"""146. LRU Cache
https://leetcode.com/problems/lru-cache/
Design and implement a data structure for Least Recently Used (LRU) cache.
It should support the following operations: get and put.
get(key) - Get the value (will always be positive) of the key if the key
exists in the cache, otherwise return -1.
put(key, value) - Set or insert the value if the key is not already present.
When the cache reached its capacity, it should invalidate the least recently
used item before inserting a new item.
The cache is initialized with a positive capacity.
Follow up:
Could you do both operations in O(1) time complexity?
Example:
LRUCache cache = new LRUCache( 2 /* capacity */ );
cache.put(1, 1);
cache.put(2, 2);
cache.get(1); // returns 1
cache.put(3, 3); // evicts key 2
cache.get(2); // returns -1 (not found)
cache.put(4, 4); // evicts key 1
cache.get(1); // returns -1 (not found)
cache.get(3); // returns 3
cache.get(4); // returns 4
"""
class LRUCache1:
    """List-based LRU cache.

    ``sorted_keys`` keeps keys ordered from most recently used (front) to
    least recently used (back); ``map`` holds the values.
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.map = {}
        self.sorted_keys = []

    def get(self, key: int) -> int:
        if key not in self.map:
            return -1
        # A hit promotes the key to the most-recently-used front slot.
        self.sorted_keys.remove(key)
        self.sorted_keys.insert(0, key)
        return self.map[key]

    def put(self, key: int, value: int) -> None:
        if key in self.map:
            # Existing key: just refresh its recency and overwrite below.
            self.sorted_keys.remove(key)
        elif len(self.map) == self.capacity:
            # New key at capacity: drop the least recently used entry.
            lru = self.sorted_keys.pop()
            del self.map[lru]
        self.sorted_keys.insert(0, key)
        self.map[key] = value
class LRUCache2:
    """LRU cache using a plain dict plus a key list ordered from least
    (front) to most (back) recently used."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.keys = []
        self.map = {}

    def get(self, key: int) -> int:
        # Check the dict (O(1)) rather than scanning the key list (O(n)).
        if key not in self.map:
            return -1
        self._visit(key)
        return self.map[key]

    def put(self, key: int, value: int) -> None:
        if key not in self.map and len(self.keys) == self.capacity:
            self._evict()
        # Always store the value so puts on an existing key update it.
        self.map[key] = value
        self._visit(key)

    def _evict(self) -> None:
        # The least recently used key sits at the front of the list.
        evicted_key = self.keys[0]
        del self.map[evicted_key]
        self.keys = self.keys[1:]

    def _visit(self, key: int) -> None:
        # Move (or add) the key to the most-recently-used back position.
        if key in self.keys:
            self.keys.remove(key)
        self.keys.append(key)
class LRUCache3:
    """O(1) LRU cache: a dict for lookup plus a doubly linked list that
    orders nodes from least (just after head) to most (just before tail)
    recently used."""

    class LinkedMap:
        # One node of the doubly linked list carrying a key/value pair.
        def __init__(self, key, value):
            self.key = key
            self.value = value
            self.prev = None
            self.next = None

    def __init__(self, capacity):
        self.capacity = capacity
        self.map = {}
        # Sentinel head and tail avoid edge cases when splicing nodes.
        self.head = self.LinkedMap(0, -1)
        self.tail = self.LinkedMap(0, -1)
        self.head.next = self.tail
        self.tail.prev = self.head

    def get(self, key):
        if key not in self.map:
            return -1
        node = self.map[key]
        # Re-splice the node to the most-recently-used end.
        self._remove(node)
        self._add(node)
        return node.value

    def put(self, key, value):
        if key in self.map:
            # Existing key: unlink its node; a fresh one is added below.
            self._remove(self.map[key])
        elif len(self.map) == self.capacity:
            # Evict the least recently used node (right after head).
            del self.map[self.head.next.key]
            self._remove(self.head.next)
        fresh = self.LinkedMap(key, value)
        self._add(fresh)
        self.map[key] = fresh

    def _remove(self, node):
        # Unlink node from the list.
        before, after = node.prev, node.next
        before.next = after
        after.prev = before

    def _add(self, node):
        # Link node just before the tail sentinel (MRU position).
        last = self.tail.prev
        last.next = node
        node.prev = last
        node.next = self.tail
        self.tail.prev = node
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/lru_cache.py",
"copies": "1",
"size": "3938",
"license": "mit",
"hash": -1133091344744559500,
"line_mean": 26.9290780142,
"line_max": 76,
"alpha_frac": 0.5510411376,
"autogenerated": false,
"ratio": 3.488042515500443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4539083653100443,
"avg_score": null,
"num_lines": null
} |
# 146. LRU Cache
#
# Design and implement a data structure for Least Recently Used (LRU) cache.
# It should support the following operations: get and put.
#
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache,
# otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present.
# When the cache reached its capacity, it should invalidate the least recently used item
# before inserting a new item.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
#
# LRUCache cache = new LRUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.put(4, 4); // evicts key 1
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
import collections
# http://www.cnblogs.com/chruny/p/5477982.html
class LRUCache(object):
    """LRU cache backed by a ``collections.OrderedDict``.

    Keys are evicted in least-recently-used order once ``capacity`` is
    reached; both operations are O(1) amortized.

    NOTE(review): the original class defined ``get`` and ``put`` twice;
    the first pair (try/except with bare ``except:``) was dead code,
    silently shadowed by the second pair, and has been removed.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.length = 0  # number of keys currently stored
        self.dict = collections.OrderedDict()

    def get(self, key):
        """
        :type key: int
        :rtype: int  (-1 when the key is absent)
        """
        if key in self.dict:
            value = self.dict[key]
            # Delete and re-insert so the key moves to the MRU (last) slot.
            del self.dict[key]
            self.dict[key] = value
            return value
        return -1

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if key in self.dict:
            # Existing key: refresh recency and overwrite; size unchanged.
            del self.dict[key]
            self.dict[key] = value
        else:
            if self.length == self.capacity:
                # popitem(last=False) pops in FIFO order, i.e. the LRU key.
                self.dict.popitem(last=False)
                self.dict[key] = value
            else:
                self.dict[key] = value
                self.length += 1
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
if __name__ == '__main__':
    # Smoke-test the cache: capacity 3; exercise miss, hit, update, eviction.
    cache = LRUCache(3)
    for k in (1, 2, 3):
        cache.put(k, k)
    assert cache.get(0) == -1   # never inserted
    assert cache.get(1) == 1    # hit also refreshes key 1
    cache.put(1, 10)            # overwrite in place
    assert cache.get(1) == 10
    cache.put(4, 4)             # cache full: evicts LRU key 2
    assert cache.get(2) == -1
| {
"repo_name": "gengwg/leetcode",
"path": "146_lru_cache.py",
"copies": "1",
"size": "3358",
"license": "apache-2.0",
"hash": -3790482572943026700,
"line_mean": 26.9833333333,
"line_max": 95,
"alpha_frac": 0.5413936867,
"autogenerated": false,
"ratio": 3.527310924369748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45687046110697477,
"avg_score": null,
"num_lines": null
} |
# 1473. Paint House III
# O(m * n * target * n)
class Solution:
    def minCost(
        self, houses: List[int], cost: List[List[int]], m: int, n: int, target: int
    ) -> int:
        """LeetCode 1473, Paint House III.

        Minimum cost to paint the unpainted houses (houses[i] == 0) so the
        row forms exactly ``target`` neighborhoods, or -1 if impossible.
        O(m * n * target * n) time via memoized DP.

        Bug fix: for an unpainted last house the original could return
        ``xCost + INF`` (> INF) on infeasible input, and its final
        ``ans != INF`` test then missed it and returned a garbage cost
        instead of -1.  States are now clamped to INF and the final check
        uses ``< INF``.
        """
        INF = 10 ** 4 * 100 + 5  # > max feasible total (100 houses * 10^4)

        @cache
        def solve(x, groupCnt, color):
            # Cost of house x itself; pre-painted houses cost nothing.
            xCost = 0 if houses[x] != 0 else cost[x][color - 1]
            if x == 0:
                return xCost if groupCnt == 1 else INF
            if groupCnt == 0:
                return INF
            if houses[x - 1] != 0:
                # Previous house's color is fixed.
                prev_colors = (houses[x - 1],)
            else:
                prev_colors = range(1, n + 1)
            best = INF
            for c in prev_colors:
                # A color change starts a new neighborhood.
                used = groupCnt - (1 if c != color else 0)
                best = min(best, solve(x - 1, used, c))
            # Clamp so infeasible states stay comparable to INF (bug fix).
            return min(xCost + best, INF)

        last = m - 1
        if houses[last] != 0:
            ans = solve(last, target, houses[last])
        else:
            ans = min(solve(last, target, c) for c in range(1, n + 1))
        return ans if ans < INF else -1
| {
"repo_name": "digiter/Arena",
"path": "1473-paint-house-iii.py",
"copies": "1",
"size": "1184",
"license": "mit",
"hash": 4871736994581748000,
"line_mean": 31.8888888889,
"line_max": 83,
"alpha_frac": 0.4527027027,
"autogenerated": false,
"ratio": 3.4121037463976944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43648064490976946,
"avg_score": null,
"num_lines": null
} |
"""148. Sort List
https://leetcode.com/problems/sort-list/
Sort a linked list in O(n log n) time using constant space complexity.
Example 1:
Input: 4->2->1->3
Output: 1->2->3->4
Example 2:
Input: -1->5->3->4->0
Output: -1->0->3->4->5
"""
from common.list_node import ListNode
# Definition for a list node.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def sort_list(self, head: ListNode) -> ListNode:
        """Sort a singly linked list in O(n log n) time via merge sort.

        :param head: head node of the list (may be None)
        :return: head of the sorted list

        Fix: the original merged recursively, one call per node, which
        raises RecursionError for lists longer than the interpreter's
        recursion limit (~1000).  Merging is now iterative; the top-level
        split recursion is only O(log n) deep and stays as-is.
        """

        def merge_sorted_list(l1: ListNode, l2: ListNode) -> ListNode:
            """Iteratively merge two sorted lists; returns the merged head."""
            if not l1:
                return l2
            if not l2:
                return l1
            # Pick the smaller head as the start of the merged list.
            if l1.val <= l2.val:
                merged = l1
                l1 = l1.next
            else:
                merged = l2
                l2 = l2.next
            tail = merged
            while l1 and l2:
                if l1.val <= l2.val:
                    tail.next = l1
                    l1 = l1.next
                else:
                    tail.next = l2
                    l2 = l2.next
                tail = tail.next
            # Attach whatever remains of the non-exhausted list.
            tail.next = l1 if l1 else l2
            return merged

        def get_middle_node(l: ListNode) -> ListNode:
            """Return the middle node via slow/fast pointers (left middle
            for even lengths), or ``l`` itself when the list is empty."""
            if not l:
                return l
            slow_ptr = l
            fast_prt = l.next
            while fast_prt:
                fast_prt = fast_prt.next
                if fast_prt:
                    fast_prt = fast_prt.next
                    slow_ptr = slow_ptr.next
            return slow_ptr

        # Base case: empty or single-node list is already sorted.
        if not head or not head.next:
            return head
        # Split around the middle, sort both halves, merge.
        middle_mode = get_middle_node(head)
        right_list = middle_mode.next
        middle_mode.next = None
        left_sorted_list = self.sort_list(head)
        right_sorted_list = self.sort_list(right_list)
        return merge_sorted_list(left_sorted_list, right_sorted_list)
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/sort_list.py",
"copies": "1",
"size": "1827",
"license": "mit",
"hash": 4130150800186769000,
"line_mean": 24.375,
"line_max": 70,
"alpha_frac": 0.5073891626,
"autogenerated": false,
"ratio": 3.554474708171206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9560577862540753,
"avg_score": 0.00025720164609053495,
"num_lines": 72
} |
# 148. Sort List
#
# Sort a linked list in O(n log n) time using constant space complexity.
# http://bookshadow.com/weblog/2014/11/21/leetcode-sort-list/
# Definition for singly-linked list.
class ListNode(object):
    """A singly-linked-list node holding a value and a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    """Merge-sort a singly linked list (LeetCode 148), O(n log n) time."""
    def sortList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # An empty or single-node list is already sorted.
        if head is None or head.next is None:
            return head
        # Cut the list in two around the middle node.
        mid = self.getMiddle(head)
        right = mid.next
        mid.next = None
        # Sort each half recursively, then stitch them back together.
        left_sorted = self.sortList(head)
        right_sorted = self.sortList(right)
        return self.merge(left_sorted, right_sorted)
    # splice two already-sorted lists into one
    def merge(self, lHead, rHead):
        sentinel = ListNode(0)
        tail = sentinel
        while lHead and rHead:
            if lHead.val < rHead.val:
                tail.next = lHead
                lHead = lHead.next
            else:
                tail.next = rHead
                rHead = rHead.next
            tail = tail.next
        # At most one list still has nodes; append it wholesale.
        tail.next = lHead if lHead else rHead
        return sentinel.next
    # slow/fast pointers: returns the node ending the first half
    def getMiddle(self, head):
        if head is None:
            return head
        slow = fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        return slow
| {
"repo_name": "gengwg/leetcode",
"path": "148_sort_list.py",
"copies": "1",
"size": "1543",
"license": "apache-2.0",
"hash": 3449421644226414600,
"line_mean": 26.5535714286,
"line_max": 72,
"alpha_frac": 0.5547634478,
"autogenerated": false,
"ratio": 3.9564102564102566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
#14 - Inflight Entertainment.py
# https://www.interviewcake.com/question/python/inflight-entertainment
#Users on longer flights like to start a second movie right when their first one ends,
#but they complain that the plane usually lands before they can see the ending.
#So you're building a feature for choosing two movies whose total runtimes will equal the exact flight length.
#Write a function that takes an integer flight_length (in minutes)
# and a list of integers movie_lengths (in minutes)
# and returns a boolean indicating whether there are two numbers in movie_lengths whose sum equals flight_length.
# Sample inputs.  NOTE(review): the demo calls below pass their own keyword
# arguments, so these module-level values are effectively unused.
flight_length = 360
movie_lengths = [121, 181, 239]
def exists_movie_combo(flight_length, movie_lengths):
    """Return True iff two *different* entries of movie_lengths sum exactly
    to flight_length.

    Two movies with the same runtime count (e.g. two 180-minute movies fill
    a 360-minute flight), but a single entry may not be used twice.

    Fixes over the original:
    - single O(n) pass with a seen-set instead of an O(n^2) membership scan;
    - no longer sorts (and therefore mutates) the caller's list — the sort
      (whose key was an identity lambda) contributed nothing to the result.
    """
    seen = set()
    for length in movie_lengths:
        # A previously seen movie that complements this one ends the search.
        if flight_length - length in seen:
            return True
        seen.add(length)
    return False
# True: 109 + 251 == 360, two distinct movies fill the flight exactly.
print(exists_movie_combo(flight_length = 360, movie_lengths = [109,251]))
# False: a single 360-minute movie is not a pair.
print(exists_movie_combo(flight_length = 360, movie_lengths = [1,2,360]))
# False: the lone 180-minute entry cannot be counted twice.
print(exists_movie_combo(flight_length = 360, movie_lengths = [1,2,180]))
| {
"repo_name": "bruno615/one-off-analysis",
"path": "Python/Inteview Cake/14 - Inflight Entertainment.py",
"copies": "1",
"size": "1639",
"license": "mit",
"hash": -5125389999771418000,
"line_mean": 41.0256410256,
"line_max": 113,
"alpha_frac": 0.7504575961,
"autogenerated": false,
"ratio": 3.594298245614035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48447558417140346,
"avg_score": null,
"num_lines": null
} |
# NOTE(review): every statement below uses `await`, so in the complete
# how-to program this snippet runs inside an async coroutine; names such as
# `wallet_handle`, `schema`, `prover_did` and `master_secret_id` are created
# in the walkthrough's earlier steps.
# 14. Issuer creates a credential offer for the credential definition.
print_log('\n14. Issuer (Trust Anchor) is creating a Credential Offer for Prover\n')
schema_json = json.dumps(schema)  # NOTE(review): not referenced below -- confirm later steps use it
cred_offer_json = await anoncreds.issuer_create_credential_offer(wallet_handle, cred_def_id)
print_log('Credential Offer: ')
pprint.pprint(json.loads(cred_offer_json))
# 15. Prover answers with a credential request bound to his master secret.
print_log('\n15. Prover creates Credential Request for the given credential offer\n')
(cred_req_json, cred_req_metadata_json) = await anoncreds.prover_create_credential_req(prover_wallet_handle, prover_did, cred_offer_json, cred_def_json, master_secret_id)
print_log('Credential Request: ')
pprint.pprint(json.loads(cred_req_json))
# 16. Issuer signs the requested attributes into a credential.
print_log('\n16. Issuer (Trust Anchor) creates Credential for Credential Request\n')
# Each attribute is given as [raw value, encoded value].
cred_values_json = json.dumps({
    'sex': ['male', '5944657099558967239210949258394887428692050081607692519917050011144233115103'],
    'name': ['Alex', '1139481716457488690172217916278103335'],
    'height': ['175', '175'],
    'age': ['28', '28']
})
(cred_json, _, _) = await anoncreds.issuer_create_credential(wallet_handle, cred_offer_json, cred_req_json, cred_values_json, None, None)
print_log('Credential: ')
pprint.pprint(json.loads(cred_json))
# 17. Prover verifies and stores the issued credential in his wallet.
print_log('\n17. Prover processes and stores Credential\n')
await anoncreds.prover_store_credential(prover_wallet_handle, None, cred_req_metadata_json, cred_json, cred_def_json, None)
# 18. Teardown begins: close both wallets and the pool ledger handle.
print_log('\n18. Closing both wallet_handles and pool\n')
await wallet.close_wallet(wallet_handle)
await wallet.close_wallet(prover_wallet_handle)
await pool.close_pool_ledger(pool_handle)
# 19. Delete both wallets from disk.
print_log('\n19. Deleting created wallet_handles\n')
await wallet.delete_wallet(wallet_config, wallet_credentials)
await wallet.delete_wallet(prover_wallet_config, prover_wallet_credentials)
# 20. Delete the pool ledger configuration.
print_log('\n20. Deleting pool ledger config\n')
await pool.delete_pool_ledger_config(pool_name)
| {
"repo_name": "anastasia-tarasova/indy-sdk",
"path": "docs/how-tos/issue-credential/python/step4.py",
"copies": "2",
"size": "2162",
"license": "apache-2.0",
"hash": 7309655443027840000,
"line_mean": 49.2790697674,
"line_max": 178,
"alpha_frac": 0.6503237743,
"autogenerated": false,
"ratio": 3.4481658692185007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5098489643518501,
"avg_score": null,
"num_lines": null
} |
''' 14-plot_target-list.py
===============================================
AIM: Given a catalogue of objects, plots when the targets are visible according to their magnitude for a given period of time.
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ : (see below for file name definition)
CMD: python 14-plot_target-list.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- BaseMap --> http://matplotlib.org/basemap/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: based on 11-<...>.py, but has a better way of saving appearance and disappearance of the targets, using the class in resources/targets.py
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import matplotlib.cm as cm
import time
from resources.routines import *
from resources.TimeStepping import *
from resources.targets import *
import parameters as param
import resources.constants as const
import resources.figures as figures
import time
from matplotlib import dates
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# orbit_id
orbit_id = 701
apogee=700
perigee=700
# First minute analysis
minute_ini = 0
# Last minute to look for
minute_end = 1440
# Include SAA ?
SAA = False
# Show plots
show = True
# Save the picture ?
save = True
# Fancy plots ?
fancy = True
# Take into account the stray light?
straylight = True
# Minimum observable time for plots
threshold_obs_time = 50
# Time to acquire a target
t_acquisition = 6
# Catalogue name (in resources/)
catalogue = 'cheops_target_list_v0.1.dat'
# Maximum magnitude that can be seen by CHEOPS, only for cosmetics purposes
CHEOPS_mag_max = 12
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Factor in the SL post treatment correction ?
SL_post_treat = True
# Factor in mirror efficiency for the equivalent star magnitude ?
mirror_correction = True
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee,perigee)
###########################################################################
### INITIALISATION
file_flux = 'flux_'
# changes the threshold by addition the acquisition time:
threshold_obs_time += t_acquisition
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
if SAA:
SAA_data = np.loadtxt('resources/SAA_table_%d.dat' % orbit_id, delimiter=',')
SAA_data = SAA_data[SAA_data[:,0]>= minute_ini]
SAA_data = SAA_data[SAA_data[:,0]<= minute_end]
computed_orbits = np.loadtxt(folder_misc+orbits_file)[:,0]
############################################################################
### Load catalogue and assign them to the nearest grid point
name_cat, ra_cat, dec_cat, mag_cat = load_catalogue(catalogue)
index_ra_cat = np.zeros(np.shape(ra_cat))
index_dec_cat= np.zeros(np.shape(ra_cat))
targets = []
for name, ra, dec, mag in zip(name_cat, ra_cat, dec_cat, mag_cat):
id_ra = find_nearest(ras, ra/const.RAD)
id_dec = find_nearest(decs, dec/const.RAD)
targets.append(target_list(name, ra/const.RAD, id_ra, dec/const.RAD, id_dec, mag, int(period+3)))
# Apply the flux correction (SL post-treatment removal and the mirror efficiency)
corr_fact = 1.0
if mirror_correction: corr_fact /= param.mirror_efficiency
if SL_post_treat: corr_fact *= (1.0 - param.SL_post_treat_reduction)
############################################################################
### Start the analysis
start = time.time()
# Prepare the arrays
visibility = np.zeros(np.shape(ra_grid))
#observations = np.zeros(len(name_cat)*)
workspace = np.zeros(np.shape(ra_grid))
#data = np.zeros(np.shape(ra_grid))
# Load the reference times
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
minutes_orbit_iditude = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
# Set variables for printing the advance
numberofminutes = minute_end+1 - minute_ini
lo = fast_minute2orbit(minutes_orbit_iditude,minute_end, orbit_id)
fo = fast_minute2orbit(minutes_orbit_iditude,minute_ini, orbit_id)
lp = -1
junk, junk, at_ini, junk = fast_orbit2times(minutes_orbit_iditude, fo, orbit_id)
first_computed = computed_orbits[computed_orbits<=fo][-1]
first_minute = minute_ini
last_minute = minute_end
if not fo == first_computed:
junk, junk, minute_ini, junk = fast_orbit2times(minutes_orbit_iditude, first_computed, orbit_id)
# print '1st referenced orbit: %d\twanted orbit: %d' % (first_computed, fo)
try:
for minute in range(minute_ini,minute_end+1+int(period)):
minute = int(minute)
if SAA and fast_SAA(SAA_data, minute): SAA_at_minute = True
else: SAA_at_minute = False
orbit_current = fast_minute2orbit(minutes_orbit_iditude, minute, orbit_id)
if orbit_current > lp:
lp = orbit_current
message = "Analysing orbit %d on %d...\t" % (lp,lo)
sys.stdout.write( '\r'*len(message) )
sys.stdout.write(message)
sys.stdout.flush()
junk, len_orbit, atc_ini, junk = fast_orbit2times(minutes_orbit_iditude, orbit_current, orbit_id)
try:
ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
load = True
minute_to_load = minute-atc_ini#+shift
except IOError:
# if there is nothing then well, do nothing ie we copy the past values
# in which orbit are we ?
# get the previous orbit computed and copy the stray light data of this orbit :
#orbit_previous = orbits[orbits[:,0] < orbit_current][-1,0]
#minute_replacement = minute - atc_ini + shift #+ at_ini
minute_to_load = minute-atc_ini
for obj in targets:
if SAA_at_minute:
obj.current_visibility = 0
else:
obj.current_visibility = obj.visible_save[minute_to_load]
load = False
# populate the visbility matrix
# for ii in range(0, targets[0].CountObjects()):
if load:
for obj in targets:
ra_ = obj.ra
dec_ = obj.dec
a = np.where(np.abs(ra_-ra)<ra_step/2)[0]
b = np.where(np.abs(dec_-dec)<dec_step/2)[0]
INT = np.intersect1d(a,b)
if np.shape(INT)[0] == 0 or (straylight and S_sl[INT]*corr_fact > obj.maximum_flux()):
obj.visible_save[minute_to_load] = 0
obj.current_visibility = 0
continue
else:
obj.visible_save[minute_to_load] = 1
if SAA_at_minute: obj.current_visibility = 0
else: obj.current_visibility = 1
if minute == minute_ini:
for obj in targets:
obj.workspace=obj.current_visibility
continue
for obj in targets: obj.Next(minute,threshold_obs_time)
except KeyboardInterrupt: print hilite('\nWARNING! USER STOPPED LOADING AT MINUTE %d' % minute,False,False)
for ii in range(0, targets[0].CountObjects()): targets[ii].Next(minute,threshold_obs_time)
### #TODO if first minute look for past orbits anyways
print
worthy_targets = []
for ii in range(0, targets[0].CountObjects()):
if np.shape(targets[ii].visible)[0] > 0:
worthy_targets.append(targets[ii])
############################################################################
end = time.time()
elapsed_time = round((end-start)/60.,2)
sys.stdout.write( '\r'*len(message) )
sys.stdout.flush()
print "Time needed: %2.2f min" % elapsed_time
### Plot a few things
if fancy: figures.set_fancy()
### Plot time line
figures.set_fancy()
minute_ini = first_minute
minute_end = last_minute
maxy = len(worthy_targets)
print 'Number of star visible in period selected: %d' % maxy
size = 2 + maxy/3
figsize = (17.,size) # fig size in inches (width,height)
fig = plt.figure(figsize=figsize)
ax = plt.subplot(111)
ii = 0
ax.yaxis.set_major_locator(MultipleLocator(1))
plt.grid(True)
for ii in range (0, len(worthy_targets)):
y = float(ii)
visi = worthy_targets[ii].Visibility()
invi = worthy_targets[ii].Invisibility()
for vis, ini in zip(visi, invi):
plt.hlines(y, vis, ini, lw=3, color=cm.Dark2(y/(maxy+5)))
if ii > maxy: break
else: ii+=1
labels = ['%s (%2.1f)' % (wt.name, wt.mag) for wt in worthy_targets[0:maxy]]
ax.set_yticklabels(labels)
ax.set_ylim(-0.5,maxy-0.5)
# convert epoch to matplotlib float format
labels = np.linspace(minute_ini, minute_end+1, 12) * 60. + const.timestamp_2018_01_01
plt.xlim([minute_ini, minute_end+1])
ax.xaxis.set_major_locator(MultipleLocator((minute_end-minute_ini+1)/11))
# to human readable date
pre = map (time.gmtime, labels)
labels = map(figures.format_second, pre)
ax.set_xticklabels(labels)
fig.autofmt_xdate()
if save:
threshold_obs_time -= t_acquisition
if SAA: note = '_SAA'
else: note = ''
fname = '%svisibility_stars_obs_%d_o_%d_to_%d%s' % (folder_figures,threshold_obs_time,fo,lo, note)
figures.savefig(fname,fig,fancy)
### A spatial plot of the targets
fig = plt.figure()
ax = plt.subplot(111, projection='mollweide')
plt.scatter((ra_cat-180)/const.RAD,dec_cat/const.RAD, c=mag_cat, marker='*', s=50, edgecolor='none', vmin=param.magnitude_min,vmax=param.magnitude_max+0.2)
v = np.linspace(param.magnitude_min,param.magnitude_max, (param.magnitude_max-param.magnitude_min+1), endpoint=True)
t = map(figures.format_mag, v)
cbar = plt.colorbar(ticks=v, orientation='horizontal',shrink=.8)
cbar.set_ticklabels(t)
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = cbar.ax.get_position().bounds
cbar.ax.set_position([ll, bb+0.1, ww, hh])
ax.grid(True)
ax.set_xticklabels([r'$30^{\circ}$',r'$60^{\circ}$',r'$90^{\circ}$',r'$120^{\circ}$',r'$150^{\circ}$',r'$180^{\circ}$',r'$210^{\circ}$',r'$240^{\circ}$',r'$270^{\circ}$',r'$300^{\circ}$',r'$330^{\circ}$']) #,r'$360^{\circ}$'
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\delta$')
if save:
fname = '%stargets_distribution' % folder_figures
figures.savefig(fname,fig,fancy)
### A histogram of the magnitudes
fig = plt.figure(dpi=100)
ax = fig.add_subplot(111)
bins=np.linspace(np.amin(mag_cat),np.amax(mag_cat), 50)
n, bins, patches = plt.hist(mag_cat,bins=bins)
plt.setp(patches, 'edgecolor', 'black', 'linewidth', 2, 'facecolor','blue','alpha',1)
ax.xaxis.set_major_locator(MultipleLocator(2))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(5))
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.xlim([np.amin(mag_cat)*0.95, 1.05*np.amax(mag_cat)])
plt.xlabel(r'$m_V$')
plt.ylabel(r'$\mathrm{distribution}$')
x1,x2,y1,y2 = plt.axis()
plt.axvline(CHEOPS_mag_max, lw=2, color='r')
if save:
fname = '%stargets_hist_mag' % folder_figures
figures.savefig(fname,fig,fancy)
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "14_plot_target-list.py",
"copies": "1",
"size": "11435",
"license": "bsd-3-clause",
"hash": -7994642935063516000,
"line_mean": 28.8563968668,
"line_max": 224,
"alpha_frac": 0.6625273284,
"autogenerated": false,
"ratio": 2.880352644836272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.892555830470265,
"avg_score": 0.02346433370672446,
"num_lines": 383
} |
#1-50. Разработайте собственную стратегию ходов компьютера для игры "Крестики-нолики" (Задача 12).
#Перепишите функцию computer_move() в соответствии с этой стратегией.
# Митин Д.С., 01.06.2016, 22:50
X="X"
O="O"
EMPTY=" "
TIE="Ничья"
NUM_SQUARES=9
def display_instruct():
    """Print the greeting and the board-numbering scheme (squares 0-8)."""
    print('''
Привет, студент! Давай поиграем в крестики-нолики! Вводи число от 0 до 8.\nЧисла соответствуют полям доски - так, поле ниже:
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8''')
def ask_yes_no(question):
    """Prompt until the (lower-cased) answer is "да" or "нет"; return it."""
    while True:
        answer = input(question).lower()
        if answer in ("да", "нет"):
            return answer
def ask_number(question, low, high):
    """Prompt until the user enters an integer in range(low, high); return it.

    Fix: non-numeric input used to raise an unhandled ValueError from
    ``int(input(...))`` and crash the game; it is now treated as an invalid
    answer and the prompt simply repeats.
    """
    response = None
    while response not in range(low, high):
        try:
            response = int(input(question))
        except ValueError:
            response = None
    return response
def pieces():
    """Ask who moves first; return (computer_piece, human_piece).

    Bug fix: ask_yes_no() returns "да"/"нет", but the original compared the
    answer against "y", so the human could never actually take the first
    move.  Also fixed the "ща" -> "за" typo in the computer-first message.
    """
    go_first = ask_yes_no("Хочешь ходить первым? (да/нет): ")
    if go_first == "да":
        print("\nХоди крестиками.")
        human = X
        computer = O
    else:
        print("\nЯ хожу, ты играешь за нолики")
        computer = X
        human = O
    return computer, human
def new_board():
    """Create a fresh board: NUM_SQUARES empty squares."""
    return [EMPTY] * NUM_SQUARES
def display_board(board):
    """Print the current 3x3 board, one row per line with separators."""
    rows = ((0, 1, 2), (3, 4, 5), (6, 7, 8))
    for n, (a, b, c) in enumerate(rows):
        # The first row is preceded by a blank line, the rest only tabbed.
        lead = "\n\t" if n == 0 else "\t"
        print(lead, board[a], "|", board[b], "|", board[c])
        if n < 2:
            print("\t", "---------")
def legal_moves(board):
    """Return the indices of all still-empty squares."""
    return [square for square in range(NUM_SQUARES) if board[square] == EMPTY]
def winner(board):
    """Return X or O if that piece has three in a row, TIE when the board
    is full with no winner, otherwise None (game still in progress)."""
    WAYS_TO_WIN = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
                   (0, 3, 6), (1, 4, 7), (2, 5, 8),
                   (0, 4, 8), (2, 4, 6))
    for a, b, c in WAYS_TO_WIN:
        if board[a] != EMPTY and board[a] == board[b] == board[c]:
            return board[a]
    if EMPTY not in board:
        return TIE
    return None
def human_move(board, human):
    """Keep asking for a square number (0-8) until a legal one is given."""
    legal = legal_moves(board)
    while True:
        move = ask_number("Твой ход. Выбери поле (0-8):", 0, NUM_SQUARES)
        if move in legal:
            break
        print("\nПоле занято. Выбери другое\n")
    print("Ладно...")
    return move
def computer_move(board, computer, human):
    """Choose the computer's move: win if possible, else block the human's
    winning move, else take the best remaining square.

    Works on a copy of the board; each candidate move is placed, tested
    with winner(), then rolled back to EMPTY.  The order of the three
    passes below is what gives the strategy its priority.
    """
    # Work on a copy so speculative moves never leak into the real board.
    board = board[:]
    # Square priority: corners first, then edges, center last.
    BEST_MOVES = (0, 8, 2, 1, 5, 6, 7, 3, 4)
    print("Я выберу поле номер", end = " ")
    # Pass 1: take an immediately winning square if one exists.
    for move in legal_moves(board):
        board[move] = computer
        if winner(board) == computer:
            print(move)
            return move
        board[move] = EMPTY
    # Pass 2: otherwise block the human's immediate win.
    for move in legal_moves(board):
        board[move] = human
        if winner(board) == human:
            print(move)
            return move
        board[move] = EMPTY
    # Pass 3: fall back to the highest-priority free square.
    for move in BEST_MOVES:
        if move in legal_moves(board):
            print(move)
            return move
def next_turn(turn):
    """Return the opposite piece: O after X, X after O."""
    return O if turn == X else X
def congrat_winner(the_winner, computer, human):
    """Announce the outcome: a banner line, then a tailored message."""
    if the_winner == TIE:
        print("Ничья!\n")
    else:
        print("Три", the_winner, "!\n")
    if the_winner == computer:
        print("Я победил!")
    elif the_winner == human:
        print("Ты выиграл! Молодец!")
    elif the_winner == TIE:
        print("Так и быть, победила дружба!")
def main():
    """Run one full game: greet, decide who starts, then alternate turns
    until winner() reports X, O or TIE (all truthy)."""
    display_instruct()
    computer, human = pieces()
    turn = X  # X always moves first; pieces() decided who plays X
    board = new_board()
    display_board(board)
    while not winner(board):
        if turn == human:
            move = human_move(board, human)
            board[move] = human
        else:
            move = computer_move(board, computer, human)
            board[move] = computer
        display_board(board)
        turn=next_turn(turn)
    the_winner=winner(board)
    congrat_winner(the_winner, computer, human)
# Guard the entry point so importing this module does not start a game.
if __name__ == "__main__":
    main()
    input("Нажмите Enter, чтобы выйти.")
"repo_name": "Mariaanisimova/pythonintask",
"path": "INBa/2015/Mitin_D_S/task_13_15.py",
"copies": "1",
"size": "3835",
"license": "apache-2.0",
"hash": -1901769136229669000,
"line_mean": 23.0563380282,
"line_max": 124,
"alpha_frac": 0.6333821376,
"autogenerated": false,
"ratio": 2.160025300442758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32934074380427575,
"avg_score": null,
"num_lines": null
} |
# 151 currencies, 22650 currency pairs, 364 days (period 1) 134 days (period 2) => 3,035,100(20,100/from_c) - 8,221,950 entries(8361.157)
#####IMPORT PACKAGES
print('importing packages')
import time
import sqlite3
import json
import requests
import datetime
import pytz
from datetime import date
from multiprocessing.pool import Pool
###### Create/connect to database (sqlite file)
print('connecting to db')
conn = sqlite3.connect('mastercard_db.sqlite')
cur = conn.cursor()
###### Defining Functions, Convert dates into different types, date/day/string, chunkIt splits currency codes into approximately equal chunks
print('defining functions')
def day_calculator(date):
    """1-based day index of ``date`` relative to the module-global date_1."""
    return (date - date_1).days + 1
def date_calculator(day):
    """Calendar date for 1-based day index ``day``, counted from the module-global date_1."""
    return date_1+datetime.timedelta(day-1)
def date_stringer(date):
    """Format a date as ISO-style 'YYYY-MM-DD'."""
    return format(date, '%Y-%m-%d')
def chunkIt(seq, num):
    """Split seq into num consecutive chunks of approximately equal size."""
    step = len(seq) / float(num)
    chunks = []
    cursor = 0.0
    # Advance a float cursor so rounding spreads the remainder evenly.
    while cursor < len(seq):
        chunks.append(seq[int(cursor):int(cursor + step)])
        cursor += step
    return chunks
###### Defining constants, first_date is first_date in database, now is datetime in EST, today is last date rates available for, date_1 is first date rates available for
print('defining constants')
start_from_id = int(input('from_id initial value: '))
start_to_ids= [int(x) for x in input('List of to_ids, seperated by spaces').split()]
n = len(start_to_ids)
base_url = 'https://www.mastercard.us/settlement/currencyrate/fxDate={date_};transCurr={from_};crdhldBillCurr={to_};bankFee=0.00;transAmt=1/conversion-rate'
first_date=date(2016,2,29)
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
if now.hour < 14:
today=now.date() - datetime.timedelta(days=1)
else:
today=now.date()
print('today: ', today)
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
date_1=date_1+datetime.timedelta(days=2)
print(date_1)
date_string = date_stringer(date_1)
print('first date in period', date_1, 'today:',today)
late_day=day_calculator(date(2016,10,14))
print('grabbing codes from db')
cur.execute('SELECT code FROM Currency_Codes')
code_tuples=cur.fetchall()
codes = [ x[0] for x in code_tuples ]
number_of_codes = len(codes)
######### Extracts all exchnage rates from 14th Oct onward
def extract_rates(from_id, to_id):
    """Scrape daily Mastercard conversion rates for one currency pair.

    Walks every date from the global ``late_day`` up to ``today``, querying
    the settlement API, and appends (rate, from_id, to_id, date_id) tuples
    to the module-global ``entries`` list (which is also returned).
    Sentinel inputs (``to_id`` past the last currency, or ``from_id``
    already 'done') append the string 'done' instead, so the
    multiprocessing driver can detect finished workers.

    NOTE(review): relies on module-level globals (`entries`, `codes`,
    `late_day`, `today`, `date_1`, `first_date`, `base_url`).
    Fixes over the original:
    - ``from_id is 'done'`` compared string identity, which only worked via
      CPython literal interning; replaced with ``==``.
    - the bare ``except:`` around the HTTP call also swallowed
      KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    if to_id > 151:
        entries.append('done')
        return entries
    if from_id == 'done':
        entries.append('done')
        return entries
    from_c = codes[from_id - 1]
    to_c = codes[to_id - 1]
    print(from_c, to_c)
    day = late_day
    date = date_calculator(day)
    print('extracting rates...')
    while (today - date).days >= 0:
        date_string = date_stringer(date)
        url = base_url.format(date_=date_string, from_=from_c, to_=to_c)
        # Retry until the endpoint returns parseable JSON (server hiccups).
        while True:
            try:
                r = requests.get(url)
                JSON = r.json()
            except Exception:
                time.sleep(5)
                continue
            break
        if 'errorCode' in JSON['data']:
            if JSON['data']['errorCode'] in ('104', '114'):
                # No data published for this date: advance to the next day.
                print('data not available for this date')
                day = day + 1
                date = date_calculator(day)
                continue
            elif JSON['data']['errorCode'] in ('500', '401', '400'):
                # Transient server failure: back off, retry the same date.
                print('error code: ', JSON['data']['errorCode'])
                print('Server having technical problems')
                time.sleep(500)
                continue
            else:
                # Rate too small to be quoted: give up on this pair.
                print('error code: ', JSON['data']['errorCode'])
                print('conversion rate too small')
                break
        else:
            rate = JSON['data']['conversionRate']
            day = day_calculator(date)
            date_id = (date_1 - first_date).days + day
            entries.append((rate, from_id, to_id, date_id))
            day += 1
            date = date_calculator(day)
    return entries
print('initiating')
entries=list()
chunks=chunkIt(range(start_from_id,152),n)
for code in codes[(start_from_id-1):]:
try:
to_ids
except:
to_ids = start_to_ids
from_ids = [chunks[x][0] for x in range(0,n)]
last_from_ids = [chunks[x][-1] for x in range(0,n)]
while any(from_ids[x] != 'done' for x in range(0,n)):
while all(to_id <=151 for to_id in to_ids):
entries.clear()
for i in range (0,n):
if from_ids[i] is to_ids[i]:
to_ids[i] +=1
continue
for i in range (0,n):
print(from_ids[i],to_ids[i])
start_time = datetime.datetime.now()
### Multithread with n threads
p = Pool(processes=n)
### Returns list of n lists of entries for the year for n currency codes
entries_list =p.starmap(extract_rates, [(from_ids[x],to_ids[x]) for x in range(0,n) ])
p.close()
for entries in entries_list:
for entry in entries:
if entry == 'done':
pass
else:
cur.execute('''INSERT OR REPLACE INTO Rates
(rate, from_id, to_id, date_id)
VALUES ( ?, ?, ?, ?)''',
(entry[0], entry[1], entry[2], entry[3]) )
conn.commit()
end_time = datetime.datetime.now()
print('Duration: {}'.format(end_time - start_time))
to_ids[:] = [x+1 for x in to_ids]
### Updates current date to ensure that if time has passed still collecting data for all available dates
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
if now.hour < 14:
today=now.date() - datetime.timedelta(days=1)
else:
today=now.date()
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
date_1=date_1+datetime.timedelta(days=2)
from_ids[:] = ['done' if from_ids[x] in ('done',last_from_ids[x]) else from_ids[x]+1 for x in range(0,n)]
print (from_ids)
to_ids[:] = [1] * n
| {
"repo_name": "HartBlanc/Mastercard_Exchange_Rates",
"path": "rate__retriever_multi_oct_14.py",
"copies": "1",
"size": "6885",
"license": "mit",
"hash": -947402704726338300,
"line_mean": 33.425,
"line_max": 169,
"alpha_frac": 0.5443718228,
"autogenerated": false,
"ratio": 3.7055974165769645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47499692393769644,
"avg_score": null,
"num_lines": null
} |
# 151 currencies, 22650 currency pairs, 364 days (period 1) 134 days (period 2) => 3,035,100(20,100/from_c) - 8,221,950 entries(8361.157)
print('importing packages')
import time
import sqlite3
import json
import requests
import datetime
import math
import pytz
from datetime import date
from multiprocessing.pool import Pool
import sys
print('connecting to db')
conn = sqlite3.connect('mastercard_db.sqlite')
cur = conn.cursor()
print('defining functions')
def day_calculator(date):
    # Map a calendar date to a 1-based day index relative to the global
    # period start date_1 (date_1 itself is day 1).
    return (date - date_1).days + 1
def date_calculator(day):
    # Inverse of day_calculator: map a 1-based day index (relative to the
    # global date_1) back to a calendar date.
    return date_1+datetime.timedelta(day-1)
def date_stringer(date):
    """Format a date as an ISO-style 'YYYY-MM-DD' string."""
    return '{:%Y-%m-%d}'.format(date)
def chunkIt(seq, num):
    """Split *seq* into *num* consecutive chunks of near-equal size.

    Boundaries are computed with a floating-point step, so chunk lengths
    differ by at most one element; the chunks cover *seq* in order.
    """
    step = len(seq) / float(num)
    chunks = []
    cursor = 0.0
    while cursor < len(seq):
        chunks.append(seq[int(cursor):int(cursor + step)])
        cursor += step
    return chunks
print('defining constants')
# Interactive run configuration: where to resume and how many worker processes.
start_from_id = int(input('from_id initial value: '))
start_to_ids=[int(x) for x in input('numbers seperated by spaces ').split()]
number_of_threads=input('number of threads ')
n = int(number_of_threads)
# Mastercard settlement-rate endpoint; date/currency placeholders are filled
# per request in find_start_day()/extract_rates().
base_url = 'https://www.mastercard.us/settlement/currencyrate/fxDate={date_};transCurr={from_};crdhldBillCurr={to_};bankFee=0.00;transAmt=1/conversion-rate'
# Earliest date for which rates exist; used to compute absolute date_id keys.
first_date=date(2016,2,29)
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
print('now: ', now)
# Before 14:00 ET the current day's rates are not yet published, so the
# latest complete day is yesterday.
if now.hour < 14:
    today=now.date() - datetime.timedelta(days=1)
else:
    today=now.date()
print('today: ', today)
# Start of the rolling 364-day window, shifted off Sunday/Saturday
# (presumably no rates are published on weekends — see weekday skips below).
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
    date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
    date_1=date_1+datetime.timedelta(days=2)
print(date_1)
date_string = date_stringer(date_1)
print('first date in period', date_1, 'today:',today)
# Day index (relative to date_1) of a known late date; upper bound for the
# binary search in find_start_day().
late_day=day_calculator(date(2016,10,14))
print('grabbing codes from db')
cur.execute('SELECT code FROM Currency_Codes')
code_tuples=cur.fetchall()
codes = [ x[0] for x in code_tuples ]
number_of_codes = len(codes)
#### FIND START DATE - FIRST CHECKS LATE DAY, THEN FIRST DAY, THEN DOES BINARY SEARCH
def find_start_day(from_c,to_c):
    """Binary-search the first day of the period with a published rate for
    the currency pair (from_c, to_c).

    Returns a (start_day, from_c, to_c) tuple; ('done', from_c, to_c) when
    the pair is flagged as already processed.  Relies on the module-level
    late_day, base_url, date_calculator and date_stringer.
    """
    if to_c=='done':
        return ('done',from_c,to_c)
    else:
        lower_bound=1
        upper_bound=late_day
        day_i=late_day-1
        while upper_bound != lower_bound:
            date_i = date_calculator(day_i)
            # Step off weekends (no data published), except close to the
            # end of the searchable window.
            if day_i < late_day-4:
                if date_i.weekday() == 6:
                    if lower_bound <= day_i-2 :
                        day_i=day_i-2
                if date_i.weekday() == 5:
                    if lower_bound <= day_i-1:
                        day_i=day_i-1
                date_i = date_calculator(day_i)
            date_string_i=date_stringer(date_i)
            url=base_url.format(date_=date_string_i,from_=from_c,to_=to_c)
            print(date_string_i,'day number:', day_i,'day of the week:', date_i.weekday())
            #Retries if requests doesn't return a json file (server errors)
            print('requesting url')
            while True:
                try:
                    r = requests.get(url)
                    JSON=r.json()
                except:
                    time.sleep(5)
                    continue
                break
            print('json retrieved')
            if 'errorCode' in JSON['data']:
                if JSON['data']['errorCode'] in ('104','114'):
                    # No data for this date: first available day is later.
                    print('data not available for this date')
                    lower_bound = day_i+1
                    if day_i==late_day-1:
                        day_i=late_day
                        break
                    else:
                        day_i=math.ceil((lower_bound+upper_bound)/2)
                    print('lower:',lower_bound,'upper:',upper_bound)
                elif JSON['data']['errorCode'] in ('500','400'):
                    # Transient server error: wait and retry the same day.
                    print('error code: ',JSON['data']['errorCode'])
                    print('Server having technical problems')
                    time.sleep(500)
                    continue
                elif JSON['data']['errorCode'] in ('401'):
                    # NOTE(review): ('401') is a plain string, not a tuple,
                    # so this is a substring test; it matches '401' exactly
                    # but would also match '4', '40', etc. — verify intent.
                    print('error code: ',JSON['data']['errorCode'])
                    print('data not available for this date')
                    lower_bound = day_i+1
                    day_i+=1
                else:
                    print('error code: ',JSON['data']['errorCode'])
                    print('conversion rate too small')
                    break
            else:
                # Data exists: first available day is here or earlier.
                upper_bound = day_i
                if day_i == late_day-1:
                    day_i=1
                elif day_i == 1:
                    break
                else:
                    day_i=math.floor((lower_bound+upper_bound)/2)
                print('lower:',lower_bound,'upper:',upper_bound)
        print('found start day', lower_bound)
        return (lower_bound,from_c,to_c)
def extract_rates(start_day,from_c,to_c):
    """Scrape the daily conversion rate for (from_c, to_c) for every day
    from start_day through today, appending (rate, from_c, to_c, date_id)
    tuples to the module-level `entries` list, which is returned.

    A start_day of 'done' appends the literal marker 'done' instead.
    """
    if start_day=='done':
        entry='done'
        entries.append(entry)
        return entries
    else:
        day=start_day
        date=date_calculator(day)
        while (today - date).days >=0:
            # Skip Saturdays straight to Monday (no weekend data),
            # except near the end of the window.
            if day < late_day-4:
                if date.weekday() == 5:
                    day = day + 2
                    date = date_calculator(day)
            date_string=date_stringer(date)
            url=base_url.format(date_=date_string,from_=from_c,to_=to_c)
            print(date)
            #Retries if requests doesn't return a json file (server errors)
            print('requesting url')
            while True:
                try:
                    r = requests.get(url)
                    JSON=r.json()
                except:
                    time.sleep(5)
                    continue
                break
            print('json retrieved')
            if 'errorCode' in JSON['data']:
                if JSON['data']['errorCode'] in ('104','114'):
                    # Missing single day: advance one day and keep going.
                    print('data not available for this date')
                    day = day + 1
                    date = date_calculator(day)
                    continue
                elif JSON['data']['errorCode'] in ('500','401','400'):
                    # Transient server error: wait, then retry the same day.
                    print('error code: ',JSON['data']['errorCode'])
                    print('Server having technical problems')
                    time.sleep(500)
                    continue
                else:
                    print('error code: ',JSON['data']['errorCode'])
                    print('conversion rate too small')
                    break
            else:
                rate = JSON['data']['conversionRate']
                day = day_calculator(date)
                print(rate)
                # Absolute day key relative to first_date, the earliest
                # date with published rates.
                date_id=(date_1-first_date).days+day
                entry=(rate,from_c,to_c,date_id)
                entries.append(entry)
                day+=1
                date=date_calculator(day)
        return entries
print('initiating')
# Shared accumulator used by extract_rates() when called in-process.
entries=list()
for code in codes[(start_from_id-1):]:
    # Split the from_id range [start_from_id, 150] across the n workers.
    chunks=chunkIt(range(start_from_id,151),n)
    # to_ids survives from a previous pass; initialise it on the first one.
    try:
        to_ids
    except NameError:  # BUG FIX: catch only the expected missing-name case
        to_ids = start_to_ids
    from_ids = [chunks[x][0] for x in range(0,n)]
    print(from_ids)
    print(to_ids)
    from_cs = [codes[from_ids[x]-1] for x in range(0,n)]
    print(from_cs)
    print('from set')
    while all(to_id != 'done' for to_id in to_ids):
        for i in range (0,n):
            # Skip the self-pair (same currency on both sides).
            # BUG FIX: compare with ==, not `is`; identity comparison of
            # ints only works by accident for CPython's small-int cache.
            if from_ids[i] == to_ids[i]:
                to_ids[i] +=1
                continue
        to_cs = ['done' if to_ids[x] == 'done' else codes[to_ids[x]-1] for x in range(0,n)]
        print(to_cs)
        for i in range (0,n):
            print(from_cs[i],to_cs[i])
        start_time = datetime.datetime.now()
        # One worker per currency pair: find each pair's first available
        # day, then extract every rate from that day onwards.
        p = Pool(processes=n)
        function_1 = find_start_day
        arguments_1 = [(from_cs[x],to_cs[x]) for x in range(0,n) ]
        start_days = p.starmap(function_1, arguments_1)
        function_2 = extract_rates
        arguments_2 = start_days
        entries_list =p.starmap(function_2, arguments_2)
        # BUG FIX: close the pool; a fresh Pool is created every iteration
        # and the workers were never released.
        p.close()
        for entries in entries_list:
            for entry in entries:
                if entry == 'done':
                    pass
                else:
                    cur.execute('''INSERT OR REPLACE INTO Rates
                        (rate, from_id, to_id, date_id)
                        VALUES ( ?, ?, ?, ?)''',
                        (entry[0], codes.index(entry[1])+1, codes.index(entry[2])+1, entry[3]) )
        conn.commit()
        end_time = datetime.datetime.now()
        print('Duration: {}'.format(end_time - start_time))
        # BUG FIX: the original `for to_id in to_ids:` loop only rebound
        # the loop variable and never changed the list; update in place
        # (the later oct_14 revision of this script does the same).
        to_ids[:] = ['done' if t == 151 else t + 1 for t in to_ids]
        # Refresh the reference dates in case midnight (ET) passed.
        date_1=today - datetime.timedelta(days=364)
        if date_1.weekday()==6:
            date_1=date_1+datetime.timedelta(days=1)
        if date_1.weekday()==5:
            date_1=date_1+datetime.timedelta(days=2)
        now = datetime.datetime.now(pytz.timezone('US/Eastern'))
        if now.hour < 14:
            today=now.date() - datetime.timedelta(days=1)
        else:
            today=now.date()
    # BUG FIX: same no-op loop pattern; advance every from_id in place.
    from_ids[:] = [f + 1 for f in from_ids]
| {
"repo_name": "HartBlanc/Mastercard_Exchange_Rates",
"path": "rate__retriever_multi.py",
"copies": "1",
"size": "9269",
"license": "mit",
"hash": 5944809106055677000,
"line_mean": 34.1098484848,
"line_max": 156,
"alpha_frac": 0.5043694034,
"autogenerated": false,
"ratio": 3.7571949736522092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9631455455604567,
"avg_score": 0.026021784289528477,
"num_lines": 264
} |
# 152. Maximum Product Subarray
#
# Find the contiguous subarray within an array (containing at least one number) which has the largest product.
#
# For example, given the array [2,3,-2,4],
# the contiguous subarray [2,3] has the largest product = 6.
class Solution:
    def maxProduct(self, A):
        """Largest product of a contiguous subarray of A.

        Tracks both the smallest and largest product ending at each index:
        a negative factor can turn the running minimum into the new maximum
        (negative times negative is positive),
        e.g. min_tmp = -4, max_tmp = -4, c = 2.
        """
        lo = hi = best = A[0]
        for val in A[1:]:
            # Snapshot all candidates first: both extremes depend on the
            # previous iteration's lo/hi.
            candidates = (val * lo, val * hi, val)
            lo = min(candidates)
            hi = max(candidates)
            if hi > best:
                best = hi
        return best

    # https://gengwg.blogspot.com/2018/03/leetcode-152-maximum-product-subarray.html
    def maxProduct(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Same min/max running-product idea; this definition shadows the
        one above and is the one actually used.
        """
        smallest = largest = answer = nums[0]
        for num in nums[1:]:
            # Keep the pre-update minimum: largest needs the old value.
            prev_smallest = smallest
            smallest = min(num * prev_smallest, num * largest, num)
            largest = max(num * prev_smallest, num * largest, num)
            answer = max(largest, answer)
        return answer
if __name__ == '__main__':
    # BUG FIX: the Python-2 print statement is a syntax error under
    # Python 3; print() works on both.
    print(Solution().maxProduct([2, 3, 1, 4, 7, -2, 2]))
| {
"repo_name": "gengwg/leetcode",
"path": "152_maximum_product_subarray.py",
"copies": "1",
"size": "1725",
"license": "apache-2.0",
"hash": -5237878554375346000,
"line_mean": 29.6981132075,
"line_max": 110,
"alpha_frac": 0.5175169023,
"autogenerated": false,
"ratio": 2.926258992805755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39437758951057555,
"avg_score": null,
"num_lines": null
} |
# 1531. String Compression II
# O(kN^2), 1340 ms
class Solution:
    def getLengthOfOptimalCompression(self, s: str, k: int) -> int:
        """Shortest run-length encoding of s after deleting at most k chars.

        solve(idx, budget) = best encoded length of s[:idx+1] with at most
        `budget` deletions: either delete s[idx], or keep it and grow the
        run of characters equal to s[idx] leftwards, deleting every
        mismatching character on the way.
        """
        BIG = 105

        def encoded_len(run):
            # Length a run contributes: 'c' -> 1, 'c2'..'c9' -> 2,
            # 'c10'..'c99' -> 3, longer -> 4.
            if run <= 1:
                return run
            if run <= 9:
                return 2
            return 3 if run <= 99 else 4

        @cache
        def solve(idx, budget):
            if idx == -1:
                return 0
            best = BIG
            # Option 1: delete s[idx].
            if budget >= 1:
                best = solve(idx - 1, budget - 1)
            # Option 2: keep s[idx] as the last group; extend it leftwards.
            run = 1
            best = min(best, solve(idx - 1, budget) + encoded_len(run))
            for j in range(idx - 1, -1, -1):
                if s[j] == s[idx]:
                    run += 1
                else:
                    budget -= 1
                if budget >= 0:
                    best = min(best, solve(j - 1, budget) + encoded_len(run))
                else:
                    break
            return best

        return solve(len(s) - 1, k)
| {
"repo_name": "digiter/Arena",
"path": "1531-string-compression-ii_2.py",
"copies": "1",
"size": "1114",
"license": "mit",
"hash": -2726846771393935000,
"line_mean": 24.9069767442,
"line_max": 70,
"alpha_frac": 0.3859964093,
"autogenerated": false,
"ratio": 3.950354609929078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48363510192290776,
"avg_score": null,
"num_lines": null
} |
# 1531. String Compression II
# The dynamic programming states are sparse.
# Time: 2944 ms
class Solution:
    def getLengthOfOptimalCompression(self, s: str, k: int) -> int:
        """Shortest run-length encoding of s after at most k deletions.

        The string is first run-length encoded into (character, count)
        groups; the DP then walks the groups carrying the still-open
        trailing run, which keeps the reachable state space sparse.
        """
        groups = []  # (character, run length) pairs, in order
        pos = 0
        while pos < len(s):
            nxt = pos
            while nxt < len(s) and s[nxt] == s[pos]:
                nxt += 1
            groups.append((s[pos], nxt - pos))
            pos = nxt

        BIG = 105

        def encoded_len(run):
            # 'c' -> 1, 'c2'..'c9' -> 2, 'c10'..'c99' -> 3, longer -> 4.
            if run <= 1:
                return run
            if run <= 9:
                return 2
            return 3 if run <= 99 else 4

        @cache
        def solve(g, openChar, openCnt, budget):
            # Best length for groups[g:], given an open trailing run of
            # `openChar` repeated `openCnt` times and `budget` deletions.
            if g == len(groups):
                return encoded_len(openCnt)
            ch, cnt = groups[g]
            best = BIG
            # Delete the whole group.
            if cnt <= budget:
                best = min(best, solve(g + 1, openChar, openCnt, budget - cnt))
            # Merge the group into the open run (same char), or close the
            # open run and start a fresh one.
            if openChar == ch:
                extra = 0
                openCnt += cnt
            else:
                extra = encoded_len(openCnt)
                openChar, openCnt = ch, cnt
            # Shrink the run down to a cheaper length bucket: 1, <=9, <=99.
            for target in (1, 9, 99):
                drop = openCnt - target
                if drop > 0 and drop <= budget:
                    best = min(best, extra + solve(g + 1, openChar, target, budget - drop))
            # Or keep the run exactly as it is.
            best = min(best, extra + solve(g + 1, openChar, openCnt, budget))
            return best

        return solve(0, "#", 0, k)
| {
"repo_name": "digiter/Arena",
"path": "1531-string-compression-ii.py",
"copies": "1",
"size": "1706",
"license": "mit",
"hash": -2965531853986724400,
"line_mean": 30.0181818182,
"line_max": 87,
"alpha_frac": 0.429073857,
"autogenerated": false,
"ratio": 4.023584905660377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49526587626603774,
"avg_score": null,
"num_lines": null
} |
# 153. Find Minimum in Rotated Sorted Array
#
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
#
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
#
# Find the minimum element.
#
# You may assume no duplicate exists in the array.
class Solution(object):
    # http://bookshadow.com/weblog/2014/10/16/leetcode-find-minimum-rotated-sorted-array/
    def findMin(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Binary search: the minimum lies in whichever half is unsorted
        relative to nums[r].
        """
        l, r = 0, len(nums) - 1
        while l < r:
            # BUG FIX: use floor division; `/` yields a float index under
            # Python 3 (under Python 2, int / int already floored).
            m = (l + r) // 2
            # if nums[m] <= nums[r]:
            if nums[m] < nums[r]:
                r = m
            else:
                l = m + 1
        return nums[l]

    # http://www.cnblogs.com/zuoyuan/p/4045742.html
    def findMin(self, nums):
        """Same binary search, with an early exit once nums[l..r] is
        already sorted (nums[l] <= nums[r]).  Shadows the method above.
        """
        l, r = 0, len(nums) - 1
        while l < r and nums[l] > nums[r]:
            m = (l + r) // 2  # BUG FIX: floor division for Python 3
            if nums[m] < nums[r]:
                r = m
            else:
                l = m + 1
        return nums[l]
if __name__ == '__main__':
    # BUG FIX: Python-2 print statement replaced with print(), which is
    # valid on both Python 2 and 3.
    print(Solution().findMin([4, 5, 6, 7, 0, 1, 2]))
| {
"repo_name": "gengwg/leetcode",
"path": "153_find_minimum_in_rotated_sorted_array.py",
"copies": "1",
"size": "1129",
"license": "apache-2.0",
"hash": -8695469665433482000,
"line_mean": 25.880952381,
"line_max": 96,
"alpha_frac": 0.4880425155,
"autogenerated": false,
"ratio": 3.189265536723164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41773080522231637,
"avg_score": null,
"num_lines": null
} |
# 155. Min Stack - LeetCode
# https://leetcode.com/problems/min-stack/description/
# Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
class MinStack(object):
    """Stack with O(1) push, pop, top and getMin.

    A parallel stack records, for every element, the minimum of the
    stack up to and including that element.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.stack = []
        self.min_stack = []

    def push(self, x):
        """
        :type x: int
        :rtype: void
        """
        self.stack.append(x)
        if self.min_stack:
            self.min_stack.append(min(x, self.min_stack[-1]))
        else:
            self.min_stack.append(x)

    def pop(self):
        """
        :rtype: void
        """
        # Popping an empty stack is silently ignored.
        if self.stack:
            self.stack.pop()
            self.min_stack.pop()

    def top(self):
        """
        :rtype: int
        """
        # Returns None implicitly when the stack is empty.
        if self.stack:
            return self.stack[-1]

    def getMin(self):
        """
        :rtype: int
        """
        if self.min_stack:
            return self.min_stack[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
# BUG FIX: the Python-2 print statements below were syntax errors under
# Python 3; print() is valid on both.  Behavior (the printed values) is
# unchanged; stray semicolons dropped.
minStack = MinStack()
minStack.push(-2)
minStack.push(0)
minStack.push(-3)
print(minStack.getMin())  # --> Returns -3.
minStack.pop()
print(minStack.top())  # --> Returns 0.
print(minStack.getMin())  # --> Returns -2.
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/155_min-stack.py",
"copies": "1",
"size": "1501",
"license": "mit",
"hash": 3087350743966768600,
"line_mean": 22.8412698413,
"line_max": 99,
"alpha_frac": 0.5163224517,
"autogenerated": false,
"ratio": 3.4585253456221197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.939870995025045,
"avg_score": 0.015227569414333958,
"num_lines": 63
} |
"""155. Min Stack
https://leetcode.com/problems/min-stack/
Design a stack that supports push, pop, top, and retrieving the minimum element
in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
"""
class MinStack:
    """Stack supporting push/pop/top plus O(1) minimum retrieval."""

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.elements = []
        self.min_cache = []

    def push(self, x: int) -> None:
        # Running minimum after this push: x alone if the stack was
        # empty, otherwise the smaller of x and the previous minimum.
        running_min = min(x, self.min_cache[-1]) if self.min_cache else x
        self.min_cache.append(running_min)
        self.elements.append(x)

    def pop(self) -> None:
        # The two stacks always have the same depth.
        self.elements.pop()
        self.min_cache.pop()

    def top(self) -> int:
        return self.elements[-1]

    def get_min(self) -> int:
        return self.min_cache[-1]
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/min_stack.py",
"copies": "1",
"size": "1168",
"license": "mit",
"hash": 3806502000796175400,
"line_mean": 21.9019607843,
"line_max": 79,
"alpha_frac": 0.5924657534,
"autogenerated": false,
"ratio": 3.3855072463768114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44779729997768114,
"avg_score": null,
"num_lines": null
} |
# 155. Min Stack
#
# Design a stack that supports push, pop, top,
# and retrieving the minimum element in constant time.
#
# push(x) -- Push element x onto stack.
# pop() -- Removes the element on top of the stack.
# top() -- Get the top element.
# getMin() -- Retrieve the minimum element in the stack.
# Example:
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.getMin(); --> Returns -3.
# minStack.pop();
# minStack.top(); --> Returns 0.
# minStack.getMin(); --> Returns -2.
# http://www.cnblogs.com/zuoyuan/p/4091870.html
# use 2 stack. one for ordinary stack; one for keeping min.
# using one stack will tle
class MinStack(object):
    """Min-stack that stores only the running minima on a second stack.

    stack2 records each value that was a (non-strict) minimum at push
    time; duplicates of the minimum are recorded too, so popping keeps
    getMin correct.
    """

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.stack1 = []  # stack
        self.stack2 = []  # min stack

    def push(self, x):
        """
        :type x: int
        :rtype: void
        """
        self.stack1.append(x)
        # Record x when it is a new (or equal) minimum.
        if not self.stack2 or x <= self.stack2[-1]:
            self.stack2.append(x)

    def pop(self):
        """
        :rtype: void
        """
        value = self.stack1.pop()
        # Drop the min-stack entry only when the current minimum leaves.
        if value == self.stack2[-1]:
            self.stack2.pop()

    def top(self):
        """
        :rtype: int
        """
        return self.stack1[-1]

    def getMin(self):
        """
        :rtype: int
        """
        return self.stack2[-1]
class MinStack:
    """Variant that mirrors every push with the running minimum, so both
    stacks always have equal depth."""

    def __init__(self):
        self.stack1 = []
        self.stack2 = []

    def push(self, x):
        self.stack1.append(x)
        # First value seeds the min stack; afterwards push min(top, x).
        previous_min = self.stack2[-1] if self.stack2 else x
        self.stack2.append(min(previous_min, x))

    def pop(self):
        # pop both stacks in lockstep
        self.stack1.pop()
        self.stack2.pop()

    def top(self):
        return self.stack1[-1]

    def getMin(self):
        return self.stack2[-1]
if __name__ == '__main__':
    # BUG FIX: Python-2 print statements replaced with print() calls,
    # valid on both Python 2 and 3; printed values are unchanged.
    minStack = MinStack()
    # print(minStack.getMin())  # would raise IndexError on an empty stack
    minStack.push(-2)
    minStack.push(0)
    minStack.push(-3)
    print(minStack.getMin())  # --> Returns -3.
    minStack.pop()
    print(minStack.top())  # 0
    print(minStack.getMin())  # --> Returns -2.
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| {
"repo_name": "gengwg/leetcode",
"path": "155_min_stack.py",
"copies": "1",
"size": "2488",
"license": "apache-2.0",
"hash": -2329201528075071000,
"line_mean": 23.1553398058,
"line_max": 67,
"alpha_frac": 0.5438102894,
"autogenerated": false,
"ratio": 3.3440860215053765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43878963109053765,
"avg_score": null,
"num_lines": null
} |
# 1563. Stone Game V
# Dynamic programming from bottom up.
# Time complexity: O(N^2), 6380 ms
#
# Note that we can't assume stoneGameV(A) <= stoneGameV(A + [x]), see
# https://leetcode.com/problems/stone-game-v/discuss/889244/Please-do-not-assume-monotonicity-like-stoneGameV(A)-stoneGameV(A%3A-1)
class Solution:
    def stoneGameV(self, stoneValue: List[int]) -> int:
        """LeetCode 1563: maximum score obtainable on stoneValue.

        dp[b][e] is the best score on stones b..e (1-based).  For each
        interval, candidate cuts are tracked with monotone pointers
        (leftCut/rightCut), so extending an interval by one stone reuses
        the previous interval's pointer instead of rescanning.
        """
        N = len(stoneValue)
        stoneValue.insert(0, 0)  # Makes it one-based.
        prefixSum = [0] * (N + 1)
        for i in range(1, N + 1):
            prefixSum[i] = prefixSum[i - 1] + stoneValue[i]

        def rangeSum(begin, end):
            # Sum of stoneValue[begin..end], both ends inclusive (1-based).
            return prefixSum[end] - prefixSum[begin - 1]

        dp = [[0] * (N + 1) for _ in range(N + 1)]
        # leftCut[b][e]: furthest cut c with sum(b..c) <= sum(c+1..e);
        # leftMax[b][e]: best "keep the left part" value over those cuts.
        # rightCut/rightMax are the mirror-image quantities.
        leftCut = copy.deepcopy(dp)
        leftMax = copy.deepcopy(dp)
        rightCut = copy.deepcopy(dp)
        rightMax = copy.deepcopy(dp)

        # Initializes when size is 1.
        for x in range(1, N + 1):
            dp[x][x] = 0
            leftCut[x][x] = x - 1
            leftMax[x][x] = 0
            rightCut[x][x] = x + 1
            rightMax[x][x] = 0

        for size in range(2, N + 1):
            for begin in range(1, N + 1 - size + 1):
                end = begin + size - 1

                # Advance the left cut while the left half stays <= right.
                lc = leftCut[begin][end - 1]
                lm = leftMax[begin][end - 1]
                while lc + 1 < end and rangeSum(begin, lc + 1) <= rangeSum(lc + 2, end):
                    lm = max(lm, rangeSum(begin, lc + 1) + dp[begin][lc + 1])
                    lc += 1
                leftCut[begin][end] = lc
                leftMax[begin][end] = lm

                # Mirror image: cuts that keep the right part.
                rc = rightCut[begin + 1][end]
                rm = rightMax[begin + 1][end]
                while begin < rc - 1 and rangeSum(rc - 1, end) <= rangeSum(
                    begin, rc - 2
                ):
                    rm = max(rm, rangeSum(rc - 1, end) + dp[rc - 1][end])
                    rc -= 1
                rightCut[begin][end] = rc
                rightMax[begin][end] = rm

                dp[begin][end] = max(lm, rm)

        return dp[1][N]
| {
"repo_name": "digiter/Arena",
"path": "1563-stone-game-v.py",
"copies": "1",
"size": "2079",
"license": "mit",
"hash": -8198490243154992000,
"line_mean": 34.8448275862,
"line_max": 131,
"alpha_frac": 0.4785954786,
"autogenerated": false,
"ratio": 3.279179810725552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9255592490868791,
"avg_score": 0.00043655969135229826,
"num_lines": 58
} |
# 1563. Stone Game V
# Time: O(N^2), 5280 ms
#
# Another bottom up dynamic programming.
# Loops y increasing and x decreasing, in order to enumerate interval [x, y] from small to large.
# So that the answer of interval [x, y] can be constructed via its sub-intervals.
class Solution:
    def stoneGameV(self, stoneValue: List[int]) -> int:
        """LeetCode 1563 in O(N^2): bottom-up over intervals [x, y].

        `cut` is the balance point of [x, y]; it only moves left as x
        decreases, so the inner while loop amortizes to O(1) per x.
        leftBest[x][y] / rightBest[x][y] cache the best value of
        rangeSum + dp over prefixes / suffixes ending or starting at an
        interval edge.
        """
        N = len(stoneValue)
        stoneValue.insert(0, 0)  # Makes it one-based.
        prefixSum = [0] * (N + 1)
        for i in range(1, N + 1):
            prefixSum[i] = prefixSum[i - 1] + stoneValue[i]

        def rangeSum(begin, end):
            # Sum of stoneValue[begin..end], inclusive (1-based).
            return prefixSum[end] - prefixSum[begin - 1]

        dp = [[0] * (N + 1) for _ in range(N + 1)]
        leftBest = copy.deepcopy(dp)
        rightBest = copy.deepcopy(dp)

        for y in range(1, N + 1):
            dp[y][y] = 0
            leftBest[y][y] = 0 + stoneValue[y]
            rightBest[y][y] = 0 + stoneValue[y]

            cut = y
            for x in range(y - 1, 0, -1):
                # Slide the cut to the balance point of the new interval.
                while x <= cut - 1 and rangeSum(x, cut - 1) >= rangeSum(cut, y):
                    cut -= 1

                def tryUse(t):
                    # Evaluate splitting [x, y] into [x, t-1] | [t, y]:
                    # the lighter side is kept (either side when equal).
                    if not (x <= t - 1 and t <= y):
                        return
                    diff = rangeSum(x, t - 1) - rangeSum(t, y)
                    if diff < 0:
                        dp[x][y] = max(dp[x][y], leftBest[x][t - 1])
                    elif diff == 0:
                        dp[x][y] = max(dp[x][y], leftBest[x][t - 1], rightBest[t][y])
                    else:
                        dp[x][y] = max(dp[x][y], rightBest[t][y])

                # Only the cut at the balance point and its neighbour can
                # be optimal.
                tryUse(cut)
                tryUse(cut + 1)

                leftBest[x][y] = max(leftBest[x][y - 1], rangeSum(x, y) + dp[x][y])
                rightBest[x][y] = max(rightBest[x + 1][y], rangeSum(x, y) + dp[x][y])

        return dp[1][N]
| {
"repo_name": "digiter/Arena",
"path": "1563-stone-game-v_2.py",
"copies": "1",
"size": "1851",
"license": "mit",
"hash": -2344841015569233000,
"line_mean": 36.02,
"line_max": 97,
"alpha_frac": 0.4575904916,
"autogenerated": false,
"ratio": 3.219130434782609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4176720926382609,
"avg_score": null,
"num_lines": null
} |
# 156. Binary Tree Upside Down
# Given a binary tree where all the right nodes are either leaf nodes with a sibling
# (a left node that shares the same parent node) or empty,
# flip it upside down and turn it into a tree where the original right nodes turned into left leaf nodes.
# Return the new root.
# For example:
# Given a binary tree {1,2,3,4,5},
#
# 1
# / \
# 2 3
# / \
# 4 5
#
# return the root of the binary tree [4,5,2,#,#,3,1].
#
# 4
# / \
# 5 2
# / \
# 3 1
#
# confused what "{1,#,2,3}" means? > read more on how binary tree is serialized on OJ.
# https://zhuhan0.blogspot.com/2017/05/leetcode-156-binary-tree-upside-down.html
#
# Thought process:
# After the flip, root and root.right will become siblings, and the left most child will become the new root.
# The idea is to traverse the tree to the left. As we traverse, we make root.left the new root,
# root.right the left child of new root, and root itself the right child of new root.
class Solution:
    def upsideDownBinaryTree(self, root):
        """Flip the tree so the left spine becomes the new right spine.

        Recurses down the left children; on the way back up, each node's
        old right child becomes its left child's new left leaf, and the
        node itself becomes its left child's new right child.  Returns
        the leftmost node as the new root.
        """
        # A node with no left child is already its own flipped tree.
        if root is None or root.left is None:
            return root
        new_root = self.upsideDownBinaryTree(root.left)
        child = root.left
        child.left = root.right
        child.right = root
        root.left = root.right = None
        return new_root
| {
"repo_name": "gengwg/leetcode",
"path": "156_binary_tree_upside_down.py",
"copies": "1",
"size": "1310",
"license": "apache-2.0",
"hash": -2283411060256632800,
"line_mean": 26.8723404255,
"line_max": 109,
"alpha_frac": 0.6526717557,
"autogenerated": false,
"ratio": 3.234567901234568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9274066422492612,
"avg_score": 0.022634646888391213,
"num_lines": 47
} |
"""15N CEST with CW decoupling.
Analyzes chemical exchange in the presence of 1H CW decoupling during the CEST
block. Magnetization evolution is calculated using the 30*30 two-spin matrix:
[ I{xyz}, S{xyz}, 2I{xyz}S{xyz} ]{a, b, ...}
Notes
-----
The calculation is designed specifically to analyze the experiment found in the
reference:
Bouvignies and Kay. J Phys Chem B (2012), 116:14311-7
"""
import numpy as np
from chemex.experiments.cest.base_cest import ProfileCEST
_EXP_DETAILS = {"carrier_dec": {"type": float}, "b1_frq_dec": {"type": float}}
class ProfileCESTNIPHCW(ProfileCEST):
    """Profile for CEST with CW decoupling."""

    EXP_DETAILS = dict(**ProfileCEST.EXP_DETAILS, **_EXP_DETAILS)
    SPIN_SYSTEM = "ixyzsxyz"
    CONSTRAINTS = "nh"

    def __init__(self, name, data, exp_details, model):
        super().__init__(name, data, exp_details, model)

        # Configure the CW decoupling field from the experiment details:
        # B1 frequency (converted to rad/s) and carrier position.
        self.liouv.w1_s = 2 * np.pi * self.exp_details["b1_frq_dec"]
        self.liouv.carrier_s = self.exp_details["carrier_dec"]

        # Set the row vector for detection
        self.detect = self.liouv.detect["iz_a"]

        # Set the varying parameters by default
        for name, full_name in self.map_names.items():
            if name.startswith(
                ("dw", "r1_i_a", "r2_i_a", "r2_mq_a", "etaxy_i_a", "etaz_i_a")
            ):
                self.params[full_name].set(vary=True)

    def _calculate_unscaled_profile(self, params_local, offsets=None):
        """Calculate the CEST profile in the presence of exchange.

        Parameters
        ----------
        params_local :
            Parameter set used to update the Liouvillian.
        offsets : array-like, optional
            B1 offsets (Hz); when given, they replace the profile's own
            offsets and none of the points is treated as a reference.

        Returns
        -------
        out : numpy.ndarray
            Intensity after the CEST block, one value per offset.
        """
        self.liouv.update(params_local)

        reference = self.reference
        carriers_i = self.carriers_i

        if offsets is not None:
            # BUG FIX: `np.bool` was removed from NumPy's public namespace
            # (deprecated in 1.20, removed in 1.24); the builtin `bool` is
            # the documented replacement.
            reference = np.zeros_like(offsets, dtype=bool)
            carriers_i = self.offsets_to_ppm(offsets)

        mag0 = self.liouv.compute_mag_eq(params_local, term="iz")

        profile = []

        for ref, carrier_i in zip(reference, carriers_i):
            self.liouv.carrier_i = carrier_i
            # Reference points skip the saturation pulse entirely.
            if not ref:
                cest = self.liouv.pulse_is(self.time_t1, 0.0, 0.0, self.dephasing)
            else:
                cest = self.liouv.identity
            mag = self.liouv.collapse(self.detect @ cest @ mag0)
            profile.append(mag)

        return np.asarray(profile)
| {
"repo_name": "gbouvignies/chemex",
"path": "chemex/experiments/cest/n_ip_h_cw.py",
"copies": "1",
"size": "2437",
"license": "bsd-3-clause",
"hash": 8694834469004552000,
"line_mean": 29.0864197531,
"line_max": 82,
"alpha_frac": 0.5999179319,
"autogenerated": false,
"ratio": 3.2278145695364238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.932698878583327,
"avg_score": 0.00014874312063067083,
"num_lines": 81
} |
''' 15-observation_fixed_direction
===============================================
AIM: Similar to 14-<...>.py, but for only one target.
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ : (see below for file name definition)
CMD: python 15-observation_fixed_direction
ISSUES: ! DOES NOT WORK !
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- BaseMap --> http://matplotlib.org/basemap/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import matplotlib.cm as cm
import time
from resources.routines import *
from resources.TimeStepping import *
from resources.targets import *
import parameters as param
import resources.constants as const
import resources.figures as figures
import time
from matplotlib import dates
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Name of object of interest OI:
OI = 'BD-082823'
# identifier of the orbit
orbit_id = 701
apogee=700
perigee=700
# First minute analysis
minute_ini = 30.*1440.
# Last minute to look for
minute_end = 50.*1440.
# Include SAA ?
SAA = False
# Show plots
show = True
# Save the picture ?
save = False
# Fancy plots ?
fancy = True
# Take into account the stray light?
straylight = False
# Minimum observable time for plots
threshold_obs_time = 50
# Time to acquire a target
t_acquisition = 6
# Catalogue name (in resources/)
catalogue = 'cheops_target_list_v0.1.dat'
# Maximum magnitude that can be seen by CHEOPS, only for cosmetics purposes
CHEOPS_mag_max = 12.5
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Factor in the SL post treatment correction ?
SL_post_treat = True
# Factor in mirror efficiency for the equivalent star magnitude ?
mirror_correction = True
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee,perigee)
###########################################################################
### INITIALISATION
file_flux = 'flux_'
# changes the threshold by addition the acquisition time:
threshold_obs_time += t_acquisition
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
if SAA:
SAA_data = np.loadtxt('resources/SAA_table_%d.dat' % orbit_id, delimiter=',')
SAA_data = SAA_data[SAA_data[:,0]>= minute_ini]
SAA_data = SAA_data[SAA_data[:,0]<= minute_end]
computed_orbits = np.loadtxt(folder_misc+orbits_file)[:,0]
############################################################################
### Load catalogue and assign them to the nearest grid point
name_cat, ra_cat, dec_cat, mag_cat = load_catalogue(catalogue)
index_ra_cat = np.zeros(np.shape(ra_cat))
index_dec_cat= np.zeros(np.shape(ra_cat))
ii = 0
for name in name_cat:
if name == OI:
break
ii += 1
print 'Target is >>>', name_cat[ii]
name_cat= name_cat[ii]
ra=ra_cat[ii]
dec=dec_cat[ii]
mag=mag_cat[ii]
id_ra = find_nearest(ras, ra/const.RAD)
id_dec = find_nearest(decs, dec/const.RAD)
obj = target_list(name, ra/const.RAD, id_ra, dec/const.RAD, id_dec, mag, int(period+3))
# Apply the flux correction (SL post-treatment removal and the mirror efficiency)
corr_fact = 1.0
if mirror_correction: corr_fact /= param.mirror_efficiency
if SL_post_treat: corr_fact *= (1.0 - param.SL_post_treat_reduction)
############################################################################
### Start the analysis
start = time.time()
# Prepare the arrays
visibility = np.zeros(np.shape(ra_grid))
#observations = np.zeros(len(name_cat)*)
workspace = np.zeros(np.shape(ra_grid))
#data = np.zeros(np.shape(ra_grid))
# Load the reference times
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
minutes_orbit_iditude = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
# Set variables for printing the advance
numberofminutes = minute_end+1 - minute_ini
lo = fast_minute2orbit(minutes_orbit_iditude,minute_end, orbit_id)
fo = fast_minute2orbit(minutes_orbit_iditude,minute_ini, orbit_id)
lp = -1
junk, junk, at_ini, junk = fast_orbit2times(minutes_orbit_iditude, fo, orbit_id)
first_computed = computed_orbits[computed_orbits<=fo][-1]
first_minute = minute_ini
last_minute = minute_end
if not fo == first_computed:
junk, junk, minute_ini, junk = fast_orbit2times(minutes_orbit_iditude, first_computed, orbit_id)
# print '1st referenced orbit: %d\twanted orbit: %d' % (first_computed, fo)
try:
for minute in range(minute_ini,int(minute_end)+1+int(period)):
minute = int(minute)
if SAA and fast_SAA(SAA_data, minute): SAA_at_minute = True
else: SAA_at_minute = False
orbit_current = fast_minute2orbit(minutes_orbit_iditude, minute, orbit_id)
if orbit_current > lp:
lp = orbit_current
message = "Analysing orbit %d on %d...\t" % (lp,lo)
sys.stdout.write( '\r'*len(message) )
sys.stdout.write(message)
sys.stdout.flush()
junk, len_orbit, atc_ini, junk = fast_orbit2times(minutes_orbit_iditude, orbit_current, orbit_id)
try:
ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
load = True
minute_to_load = minute-atc_ini#+shift
except IOError:
# if there is nothing then well, do nothing ie we copy the past values
# in which orbit are we ?
# get the previous orbit computed and copy the stray light data of this orbit :
#orbit_previous = orbits[orbits[:,0] < orbit_current][-1,0]
#minute_replacement = minute - atc_ini + shift #+ at_ini
minute_to_load = minute-atc_ini
if SAA_at_minute:
obj.current_visibility = 0
else:
obj.current_visibility = obj.visible_save[minute_to_load]
load = False
# populate the visbility matrix
# for ii in range(0, targets[0].CountObjects()):
if load:
ra_ = obj.ra
dec_ = obj.dec
a = np.where(np.abs(ra_-ra)<ra_step/2)[0]
b = np.where(np.abs(dec_-dec)<dec_step/2)[0]
INT = np.intersect1d(a,b)
if np.shape(INT)[0] == 0 or (straylight and S_sl[INT]*corr_fact > obj.maximum_flux()):
obj.visible_save[minute_to_load] = 0
obj.current_visibility = 0
continue
else:
obj.visible_save[minute_to_load] = 1
if SAA_at_minute: obj.current_visibility = 0
else: obj.current_visibility = 1
if minute == minute_ini:
obj.workspace=obj.current_visibility
continue
obj.Next(minute,threshold_obs_time)
except KeyboardInterrupt: print hilite('\nWARNING! USER STOPPED LOADING AT MINUTE %d' % minute,False,False)
obj.Next(minute,threshold_obs_time)
print
############################################################################
end = time.time()
elapsed_time = round((end-start)/60.,2)
sys.stdout.write( '\r'*len(message) )
sys.stdout.flush()
print "Time needed: %2.2f min" % elapsed_time
### Plot a few things
if fancy: figures.set_fancy()
### Plot time line
figures.set_fancy()
minute_ini = first_minute
minute_end = last_minute
fig = plt.figure()
ax = plt.subplot(111)
ii = 0
#ax.yaxis.set_major_locator(MultipleLocator(1))
plt.grid(True)
visi = obj.Visibility()
invi = obj.Invisibility()
dist = 0
##for v, i in zip(visi, invi):
## print v, i, i-v, v-dist
## dist = i
timestamps = np.zeros(lo+1-fo)
obs_time = np.zeros(lo+1-fo)
for orbit in range(fo, lo+1):
ii = orbit-fo
junk, junk, a, e = fast_orbit2times(minutes_orbit_iditude, orbit, orbit_id)
timestamps[ii] = a
visi_c = visi[(visi <= e) & (visi >= a)]
next_inv = invi[(visi <= e) & (visi >= a)]
invi_c = invi[(invi <= e) & (invi >= a)]
if np.shape(visi_c)[0] == 2:
print np.shape(visi_c)[0]
exit()
if np.shape(next_inv)[0] == 2:
print np.shape(visi_c)[0]
exit()
if np.shape(visi_c)[0] > 0 and next_inv[0] > e:
obs_time[ii] += e - visi_c + 1
elif np.shape(visi_c)[0] > 0:
print orbit
obs_time[ii] += next_inv - visi_c
#2@ current_in = invi[(invi >= a) & (invi <= e)]
#2@ current_vi = visi[(visi >= a) & (visi <= e)]
#2@shape_in = np.shape(current_in)[0]
#2@shape_vi = np.shape(current_vi)[0]
#2@if shape_in == 2 :
#2@ obs_time[ii] += current_in[0]-a
#2@ np.delete(current_in, 0)
#2@ shape_in = np.shape(current_in)[0]
#2@if shape_in == 1 and shape_vi == 1:
#2@ obs_time[ii] += current_in[0] - current_vi[0]
#2@elif shape_in == 1 and shape_vi == 0:
#2@ obs_time[ii] += current_in[0] - a
#2@elif shape_in == 0 and shape_vi == 1:
#2@ obs_time[ii] += e - current_vi[0]
if obs_time[ii] < 0:
print a,e
print current_in
print current_vi
exit()
#print timestamps
#print obs_time
plt.plot (timestamps, obs_time, lw=2)
plt.ylabel('Available Obs. Time per Orbit [min]')
# convert epoch to matplotlib float format
labels = timestamps * 60. + const.timestamp_2018_01_01
labels = np.linspace(minute_ini, minute_end+1, 12) * 60. + const.timestamp_2018_01_01
plt.xlim([minute_ini, minute_end+1])
#plt.xlim([minute_ini, minute_end+1])
#ax.xaxis.set_major_locator(MultipleLocator((minute_end-minute_ini+1)/11))
# to human readable date
pre = map(time.gmtime, labels)
labels = map(figures.format_second, pre)
ax.set_xticklabels(labels)
fig.autofmt_xdate()
if save:
threshold_obs_time -= t_acquisition
if SAA: note = '_SAA'
else: note = ''
fname = '%svisibility_%s_obs_%d_o_%d_to_%d%s' % (folder_figures, OI, threshold_obs_time,fo,lo, note)
figures.savefig(fname,fig,fancy)
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "15_observation_fixed_direction.py",
"copies": "1",
"size": "10213",
"license": "bsd-3-clause",
"hash": 1957432752794572000,
"line_mean": 26.4543010753,
"line_max": 117,
"alpha_frac": 0.6433956722,
"autogenerated": false,
"ratio": 2.8057692307692306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39491649029692305,
"avg_score": null,
"num_lines": null
} |
# 15-Puzzle Solver
# Yoni Elhanani 2019
from typing import List, Iterator, Tuple
from argparse import ArgumentParser
from heapq import heappop, heappush
from random import randrange
# A state is a configuration of stones on the board
State = Tuple[int, ...]
def showState(state: State) -> str:
    "Prints the state of the board"
    # Row separator line: |----+----+----+----|  (each cell is 4 chars wide).
    divider = "\n|" + "----+" * 3 + "----|\n"
    output = divider
    for i in range(4):
        output += "|"
        for j in range(4):
            n = state[4*i+j]
            # The conditional applies to the whole concatenation: the blank
            # square (0) renders as spaces instead of a number.
            output += " " + str(n) if n else "  "
            if n < 10:
                output += " "  # pad single-digit (and blank) cells to equal width
            output += " |"
        output += divider
    return output
def nextState(source: State, pos1: int, pos2: int) -> State:
    """Return a copy of *source* with the stones at *pos1* and *pos2* swapped."""
    swap = {pos1: source[pos2], pos2: source[pos1]}
    return tuple(swap.get(i, stone) for i, stone in enumerate(source))
def genNeignbors() -> List[List[int]]:
    """Build the adjacency lists of the 16 positions of a row-major 4x4 board."""
    adj: List[List[int]] = [[] for _ in range(16)]
    for row in range(4):
        for col in range(3):
            # Horizontal edge inside this row.
            left, right = 4 * row + col, 4 * row + col + 1
            adj[left].append(right)
            adj[right].append(left)
            # Vertical edge inside the transposed column.
            up, down = row + 4 * col, row + 4 * col + 4
            adj[up].append(down)
            adj[down].append(up)
    return adj
neighbors = genNeignbors()
def tile_distance(i: int, n: int) -> int:
    """Manhattan distance from position *i* to the home square of stone *n*.

    The blank (n == 0) costs nothing wherever it sits.
    """
    if n <= 0:
        return 0
    home_row, home_col = divmod(n - 1, 4)
    row, col = divmod(i, 4)
    return abs(home_row - row) + abs(home_col - col)
def state_distance(state: State) -> int:
    """Total l1 (Manhattan) heuristic: sum of every stone's distance home."""
    total = 0
    for pos, stone in enumerate(state):
        total += tile_distance(pos, stone)
    return total
# @total_ordering
class Node:
    "Nodes record a state, the path to that state, and distances"
    # Optimization > 1 is BFS
    # Optimization = 1/k is a k-approximation (gurantees at most k times more than shortest path)
    # Optimization = 1 is shortest path
    # Optimization = 0 is the quickest solver (indifferent to path length).
    # Optimization < -1 is DFS
    opt = 0
    def __init__(self, value: State, zero: int, parent: State, move: int,
                 dstdist: int, srcdist: int) -> None:
        self.value = value        # The state of the node
        self.zero = zero          # The location of the empty place
        self.parent = parent      # The state of the parent node
        self.move = move          # The stone that moved
        self.dstdist = dstdist    # The l1-distance to the terminal node
        self.srcdist = srcdist    # The moves count from the source terminal node
    def children(self) -> Iterator["Node"]:
        "Generates subnodes for the given node"
        # Each neighbor of the blank can slide into the blank square.
        for location in neighbors[self.zero]:
            face = self.value[location]
            next = nextState(self.value, location, self.zero)
            # Incremental heuristic update: only the moved tile's distance changes.
            diff = tile_distance(self.zero, face) - tile_distance(location, face)
            yield Node(next, location, self.value, self.zero,
                       self.dstdist + diff, self.srcdist + 1)
    def __lt__(self, other):
        # Heap priority: f = h + opt*g (classic A* when opt == 1, greedy when opt == 0).
        return self.dstdist + Node.opt*self.srcdist < other.dstdist + Node.opt*other.srcdist
def AStar(state: State) -> Tuple[List[State], int]:
    "Performs A* search to find shortest path to terminal state"
    source = state
    zero = source.index(0)
    distance = state_distance(state)
    # DAG maps each settled state to the node that first reached it.
    DAG = {}
    # The source node is its own parent: that is the stop condition below.
    heap = [Node(source, zero, source, zero, distance, 0)]
    while heap:
        node = heappop(heap)
        if node.value not in DAG:
            DAG[node.value] = node
            path = []
            # dstdist == 0 means the goal configuration has been reached.
            if node.dstdist == 0:
                while node.value != node.parent:
                    path.append(node.value)
                    node = DAG[node.parent]
                # Forward path plus the number of settled states (search effort).
                return (path[::-1], len(DAG))
            for child in node.children():
                heappush(heap, child)
    raise Exception("Odd Permutation. Impossible to reach destination")
def solve15(state: State, opt: float, verbose: bool) -> Tuple[int, int]:
    """Solve the puzzle and return ``(solution_length, states_expanded)``.

    *opt* is the weight assigned to ``Node.opt`` (1 = optimal A*, 0 = greedy).
    When *verbose*, every intermediate board is printed.

    Fix: the annotation said ``-> None`` although the function has always
    returned ``(len(path), iterations)`` — the batch branch of ``__main__``
    unpacks ``moves, nodes`` from it.
    """
    Node.opt = opt
    idx = 0
    if verbose:
        print(f"Move: {idx}, Distance: {state_distance(state)}")
        print(showState(state))
    path, iterations = AStar(state)
    for p in path:
        idx += 1
        if verbose:
            print(f"Move: {idx}, Distance: {state_distance(p)}")
            print(showState(p))
    return (len(path), iterations)
def even_random(n: int) -> List[int]:
    """Return a random even permutation of 1..15 (a product of random 3-cycles)."""
    # A permutation is even iff it can be written as a product of 3-cycles.
    perm = list(range(1, 16))
    for _ in range(n):
        i, j, k = randrange(15), randrange(15), randrange(15)
        if len({i, j, k}) == 3:
            perm[i], perm[j], perm[k] = perm[j], perm[k], perm[i]
    return perm
if __name__ == "__main__":
    parser = ArgumentParser(description='15-puzzle solver')
    parser.add_argument('--perm', '-p', metavar="i", type=int, nargs='+',
                        help='A permutation of 1..15')
    parser.add_argument('--opt', '-o', metavar="n", action='store', type=float, default=50,
                        help='Optimization percent')
    parser.add_argument('--batch', '-b', metavar='n', action='store', type=int, default=0,
                        help='Batch statistics')
    args = parser.parse_args()
    # NOTE(review): this block duplicates the else-branch below and its result
    # is unused in batch mode (each batch iteration draws a fresh permutation).
    if not args.perm:
        input = even_random(10000)
    else:
        input = args.perm
        assert sorted(input) == list(range(1, 16)), "Invalid permutation"
    if args.batch:
        print("moves\tnodes")
        for _ in range(args.batch):
            input = even_random(10000)
            moves, nodes = solve15(tuple(input) + (0, ), min(args.opt/100, 1), False)
            print(f"{moves}\t{nodes}")
    else:
        if not args.perm:
            input = even_random(10000)
        else:
            input = args.perm
            assert sorted(input) == list(range(1, 16)), "Invalid permutation"
        # opt is a percentage; the weight is capped at 1 (full A*).
        solve15(tuple(input) + (0, ), min(args.opt/100, 1), True)
| {
"repo_name": "ryanbhayward/games-puzzles-algorithms",
"path": "simple/stile/15star.py",
"copies": "1",
"size": "6039",
"license": "mit",
"hash": 9147156056561495000,
"line_mean": 33.1186440678,
"line_max": 97,
"alpha_frac": 0.5721145885,
"autogenerated": false,
"ratio": 3.5481786133960047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46202932018960047,
"avg_score": null,
"num_lines": null
} |
# 15-Puzzle Solver
# Yoni Elhanani 2019
# RBH: added node count printing
from typing import List, Iterator, Tuple
from argparse import ArgumentParser
from collections import deque
from random import randrange
# A state is a configuration of stones on the board
State = Tuple[int, ...]
def showState(state: State) -> str:
    "Prints the state of the board"
    # Row separator line: |----+----+----+----|  (each cell is 4 chars wide).
    divider = "\n|" + "----+" * 3 + "----|\n"
    output = divider
    for i in range(4):
        output += "|"
        for j in range(4):
            n = state[4*i+j]
            # The conditional applies to the whole concatenation: the blank
            # square (0) renders as spaces instead of a number.
            output += " " + str(n) if n else "  "
            if n < 10:
                output += " "  # pad single-digit (and blank) cells to equal width
            output += " |"
        output += divider
    return output
def nextState(source: State, pos1: int, pos2: int) -> State:
    """Return a new state with the stones at *pos1* and *pos2* exchanged."""
    a, b = source[pos1], source[pos2]
    return tuple(b if k == pos1 else a if k == pos2 else s
                 for k, s in enumerate(source))
def genNeighbors() -> List[List[int]]:
    """Adjacency lists for the 16 squares of a row-major 4x4 board."""
    grid: List[List[int]] = [[] for _ in range(16)]
    for a in range(4):
        for b in range(3):
            # Link horizontal pair inside row a.
            h = 4 * a + b
            grid[h].append(h + 1)
            grid[h + 1].append(h)
            # Link vertical pair inside column a.
            v = a + 4 * b
            grid[v].append(v + 4)
            grid[v + 4].append(v)
    return grid
neighbors = genNeighbors()
class Node:
    "Nodes record a state and the path to that state"
    def __init__(self, value: State, zero: int, parent: State, move: int) -> None:
        self.value = value    # The state of the node
        self.zero = zero      # The location of the empty place
        self.parent = parent  # The state of the parent node
        self.move = move      # The stone that moved
    def children(self) -> Iterator["Node"]:
        "Generates subnodes for the given node"
        # Each neighbor of the blank may slide into the blank square.
        for location in neighbors[self.zero]:
            # Negative values stand for fixed stones
            if self.value[location] > 0:
                next = nextState(self.value, location, self.zero)
                yield Node(next, location, self.value, self.zero)
def BFS(state: State, fixed: List[int], goal: List[int], verbose: bool) -> Tuple[List[int], int]:
    "Performs BFS search without moving the fixed stones, until all goal stones are in place"
    # Negative values are fixed stones.
    # There are stones for which we are indifferent to their location at a particular stage.
    # By not distinguishing them, we significantly reduce the state space for this problem.
    # For convinience, they are all given the value 16.
    source = tuple(-1 if x in fixed else 16 if x not in [0] + goal else x for x in state)
    # Already solved for this stage: no moves, no nodes searched.
    if all(source[(n-1) % 16] == n for n in goal):
        return ([], 0)
    zero = source.index(0)
    # DAG maps each visited state to the node that first reached it.
    DAG = {source: Node(source, zero, source, zero)}
    queue = deque(DAG[source].children())
    iterations = 0
    while queue:
        iterations += 1
        # if 0 == iterations % 1000: print(iterations, "iterations")
        node = queue.pop()
        if node.value not in DAG:
            DAG[node.value] = node
            queue.extendleft(node.children())
            # Goal test: stone n belongs at index (n-1) % 16 (blank would be 15).
            if all(node.value[(n-1) % 16] == n for n in goal):
                # Reconstruct the blank's positions by walking parent links.
                path = [node.zero, node.move]
                while node.value != source:
                    path.append(node.move)
                    node = DAG[node.parent]
                if verbose:
                    print("nodes searched", iterations)
                return (path[-2::-1], iterations)
    raise Exception("Odd Permutation. Impossible to reach destination")
def solve15(state: State, stages: List[List[int]], verbose: bool) -> Tuple[int, int]:
    "Solves the puzzle in stages"
    # At each stage we find the shortest path to reach the next stage.
    # Then we apply it to the current state and continue to the next stage from there.
    if verbose:
        print(showState(state))
    zero = state.index(0)
    fixed = []
    movecount = 0
    stagecount = 0
    iterations = 0
    for goal in stages:
        stagecount += 1
        if verbose:
            print(f"\nStage {stagecount}:\n")
        path, subiter = BFS(state, fixed, goal, verbose)
        iterations += subiter
        # Replay the blank's path, counting only real moves (skip no-ops).
        for x in path:
            if zero != x:
                movecount += 1
                state = nextState(state, zero, x)
                zero = x
        if verbose:
            print(showState(state))
            print(f"Moves: {movecount}")
        # Stones placed during this stage may no longer move in later stages.
        fixed += goal
    if verbose:
        print()
        print(f"Total nodes searched: {iterations}")
    return (movecount, iterations)
def even_random(n: int) -> List[int]:
    """Random even permutation of 1..15, built from random 3-cycles."""
    # A permutation is even iff it can be written as a product of 3-cycles.
    perm = list(range(1, 16))
    for _ in range(n):
        i, j, k = randrange(15), randrange(15), randrange(15)
        if len({i, j, k}) == 3:
            # Apply the 3-cycle (i j k).
            perm[i], perm[j], perm[k] = perm[j], perm[k], perm[i]
    return perm
if __name__ == "__main__":
    parser = ArgumentParser(description='15-puzzle solver')
    parser.add_argument('--perm', '-p', metavar='i', type=int, nargs='+',
                        help='A permutation of 1..15')
    parser.add_argument('--staging', '-s', metavar='n', action='store', type=int, default=1,
                        help='Staging schedule')
    parser.add_argument('--batch', '-b', metavar='n', action='store', type=int, default=0,
                        help='Batch statistics')
    args = parser.parse_args()
    # Stage schedules: more stages mean a smaller search per stage.
    optlevels = [[[1, 2], [3, 4], [5, 6], [7, 8], [9, 13], [10, 14], [11, 12, 15]],
                 [[1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15]],
                 [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15]]]
    assert 0 < args.staging <= len(optlevels), "Staging schedule does not exist"
    if args.batch:
        # Batch mode: solve fresh random permutations, print TSV statistics.
        print("moves\tnodes")
        for _ in range(args.batch):
            input = even_random(10000)
            moves, nodes = solve15(tuple(input) + (0, ), optlevels[args.staging-1], False)
            print(f"{moves}\t{nodes}")
    else:
        if not args.perm:
            input = even_random(10000)
        else:
            input = args.perm
            assert sorted(input) == list(range(1, 16)), "Invalid permutation"
        solve15(tuple(input) + (0, ), optlevels[args.staging-1], True)
| {
"repo_name": "ryanbhayward/games-puzzles-algorithms",
"path": "simple/stile/15puzzle.py",
"copies": "1",
"size": "6363",
"license": "mit",
"hash": -4683582141403558000,
"line_mean": 36.4294117647,
"line_max": 97,
"alpha_frac": 0.5599559956,
"autogenerated": false,
"ratio": 3.629777524244153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46897335198441525,
"avg_score": null,
"num_lines": null
} |
"""15th Night Flask App."""
from email_client import send_email, verify_email
from flask import (
Flask, render_template, redirect, url_for, request, session, flash
)
from flask.ext.login import (
login_user, current_user, login_required, LoginManager
)
from twilio_client import send_sms
from werkzeug.exceptions import HTTPException
from app import database
from app.database import db_session
from app.forms import RegisterForm, LoginForm, AlertForm, ResponseForm, DeleteUserForm
from app.models import User, Alert, Response
from app.email_client import send_email
try:
from config import HOST_NAME
except:
from configdist import HOST_NAME
flaskapp = Flask(__name__)
try:
flaskapp.config.from_object('config')
except:
flaskapp.config.from_object('configdist')
flaskapp.secret_key = flaskapp.config['SECRET_KEY']
login_manager = LoginManager()
login_manager.init_app(flaskapp)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(id):
    """User loading needed by Flask-Login.

    Flask-Login stores the user id in the session as a string, so it is
    cast back to int before the primary-key lookup.
    """
    return User.query.get(int(id))
@flaskapp.teardown_appcontext
def shutdown_session(response):
    """Database management.

    Removes the scoped SQLAlchemy session at the end of each app context.
    NOTE(review): teardown_appcontext handlers receive the error (or None);
    the parameter name ``response`` is misleading — confirm before renaming.
    """
    database.db_session.remove()
@flaskapp.errorhandler(404)
@flaskapp.errorhandler(Exception)
def error_page(error):
    """Generic Error handling."""
    # Non-HTTP exceptions fall through with a 500 status.
    code = 500
    if isinstance(error, HTTPException):
        code = error.code
    # NOTE(review): stdout print — a logger would be more appropriate here.
    print(error)
    return render_template("error.html", error_code=code), code
@flaskapp.route('/')
def index():
    """Landing page: authenticated users go to the dashboard, others see home."""
    if current_user.is_authenticated:
        return redirect(url_for('dashboard'))
    return render_template('home.html')
@flaskapp.route('/login', methods=['GET', 'POST'])
def login():
    """Route for handling the login page logic."""
    if current_user.is_authenticated:
        return redirect(url_for('dashboard'))
    # creates instance of form
    form = LoginForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            # E-mails are looked up lowercase; normalise the input first.
            user = User.get_by_email(request.form['email'].lower())
            passwd = request.form.get("password")
            if user is not None and user.check_password(passwd):
                # session cookie in browser
                session['logged_in'] = True
                login_user(user)
                flash('Logged in successfully.', 'success')
                # Honour the ?next= redirect that Flask-Login appends.
                return redirect(request.args.get('next') or url_for('dashboard'))
            else:
                flash('Invalid Credentials. Please try again.', 'danger')
    return render_template('login.html', form=form)
@flaskapp.route('/dashboard', methods=['GET', 'POST'])
@login_required
def dashboard():
    """Dashboard.

    Renders a different view per role: admins register/delete users,
    advocates create alerts, providers see active alerts.
    """
    if current_user.role == 'admin':
        # Admin user, show register form
        form = RegisterForm()
        form_error = False
        # One-shot flag set by delete_user; pop so it shows only once.
        deleted_user = session.pop('deleted_user', False)
        if request.method == 'POST' and form.validate_on_submit():
            user = User(
                email=form.email.data,
                password=form.password.data,
                phone_number=form.phone_number.data,
                other=form.other.data,
                shelter=form.shelter.data,
                food=form.food.data,
                clothes=form.clothes.data,
                role=form.role.data
            )
            user.save()
            verify_email(user.email)
            flash('User registered succesfully', 'success')
            return redirect(url_for('dashboard'))
        # NOTE(review): validate_on_submit() runs twice on invalid POSTs —
        # the first result could be cached in a local instead.
        elif request.method == 'POST' and not form.validate_on_submit():
            form_error = True
        return render_template('dashboard/admin.html',
                               form=form,
                               form_error=form_error,
                               users=User.get_users(),
                               alerts=Alert.get_alerts(),
                               delete_user_form=DeleteUserForm(),
                               deleted_user=deleted_user)
    elif current_user.role == 'advocate':
        # Advocate user, show alert form
        form = AlertForm()
        if request.method == 'POST' and form.validate_on_submit():
            alert = Alert(
                description=form.description.data,
                other=form.other.data,
                shelter=form.shelter.data,
                food=form.food.data,
                clothes=form.clothes.data,
                gender=form.gender.data,
                age=form.age.data,
                user=current_user
            )
            alert.save()
            # Notify every provider matching the alert's need categories.
            users_to_notify = User.get_provider(alert.food, alert.clothes, alert.shelter, alert.other)
            for user in users_to_notify:
                print("found user to notify {}".format(user))
                body = "There is a new 15th night alert. Go to " + \
                    HOST_NAME + \
                    "/respond_to/" + \
                    str(alert.id) + " to respond."
                send_sms(to_number=user.phone_number, body=body)
                send_email(user.email, '15th Night Alert', body)
            flash('Alert sent successfully', 'success')
            return redirect(url_for('dashboard'))
        return render_template('dashboard/advocate.html', form=form)
    else:
        # Provider user, show alerts
        return render_template(
            'dashboard/provider.html',
            user=current_user,
            alerts=Alert.get_active_alerts_for_provider(current_user)
        )
@flaskapp.route('/delete_user', methods=['POST'])
@login_required
def delete_user():
    """Admin-only removal of a user account.

    Fix: the ``deleted_user`` session flag (read back by the dashboard view)
    was previously set even when form validation failed; it is now set only
    after a successful deletion.
    """
    if current_user.role != 'admin':
        flash('Access denied', 'danger')
        return redirect(url_for('dashboard'))
    form = DeleteUserForm()
    if form.validate_on_submit():
        user = User.get(form.id.data)
        user.delete()
        flash('User Deleted Successfully', 'success')
        session['deleted_user'] = True
    else:
        flash('Failed to delete user', 'danger')
    return redirect(url_for('dashboard'))
@flaskapp.route("/logout")
@login_required
def logout():
    """User logout.

    Clears the whole session (which also drops Flask-Login's stored user id).
    """
    session.clear()
    flash('You have been logged out!', 'success')
    return redirect(url_for('index'))
@flaskapp.route('/health')
def healthcheck():
    """Low overhead health check.

    Bare 200 response for load-balancer probes: no templates, no database.
    """
    return 'ok', 200
@flaskapp.route('/about')
def about():
    """Simple about page route."""
    # Static template; no authentication required.
    return render_template('about.html')
@flaskapp.route('/contact', methods=['GET', 'POST'])
def contact():
    """Contact form: e-mail the submitted message, then redirect to login.

    Fix: removed the leftover debugging flash ('you tried to make a post')
    that was displayed to users on every submission.
    """
    if request.method == 'POST':
        name = request.form['name']
        email = request.form['email']
        message = request.form['message']
        # NOTE(review): this sends the message *to the submitter's own
        # address* — presumably it should go to a site mailbox; confirm.
        send_email(to=email, subject="Contact Form", body=message)
        return redirect(url_for('login'))
    return render_template('contact.html')
@flaskapp.route('/respond_to/<int:alert_id>', methods=['GET','POST'])
@login_required
def response_submitted(alert_id):
    """
    Action performed when a response is provided.
    Text the creator of the alert:
        - email, phone, and things able to help with of the responding user.

    Fix: corrected the user-facing spelling in the outgoing message
    ("is availble for:" -> "is available for:").
    """
    if request.method == 'POST':
        submitted_message = request.form['message']
        responding_user = current_user
        try:
            alert = Alert.query.get(int(alert_id))
        except Exception as e:
            return 'Error {}'.format(e), 404
        user_to_message = alert.user
        # Build the notification: responder contact info + capabilities.
        response_message = "%s" % responding_user.email
        if responding_user.phone_number:
            response_message += ", %s" % responding_user.phone_number
        response_message += " is available for: "
        available = {
            "shelter": responding_user.shelter,
            "clothes": responding_user.clothes,
            "food": responding_user.food,
            "other": responding_user.other,
        }
        # Only advertise the categories the responder actually offers.
        response_message += "%s" % ", ".join(k for k, v in available.items() if v)
        response_message += " Message: " + submitted_message
        # SMS is optional — the advocate may have no phone number on file.
        if user_to_message.phone_number:
            send_sms(
                user_to_message.phone_number,
                response_message
            )
        send_email(
            to=user_to_message.email,
            subject="Alert Response",
            body=response_message,
        )
        Response(user=current_user, alert=alert, message=submitted_message).save()
        flash('Your response has been sent to the advocate, thank you!', 'success')
        return redirect(url_for('dashboard'))
    else:
        try:
            alert = Alert.query.get(int(alert_id))
        except Exception as e:
            return 'Error {}'.format(e), 404
        return render_template('respond_to.html', alert=alert, user=current_user, form=ResponseForm())
if __name__ == '__main__':
flaskapp.run(debug=True)
| {
"repo_name": "Hack4Eugene/Hack4Cause2016",
"path": "src/bobby-drop-tables/15thnight/app/__init__.py",
"copies": "1",
"size": "8815",
"license": "mit",
"hash": -798903816248380500,
"line_mean": 31.8917910448,
"line_max": 102,
"alpha_frac": 0.600226886,
"autogenerated": false,
"ratio": 3.992300724637681,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5092527610637682,
"avg_score": null,
"num_lines": null
} |
# 1601. Maximum Number of Achievable Transfer Requests
# Time: 4572 ms
class Solution:
    """LeetCode 1601: grant the most transfer requests so that every
    building's arrivals equal its departures (net change zero)."""

    def maximumRequests(self, n: int, requests: List[List[int]]) -> int:
        m = len(requests)

        def balanced(subset: int) -> bool:
            # A request subset is grantable iff each building's net flow is 0.
            net = collections.Counter()
            for bit in range(m):
                if subset >> bit & 1:
                    src, dst = requests[bit]
                    net[src] -= 1
                    net[dst] += 1
            return all(v == 0 for v in net.values())

        # size[s] = popcount(s) if subset s is a balanced circulation, else 0.
        size = [0] * (1 << m)
        for s in range(1, 1 << m):
            if balanced(s):
                size[s] = bin(s).count("1")

        @cache
        def best(mask: int) -> int:
            # Partition `mask` into balanced groups, maximizing requests granted.
            res = 0
            sub = mask
            while sub:
                if size[sub]:
                    res = max(res, size[sub] + best(mask ^ sub))
                sub = (sub - 1) & mask
            return res

        return best((1 << m) - 1)
| {
"repo_name": "digiter/Arena",
"path": "1601-maximum-number-of-achievable-transfer-requests.py",
"copies": "1",
"size": "1302",
"license": "mit",
"hash": 6513162663974796000,
"line_mean": 32.3846153846,
"line_max": 77,
"alpha_frac": 0.4869431644,
"autogenerated": false,
"ratio": 4.213592233009709,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5200535397409709,
"avg_score": null,
"num_lines": null
} |
# 1601. Maximum Number of Achievable Transfer Requests
# Time: O(2^len(requests)), 640 ms
class Solution:
    """LeetCode 1601, backtracking variant: self-transfers are always granted;
    for the rest, try include/exclude while tracking per-building balance."""

    def maximumRequests(self, n: int, requests: List[List[int]]) -> int:
        granted_loops = sum(1 for src, dst in requests if src == dst)
        moves = [(src, dst) for src, dst in requests if src != dst]
        total = len(moves)
        net = [0] * n

        def shift(building: int, delta: int, zeros: int) -> int:
            # Apply delta to one building's balance; return updated zero-count.
            if net[building] == 0:
                zeros -= 1
            net[building] += delta
            if net[building] == 0:
                zeros += 1
            return zeros

        def search(idx: int, taken: int, zeros: int) -> int:
            if idx == total:
                # Valid only when every building is back to net zero.
                return taken if zeros == n else 0
            # Branch 1: skip this request.
            best = search(idx + 1, taken, zeros)
            # Branch 2: grant it.
            src, dst = moves[idx]
            zeros = shift(src, -1, zeros)
            zeros = shift(dst, +1, zeros)
            best = max(best, search(idx + 1, taken + 1, zeros))
            # Undo the balance changes for the caller.
            net[src] += 1
            net[dst] -= 1
            return best

        return granted_loops + search(0, 0, n)
| {
"repo_name": "digiter/Arena",
"path": "1601-maximum-number-of-achievable-transfer-requests_3.py",
"copies": "1",
"size": "1205",
"license": "mit",
"hash": 403998231328250750,
"line_mean": 27.6904761905,
"line_max": 72,
"alpha_frac": 0.4406639004,
"autogenerated": false,
"ratio": 3.8621794871794872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48028433875794874,
"avg_score": null,
"num_lines": null
} |
# 16/02/2018
import sys
def ffd(s, b):
    """First Fit Decreasing bin packing.

    s: shelves as [id, capacity, available, book_titles]; b: (title, width)
    books.  Shelf lists are mutated in place.  Returns the shelves that
    received at least one book (largest capacity first), or None when some
    book cannot be placed.

    NOTE(review): placement requires available strictly greater than the
    width, so a book exactly filling a shelf is rejected — confirm intended.
    """
    shelves = sorted(s, key=lambda shelf: shelf[1], reverse=True)
    for title, width in sorted(b, key=lambda book: book[1], reverse=True):
        for shelf in shelves:
            if shelf[2] > width:
                shelf[3].append(title)
                shelf[2] -= width
                break
            if shelf[1] == shelf[2] and not shelf[2] > width:
                # A still-empty shelf cannot take this book; since shelves
                # shrink in capacity, no later shelf can either.
                return None
    return [shelf for shelf in shelves if shelf[3]]
if __name__ == "__main__":
    # First stdin line: shelf capacities; each following line: "<width> <title>".
    shelves = [[n, int(capacity), int(capacity), []] for n, capacity in enumerate(sys.stdin.readline().split())]
    books = []
    for line in sys.stdin.readlines():
        w, t = line.split(maxsplit=1)
        books.append((t.strip(), int(w)))
    solution = ffd(shelves, books)
    if solution is None:
        print("Impossible")
    else:
        # Report each used shelf as "<id>: <list of titles>".
        print(len(solution), " shelves used")
        print('\n'.join("{}: {}".format(s[0], s[3]) for s in solution))
| {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/easy/e350.py",
"copies": "2",
"size": "1313",
"license": "mit",
"hash": -6985693161280268000,
"line_mean": 33.5526315789,
"line_max": 112,
"alpha_frac": 0.5582635187,
"autogenerated": false,
"ratio": 3.298994974874372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.983533166949756,
"avg_score": 0.004385364815362425,
"num_lines": 38
} |
# 160. Intersection of Two Linked Lists - LeetCode
# https://leetcode.com/problems/intersection-of-two-linked-lists/description/
# Your code should preferably run in O(n) time and use only O(1) memory.
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node: a value and a pointer to the next node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """Return the first node shared by both lists, or None.

        Measures both lengths, advances the longer list's cursor by the
        difference, then walks both cursors in lockstep until they meet.
        O(n + m) time, O(1) extra space.

        :type head1, head1: ListNode
        :rtype: ListNode
        """
        def length(node):
            count = 0
            while node is not None:
                node = node.next
                count += 1
            return count

        len_a = length(headA)
        len_b = length(headB)
        pa, pb = headA, headB
        # Skip ahead on the longer list so both cursors have equal tail length.
        if len_a >= len_b:
            for _ in range(len_a - len_b):
                pa = pa.next
        else:
            for _ in range(len_b - len_a):
                pb = pb.next
        # Step together; the first coincidence is the intersection.
        while pa:
            if pa == pb:
                return pa
            pa = pa.next
            pb = pb.next
        return None
# Ad-hoc demo (Python 2 print statements).
# List A: a1 -> a2 -> c1 -> c2 -> c3; B grows to b1 -> b2 -> c1 -> ...
s = Solution()
c1 = ListNode("c1")
c1.next = ListNode("c2")
c1.next.next = ListNode("c3")
a1 = ListNode("a1")
a1.next = ListNode("a2")
a1.next.next = c1
b1 = None
print s.getIntersectionNode(a1,b1) # None
b1 = ListNode("b1")
print s.getIntersectionNode(a1,b1) # None
b2 = ListNode("b2")
b1.next = b2
b2.next = c1
print s.getIntersectionNode(a1,b1) # c1
print s.getIntersectionNode(a1,b1) # c1 | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/160_intersection-of-two-linked-lists.py",
"copies": "1",
"size": "1591",
"license": "mit",
"hash": 8724546032823632000,
"line_mean": 23.4923076923,
"line_max": 77,
"alpha_frac": 0.5028284098,
"autogenerated": false,
"ratio": 3.3636363636363638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9343056771182208,
"avg_score": 0.00468160045083122,
"num_lines": 65
} |
# 160. Intersection of Two Linked Lists
#
# Write a program to find the node at which the intersection
# of two singly linked lists begins.
#
#
# For example, the following two linked lists:
#
# A: a1 - a2
# \
# c1 - c2 - c3
# /
# B: b1 - b2 - b3
# begin to intersect at node c1.
#
# Notes:
#
# If the two linked lists have no intersection at all, return null.
# The linked lists must retain their original structure after the function returns.
# You may assume there are no cycles anywhere in the entire linked structure.
# Your code should preferably run in O(n) time and use only O(1) memory.
#
# Credits:
# Special thanks to @stellari for adding this problem and creating all test cases.
class ListNode:
    """Singly-linked list node with a recursive chain repr ("1 -> 2 -> None")."""
    def __init__(self, x):
        self.val = x
        self.next = None
    def __repr__(self):
        if self:
            return "{} -> {}".format(self.val, repr(self.next))
class Solution:
    """LeetCode 160: first common node of two singly-linked lists.

    Fix: the class previously defined ``getIntersectionNode`` twice; the
    first definition (which compared/returned ``.val`` instead of nodes)
    was silently shadowed by the second and is removed as dead code.
    """

    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode

        Length-alignment approach: measure both lists, advance the longer
        one by the difference, then step in lockstep until the cursors
        coincide (node identity); implicitly returns None otherwise.
        """
        p = headA
        q = headB
        # get length of both lists
        lengthA = 0
        lengthB = 0
        while p:
            p = p.next
            lengthA += 1
        while q:
            q = q.next
            lengthB += 1
        # move the longer one diff steps first
        p = headA
        q = headB
        if lengthA > lengthB:
            for _ in range(lengthA - lengthB):
                p = p.next
        else:
            for _ in range(lengthB - lengthA):
                q = q.next
        # move together until equals
        while p and q:
            if p == q:
                return p
            p = p.next
            q = q.next

    def _len(self, head):
        """Number of nodes in the list starting at *head* (kept as a utility)."""
        count = 0
        while head:
            count += 1
            head = head.next
        return count
if __name__ == "__main__":
    # Demo (Python 2 prints). The two lists contain equal *values* (6, 7)
    # but distinct node objects, and comparison is by node identity.
    headA = ListNode(1)
    headA.next = ListNode(2)
    headA.next.next = ListNode(3)
    headA.next.next.next = ListNode(6)
    headA.next.next.next.next = ListNode(7)
    headB = ListNode(4)
    headB.next = ListNode(5)
    headB.next.next = ListNode(6)
    headB.next.next.next = ListNode(7)
    print headA
    print headB
    print Solution().getIntersectionNode(headA, headB)
| {
"repo_name": "gengwg/leetcode",
"path": "160_intersection_two_linked_lists.py",
"copies": "1",
"size": "3199",
"license": "apache-2.0",
"hash": -8922265284723552000,
"line_mean": 25.0081300813,
"line_max": 83,
"alpha_frac": 0.5323538606,
"autogenerated": false,
"ratio": 3.7591069330199764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47914607936199766,
"avg_score": null,
"num_lines": null
} |
# 163 Missing Ranges
# Given a sorted integer array where the range of elements are [lower, upper] inclusive,
# return its missing ranges.
#
# For example, given [0, 1, 3, 50, 75], lower = 0 and upper = 99,
# return ["2", "4->49", "51->74", "76->99"].
#
class Solution(object):
    """LeetCode 163: report the ranges within [lower, upper] that are
    absent from the sorted array nums."""

    def findMissingRanges(self, nums, lower, upper):
        """
        :type nums: List[int]
        :type lower: int
        :type upper: int
        :rtype: List[str]
        """
        def fmt(lo, hi):
            # A single missing value prints alone; a span prints as "lo->hi".
            return str(lo) if lo == hi else '{}->{}'.format(lo, hi)

        missing = []
        expected = lower  # smallest value not yet accounted for
        for value in nums:
            if value > expected:
                missing.append(fmt(expected, value - 1))
            if value >= expected:
                expected = value + 1
        # Trailing gap after the last array element.
        if expected <= upper:
            missing.append(fmt(expected, upper))
        return missing
if __name__ == "__main__":
print (Solution().findMissingRanges([0, 1, 3, 50, 75], 0, 99)) | {
"repo_name": "gengwg/leetcode",
"path": "163_missing_range.py",
"copies": "1",
"size": "1202",
"license": "apache-2.0",
"hash": -5720606794152456000,
"line_mean": 26.976744186,
"line_max": 88,
"alpha_frac": 0.496672213,
"autogenerated": false,
"ratio": 3.840255591054313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4836927804054313,
"avg_score": null,
"num_lines": null
} |
# 1655. Distribute Repeating Integers
# Enumerates a subset of quantity to fill the ith freq.
# Dynamic programming, number of states: len(freq) * 2^len(quantity).
# Time complexity: O(3^len(quantity)), see https://cp-algorithms.com/algebra/all-submasks.html
class Solution:
    def canDistribute(self, nums: List[int], quantity: List[int]) -> bool:
        """Bitmask DP over subsets of orders: can each customer's quantity be
        served from a single value's supply?"""
        freq = list(collections.Counter(nums).values())
        N = len(quantity)
        full = (1 << N) - 1
        # Precompute the total demand of every subset of orders.
        need = [0] * (1 << N)
        for mask in range(1, 1 << N):
            low = mask & -mask
            i = low.bit_length() - 1
            need[mask] = need[mask ^ low] + quantity[i]
        @cache
        def fill(idx, remaining):
            # remaining: bitmask of orders not yet assigned to a value.
            if idx == len(freq):
                return remaining == 0
            # Try every non-empty subset of the remaining orders for freq[idx].
            sub = remaining
            while sub:
                if need[sub] <= freq[idx] and fill(idx + 1, remaining ^ sub):
                    return True
                sub = remaining & (sub - 1)
            # Also allow assigning no order to this value.
            return fill(idx + 1, remaining)
        return fill(0, full)
| {
"repo_name": "digiter/Arena",
"path": "1655-distribute-repeating-integers.py",
"copies": "1",
"size": "1140",
"license": "mit",
"hash": -6979880613950458000,
"line_mean": 33.5454545455,
"line_max": 94,
"alpha_frac": 0.5175438596,
"autogenerated": false,
"ratio": 3.877551020408163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9893499983676424,
"avg_score": 0.0003189792663476874,
"num_lines": 33
} |
# 1659. Maximize Grid Happiness
#
# Time complexity of this dynamic programming:
# Number of states: 5^2 * 6 * 6 * 3^5
# Number of transitions: 3 * 5
# So 3*10^6 in total.
#
# It takes 228 ms to calculate the input of (5, 5, 5, 5).
class Solution:
    def getMaxGridHappiness(
        self, m: int, n: int, introvertsCount: int, extrovertsCount: int
    ) -> int:
        """Cell-by-cell DP in row-major order, keeping the last n placements
        as the memoization state (a "broken profile" DP)."""
        # Cell states.
        EMPTY, IN, EX = 0, 1, 2
        def connect(x, y):
            # Happiness change when cells in states x and y become neighbours.
            delta = 0
            if x == IN and y != EMPTY:
                delta += -30
            if x == EX and y != EMPTY:
                delta += 20
            if y == IN and x != EMPTY:
                delta += -30
            if y == EX and x != EMPTY:
                delta += 20
            return delta
        @cache
        def solve(index, inCnt, exCnt, prevN):
            # index: next cell in row-major order; prevN: states of the
            # previous n cells, prevN[0] being the cell directly above index.
            if index == m * n:
                return 0
            if inCnt == 0 and exCnt == 0:
                return 0
            ansEmpty = solve(index + 1, inCnt, exCnt, prevN[1:] + (EMPTY,))
            ansIn = 0
            if inCnt > 0:
                ansIn += 120
                # Updates the above grid.
                ansIn += connect(prevN[0], IN)
                # Updates the left grid.
                if 1 <= index % n:
                    ansIn += connect(prevN[-1], IN)
                ansIn += solve(index + 1, inCnt - 1, exCnt, prevN[1:] + (IN,))
            ansEx = 0
            if exCnt > 0:
                ansEx += 40
                # Updates the above grid.
                ansEx += connect(prevN[0], EX)
                # Updates the left grid.
                if 1 <= index % n:
                    ansEx += connect(prevN[-1], EX)
                ansEx += solve(index + 1, inCnt, exCnt - 1, prevN[1:] + (EX,))
            return max(ansEmpty, ansIn, ansEx)
        return solve(0, introvertsCount, extrovertsCount, tuple([EMPTY] * n))
| {
"repo_name": "digiter/Arena",
"path": "1659-maximize-grid-happiness_2.py",
"copies": "1",
"size": "1887",
"license": "mit",
"hash": -7347570347155684000,
"line_mean": 30.45,
"line_max": 78,
"alpha_frac": 0.4483306836,
"autogenerated": false,
"ratio": 3.774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.97223306836,
"avg_score": 0,
"num_lines": 60
} |
# 1659. Maximize Grid Happiness
# This solution got TLE for input (5, 5, 5, 5): it takes 3380 ms to run.
# Time complexity (roughtly):
# Number of states: 5 * 6 * 6 * 3^5
# Number of transitions: 3^5 * 5
# So 5*10^7 in total.
class Solution:
    def getMaxGridHappiness(
        self, m: int, n: int, introvertsCount: int, extrovertsCount: int
    ) -> int:
        """Row-by-row DP; the state is the entire previous row, which is why
        this version is too slow (see header comment)."""
        EMPTY, IN, EX = 0, 1, 2
        def generateRow(inCnt, exCnt, row, colIndex):
            # Yields every filling of `row` using at most inCnt introverts
            # and exCnt extroverts. NOTE: the same list object is reused, so
            # callers must copy it (tuple(row)) before storing.
            if colIndex == n:
                yield row
                return
            row[colIndex] = EMPTY
            for r in generateRow(inCnt, exCnt, row, colIndex + 1):
                yield r
            if inCnt > 0:
                row[colIndex] = IN
                for r in generateRow(inCnt - 1, exCnt, row, colIndex + 1):
                    yield r
            if exCnt > 0:
                row[colIndex] = EX
                for r in generateRow(inCnt, exCnt - 1, row, colIndex + 1):
                    yield r
        @cache
        def solve(rowIndex, inCnt, exCnt, prevRow):
            if rowIndex == m:
                return 0
            if inCnt == 0 and exCnt == 0:
                return 0
            ans = 0
            for row in generateRow(inCnt, exCnt, [-1] * n, 0):
                score = 0
                nextInCnt, nextExCnt = inCnt, exCnt
                # Accounts score update for the previous row.
                for i in range(n):
                    if prevRow[i] == IN:
                        if row[i] != EMPTY:
                            score -= 30
                    elif prevRow[i] == EX:
                        if row[i] != EMPTY:
                            score += 20
                # Accounts score update for the current row.
                for i in range(n):
                    if row[i] == IN:
                        score += 120
                        if prevRow[i] != EMPTY:
                            score -= 30
                        if 0 <= i - 1 and row[i - 1] != EMPTY:
                            score -= 30
                        if i + 1 < n and row[i + 1] != EMPTY:
                            score -= 30
                        nextInCnt -= 1
                    elif row[i] == EX:
                        score += 40
                        if prevRow[i] != EMPTY:
                            score += 20
                        if 0 <= i - 1 and row[i - 1] != EMPTY:
                            score += 20
                        if i + 1 < n and row[i + 1] != EMPTY:
                            score += 20
                        nextExCnt -= 1
                score += solve(rowIndex + 1, nextInCnt, nextExCnt, tuple(row))
                ans = max(ans, score)
            return ans
        return solve(0, introvertsCount, extrovertsCount, tuple([EMPTY] * n))
| {
"repo_name": "digiter/Arena",
"path": "1659-maximize-grid-happiness_TLE.py",
"copies": "1",
"size": "2814",
"license": "mit",
"hash": -646941700635765400,
"line_mean": 36.0263157895,
"line_max": 78,
"alpha_frac": 0.4004975124,
"autogenerated": false,
"ratio": 4.289634146341464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5190131658741464,
"avg_score": null,
"num_lines": null
} |
# 165. Compare Version Numbers
#
# Compare two version numbers version1 and version2.
# If version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
#
# You may assume that the version strings are non-empty and contain only digits and the . character.
# The . character does not represent a decimal point and is used to separate number sequences.
#
# For instance, 2.5 is not "two and a half" or "half way to version three",
# it is the fifth second-level revision of the second first-level revision.
#
# Here is an example of version numbers ordering:
#
# 0.1 < 1.1 < 1.2 < 13.37
#
# Credits:
# Special thanks to @ts for adding this problem and creating all test cases.
class Solution(object):
    def compareVersion(self, version1, version2):
        """Compare dotted version strings.

        :type version1: str
        :type version2: str
        :rtype: int -- 1 if version1 > version2, -1 if smaller, 0 if equal
        """
        # Pad the shorter version with zero components, then compare pairwise.
        parts1 = [int(piece) for piece in version1.split(".")]
        parts2 = [int(piece) for piece in version2.split(".")]
        width = max(len(parts1), len(parts2))
        parts1 += [0] * (width - len(parts1))
        parts2 += [0] * (width - len(parts2))
        for left, right in zip(parts1, parts2):
            if left != right:
                return 1 if left > right else -1
        return 0
    # http://bookshadow.com/weblog/2014/12/17/leetcode-compare-version-numbers/
    # Alternate version (overrides the one above): index into each token list,
    # treating missing components as 0.
    def compareVersion(self, version1, version2):
        tokens1 = version1.split(".")
        tokens2 = version2.split(".")
        for i in range(max(len(tokens1), len(tokens2))):
            left = int(tokens1[i]) if i < len(tokens1) else 0
            right = int(tokens2[i]) if i < len(tokens2) else 0
            if left > right:
                return 1
            if left < right:
                return -1
        return 0
if __name__ == '__main__':
    # NOTE: Python 2 only — print statements and sorted(cmp=...) below.
    print Solution().compareVersion("1.0", "1")
    print Solution().compareVersion("01", "1") # >>> int('01') == 1
    vers = ['0.1', '1.2', '1.1', '0.1', '13.37']
    # Uses compareVersion as a Python 2 cmp function to sort version strings.
    print sorted(vers, cmp=Solution().compareVersion)
| {
"repo_name": "gengwg/leetcode",
"path": "165_compare_version_numbers.py",
"copies": "1",
"size": "2196",
"license": "apache-2.0",
"hash": -4598336444710715000,
"line_mean": 29.9295774648,
"line_max": 100,
"alpha_frac": 0.5564663024,
"autogenerated": false,
"ratio": 3.425897035881435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9409710995961389,
"avg_score": 0.014530468464009344,
"num_lines": 71
} |
# 16695334890
import math
import euler
s = 0
# Project Euler 43: sum of pandigital numbers whose 3-digit substrings are
# divisible by 17, 13, 11, 7, 5, 3, 2 respectively. Enumerate multiples of
# each prime and chain them via their 2-digit overlaps (Python 2: `/` is
# integer division here).
for a in xrange(1, 999/17):
    aa = euler.digit_usage(a * 17, 3)
    for b in xrange(1, 999/13):
        if (a * 17) / 10 != (b * 13) % 100:
            continue
        for c in xrange(1, 999/11):
            if (b * 13) / 10 != (c * 11) % 100:
                continue
            for d in xrange(1, 999/7):
                if (c * 11) / 10 != (d * 7) % 100:
                    continue
                dd = euler.digit_usage(d * 7, 3)
                if aa & dd != 0:
                    continue
                for e in xrange(1, 999/5):
                    if (d * 7) / 10 != (e * 5) % 100:
                        continue
                    for f in xrange(1, 999/3):
                        if (e * 5) / 10 != (f * 3) % 100:
                            continue
                        for g in xrange(999/2):
                            if (f * 3) / 10 != (g * 2) % 100:
                                continue
                            gg = euler.digit_usage(g * 2, 3)
                            if aa & gg != 0 or dd & gg != 0:
                                continue
                            # The one digit not yet used becomes the leading digit.
                            nn = (aa | dd | gg) ^ int('1111111111', 2)
                            h = math.log(nn, 2)
                            if h != int(h):
                                continue
                            n = int('%d%03d%03d%03d' % (h, g * 2, d * 7, a * 17))
                            s += n
print s
| {
"repo_name": "higgsd/euler",
"path": "py/43.py",
"copies": "1",
"size": "1463",
"license": "bsd-2-clause",
"hash": 1447385772348309800,
"line_mean": 37.5,
"line_max": 81,
"alpha_frac": 0.3075871497,
"autogenerated": false,
"ratio": 3.8806366047745358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46882237544745353,
"avg_score": null,
"num_lines": null
} |
# 167. Two Sum II - Input array is sorted
#
# Given an array of integers that is already sorted in ascending order,
# find two numbers such that they add up to a specific target number.
#
# The function twoSum should return indices of the two numbers such that
# they add up to the target, where index1 must be less than index2.
# Please note that your returned answers (both index1 and index2) are not zero-based.
#
# You may assume that each input would have exactly one solution and you may not use the same element twice.
#
# Input: numbers={2, 7, 11, 15}, target=9
# Output: index1=1, index2=2
class Solution:
    # two pointers from both ends of the sorted array
    def twoSum(self, nums, target):
        lo, hi = 0, len(nums) - 1
        while lo != hi:
            total = nums[lo] + nums[hi]
            if total > target:
                hi -= 1
            elif total < target:
                lo += 1
            else:
                # indices are 1-based, not 0-based
                return [lo + 1, hi + 1]
        return []
    # hash map
    def twoSum(self, numbers, target):
        seen = {}
        for idx, value in enumerate(numbers):
            complement = target - value
            if complement in seen:
                # indices are 1-based, not 0-based
                return [seen[complement] + 1, idx + 1]
            seen[value] = idx
        return []
if __name__ == "__main__":
    # print() with a single argument behaves the same on Python 2 and 3.
    print(Solution().twoSum([2, 7, 11, 15], 9))
| {
"repo_name": "gengwg/leetcode",
"path": "167_two_sum_input_array_sorted.py",
"copies": "1",
"size": "1494",
"license": "apache-2.0",
"hash": -7301708719713219000,
"line_mean": 32.9545454545,
"line_max": 108,
"alpha_frac": 0.5702811245,
"autogenerated": false,
"ratio": 3.830769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9898686466757487,
"avg_score": 0.0004727777023488566,
"num_lines": 44
} |
# 1681. Minimum Incompatibility
# O(C(N, SIZE-1)*N + C(N-SIZE, SIZE-1)*(N-SIZE) + C(N-SIZE*2, SIZE-1)*(N-SIZE*2) + ...)
# About O(10^5)
class Solution:
    def minimumIncompatibility(self, nums: List[int], k: int) -> int:
        """Memoized search: repeatedly form the group containing the smallest
        remaining value; each group must hold distinct values."""
        nums.sort()
        n = len(nums)
        size = n // k
        INF = nums[-1] * n + 5
        if size == 1:
            return 0
        @cache
        def best(remaining):
            # remaining: sorted tuple of the values still to be grouped.
            if len(remaining) == size:
                # Last group: valid only if all values are distinct.
                return remaining[-1] - remaining[0] if len(set(remaining)) == size else INF
            lowest = remaining[0]
            rest = remaining[1:]
            # Partners must be distinct and differ from the group's minimum.
            pool = set(rest) - {lowest}
            result = INF
            for picked in combinations(sorted(pool), size - 1):
                # Remove one occurrence of each picked value from rest.
                leftover = []
                j = 0
                for v in rest:
                    if j < size - 1 and v == picked[j]:
                        j += 1
                        continue
                    leftover.append(v)
                result = min(result, best(tuple(leftover)) + picked[-1] - lowest)
            return result
        answer = best(tuple(nums))
        return -1 if answer == INF else answer
| {
"repo_name": "digiter/Arena",
"path": "1681-minimum-incompatibility.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": -8493537203627276000,
"line_mean": 30.0256410256,
"line_max": 87,
"alpha_frac": 0.4338842975,
"autogenerated": false,
"ratio": 3.723076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9655504344120047,
"avg_score": 0.0002913752913752914,
"num_lines": 39
} |
# 1687. Delivering Boxes from Storage to Ports
# Inspired from https://leetcode.com/problems/delivering-boxes-from-storage-to-ports/discuss/969560/Python-O(n).-DP-%2B-Monotonic-Queue-(Sliding-Window)-with-Full-Explanation
class Solution:
    def boxDelivering(
        self, boxes: List[List[int]], portsCount: int, maxBoxes: int, maxWeight: int
    ) -> int:
        """Minimum trips = cost of shipping everything 'together' plus the
        cheapest extra cost of splitting into feasible loads (lazy min-heap)."""
        N = len(boxes)
        ports = [b[0] for b in boxes]
        weights = [b[1] for b in boxes]
        # Ignores limitations and ships boxes [0, i] together.
        together = [0] * N
        together[0] = 2
        for y in range(1, N):
            together[y] = together[y - 1] + (0 if ports[y] == ports[y - 1] else 1)
        # The extra cost when splitting between y and y+1, so that we will
        # travel from ports[y] to the storage then to ports[y+1].
        splitCost = lambda y: 2 - (0 if y + 1 < N and ports[y] == ports[y + 1] else 1)
        prefixWeight = [0] * N
        prefixWeight[0] = weights[0]
        for y in range(1, N):
            prefixWeight[y] = prefixWeight[y - 1] + weights[y]
        # Adds a dummy value to make prefixWeight[-1] useful.
        prefixWeight.append(0)
        # Splits the last portion to satisfy the constraints, which adds extra costs.
        extra = [0] * (N + 1)
        # Min-heap of (extra cost, split position); infeasible entries are
        # popped lazily as the window of valid splits advances.
        heap = []
        heappush(heap, (extra[-1], -1))
        for y in range(0, N):
            while True:
                prevCost, prevSplit = heap[0]
                # Discard splits that would break the box-count or weight limit.
                if (y - prevSplit > maxBoxes) or (
                    prefixWeight[y] - prefixWeight[prevSplit] > maxWeight
                ):
                    heappop(heap)
                else:
                    break
            extra[y] = prevCost
            heappush(heap, (extra[y] + splitCost(y), y))
        return together[N - 1] + extra[N - 1]
| {
"repo_name": "digiter/Arena",
"path": "1687-delivering-boxes-from-storage-to-ports.py",
"copies": "1",
"size": "1812",
"license": "mit",
"hash": 8724266326574729000,
"line_mean": 38.3913043478,
"line_max": 174,
"alpha_frac": 0.5469094923,
"autogenerated": false,
"ratio": 3.425330812854442,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.946526817503579,
"avg_score": 0.001394426023730296,
"num_lines": 46
} |
__author__ = 'Libao Jin'
__date__ = 'December 14, 2015'
class Solution(object):
    def convertToTitle(self, n):
        """
        :type n: int
        :rtype: str -- Excel column title, e.g. 1 -> 'A', 27 -> 'AA'
        """
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        digits = []
        # Base-26 conversion with digits 1..26 instead of 0..25: a remainder
        # of 0 means 'Z', which borrows one from the quotient.
        while True:
            r = n % 26
            if r == 0:
                r = 26
                n -= 1
            n //= 26
            digits.append(r)
            if n == 0:
                break
        digits.reverse()
        title = [letters[d - 1] for d in digits]
        print(digits, title)
        return ''.join(title)
if __name__ == '__main__':
    # Exercise convertToTitle on hand-picked column numbers, including the
    # 26-boundary cases (26 -> Z, 27 -> AA, 52 -> AZ, 53 -> BA).
    solver = Solution()
    for number in (1, 25, 26, 27, 52, 53, 1048, 104800):
        print(solver.convertToTitle(number))
| {
"repo_name": "jinlibao/LeetCode-Solutions",
"path": "solutions/168_Excel_Sheet_Column_TItle.py",
"copies": "2",
"size": "1099",
"license": "mit",
"hash": 675759214301460400,
"line_mean": 21.4285714286,
"line_max": 48,
"alpha_frac": 0.4895359418,
"autogenerated": false,
"ratio": 3.6633333333333336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5152869275133334,
"avg_score": null,
"num_lines": null
} |
# 1697. Checking Existence of Edge Length Limited Paths
# Should be medium difficulty.
class Solution:
    def distanceLimitedPathsExist(
        self, n: int, edgeList: List[List[int]], queries: List[List[int]]
    ) -> List[bool]:
        """Offline union-find: answer queries in increasing limit order while
        merging edges shorter than the current limit."""
        # Tag each query with its original position so answers can be
        # written back after sorting.
        for pos, query in enumerate(queries):
            query.append(pos)
        queries.sort(key=lambda q: q[2])   # by limit
        edgeList.sort(key=lambda e: e[2])  # by distance
        # Disjoint sets over the nodes, with path compression.
        parent = list(range(n))
        def root(x):
            if parent[x] != x:
                parent[x] = root(parent[x])
            return parent[x]
        def union(a, b):
            ra, rb = root(a), root(b)
            # Attach one root under the other in a random direction.
            if randint(0, 1) == 0:
                parent[ra] = rb
            else:
                parent[rb] = ra
        answers = [False] * len(queries)
        nextEdge = 0
        for u, v, limit, pos in queries:
            # Merge every edge strictly shorter than this query's limit.
            while nextEdge < len(edgeList) and edgeList[nextEdge][2] < limit:
                union(edgeList[nextEdge][0], edgeList[nextEdge][1])
                nextEdge += 1
            answers[pos] = root(u) == root(v)
        return answers
| {
"repo_name": "digiter/Arena",
"path": "1697-checking-existence-of-edge-length-limited-paths.py",
"copies": "1",
"size": "1276",
"license": "mit",
"hash": -6153467610366099000,
"line_mean": 30.1219512195,
"line_max": 73,
"alpha_frac": 0.5039184953,
"autogenerated": false,
"ratio": 3.5444444444444443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9548362939744445,
"avg_score": 0,
"num_lines": 41
} |
#16 - cake thief.py
import pdb
# weighs 7 kilograms and has a value of 160 pounds
(7, 160)
# weighs 3 kilograms and has a value of 90 pounds
(3, 90)
# NOTE: the bare tuples above are no-op expressions, kept for illustration.
# Each cake type is (weight_kg, value_gbp); capacity is the bag size in kg.
cake_tuples = [(7, 160), (3, 90), (2, 15)]
capacity = 20
def max_duffel_bag_value(cake_tuples, capacity):
    """Greedily fill the bag with the best value-per-kilogram cakes first.

    cake_tuples -- list of (weight_kg, value_gbp) pairs
    capacity    -- bag capacity in kg
    Returns the total value packed.

    NOTE: greedy by density is not optimal for the general unbounded
    knapsack problem; it happens to give the expected answers for the
    sample inputs in this file.
    """
    # Rank cake types by value density (value / weight), best first.
    ranked = sorted(cake_tuples, key=lambda cake: cake[1] / cake[0], reverse=True)
    duffel_bag_value = 0
    for weight, value in ranked:
        if capacity <= 0:
            break
        # Take as many whole cakes of this type as still fit.
        cakes_added = int(capacity / weight)
        capacity -= cakes_added * weight
        duffel_bag_value += cakes_added * value
    # print() with a single argument works under both Python 2 and 3.
    print('Stole %s GBP of cake! For the Queen!' % (duffel_bag_value))
    return duffel_bag_value
print(max_duffel_bag_value(cake_tuples, capacity))
# returns 555 (6 of the middle type of cake and 1 of the last type of cake)
# Returns 0 if there is 0 capacity
print(max_duffel_bag_value(cake_tuples, 0))
# Returns 0 when no cake fits: capacity 4 < smallest weight 5
print(max_duffel_bag_value([(5,5)], 4))
| {
"repo_name": "bruno615/one-off-analysis",
"path": "Python/Inteview Cake/16 - cake thief.py",
"copies": "1",
"size": "1164",
"license": "mit",
"hash": 2834756345059349500,
"line_mean": 25.4545454545,
"line_max": 77,
"alpha_frac": 0.6589347079,
"autogenerated": false,
"ratio": 2.5866666666666664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37456013745666666,
"avg_score": null,
"num_lines": null
} |
''' 16-compute-ephemerids.py
=========================
AIM: Computes the ephemerids for all the cells that constitute the sky grids.
The cell must be visible for at least (period)-(max_interruptions)+t_acquisition
To be used by the three next scripts (17, 18, 19) to treat and plot.
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
- resources/saa_*.dat
variables: see section PARAMETERS (below)
OUTPUT: - <orbit_id>_misc/ephemerids_inter_<max_interruptions>_mag_<mag_max><_SAA?>.npz
CMD: python 16-compute-ephemerids.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: Not with real catalogue.
'''
###########################################################################
### INCLUDES
import numpy as np
import os
import matplotlib.cm as cm
import time
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
from resources.targets import *
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Altitude of the orbit in km
alt = 700
orbit_id = '6am_%d_5_conf4e' % alt
apogee=alt
perigee=alt
# First minute in data set !
minute_ini = 0
# Last minute to look for
minute_end = 1440 * 365
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Maximum interruption time tolerated [min] (acquisition time not included)
max_interruptions = 0 # overwritten below to period - 1
# Time to acquire a target [min]
t_acquisition = 0
# Take into account the stray light?
straylight = True
# Maximum visible magnitude
mag_max = 9.
# Maps a limiting V magnitude to the assumed stellar class of the targets.
conversion_V_class = {9: 'G', 12: 'K'}
# Include SAA ?
SAA = True
# This is a way to vary the results by multiplying the whole pst by a number.
# This is very easy as if the pst is multiplied by a constant, it can be taken out of the
# integral and only multiplying the flux is equivalent to re-running all the simulations
pst_factor = 1.
# Factor in the SL post treatment correction ?
SL_post_treat = True
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee,perigee)
# Allow interruptions up to one minute less than a full orbital period.
max_interruptions = period - 1
###########################################################################
### INITIALISATION
# Convert the ppm detection threshold for this magnitude into a flux fraction.
flux_threshold = param.ppm_thresholds[mag_max] * 1e-6
file_flux = 'flux_'
# changes the threshold by addition the acquisition time:
threshold_obs_time = period - max_interruptions + t_acquisition
print 'ORBIT ID:\t%s\nmax_interruptions:%d+%d min\nMAGNITIUDE:\t%02.1f\nPST factor\t%g\nSAA\t\t%s' % (orbit_id,max_interruptions,t_acquisition, mag_max,pst_factor,SAA)
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if not os.path.isdir(folder_figures):
	print '\tError: figure folder %s does not exists.' % (folder_figures)
	exit()
sys.stdout.write("Loading list of computed orbits...\t")
sys.stdout.flush()
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
#list_minutes = -1. * np.ones( ( np.shape(orbits)[0] + 2 ) * period )
list_minutes=[]
id_min = 0
times = np.loadtxt('resources/minute_table_%s.dat' % orbit_id, delimiter=',',dtype='Int32')
# Expand every computed orbit into the list of minutes it covers.
for ii, orbit_current in enumerate(orbits[:,0]):
	t_ini, t_end, a_ini, a_end = fast_orbit2times(times,orbit_current,orbit_id)
	for minute in range(a_ini, a_end+1):
		list_minutes.append(int(minute))
		id_min += 1
list_minutes=np.asarray(list_minutes)
list_minutes = list_minutes[list_minutes > -1]
# apply conditions
list_minutes = list_minutes[list_minutes >= minute_ini]
list_minutes = list_minutes[list_minutes <= minute_end]
minute_end = int(list_minutes[-1])
print 'Done.'
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
# Cell centres of the RA/dec sky grid.
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
visibility = np.zeros(np.shape(ra_grid))
visibility_save = np.zeros([np.shape(ra_grid)[0], np.shape(ra_grid)[1], int(period+2)])
workspace = np.zeros(np.shape(ra_grid))
data = np.zeros(np.shape(ra_grid))
numberofminutes = minute_end+1 - minute_ini
if SAA:
	SAA_data = np.loadtxt('resources/SAA_table_%s.dat' % orbit_id, delimiter=',')
	SAA_data = SAA_data[SAA_data[:,0]>= minute_ini]
	SAA_data = SAA_data[SAA_data[:,0]<= minute_end]
computed_orbits = np.loadtxt(folder_misc+orbits_file)[:,0]
stellar_type = conversion_V_class[mag_max]
stellar_flux = param.stellar_fluxes[stellar_type][mag_max]
aperture_aera_in_px = np.pi * param.aperture_size * param.aperture_size
############################################################################
### Load catalogue and assign them to the nearest grid point
message = 'Preparing the target list...\t\t'
sys.stdout.write(message)
sys.stdout.flush()
targets = []
# One synthetic target per grid cell, named after its coordinates.
for ra, dec in zip(np.ravel(ra_grid), np.ravel(dec_grid)):
	id_ra = find_nearest(ras, ra)
	id_dec = find_nearest(decs, dec)
	targets.append(target_list('%3.1f/%2.1f' % (ra,dec), ra, id_ra, dec, id_dec, mag_max, int(period+3), flux=stellar_flux))
message = 'Done, %d targets prepared.\n' % len(targets)
sys.stdout.write(message)
sys.stdout.flush()
# Apply the flux correction (SL post-treatment removal and the mirror efficiency)
corr_fact = 1.
if SL_post_treat: corr_fact *= (1. - param.SL_post_treat_reduction)
############################################################################
### Start the analysis
start = time.time()
# Prepare the arrays
visibility = np.zeros(np.shape(ra_grid))
#observations = np.zeros(len(name_cat)*)
workspace = np.zeros(np.shape(ra_grid))
#data = np.zeros(np.shape(ra_grid))
# Load the reference times
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
minutes_altitude = np.loadtxt('resources/minute_table_%s.dat' % orbit_id, delimiter=',',dtype='Int32')
# Set variables for printing the status
numberofminutes = minute_end+1 - minute_ini
lo = fast_minute2orbit(minutes_altitude,minute_end, orbit_id)
fo = fast_minute2orbit(minutes_altitude,minute_ini, orbit_id)
lp = -1
_, _, at_ini, _ = fast_orbit2times(minutes_altitude, fo, orbit_id)
first_computed = computed_orbits[computed_orbits<=fo][-1]
first_minute = minute_ini
last_minute = minute_end
# If the first wanted orbit was not computed, restart from the closest
# computed orbit before it.
if not fo == first_computed:
	_, _, minute_ini, _ = fast_orbit2times(minutes_altitude, first_computed, orbit_id)
#	print '1st referenced orbit: %d\twanted orbit: %d' % (first_computed, fo)
try:
	for minute in range(minute_ini,minute_end+1):#+int(period)):
		minute = int(minute)
		if SAA and fast_SAA(SAA_data, minute): SAA_at_minute = True
		else: SAA_at_minute = False
		orbit_current = fast_minute2orbit(minutes_altitude, minute, orbit_id)
		if orbit_current > lp:
			lp = orbit_current
			message = "Analysing orbit %d on %d...\t" % (lp,lo)
			sys.stdout.write( '\r'*len(message) )
			sys.stdout.write(message)
			sys.stdout.flush()
			_, len_orbit, atc_ini, atc_end = fast_orbit2times(minutes_altitude, orbit_current, orbit_id)
		try:
			ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
			S_sl *= pst_factor
			load = True
			minute_to_load = minute-atc_ini#+shift
		except IOError:
			# if there is nothing then well, do nothing ie we copy the past values
			# in which orbit are we ?
			# get the previous orbit computed and copy the stray light data of this orbit :
			#orbit_previous = orbits[orbits[:,0] < orbit_current][-1,0]
			#minute_replacement = minute - atc_ini + shift #+ at_ini
			minute_to_load = minute-atc_ini
			for obj in targets:
				try:
					obj.current_visibility = obj.visible_save[minute_to_load]
				except IndexError:
					print minute_to_load, minute_end, atc_end, minute
					raise IndexError()
				if SAA_at_minute and obj.current_visibility==1:
					obj.current_SAA_interruption+=1
			load = False
		# populate the visbility matrix
		# for ii in range(0, targets[0].CountObjects()):
		if load:
			for obj in targets:
				ra_ = obj.ra
				dec_ = obj.dec
				a = np.where(np.abs(ra_-ra)<ra_step/10)[0]
				b = np.where(np.abs(dec_-dec)<dec_step/10)[0]
				INT = np.intersect1d(a,b)
				assert np.size(INT)<2
				F_sl_for_obj = S_sl[INT] * corr_fact * param.SL_QE * aperture_aera_in_px
				F_star = obj.get_flux() * flux_threshold # global throughput already included in the stellar flux!
				# Target is invisible when outside the flux map or when the
				# stray light outshines the star's detection threshold.
				if np.shape(INT)[0] == 0 or (straylight and F_sl_for_obj > F_star):
					obj.visible_save[minute_to_load] = 0
					obj.current_visibility = 0
					continue
				else:
					#print S_sl[INT] * corr_fact * param.SL_QE * aperture_aera_in_px, S_sl[INT], corr_fact, param.SL_QE, aperture_aera_in_px, F_sl_for_obj, F_star
					obj.visible_save[minute_to_load] = 1
					if SAA_at_minute:
						obj.current_SAA_interruption+=1
						obj.current_visibility = 1
					else: obj.current_visibility = 1
		if minute == minute_ini:
			for obj in targets:
				obj.obs_time=obj.current_visibility
			continue
		for obj in targets: obj.Next(minute,threshold_obs_time)
except KeyboardInterrupt:
	print hilite('\nWARNING! USER STOPPED LOADING AT MINUTE %d' % minute,False,False)
	raise KeyboardInterrupt()
for ii in range(0, targets[0].CountObjects()): targets[ii].Next(minute,threshold_obs_time)
print
worthy_targets = []
for obj in targets: obj.PrepareSave()
# Keep only the targets that were visible at least once.
for ii in range(0, targets[0].CountObjects()):
	if np.shape(targets[ii].visible)[0] > 0:
		worthy_targets.append(targets[ii])
############################################################################
end = time.time()
elapsed_time = round((end-start)/60.,2)
sys.stdout.write( '\r'*len(message) )
sys.stdout.flush()
print "Time needed: %2.2f min" % elapsed_time
threshold_obs_time -= t_acquisition
# Encode the run configuration in the output file name.
if SAA: note = '_SAA'
else: note = ''
if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor
if SL_post_treat: note+= '_%4.3fSLreduction' % param.SL_post_treat_reduction
fname = 'ephemerids_inter_%d_mag_%3.1f%s' % (max_interruptions,mag_max,note)#,threshold_obs_time,fo,lo, note)
print 'Filed saved as %s' % fname
np.savez_compressed(folder_misc+fname, worthy_targets=worthy_targets)
| {
"repo_name": "kuntzer/SALSA-public",
"path": "16_compute_ephermerids.py",
"copies": "1",
"size": "10591",
"license": "bsd-3-clause",
"hash": -4849171423493973000,
"line_mean": 30.15,
"line_max": 167,
"alpha_frac": 0.6587668775,
"autogenerated": false,
"ratio": 2.889768076398363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4048534953898363,
"avg_score": null,
"num_lines": null
} |
# 16 GPIO of ESP board
# Pin objects are stored here once they have been initialized
pins = [None] * 16  # one slot per ESP GPIO
def pwmStart(arg):
    # Debug stub: log the request instead of driving hardware.
    # arg: [pin number, frequency]
    pin, freq = arg[0], arg[1]
    print("pwmStart", pin, freq)
def pwmSet(arg):
    # Debug stub. arg: [pin number, duty cycle 0-1023]
    pin, duty = arg[0], arg[1]
    print("pwmSet", pin, duty)
def pwmStop(arg):
    # Debug stub. arg: [pin number]
    print("pwmStop", arg[0])
# digital GPIO
def pinMode(arg):
    # Debug stub. arg: [pin number, mode]
    pin, mode = arg[0], arg[1]
    print("pinMode", pin, mode)
def digitalWrite(arg):
    # Debug stub. arg: [pin number, level (0 or 1)]
    pin, level = arg[0], arg[1]
    print("digitalWrite", pin, level)
def digitalRead(arg):
    # Debug stub. arg: [pin number, callback id]; always reports level 1.
    print("digitalRead", arg[0], "returns 1")
    return 1
callbacks = {
"pwmStart": {"call": pwmStart, "parameters": "pinNumber, frequency", "description": "Start PWM signal on pin with frequency"},
"pwmSet": {"call": pwmSet, "parameters": "pinNumber, duty", "description": "Set PWM duty cycle 0-1023"},
"pwmStop": {"call": pwmStop, "parameters": None, "description": "Stop PWM signal"},
"pinMode": {"call": pinMode, "parameters": "pinNumber, mode", "description": "Set pin to IN/OUT and PULL_UP/DOWN modes"},
"digitalWrite": {"call": digitalWrite, "parameters": "pinNumber, value", "description": "Set voltage level on pin, 1 -> 3.3V, 0 -> 0V"},
"digitalRead": {"call": digitalRead, "parameters": "pinNumber, callbackId", "description": "Read digital value from pin. Callback Id will be mirrored"}
} | {
"repo_name": "nodesign/micropython-electrolink",
"path": "firmware/modules/electroGpioDebug.py",
"copies": "1",
"size": "1598",
"license": "apache-2.0",
"hash": -5583718346052110000,
"line_mean": 37.0714285714,
"line_max": 159,
"alpha_frac": 0.6157697121,
"autogenerated": false,
"ratio": 3.2024048096192383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9213344456789295,
"avg_score": 0.020966012985988564,
"num_lines": 42
} |
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
from wallaby import ao
from wallaby import digital
from wallaby import analog
from wallaby import seconds
from wallaby import a_button_clicked
from wallaby import b_button_clicked
import constants as c
# reads the right button
def getRBUTTON():
    # digital() samples the touch sensor wired to port c.RBUTTON
    return digital (c.RBUTTON)
# reads the ET sensor
def getET():
    # analog() returns the raw distance reading from the ET rangefinder
    return analog(c.ET)
# stop program for testing
def DEBUG():
    # ao() turns all motors off before halting
    ao()
    print 'Program stop for DEBUG\nSeconds: ', seconds() - c.startTime
    exit(0)
def currentTime():
    # elapsed run time relative to c.startTime
    print 'Current time: ', seconds() - c.startTime
def onBlack(port):
    # a tophat reading above the threshold means the sensor sees black
    return analog(port) > c.topHatMidValue
def onBlackLineFollower():
    return analog(c.STARBOARD_TOPHAT) > c.topHatMidValue
def crossBlack(port):
while not onBlack(port): # wait for black
pass
while onBlack(port): # wait for white
pass
def waitForButton():
    """Prompt the operator and block until the right button is pressed."""
    print("Press the right button to start...")
    while not getRBUTTON():
        pass  # spin until the button reads pressed
def atArmLength():
    """True when the ET sensor reading exceeds the arm-length threshold."""
    return analog(c.ET) > c.armLength
def atTest(port=4, threshold=1800):
    """Debug helper: True when the analog sensor on *port* reads above *threshold*.

    Generalized from the original hard-coded values; the defaults (port 4,
    threshold 1800) preserve the original no-argument behavior exactly.
    """
    return analog(port) > threshold
def atCeilingHeight():
    """True when the ET sensor reading exceeds the ceiling-height threshold."""
    return analog(c.ET) > c.ceilingHeight
def testET():
x = analog(c.ET)
print("ET = ", x)
def wait4light():
    """Repeat start-light calibration until it succeeds, then wait for the light."""
    while not calibrate(c.STARTLIGHT):
        pass  # operator retries calibration until a good pair of readings
    wait4(c.STARTLIGHT)
def calibrate(port):
    """Two-point calibration of the start light connected to *port*.

    The operator presses A with the light on and B with the light off; the
    analog value is sampled at each press.  On success, half the on/off
    spread is stored in c.startLightThresh and True is returned; any
    suspicious reading returns False.  Holding digital(13) during either
    button wait aborts the program via DEBUG().
    """
    print "Press A button with light on"
    while not a_button_clicked():
        if digital(13):
            DEBUG()
    lightOn = analog(port)
    print "On value =", lightOn
    # a lit sensor should read low; a high value means a bad "on" sample
    if lightOn > 200:
        print "Bad calibration"
        return False
    print "Press B button with light off"
    while not b_button_clicked():
        if digital(13):
            DEBUG()
    lightOff = analog(port)
    print "Off value =", lightOff
    # a dark sensor should read high; a low value means a bad "off" sample
    if lightOff < 3000:
        print "Bad calibration"
        return False
    # require a wide enough spread between the two samples to be usable
    if (lightOff - lightOn) < 2000:
        print "Bad calibration"
        return False
    c.startLightThresh = (lightOff - lightOn) / 2
    print "Good calibration! ", c.startLightThresh
    return True
def wait4(port):
print "waiting for light!! "
while analog(port) > c.startLightThresh:
pass
def testSensors():
    """Sanity-check both tophat sensors before a run.

    If either sensor already reports black (unplugged sensors read high,
    which looks like black), print a diagnostic and halt via DEBUG().
    """
    if onBlack(c.STARBOARD_TOPHAT):
        print "Problem with outrigger tophat."
        print "Check for unplugged tophat or bad robot setup"
        DEBUG()
    if onBlack(c.LINE_FOLLOWER):
        print "Problem with center tophat."
        print "Check for unplugged tophat or bad robot setup"
        DEBUG()
| {
"repo_name": "gras/16-TaterBot",
"path": "src/sensors.py",
"copies": "1",
"size": "2682",
"license": "mit",
"hash": -292232812248376400,
"line_mean": 22.1621621622,
"line_max": 71,
"alpha_frac": 0.6118568233,
"autogenerated": false,
"ratio": 3.6489795918367345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47608364151367344,
"avg_score": null,
"num_lines": null
} |
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import constants as c
from sensors import onBlack
from sensors import atArmLength
from sensors import getET
from wallaby import motor
from wallaby import msleep
from wallaby import seconds
# tests motors
def testMotors():
    """Drive-train self test: drive onto the line, then pivot both ways across it."""
    drive(100, 100)
    while not onBlack(c.LINE_FOLLOWER): # wait to see line
        pass
    stop()
    drive(75, 0)  # pivot until the starboard tophat also sees the line
    while not onBlack(c.STARBOARD_TOPHAT):
        pass
    stop()
    drive(-75, 0)  # pivot back until the center tophat sees the line again
    while not onBlack(c.LINE_FOLLOWER):
        pass
    msleep(100)
    stop()
def binGrabUp():
    """Raise the bin grabber for 600 ms, then hold it with light power."""
    driveMotorTimed(c.BIN, 55, 600)
    driveMotor(c.BIN, 10)  # small holding torque keeps the grabber up
def binGrabDown():
    """Lower the bin grabber for 500 ms, then hold it with light power."""
    driveMotorTimed(c.BIN, -100, 500)
    driveMotor(c.BIN, -10)  # small holding torque keeps the grabber down
def testET():
    """Interactive ET check: wiggle the bin grabber while a hand blocks the sensor.

    Assumes an ET reading >= 2000 means something is in front of the sensor
    (matches the BLOCKED! message) -- confirm against the sensor's range.
    """
    print("Put your hand in front of ET")
    i = 0  # toggles the grabber between up (after 1) and down (after 0)
    # while blocked: announce and cycle the grabber down then up
    while getET() >= 2000:
        print "BLOCKED!"
        msleep(333)
        binGrabDown()
        msleep(300)
        binGrabUp()
        msleep(300)
    # once clear: alternate the grabber up/down until blocked again
    while getET() < 2000:
        if i > 0:
            binGrabUp()
            i = 0
        else:
            binGrabDown()
            i = 1
        msleep(300)
    binGrabDown()
    driveTimed(-100, -100, 1000)
    stop()
# start left & right motors
def drive(left, right):
    """Set the left and right drive motors to the given power levels."""
    motor(c.LMOTOR, left)
    motor(c.RMOTOR, right)
# power and time of motors
def driveTimed(left, right, time):
    """Drive at (left, right) power for *time* milliseconds, then cut power."""
    drive(left, right)
    msleep(time)
    drive(0, 0)
def driveTimedNoStop(left, right, time):
    """Drive at (left, right) power for *time* milliseconds, leaving motors running.

    Bug fix: the body was a copy-paste of driveTimed and ended with
    drive(0, 0), which stopped the motors -- contradicting the function's
    name.  The trailing stop has been removed so the motors keep running.
    """
    drive(left, right)
    msleep(time)
def driveMotorTimed(motorport, speed, time):
    """Run one motor at *speed* for *time* ms; the motor is left running."""
    motor(motorport, speed)
    msleep(time)
def driveMotor(motorport, speed):
    """Set a single motor to *speed*."""
    motor(motorport, speed)
def driveTilLineStarboard(left, right):
    """Drive at (left, right) until the starboard tophat sees black."""
    driveTilLine(c.STARBOARD_TOPHAT, left, right)
def driveTilLine(port, left, right):
    """Drive at (left, right) until the tophat on *port* sees black, then stop."""
    drive(left, right)
    while not onBlack(port):
        pass  # keep driving until the line is detected
    stop()
# Follows black line on right for specified amount of time
def timedLineFollowRight(port, time):
    """Follow the black line (kept on the right) for *time* seconds."""
    deadline = seconds() + time
    while seconds() < deadline:
        if onBlack(port):
            driveTimed(90, 20, 20)  # on the line: steer one way
        else:
            driveTimed(20, 90, 20)  # off the line: steer back
        msleep(10)
# Follows black line on right for specified amount of time BACKWARDS....
def timedLineFollowRightBack(port, time):
    """Follow the black line (kept on the right) in reverse for *time* seconds."""
    deadline = seconds() + time
    while seconds() < deadline:
        if onBlack(port):
            driveTimed(-90, -20, 20)  # on the line: steer one way (reversed)
        else:
            driveTimed(-20, -90, 20)  # off the line: steer back (reversed)
        msleep(10)
# Follows black line on left for specified amount of time
def timedLineFollowLeft(port, time):
    """Follow the black line (kept on the left) for *time* seconds."""
    deadline = seconds() + time
    while seconds() < deadline:
        if onBlack(port):
            driveTimed(50, 90, 20)  # on the line: steer one way
        else:
            driveTimed(90, 50, 20)  # off the line: steer back
        msleep(10)
def timedLineFollowBack(port, time):
    """Follow the black line in reverse for *time* seconds."""
    deadline = seconds() + time
    while seconds() < deadline:
        if not onBlack(port):
            driveTimed(-20, -90, 20)  # off the line: steer back (reversed)
        else:
            driveTimed(-90, -20, 20)  # on the line: steer one way (reversed)
        msleep(10)
def timedLineFollowRightSmooth(port, time):
    """Follow the line (on the right) with gentler corrections for *time* seconds."""
    deadline = seconds() + time
    while seconds() < deadline:
        if onBlack(port):
            driveTimed(40, 20, 20)  # low-power correction one way
        else:
            driveTimed(20, 40, 20)  # low-power correction back
        msleep(10)
def timedLineFollowLeftSmooth(port, time):
    """Follow the line (on the left) with gentler corrections for *time* seconds."""
    deadline = seconds() + time
    while seconds() < deadline:
        if not onBlack(port):
            driveTimed(40, 20, 20)  # low-power correction one way
        else:
            driveTimed(20, 40, 20)  # low-power correction back
        msleep(10)
def lineFollowUntilEndLeft(port):
    """Follow the line (on the left) until it is lost for 10 consecutive steps."""
    misses = 0  # consecutive steps without seeing black
    while misses < 10:
        if onBlack(port):
            misses = 0
            driveTimed(50, 90, 20)
        else:
            misses += 1
            driveTimed(90, 50, 20)
def lineFollowUntilEndLeft2(port):
    """As lineFollowUntilEndLeft, but tolerates 20 consecutive misses before ending."""
    misses = 0  # consecutive steps without seeing black
    while misses < 20:
        if onBlack(port):
            misses = 0
            driveTimed(50, 90, 20)
        else:
            misses += 1
            driveTimed(90, 50, 20)
def lineFollowUntilEndRight(port):
    """Follow the line (on the right) until it is lost for 10 consecutive steps."""
    misses = 0  # consecutive steps without seeing black
    while misses < 10:
        if onBlack(port):
            misses = 0
            driveTimed(90, 50, 20)
        else:
            misses += 1
            driveTimed(50, 90, 20)
def turnUntilBlack(port, left, right):
    """Turn at (left, right) power until the sensor on *port* sees black, then stop."""
    drive(left, right)
    while not onBlack(port):
        pass  # keep turning until the line is detected
    stop()
# Follows black line on right until under or not under ceiling
# if findCeiling is true, will go until ET finds ceiling
def ETLineFollowRight(port, findCeiling):
    """Line-follow (line on the right) until the ET arm-length state matches.

    With findCeiling truthy, runs until atArmLength() becomes true;
    otherwise runs until atArmLength() becomes false (XOR loop condition).
    """
    while atArmLength() ^ findCeiling :
        if onBlack(port):
            driveTimed(100, 50, 20)  # on the line: steer one way
        else:
            driveTimed(50, 100, 20)  # off the line: steer back
        msleep(10)
# stop all motors
def stop():
    """Cut power to both drive motors."""
    drive(0, 0)
| {
"repo_name": "gras/16-TaterBot",
"path": "src/drive.py",
"copies": "1",
"size": "4870",
"license": "mit",
"hash": 790785920122633600,
"line_mean": 22.1138613861,
"line_max": 72,
"alpha_frac": 0.5435318275,
"autogenerated": false,
"ratio": 3.4785714285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9455594015571911,
"avg_score": 0.013301848099903498,
"num_lines": 202
} |
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import constants as c
from wallaby import set_servo_position
from wallaby import msleep
from wallaby import enable_servos
from wallaby import get_servo_position
from wallaby import ao
# tests servos
def testServos():
    """Exercise the arm, claw, and outrigger servos through a fixed test sequence.

    Positions are preset before enable_servos() so the servos start from a
    known pose, then each joint is swept in turn at a moderate speed.
    """
    print "Testing servos"
    set_servo_position(c.ARM, c.armUp)
    set_servo_position(c.CLAW, c.clawClose)
    set_servo_position(c.OUTRIGGER, c.outriggerIn)
    enable_servos()
    msleep(1000)
    moveClaw(c.clawOpen, 25)
    msleep(500)
    moveClaw(c.clawClose, 25)
    msleep(500)
    moveArm(c.armBack, 15)
    msleep(500)
    moveOutrigger(c.outriggerOut, 15)
    msleep(500)
    moveArm(c.armFront, 15)
    moveClaw(c.clawMid, 25)
    msleep(500)
# temp
def tempServos():
    """Preset servos to the armUp/clawClose/outriggerIn pose and enable them."""
    set_servo_position(c.ARM, c.armUp)
    set_servo_position(c.CLAW, c.clawClose)
    set_servo_position(c.OUTRIGGER, c.outriggerIn)
    enable_servos()
def deliverPoms():
    """Swing the arm back, pause, then open the claw to its mid position."""
    moveArm(c.armBack, 25)
    msleep(500)
    moveClaw(c.clawMid, 25)
def moveOutrigger(endPos, speed=10):
    """Sweep the outrigger servo to *endPos* at the given step *speed*."""
    moveServo(c.OUTRIGGER, endPos, speed)
def moveArm(endPos, speed=10):
    """Sweep the arm servo to *endPos* at the given step *speed*."""
    moveServo(c.ARM, endPos, speed)
def moveClaw(endPos, speed=10):
    """Sweep the claw servo to *endPos* at the given step *speed*."""
    moveServo(c.CLAW, endPos, speed)
def moveServo(servo, endPos, speed=10):
    """Sweep *servo* from its current position to *endPos* in steps of *speed*.

    Each intermediate position is held for 10 ms, and the exact endPos is
    set at the end so the servo always finishes at the requested target.
    """
    # speed of 1 is slow
    # speed of 2000 is fast
    # speed of 10 is the default
    now = get_servo_position(servo)
    # NOTE(review): these bounds checks validate the servo's *current*
    # position, not the requested endPos -- confirm that is intended.
    if now > 2048 :
        PROGRAMMER_ERROR ("Servo setting too large")
    if now < 0 :
        PROGRAMMER_ERROR ("Servo setting too small")
    if now > endPos:
        speed = -speed  # step downward when the target is below us
    for i in range (now, endPos, speed):
        set_servo_position(servo, i)
        msleep(10)
    set_servo_position(servo, endPos)
    msleep(10)
def PROGRAMMER_ERROR(msg) :
ao()
print msg
exit()
| {
"repo_name": "gras/16-TaterBot",
"path": "src/servos.py",
"copies": "1",
"size": "1902",
"license": "mit",
"hash": 287913699535163460,
"line_mean": 22.4615384615,
"line_max": 52,
"alpha_frac": 0.6366982124,
"autogenerated": false,
"ratio": 2.8905775075987843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4027275719998784,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.