# 05_thermometer_plus_f.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
from Tkinter import * # tkinter provides the graphical user interface (GUI)
import RPi.GPIO as GPIO
import time, math
C = 0.38 # uF - Tweak this value around 0.33 to improve accuracy
R1 = 1000 # Ohms
B = 3800.0 # The thermistor constant - change this for a different thermistor
R0 = 1000.0 # The resistance of the thermistor at 25C - change for a different thermistor
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses a thermistor, a component whose resistance varies with the temperature.
# To measure its resistance, the code records the time it takes for a capacitor to fill
# when supplied by a current passing through the resistor. The lower the resistance the faster
# it fills up.
#
# You can think of a capacitor as a tank of electricity, and as it fills with charge, the voltage
# across it increases. We cannot measure that voltage directly, because the Raspberry Pi
# does not have an analog to digital convertor (ADC or analog input). However, we can time how long it
# takes for the capacitor to fill with charge to the extent that it gets above the 1.65V or so
# that counts as being a high digital input.
#
# For more information on this technique take a look at:
# learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi
# The code here is based on that in the Raspberry Pi Cookbook (Recipes 12.1 to 12.3)
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
buzzer_pin = 24
GPIO.setup(buzzer_pin, GPIO.OUT)
set_temp = 25 # The temperature above which the buzzer will sound
# Capacitors of this type are only accurate to about +-10% of their stated value, and
# other components will not be exactly the value stated on the package;
# changing the value of C (defined above) will help compensate for this.
# Tweak C (keep it close to its nominal value) until this project agrees with a
# thermometer you trust.
# To be honest, it's never going to be very accurate as an absolute thermometer,
# but the value of temp should increase when you hold the thermistor between your fingers to
# warm it up.
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000 # microseconds
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
t = charge_time()
discharge()
return t
# Convert the time taken to charge the capacitor into a value of resistance
# To reduce errors, do it lots of times and take the average.
def read_resistance():
n = 10
total = 0;
for i in range(0, n):
total = total + analog_read()
t = total / float(n)
T = t * 0.632 * 3.3
r = (T / C) - R1
return r
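# The constants used in read_resistance above are empirically tuned, but the underlying
# idea is plain RC charging: a capacitor charging towards 3.3V crosses the ~1.65V digital
# threshold after roughly t = R * C * ln(2) seconds, so the measured charge time grows in
# proportion to the resistance. The sketch below is not called anywhere and its name is
# made up for illustration; it shows what a purely theoretical conversion would look like,
# with t in microseconds and C in microfarads.
def estimate_resistance_from_time(t_us):
    # total series resistance implied by the charge time, minus the fixed 1k resistor
    return t_us / (0.693 * C) - R1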
def read_temp_c():
R = read_resistance()
t0 = 273.15 # 0 deg C in K
t25 = t0 + 25.0 # 25 deg C in K
# Steinhart-Hart equation - Google it
inv_T = 1/t25 + 1/B * math.log(R/R0)
T = (1/inv_T - t0)
return T
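# The two lines above are the simplified (B-parameter) form of the Steinhart-Hart
# equation: 1/T = 1/T25 + (1/B) * ln(R/R0), with temperatures in kelvin.
# A quick worked example with made-up numbers, just to show the direction of the
# relationship: with B = 3800 and R0 = 1000 ohms at 25C, a reading of R = 900 ohms
# gives 1/T = 1/298.15 + ln(0.9)/3800, so T is about 300.6 K, roughly 27.5C.
# The thermistor's resistance falls as it warms up.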
# sound the buzzer at a certain pitch (in Hz) for a duration in seconds
def buzz(pitch, duration):
period = 1.0 / pitch # period of cycle
delay = period / 2 # delay half of period (2 delays per cycle)
cycles = int(duration * pitch) # total number of cycles needed for duration specified
for i in range(cycles): # turn buzzer on and off for number of cycles needed
GPIO.output(buzzer_pin, True)
time.sleep(delay)
GPIO.output(buzzer_pin, False)
time.sleep(delay)
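# For example, the call buzz(500, 0.3) used below gives a period of 1/500 s = 2 ms,
# so the buzzer pin is held high for 1 ms and low for 1 ms, and this is repeated
# int(0.3 * 500) = 150 times - roughly 0.3 seconds of a 500 Hz square wave.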
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
self.master = master
frame = Frame(master)
frame.pack()
label = Label(frame, text='Temp F', font=("Helvetica", 32))
label.grid(row=0)
self.reading_label = Label(frame, text='12.34', font=("Helvetica", 110))
self.reading_label.grid(row=1)
self.update_reading()
# Update the temperature reading
def update_reading(self):
temp_c = read_temp_c()
temp_f = temp_c * 9.0 / 5.0 + 32.0
if temp_c > set_temp:
buzz(500, 0.3)
reading_str = "{:.2f}".format(temp_f)
self.reading_label.configure(text=reading_str)
self.master.after(500, self.update_reading)
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('Thermometer')
app = App(root)
root.geometry("400x300+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "05_thermometer_plus_f.py",
"copies": "1",
"size": "5246",
"license": "mit",
"hash": -610329300623059700,
"line_mean": 34.6870748299,
"line_max": 102,
"alpha_frac": 0.6850934045,
"autogenerated": false,
"ratio": 3.272613849033063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9395508063876399,
"avg_score": 0.01243983793133291,
"num_lines": 147
} |
# 05_thermometer_plus.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
from Tkinter import * # tkinter provides the graphical user interface (GUI)
import RPi.GPIO as GPIO
import time, math
C = 0.38 # uF - Tweak this value around 0.33 to improve accuracy
R1 = 1000 # Ohms
B = 3800.0 # The thermistor constant - change this for a different thermistor
R0 = 1000.0 # The resistance of the thermistor at 25C - change for a different thermistor
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses a thermistor, a component whose resistance varies with the temperature.
# To measure its resistance, the code records the time it takes for a capacitor to fill
# when supplied by a current passing through the resistor. The lower the resistance the faster
# it fills up.
#
# You can think of a capacitor as a tank of electricity, and as it fills with charge, the voltage
# across it increases. We cannot measure that voltage directly, because the Raspberry Pi
# does not have an analog to digital convertor (ADC or analog input). However, we can time how long it
# takes for the capacitor to fill with charge to the extent that it gets above the 1.65V or so
# that counts as being a high digital input.
#
# For more information on this technique take a look at:
# learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi
# The code here is based on that in the Raspberry Pi Cookbook (Recipes 12.1 to 12.3)
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
buzzer_pin = 24
GPIO.setup(buzzer_pin, GPIO.OUT)
set_temp = 25 # The temperature above which the buzzer will sound
# Capacitors of this type are only accurate to about +-10% of their stated value, and
# other components will not be exactly the value stated on the package;
# changing the value of C (defined above) will help compensate for this.
# Tweak C (keep it close to its nominal value) until this project agrees with a
# thermometer you trust.
# To be honest, it's never going to be very accurate as an absolute thermometer,
# but the value of temp should increase when you hold the thermistor between your fingers to
# warm it up.
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000 # microseconds
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
t = charge_time()
discharge()
return t
# Convert the time taken to charge the capacitor into a value of resistance
# To reduce errors, do it lots of times and take the average.
def read_resistance():
n = 10
total = 0;
for i in range(0, n):
total = total + analog_read()
t = total / float(n)
T = t * 0.632 * 3.3
r = (T / C) - R1
return r
def read_temp_c():
R = read_resistance()
t0 = 273.15 # 0 deg C in K
t25 = t0 + 25.0 # 25 deg C in K
# Steinhart-Hart equation - Google it
inv_T = 1/t25 + 1/B * math.log(R/R0)
T = (1/inv_T - t0)
return T
# sound the buzzer at a certain pitch (in Hz) for a duration in seconds
def buzz(pitch, duration):
period = 1.0 / pitch # period of cycle
delay = period / 2 # delay half of period (2 delays per cycle)
cycles = int(duration * pitch) # total number of cycles needed for duration specified
for i in range(cycles): # turn buzzer on and off for number of cycles needed
GPIO.output(buzzer_pin, True)
time.sleep(delay)
GPIO.output(buzzer_pin, False)
time.sleep(delay)
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
self.master = master
frame = Frame(master)
frame.pack()
label = Label(frame, text='Temp C', font=("Helvetica", 32))
label.grid(row=0)
self.reading_label = Label(frame, text='12.34', font=("Helvetica", 110))
self.reading_label.grid(row=1)
self.update_reading()
# Update the temperature reading
def update_reading(self):
temp_c = read_temp_c()
if temp_c > set_temp:
buzz(500, 0.3)
reading_str = "{:.2f}".format(temp_c)
self.reading_label.configure(text=reading_str)
self.master.after(500, self.update_reading)
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('Thermometer')
app = App(root)
root.geometry("400x300+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "05_thermometer_plus.py",
"copies": "1",
"size": "5201",
"license": "mit",
"hash": -6303971557562073000,
"line_mean": 34.6232876712,
"line_max": 102,
"alpha_frac": 0.6875600846,
"autogenerated": false,
"ratio": 3.2896900695762175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4477250154176218,
"avg_score": null,
"num_lines": null
} |
#06_01_hangman_file
import random
f = open('hangman_words.txt')
words = f.read().splitlines()
f.close()
lives_remaining = 14
guessed_letters = ''
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win! Well Done!')
break
if lives_remaining == 0:
print('You are Hung!')
print('The word was: ' + word)
break
def pick_a_word():
word_position = random.randint(0, len(words) - 1)
return words[word_position]
def get_guess(word):
print_word(word)
print('Lives Remaining: ' + str(lives_remaining))
guess = input(' Guess a letter or whole word?')
return guess
def print_word(word):
display_word = ''
for letter in word:
if guessed_letters.find(letter) > -1:
display_word = display_word + letter
else:
display_word = display_word + '-'
print(display_word)
def process_guess(guess, word):
if len(guess) > 1:
return whole_word_guess(guess, word)
else:
return single_letter_guess(guess, word)
def whole_word_guess(guess, word):
global lives_remaining
if guess == word:
return True
else:
lives_remaining = lives_remaining - 1
def single_letter_guess(guess, word):
global guessed_letters
global lives_remaining
if word.find(guess) == -1:
lives_remaining = lives_remaining - 1
guessed_letters = guessed_letters + guess
if all_letters_guessed(word):
return True
def all_letters_guessed(word):
for letter in word:
if guessed_letters.find(letter) == -1:
return False
return True
play() | {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "06_01_hangman_file.py",
"copies": "1",
"size": "1522",
"license": "mit",
"hash": 9201748876312018000,
"line_mean": 19.8630136986,
"line_max": 50,
"alpha_frac": 0.6833114323,
"autogenerated": false,
"ratio": 2.877126654064272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4060438086364272,
"avg_score": null,
"num_lines": null
} |
#06_02_hangman_file_try
import random
words_file = 'hangman_words.txt'
try:
f = open(words_file)
words = f.read().splitlines()
f.close()
except IOError:
print("Cannot find file: " + words_file)
exit()
lives_remaining = 14
guessed_letters = ''
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win! Well Done!')
break
if lives_remaining == 0:
print('You are Hung!')
print('The word was: ' + word)
break
def pick_a_word():
word_position = random.randint(0, len(words) - 1)
return words[word_position]
def get_guess(word):
print_word(word)
print('Lives Remaining: ' + str(lives_remaining))
guess = input(' Guess a letter or whole word?')
return guess
def print_word(word):
display_word = ''
for letter in word:
if guessed_letters.find(letter) > -1:
display_word = display_word + letter
else:
display_word = display_word + '-'
print(display_word)
def process_guess(guess, word):
if len(guess) > 1:
return whole_word_guess(guess, word)
else:
return single_letter_guess(guess, word)
def whole_word_guess(guess, word):
global lives_remaining
if guess == word:
return True
else:
lives_remaining = lives_remaining - 1
def single_letter_guess(guess, word):
global guessed_letters
global lives_remaining
if word.find(guess) == -1:
lives_remaining = lives_remaining - 1
guessed_letters = guessed_letters + guess
if all_letters_guessed(word):
return True
def all_letters_guessed(word):
for letter in word:
if guessed_letters.find(letter) == -1:
return False
return True
play() | {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "06_02_hangman_file_try.py",
"copies": "1",
"size": "1624",
"license": "mit",
"hash": -3523331003671620000,
"line_mean": 19.8333333333,
"line_max": 50,
"alpha_frac": 0.6828817734,
"autogenerated": false,
"ratio": 2.854130052724077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4037011826124077,
"avg_score": null,
"num_lines": null
} |
# 06/05/2017
'''
1. Slim the enable1 dictionary so that it only contains "not-embedded" words;
This is done through the not_embedded_wordlist function and takes forever (~2h) to run.
2. Create an initial solution using a simple algorithm in the embed_word1 function. Length ~390.
3. Create an initial solution using an even simpler algorithm (create_solution2). Length ~275.
4. ????
'''
import os.path
import time
from string import ascii_lowercase
def get_substrings(string, n):
'''Returns all of string's substrings of length n'''
result = []
for i in range(1+len(string)-n):
result.append(string[i:i+n])
return result
def is_embedded(string_a, string_b):
'''
Checks if string_a is embedded in string_b.
[WIP] May or may not return the interval in which string_a is embedded in b.
'''
i = 0
for j in range(0, len(string_b)):
if string_a[i] == string_b[j]:
#if i == 0:
# first_index = j
i += 1
if i == len(string_a):
return True
#return (first_index, j)
return False
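# A couple of illustrative calls (made-up strings, not from the puzzle input):
# is_embedded("ace", "abcde") returns True because 'a', 'c', 'e' appear in order,
# while is_embedded("aec", "abcde") returns False because the order is broken.
# In other words, this checks whether string_a is a subsequence of string_b.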
def not_embedded_wordlist(all_words):
'''Slims down a dictionary so that only "un-embedded" words are present.'''
print("Slimming down your dictionary...")
print("Initial dictionary size: {} words".format(len(all_words)))
all_words = sorted(all_words, key=len, reverse=True)
not_embedded = []
start = time.process_time()
for sub_word in all_words:
flag = False
for word in not_embedded:
if is_embedded(sub_word, word) != False:
flag = True
break
if not flag:
not_embedded.append(sub_word)
elapsed = time.process_time() - start
print("New dictionary size: {} words".format(len(not_embedded)))
print("This took {} seconds.".format(elapsed))
return not_embedded
def embed_word1(word, solution):
'''
Returns a new solution so that word is now embedded.
For both the word and its reverse, it finds the characters that cannot already be
matched as a subsequence of the current solution (and the positions where they would
need to go), then inserts whichever of the two sets of missing characters is smaller.
'''
missing_f, missing_b = [], []
for w, s, m in zip([word, word[::-1]], [solution, solution[::-1]], [missing_f, missing_b]):
curr_index = 0
for char in w:
result = s.find(char, curr_index)
if result == -1:
m.append((char, curr_index))
else:
curr_index = result + 1
missing = missing_f if len(missing_f) < len(missing_b) else missing_b
new_solution = list(solution) if len(missing_f) < len(missing_b) else list(solution[::-1])
already_included = 0
for chars, pos in missing:
new_solution.insert(pos+already_included, chars)
already_included += 1
return ''.join(new_solution) if len(missing_f) < len(missing_b) else ''.join(new_solution[::-1])
def create_solution1(wordlist):
'''Creates a solution from the given wordlist.'''
solution = ""
for word in wordlist:
if not is_embedded(word, solution):
solution = embed_word1(word, solution)
return solution
def create_solution2(wordlist):
'''Creates a solution from the given wordlist.'''
solution = []
while not test_valid(''.join(solution), wordlist):
alpha_count = {l : 0 for l in ascii_lowercase}
for word in wordlist:
alpha_count[word[0]] += 1
top_letter = max(alpha_count, key=alpha_count.get)
solution.append(top_letter)
for i, word in enumerate(wordlist):
if word[0] == top_letter:
if len(word) == 1:
wordlist.remove(word)
else:
wordlist[i] = word[1:]
return ''.join(solution)
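# Intuition for create_solution2, with a tiny made-up wordlist: given
# ['cat', 'car', 'dog'], 'c' is the most common first letter, so 'c' is appended to
# the solution and 'cat'/'car' are trimmed to 'at'/'ar' while 'dog' is left alone.
# Each pass appends one letter and trims the words that start with it, and the loop
# stops once test_valid() passes - normally when every word has been fully consumed.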
def test_valid(solution, wordlist):
'''Tests whether or not every word in the wordlist is embedded in the solution.'''
for word in wordlist:
if not is_embedded(word, solution):
return False
return True
if __name__ == "__main__":
if not os.path.isfile("not_embedded.txt"):
with open(r"..\\..\\other\\enable1.txt", "r") as f:
ENABLE_WORDS = [w.rstrip() for w in f.readlines()]
NEW_WORDLIST = not_embedded_wordlist(ENABLE_WORDS)
with open("not_embedded.txt", "a+") as f:
f.write('\n'.join(NEW_WORDLIST))
else:
with open("not_embedded.txt", "r") as f:
NEW_WORDLIST = [w.rstrip() for w in f.readlines()]
print("\n<<< First attempt >>>")
INITIAL_SOLUTION = create_solution1(NEW_WORDLIST)
print("Initial solution's length: {}".format(len(INITIAL_SOLUTION)))
print("Passes tests: {}".format(test_valid(INITIAL_SOLUTION, NEW_WORDLIST)))
print("\n<<< Second attempt >>>")
INITIAL_SOLUTION2 = create_solution2(NEW_WORDLIST)
print("Initial solution's length: {}".format(len(INITIAL_SOLUTION2)))
print("Passes tests: {}".format(test_valid(INITIAL_SOLUTION2, NEW_WORDLIST)))
| {
"repo_name": "tlgs/dailyprogrammer",
"path": "Python/hard/h313.py",
"copies": "2",
"size": "4905",
"license": "unlicense",
"hash": -7310825389250606000,
"line_mean": 31.9194630872,
"line_max": 100,
"alpha_frac": 0.5993883792,
"autogenerated": false,
"ratio": 3.6741573033707864,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5273545682570786,
"avg_score": null,
"num_lines": null
} |
# caselesssort.py
# A sort function for lists of strings that is case insensitive.
# Copyright Michael Foord
# You are free to modify, use and relicense this code.
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail michael AT foord DOT me DOT uk
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
"""
The built in sort method for lists is case sensitive.
This means it can be unsuitable for sorting some lists of strings.
e.g. ['Apple', 'Pear', 'apple'].sort()
leaves 'Apple' and 'apple' at opposite ends of the list.
You can pass in a function to the sort method - but this can be very slow.
cSort still uses the sort method, so there isn't much performance hit, but it is caseless.
cSort can handle non-string members in the list without failing.
In addition cSort will sort any sets of entries for which entry1.lower() == entry2.lower()
i.e. cSort(['fish', 'FISH', 'fIsh'])
returns ['FISH', 'fIsh', 'fish']
You can turn this behaviour off by passing cSort an optional 'False' parameter.
i.e. cSort(['fish', 'FISH', 'fIsh'], False)
returns ['fish', 'FISH', 'fIsh']
"""
def cSort(inlist, minisort=True):
sortlist = []
newlist = []
sortdict = {}
for entry in inlist:
try:
lentry = entry.lower()
except AttributeError:
sortlist.append(lentry)
else:
try:
sortdict[lentry].append(entry)
except KeyError:
sortdict[lentry] = [entry]
sortlist.append(lentry)
sortlist.sort()
for entry in sortlist:
try:
thislist = sortdict[entry]
if minisort: thislist.sort()
newlist = newlist + thislist
except KeyError:
newlist.append(entry)
return newlist
if __name__ == '__main__':
list1 = ['pish', 'fish', 'FISH', 'Fish', 'PISH', 'FIsh', 'fiSH', 'Pish','piSH']
list2 = list(list1)
print 'Here is an unsorted list :'
print list1
list1.sort()
print 'Here is a list sorted using list.sort() :'
print list1
print 'Here is the list sorted using cSort(list) :'
print cSort(list2)
print 'Here is the list sorted using cSort(list, False) :'
print cSort(list2, False)
"""
TODO/ISSUES
CHANGELOG
06-07-04 Version 1.0.0
A working caseless sort.
Will be part of the caseless module, but also stands on its own.
"""
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/286204_Case_Insensitive_Sort/recipe-286204.py",
"copies": "1",
"size": "2491",
"license": "mit",
"hash": -4777112680903409000,
"line_mean": 28.6547619048,
"line_max": 100,
"alpha_frac": 0.6403050984,
"autogenerated": false,
"ratio": 3.3845108695652173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4524815967965217,
"avg_score": null,
"num_lines": null
} |
# scraper.py
# A general HTML 'parser' and a specific example that will modify URLs in tags.
# Copyright Michael Foord
# You are free to modify, use and relicense this code.
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail michael AT foord DOT me DOT uk
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
import re
#namefind is supposed to match a tag name and attributes into groups 1 and 2 respectively.
#the original version of this pattern:
# namefind = re.compile(r'(\S*)\s*(.+)', re.DOTALL)
#insists that there must be attributes and if necessary will steal the last character
#of the tag name to make it so. this is annoying, so let us try:
namefind = re.compile(r'(\S+)\s*(.*)', re.DOTALL)
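# To illustrate the difference with a made-up tag: for a bare tag such as 'br',
# the original pattern had to give (.+) at least one character, so it matched
# name='b', attributes='r'; the pattern above matches name='br', attributes=''
# instead, leaving attribute-less tag names intact.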
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?') # this is taken from sgmllib
class Scraper:
def __init__(self):
"""Initialise a parser."""
self.buffer = ''
self.outfile = ''
def reset(self):
"""This method clears the input buffer and the output buffer."""
self.buffer = ''
self.outfile = ''
def push(self):
"""This returns all currently processed data and empties the output buffer."""
data = self.outfile
self.outfile = ''
return data
def close(self):
"""Returns any unprocessed data (without processing it) and resets the parser.
Should be used after all the data has been handled using feed and then collected with push.
This returns any trailing data that can't be processed.
If you are processing everything in one go you can safely use this method to return everything.
"""
data = self.push() + self.buffer
self.buffer = ''
return data
def feed(self, data):
"""Pass more data into the parser.
As much as possible is processed - but nothing is returned from this method.
"""
self.index = -1
self.tempindex = 0
self.buffer = self.buffer + data
outlist = []
thischunk = []
while self.index < len(self.buffer)-1: # rewrite with a list of all the occurrences of '<' and jump between them, much faster than character by character - which is fast enough to be fair...
self.index += 1
inchar = self.buffer[self.index]
if inchar == '<':
outlist.append(self.pdata(''.join(thischunk)))
thischunk = []
result = self.tagstart()
if result: outlist.append(result)
if self.tempindex: break
else:
thischunk.append(inchar)
if self.tempindex:
self.buffer = self.buffer[self.tempindex:]
else:
self.buffer = ''
if thischunk: self.buffer = ''.join(thischunk)
self.outfile = self.outfile + ''.join(outlist)
def tagstart(self):
"""We have reached the start of a tag.
self.buffer is the data
self.index is the point we have reached.
This function should extract the tag name and all attributes - and then handle them !."""
test1 = self.buffer.find('>', self.index+1)
test2 = self.buffer.find('<', self.index+1) # will only happen for broken tags with a missing '>'
test1 += 1
test2 += 1
if not test2 and not test1:
self.tempindex = self.index # if we get this far the buffer is incomplete (the tag doesn't close yet)
self.index = len(self.buffer) # this signals to feed that some of the buffer needs saving
return
if test1 and test2:
test = min(test1, test2)
if test == test2: # if the closing tag is missing and we're working from the next starting tag - we need to be careful with our index position...
mod=1
else:
mod=0
else:
test = test1 or test2
if test2:
mod=1
else:
mod=0
thetag = self.buffer[self.index+1:test-1].strip()
if thetag.startswith('!'): # is a declaration or comment
return self.pdecl()
if thetag.startswith('?'):
return self.ppi() # is a processing instruction
if mod: # as soon as we return, the index will have 1 added to it straight away
self.index = test -2
else:
self.index = test -1
if thetag.startswith('/'):
return self.endtag(thetag) # is an endtag
nt = namefind.match(thetag)
if not nt: return self.emptytag(thetag) # nothing inside the tag ?
name, attributes = nt.group(1,2)
matchlist = attrfind.findall(attributes)
attrs = []
#the doc says a tag must be nameless to be "empty" so kill
#next line that calls any tag with no attributes "empty"
#if not matchlist: return self.emptytag(thetag) # nothing inside the tag ?
for entry in matchlist:
attrname, rest, attrvalue = entry # this little chunk nicked from sgmllib - except findall is used to match all the attributes
if not rest:
attrvalue = attrname
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrs.append((attrname.lower(), attrvalue))
return self.handletag(name.lower(), attrs, thetag) # deal with what we've found.
################################################################################################
# The following methods are called to handle the various HTML elements.
# They are intended to be overridden in subclasses.
def pdata(self, inchunk):
"""Called when we encounter a new tag. All the unprocessed data since the last tag is passed to this method.
Dummy method to override. Just returns the data unchanged."""
return inchunk
def pdecl(self):
"""Called when we encounter the *start* of a declaration or comment. <!....
It uses self.index and isn't passed anything.
Dummy method to override. Just returns."""
return '<'
def ppi(self):
"""Called when we encounter the *start* of a processing instruction. <?....
It uses self.index and isn't passed anything.
Dummy method to override. Just returns."""
return '<'
def endtag(self, thetag):
"""Called when we encounter a close tag. </....
It is passed the tag contents (including leading '/') and just returns it."""
return '<' + thetag + '>'
def emptytag(self, thetag):
"""Called when we encounter a tag that we can't extract any valid name or attributes from.
It is passed the tag contents and just returns it."""
return '<' + thetag + '>'
def handletag(self, name, attrs, thetag):
"""Called when we encounter a tag.
Is passed the tag name and a list of (attrname, attrvalue) - and the original tag contents as a string."""
return '<' + thetag + '>'
#################################################################
# The simple test script looks for a file called 'index.html'
# It parses it, and saves it back out as 'index2.html'
#
# See how all the parsed file can safely be returned using the close method.
# If Scraper works - the new file should be a pretty much unchanged copy of the first.
if __name__ == '__main__':
# a = approxScraper('http://www.pythonware.com/daily', 'approx.py')
a = Scraper()
a.feed(open('index.html').read()) # read and feed
open('index2.html','w').write(a.close())
#################################################################
__doc__ = """
Scraper is a class to parse HTML files.
It contains methods to process the 'data portions' of an HTML and the tags.
These can be overridden to implement your own HTML processing methods in a subclass.
This class does most of what HTMLParser.HTMLParser does - except without choking on bad HTML.
It uses the regular expression and a chunk of logic from sgmllib.py (standard python distribution)
The only badly formed HTML that will cause errors is where a tag is missing the closing '>'. (Unfortunately common)
In this case the tag will be automatically closed at the next '<' - so some data could be incorrectly put inside the tag.
The useful methods of a Scraper instance are :
feed(data) - Pass more data into the parser.
As much as possible is processed - but nothing is returned from this method.
push() - This returns all currently processed data and empties the output buffer.
close() - Returns any unprocessed data (without processing it) and resets the parser.
Should be used after all the data has been handled using feed and then collected with push.
This returns any trailing data that can't be processed.
reset() - This method clears the input buffer and the output buffer.
The following methods are the methods called to handle various parts of an HTML document.
In a normal Scraper instance they do nothing and are intended to be overridden.
Some of them rely on the self.index attribute property of the instance which tells it where in self.buffer we have got to.
Some of them are explicitly passed the tag they are working on - in which case, self.index will be set to the end of the tag.
After all these methods have returned self.index will be incremented to the next character.
If your methods do any future processing they can manually modify self.index
All these methods should return anything to include in the processed document.
pdata(inchunk)
Called when we encounter a new tag. All the unprocessed data since the last tag is passed to this method.
Dummy method to override. Just returns the data unchanged.
pdecl()
Called when we encounter the *start* of a declaration or comment. <!.....
It uses self.index and isn't passed anything.
Dummy method to override. Just returns '<'.
ppi()
Called when we encounter the *start* of a processing instruction. <?.....
It uses self.index and isn't passed anything.
Dummy method to override. Just returns '<'.
endtag(thetag)
Called when we encounter a close tag. </...
It is passed the tag contents (including leading '/') and just returns it.
emptytag(thetag)
Called when we encounter a tag that we can't extract any valid name or attributes from.
It is passed the tag contents and just returns it.
handletag(name, attrs, thetag)
Called when we encounter a tag.
Is passed the tag name and attrs (a list of (attrname, attrvalue) tuples) - and the original tag contents as a string.
Typical usage :
filehandle = open('file.html', 'r')
parser = Scraper()
while True:
data = filehandle.read(10000) # read in the data in chunks
if not data: break # we've reached the end of the file - python could do with a do:...while syntax...
parser.feed(data)
## print parser.push() # you can output data whilst processing using the push method
processedfile = parser.close() # or all in one go using close
## print parser.close() # Even if using push you will still need a final close
filehandle.close()
TODO/ISSUES
Could be sped up by jumping from '<' to '<' rather than a character by character search (which is still pretty quick).
Need to check I have all the right tags and attributes in the tagdict in approxScraper.
The only other modification this makes to HTML is to close tags that don't have a closing '>'... theoretically it could close them in the wrong place I suppose....
(This is very bad HTML anyway - but I need to watch for missing content that gets caught like this.)
Could check for character entities and named entities in HTML like HTMLParser.
Doesn't do anything special for self-closing tags (e.g. <br />)
CHANGELOG
06-09-04 Version 1.3.0
A couple of patches by Paul Perkins - mainly prevents the namefind regular expression grabbing a character when it has no attributes.
28-07-04 Version 1.2.1
Was losing a bit of data with each new feed. Have sorted it now.
24-07-04 Version 1.2.0
Refactored into Scraper and approxScraper classes.
Is now a general purpose, basic, HTML parser.
19-07-04 Version 1.1.0
Modified to output URLs using the PATH_INFO method - see approx.py
Cleaned up tag handling - it now works properly when there is a missing closing tag (common - but see TODO - has to guess where to close it).
11-07-04 Version 1.0.1
Added the close method.
09-07-04 Version 1.0.0
First version designed to work with approx.py the CGI proxy.
"""
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/286269_HTML_Scraper/recipe-286269.py",
"copies": "1",
"size": "13243",
"license": "mit",
"hash": 5951587557415347000,
"line_mean": 43.8915254237,
"line_max": 206,
"alpha_frac": 0.6249339274,
"autogenerated": false,
"ratio": 4.206797966963151,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006941564914842604,
"num_lines": 295
} |
# 06_reactions.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
import RPi.GPIO as GPIO
import time, random
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# pins used for the LED and switches
red_pin = 18
green_pin = 23
red_switch_pin = 24
green_switch_pin = 25
# LED pins outputs, switch pins inputs
GPIO.setup(red_pin, GPIO.OUT)
GPIO.setup(green_pin, GPIO.OUT)
GPIO.setup(red_switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(green_switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# The next three functions turn appropriate LEDs on and off
def green():
GPIO.output(green_pin, True)
GPIO.output(red_pin, False)
def red():
GPIO.output(green_pin, False)
GPIO.output(red_pin, True)
def off():
GPIO.output(green_pin, False)
GPIO.output(red_pin, False)
# find which buttons are pressed: 0 means neither, -1=both, 1=red, 2=green
def key_pressed():
# if button is pressed GPIO.input will report false for that input
if GPIO.input(red_switch_pin) and GPIO.input(green_switch_pin):
return 0
if not GPIO.input(red_switch_pin) and not GPIO.input(green_switch_pin):
return -1
if not GPIO.input(red_switch_pin) and GPIO.input(green_switch_pin):
return 1
if GPIO.input(red_switch_pin) and not GPIO.input(green_switch_pin):
return 2
try:
while True:
off()
print("Press the button for red or green when one lights")
delay = random.randint(3, 7) # random delay of 3 to 7 seconds
color = random.randint(1, 2) # random color red=1, green=2
time.sleep(delay)
if (color == 1):
red()
else:
green()
t1 = time.time()
while not key_pressed():
pass
t2 = time.time()
if key_pressed() != color : # check the right button was pressed
print("WRONG BUTTON")
else:
# display the response time
print("Time: " + str(int((t2 - t1) * 1000)) + " milliseconds")
finally:
print("Cleaning up")
GPIO.cleanup()
# You could get rid of the try: finally: code and just have the while loop
# and its contents. However, the try: finally: construct makes sure that
# when you CTRL-c the program to end it, all the pins are set back to
# being inputs. This helps protect your Pi from accidental short circuits
# if something metal touches the GPIO pins.
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "06_reactions.py",
"copies": "1",
"size": "2525",
"license": "mit",
"hash": -2887311400755725000,
"line_mean": 32.2236842105,
"line_max": 85,
"alpha_frac": 0.6463366337,
"autogenerated": false,
"ratio": 3.389261744966443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45355983786664433,
"avg_score": null,
"num_lines": null
} |
#07/09/2017 Working on graphics
#07/16/2017 Can save protocol to a specific file path and file name.
import pandas as pd
import os
from graphics import *
from PIL import Image as NewImage
import mods.saveOutput as saveOutput
#Generates one protocol for inserting each marker sequence in mrkrs in the plsmds. restriction_sites is a database of restriction enzymes and sites.
def plasmid_builder (plsmds, mrkrs,restriction_sites):
mrkrs=mrkrs.split()
# Changes all to upper case (so user can input case insensitive)
plsmds = plsmds.upper()
mrkrs_upper = []
for marker in mrkrs:
mrkrs_upper.append(marker.upper())
mrkrs = mrkrs_upper
#ERROR HANDLING
# Only capital A,T,C,G
for nuc in plsmds:
if nuc not in "ATCG":
raise UserWarning(
'Plasmid sequence must only contain A, T, C, or G!'
)
for marker in mrkrs:
# Only capital A,T,C,G
for nuc in marker:
if nuc not in "ATCG":
raise UserWarning(
'Marker sequences must only contain A, T, C, or G!'
)
# Parsing biolabs restriction sites
restriction_enzymes = restriction_sites.values.T.tolist()
restriction_enzymes = restriction_enzymes[0]
restriction_sites = restriction_sites.values.tolist()
restriction_sites = restriction_sites[1:]
# Cleaning NaNs
restriction_seqs = []
for site in restriction_sites:
clean_site = [x for x in site if str(x) != 'nan']
del clean_site[0]
restriction_seqs.append(clean_site)
del restriction_enzymes[0]
# Reading plasmids and markers
#plsmds = []
#for seq_record in SeqIO.parse(plasmid_fasta, "fasta"):
# plsmd = (repr(seq_record.seq))
# plsmds.append(plasmd)
#mrkrs = []
#for seq_record in SeqIO.parse(markers_fasta, "fasta"):
# mrkr = (repr(seq_record.seq))
# mrkrs.append(mrkr)
plasmid = plsmds
plasmid_re = []
for seq in restriction_seqs:
if seq[0] in plasmid:
idx = restriction_seqs.index(seq)
re_name = restriction_enzymes[idx]
plasmid_re.append(re_name)
index=1
for marker in mrkrs:
marker_re = []
matched = []
idx = 0
for seq in restriction_seqs:
if seq[0] in marker:
idx = restriction_seqs.index(seq)
re_name = restriction_enzymes[idx]
marker_re.append(re_name)
for re in marker_re:
if re in plasmid_re:
matched.append(re)
protocol=[]
if len(matched) > 0:
# Add to PROTOCOL for (1) --- matched[0] gives plasmid name
protocol.append ("Digest the recipient plasmid and gene with " + matched[0] + " following the manufacturer's instructions")
protocol.append ("Run on gel electrophoresis to separate the fragments by size; use GelViz to visualize outcome of gel.")
protocol.append ("Add the separated fragments into the following mixture:")
protocol.append ("a. " + "Marker of sequence " + marker)
protocol.append ("b. the recipient plasmid fragment")
protocol.append ("c. DNA ligase")
protocol.append ("e. follow the manufacturer's instruction for appropriate temperatures and incubation times")
elif len(marker) > 400:
primer1 = marker[0:30] + plasmid[idx:idx+30]
primer2 = plasmid[idx+30:idx+60] + marker[len(marker)-30:len(marker)]
protocol.append ("FOLLOWING GIBSON PROTOCOL")
protocol.append ("Recipient Plasmid Instructions:")
protocol.append ("a. Obtain" + plasmid_re[0] + " enzyme and recipient plasmid; follow manufacturer's instruction for digestion")
protocol.append ("b. Conduct Gel electrophoresis to ensure cutting; check fragments with Gel.Viz")
protocol.append ("Gene instructions:")
protocol.append ("a. Obtain two primers:")
protocol.append (str(primer1) + ' and ' + str(primer2))
protocol.append ("b. Run a Two Step PCR with the appropriate annealing temperatures")
protocol.append ("c. Conduct Gel electrophoresis to ensure extension; check fragments with Gel.Viz")
protocol.append ("Final instructions:")
protocol.append ("Incubate the amplified fragment and recipient plasmid with the following mixture:")
protocol.append ("a.an exonuclease the chews back 5' ends of the fragment to create overhangs")
protocol.append ("b. a polymerase to fill in gaps")
protocol.append ("c. a DNA ligase tht seals the nicks of filled in gaps")
protocol.append ("d. follow the manufacturer's instructions for appropriate buffers and incubation time")
out_text=''
for item in protocol:
out_text=out_text+("%s\n" % item)
saveOutput.saveData(out_text,"BUILDR Protocol "+str(index))
index=index+1
#Drawing plasmid
def plasDraw(plasmid, mrkrs):
total=len(plasmid)
for i in mrkrs:
total=total+len(i)
scale=360/total
padding=50
master=Toplevel()
master.title("Plasmid")
w=Canvas(master,width=500,height=500)
w.pack()
x=w.winfo_width()
y=w.winfo_height()
plasEnd=scale*len(plasmid)
w.create_arc(padding,padding,x-padding,y-padding,start=0,extent=plasEnd, style=ARC,width=5, outline="black")
for i in mrkrs:
w.create_arc(50,50,450,450,start=plasEnd,extent=len(i)*scale,style=ARC,width=5,outline="red")
#Need to incorporate cuts in where the restriction sites are
# else:
# Primer protocol
# Finding
# User is presented with a list of restriction enzymes and other reagents and selects what they have in the lab.
# All of the following will be in a for loop for each gene/marker.
# 1. Restriction enzyme/ligation function:
# a. Search for all restriction sites on plasmid that were selected by user.
# b. Search for all restriction sites on genes/markers that were selected by user.
# c. Search for matches between plasmids and genes.
# d. If there are matches, print restriction enzyme names and protocol.
# "Digest the #recipient plasmid and #gene with #restriction enzymes following the manufacturer's instructions"
# "Run on gel electrophoresis to separate the fragments by size; use GelViz to visualize outcome of gel."
# "Add the separated fragments into the following mixture:
# a. the %gene fragment
# b. the %recipient plasmid fragment
# c. DNA ligase
# e. follow the manufacturer's instruction for appropriate temperatures and incubation times
#
#
# 2. Gibson method:
# a. Pick first restriction site found on plasmid.
# b. Copy 30 bps upstream and downstream of restriction site location.
# c. Copy 20 bps on either end of gene/marker.
# d. Combine to form 60 bps for each primer.
# e. Check melting temperature of primers. Discard and repeat if temp < 40C or > 60C.
# f. Check for primer dimer formation. Discard and repeat if 5 or more nucleotides form dimers.
# g. Print primer sequences and protocol.
# "Recipient Plasmid Instructions:
# a. Obtain %restriction enzyme and %recipientplasmid; follow manufacturer's instruction for digestion
# b. Conduct Gel electrophoresis to ensure cutting; check with following gel:"
# "Gene instruction:
# a. "Obtain primer with the following sequene %primer sequence
# b. Run a Two Step PCR with the appropriate annealing temperatures
# "Final instructions:
# Incubate the amplified fragment and recipient plasmid with the following mixture:
# a. an exonuclease that chews back 5' ends of the fragment to create overhangs
# b. a polymerase to fill in gaps
# c. a DNA ligase that seals the nicks of filled in gaps
# d. follow the manufacturer's instructions for appropriate buffers and incubation time
# 3. Restriction Primers
# Protocol:
# a. find sequence of restriction enzyme of recipient plasmid
# b. combine sequence with one end of the gene's complementary sequence
# c. print primer sequence and protocol:
# "Obtain primers with sequence: %primer_seq"
# "
# "Aliquot an appropriate amount of the %enzyme restriction enzyme into the mixture."
# "Incubate the mixture for an appropriate duration of time."
# "Remove the restriction enzymes."
# "Add the primers into the mixture." | {
"repo_name": "sunericd/ISTools",
"path": "GeneTK/mods/plasmid_builder.py",
"copies": "1",
"size": "8825",
"license": "mit",
"hash": -909607771300903000,
"line_mean": 42.0536585366,
"line_max": 148,
"alpha_frac": 0.6397733711,
"autogenerated": false,
"ratio": 3.7330795262267342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4872852897326734,
"avg_score": null,
"num_lines": null
} |
"""-0.75/(1+x**2)-(0.65*x*math.atan(1/x))+0.65
(-4* x**2 - 20*x - 100) + (1 - x)**4
3*x**2+ 12/(x**3) - 5
3*x**4 + x**2 - 2*x + 1
10+x**3-2*x-5*(np.finfo(float).eps)**x
x**2- 10*(np.finfo(float).eps)**(0.1*x)
(10*x**3+3*x**2+5)**2
0.5/math.sqrt(1+x**2)- math.sqrt(1+x**2)*(1-0.5/(1+x**2))+x
(np.finfo(float).eps)**x-x**3
(x**2-1)**3-(2*x-5)**4
(-4*x**2-20*x-100) + (1-x)**4
(x**2+(y+1)**2)*(x**2+(y-1)**2)
(x**2-y)**2+y**2
50*(y-x**2)**2+(2-x)**2
(x+2*y-7)**2+(2*x+y-5)**2
(1.5-x*(1-y))**2+(2.25-x*(1-y**2))**2+(2.625-x*(1-y**3))**2
(10*(x[1]-x[0]**2))**2+(1-x[0])**2+90*(x[3]-x[2]**2)**2+(1-x[2])**2+10*(x[1]+x[3]-2)**2+0.1*(x[1]-x[3]) #4 variables, impossible to graph
(4-2.1*x[0]**2+(x[0]**4)/3)*x[0]**2+x[0]*x[1]+(-4+4*x[1]**2)*x[1]**2
(x[0]+10*x[1])**2+5*(x[2]-x[3])**2+(x[1]-2*x[2])**4+10*(x[0]-x[3])**4 #4 variables
x[0]**2+x[1]**2+x[2]**2 #3 variables
100*(x[0]**2-x[1])**2+(1-x[0])**2
math.floor(x[0])+math.floor(x[1])+math.floor(x[2])+math.floor(x[3])+math.floor(x[4]) #5 variables
#23 - 30 variables
suma = 0
for i in range(1,30):
suma+= i*x[i-1]**4
return suma + random.gauss(0,1)
#24
a = [[-32,-16,0,16,32,-32,-16,0,16,32,-32,-16,0,16,32,-32,-16,0,16,32,-32,-16,0,16,32], [-32,-32,-32,-32,-32,-16,-16,-16,-16,-16,0,0,0,0,0,16,16,16,16,16,32,32,32,32,32]]
def fitness(x,y):
superSuma = 0
for j in range(1,25):
superSuma += 1/f2(j,x,y)
return 1/(1/500 + superSuma)
def f2(j,x,y):
suma = 0
i = 0
suma+= (x- a[i][j])**6
i = 1
suma+= (y- a[i][j])**6
return j + suma """ | {
"repo_name": "jresendiz27/EvolutionaryComputing",
"path": "practices/first/evolutionaryStrategies/functions.py",
"copies": "2",
"size": "1490",
"license": "apache-2.0",
"hash": -3856515271743588400,
"line_mean": 33.6744186047,
"line_max": 170,
"alpha_frac": 0.489261745,
"autogenerated": false,
"ratio": 1.6266375545851528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8018810646719186,
"avg_score": 0.01941773057319362,
"num_lines": 43
} |
# 07_light_meter.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
from Tkinter import *
import RPi.GPIO as GPIO
import time, math
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses a photoresistor, a component whose resistance varies with the light falling on it.
# To measure its resistance, the code records the time it takes for a capacitor to fill
# when supplied by a current passing through the resistor. The lower the resistance the faster
# it fills up.
#
# You can think of a capacitor as a tank of electricity, and as it fills with charge, the voltage
# across it increases. We cannot measure that voltage directly, because the Raspberry Pi
# does not have an analog to digital convertor (ADC or analog input). However, we can time how long it
# takes for the capacitor to fill with charge to the extent that it gets above the 1.65V or so
# that counts as being a high digital input.
#
# For more information on this technique take a look at:
# learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi
# The code here is based on that in the Raspberry Pi Cookbook (Recipes 12.1 to 12.3)
# Pin a charges the capacitor through a fixed 1k resistor and the photoresistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
return charge_time()
# Convert the time taken to charge the capacitor into a value of resistance
# To reduce errors, do it 20 times and take the average.
def read_resistance():
n = 20
total = 0;
for i in range(0, n):
total = total + analog_read()
reading = total / float(n)
resistance = reading * 6.05 - 939
return resistance
def light_from_r(R):
# Log the reading to compress the range
return math.log(1000000.0/R) * 10.0
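# math.log is the natural logarithm, so (illustrative numbers only) R = 10000 ohms
# gives math.log(100) * 10, about 46, while a darker reading of R = 100000 ohms gives
# about 23 - lower resistance (more light) produces a larger number on a usefully
# compressed scale.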
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
self.master = master
frame = Frame(master)
frame.pack()
label = Label(frame, text='Light', font=("Helvetica", 32))
label.grid(row=0)
self.reading_label = Label(frame, text='12.34', font=("Helvetica", 110))
self.reading_label.grid(row=1)
self.update_reading()
# Update the reading
def update_reading(self):
light = light_from_r(read_resistance())
reading_str = "{:.0f}".format(light)
self.reading_label.configure(text=reading_str)
self.master.after(200, self.update_reading)
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('Light Meter')
app = App(root)
root.geometry("400x300+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "07_light_meter.py",
"copies": "1",
"size": "3510",
"license": "mit",
"hash": 6232889419278562000,
"line_mean": 33.7524752475,
"line_max": 102,
"alpha_frac": 0.6948717949,
"autogenerated": false,
"ratio": 3.371757925072046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4566629719972046,
"avg_score": null,
"num_lines": null
} |
#08_03_rasp_game_one
import pygame
from pygame.locals import *
from sys import exit
import random
screen_width = 600
screen_height = 400
spoon_x = 300
spoon_y = screen_height - 100
raspberry_x = random.randint(10, screen_width)
raspberry_y = 0
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Raspberry Catching')
spoon = pygame.image.load('spoon.jpg').convert()
raspberry = pygame.image.load('raspberry.jpg').convert()
def update_spoon():
global spoon_x
global spoon_y
spoon_x, ignore = pygame.mouse.get_pos()
screen.blit(spoon, (spoon_x, spoon_y))
def update_raspberry():
global raspberry_x
global raspberry_y
raspberry_y += 5
if raspberry_y > spoon_y:
raspberry_y = 0
raspberry_x = random.randint(10, screen_width)
raspberry_x += random.randint(-5, 5)
if raspberry_x < 10:
raspberry_x = 10
if raspberry_x > screen_width - 20:
raspberry_x = screen_width - 20
screen.blit(raspberry, (raspberry_x, raspberry_y))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
update_raspberry()
update_spoon()
pygame.display.update()
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "08_03_rasp_game_one.py",
"copies": "1",
"size": "1300",
"license": "mit",
"hash": 3960308470716985300,
"line_mean": 23.0740740741,
"line_max": 63,
"alpha_frac": 0.6423076923,
"autogenerated": false,
"ratio": 2.882483370288248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40247910625882477,
"avg_score": null,
"num_lines": null
} |
#08_04_rasp_game_scoring
import pygame
from pygame.locals import *
from sys import exit
import random
score = 0
screen_width = 600
screen_height = 400
spoon_x = 300
spoon_y = screen_height - 100
raspberry_x = random.randint(10, screen_width)
raspberry_y = 0
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Raspberry Catching')
spoon = pygame.image.load('spoon.jpg').convert()
raspberry = pygame.image.load('raspberry.jpg').convert()
def update_spoon():
global spoon_x
global spoon_y
spoon_x, ignore = pygame.mouse.get_pos()
screen.blit(spoon, (spoon_x, spoon_y))
def update_raspberry():
global raspberry_x
global raspberry_y
raspberry_y += 5
if raspberry_y > spoon_y:
raspberry_y = 0
raspberry_x = random.randint(10, screen_width)
raspberry_x += random.randint(-5, 5)
if raspberry_x < 10:
raspberry_x = 10
if raspberry_x > screen_width - 20:
raspberry_x = screen_width - 20
screen.blit(raspberry, (raspberry_x, raspberry_y))
def check_for_catch():
global score
if raspberry_y >= spoon_y and raspberry_x >= spoon_x and \
raspberry_x < spoon_x + 50:
score += 1
display("Score: " + str(score))
def display(message):
font = pygame.font.Font(None, 36)
text = font.render(message, 1, (10, 10, 10))
screen.blit(text, (0, 0))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
update_raspberry()
update_spoon()
check_for_catch()
pygame.display.update()
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "08_04_rasp_game_scoring.py",
"copies": "1",
"size": "1669",
"license": "mit",
"hash": -3682527572559103000,
"line_mean": 22.5070422535,
"line_max": 63,
"alpha_frac": 0.6315158778,
"autogenerated": false,
"ratio": 2.8676975945017182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3999213472301718,
"avg_score": null,
"num_lines": null
} |
#08_06_rasp_game_final
import pygame
from pygame.locals import *
from sys import exit
import random
score = 0
screen_width = 600
screen_height = 400
spoon_x = 300
spoon_y = screen_height - 100
class Raspberry:
x = 0
y = 0
dy = 0
def __init__(self):
self.x = random.randint(10, screen_width)
self.y = 0
self.dy = random.randint(3, 10)
def update(self):
self.y += self.dy
if self.y > spoon_y:
self.y = 0
self.x = random.randint(10, screen_width)
self.x += random.randint(-5, 5)
if self.x < 10:
self.x = 10
if self.x > screen_width - 20:
self.x = screen_width - 20
screen.blit(raspberry_image, (self.x, self.y))
def is_caught(self):
return self.y >= spoon_y and self.x >= spoon_x and self.x < spoon_x + 50
clock = pygame.time.Clock()
rasps = [Raspberry(), Raspberry(), Raspberry()]
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Raspberry Catching')
spoon = pygame.image.load('spoon.jpg').convert()
raspberry_image = pygame.image.load('raspberry.jpg').convert()
def update_spoon():
global spoon_x
global spoon_y
spoon_x, ignore = pygame.mouse.get_pos()
screen.blit(spoon, (spoon_x, spoon_y))
def check_for_catch():
global score
for r in rasps:
if r.is_caught():
score += 1
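# Note: Raspberry.is_caught() treats the spoon as a 50-pixel-wide target at height
# spoon_y, so a raspberry counts as caught once it has fallen to the spoon's row
# and its x position lies within that 50-pixel span.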
def display(message):
font = pygame.font.Font(None, 36)
text = font.render(message, 1, (10, 10, 10))
screen.blit(text, (0, 0))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
for r in rasps:
r.update()
update_spoon()
check_for_catch()
display("Score: " + str(score))
pygame.display.update()
clock.tick(30)
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "08_06_rasp_game_final.py",
"copies": "1",
"size": "1915",
"license": "mit",
"hash": -6845524353052185000,
"line_mean": 22.0722891566,
"line_max": 80,
"alpha_frac": 0.580156658,
"autogenerated": false,
"ratio": 3.0787781350482315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4158934793048231,
"avg_score": null,
"num_lines": null
} |
# 082 - Remove Duplicates from Sorted List II
# https://leetcode.com/problems/remove-duplicates-from-sorted-list-ii/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
duplicates = {}
def reduce_recursively(head, node, prev_node=None):
# No more elements in the list, return the original pointer.
if node is None:
return head
# No previous value, continue the recursion.
if prev_node is None:
return reduce_recursively(head, node.next, node)
else:
if prev_node.val == node.val:
# Remove the current node and continue with recursion while preserving
# the old previous node until finding a different value.
prev_node.next = node.next
duplicates[prev_node.val] = 1
return reduce_recursively(head, node.next, prev_node)
# Otherwise just continue with recursion.
return reduce_recursively(head, node.next, node)
def remove_recursively(head, node, prev_node=None):
if prev_node is None:
prev_node = node
if node is None:
return head
# If the value of the current node is duplicated.
if duplicates.get(node.val):
prev_node.next = node.next
return remove_recursively(head, node.next, prev_node)
return remove_recursively(head, node.next, node)
# This will reduce to unique ocurrences, but register separately the repeated values.
reduce_recursively(head, head)
# After reusing the solution to the reduction problem, go through the list removing
# nodes that are labeled as duplicated previously.
remove_recursively(head, head)
# The removing doesn't remove the value at the head so manually check if the
# head (if defined) has a duplicated value, if so return whatever is next.
if head and duplicates.get(head.val):
return head.next
return head
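# Minimal usage sketch (added for illustration, not part of the LeetCode
# submission): build 1->1->2->3->3 and print what survives; with this input
# every duplicated value is dropped entirely, so only 2 remains.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next
    head = ListNode(1, ListNode(1, ListNode(2, ListNode(3, ListNode(3)))))
    node = Solution().deleteDuplicates(head)
    remaining = []
    while node:
        remaining.append(node.val)
        node = node.next
    print(remaining)  # expected: [2]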
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/02_Medium/lc_082.py",
"copies": "1",
"size": "2370",
"license": "mit",
"hash": -2717259463950642000,
"line_mean": 37.8524590164,
"line_max": 93,
"alpha_frac": 0.5924050633,
"autogenerated": false,
"ratio": 4.505703422053232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5598108485353233,
"avg_score": null,
"num_lines": null
} |
# 083 - Remove Duplicates from Sorted List (Easy)
# https://leetcode.com/problems/remove-duplicates-from-sorted-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def remove_recursively(head, node, prev_node=None):
# No more elements in the list, return the original pointer.
if node is None:
return head
# No previous value, continue the recursion.
if prev_node is None:
return remove_recursively(head, node.next, node)
else:
if prev_node.val == node.val:
# Remove the current node and continue with recursion while preserving
# the old previous node until finding a different value.
prev_node.next = node.next
return remove_recursively(head, node.next, prev_node)
# Otherwise just continue with recursion.
return remove_recursively(head, node.next, node)
return remove_recursively(head, head)
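# Minimal usage sketch (added for illustration, not part of the LeetCode
# submission): unlike problem 082 above, this version keeps one copy of each
# value, so 1->1->2 becomes 1->2.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next
    head = Solution().deleteDuplicates(ListNode(1, ListNode(1, ListNode(2))))
    while head:
        print(head.val)  # prints 1 then 2
        head = head.next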
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_083.py",
"copies": "1",
"size": "1289",
"license": "mit",
"hash": -8668892554760389000,
"line_mean": 38.0606060606,
"line_max": 90,
"alpha_frac": 0.5841737781,
"autogenerated": false,
"ratio": 4.35472972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.543890350782973,
"avg_score": null,
"num_lines": null
} |
# 08_light_harp.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
import RPi.GPIO as GPIO
import time, math
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
buzzer_pin = 24
GPIO.setup(buzzer_pin, GPIO.OUT)
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.001)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
# In this project, the return value is not used. The time taken for this function itself to run
# directly influences the buzzer tone (see next comment block)
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
count = 0
GPIO.output(a_pin, True)
while not GPIO.input(b_pin):
count = count + 1
return count
# Rather misleadingly, this function actually makes the tone on the buzzer
# by turning it on and off, with a delay caused by charge_time.
# Cunning or what?
def analog_read():
discharge()
GPIO.output(buzzer_pin, True)
discharge()
charge_time()
GPIO.output(buzzer_pin, False)
charge_time()
try:
while True:
analog_read()
finally:
print("Cleaning up")
GPIO.cleanup()
# You could get rid of the try: finally: code and just have the while loop
# and its contents. However, the try: finally: construct makes sure that
# when you CTRL-c the program to end it, all the pins are set back to
# being inputs. This helps protect your Pi from accidental shorts-circuits
# if something metal touches the GPIO pins. | {
"repo_name": "simonmonk/pi_starter_kit",
"path": "08_light_harp.py",
"copies": "1",
"size": "1918",
"license": "mit",
"hash": 6990756843295913000,
"line_mean": 30.4590163934,
"line_max": 95,
"alpha_frac": 0.7059436913,
"autogenerated": false,
"ratio": 3.273037542662116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4478981233962116,
"avg_score": null,
"num_lines": null
} |
# 08_manual_robot_continuous.py
# Use the arrow keys to direct the robot
from rrb3 import *
import sys
import tty
import termios
rr = RRB3(9.0, 6.0) # battery, motor
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
print("Use the arrow keys to move the robot")
print("Press CTRL-c to quit the program")
# These functions allow the program to read your keyboard
def readchar():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    if ch == '\x03':  # CTRL-C
raise KeyboardInterrupt
return ch
def readkey(getchar_fn=None):
getchar = getchar_fn or readchar
c1 = getchar()
if ord(c1) != 0x1b:
return c1
c2 = getchar()
if ord(c2) != 0x5b:
return c1
c3 = getchar()
return ord(c3) - 65 # 0=Up, 1=Down, 2=Right, 3=Left arrows
# This will control the movement of your robot and display on your screen
try:
while True:
keyp = readkey()
if keyp == UP:
            rr.forward() # if you don't specify a duration it keeps going indefinitely
print 'forward'
elif keyp == DOWN:
rr.reverse()
print 'backward'
elif keyp == RIGHT:
rr.right()
print 'clockwise'
elif keyp == LEFT:
rr.left()
print 'anti clockwise'
elif keyp == ' ':
rr.stop()
print 'stop'
elif ord(keyp) == 3:
break
except KeyboardInterrupt:
GPIO.cleanup()
| {
"repo_name": "teknoteacher/raspirobot3",
"path": "08_manual_robot_continuous.py",
"copies": "3",
"size": "1691",
"license": "mit",
"hash": -8517094233418003000,
"line_mean": 23.1571428571,
"line_max": 85,
"alpha_frac": 0.5718509758,
"autogenerated": false,
"ratio": 3.4651639344262297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.553701491022623,
"avg_score": null,
"num_lines": null
} |
# 08_manual_robot.py
# Use the arrow keys to direct the robot
from rrb3 import *
import sys
import tty
import termios
rr = RRB3(9.0, 6.0) # battery, motor
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
print("Use the arrow keys to move the robot")
print("Press CTRL-c to quit the program")
# These functions allow the program to read your keyboard
def readchar():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    if ch == '\x03':  # CTRL-C
raise KeyboardInterrupt
return ch
def readkey(getchar_fn=None):
getchar = getchar_fn or readchar
c1 = getchar()
if ord(c1) != 0x1b:
return c1
c2 = getchar()
if ord(c2) != 0x5b:
return c1
c3 = getchar()
return ord(c3) - 65 # 0=Up, 1=Down, 2=Right, 3=Left arrows
# This will control the movement of your robot and display on your screen
try:
while True:
keyp = readkey()
if keyp == UP:
rr.forward(1)
print 'forward'
elif keyp == DOWN:
rr.reverse(1)
print 'backward'
elif keyp == RIGHT:
rr.right(1)
print 'clockwise'
elif keyp == LEFT:
rr.left(1)
print 'anti clockwise'
elif ord(keyp) == 3:
break
except KeyboardInterrupt:
GPIO.cleanup()
| {
"repo_name": "simonmonk/raspberrypi_cookbook_ed2",
"path": "rover.py",
"copies": "4",
"size": "1551",
"license": "mit",
"hash": -3878133912986541000,
"line_mean": 22.1492537313,
"line_max": 73,
"alpha_frac": 0.5738233398,
"autogenerated": false,
"ratio": 3.349892008639309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.592371534843931,
"avg_score": null,
"num_lines": null
} |
#08_
import pygame
from pygame.locals import *
from sys import exit
import random
score = 0
screen_width = 600
screen_height = 400
spoon_x = 300
spoon_y = screen_height - 100
class Raspberry:
x = 0
y = 0
dy = 0
def __init__(self):
self.x = random.randint(10, screen_width)
self.y = 0
self.dy = random.randint(3, 10)
def update(self):
self.y += self.dy
if self.y > spoon_y:
self.y = 0
self.x = random.randint(10, screen_width)
self.x += random.randint(-5, 5)
if self.x < 10:
self.x = 10
if self.x > screen_width - 20:
self.x = screen_width - 20
screen.blit(raspberry_image, (self.x, self.y))
def is_caught(self):
return self.y >= spoon_y and self.x >= spoon_x and self.x < spoon_x + 50
clock = pygame.time.Clock()
rasps = [Raspberry(), Raspberry(), Raspberry()]
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Raspberry Catching')
spoon = pygame.image.load('spoon.jpg').convert()
raspberry_image = pygame.image.load('raspberry.jpg').convert()
def update_spoon():
global spoon_x
global spoon_y
spoon_x, ignore = pygame.mouse.get_pos()
screen.blit(spoon, (spoon_x, spoon_y))
def check_for_catch():
global score
for r in rasps:
if r.is_caught():
score += 1
def display(message):
font = pygame.font.Font(None, 36)
text = font.render(message, 1, (10, 10, 10))
screen.blit(text, (0, 0))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
for r in rasps:
r.update()
update_spoon()
check_for_catch()
display("Score: " + str(score))
pygame.display.update()
clock.tick(30)
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "08_raspberry_bounce_5.py",
"copies": "1",
"size": "1898",
"license": "mit",
"hash": -3424469766874564600,
"line_mean": 21.5952380952,
"line_max": 80,
"alpha_frac": 0.5774499473,
"autogenerated": false,
"ratio": 3.0912052117263844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41686551590263843,
"avg_score": null,
"num_lines": null
} |
#08_
import pygame
from pygame.locals import *
from sys import exit
import random
score = 0
screen_width = 600
screen_height = 400
spoon_x = 300
spoon_y = screen_height - 100
raspberry_x = random.randint(10, screen_width)
raspberry_y = 0
clock = pygame.time.Clock()
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Raspberry Catching')
spoon = pygame.image.load('spoon.jpg').convert()
raspberry = pygame.image.load('raspberry.jpg').convert()
def update_spoon():
global spoon_x
global spoon_y
spoon_x, ignore = pygame.mouse.get_pos()
screen.blit(spoon, (spoon_x, spoon_y))
def update_raspberry():
global raspberry_x
global raspberry_y
raspberry_y += 5
if raspberry_y > spoon_y:
raspberry_y = 0
raspberry_x = random.randint(10, screen_width)
raspberry_x += random.randint(-5, 5)
if raspberry_x < 10:
raspberry_x = 10
if raspberry_x > screen_width - 20:
raspberry_x = screen_width - 20
screen.blit(raspberry, (raspberry_x, raspberry_y))
def check_for_catch():
global score
if raspberry_y >= spoon_y and raspberry_x >= spoon_x and raspberry_x < spoon_x + 50:
score += 1
display("Score: " + str(score))
def display(message):
font = pygame.font.Font(None, 36)
text = font.render(message, 1, (10, 10, 10))
screen.blit(text, (0, 0))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
update_raspberry()
update_spoon()
check_for_catch()
pygame.display.update()
clock.tick(30)
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "08_raspberry_bounce_3.py",
"copies": "1",
"size": "1688",
"license": "mit",
"hash": -5401274146478605000,
"line_mean": 22.1232876712,
"line_max": 88,
"alpha_frac": 0.6327014218,
"autogenerated": false,
"ratio": 2.865874363327674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3998575785127674,
"avg_score": null,
"num_lines": null
} |
#08_
import pygame
from pygame.locals import *
from sys import exit
import random
screen_width = 600
screen_height = 400
spoon_x = 300
spoon_y = screen_height - 100
raspberry_x = random.randint(10, screen_width)
raspberry_y = 0
pygame.init()
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Raspberry Catching')
spoon = pygame.image.load('spoon.jpg').convert()
raspberry = pygame.image.load('raspberry.jpg').convert()
def update_spoon():
global spoon_x
global spoon_y
spoon_x, ignore = pygame.mouse.get_pos()
screen.blit(spoon, (spoon_x, spoon_y))
def update_raspberry():
global raspberry_x
global raspberry_y
raspberry_y += 5
if raspberry_y > spoon_y:
raspberry_y = 0
raspberry_x = random.randint(10, screen_width)
raspberry_x += random.randint(-5, 5)
if raspberry_x < 10:
raspberry_x = 10
if raspberry_x > screen_width - 20:
raspberry_x = screen_width - 20
screen.blit(raspberry, (raspberry_x, raspberry_y))
while True:
for event in pygame.event.get():
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
update_raspberry()
update_spoon()
pygame.display.update()
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "08_raspberry_bounce_2.py",
"copies": "1",
"size": "1282",
"license": "mit",
"hash": -390138207909545000,
"line_mean": 21.8928571429,
"line_max": 63,
"alpha_frac": 0.6411856474,
"autogenerated": false,
"ratio": 2.893905191873589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8909696931710565,
"avg_score": 0.025078781512605043,
"num_lines": 56
} |
# 09-01-04
#v1.0.1
#
#
# Prints output to the screen and/or
# logs it to a file - depending on the settings.
#
# To use this function you need to include verbose (yes if you want to print to screen), 'outputpath' (filename)
# ('' if you don't want to output to a file) and 'logmode'( (w)rite or (a)ppend )
#
# If passed in an alternative method for printing (newfunc) it can also output using that method !
# This method can be created at any time.
# Usage :
# verbose = 'Yes' # to print to screen
# outputpath = 'system/logfile.txt' # logfile to print to, or '' for no logging
# logmode = 'w' # the mode to open the logfile in - 'w' for write or 'a' for append
#
# from standout import StandOut
# stout = StandOut(verbose, outputpath, logmode, newfunc=None)
# stout.out('message line\n') # this line prints to the screen and adds a line to logfile.txt
# stout.close() # closes the logging file
# stout.verbose = 'No' # Switches printing off
# stout.newfunc = printtowindow # sets an additional printing function
# stout.newfile(outputpath, logmode) # sets a file to log to if the object was originally created without one
class StandOut:
"Creates an output object that will print *and/or* write to an output file if required."
def __init__(self,verbose,outputpath,logmode, newfunc=None):
self.verbose=verbose.lower().strip()
if logmode=='a' and outputpath !='': # are we appending or creating a newfile ?
self.outputfile=open(outputpath,'a')
            self.out('\n\n')
elif outputpath != '':
self.outputfile=open(outputpath,'w')
else:
self.outputfile=''
self.newfunc = newfunc
def out(self,line):
if self.verbose=='yes':
print line,
if self.outputfile:
self.outputfile.write(line)
if self.newfunc:
self.newfunc(line)
def close(self):
if self.outputfile != '':
self.outputfile.close()
self.outputfile = ''
def newfile(self, outputpath,logmode):
"""Use this method for adding a file after creating the object without one."""
if logmode=='a' and outputpath !='': # are we appending or creating a newfile ?
self.outputfile=open(outputpath,'a')
            self.out('\n\n')
elif outputpath != '':
self.outputfile=open(outputpath,'w')
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/259176_StandOut__Flexible_Output_Object/recipe-259176.py",
"copies": "1",
"size": "2491",
"license": "mit",
"hash": -6812921665900219000,
"line_mean": 38.5396825397,
"line_max": 112,
"alpha_frac": 0.6403050984,
"autogenerated": false,
"ratio": 3.631195335276968,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4771500433676968,
"avg_score": null,
"num_lines": null
} |
# 09/04/2019
import functools
import itertools
import operator
def fit1(X, Y, x, y):
return int(X/x) * int(Y/y)
assert fit1(25, 18, 6, 5) == 12
assert fit1(10, 10, 1, 1) == 100
assert fit1(12, 34, 5, 6) == 10
assert fit1(12345, 678910, 1112, 1314) == 5676
assert fit1(5, 100, 6, 1) == 0
def fit2(X, Y, x, y):
return max(fit1(X, Y, x, y), fit1(X, Y, y, x))
assert fit2(25, 18, 6, 5) == 15
assert fit2(12, 34, 5, 6) == 12
assert fit2(12345, 678910, 1112, 1314) == 5676
assert fit2(5, 5, 3, 2) == 2
assert fit2(5, 100, 6, 1) == 80
assert fit2(5, 5, 6, 1) == 0
def fit3(X, Y, Z, x, y, z):
orientations = [
(x, y, z),
(x, z, y),
(y, x, z),
(y, z, x),
(z, x, y),
(z, y, x)
]
return max(int(X/a) * int(Y/b) * int(Z/c) for a, b, c in orientations)
assert fit3(10, 10, 10, 1, 1, 1) == 1000
assert fit3(12, 34, 56, 7, 8, 9) == 32
assert fit3(123, 456, 789, 10, 11, 12) == 32604
assert fit3(1234567, 89101112, 13141516, 171819, 202122, 232425) == 174648
def fitn(crate, box):
def _fit(c, b):
return functools.reduce(
operator.mul,
(int(x/y) for x, y in zip(c, b)),
1
)
return max(_fit(crate, perm) for perm in itertools.permutations(box))
assert fitn([3, 4], [1, 2]) == 6
assert fitn([123, 456, 789], [10, 11, 12]) == 32604
assert fitn([123, 456, 789, 1011, 1213, 1415], [16, 17, 18, 19, 20, 21]) == 1883443968
| {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/easy/e377.py",
"copies": "2",
"size": "1442",
"license": "mit",
"hash": 7352715090567525000,
"line_mean": 23.8620689655,
"line_max": 86,
"alpha_frac": 0.5409153953,
"autogenerated": false,
"ratio": 2.3523654159869496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.389328081128695,
"avg_score": null,
"num_lines": null
} |
"""0.9.0 News and Changes
- Updated Swagger UI 3.x
- uiversion now defaults to 3
- 'hide_top_bar' config option to remove the green top bar
- 'footer_text' added to customize the footer text (allows html and <script>)
- templates/flasgger/footer.html added and can be replaced for customization;
  this footer.html is the right place for a custom <script>
- 'top_text' added to customize the header text (allows html)
- templates/flasgger/top.html added and can be replaced for customization
- 'head_text' added to customize the <head> (allows html)
- templates/flasgger/head.html added and can be replaced for customization
- added 'doc_expansion' config to control how the tags are collapsed/expanded
- added 'ui_params' to allow override of any swagger.ui values
"""
from flask import Flask, request
from flasgger import Swagger
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['SWAGGER'] = {
'title': 'MyAPI',
# UI version 3 is now the default
# 'uiversion': '3',
# This setup hides the green top bar
'hide_top_bar': True,
# this text is rendered in the footer
# (optionally you can replace flasgger/footer.html template)
'footer_text': '<b>Hello World </b><script>alert("Hello World!")</script>',
# this text is rendered in the header
# (optionally you can replace flasgger/header.html template)
'top_text': '<b><span class="top_text">Welcome to my api </span></b>',
# this text is rendered in the <head>
# (optionally you can replace flasgger/head.html template)
'head_text': '<style>.top_text{color: red;}</style>',
    # Control how each tag is expanded when the docs load ('doc_expansion'):
    # "none" - everything starts collapsed
    # "list" - expand/list the operations only (default)
    # "full" - expand everything
'doc_expansion': "list",
# Allows overriding any of the uiparams
# This is useful to override other stuff not provided by the above aliases
'ui_params': {
'apisSorter': 'alpha',
'operationsSorter': 'alpha',
},
# Allows overriding any of the uiparams with Javascript expressions
    # Useful for settings not covered by the aliases above whose values cannot be serialized to a JSON string
'ui_params_text': '''{
"operationsSorter" : (a, b) => a.get("path").localeCompare(b.get("path"))
}'''
}
Swagger(app)
@app.route('/', methods=['GET', 'POST'])
def main_route():
"""
Test Endpoint
---
tags:
- Test
parameters:
- name: data
in: formData
required: True
type: string
description: data to send
responses:
200:
description: data received successfully
404:
description: data not found in request form
"""
if 'data' not in request.form.keys():
return 'data not found in request form', 404
return 'data received: ' + str(request.form['data'])
if __name__ == '__main__':
app.run()
| {
"repo_name": "rochacbruno/flasgger",
"path": "examples/changelog_090.py",
"copies": "1",
"size": "3037",
"license": "mit",
"hash": 5998135994631770000,
"line_mean": 32.3736263736,
"line_max": 122,
"alpha_frac": 0.6542640764,
"autogenerated": false,
"ratio": 3.7867830423940148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49410471187940147,
"avg_score": null,
"num_lines": null
} |
# 09_proximity.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
import RPi.GPIO as GPIO
import time
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses the Capsense technique modelled on this:
# http://playground.arduino.cc/Main/CapacitiveSensor
# pin a is the send pin, pin b is the sense pin
a_pin = 18
b_pin = 23
led_pin = 24
threshold = 0
# setup the pin modes
GPIO.setup(a_pin, GPIO.OUT)
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(led_pin, GPIO.OUT)
# return the time taken for the sense pin to flip state as a result of
# the capacitive effect of being near the sense pin
def step():
GPIO.output(a_pin, False)
t1 = time.time()
while GPIO.input(b_pin):
pass
t2 = time.time()
time.sleep(0.1)
GPIO.output(a_pin, True)
time.sleep(0.1)
return (t2 - t1) * 1000000
# This function takes 10 readings, finds the largest, and sets the
# variable 'threshold' a little (15%) above it
def calibrate():
global threshold
print("Wait! Calibrating")
n = 10
maximum = 0
    for i in range(0, n):
reading = step()
if reading > maximum:
maximum = reading
threshold = maximum * 1.15
print(threshold)
print("Calibration Complete")
calibrate()
try:
while True:
reading = step() # take a reading
GPIO.output(led_pin, (reading > threshold)) # LED on if reading > threshold, otherwise off
finally:
print("Cleaning up")
GPIO.cleanup()
# You could get rid of the try: finally: code and just have the while loop
# and its contents. However, the try: finally: construct makes sure that
# when you CTRL-c the program to end it, all the pins are set back to
# being inputs. This helps protect your Pi from accidental shorts-circuits
# if something metal touches the GPIO pins. | {
"repo_name": "simonmonk/pi_starter_kit",
"path": "09_proximity.py",
"copies": "1",
"size": "1918",
"license": "mit",
"hash": -521023599936823000,
"line_mean": 27.2205882353,
"line_max": 98,
"alpha_frac": 0.6746611053,
"autogenerated": false,
"ratio": 3.412811387900356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4587472493200356,
"avg_score": null,
"num_lines": null
} |
#09_resistance.py
import RPi.GPIO as GPIO
import time, math
C = 0.33 # uF
R1 = 1000 # Ohms
GPIO.setmode(GPIO.BCM)
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken (uS) for the voltage on the capacitor to count as HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
t = charge_time()
discharge()
return t
# To reduce errors, do it n times and take the average.
def read_resistance():
n = 20
    total = 0
    for i in range(0, n):
total = total + analog_read()
t = total / float(n)
T = t * 0.632 * 3.3
r = (T / C) - R1
return r
try:
while True:
print(read_resistance())
time.sleep(0.5)
finally:
print("Cleaning up")
GPIO.cleanup() | {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "09_resistance.py",
"copies": "1",
"size": "1365",
"license": "mit",
"hash": -8874098190061959000,
"line_mean": 21.7666666667,
"line_max": 80,
"alpha_frac": 0.6293040293,
"autogenerated": false,
"ratio": 2.9609544468546636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40902584761546634,
"avg_score": null,
"num_lines": null
} |
# -*- coding: cp949 -*-
"""
web crawler
JungWon(postgame.tistory.com)
requirement : python 2.7.x
BeautifulSoup4
Read the target html page and cache it to a file.
proc_chkupdate() returns True if the newest title matches the cached copy;
otherwise the page is (re)saved and False is returned.
"""
import urllib2
from bs4 import BeautifulSoup
import os.path
class WebCrawler:
def __init__(self, url, title, html_file):
self.url = url
self.title = title
self.file_path = os.path.dirname(__file__) + '/' + html_file
def diff_file(self, html):
f = open(self.file_path, 'r')
is_same = self.compare_title(html, f.read())
f.close()
return is_same
def compare_title(self, url_html, file_html):
url_soup = BeautifulSoup(url_html)
file_soup = BeautifulSoup(file_html)
url_title = url_soup.find(class_=self.title).text
file_title = file_soup.find(class_=self.title).text
if url_title == file_title:
return True
else:
return False
def save_html_to_file(self, html):
f = open(self.file_path, 'w')
f.write(html)
f.close()
def proc_chkupdate(self):
url = urllib2.urlopen(self.url)
html = url.read()
if os.path.isfile(self.file_path) is False:
self.save_html_to_file(html)
return False
if self.diff_file(html) is False:
self.save_html_to_file(html)
return False
return True
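# Minimal usage sketch (added for illustration; the URL, the CSS class name and
# the cache file below are made-up placeholders, not values from the original project):
if __name__ == '__main__':
    crawler = WebCrawler('http://example.com/board', 'post-title', 'cached.html')
    unchanged = crawler.proc_chkupdate()
    print 'title unchanged' if unchanged else 'page (re)saved - new title or no cache yet'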
| {
"repo_name": "JungWon2/web_crawler",
"path": "web_crawler.py",
"copies": "1",
"size": "1468",
"license": "apache-2.0",
"hash": -4436739985696024600,
"line_mean": 21.9375,
"line_max": 68,
"alpha_frac": 0.575613079,
"autogenerated": false,
"ratio": 3.5288461538461537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9526334232846154,
"avg_score": 0.015625,
"num_lines": 64
} |
# -*- coding: utf-8 -*-
import json
import re
from uuid import uuid4
from datetime import datetime
from collections import defaultdict
from mongoengine.queryset import Q
from flask import Blueprint, current_app, jsonify, render_template, request
from flask_login import current_user, login_required
from flask_babel import gettext as _
from application.services.cart import entry_info_from_ids
from application.services.price import cal_order_price, FakeCart
import application.services.jobs as Jobs
import application.services.json_tmpl as Json
from configs.price import PRICE_FN
import application.models as Models
from application.utils import paginate
user = Blueprint('user', __name__, url_prefix='/api/users')
@user.route('/session_key', methods=['GET'])
def session_key():
return jsonify(message='OK', session_key=get_session_key())
@user.route('/permissions', methods=['GET'])
def permissions():
if not current_user.is_authenticated:
return jsonify(message="Failed")
roles = current_user.roles
return jsonify(message="OK", roles=roles)
@user.route('/coupons/by_entries', methods=['POST'])
@login_required
def coupon_by_entries():
data = request.json
entry_ids = data.get('entries')
if not entry_ids:
return jsonify(message='Failed', error=_(u'Please choose the item'))
entries_info = entry_info_from_ids(entry_ids)
order = FakeCart(entries_info,
user=current_user._get_current_object())
o = cal_order_price(order)
c_jsons = []
for c in current_user.wallet.consumable_coupons:
if c.is_expired:
continue
c_json = c.to_json()
c_json['can_apply'] = (c.coupon.is_effective() and
c.coupon.can_apply(o))
c_json['saving'] = PRICE_FN.ORDER_COUPON[c.coupon.coupon_type](
o, c.coupon)[1]
c_jsons.append(c_json)
return jsonify(message='OK',
consumable_coupons=c_jsons)
@user.route('/coupons/by_order', methods=['POST'])
@login_required
def coupon_by_order():
data = request.json
order_id = data.get('order_id')
if not order_id:
return jsonify(message='Failed', error=_(u'Please choose order'))
order = Models.Order.objects(id=order_id).first()
o = cal_order_price(order)
c_jsons = []
for c in current_user.wallet.consumable_coupons:
if c.is_expired:
continue
c_json = c.to_json()
c_json['can_apply'] = (c.coupon.is_effective() and
c.coupon.can_apply(o))
c_json['saving'] = PRICE_FN.ORDER_COUPON[c.coupon.coupon_type](
o, c.coupon)[1]
c_jsons.append(c_json)
return jsonify(message='OK',
consumable_coupons=c_jsons)
@user.route('/account/change_password', methods=['POST'])
@login_required
def change_password():
user = current_user
password = request.json.get('password', '')
password_confirm = request.json.get('password_confirm','')
if not password.isalnum():
        # password contains illegal characters
return jsonify(message='Failed',
error=_(u'Password contains illegal characters'))
if len(password) < 6:
        # password is too short
return jsonify(message='Failed', error=_(u'Password is too short'))
if password != password_confirm:
return jsonify(message='Failed', error=_(u'Password is inconsistent'))
user.account.password = password
user.save()
return jsonify(message='OK')
@user.route('/account/reset_password', methods=['POST'])
def reset_password():
    email = request.json.get('email')
user = Models.User.objects(account__email=email).first()
if user:
user.account.activation_key = str(uuid4())
user.save()
url = "http://m.maybi.cn/account/confirm_reset_password?activation_key=%s&email=%s" % \
(user.account.activation_key, user.account.email)
html = render_template('admin/user/_reset_password.html',
project=current_app.config['PROJECT'], username=user.name, url=url)
message = Message(subject=_('Reset your password in ')+ 'Maybi',
html=html, recipients=[user.account.email])
message.sender = 'notify@maybi.cn'
mail.send(message)
return jsonify(message="OK",desc=_('Please see your email for instructions on '
'how to access your account'))
else:
return jsonify(message="Failed", desc=_('Sorry, no user found for that email address'))
@user.route('/update_avatar', methods=['POST'])
@login_required
def update_avatar():
path = request.json.get('avatar_url')
if path:
url = "http://assets.maybi.cn/%s" % path
Jobs.image.make_thumbnails('maybi-img', path, url)
user = current_user._get_current_object()
user.avatar_url = url
user.save()
return jsonify(message='OK', user=Json.get_user_info(user))
@user.route('/update_username', methods=['POST'])
@login_required
def update_username():
username = request.json.get('username')
if username:
if len(username) >16:
return jsonify(message="Failed", error=_('Username is too long'))
user = current_user._get_current_object()
user.name = username
user.save()
return jsonify(message='OK', user=Json.get_user_info(user))
    return jsonify(message='Failed', error=u"Invalid parameters")
@user.route('/user_info/<user_id>', methods=['GET'])
def user_info(user_id):
user = Models.User.objects(id=user_id).first_or_404()
return jsonify(message='OK', user=Json.user_json(user))
@user.route('/follow/<follow_id>', methods=['GET'])
@login_required
def follow(follow_id):
follow_user = Models.User.objects(id=follow_id).first_or_404()
if follow_user.id == current_user.id:
return jsonify(message='Failed', error="Can not follow yourself")
current_user.follow(follow_user)
return jsonify(message='OK')
@user.route('/unfollow/<follow_id>', methods=['GET'])
@login_required
def unfollow(follow_id):
follow_user = Models.User.objects(id=follow_id).first_or_404()
current_user.unfollow(follow_user)
return jsonify(message='OK')
@user.route('/followers', methods=['GET'])
def user_followers():
args = request.args
user_id = args.get('user_id')
page = int(args.get('page', 0))
per_page = int(args.get('per_page', 20))
user = Models.User.objects(id=user_id).first_or_404()
followers = user.followers
users = paginate(followers, page, per_page)
return jsonify(message='OK', users=[Json.user_json(u) for u in users])
@user.route('/followings', methods=['GET'])
def user_followings():
args = request.args
user_id = args.get('user_id')
page = int(args.get('page', 0))
per_page = int(args.get('per_page', 20))
user = Models.User.objects(id=user_id).first_or_404()
followings = user.followings
users = paginate(followings, page, per_page)
return jsonify(message='OK', users=[Json.user_json(u) for u in users])
| {
"repo_name": "seasonstar/bibi",
"path": "application/controllers/website/user.py",
"copies": "1",
"size": "7044",
"license": "apache-2.0",
"hash": -8457694951861626000,
"line_mean": 31.7476635514,
"line_max": 95,
"alpha_frac": 0.6454052511,
"autogenerated": false,
"ratio": 3.502248875562219,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9636052988150614,
"avg_score": 0.002320227702321004,
"num_lines": 214
} |
##0. CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
##1. CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
##2. CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file
##3. CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
##4. CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
##5. CV_CAP_PROP_FPS Frame rate.
##6. CV_CAP_PROP_FOURCC 4-character code of codec.
##7. CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
##8. CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
##9. CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
##10. CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
##11. CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
##12. CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
##13. CV_CAP_PROP_HUE Hue of the image (only for cameras).
##14. CV_CAP_PROP_GAIN Gain of the image (only for cameras).
##15. CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
##16. CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
##17. CV_CAP_PROP_WHITE_BALANCE Currently unsupported
##18. CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
import numpy as np
import cv2
from tkinter import *
root = Tk()
root.title("video")
cap = cv2.VideoCapture(0)
#cap.set(3, 720)  # CAP_PROP_FRAME_WIDTH
#cap.set(4, 720)  # CAP_PROP_FRAME_HEIGHT
#cap.set(5, 24)   # CAP_PROP_FPS
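# Reading a property back works the same way (added example; the indices follow
# the CAP_PROP table at the top of this file):
print(cap.get(3), cap.get(4))  # current frame width and height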
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',frame)
cv2.imshow('gray',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| {
"repo_name": "AntonioQu/pythontest",
"path": "opencv_video.py",
"copies": "1",
"size": "1960",
"license": "apache-2.0",
"hash": 775982421892712200,
"line_mean": 37.2,
"line_max": 126,
"alpha_frac": 0.7071428571,
"autogenerated": false,
"ratio": 2.912332838038633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41194756951386324,
"avg_score": null,
"num_lines": null
} |
# 0 == down, 1 == right, 2 == diag
import filecmp
import sys
sys.setrecursionlimit(5000)
def getMax(s, i, j, match):
a = s[i - 1][j]
b = s[i][j - 1]
c = s[i - 1][j - 1] + match
if (a >= b and a >= c):
return (-1, 0)
elif (c >= a and c >= b):
return (-1, -1)
elif (b >= a and b >= c):
return (0, -1)
def LCSBackTrack(s1, s2):
s = [[0 for y in range(len(s2) + 1)] for x in range((len(s1) + 1))]
backTrack = [[0 for y in range(len(s2) + 1)] for x in range((len(s1) + 1))]
v = len(s1)
w = len(s2)
for i in range(0, v+1):
s[i][0] = 0
for j in range(0, w+1):
s[0][j] = 0
for j in range(1, w+1):
for i in range(1, v+1):
if (s1[i-1] == s2[j-1]):
match = 1
else:
match = 0
tup = getMax(s, i, j, match)
s[i][j] = s[i + tup[0]][j + tup[1]]
if (tup == (-1, -1)):
s[i][j] += match
if (tup == (-1, 0)):
backTrack[i][j] = 0
elif (tup == (0, -1)):
backTrack[i][j] = 1
elif (tup == (-1, -1) and match == 1):
backTrack[i][j] = 2
return backTrack
# where i and j are len(s1) and len(s2)
buf = ""
def OutputLCS(backTrack, v, i, j):
global buf
if (i == 0 or j == 0):
return
elif (backTrack[i][j] == 0):
OutputLCS(backTrack, v, i - 1, j)
elif (backTrack[i][j] == 1):
OutputLCS(backTrack, v, i, j - 1)
elif (backTrack[i][j] == 2):
OutputLCS(backTrack, v, i - 1, j - 1)
buf = buf + v[i-1]
s1 = "CAGCGTCCTTCCCACGCCTCAGCCACCCGGCATCGCGCGAGCTGTCAGCTGCTCACATGTATGCTCCTTGAAGAGGTAGGGCTTTTTATATTCACACATAGAAGTGGCTATAAGACAAGCATAGCTCGCCCCTTAGCCGACATCGATTTCTTGCGAGGGCTTACACTGGCGGGGCGAATCGCCAGGCGGACCCCGAAATGCTTTAGGCTGGTCGTCAATTTGCGGTCAAGCCCCGACTTTGTCTACATTTGTAACGCTTTCCCGTCAAGTTCCGGGAGCTGCCTGTAACGACCGGGCCAGCAGCGCGACGAGGTGCGTCCCACCACTCCCATCATGCGAATGGCCTTCCGGTCATGTACCGGATAAACCGGAGCTATGTGGGATAACATAGATTACCCCGGCAGGAGTCTCGAAATACTAAGGTTGGGTGCTGCGCTACAGATAATACCACAGATCACTACGGTGTGGTGGCATTAAAGTGAGTGGCGGTGACCTGTTCTGTTTACGAGTTTTAAATCCACGTAGACGATCTAATGCTTTGTGCTCGTGCAGTGTTTTATTGCTGATCCCCCCGCGCCGGTTGGCTCAAAGGCGAGTCTGATTGATCCCCTTACTACCTACGGAACACACCTACTATTTCCGTATCGGAGTAAACGCTTTACCTAAGGAATCACCTTTTCTCAAGAACTGTGGGATGGTGCCCGCACGTCGGGTATACACTAATTCAATGGCTGTCTAAGCCCACTCCACCAGCGAACTCGAATAGTTTAGAATTCTCCTGGGGTCTGCACAACGATGCACCATTGGGCGATATCAGGTAACTGACCACATGACTTACGCGGTTGGGATACAACACAACGGTGTGTCTGTGCGCACAGCTAGTATATCCGTGCGACTCAACCGTAATGTCACGT"
s2 = "CGCGCCCAGGCCATGTCATGTTATTCCAGCTTTCTTCTACAAACAGGCTATCTATATCACTGATTTAAGGCCGATGTGATCTGATTGAGTTTAACCCTCAGCCGCGAAAAAAAGCCAGACCCTCAGATGGAGTGTCTCGGGGTGTTTAAGCGGCATTTAAGACCAGATAACCTGATAGACCCAAGATACTGATAGCTTGAAGGCTACGGCGTTAACGCCATCAATACTCGCAGGAAAATTCCCAATCCACACCTTGAGGAAGCTGCTAGGCTCTGCCTGTAAACCGTCAGGATGCCATCAATCCCATCTGCTTGCGCCCTTCATGTGTTTCCCGGGGTGCTGTGGCCTACCCGGCTGACTCGTAGCTGAATCAATGTGTGTGAAGGTCACAGGCGTCGCGGCTCGAACAGGACTTGAGCGCGACGGGTTTAGATAGGGACAGCTAGATTGTGAGCACGCAGTATTAACTAACAATTGTCACAACGCCGGAACAAAGTAACGCCCAGGGGTCTGTTGGAGGAGCGCTTTTGTAGCCAACCTAGTATCGATGCGCCCACTGCAGACCTGCTCGATTGAGTCTGGCAGGCGATCGACGTCCCTTTCCAAATATCTTTGCCAATTGTAAGGGCGCTCACCTTGTAATTCCTGGTTTGTACTGGATGTACGGTCTTATCTGGGCTACAACATGTGAGGCGGCTTTCCTTGAGATAACACGCACGAAGCACGCGGATTAAGCTTCGCTCAATCAGGGGAAAAACTTCATGCCTCTATACCCTTTGTACTGATCGCTTTCGGGGACACTAATAGAGAGCTGGCTTCATGGGCCACCAGCCGCTACAGGGACCTCGTCCAAAGCTCTGATTC"
OutputLCS(LCSBackTrack(s1,s2), s1, len(s1) , len(s2))
f = open("output.txt", "w")
f.write(buf)
f.close()
print buf
# print filecmp.cmp('output.txt', 'proper_output.txt')
| {
"repo_name": "Larkenx/coursework",
"path": "bioinformatics/rosalind-problems/LCS/LCS.py",
"copies": "1",
"size": "3601",
"license": "mit",
"hash": 8117486821208298000,
"line_mean": 46.3815789474,
"line_max": 921,
"alpha_frac": 0.7173007498,
"autogenerated": false,
"ratio": 2.579512893982808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3796813643782808,
"avg_score": null,
"num_lines": null
} |
0. eight = 8
1. b = eight
2. print(b)
3. if x == 'a' or x == 'b':
return True
else:
return False
4. if x > 10 and x%2 != 0:
return True
5. def param(n):
return(n)
6. def triple_print(n):
x = 0
while x != 3:
print(n)
x += 1
7. def wage():
h = eval(input("Enter Hours: "))
r = eval(input("Enter Rate: "))
p = h*r
print("Pay: ", p)
8. str1 + str2
#This will return a temporary copy,
#to retain the concatenated value, see below
str3 = str1 + str2
9. str1[4], str1[-1]
10. str1 * 3
11. lst = [0,1,2,3,4,5,6,7,8,9,10]
12. lst.append('hi')
13. lst.remove(4)
14. 5 in lst
15. for elem in lst[:10]:
print(elem)
16. for elem in lst:
print(elem)
17. for elem in lst:
print(elem * 2)
18. x = 0
    while x == 0:
print(x)
x += 10
19. def check_empty(n):
if n == None:
return None
20. ('a',)
21. ('a','b')
22. Actor1 = ('Dicaprio',43)
Name, Age = Actor1
23. dct = {}
24. dct.update({'one':1,'two':2,'three':3})
25. dct['two'] = 'dos'
26. del dct['two']
27. for key in dct:
print(key)
28. for val in dct.values():
print(val)
29. for key, val in dct.items():
print(key, val)
30. for pairs in dct.items():
print(pairs)
31. #Dictionaries are mutable unlike tuples and can be useful for
#counting and cataloguing occurrences of words/integers in files
32. #Mutability and Immutability pertain to the nature of various datatypes in programming languages.
#Immutable types such as strings, integers and tuples cannot be altered once created; to "change"
#one you have to build a modified copy. Mutable items such as lists and dictionaries (the values
#are mutable, the keys must be immutable) can be altered in place without the need to make copies.
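#For illustration (added example):
nums = [1, 2, 3]
nums[0] = 99        # fine - lists are mutable and change in place
word = "cat"
word = word + "s"   # builds a brand new string; the original "cat" is never altered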
33. #Examples of homogeneous datatypes are strings where characters/integers enclosed in apostrophes/quotes
#are all the same type. Heterogeneous datatypes include lists and dictionaries where combinations of integers, strings
#and even nested lists and dictionaries can be inserted and removed
34. #Overflow is related to the set capacity of variables and objects created in programming, for example
#in a range of numbers from 0 to 4 there are only 5 possible numbers and when looping through the range
#the loop would be unable to continue beyond/overflow past the number 4
35. #Abstraction is the process in programming/mathematics/logic referring to reasoning in a manner that deals with
#focusing on the basic components of a problem and breaking them down into their simplest parts in order to
#understand the process step by step
36. # Modularization is a concept in programming that relates to elegantly and efficiently breaking down a program
# into numerous small and functional components.
#To modularize for example 1+1,although it would be excessive one could make four functions
#to process it, the first function to intake the first integer (1),
#the second function to intake the second integer (1),
#a third function to arithmetically add them (1+1)
#and a fourth function to return the sum (2)
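#A small sketch of that 1+1 example (added for illustration; the function names are invented):
def first_operand(): return 1
def second_operand(): return 1
def add(a, b): return a + b
def report(total): return total
report(add(first_operand(), second_operand()))  # evaluates to 2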
37. string:IM/HO
list: M/HE
tuple: IM/HE
dictionary: M/HE
38. #Printing displays output to the user, whereas return hands a result back to the caller
#so it can be fed into other functions without displaying the intermediate values; only the
#final result is shown when print is used at the end of the program
| {
"repo_name": "nsrchemie/code_guild",
"path": "wk2_quiz.py",
"copies": "1",
"size": "3436",
"license": "mit",
"hash": -3813929550033168000,
"line_mean": 35.1684210526,
"line_max": 119,
"alpha_frac": 0.7037252619,
"autogenerated": false,
"ratio": 3.3230174081237913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4526742670023791,
"avg_score": null,
"num_lines": null
} |
# 0 High Card: Highest value card.
# 1 One Pair: Two cards of the same value.
# 2 Two Pairs: Two different pairs.
# 3 Three of a Kind: Three cards of the same value.
# 4 Straight: All cards are consecutive values.
# 5 Flush: All cards of the same suit.
# 6 Full House: Three of a kind and a pair.
# 7 Four of a Kind: Four cards of the same value.
# 8 Straight Flush: All cards are consecutive values of same suit.
# 9 Royal Flush: Ten, Jack, Queen, King, Ace, in same suit.
cardRanks = {'2': 0, '3': 1, '4': 2, '5': 3, '6': 4,
'7': 5, '8': 6, '9': 7, 'T': 8, 'J': 9,
'Q': 10, 'K': 11, 'A': 12}
def faces(hand) :
return [i[0] for i in hand]
def suits(hand) :
return [i[1] for i in hand]
def straight(hand, royal = False) :
cards = [cardRanks[i] for i in faces(hand)]
cards.sort()
#print cards
last = cards[0]
for card in cards[1:] :
#print "%d - %d = %d" % (card, last, card - last)
if card - last != 1 : return False
last = card
return not royal or cards[0] == 8
def flush(hand) :
cards = suits(hand)
return cards.count(cards[0]) == 5
def freqs(hand) :
faceList = faces(hand)
return [faceList.count(i) for i in faceList]
def pairs(freqCount) :
return freqCount.count(2)/2
def handRank(hand) :
isFlush = flush(hand)
isStraight = straight(hand, True)
if isFlush and isStraight : return 9
isStraight = straight(hand)
if isFlush and isStraight : return 8
freqCounts = freqs(hand)
if 4 in freqCounts : return 7
if 2 in freqCounts and 3 in freqCounts : return 6
if isFlush : return 5
if isStraight : return 4
if 3 in freqCounts : return 3
pairCount = pairs(freqCounts)
return pairCount
def highFullHouse(hand) :
faceList = faces(hand)
return faceList[freqs(hand).index(3)]
def highPair(hand) :
faceList = faces(hand)
freqList = freqs(hand)
return max([faceList[i] for i in range(0,5) if freqList[i] == 2])
def highCard(one, two) :
rankOne = [cardRanks[i] for i in faces(one)]
rankTwo = [cardRanks[i] for i in faces(two)]
rankOne.sort()
rankTwo.sort()
i = 4
while i > 0 and rankOne[i] == rankTwo[i] : i -= 1
return rankOne[i] > rankTwo[i]
def splitHand(hand) :
return hand.split(' ')
def handWins(one, two) :
handOne = handRank(one)
handTwo = handRank(two)
if handOne != handTwo :
return handOne > handTwo
if handOne == 6 :
oneRank = cardRanks[highFullHouse(one)]
twoRank = cardRanks[highFullHouse(two)]
if oneRank != twoRank :
return oneRank > twoRank
if handOne == 2 or handOne == 1 :
oneRank = cardRanks[highPair(one)]
twoRank = cardRanks[highPair(two)]
if oneRank != twoRank :
return oneRank > twoRank
print "Tied %d! %s %s" % (handOne, ' '.join(one), ' '.join(two)[:-1])
return highCard(one, two)
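# Quick self-check (added example, not part of the original script): a royal
# flush in hearts should score 9 on the 0-9 scale listed at the top of the file.
print handRank(splitHand("TH JH QH KH AH"))  # expect 9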
poker = open("poker.txt")
print sum(handWins(splitHand(hand[:14]), splitHand(hand[15:])) for hand in poker)
poker.close()
# sample input line (two hands): 2H 2D 4C 4D 4S 3C 3D 3S 9S 9D
| {
"repo_name": "alanbly/ProjectEuler",
"path": "54.py",
"copies": "1",
"size": "2925",
"license": "mit",
"hash": -1277487043613614000,
"line_mean": 24,
"line_max": 75,
"alpha_frac": 0.6437606838,
"autogenerated": false,
"ratio": 2.8179190751445087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8819226047963977,
"avg_score": 0.028490742196106207,
"num_lines": 117
} |
# 0 - iframe
# 1 - thumbs ( , as split)
# 2 - title
# 3 - tag
# 4 - category
# 5 - pornstar
# 6 - duration
import csv
from pymongo import MongoClient
# mongo connection
client = MongoClient()
# selecting database
db = client.amateurxxx
# collections
categoriesC = db.categories
videosC = db.videos
# read csv file
with open('YouPorn-Embed-Videos-Dump.csv', newline='', encoding='utf-8') as csvfile:
videoReader = csv.reader(csvfile, delimiter='|', quoting=csv.QUOTE_NONE)
for row in videoReader:
isSaved = videosC.find_one({'title': row[2].rstrip()})
if not isSaved:
iframe = row[0]
if row[0].startswith('"') and row[0].endswith('"'):
iframe = str(row[0][1:-1])
iframeParts = iframe.split('<br />')
            iframe = iframeParts[0].encode('utf-8')  # store the embed markup as UTF-8 bytes
thumbParts = row[1].split(',')
thumb = thumbParts[0]
if (thumbParts[0] == None):
thumb = thumbParts[1]
title = row[2].rstrip()
if title.startswith('"') and title.endswith('"'):
title = str(title[1:-1])
videoElem = {
'title' : title,
'images' : thumb,
'iframe' : iframe,
'views' : 0,
'likes' : 0,
'dislikes' : 0
}
toReplace = [
",", "`", "'", "/", "!", "\"", "(", ")", "+", "&", "\\", "<", ">", ".", ";", ":",
"?", "]", "[", "{", "}", "^", "%", "*", "$", "#", "@", "~", '-', '´'
]
for character in toReplace:
row[2] = row[2].replace(character, '')
url = row[2].replace(" ", "-").lower()
videoElem['url'] = url
tags = ''
if 4 < len(row) :
tags = row[4]
if tags.startswith('"') and tags.endswith('"'):
tags = tags[1:-1]
videoElem['keywords'] = tags
videosC.save(videoElem)
print('Done!')
| {
"repo_name": "vacu/redtube-import",
"path": "youporn_import.py",
"copies": "1",
"size": "2102",
"license": "mit",
"hash": -8469546705492532000,
"line_mean": 26.2857142857,
"line_max": 97,
"alpha_frac": 0.4388386483,
"autogenerated": false,
"ratio": 3.7317939609236235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46706326092236233,
"avg_score": null,
"num_lines": null
} |
# 0 loop_fix branch
# 1 - Import library
import pygame
from pygame.locals import *
import math
import random
# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
pygame.mixer.init()
# 3 - Load images
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
badguyimg=badguyimg1
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
# 3.1 - Load audio
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.set_volume(0.25)
# 4 - keep looping through
def main():
keys = [False, False, False, False]
playerpos=[100,100]
acc=[0,0]
arrows=[]
badtimer=100
badtimer1=0
badguys=[[640,100]]
healthvalue=194
timestart = pygame.time.get_ticks()
pygame.mixer.music.play(-1, 0.0)
num_arrows = 100
running = 1
exitcode = 0
while running:
badtimer-=1
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the screen elements
for x in range(width/grass.get_width()+1):
for y in range(height/grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
screen.blit(castle,(0,30))
screen.blit(castle,(0,135))
screen.blit(castle,(0,240))
screen.blit(castle,(0,345 ))
# 6.1 - Set player position and rotation
position = pygame.mouse.get_pos()
angle = math.atan2(position[1]-(playerpos[1]+32),position[0]-(playerpos[0]+26))
playerrot = pygame.transform.rotate(player, 360-angle*57.29)
playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
screen.blit(playerrot, playerpos1)
# 6.2 - Draw arrows
for bullet in list(arrows):
velx=math.cos(bullet[0])*10
vely=math.sin(bullet[0])*10
bullet[1]+=velx
bullet[2]+=vely
if bullet[1]<-64 or bullet[1]>640 or bullet[2]<-64 or bullet[2]>480:
arrows.remove(bullet)
if num_arrows <= 0:
running = 0
for projectile in arrows:
arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
# 6.3 - Draw badgers
if badtimer==0:
badguys.append([640, random.randint(50,430)])
badtimer=100-(badtimer1*2)
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
for badguy in list(badguys):
if badguy[0]<-64:
badguys.remove(badguy)
badguy[0]-=7
# 6.3.1 - Attack castle
badrect=pygame.Rect(badguyimg.get_rect())
badrect.top=badguy[1]
badrect.left=badguy[0]
if badrect.left<64:
hit.play()
healthvalue -= random.randint(5,20)
badguys.remove(badguy)
#6.3.2 - Check for collisions
for bullet in list(arrows):
bullrect=pygame.Rect(arrow.get_rect())
bullrect.left=bullet[1]
bullrect.top=bullet[2]
if badrect.colliderect(bullrect):
enemy.play()
acc[0]+=1
badguys.remove(badguy)
arrows.remove(bullet)
# 6.3.3 - Next bad guy
for badguy in badguys:
screen.blit(badguyimg, badguy)
# 6.4 - Draw clock
font = pygame.font.Font(None, 24)
time_remaining = 90000 - (pygame.time.get_ticks() - timestart)
survivedtext = font.render(str((time_remaining / 60000))+":"+str(time_remaining/1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright=[635,5]
screen.blit(survivedtext, textRect)
arrowstext = font.render("Remaining arrows: " + str(num_arrows), True, (0,0,0))
arrowsTextRect = arrowstext.get_rect()
arrowsTextRect.topright = [635, 20]
screen.blit(arrowstext, arrowsTextRect)
# 6.5 - Draw health bar
screen.blit(healthbar, (5,5))
for health1 in range(healthvalue):
screen.blit(health, (health1+8,8))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
for event in pygame.event.get():
# check if the event is the X button
if event.type==pygame.QUIT:
# if it is quit the game
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key==K_w:
keys[0]=True
elif event.key==K_a:
keys[1]=True
elif event.key==K_s:
keys[2]=True
elif event.key==K_d:
keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
keys[0]=False
elif event.key==pygame.K_a:
keys[1]=False
elif event.key==pygame.K_s:
keys[2]=False
elif event.key==pygame.K_d:
keys[3]=False
if event.type==pygame.MOUSEBUTTONDOWN:
shoot.play()
position=pygame.mouse.get_pos()
acc[1]+=1
arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),playerpos1[0]+32,playerpos1[1]+32])
num_arrows -= 1
# 9 - Move player
if keys[0]:
playerpos[1]-=5
elif keys[2]:
playerpos[1]+=5
if keys[1]:
playerpos[0]-=5
elif keys[3]:
playerpos[0]+=5
#10 - Win/Lose check
timenow = pygame.time.get_ticks()
if timenow - timestart >=90000:
running=0
exitcode=1
if healthvalue<=0:
running=0
exitcode=0
if acc[1]!=0:
accuracy=round(acc[0]*1.0/acc[1]*100,2)
else:
accuracy=0
# 11 - Win/lose display
pygame.font.init()
font = pygame.font.Font(None, 24)
    elapsedtime = pygame.time.get_ticks() - timestart  # milliseconds; divided by 1000 below
game_over_message = ""
if num_arrows <= 0:
game_over_message = "You have run out of arrows!!! "
game_over_message += "Score: "+str(accuracy)+"% (Accuracy) * "+str(elapsedtime/1000)+" (Time) = "+str(int(accuracy*elapsedtime/1000))
text = font.render(game_over_message, True, (0, 255, 0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
if exitcode==0:
screen.blit(gameover, (0,0))
else:
screen.blit(youwin, (0,0))
screen.blit(text, textRect)
pygame.display.flip()
pygame.mixer.music.fadeout(1500)
pygame.time.delay(1500)
# draw replay/exit buttons
global textx, texty, textx_size, texty_size
global text2x, text2y, text2x_size, text2y_size
bigfont = pygame.font.Font(None, 80)
text = bigfont.render('Play Again', 13, (0, 255, 0))
textx = width / 2 - text.get_width() / 2
texty = height / 4 - text.get_height() / 2
textx_size = text.get_width()
texty_size = text.get_height()
pygame.draw.rect(screen, (0, 255, 255), ((textx - 5, texty - 5),
(textx_size + 10, texty_size +
10)))
screen.blit(text, (width / 2 - text.get_width() / 2,
height / 4 - text.get_height() / 2))
text2 = bigfont.render('Exit', 13, (255, 0, 0))
text2x = width / 2 - text2.get_width() / 2
text2y = height * 3 / 4 - text2.get_height() / 2
text2x_size = text2.get_width()
text2y_size = text2.get_height()
pygame.draw.rect(screen, (0, 255, 255), ((text2x - 5, text2y - 5),
(text2x_size + 10, text2y_size +
10)))
screen.blit(text2, (width / 2 - text2.get_width() / 2,
height * 3 / 4 - text2.get_height() / 2))
pygame.display.flip()
main()
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
x, y = event.pos
if x >= textx - 5 and x <= textx + textx_size + 5:
if y >= texty - 5 and y <= texty + texty_size + 5:
main()
break
if x >= text2x - 5 and x <= text2x + text2x_size + 5:
if y >= text2y - 5 and y <= text2y + text2y_size + 5:
pygame.quit()
exit(0)
| {
"repo_name": "swapagarwal/bunnies-and-badgers",
"path": "Bunnies&Badgers.py",
"copies": "1",
"size": "9564",
"license": "mit",
"hash": -7625053873376202000,
"line_mean": 36.0697674419,
"line_max": 140,
"alpha_frac": 0.5420326223,
"autogenerated": false,
"ratio": 3.3652357494722027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4407268371772203,
"avg_score": null,
"num_lines": null
} |
"""0MQ authentication related functions and classes."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import datetime
import glob
import io
import os
import zmq
from zmq.utils.strtypes import bytes, unicode, b, u
_cert_secret_banner = u("""# **** Generated on {0} by pyzmq ****
# ZeroMQ CURVE **Secret** Certificate
# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions.
""")
_cert_public_banner = u("""# **** Generated on {0} by pyzmq ****
# ZeroMQ CURVE Public Certificate
# Exchange securely, or use a secure mechanism to verify the contents
# of this file after exchange. Store public certificates in your home
# directory, in the .curve subdirectory.
""")
def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'):
"""Create a certificate file"""
if isinstance(public_key, bytes):
public_key = public_key.decode(encoding)
if isinstance(secret_key, bytes):
secret_key = secret_key.decode(encoding)
with io.open(key_filename, 'w', encoding='utf8') as f:
f.write(banner.format(datetime.datetime.now()))
f.write(u('metadata\n'))
if metadata:
for k, v in metadata.items():
if isinstance(k, bytes):
k = k.decode(encoding)
if isinstance(v, bytes):
v = v.decode(encoding)
f.write(u(" {0} = {1}\n").format(k, v))
f.write(u('curve\n'))
f.write(u(" public-key = \"{0}\"\n").format(public_key))
if secret_key:
f.write(u(" secret-key = \"{0}\"\n").format(secret_key))
def create_certificates(key_dir, name, metadata=None):
"""Create zmq certificates.
Returns the file paths to the public and secret certificate files.
"""
public_key, secret_key = zmq.curve_keypair()
base_filename = os.path.join(key_dir, name)
secret_key_file = "{0}.key_secret".format(base_filename)
public_key_file = "{0}.key".format(base_filename)
now = datetime.datetime.now()
_write_key_file(public_key_file,
_cert_public_banner.format(now),
public_key)
_write_key_file(secret_key_file,
_cert_secret_banner.format(now),
public_key,
secret_key=secret_key,
metadata=metadata)
return public_key_file, secret_key_file
def load_certificate(filename):
"""Load public and secret key from a zmq certificate.
Returns (public_key, secret_key)
If the certificate file only contains the public key,
secret_key will be None.
If there is no public key found in the file, ValueError will be raised.
"""
public_key = None
secret_key = None
if not os.path.exists(filename):
raise IOError("Invalid certificate file: {0}".format(filename))
with open(filename, 'rb') as f:
for line in f:
line = line.strip()
if line.startswith(b'#'):
continue
if line.startswith(b'public-key'):
public_key = line.split(b"=", 1)[1].strip(b' \t\'"')
if line.startswith(b'secret-key'):
secret_key = line.split(b"=", 1)[1].strip(b' \t\'"')
if public_key and secret_key:
break
if public_key is None:
raise ValueError("No public key found in %s" % filename)
return public_key, secret_key
def load_certificates(directory='.'):
"""Load public keys from all certificates in a directory"""
certs = {}
if not os.path.isdir(directory):
raise IOError("Invalid certificate directory: {0}".format(directory))
# Follow czmq pattern of public keys stored in *.key files.
glob_string = os.path.join(directory, "*.key")
cert_files = glob.glob(glob_string)
for cert_file in cert_files:
public_key, _ = load_certificate(cert_file)
if public_key:
certs[public_key] = True
return certs
__all__ = ['create_certificates', 'load_certificate', 'load_certificates']
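# --- Hedged usage sketch (added, not part of the original module): create a CURVE
# keypair on disk and read it back with the helpers above. The temp directory and the
# "server" name are illustrative, and this assumes libzmq was built with CURVE support.
if __name__ == "__main__":
    import tempfile
    demo_dir = tempfile.mkdtemp()
    public_file, secret_file = create_certificates(demo_dir, "server",
                                                   metadata={"role": "demo"})
    # the *.key_secret file carries both keys; the *.key file carries only the public key
    public_key, secret_key = load_certificate(secret_file)
    print(public_file, secret_file)
    print(load_certificates(demo_dir))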
| {
"repo_name": "lancezlin/ml_template_py",
"path": "lib/python2.7/site-packages/zmq/auth/certs.py",
"copies": "24",
"size": "4179",
"license": "mit",
"hash": 5541343095134847000,
"line_mean": 32.1666666667,
"line_max": 104,
"alpha_frac": 0.6018186169,
"autogenerated": false,
"ratio": 3.7750677506775068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011368102796674228,
"num_lines": 126
} |
"""0MQ Constant names"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
# dictionaries of constants new or removed in particular versions
new_in = {
(2,2,0) : [
'RCVTIMEO',
'SNDTIMEO',
],
(3,2,2) : [
# errnos
'EMSGSIZE',
'EAFNOSUPPORT',
'ENETUNREACH',
'ECONNABORTED',
'ECONNRESET',
'ENOTCONN',
'ETIMEDOUT',
'EHOSTUNREACH',
'ENETRESET',
# ctx opts
'IO_THREADS',
'MAX_SOCKETS',
'IO_THREADS_DFLT',
'MAX_SOCKETS_DFLT',
# socket opts
'IPV4ONLY',
'LAST_ENDPOINT',
'ROUTER_BEHAVIOR',
'ROUTER_MANDATORY',
'FAIL_UNROUTABLE',
'TCP_KEEPALIVE',
'TCP_KEEPALIVE_CNT',
'TCP_KEEPALIVE_IDLE',
'TCP_KEEPALIVE_INTVL',
'DELAY_ATTACH_ON_CONNECT',
'XPUB_VERBOSE',
# msg opts
'MORE',
'EVENT_CONNECTED',
'EVENT_CONNECT_DELAYED',
'EVENT_CONNECT_RETRIED',
'EVENT_LISTENING',
'EVENT_BIND_FAILED',
'EVENT_ACCEPTED',
'EVENT_ACCEPT_FAILED',
'EVENT_CLOSED',
'EVENT_CLOSE_FAILED',
'EVENT_DISCONNECTED',
'EVENT_ALL',
],
(4,0,0) : [
# socket types
'STREAM',
# socket opts
'IMMEDIATE',
'ROUTER_RAW',
'IPV6',
'MECHANISM',
'PLAIN_SERVER',
'PLAIN_USERNAME',
'PLAIN_PASSWORD',
'CURVE_SERVER',
'CURVE_PUBLICKEY',
'CURVE_SECRETKEY',
'CURVE_SERVERKEY',
'PROBE_ROUTER',
'REQ_RELAXED',
'REQ_CORRELATE',
'CONFLATE',
'ZAP_DOMAIN',
# security
'NULL',
'PLAIN',
'CURVE',
# events
'EVENT_MONITOR_STOPPED',
],
(4,1,0) : [
# ctx opts
'SOCKET_LIMIT',
'THREAD_PRIORITY',
'THREAD_PRIORITY_DFLT',
'THREAD_SCHED_POLICY',
'THREAD_SCHED_POLICY_DFLT',
# socket opts
'ROUTER_HANDOVER',
'TOS',
'IPC_FILTER_PID',
'IPC_FILTER_UID',
'IPC_FILTER_GID',
'CONNECT_RID',
'GSSAPI_SERVER',
'GSSAPI_PRINCIPAL',
'GSSAPI_SERVICE_PRINCIPAL',
'GSSAPI_PLAINTEXT',
'HANDSHAKE_IVL',
'XPUB_NODROP',
'SOCKS_PROXY',
# msg opts
'SRCFD',
'SHARED',
# security
'GSSAPI',
],
(4,2,0): [
# polling
'POLLPRI',
],
(4,2,3): [
'ROUTING_ID',
'CONNECT_ROUTING_ID',
],
(4,3,0): [
# context options
'MSG_T_SIZE',
'THREAD_AFFINITY_CPU_ADD',
'THREAD_AFFINITY_CPU_REMOVE',
'THREAD_NAME_PREFIX',
# socket options
'GSSAPI_PRINCIPAL_NAMETYPE',
'GSSAPI_SERVICE_PRINCIPAL_NAMETYPE',
'BINDTODEVICE',
# GSSAPI principal name types
'GSSAPI_NT_HOSTBASED',
'GSSAPI_NT_USER_NAME',
'GSSAPI_NT_KRB5_PRINCIPAL',
# events
'EVENT_HANDSHAKE_FAILED_NO_DETAIL',
'EVENT_HANDSHAKE_SUCCEEDED',
'EVENT_HANDSHAKE_FAILED_PROTOCOL',
'EVENT_HANDSHAKE_FAILED_AUTH',
'PROTOCOL_ERROR_ZMTP_UNSPECIFIED',
'PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND',
'PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE',
'PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME',
'PROTOCOL_ERROR_ZMTP_INVALID_METADATA',
'PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC',
'PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH',
'PROTOCOL_ERROR_ZAP_UNSPECIFIED',
'PROTOCOL_ERROR_ZAP_MALFORMED_REPLY',
'PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID',
'PROTOCOL_ERROR_ZAP_BAD_VERSION',
'PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE',
'PROTOCOL_ERROR_ZAP_INVALID_METADATA',
]
}
draft_in = {
(4,2,0): [
# socket types
'SERVER',
'CLIENT',
'RADIO',
'DISH',
'GATHER',
'SCATTER',
'DGRAM',
# ctx options
'BLOCKY',
# socket options
'XPUB_MANUAL',
'XPUB_WELCOME_MSG',
'STREAM_NOTIFY',
'INVERT_MATCHING',
'HEARTBEAT_IVL',
'HEARTBEAT_TTL',
'HEARTBEAT_TIMEOUT',
'XPUB_VERBOSER',
'CONNECT_TIMEOUT',
'TCP_MAXRT',
'THREAD_SAFE',
'MULTICAST_MAXTPDU',
'VMCI_BUFFER_SIZE',
'VMCI_BUFFER_MIN_SIZE',
'VMCI_BUFFER_MAX_SIZE',
'VMCI_CONNECT_TIMEOUT',
'USE_FD',
],
(4,2,4): [
# socket options
'ZAP_ENFORCE_DOMAIN',
'LOOPBACK_FASTPATH',
'METADATA',
'ZERO_COPY_RECV',
],
(4,3,0): [
# socket options
'ROUTER_NOTIFY',
'MULTICAST_LOOP',
'NOTIFY_CONNECT',
'NOTIFY_DISCONNECT',
],
}
removed_in = {
(3,2,2) : [
'UPSTREAM',
'DOWNSTREAM',
'HWM',
'SWAP',
'MCAST_LOOP',
'RECOVERY_IVL_MSEC',
]
}
# collections of zmq constant names based on their role
# base names have no specific use
# opt names are validated in get/set methods of various objects
base_names = [
# base
'VERSION',
'VERSION_MAJOR',
'VERSION_MINOR',
'VERSION_PATCH',
'NOBLOCK',
'DONTWAIT',
'POLLIN',
'POLLOUT',
'POLLERR',
'POLLPRI',
'SNDMORE',
'STREAMER',
'FORWARDER',
'QUEUE',
'IO_THREADS_DFLT',
'MAX_SOCKETS_DFLT',
'POLLITEMS_DFLT',
'THREAD_PRIORITY_DFLT',
'THREAD_SCHED_POLICY_DFLT',
# socktypes
'PAIR',
'PUB',
'SUB',
'REQ',
'REP',
'DEALER',
'ROUTER',
'XREQ',
'XREP',
'PULL',
'PUSH',
'XPUB',
'XSUB',
'UPSTREAM',
'DOWNSTREAM',
'STREAM',
'SERVER',
'CLIENT',
'RADIO',
'DISH',
'GATHER',
'SCATTER',
'DGRAM',
# events
'EVENT_CONNECTED',
'EVENT_CONNECT_DELAYED',
'EVENT_CONNECT_RETRIED',
'EVENT_LISTENING',
'EVENT_BIND_FAILED',
'EVENT_ACCEPTED',
'EVENT_ACCEPT_FAILED',
'EVENT_CLOSED',
'EVENT_CLOSE_FAILED',
'EVENT_DISCONNECTED',
'EVENT_ALL',
'EVENT_MONITOR_STOPPED',
'EVENT_HANDSHAKE_FAILED_NO_DETAIL',
'EVENT_HANDSHAKE_SUCCEEDED',
'EVENT_HANDSHAKE_FAILED_PROTOCOL',
'EVENT_HANDSHAKE_FAILED_AUTH',
'PROTOCOL_ERROR_ZMTP_UNSPECIFIED',
'PROTOCOL_ERROR_ZMTP_UNEXPECTED_COMMAND',
'PROTOCOL_ERROR_ZMTP_INVALID_SEQUENCE',
'PROTOCOL_ERROR_ZMTP_KEY_EXCHANGE',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_UNSPECIFIED',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_MESSAGE',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_HELLO',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_INITIATE',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_ERROR',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_READY',
'PROTOCOL_ERROR_ZMTP_MALFORMED_COMMAND_WELCOME',
'PROTOCOL_ERROR_ZMTP_INVALID_METADATA',
'PROTOCOL_ERROR_ZMTP_CRYPTOGRAPHIC',
'PROTOCOL_ERROR_ZMTP_MECHANISM_MISMATCH',
'PROTOCOL_ERROR_ZAP_UNSPECIFIED',
'PROTOCOL_ERROR_ZAP_MALFORMED_REPLY',
'PROTOCOL_ERROR_ZAP_BAD_REQUEST_ID',
'PROTOCOL_ERROR_ZAP_BAD_VERSION',
'PROTOCOL_ERROR_ZAP_INVALID_STATUS_CODE',
'PROTOCOL_ERROR_ZAP_INVALID_METADATA',
'NOTIFY_CONNECT',
'NOTIFY_DISCONNECT',
# security
'NULL',
'PLAIN',
'CURVE',
'GSSAPI',
'GSSAPI_NT_HOSTBASED',
'GSSAPI_NT_USER_NAME',
'GSSAPI_NT_KRB5_PRINCIPAL',
## ERRNO
# Often used (these are else in errno.)
'EAGAIN',
'EINVAL',
'EFAULT',
'ENOMEM',
'ENODEV',
'EMSGSIZE',
'EAFNOSUPPORT',
'ENETUNREACH',
'ECONNABORTED',
'ECONNRESET',
'ENOTCONN',
'ETIMEDOUT',
'EHOSTUNREACH',
'ENETRESET',
# For Windows compatibility
'HAUSNUMERO',
'ENOTSUP',
'EPROTONOSUPPORT',
'ENOBUFS',
'ENETDOWN',
'EADDRINUSE',
'EADDRNOTAVAIL',
'ECONNREFUSED',
'EINPROGRESS',
'ENOTSOCK',
# 0MQ Native
'EFSM',
'ENOCOMPATPROTO',
'ETERM',
'EMTHREAD',
]
int64_sockopt_names = [
'AFFINITY',
'MAXMSGSIZE',
# sockopts removed in 3.0.0
'HWM',
'SWAP',
'MCAST_LOOP',
'RECOVERY_IVL_MSEC',
# new in 4.2
'VMCI_BUFFER_SIZE',
'VMCI_BUFFER_MIN_SIZE',
'VMCI_BUFFER_MAX_SIZE',
]
bytes_sockopt_names = [
'IDENTITY',
'SUBSCRIBE',
'UNSUBSCRIBE',
'LAST_ENDPOINT',
'TCP_ACCEPT_FILTER',
'PLAIN_USERNAME',
'PLAIN_PASSWORD',
'CURVE_PUBLICKEY',
'CURVE_SECRETKEY',
'CURVE_SERVERKEY',
'ZAP_DOMAIN',
'CONNECT_RID',
'GSSAPI_PRINCIPAL',
'GSSAPI_SERVICE_PRINCIPAL',
'SOCKS_PROXY',
'XPUB_WELCOME_MSG',
# new in 4.2.3
'ROUTING_ID',
'CONNECT_ROUTING_ID',
# new in 4.3.0
'BINDTODEVICE',
]
fd_sockopt_names = [
'FD',
]
int_sockopt_names = [
# sockopts
'RECONNECT_IVL_MAX',
# sockopts new in 2.2.0
'SNDTIMEO',
'RCVTIMEO',
# new in 3.x
'SNDHWM',
'RCVHWM',
'MULTICAST_HOPS',
'IPV4ONLY',
'ROUTER_BEHAVIOR',
'TCP_KEEPALIVE',
'TCP_KEEPALIVE_CNT',
'TCP_KEEPALIVE_IDLE',
'TCP_KEEPALIVE_INTVL',
'DELAY_ATTACH_ON_CONNECT',
'XPUB_VERBOSE',
'EVENTS',
'TYPE',
'LINGER',
'RECONNECT_IVL',
'BACKLOG',
'ROUTER_MANDATORY',
'FAIL_UNROUTABLE',
'ROUTER_RAW',
'IMMEDIATE',
'IPV6',
'MECHANISM',
'PLAIN_SERVER',
'CURVE_SERVER',
'PROBE_ROUTER',
'REQ_RELAXED',
'REQ_CORRELATE',
'CONFLATE',
'ROUTER_HANDOVER',
'TOS',
'IPC_FILTER_PID',
'IPC_FILTER_UID',
'IPC_FILTER_GID',
'GSSAPI_SERVER',
'GSSAPI_PLAINTEXT',
'HANDSHAKE_IVL',
'XPUB_NODROP',
# new in 4.2
'XPUB_MANUAL',
'STREAM_NOTIFY',
'INVERT_MATCHING',
'XPUB_VERBOSER',
'HEARTBEAT_IVL',
'HEARTBEAT_TTL',
'HEARTBEAT_TIMEOUT',
'CONNECT_TIMEOUT',
'TCP_MAXRT',
'THREAD_SAFE',
'MULTICAST_MAXTPDU',
'VMCI_CONNECT_TIMEOUT',
'USE_FD',
# new in 4.3
'GSSAPI_PRINCIPAL_NAMETYPE',
'GSSAPI_SERVICE_PRINCIPAL_NAMETYPE',
'MULTICAST_LOOP',
'ROUTER_NOTIFY',
'ZAP_ENFORCE_DOMAIN',
]
switched_sockopt_names = [
'RATE',
'RECOVERY_IVL',
'SNDBUF',
'RCVBUF',
'RCVMORE',
]
ctx_opt_names = [
'IO_THREADS',
'MAX_SOCKETS',
'SOCKET_LIMIT',
'THREAD_PRIORITY',
'THREAD_SCHED_POLICY',
'BLOCKY',
# new in 4.3
'MSG_T_SIZE',
'THREAD_AFFINITY_CPU_ADD',
'THREAD_AFFINITY_CPU_REMOVE',
'THREAD_NAME_PREFIX',
]
msg_opt_names = [
'MORE',
'SRCFD',
'SHARED',
]
from itertools import chain
all_names = list(chain(
base_names,
ctx_opt_names,
bytes_sockopt_names,
fd_sockopt_names,
int_sockopt_names,
int64_sockopt_names,
switched_sockopt_names,
msg_opt_names,
))
del chain
def no_prefix(name):
"""does the given constant have a ZMQ_ prefix?"""
return name.startswith('E') and not name.startswith('EVENT')
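# Illustrative evaluations of the helper above (added note, not part of the original module):
# no_prefix('EAGAIN') -> True, no_prefix('EVENT_CLOSED') -> False, no_prefix('PUB') -> False.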
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/zmq/utils/constant_names.py",
"copies": "1",
"size": "11394",
"license": "mit",
"hash": -6736603251650287000,
"line_mean": 19.7163636364,
"line_max": 65,
"alpha_frac": 0.5510795155,
"autogenerated": false,
"ratio": 2.9625585023400935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8987294777746853,
"avg_score": 0.005268648018648018,
"num_lines": 550
} |
"""0MQ Constants."""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from zmq.backend import constants
from zmq.backend import has
from zmq.utils.constant_names import (
base_names,
switched_sockopt_names,
int_sockopt_names,
int64_sockopt_names,
bytes_sockopt_names,
fd_sockopt_names,
ctx_opt_names,
msg_opt_names,
)
#-----------------------------------------------------------------------------
# Python module level constants
#-----------------------------------------------------------------------------
__all__ = [
'int_sockopts',
'int64_sockopts',
'bytes_sockopts',
'ctx_opts',
'ctx_opt_names',
'DRAFT_API',
]
if constants.VERSION < 40200:
DRAFT_API = False
else:
DRAFT_API = bool(has('draft') and constants.DRAFT_API)
int_sockopts = set()
int64_sockopts = set()
bytes_sockopts = set()
fd_sockopts = set()
ctx_opts = set()
msg_opts = set()
if constants.VERSION < 30000:
int64_sockopt_names.extend(switched_sockopt_names)
else:
int_sockopt_names.extend(switched_sockopt_names)
_UNDEFINED = -9999
def _add_constant(name, container=None):
"""add a constant to be defined
optionally add it to one of the sets for use in get/setopt checkers
"""
c = getattr(constants, name, _UNDEFINED)
if c == _UNDEFINED:
return
globals()[name] = c
__all__.append(name)
if container is not None:
container.add(c)
return c
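# For example (illustrative, added note): _add_constant('RCVTIMEO', int_sockopts) defines
# zmq.sugar.constants.RCVTIMEO and records its value in int_sockopts, but only if the
# backend actually exposes an RCVTIMEO constant; otherwise it is silently skipped.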
for name in base_names:
_add_constant(name)
for name in int_sockopt_names:
_add_constant(name, int_sockopts)
for name in int64_sockopt_names:
_add_constant(name, int64_sockopts)
for name in bytes_sockopt_names:
_add_constant(name, bytes_sockopts)
for name in fd_sockopt_names:
_add_constant(name, fd_sockopts)
for name in ctx_opt_names:
_add_constant(name, ctx_opts)
for name in msg_opt_names:
_add_constant(name, msg_opts)
# ensure some aliases are always defined
aliases = [
('DONTWAIT', 'NOBLOCK'),
('XREQ', 'DEALER'),
('XREP', 'ROUTER'),
]
for group in aliases:
undefined = set()
found = None
for name in group:
value = getattr(constants, name, -1)
if value != -1:
found = value
else:
undefined.add(name)
if found is not None:
for name in undefined:
globals()[name] = found
__all__.append(name)
| {
"repo_name": "josephkirk/PipelineTools",
"path": "packages/zmq/sugar/constants.py",
"copies": "1",
"size": "2468",
"license": "bsd-2-clause",
"hash": -7618709390858384000,
"line_mean": 22.5047619048,
"line_max": 78,
"alpha_frac": 0.5891410049,
"autogenerated": false,
"ratio": 3.432545201668985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476858903413494,
"avg_score": 0.008965460631098173,
"num_lines": 105
} |
"""0MQ Constants."""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from zmq.backend import constants
from zmq.utils.constant_names import (
base_names,
switched_sockopt_names,
int_sockopt_names,
int64_sockopt_names,
bytes_sockopt_names,
fd_sockopt_names,
ctx_opt_names,
msg_opt_names,
)
#-----------------------------------------------------------------------------
# Python module level constants
#-----------------------------------------------------------------------------
__all__ = [
'int_sockopts',
'int64_sockopts',
'bytes_sockopts',
'ctx_opts',
'ctx_opt_names',
'DRAFT_API',
]
DRAFT_API = constants.DRAFT_API
int_sockopts = set()
int64_sockopts = set()
bytes_sockopts = set()
fd_sockopts = set()
ctx_opts = set()
msg_opts = set()
if constants.VERSION < 30000:
int64_sockopt_names.extend(switched_sockopt_names)
else:
int_sockopt_names.extend(switched_sockopt_names)
_UNDEFINED = -9999
def _add_constant(name, container=None):
"""add a constant to be defined
optionally add it to one of the sets for use in get/setopt checkers
"""
c = getattr(constants, name, _UNDEFINED)
if c == _UNDEFINED:
return
globals()[name] = c
__all__.append(name)
if container is not None:
container.add(c)
return c
for name in base_names:
_add_constant(name)
for name in int_sockopt_names:
_add_constant(name, int_sockopts)
for name in int64_sockopt_names:
_add_constant(name, int64_sockopts)
for name in bytes_sockopt_names:
_add_constant(name, bytes_sockopts)
for name in fd_sockopt_names:
_add_constant(name, fd_sockopts)
for name in ctx_opt_names:
_add_constant(name, ctx_opts)
for name in msg_opt_names:
_add_constant(name, msg_opts)
# ensure some aliases are always defined
aliases = [
('DONTWAIT', 'NOBLOCK'),
('XREQ', 'DEALER'),
('XREP', 'ROUTER'),
]
for group in aliases:
undefined = set()
found = None
for name in group:
value = getattr(constants, name, -1)
if value != -1:
found = value
else:
undefined.add(name)
if found is not None:
for name in undefined:
globals()[name] = found
__all__.append(name)
| {
"repo_name": "swn1/pyzmq",
"path": "zmq/sugar/constants.py",
"copies": "10",
"size": "2355",
"license": "bsd-3-clause",
"hash": 3445862275744525000,
"line_mean": 22.3168316832,
"line_max": 78,
"alpha_frac": 0.5842887473,
"autogenerated": false,
"ratio": 3.437956204379562,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00932052837886444,
"num_lines": 101
} |
"""0MQ Error classes and functions."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from errno import EINTR
class ZMQBaseError(Exception):
"""Base exception class for 0MQ errors in Python."""
pass
class ZMQError(ZMQBaseError):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
errno = None
def __init__(self, errno=None, msg=None):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
from zmq.backend import strerror, zmq_errno
if errno is None:
errno = zmq_errno()
if isinstance(errno, int):
self.errno = errno
if msg is None:
self.strerror = strerror(errno)
else:
self.strerror = msg
else:
if msg is None:
self.strerror = str(errno)
else:
self.strerror = msg
# flush signals, because there could be a SIGINT
# waiting to pounce, resulting in uncaught exceptions.
# Doing this here means getting SIGINT during a blocking
# libzmq call will raise a *catchable* KeyboardInterrupt
# PyErr_CheckSignals()
def __str__(self):
return self.strerror
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, str(self))
class ZMQBindError(ZMQBaseError):
"""An error for ``Socket.bind_to_random_port()``.
See Also
--------
.Socket.bind_to_random_port
"""
pass
class NotDone(ZMQBaseError):
"""Raised when timeout is reached while waiting for 0MQ to finish with a Message
See Also
--------
.MessageTracker.wait : object for tracking when ZeroMQ is done
"""
pass
class ContextTerminated(ZMQError):
"""Wrapper for zmq.ETERM
.. versionadded:: 13.0
"""
pass
class Again(ZMQError):
"""Wrapper for zmq.EAGAIN
.. versionadded:: 13.0
"""
pass
try:
InterruptedError
except NameError:
InterruptedError = OSError
class InterruptedSystemCall(ZMQError, InterruptedError):
"""Wrapper for EINTR
This exception should be caught internally in pyzmq
to retry system calls, and not propagate to the user.
.. versionadded:: 14.7
"""
def __str__(self):
s = super(InterruptedSystemCall, self).__str__()
return s + ": This call should have been retried. Please report this to pyzmq."
def _check_rc(rc, errno=None):
"""internal utility for checking zmq return condition
and raising the appropriate Exception class
"""
if rc < 0:
if errno is None:
from zmq.backend import zmq_errno
errno = zmq_errno()
from zmq import EAGAIN, ETERM
if errno == EINTR:
raise InterruptedSystemCall(errno)
elif errno == EAGAIN:
raise Again(errno)
elif errno == ETERM:
raise ContextTerminated(errno)
else:
raise ZMQError(errno)
_zmq_version_info = None
_zmq_version = None
class ZMQVersionError(NotImplementedError):
"""Raised when a feature is not provided by the linked version of libzmq.
.. versionadded:: 14.2
"""
min_version = None
def __init__(self, min_version, msg='Feature'):
global _zmq_version
if _zmq_version is None:
from zmq import zmq_version
_zmq_version = zmq_version()
self.msg = msg
self.min_version = min_version
self.version = _zmq_version
def __repr__(self):
return "ZMQVersionError('%s')" % str(self)
def __str__(self):
return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
def _check_version(min_version_info, msg='Feature'):
"""Check for libzmq
raises ZMQVersionError if current zmq version is not at least min_version
min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
"""
global _zmq_version_info
if _zmq_version_info is None:
from zmq import zmq_version_info
_zmq_version_info = zmq_version_info()
if _zmq_version_info < min_version_info:
min_version = '.'.join(str(v) for v in min_version_info)
raise ZMQVersionError(min_version, msg)
__all__ = [
'ZMQBaseError',
'ZMQBindError',
'ZMQError',
'NotDone',
'ContextTerminated',
'InterruptedSystemCall',
'Again',
'ZMQVersionError',
]
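# --- Hedged usage sketch (added, not part of the original module): how these wrappers
# usually reach user code. The endpoint and port below are illustrative only.
if __name__ == "__main__":
    import zmq
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PULL)
    sock.bind("tcp://127.0.0.1:5599")
    try:
        # nothing has been sent yet, so a non-blocking receive raises zmq.Again (EAGAIN)
        sock.recv(zmq.NOBLOCK)
    except zmq.Again as e:
        print("no message ready:", e)
    finally:
        sock.close()
        ctx.term()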
| {
"repo_name": "dash-dash/pyzmq",
"path": "zmq/error.py",
"copies": "7",
"size": "4898",
"license": "bsd-3-clause",
"hash": 8340425690789371000,
"line_mean": 25.192513369,
"line_max": 97,
"alpha_frac": 0.5902409147,
"autogenerated": false,
"ratio": 4.07826810990841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016987081954469557,
"num_lines": 187
} |
"""0MQ Error classes and functions."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
class ZMQBaseError(Exception):
"""Base exception class for 0MQ errors in Python."""
pass
class ZMQError(ZMQBaseError):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
errno = None
def __init__(self, errno=None, msg=None):
"""Wrap an errno style error.
Parameters
----------
errno : int
The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
used.
msg : string
Description of the error or None.
"""
from zmq.backend import strerror, zmq_errno
if errno is None:
errno = zmq_errno()
if isinstance(errno, int):
self.errno = errno
if msg is None:
self.strerror = strerror(errno)
else:
self.strerror = msg
else:
if msg is None:
self.strerror = str(errno)
else:
self.strerror = msg
# flush signals, because there could be a SIGINT
# waiting to pounce, resulting in uncaught exceptions.
# Doing this here means getting SIGINT during a blocking
# libzmq call will raise a *catchable* KeyboardInterrupt
# PyErr_CheckSignals()
def __str__(self):
return self.strerror
def __repr__(self):
return "ZMQError('%s')"%self.strerror
class ZMQBindError(ZMQBaseError):
"""An error for ``Socket.bind_to_random_port()``.
See Also
--------
.Socket.bind_to_random_port
"""
pass
class NotDone(ZMQBaseError):
"""Raised when timeout is reached while waiting for 0MQ to finish with a Message
See Also
--------
.MessageTracker.wait : object for tracking when ZeroMQ is done
"""
pass
class ContextTerminated(ZMQError):
"""Wrapper for zmq.ETERM
.. versionadded:: 13.0
"""
pass
class Again(ZMQError):
"""Wrapper for zmq.EAGAIN
.. versionadded:: 13.0
"""
pass
def _check_rc(rc, errno=None):
"""internal utility for checking zmq return condition
and raising the appropriate Exception class
"""
if rc < 0:
from zmq.backend import zmq_errno
if errno is None:
errno = zmq_errno()
from zmq import EAGAIN, ETERM
if errno == EAGAIN:
raise Again(errno)
elif errno == ETERM:
raise ContextTerminated(errno)
else:
raise ZMQError(errno)
_zmq_version_info = None
_zmq_version = None
class ZMQVersionError(NotImplementedError):
"""Raised when a feature is not provided by the linked version of libzmq.
.. versionadded:: 14.2
"""
min_version = None
def __init__(self, min_version, msg='Feature'):
global _zmq_version
if _zmq_version is None:
from zmq import zmq_version
_zmq_version = zmq_version()
self.msg = msg
self.min_version = min_version
self.version = _zmq_version
def __repr__(self):
return "ZMQVersionError('%s')" % str(self)
def __str__(self):
return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
def _check_version(min_version_info, msg='Feature'):
"""Check for libzmq
raises ZMQVersionError if current zmq version is not at least min_version
min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info().
"""
global _zmq_version_info
if _zmq_version_info is None:
from zmq import zmq_version_info
_zmq_version_info = zmq_version_info()
if _zmq_version_info < min_version_info:
min_version = '.'.join(str(v) for v in min_version_info)
raise ZMQVersionError(min_version, msg)
__all__ = [
'ZMQBaseError',
'ZMQBindError',
'ZMQError',
'NotDone',
'ContextTerminated',
'Again',
'ZMQVersionError',
]
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/pyzmq-14.5.0/python2/intel/ucs4/64bit/zmq/error.py",
"copies": "21",
"size": "4254",
"license": "apache-2.0",
"hash": -2109089334493122000,
"line_mean": 24.9390243902,
"line_max": 97,
"alpha_frac": 0.5825105783,
"autogenerated": false,
"ratio": 4.0437262357414445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""0MQ polling related functions and classes."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import zmq
from zmq.backend import zmq_poll
from .constants import POLLIN, POLLOUT, POLLERR
#-----------------------------------------------------------------------------
# Polling related methods
#-----------------------------------------------------------------------------
class Poller(object):
"""A stateful poll interface that mirrors Python's built-in poll."""
sockets = None
_map = {}
def __init__(self):
self.sockets = []
self._map = {}
def __contains__(self, socket):
return socket in self._map
def register(self, socket, flags=POLLIN|POLLOUT):
"""p.register(socket, flags=POLLIN|POLLOUT)
Register a 0MQ socket or native fd for I/O monitoring.
register(s,0) is equivalent to unregister(s).
Parameters
----------
socket : zmq.Socket or native socket
A zmq.Socket or any Python object having a ``fileno()``
method that returns a valid file descriptor.
flags : int
The events to watch for. Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
If `flags=0`, socket will be unregistered.
"""
if flags:
if socket in self._map:
idx = self._map[socket]
self.sockets[idx] = (socket, flags)
else:
idx = len(self.sockets)
self.sockets.append((socket, flags))
self._map[socket] = idx
elif socket in self._map:
            # unregister sockets registered with no events
self.unregister(socket)
else:
# ignore new sockets with no events
pass
def modify(self, socket, flags=POLLIN|POLLOUT):
"""Modify the flags for an already registered 0MQ socket or native fd."""
self.register(socket, flags)
def unregister(self, socket):
"""Remove a 0MQ socket or native fd for I/O monitoring.
Parameters
----------
socket : Socket
The socket instance to stop polling.
"""
idx = self._map.pop(socket)
self.sockets.pop(idx)
# shift indices after deletion
for socket, flags in self.sockets[idx:]:
self._map[socket] -= 1
def poll(self, timeout=None):
"""Poll the registered 0MQ or native fds for I/O.
If there are currently events ready to be processed, this function will return immediately.
Otherwise, this function will return as soon the first event is available or after timeout
milliseconds have elapsed.
Parameters
----------
timeout : float, int
The timeout in milliseconds. If None, no `timeout` (infinite). This
is in milliseconds to be compatible with ``select.poll()``.
Returns
-------
events : list of tuples
The list of events that are ready to be processed.
This is a list of tuples of the form ``(socket, event_mask)``, where the 0MQ Socket
or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second.
It is common to call ``events = dict(poller.poll())``,
which turns the list of tuples into a mapping of ``socket : event_mask``.
"""
if timeout is None or timeout < 0:
timeout = -1
elif isinstance(timeout, float):
timeout = int(timeout)
return zmq_poll(self.sockets, timeout=timeout)
def select(rlist, wlist, xlist, timeout=None):
"""select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)
Return the result of poll as a lists of sockets ready for r/w/exception.
This has the same interface as Python's built-in ``select.select()`` function.
Parameters
----------
timeout : float, int, optional
The timeout in seconds. If None, no timeout (infinite). This is in seconds to be
compatible with ``select.select()``.
rlist : list of sockets/FDs
sockets/FDs to be polled for read events
wlist : list of sockets/FDs
sockets/FDs to be polled for write events
xlist : list of sockets/FDs
sockets/FDs to be polled for error events
Returns
-------
(rlist, wlist, xlist) : tuple of lists of sockets (length 3)
Lists correspond to sockets available for read/write/error events respectively.
"""
if timeout is None:
timeout = -1
# Convert from sec -> us for zmq_poll.
# zmq_poll accepts 3.x style timeout in ms
timeout = int(timeout*1000.0)
if timeout < 0:
timeout = -1
sockets = []
for s in set(rlist + wlist + xlist):
flags = 0
if s in rlist:
flags |= POLLIN
if s in wlist:
flags |= POLLOUT
if s in xlist:
flags |= POLLERR
sockets.append((s, flags))
return_sockets = zmq_poll(sockets, timeout)
rlist, wlist, xlist = [], [], []
for s, flags in return_sockets:
if flags & POLLIN:
rlist.append(s)
if flags & POLLOUT:
wlist.append(s)
if flags & POLLERR:
xlist.append(s)
return rlist, wlist, xlist
#-----------------------------------------------------------------------------
# Symbols to export
#-----------------------------------------------------------------------------
__all__ = [ 'Poller', 'select' ]
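# --- Hedged usage sketch (added, not part of the original module): waiting on a socket
# with the Poller defined above. Socket types and the endpoint are illustrative only.
if __name__ == "__main__":
    import zmq
    ctx = zmq.Context.instance()
    receiver = ctx.socket(zmq.PULL)
    receiver.bind("tcp://127.0.0.1:5600")
    sender = ctx.socket(zmq.PUSH)
    sender.connect("tcp://127.0.0.1:5600")
    sender.send(b"hello")
    poller = Poller()
    poller.register(receiver, POLLIN)
    # poll() returns (socket, event_mask) pairs; dict() makes membership tests easy
    events = dict(poller.poll(timeout=1000))
    if receiver in events and events[receiver] & POLLIN:
        print(receiver.recv())
    receiver.close()
    sender.close()
    ctx.term()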
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/zmq/sugar/poll.py",
"copies": "1",
"size": "5578",
"license": "mit",
"hash": -8821430977959501000,
"line_mean": 33.4320987654,
"line_max": 104,
"alpha_frac": 0.5537827178,
"autogenerated": false,
"ratio": 4.4624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005706281204972437,
"num_lines": 162
} |
# [\0] #
# #
# This code is confidential and proprietary, All rights reserved. #
# #
# Tamar Labs 2015. #
# #
# @author: Adam Lev-Libfeld (adam@tamarlabs.com) #
# #
from __future__ import absolute_import, print_function, unicode_literals
from kombu import Connection
from streamparse.spout import Spout
import common.logger
from common.config import Config
class QSpout(Spout):
def initialize(self, stormconf, context):
config = Config()
self._conn = Connection('librabbitmq://guest:guest@%s:%s//' %
(config.queues['host'], config.queues['port']))
self.queue = self._conn.SimpleQueue(config.queues['intake'])
self.logger = common.logger.get_logger(config.queues.intake.name)
def next_tuple(self):
message = self.queue.get(block=True)
try:
data = message.payload
self.emit([0, data])
except ValueError:
pass
except:
import sys, traceback
msg = "Unexpected Spout error:%s" % "\n".join(traceback.format_exception(*sys.exc_info()))
self.logger.error(msg)
finally:
message.ack()
| {
"repo_name": "daTokenizer/quickstorm",
"path": "storm/src/spouts/qspout.py",
"copies": "1",
"size": "1261",
"license": "apache-2.0",
"hash": -1845188087641268700,
"line_mean": 30.525,
"line_max": 93,
"alpha_frac": 0.5678033307,
"autogenerated": false,
"ratio": 3.512534818941504,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4580338149641504,
"avg_score": null,
"num_lines": null
} |
# 0 = open space, 1 = boundary, 2 = the robot, 3 = finish
def maze_vision():
path= ''
maze=[]
maze.append(list('000000002000000'))
maze.append(list('000000003001100'))
maze.append(list('000000000000000'))
maze.append(list('000000000000000'))
maze.append(list('000000000000000'))
maze.append(list('000000000000000'))
#print(maze)
fx=0
fy=0
sx=0
sy=0
#print(maze[0][8])
#print(len(maze[0]))
    for x in range(0, len(maze[0])):  # include the last column
        for y in range(0, len(maze)):  # include the last row
if maze[y][x]=='2':
sx=x
sy=y
elif maze[y][x]=='3':
fx=x
fy=y
#print(fx)
#print(fy)
#print(sx)
#print(sy)
ans= distance(maze,sx,sy,fx,fy,path)
print ("the shortest path is "+str(ans)+ " spaces")
print(path)
def distance(maze, sx, sy, fx, fy,path):
up= int(sy-1)
down= int(sy+1)
left = int(sx-1)
right = int(sx+1)
print(str(sx)+','+str(sy))
updist=3333333
downdist=6666666
leftdist=5555555
rightdist=4444444
if maze[sy][sx]=='3': #reached finish
        print('hit')  # reached the finish square
return 0 #return
#up
# if up >-1:
# if maze[sy][up]=='0': #if this direction is open
# maze[sy][up]='4' #mark it as traveled to
# path= path +'u' #add that direction to final path
# updist= 1+ distance(maze,up,sy,fx,fy,path) #calculate shortest dist from there
#if it makes it past here, that was not the shortest distance
#path= path[:-1] #remove that direction from final path
#maze[sy][up]=0 #mark that direction as not traveled
#down
print(down)
    if down < len(maze):  # stay inside the maze
print('w')
print(maze[down][sx])
if maze[down][sx]=='0':
maze[sy][sx]='4'
#path path +'d'
            downdist= 1 + distance(maze,sx,down,fx,fy,path)  # moving down changes the row (y), not the column
#path= path[:-1]
#maze[sy][down]='0'
#else:
#downdist=999999
#left
# if left>-1:
# if maze[left][sx]=='0':
# maze[left][sx]='4'
# path= path +'l'
# leftdist= 1+distance(maze,sx,left,fx,fy,path)
# path= path[:-1]
# maze[left][sx]='0'
#right
# if right<(len(maze[0])-1):
# if maze[sx][right]=='0':
# maze[sx][right]='4'
# path=path+'r'
# rightdist= 1+distance(maze,sx,right,fx,fy,path)
# path=path[:-1]
# maze[right][sx]='0'
#print(str(sx)+','+str(sy))
return min(updist,downdist,rightdist,leftdist)
# sum2= min(rightdist,leftdist)
# return min(sum2,sum1)
maze_vision()
| {
"repo_name": "jpschnel/maze-vision",
"path": "maze_vision.py",
"copies": "1",
"size": "2411",
"license": "apache-2.0",
"hash": -8237789192253436000,
"line_mean": 20.3240740741,
"line_max": 82,
"alpha_frac": 0.5777685608,
"autogenerated": false,
"ratio": 2.2940057088487156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7687445268100586,
"avg_score": 0.13686580030962617,
"num_lines": 108
} |
# 0 list of animals
dom_zvirata = ['pes','kočka','kralík','had']
# 4 pet names
moji_mazlicci = ['Chula', 'Gordo', 'Bondy', 'Alan', 'Sancho']
vasi_mazlicci = ['Chula', 'Gordo', 'Brok', 'Alfonso', 'Silák']
spolecni_mazlicci = []
jen_moji = []
jen_vasi = []
for jmena_1 in moji_mazlicci:
    for jmena_2 in vasi_mazlicci:
        if jmena_1 == jmena_2:
            spolecni_mazlicci.append(jmena_1)
        # the extra membership checks keep a name from being appended once per inner loop pass
        elif jmena_1 not in vasi_mazlicci and jmena_1 not in jen_moji:
            jen_moji.append(jmena_1)
        elif jmena_2 not in moji_mazlicci and jmena_2 not in jen_vasi:
            jen_vasi.append(jmena_2)
print("Shared pets are", spolecni_mazlicci)
print("These are only your pets: ", jen_vasi)
print("These are only my pets: ", jen_moji)
# 5. function that sorts the pets alphabetically
def seradi_abeceda(seznam):
    """Sorts the given list alphabetically"""
    print(sorted(seznam))
seradi_abeceda(dom_zvirata)
# 6. a budgie ("andulka") arrived
dom_zvirata.append("andulka")
print(dom_zvirata)
# sort the animals by their name from the second letter on (decorate-sort-undecorate)
seznam_dvojic = []
dom_zvirata_nove = []
for zvire in dom_zvirata:
    seznam_dvojic.append([zvire[1:], zvire])
print(sorted(seznam_dvojic))
for podseznamy in sorted(seznam_dvojic):
    dom_zvirata_nove.append(podseznamy[1])
print(dom_zvirata_nove)
| {
"repo_name": "Zuzanita/PyZuzu",
"path": "DP_07_seznamy/Jmena_mazliku.py",
"copies": "1",
"size": "1217",
"license": "mit",
"hash": -4555907480182074400,
"line_mean": 29.6666666667,
"line_max": 62,
"alpha_frac": 0.6747491639,
"autogenerated": false,
"ratio": 2.0514579759862777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3226207139886278,
"avg_score": null,
"num_lines": null
} |
## 0. struct
import collections
Object = collections.namedtuple('Object', ('balanced', 'height'));
C = Object(True, -1);
print(C.balanced, C.height);
## 1. Class (structure):
class ListNode:
def __init__(self, data=0, next_node=None):
self.data = data;
self.next = next_node;
A = ListNode();
class TreeNode:
def __init__(self, data = 0, left = None, right = None):
self.data = data;
self.left = left;
self.right = right;
A = TreeNode();
## 2 Array
#int
A = []; A = [1,2,3]; A = range(0,10); A = [0]*10;
# class
A = []; B = ListNode();
A.append(B);
assert A[-1] == B;
assert A.pop() == B;
## 3. Stack
# stack
stack = [];
stack.append(B);
assert stack[-1] == B;
stack.pop();
# stack with max
class Stack:
class cache:
def __init__(self, _max = -float('inf'), count = 0):
self.max = _max;
self.count = count;
def __init__(self):
self._element = [];
self._maxCache =[];
def empty(self):
return not self._element;
def max(self):
return self._maxCache[-1].max;
def push(self, x):
#update elemetn
self._element.append(x);
#update cache
#if x larger than maxchache[-1], or maxcheche empty, add
if(not self._maxCache or x > self._maxCache[-1].max):
self._maxCache.append(self.cache(x, 1));
#if x equal to maxcache[-1].max, cout += 1
elif(x == self._maxCache[-1].max):
self._maxCache[-1].count += 1;
#if x larger than maxchache[-1].max, do nothing
def pop(self):
#update element
result = self._element.pop();
#update cache
#if result < maxCache[-1].max, no update
#if result == ---------------, cout -= 1, if cout == 0, pop it
if(result == self.max()):
self._maxCache[-1].count -= 1;
if(self._maxCache[-1].count == 0):
self._maxCache.pop();
return result;
## 4. Queue
# queue
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0,item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
# queue using deque
import collections
queue_ = collections.deque();
queue_.append(B);
queue_[0]; #peek
queue_[-1];#peek end
queue_.popleft(); #dequeue from the front
# queue with max
import collections
class QueueWithMax:
def __init__(self):
self._entries = collections.deque()
self._candidates_for_max = collections.deque()
def enqueue(self, x):
self._entries.append(x)
# Eliminate dominated elements in _candidates_for_max.
while self._candidates_for_max and self._candidates_for_max[-1] < x:
self._candidates_for_max.pop()
self._candidates_for_max.append(x)
def dequeue(self):
if self._entries:
result = self._entries.popleft()
if result == self._candidates_for_max[0]:
self._candidates_for_max.popleft()
return result
raise IndexError('empty queue')
def max(self):
if self._candidates_for_max:
return self._candidates_for_max[0]
raise IndexError('empty queue')
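# quick demonstration of the QueueWithMax class above (illustrative values):
Q = QueueWithMax()
for v in [1, 3, 2]:
    Q.enqueue(v)
assert Q.max() == 3   # 3 dominates the earlier 1
Q.dequeue()           # removes 1; the max is still 3
assert Q.max() == 3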
# 5 hashtable
table = {};
table['key1'] = 1;
table['key2'] = B;
if(table.get('key1') == None): print("No key1");
if(not table): print("table empty");
table.pop('key1');
| {
"repo_name": "Liuqian0501/elements-of-programming-interview",
"path": "data structure/python/python_data_structure.py",
"copies": "1",
"size": "3569",
"license": "mit",
"hash": 4765714974546848000,
"line_mean": 21.8782051282,
"line_max": 76,
"alpha_frac": 0.5396469599,
"autogenerated": false,
"ratio": 3.4684159378036927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9300274445348282,
"avg_score": 0.04155769047108216,
"num_lines": 156
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 14:23:25 2017
@authors: Mikel Zemborain, Houghton Yonge, Fuming Qiu, and Viva R. Horowitz
"""
#--------------------------------Library Imports------------------------------#
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer
import sys
import sr400_GUI
import visa
import datetime
import os
import numpy as np
import serial
#-------------------------------just a reminder-------------------------------#
print("")
print("")
print("####--------------------------------------------------------####")
print(" REMEMBER TO TURN ON SR400 AND CONNECT/POWER THE APD!!!")
print("####--------------------------------------------------------####")
#------------------------ Setting up Arduino Connectivity---------------------#
arduino = serial.Serial('COM5', 9600)
arduino.close()
#--------------------------Setting Up GPIB Connectivity-----------------------#
rm = visa.ResourceManager()
instList = rm.list_resources()
print(instList)
GPIBName = ''
for instr in instList:
if instr.find('GPIB') == 0:
GPIBName = instr
sr400 = rm.open_resource(GPIBName)
sr400.timeout = 1000
#--------------------------Data File Functionality----------------------------#
#__Defining Variables__#
global temp
#This line formats the categories of the data for a text file.
Header = "Time (s), Total Counts, Rate (counts/s), \n\n"
#L = long, S = short
DateL = datetime.date.today().isoformat() # = yyyy-mm-dd
DateS = DateL[2:3] + DateL[5:6] + DateL[8:9] # = y[2:3]mmdd
TimeL = str(datetime.datetime.now().time())
#this version of windows does not allow you to save a file with ":" in its title,
# so i had to replace all the colons with semi-colons.
TimeS = TimeL[0:2] + ";" + TimeL[3:5] + ";" + TimeL[6:8]
#This function is called by Update().
def AddData(dataTimes, counts, rates):
"""Given three lists of the same length, add all the data to data file"""
for entry in range(len(dataTimes)):
DataString = (str(dataTimes[entry]) + ", " + str(counts[entry]) +
", " + str(rates[entry]) + "\n")
        # writes DataString into the temporary data file
temp.write(DataString)
#This function is called by Stop_fxn().
def FileSave(RunCount):
temp.close()
print("Here is your file: \n\n")
#reads the data just saved into our temporary data file
data = open(tempFileName).read()
print(data)
#Done to ensure temp can be properly accessed and deleted now that it
#isn't necessary anymore.
print("\nYour file has been successfully saved!")
#----------------------------Establishing Variables---------------------------#
class MainApp(sr400_GUI.Ui_Form):
#lists and variables-:
#the current time in secons
curTimeVal = 0
#list of all the count rates
Ratelst= [] #
#list of all the times where collection of data occurs
Timelst=[]
#list of all the counts
Countlst = []
#average rate
average = 0
#standard deviation of average rate
StDev = 0
#standard error of average rate
StErr = 0
#Counter parameters controlled by GUI
TimeInt = 0
#Tallies number of measurement periods in current session
RunCount = 0
#ask for the current period (1-2000) -> should = 1 here
curPeriod= int(sr400.query("NN"))
print(curPeriod)
scrollWidth = 0 #the width of the x axis (in s) when graph scrolls
scaleWidth = 20 #the width of the x axis (in s) when graph scales
scrollCounter = 1 #keeps track of the window scroll number
lag = 0 #the number of times lag has occured in a row
#Threshold parameter controlled by GUI
Threshold = 0
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent)
#button connections to functions
self.StartBtn.clicked.connect(self.Start_fxn)
self.StopBtn.clicked.connect(self.Stop_fxn)
#enable/disable start and stop buttons
self.StopBtn.setEnabled(False)
self.StartBtn.setEnabled(True)
#QTimer to update and call for data
self.graphTimer = QTimer()
self.graphTimer.setSingleShot(False)
self.graphTimer.timeout.connect(self.Update)
#--------------Formatting Functions-------------------------------------------#
def TSETtoFloat(self, text):
#converts a string of the form NUMeNUM to an float
return float(text)
#--------------------------GUI Widget Functions-------------------------------#
def TSET_fxn(self):
"""gets the time value from the textbox and sets it as the sr400 time
and sets an instance variable to hold that value"""
#get value and assert it's in correct form
TSETText = self.TSETBox.toPlainText()
#this has to do with the syntax or the sr400 commands,1E7 = 1second
TSET = str(self.TSETtoFloat(TSETText) * 10**7)
#convert string to proper float and add dwell time
self.TimeInt = self.TSETtoFloat(TSETText) + 0.002
#set the width of the graph to be 50 times longer than the selected
#time bin time
self.scrollWidth = ((self.TimeInt-.002)* 50)
#set the sr400 to that time period
sr400.write('CP2, ' + TSET)
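    # Worked example (illustrative): with 0.5 entered in the box, TSET becomes
    # str(0.5 * 10**7) = '5000000.0' and is sent as "CP2, 5000000.0"; TimeInt becomes
    # 0.502 s (bin time plus the 2 ms dwell) and scrollWidth becomes 25 s.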
def valve(self, List):
        #open serial communication with arduino if it's not already open
if(arduino.isOpen() == False):
arduino.open()
#find the current count rate value, assign it as rate
rate = List[-1]
#get Threshold value and assert it's in correct form
TSETText1 = self.TSETBox1.toPlainText()
#convert string to proper float
self.Threshold = self.TSETtoFloat(TSETText1)
#if count rate is above the threshold, actuate valve.
if rate >= self.Threshold:
arduino.write(b'1')
#if count rate is below the threshold, de-actuate valve.
elif rate < self.Threshold:
arduino.write(b'0')
def Stop_fxn(self):
"""stops the data collection, commands data to be saved,
closes serial communications"""
        #stop button gets disabled after being clicked, start button gets enabled
self.StopBtn.setEnabled(False)
self.StartBtn.setEnabled(True)
        #scale checkbox gets enabled
self.checkBox1.setEnabled(True)
#tells graphtimer to stop
self.graphTimer.stop()
#tells Photon counter to stop counting, reset data and tracking variables
sr400.write('cr')
#resets photon counter to factory settings
        #(needed just in case a hardware issue, such as not turning on the APD,
        #causes the SR400 to bug out)
sr400.write('cl')
#if you select to save the data, once you click the stop button,
if self.checkBox.isChecked():
#set up a folder in savedData to store our file in
self.FileSetup()
#add our data to a temporary file
AddData(self.Timelst, self.Countlst, self.Ratelst)
#save that temporary file
FileSave(self.RunCount)
        #if you didn't select to save your data, say that
else:
print("")
print("you did not save your data")
#reset the threshold limit
self.Threshold = 0
#close valve
arduino.write(b'0')
#close serial communication
arduino.close()
#reset lists, so that old data is not stored in them during next measurement
self.Ratelst = []
self.Timelst = []
self.Countlst = []
def Start_fxn(self):
"""starts the data collection"""
#reset data and tracking variables
sr400.write('cr')
self.TSET_fxn()
self.curTimeVal = 0
#clear graph and reset window range. this depends on if your want to scale or scroll.
self.rvtGraph.clear()
if self.checkBox1.isChecked():
self.rvtGraph.setXRange(0, self.scaleWidth)
else:
self.rvtGraph.setXRange(0, int(self.scrollWidth))
#reset data and tracking variables
self.TSET_fxn()
self.curTimeVal = 0
self.scrollCounter = 1
#enable/disable start and stop buttons
self.StopBtn.setEnabled(True)
self.StartBtn.setEnabled(False)
#scale checkbox disabled
self.checkBox1.setEnabled(False)
#sets dwell time,
sr400.write("DT 2E-3")
#set number of periods (aka time bins)
sr400.write("NP 2000")
#When the number of time bins (Nperiods) reaches its maximum of 2000,
#reset to 0 and continue measuring
sr400.write("NE 1")
#ask for the current period (1-2000) -> should = 1 here
self.curPeriod= int(sr400.query("NN"))
#start counter
sr400.write("cs")
#starts the QTimer at timeInt, already includes 2ms dwell time
self.graphTimer.start((self.TimeInt) * 1000)
def FileSetup(self):
"""sets up files"""
TimeL = str(datetime.datetime.now().time())
#this version of windows does not allow you to save a file with ":" in its title,
# so i had to replace all the colons with semi-colons.
TimeS = TimeL[0:2] + ";" + TimeL[3:5] + ";" + TimeL[6:8]
folder = DateL + "_" + TimeS + "_SavedData"
#The directory FileName goes in, look in SavedData folder in documents!
saveDir = "C:/Users/HorowitzLab/Documents/SavedData/" + folder
os.makedirs(saveDir)
os.chdir(saveDir)
self.RunCount += 1
global tempFileName
tempFileName = (DateS + "_Data_" + TimeS + ".csv")
global temp
temp = open(tempFileName, "w+")
temp.write(Header)
def Update(self):
"""gets current count and updates instance variables"""
#create list holders for times and rates
countVals = []
timeVals = []
rateVals = []
#ask SR400 to give current Nperiod number
self.curPeriod= int(sr400.query("NN"))
#ask SR400 to give the photon count
data = int(sr400.query("QA " + str(self.curPeriod)))
#get data: continually ask for i-th point until not -1, add to list
#poll for data until get -1
while (data > -1):
#add to list, update other vals
countVals.append(data)
self.curTimeVal += self.TimeInt
self.curTimeVal = round(self.curTimeVal, 3)
#print(self.curTimeVal)
timeVals.append(self.curTimeVal)
#print(timeVals)
rateVals.append(round(data / (self.TimeInt-0.002), 1))
            #at small time bins (< 0.2 s), the code will try to catch up to the measurements being taken
            # and produce a rateVals with multiple measurements, with only the last one being new data.
            #this if/else ensures that this lag doesn't interfere with data collection
if len(rateVals) > 1:
self.Ratelst.append(rateVals[-1])
self.Countlst.append(countVals[-1])
self.Timelst.append(round(self.curTimeVal,3))
else:
self.Ratelst.append(rateVals[0])
self.Timelst.append(round(timeVals[0],3))
self.Countlst.append(countVals[0])
#calculates the avg, stdv, sterr and appends them to their respective lists
self.average = round(sum(self.Ratelst)/len(self.Ratelst), 1)
self.StDev = round(np.std(self.Ratelst), 0)
self.StErr = round( self.StDev / np.sqrt(len(self.Ratelst)), 0)
#increase curPeriod, query for next data point
self.curPeriod += 1
data = int(sr400.query("QA " + str(self.curPeriod)))
#shift window if enough time passes
if self.checkBox1.isChecked():
if (self.curTimeVal > self.scaleWidth * self.scrollCounter):
self.scale()
else:
if (self.curTimeVal > self.scrollWidth * self.scrollCounter):
self.scroll()
#graph: x = time, y = rate, or photon count/period
self.rvtGraph.plot(timeVals, rateVals, pen = None, symbol = '+')
#put in all the values into their respective places in GUI
self.TimeVL.setText(str(self.curTimeVal))
self.CountRateVL.setText(str(rateVals[-1]))
self.TotAvgVL.setText(str(self.average))
self.StDevVL.setText(str(self.StDev) + "(1/s)")
self.StErrVL.setText(str(self.StErr) + "(1/s)")
self.valve(self.Ratelst)
def scroll(self):
""" scrolls the window, clearing old data off the graph. does not
        experience lag. (lag mainly affects stopping measuring and starting
a new measurement)"""
self.rvtGraph.clear()
self.rvtGraph.setXRange(self.scrollWidth * (self.scrollCounter), self.scrollWidth * (self.scrollCounter + 1))
self.scrollCounter += 1
def scale(self):
""" scales the window to fit all the data. laggy when dealing with
        large quantities of data, but good for seeing slow phenomena
such as photo-bleaching"""
self.rvtGraph.setXRange(0, self.scaleWidth * (self.scrollCounter + 1))
self.scrollCounter += 1
def main():
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QWidget()
form = MainApp(window)
app.exec_()
main()
| {
"repo_name": "horowitz-lab/photon-counting",
"path": "Photon_Counter_GPIB.py",
"copies": "1",
"size": "14098",
"license": "mit",
"hash": -7452355780520665000,
"line_mean": 36.2962962963,
"line_max": 117,
"alpha_frac": 0.5714285714,
"autogenerated": false,
"ratio": 3.9869909502262444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058419521626245,
"avg_score": null,
"num_lines": null
} |
# 0: WBAN
# 1: Date
# 2: Time
# 3: StationType
# 4: SkyCondition
# 5: SkyConditionFlag
# 6: Visibility
# 7: VisibilityFlag
# 8: WeatherType
# 9: WeatherTypeFlag
# 10: DryBulbFarenheit
# 11: DryBulbFarenheitFlag
# 12: DryBulbCelsius
# 13: DryBulbCelsiusFlag
# 14: WetBulbFarenheit
# 15: WetBulbFarenheitFlag
# 16: WetBulbCelsius
# 17: WetBulbCelsiusFlag
# 18: DewPointFarenheit
# 19: DewPointFarenheitFlag
# 20: DewPointCelsius
# 21: DewPointCelsiusFlag
# 22: RelativeHumidity
# 23: RelativeHumidityFlag
# 24: WindSpeed
# 25: WindSpeedFlag
# 26: WindDirection
# 27: WindDirectionFlag
# 28: ValueForWindCharacter
# 29: ValueForWindCharacterFlag
# 30: StationPressure
# 31: StationPressureFlag
# 32: PressureTendency
# 33: PressureTendencyFlag
# 34: PressureChange
# 35: PressureChangeFlag
# 36: SeaLevelPressure
# 37: SeaLevelPressureFlag
# 38: RecordType
# 39: RecordTypeFlag
# 40: HourlyPrecip
# 41: HourlyPrecipFlag
# 42: Altimeter
# 43: AltimeterFlag
# 0: Wban Number => 0
# 1: YearMonthDay => 1
# 2: Time => 2
# 3: Station Type => 3
# 4: Maintenance Indicator => null
# 5: Sky Conditions => 4
# 6: Visibility => 6
# 7: Weather Type => 8
# 8: Dry Bulb Temp => 10
# 9: Dew Point Temp => 18
# 10: Wet Bulb Temp => 14
# 11: % Relative Humidity => 22
# 12: Wind Speed (kt) => 24
# 13: Wind Direction => 26
# 14: Wind Char. Gusts (kt) => null
# 15: Val for Wind Char. => 28
# 16: Station Pressure => 30
# 17: Pressure Tendency => 32
# 18: Sea Level Pressure => 36
# 19: Record Type => 38
# 20: Precip. Total => 40
import csv
import sys
from csvkit.cli import parse_column_identifiers
HEADER = ['wban','station_type','skycondition','skyconditionflag','visibility',
'visibilityflag','weathertype','weathertypeflag','drybulbfarenheit','drybulbfarenheitflag',
'drybulbcelsius','drybulbcelsiusflag','wetbulbfarenheit','wetbulbfarenheitflag',
'wetbulbcelsius','wetbulbcelsiusflag','dewpointfarenheit','dewpointfarenheitflag',
'dewpointcelsius','dewpointcelsiusflag','relativehumidity','relativehumidityflag',
'windspeed','windspeedflag','winddirection','winddirectionflag','valueforwindcharacter',
'valueforwindcharacterflag','stationpressure','stationpressureflag','pressuretendency',
'pressuretendencyflag','pressurechange','pressurechangeflag','sealevelpressure',
'sealevelpressureflag','recordtype','recordtypeflag','hourlyprecip','hourlyprecipflag',
'altimeter','altimeterflag','date_time',
]
def process_old_file(reader):
rows = [HEADER]
cols = [0,3,5,42,6,42,7,42,8,42,42,42,10,42,42,42,9,42,42,42,11,42,12,42,13,42,15,
42,16,42,17,42,42,42,18,42,19,42,20,42,42,21]
raw_rows = []
for row in reader:
for i in range(len(HEADER) - len(row)):
row.append('')
raw_rows.append(row)
for row in raw_rows:
rows.append([row[c] for c in cols])
return rows
def process_new_file(reader):
rows = [HEADER]
cols = [0,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,
30,31,32,33,34,35,36,37,38,39,40,41,42,43,44]
for row in reader:
val = []
for col in cols:
val.append(row[col])
rows.append(val)
return rows
if __name__ == "__main__":
reader = csv.reader(sys.stdin)
header = reader.next()
if len(header) <= 22:
rows = process_old_file(reader)
else:
rows = process_new_file(reader)
writer = csv.writer(sys.stdout)
writer.writerows(rows)
| {
"repo_name": "datamade/wopr-data",
"path": "code/processors/catch_format_diffs.py",
"copies": "1",
"size": "3480",
"license": "mit",
"hash": 5743411063735500000,
"line_mean": 29,
"line_max": 95,
"alpha_frac": 0.6781609195,
"autogenerated": false,
"ratio": 2.664624808575804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8743335232636062,
"avg_score": 0.019890099087948192,
"num_lines": 116
} |
"""10,001st prime
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that
the 6th prime is 13.
What is the 10,001st prime number?
"""
from euler.solutions import solution_5
def find_nth_prime(n):
"""Find the nth prime number by computing prime factorizations.
If an upper bound for the target prime were known in advance, using a sieve
of Eratosthenes would be more efficient.
"""
primes_found = 0
candidate = 2
while primes_found < n:
# All primes except 2 are odd. As such, we can optimize this solution
# by discarding even numbers which aren't equal to 2.
if candidate % 2 == 0 and candidate != 2:
candidate += 1
continue
# Check if the candidate's prime factorization consists only
# of (1 and) itself.
if solution_5.get_prime_factorization(candidate) == {candidate: 1}:
primes_found += 1
if primes_found == n:
return candidate
candidate += 1
| {
"repo_name": "rlucioni/project-euler",
"path": "euler/solutions/solution_7.py",
"copies": "1",
"size": "1029",
"license": "mit",
"hash": -3996143416320888000,
"line_mean": 28.4,
"line_max": 79,
"alpha_frac": 0.6258503401,
"autogenerated": false,
"ratio": 3.972972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 35
} |
# 10001st prime
# Problem 7
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
#
# What is the 10001st prime number?
#
# Using a slightly modified sieve implementation from Problem 5,
# but returning a list instead of set, this is simple to do.
#
# Can guess size of sieve needed by utilizing approximation of
# prime-counting function pi(x) ~ x/(ln(x) - 1)
#
# 10001 = x / (ln(x) - 1)
# x = 105694
# Use 110000 to account for margin of error in estimation
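# Quick check of that estimate (values approximate, added note):
# ln(110000) ~ 11.61, so 110000 / (11.61 - 1) ~ 10369 >= 10001,
# hence a sieve of size 110000 comfortably covers the 10001st prime (104743).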
from math import sqrt, ceil
SIEVE_SIZE = 110000
# Generates a set of prime numbers less than or equal to max_num
# using Eratosthenes method. Could be optimized a bit more, but
# fast enough for current purposes.
def prime_sieve(sieve_up_to):
# Make sieve inclusive
max_num = sieve_up_to + 1
sieve = [True] * max_num
# Result is an unordered set since order doesn't matter
result = []
max_sieve_element = ceil(sqrt(max_num))
# Only need to sieve up to sqrt(max_num)
i = 2
for i in range(2, max_sieve_element):
if sieve[i]:
result.append(i)
sieve_iterator = i * 2
while sieve_iterator < max_num:
sieve[sieve_iterator] = False
sieve_iterator += i
for j in range(i + 1, max_num):
if sieve[j]:
result.append(j)
return result
def run():
sieve = prime_sieve(SIEVE_SIZE)
print("Prime numbers in size {0} sieve: {1}".format(SIEVE_SIZE, len(sieve)))
i = 10000
print("sieve[{0}]: {1}".format(i, sieve[i]))
# Sample Output:
# Prime numbers in size 110000 sieve: 10453
# sieve[10000]: 104743
#
# Total running time for Problem7.py is 0.04801486096072827 seconds
| {
"repo_name": "YangLuGitHub/Euler",
"path": "src/scripts/Problem7.py",
"copies": "1",
"size": "1723",
"license": "mit",
"hash": 8402896768148650000,
"line_mean": 28.7068965517,
"line_max": 102,
"alpha_frac": 0.6453859547,
"autogenerated": false,
"ratio": 3.065836298932384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4211222253632384,
"avg_score": null,
"num_lines": null
} |
# 10001st prime
# Problem 7
#
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
# What is the 10001st prime number?
import itertools
N = 10001
def is_divisible_by_one(x, factors):
for factor in factors:
if x % factor == 0:
return True
return False
def gen_prime(limit=None):
assert limit is None or limit >= 2
sieve = [2, 3] # because 2 is the only even prime
yield sieve[-2]
yield sieve[-1]
while True:
generator = filter(lambda v: not is_divisible_by_one(v, sieve),
itertools.count(sieve[-1] + 2, 2))
new_prime = next(generator)
if not limit is None and new_prime > limit > 0:
return
else:
sieve.append(new_prime)
print(new_prime)
yield sieve[-1]
# https://docs.python.org/2/library/itertools.html#recipes
def nth(iterable, n, default=None):
"""Returns the nth item or a default value"""
return next(itertools.islice(iterable, n, None), default)
print(nth(gen_prime(), N - 1))
| {
"repo_name": "chjdev/euler",
"path": "python/problem7.py",
"copies": "1",
"size": "1107",
"license": "bsd-2-clause",
"hash": -2022629770251505200,
"line_mean": 24.7441860465,
"line_max": 102,
"alpha_frac": 0.6016260163,
"autogenerated": false,
"ratio": 3.3545454545454545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4456171470845455,
"avg_score": null,
"num_lines": null
} |
# 1000-digit Fibonacci number
# Problem 25
# The Fibonacci sequence is defined by the recurrence relation:
#
# Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
# Hence the first 12 terms will be:
#
# F1 = 1
# F2 = 1
# F3 = 2
# F4 = 3
# F5 = 5
# F6 = 8
# F7 = 13
# F8 = 21
# F9 = 34
# F10 = 55
# F11 = 89
# F12 = 144
# The 12th term, F12, is the first term to contain three digits.
#
# What is the index of the first term in the Fibonacci sequence to contain 1000 digits?
#
# Naive solution is trivial in Python, again, thanks to support for big integers.
#
# Fibonacci numbers given by closed-form formula can be solved mathematically for solution in
# constant time for arbitrarily large digit counts, if needed.
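#
# A small sketch (added for illustration, not used by run() below) of that
# closed-form approach: by Binet's formula F(n) ~ phi**n / sqrt(5), so F(n)
# first reaches d digits once n*log10(phi) - log10(5)/2 >= d - 1.
def _first_index_with_digits(d):
    from math import ceil, log10, sqrt
    phi = (1 + sqrt(5)) / 2
    return int(ceil((d - 1 + log10(5) / 2) / log10(phi)))
# _first_index_with_digits(1000) -> 4782, matching the sample output below.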
from math import log10
DIGIT_COUNT_REQUIRED = 1000
def run():
fib_last = 1
fib_current = 1
fib_index = 2
# Loop until fibonacci numbers get too large
# Fibonacci computed iteratively
while log10(fib_current) < DIGIT_COUNT_REQUIRED - 1:
fib_new = fib_last + fib_current
fib_last = fib_current
fib_current = fib_new
fib_index += 1
print("The first Fibonacci number to exceed {0} digits has index {1}".format(DIGIT_COUNT_REQUIRED, fib_index))
# Sample Output:
# The first Fibonacci number to exceed 1000 digits has index 4782
#
# Total running time for Problem25.py is 0.00294952969895123 seconds
| {
"repo_name": "YangLuGitHub/Euler",
"path": "src/scripts/Problem25.py",
"copies": "1",
"size": "1371",
"license": "mit",
"hash": -3778833256510314000,
"line_mean": 26.34,
"line_max": 114,
"alpha_frac": 0.685442575,
"autogenerated": false,
"ratio": 2.9397849462365593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41252275212365597,
"avg_score": null,
"num_lines": null
} |
# 10/05/2021
import random
alice, bob, carol, dave, erin, frank, gina = (0,) * 7
for prize in random.choices((1, 2, 3), k=1000):
alice += prize == 1
for prize in random.choices((1, 2, 3), k=1000):
bob += prize != 1
for prize in random.choices((1, 2, 3), k=1000):
pick = random.randint(1, 3)
show = random.choice(tuple({1, 2, 3} - {prize, pick}))
pick = random.choice(tuple({1, 2, 3} - {show}))
carol += prize == pick
for prize in random.choices((1, 2, 3), k=1000):
pick = random.randint(1, 3)
dave += prize == pick
for prize in random.choices((1, 2, 3), k=1000):
pick = random.randint(1, 3)
erin += prize != pick
for prize in random.choices((1, 2, 3), k=1000):
show = random.choice(tuple({2, 3} - {prize}))
pick = 2 if show != 2 else 1
frank += prize == pick
strat = 1
for prize in random.choices((1, 2, 3), k=1000):
gina += strat * (prize == 1) + (not strat) * (prize != 1)
strat = prize == 1
print(f"alice: {alice / 1000}")
print(f"bob: {bob / 1000}")
print(f"carol: {carol / 1000}")
print(f"dave: {dave / 1000}")
print(f"erin: {erin / 1000}")
print(f"frank: {frank / 1000}")
print(f"gina: {gina / 1000}")
| {
"repo_name": "tlgs/dailyprogrammer",
"path": "Python/easy/e389.py",
"copies": "1",
"size": "1185",
"license": "unlicense",
"hash": 4094641048538243600,
"line_mean": 25.3333333333,
"line_max": 61,
"alpha_frac": 0.5755274262,
"autogenerated": false,
"ratio": 2.5538793103448274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8629406736544827,
"avg_score": 0,
"num_lines": 45
} |
# 10.07.2007, c
# last revision: 25.03.2008
from __future__ import absolute_import
from sfepy import data_dir
from sfepy.mechanics.matcoefs import stiffness_from_lame
filename_meshes = ['/meshes/3d/cube_medium_tetra.mesh',
'/meshes/3d/cube_medium_tetra.mesh',
'/meshes/3d/cube_medium_hexa.mesh']
filename_meshes = [data_dir + name for name in filename_meshes]
all_your_bases = [1, 2, 1]
filename_mesh = None
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : None,
}
def get_pars( dim, full = False ):
import numpy as nm
sym = (dim + 1) * dim // 2
lam = 1e1
mu = 1e0
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
if full:
return lam * oot + mu * nm.diag( o + 1.0 )
else:
return lam, mu
material_1 = {
'name' : 'solid',
'values' : {
'Dijkl' : get_pars( 3, True ),
'D' : stiffness_from_lame(3, get_pars(3)[0], get_pars(3)[1]),
'lam' : get_pars(3)[0],
'mu' : get_pars(3)[1],
}
}
material_2 = {
'name' : 'spring',
'values' : {
'.stiffness' : 1e0,
}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Bottom',
'select' : 'vertices in (z < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Top',
'select' : 'vertices in (z > 0.499)',
'kind' : 'facet',
}
ebc_1 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.2' : 0.1},
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations_getpars = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega(solid.Dijkl, v, u)
= dw_point_lspring.i.Bottom(spring.stiffness, v, u)""",
}
equations_matcoefs = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega(solid.D, v, u)
= dw_point_lspring.i.Bottom(spring.stiffness, v, u)""",
}
equations_iso = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i.Omega(solid.lam, solid.mu, v, u)
= dw_point_lspring.i.Bottom(spring.stiffness, v, u)""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from sfepy.base.testing import TestCommon
##
# 10.07.2007, c
class Test( TestCommon ):
tests = ['test_get_solution', 'test_linear_terms']
##
# 10.07.2007, c
def from_conf( conf, options ):
return Test( conf = conf, options = options )
from_conf = staticmethod( from_conf )
##
# c: 25.03.2008, r: 25.03.2008
def test_linear_terms( self ):
ok = True
for sols in self.solutions:
ok = ok and self.compare_vectors(sols[0], sols[1],
label1 = 'getpars',
label2 = 'matcoefs')
ok = ok and self.compare_vectors(sols[0], sols[2],
label1 = 'getpars',
label2 = 'iso')
return ok
##
# c: 10.07.2007, r: 25.03.2008
def test_get_solution( self ):
from sfepy.applications import solve_pde
from sfepy.base.base import IndexedStruct
import os.path as op
ok = True
self.solutions = []
for ii, approx_order in enumerate(all_your_bases):
fname = filename_meshes[ii]
self.conf.filename_mesh = fname
fields = {'field_1' : {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : approx_order,
}
}
self.conf.edit('fields', fields)
self.report('mesh: %s, base: %s' % (fname, approx_order))
status = IndexedStruct()
self.report('getpars')
self.conf.equations = self.conf.equations_getpars
problem, state1 = solve_pde(self.conf, status=status,
save_results=False)
converged = status.nls_status.condition == 0
ok = ok and converged
self.report('converged: %s' % converged)
self.report('matcoefs')
self.conf.equations = self.conf.equations_matcoefs
problem, state2 = solve_pde(self.conf, status=status,
save_results=False)
converged = status.nls_status.condition == 0
ok = ok and converged
self.report('converged: %s' % converged)
self.report('iso')
self.conf.equations = self.conf.equations_iso
problem, state3 = solve_pde(self.conf, status=status,
save_results=False)
converged = status.nls_status.condition == 0
ok = ok and converged
self.report('converged: %s' % converged)
self.solutions.append((state1(), state2(), state3()))
name = op.join(self.options.out_dir,
'_'.join(('test_elasticity_small_strain',
op.splitext(op.basename(fname))[0],
'%d' % approx_order))
+ '.vtk')
problem.save_state(name, state1)
return ok
| {
"repo_name": "sfepy/sfepy",
"path": "tests/test_elasticity_small_strain.py",
"copies": "5",
"size": "5744",
"license": "bsd-3-clause",
"hash": -1732080147768210000,
"line_mean": 26.7487922705,
"line_max": 72,
"alpha_frac": 0.4895543175,
"autogenerated": false,
"ratio": 3.23970671178793,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014637473681123826,
"num_lines": 207
} |
"""1008. Construct Binary Search Tree from Preorder Traversal
https://leetcode.com/problems/construct-binary-search-tree-from-preorder-traversal
Return the root node of a binary search tree that matches the given preorder
traversal.
(Recall that a binary search tree is a binary tree where for every node, any
descendant of node.left has a value < node.val, and any descendant of
node.right has a value > node.val. Also recall that a preorder traversal
displays the value of the node first, then traverses node.left, then
traverses node.right.)
Example 1:
Input: [8,5,1,7,10,12]
Output: [8,5,10,1,7,null,12]

Note:
1 <= preorder.length <= 100
The values of preorder are distinct.
"""
from typing import List
from common.tree_node import TreeNode
class Solution:
def bst_from_preorder(self, preorder: List[int]) -> TreeNode:
def backtrack(start: int, end: int) -> TreeNode:
"""
Recursively generate TreeNode by sub list of preorder.
:param start: the start index of sub list.
:param end: the end index of sub list.
:return: the generated TreeNode.
"""
val = preorder[start]
root = TreeNode(val)
if start == end:
return root
i = start + 1
split = -1
has_split = False
# find the split num of the sub list if it exists.
while i <= end:
if preorder[i] > val:
has_split = True
split = i
break
i += 1
if has_split:
if start + 1 <= split - 1:
# process the left children TreeNode
root.left = backtrack(start + 1, split - 1)
if split <= end:
# process the right children TreeNode
root.right = backtrack(split, end)
else:
if start + 1 <= end:
# process the left children TreeNode
root.left = backtrack(start + 1, end)
return root
return backtrack(0, len(preorder) - 1)
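
# A small usage sketch (added for illustration); it relies only on the
# TreeNode attributes already used above (val, left, right).
def _preorder_values(node):
    if node is None:
        return []
    return [node.val] + _preorder_values(node.left) + _preorder_values(node.right)
# root = Solution().bst_from_preorder([8, 5, 1, 7, 10, 12])
# _preorder_values(root) == [8, 5, 1, 7, 10, 12]  # round-trips the input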
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/construct_binary_search_tree_from_preorder_traversal.py",
"copies": "1",
"size": "2247",
"license": "mit",
"hash": -3512974907669293000,
"line_mean": 29.2837837838,
"line_max": 82,
"alpha_frac": 0.5658188309,
"autogenerated": false,
"ratio": 4.008944543828265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5074763374728264,
"avg_score": null,
"num_lines": null
} |
# 100 documents take about 600 MB RAM
from nltk import NaiveBayesClassifier, classify, FreqDist
import FileHandler as FH
import Document as Doc
from TextProcessor import TextProcessor as TP
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import *
class Classifier():
def SK_NB_train(self, trainDir):
myFH = FH.FileHandler()
docs = myFH.loadDirs(trainDir, True)
print "Extracting Features..."
docCats = [doc.category for doc in docs]
uniqueCats = self.uniqify(docCats)
[tfidfVec, tfidfs] = TP.SK_NB_calcTFIDFs(docs, True, False)
del(docs)
clf = MultinomialNB(fit_prior=False)
clf.fit(tfidfs, docCats)
print "Classifier trained."
self.SK_NB_accuracy(clf, tfidfVec, None, False, True, uniqueCats)
save = raw_input("Would you like to pickle (save) classifier and TFIDFs? (y|n): ")
if (save == 'y'):
from sklearn.externals import joblib
print "Writing Classifier to file."
joblib.dump(clf, "pickles/SK_NB/SK_classifier.pkl")
del(clf)
print "Writing TFIDF pickle file."
joblib.dump(tfidfVec, "pickles/SK_NB/SK_Tfidfs.pkl")
del(tfidfVec)
else:
return clf
def SK_NB_accuracy(self, clf, tfidfVec, trainDocs = None, showMistakes = False, showImpWords = False, class_labels = None):
dir = "docs/testing/" # actual dir will be build from args (docs/userID/setID dir)
myFH = FH.FileHandler()
#Load test documents
testDocs = myFH.loadDirs(dir, True)
rawDocs = [doc.content for doc in testDocs]
docCats = [doc.category for doc in testDocs]
tfidfs = tfidfVec.transform(rawDocs)
# Clean up
del(rawDocs)
if (showMistakes):
self.showMistakes(clf, testDocs, tfidfs, class_labels)
# Clean up
del(testDocs)
if (showImpWords):
self.printImpWords(tfidfVec, clf, None, class_labels, 50)
print "Accuracy: ", clf.score(tfidfs, docCats) * 100
self.PRF(docCats, clf, tfidfs)
# Clean up
del(docCats)
def PRF(self, y_true, clf, tfidfs, average="micro"):
from sklearn.metrics import precision_recall_fscore_support as prfs
y_pred = clf.predict(tfidfs)
print f1_score(y_true, y_pred)
print("confusion matrix:")
print(confusion_matrix(y_true, y_pred))
def showMistakes(self, clf, testDocs, tfidfs, class_labels):
for i in range(len(testDocs)):
prediction = clf.predict(tfidfs[i])[0]
if (testDocs[i].category != prediction):
print testDocs[i].title
print "Actual Class = ", testDocs[i].category
print "Predicted Class = ", prediction
if class_labels is not None:
predictions = clf.predict_proba(tfidfs[i])
for i in range(len(predictions)):
print class_labels, " : "
print predictions, ", "
print ' '
def printImpWords(self, vectorizer, clf, trainDir = None, class_labels = None, numWords = 10):
import numpy as np
"""Prints features with the highest coefficient values, per class"""
print "Most important features:"
print "..."
if trainDir is not None and class_labels is None:
myFH = FH.FileHandler()
trainDocs = myFH.loadDirs(trainDir, True)
elif trainDir is None and class_labels is None:
import sys
sys.exit("Error: either pass training documents or a directory of training data or list of classes to printImpWords()")
if class_labels is None:
class_labels = self.uniqify([doc.category for doc in trainDocs])
feature_names = vectorizer.get_feature_names()
for i, class_label in enumerate(class_labels):
try:
topWords = np.argsort(clf.coef_[i])[-numWords:]
print class_label
print " ".join(feature_names[j] + ", " for j in topWords)
print " "
except:
pass
def NLTK_NB_train(self, trainDir):
myFH = FH.FileHandler()
docs = myFH.loadDirs(trainDir, True)
print "Extracting Features..."
[doc.nbPrepare() for doc in docs]
featureSet = [(doc.features, doc.category) for doc in docs]
del(docs)
print "Deleted Docs"
classifier = NaiveBayesClassifier.train(featureSet)
del(featureSet)
print "Deleted Features"
print "Classifier Trained."
# print "Creating .pickle file..."
# f = open('my_classifier.pickle', 'wb')
# pickle.dump(classifier, f)
# f.close()
# print "Created .pickle file"
#
return classifier
    def NLTK_NB_LoadTrainer(self, file = "my_classifier.pickle"): #change to joblib.load and make sure it all works
print "Loading Classifier.."
# f = open(file)
# classifier = pickle.load(f)
# f.close()
# print "Classifier Loaded."
# return classifier
def NLTK_NB_getFeatures(self, document, all_words):
document_words = set(document)
features = {}
for word in all_words:
features['contains(%s)' % word] = (word in document_words)
return features
def NLTK_NB_sentiTrain(self):
from nltk.corpus import movie_reviews
print "Extracting features.."
documents = [(list(movie_reviews.words(fileid)), category)
for category in movie_reviews.categories()
for fileid in movie_reviews.fileids(category)]
documents = documents[1800:]
print len(documents)
all_words = FreqDist(w.lower() for w in movie_reviews.words()).keys()
print "Extracting features...."
featuresets = [(self.NLTK_NB_getFeatures(d, all_words), c) for (d,c) in documents]
from random import shuffle
shuffle(featuresets)
train_set, test_set = featuresets[100:], featuresets[:100]
classifier = NaiveBayesClassifier.train(train_set)
print classify.accuracy(classifier, test_set)
classifier.show_most_informative_features(10)
#classifier = NaiveBayesClassifier.train(featureSet)
print "Classifier Trained."
# HELPER FUNCTIONS
def uniqify(self, seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
| {
"repo_name": "jahanzebk/python-text-classifier",
"path": "Classifier.py",
"copies": "1",
"size": "7324",
"license": "mit",
"hash": -9069071199593002000,
"line_mean": 34.04784689,
"line_max": 131,
"alpha_frac": 0.5598033861,
"autogenerated": false,
"ratio": 4.068888888888889,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5128692274988889,
"avg_score": null,
"num_lines": null
} |
#100% for part 1 and part 2
#Part 1: Terminology (15 points)
#1 1pt) What is the symbol "=" used for?
#The symbol "=" is used to assign a value.
#1 Correct
#
#2 3pts) Write a technical definition for 'function'
#A function is a sequence of instructions to perform a calculation.
# 3 Correct
#
#3 1pt) What does the keyword "return" do?
#The keyword "return" is used to give the final output of a function, and is used inside a function as a statement.
#1 Correct
#
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1: int = 3,-5
# 2: str = " Hello World " , " How are you? "
# 3: float = 5.5, 5.76
# 4: tuple = (" My "," name "," is "," Tonsom ") , (" I "," am "," 17 "," years "," old ")
# 5: bool = True , False
#5 Correct
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
# A function definition is naming a function, and calculating the statements.
# Meanwhile, a function call is calling a defined function to perform a
# calculation, and present the result to its caller.
# 2Correct
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1: Input- The computer receives commands
# 2: Process- Uses the commands and performs a calculation
# 3: Output- Presents the results of the calculations
#3 Correct
#Part 2: Programming (25 points)
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
import math
def radiusOfCircle1(a):
diameter = (math.sqrt((a/math.pi)))*2
return diameter
def radiusOfCircle2(b):
diameter = (math.sqrt((b/math.pi)))*2
return diameter
def radiusOfCircle3(c):
diameter = (math.sqrt((c/math.pi)))*2
return diameter
def main():
C1 = int(raw_input("Area of C1:"))
C2 = int(raw_input("Area of C2:"))
C3 = int(raw_input("Area of C3:"))
D1 = radiusOfCircle1(C1)
D2 = radiusOfCircle2(C2)
D3 = radiusOfCircle3(C3)
Totals = (D1 + D2 + D3)
print """ Circle Diameter
c1 {}
c2 {}
c3 {}
Totals {}
""".format(D1, D2, D3, Totals)
main()
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
# Hint: Radius is the square root of the area divided by pi
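# An equivalent single helper (added for illustration, not used above): since
# radius = sqrt(area / pi), the diameter is 2 * sqrt(area / pi), so one shared
# function could replace the three identical radiusOfCircleN definitions.
def diameter_from_area(area):
    return 2 * math.sqrt(area / math.pi)
# diameter_from_area(area) gives the same value as radiusOfCircle1(area)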
| {
"repo_name": "tonsom1592-cmis/tonsom1592-cmis-cs2",
"path": "cs2quiz1.py",
"copies": "1",
"size": "2315",
"license": "cc0-1.0",
"hash": 6763219833586417000,
"line_mean": 28.6794871795,
"line_max": 201,
"alpha_frac": 0.6807775378,
"autogenerated": false,
"ratio": 2.9794079794079793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41601855172079794,
"avg_score": null,
"num_lines": null
} |
#### 100% of CREDIT TO ERIK HALLSTORM used for benchmarking
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import time
def get_times(maximum_time):
device_times = {
"/gpu:0":[],
"/cpu:0":[]
}
matrix_sizes = range(500,50000,50)
for size in matrix_sizes:
for device_name in device_times.keys():
print("####### Calculating on the " + device_name + " #######")
shape = (size,size)
data_type = tf.float16
with tf.device(device_name):
r1 = tf.random_uniform(shape=shape, minval=0, maxval=1, dtype=data_type)
r2 = tf.random_uniform(shape=shape, minval=0, maxval=1, dtype=data_type)
dot_operation = tf.matmul(r2, r1)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as session:
start_time = time.time()
result = session.run(dot_operation)
time_taken = time.time() - start_time
print(result)
device_times[device_name].append(time_taken)
print(device_times)
if time_taken > maximum_time:
return device_times, matrix_sizes
device_times, matrix_sizes = get_times(1.5)
gpu_times = device_times["/gpu:0"]
cpu_times = device_times["/cpu:0"]
plt.plot(matrix_sizes[:len(gpu_times)], gpu_times, 'o-')
plt.plot(matrix_sizes[:len(cpu_times)], cpu_times, 'o-')
plt.ylabel('Time')
plt.xlabel('Matrix size')
plt.show() | {
"repo_name": "swirlingsand/self-driving-car-nanodegree-nd013",
"path": "play/benchmark-test-copy-paste-from-erik-hallstrom.py",
"copies": "1",
"size": "1579",
"license": "mit",
"hash": 1136365113537082100,
"line_mean": 29.9803921569,
"line_max": 89,
"alpha_frac": 0.5820139329,
"autogenerated": false,
"ratio": 3.548314606741573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591314455847304,
"avg_score": 0.007802816758853791,
"num_lines": 51
} |
# 100. Same Tree - LeetCode
# https://leetcode.com/problems/same-tree/description/
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def pre_order_traversal(tn):
res = [tn.val]
if tn.left is None and tn.right is None:
return res
if tn.left != None:
res += pre_order_traversal(tn.left)
else:
res += [None]
if tn.right != None:
res += pre_order_traversal(tn.right)
else:
res += [None]
return res
t = TreeNode(1)
# t.left = TreeNode(2)
t.right = TreeNode(3)
pre_order_traversal(t)
class Solution(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
if p is None and q is None:
return True
if p is None or q is None:
return False
if p.val == q.val:
return self.isSameTree(p.left,q.left) and self.isSameTree(p.right,q.right)
else:
return False
s = Solution()
p = TreeNode(1)
p.right = TreeNode(3)
q = TreeNode(1)
q.right = TreeNode(3)
s.isSameTree(p,q) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/100_same-tree.py",
"copies": "1",
"size": "1215",
"license": "mit",
"hash": -3079274060154637300,
"line_mean": 21.1090909091,
"line_max": 86,
"alpha_frac": 0.5563786008,
"autogenerated": false,
"ratio": 3.155844155844156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9125838785082699,
"avg_score": 0.01727679431229127,
"num_lines": 55
} |
__author__ = 'Libao Jin'
__date__ = 'December 13, 2015'
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
if p is None and q is None:
return True
elif p is not None and q is None:
return False
elif p is None and q is not None:
return False
elif p is not None and q is not None and p.val != q.val:
return False
else:
if self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right):
return True
else:
return False
if __name__ == '__main__':
a = TreeNode(1)
b = TreeNode(2)
c = TreeNode(3)
d = TreeNode(4)
e = TreeNode(5)
f = TreeNode(6)
a.left = b
b.left = c
c.left = d
d.left = e
e.left = f
u = TreeNode(1)
v = TreeNode(2)
w = TreeNode(3)
x = TreeNode(4)
y = TreeNode(5)
z = TreeNode(6)
u.left = v
v.left = w
w.left = x
x.left = y
y.right = z
s = Solution()
print(s.isSameTree(a, u))
| {
"repo_name": "imthomasking/LeetCode-Solutions",
"path": "solutions/100_Same_Tree.py",
"copies": "2",
"size": "1318",
"license": "mit",
"hash": 2117289901765007000,
"line_mean": 20.9666666667,
"line_max": 85,
"alpha_frac": 0.4992412747,
"autogenerated": false,
"ratio": 3.2463054187192117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9728594456021238,
"avg_score": 0.003390447479594766,
"num_lines": 60
} |
# 10170,7840;
# http://cmc.music.columbia.edu/chiplotle/manual/chapters/api/hpgl.html
from chiplotle import *
plotter = instantiate_plotters()[0]
#plotter = plottertools.instantiate_virtual_plotter()
import math
paper_width = 10170
paper_height = 7840
image_width = 3000;
image_height = 3800;
border_width = ((paper_width - image_width) / 2.0) - 600
border_height = ((paper_height - image_height) / 2.0) - 300
num_lines = 20.0
height_unit = image_height / num_lines;
SP = hpgl.SP
LT = hpgl.LT
FS = hpgl.FS
SM = hpgl.SM
PU = hpgl.PU
PA = hpgl.PA
PD = hpgl.PD
OT = hpgl.OT
OW = hpgl.OW
PG = hpgl.PG
SC = hpgl.SC
IP = hpgl.IP
RO = hpgl.RO
IN = hpgl.IN
CV = hpgl.CV
CI = hpgl.CI
commands = []
commands.append(IN())
commands.append(SP(2))
for j in range (0,8):
commands.append(FS(5))
commands.append(LT(j % 6))
for i in range (0,int(num_lines)):
la = CoordinateArray ([(border_width, border_height + i/num_lines * image_height + j/8.0 * height_unit)])
lb = CoordinateArray ([(border_width + image_width, border_height + i/num_lines * image_height + j/8.0 * height_unit)])
commands.append(PU(la))
commands.append(CI(20))
commands.append(PU(lb))
commands.append(CI(10))
commands.append(SP(0))
plotter.write(commands)
| {
"repo_name": "wolftype/pony",
"path": "old/forcetest.py",
"copies": "1",
"size": "1240",
"license": "bsd-2-clause",
"hash": 4750363425905681000,
"line_mean": 20.7543859649,
"line_max": 121,
"alpha_frac": 0.6814516129,
"autogenerated": false,
"ratio": 2.494969818913481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36764214318134814,
"avg_score": null,
"num_lines": null
} |
# # 10.1
#
# tens_list = [10, 20, 30, 40]
# funny_strings = ['crunchy frog', 'ram bladder', 'lark vomit']
# some_other = ['spam', 2.0, 5, [10, 20]]
#
# empty_list = []
#
# print(tens_list)
# print(funny_strings)
# print(some_other)
# print(empty_list)
# print()
# print(tens_list[1:])
# print(funny_strings[1])
# print(some_other[::2])
# # 10.2
# food_birds = ['chicken', 'turkey']
#
# food_birds[0] = 'seagull'
# print(food_birds)
# 10.3
# cheeses = ['cheddar', 'gouda', 'munster']
#
# for cheese in cheeses:
# print(cheese)
#
# print()
#
# numbers = [1, 2, 3, 4, 5]
#
# for i in range(len(numbers)):
# numbers[i] = numbers[i] * 2
#
# print(numbers)
#
# print()
#
# for x in []:
# print("this won't execute")
#
# a = [1, 2, 3]
# b = [4, 5, 6]
# c = a + b
# print(c)
#
# 10.7
# def add_all(t):
# total = 0
# for x in t:
# total += x
# return total
#
# print("Result of add all function is {0}".format(add_all([1, 2, 3])))
# exercise 1
# def nested_sum(lists_of_list_of_ints):
# for lists in lists_of_list_of_ints:
# print(sum(lists))
# pass
#
# print(nested_sum([[1, 1, 1], [2, 2, 2], [3, 3, 3]]))
#
# def capitalize_all(t):
# res = []
# for s in t:
# res.append(s.capitalize())
# return res
#
# print(capitalize_all(["ahoy there."]))
# """
# Exercise 3
# Write a function that takes a list of numbers and returns the cumulative sum; that is, a new list where the ith element is the sum of the first i+1 elements from the original list. For example, the cumulative sum of [1, 2, 3] is [1, 3, 6].
# """
#
# list_to_update = [1, 2, 3, 4, 5, 6, 7, 8, 9]
#
# def cumulativer(nums):
# sums = 0
# cums_list = []
# for i in nums:
# sums += i
# cums_list.append(sums)
# return cums_list
#
# print(cumulativer(list_to_update))
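# (For reference, added here and not part of the original exercise: the
# standard library gives the same cumulative sums in one call,
# list(itertools.accumulate(nums)).)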
# 10.8
# t = ['a', 'b', 'c']
# x = t.pop(1)
# print(t)
# x = t.pop()
# print(t)
# print(x)
# """Exercise 4
# Write a function called middle that takes a list and returns a new list that contains all but the first and last elements. So middle([1,2,3,4]) should return [2,3]."""
#
#
# def middle(str):
# return str[1:-1]
#
# print(middle("Oklahoma"))
# print(middle("Michelle Obama"))
# print(middle("Beyonce Knowles"))
# """Exercise 5
# Write a function called chop that takes a list, modifies it by removing the first and last elements, and returns None."""
#
# def chop(lst):
# lst = lst[1:-1]
# print(lst)
# return None
#
# chop(["Apple", "Banana", "Canteloupe"])
| {
"repo_name": "UWPCE-PythonCert/IntroPython2016",
"path": "students/sheree/session_02/homework/TP2-Ch10-Scratch.py",
"copies": "3",
"size": "2517",
"license": "unlicense",
"hash": 8762222992159565000,
"line_mean": 19.2983870968,
"line_max": 241,
"alpha_frac": 0.5740961462,
"autogenerated": false,
"ratio": 2.4798029556650247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4553899101865025,
"avg_score": null,
"num_lines": null
} |
# 101. Symmetric Tree - LeetCode
# https://leetcode.com/problems/symmetric-tree/description/
# Bonus points if you could solve it both recursively and iteratively.
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root is None:
return True
if root.left is None and root.right is None:
return True
if root.left is None or root.right is None:
return False
if root.left.val == root.right.val:
new_tree_left = TreeNode(None)
new_tree_left.left = root.left.left
new_tree_left.right = root.right.right
new_tree_right = TreeNode(None)
new_tree_right.left = root.left.right
new_tree_right.right = root.right.left
return self.isSymmetric(new_tree_left) and self.isSymmetric(new_tree_right)
else:
return False
s = Solution()
p = TreeNode(1)
p.left = TreeNode(3)
p.right = TreeNode(3)
p.left.left = TreeNode(5)
# p.right.right = TreeNode(5)
print s.isSymmetric(p) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/101_symmetric-tree.py",
"copies": "1",
"size": "1273",
"license": "mit",
"hash": 400701219315914400,
"line_mean": 28.6279069767,
"line_max": 87,
"alpha_frac": 0.6017282011,
"autogenerated": false,
"ratio": 3.5068870523415976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46086152534415975,
"avg_score": null,
"num_lines": null
} |
# 101. Symmetric Tree - LeetCode
# https://leetcode.com/problems/symmetric-tree/description/
# iteratively trial
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
next_level = [root]
value_count = 1
while len(next_level) > 0:
current_level = next_level
current_output = []
next_level = []
value_count = 0
for i in current_level:
if i:
value_count = 1
current_output.append(i.val)
next_level += [ i.left, i.right ]
else:
current_output.append( None )
x = 0
while x < len(current_output):
if current_output[x] != current_output[len(current_output)-x-1]:
return False
x += 1
return True
s = Solution()
p = TreeNode(1)
p.left = TreeNode(3)
p.right = TreeNode(3)
p.left.left = TreeNode(5)
# p.right.right = TreeNode(5)
print s.isSymmetric(p) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/101_symmetric-tree_i.py",
"copies": "1",
"size": "1263",
"license": "mit",
"hash": 7761590385831952000,
"line_mean": 25.3333333333,
"line_max": 80,
"alpha_frac": 0.5083135392,
"autogenerated": false,
"ratio": 3.7701492537313435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47784627929313434,
"avg_score": null,
"num_lines": null
} |
"""101. Symmetric Tree
https://leetcode.com/problems/symmetric-tree/description/
Given a binary tree, check whether it is a mirror of itself (ie, symmetric
around its center).
For example, this binary tree [1,2,2,3,4,4,3] is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following [1,2,2,null,3,null,3] is not:
1
/ \
2 2
\ \
3 3
Note:
Bonus points if you could solve it both recursively and iteratively.
"""
from common.tree_node import TreeNode
class Solution:
def is_symmetric_iterative(self, root: TreeNode) -> bool:
def check(node_1: TreeNode, node_2: TreeNode) -> bool:
if not node_1 and not node_2:
return True
if not (node_1 and node_2):
return False
if node_1.val != node_2.val:
return False
return True
if not root:
return True
queue = [root]
while queue:
i, j = 0, len(queue) - 1
while i < j:
if not check(queue[i], queue[j]):
return False
i += 1
j -= 1
next_queue = []
for node in queue:
if node:
next_queue += [node.left, node.right]
queue = next_queue
return True
def is_symmetric_recursive(self, root: TreeNode) -> bool:
if not root:
return True
def check(l_node: TreeNode, r_node: TreeNode):
if not l_node and not r_node:
return True
if not (l_node and r_node):
return False
if l_node.val != r_node.val:
return False
return check(l_node.left, r_node.right) and \
check(l_node.right, r_node.left)
return check(root.left, root.right)
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/symmetric_tree.py",
"copies": "1",
"size": "1896",
"license": "mit",
"hash": 905953582459806300,
"line_mean": 23.3896103896,
"line_max": 74,
"alpha_frac": 0.5111821086,
"autogenerated": false,
"ratio": 3.490706319702602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4501888428302602,
"avg_score": null,
"num_lines": null
} |
# 10/29/15
# Cindy Mo
# clean data from translated text file
# remove stop words
import sys
from collections import OrderedDict
import csv
import numpy as np
import pandas as pd
import re
import json
import nltk
nltk.download("wordnet")
# data file
description = open("../description_pp/description.csv")
csv_description = csv.reader(description)
# stopwords file
stopwords = open("../description_pp/stopwords.txt")
stopwords_list = stopwords.read().splitlines()
# extract set of all significant words from text
with open("../description_pp/description.csv") as f:
uwords = set()
for row in f:
r = row.split(',')[3].lower().split()
print r
r = [re.sub(r'\W+', '', w) for w in r if re.sub(r'\W+', '', w) != ""]
r = [w for w in r if w not in stopwords_list]
uwords |= set(r)
print uwords
with open("../description_pp/uniquewords.json", 'w') as f:
json.dump(dict.fromkeys(uwords), f)
affordance_dict_fpath = "../description_pp/uniquewords.json" # loading the json file with all unique words as keys
with open(affordance_dict_fpath) as fid:
affordanceDict = json.loads(fid.readlines()[0])
fid.close()
all_words = affordanceDict.keys()
word_dict = dict.fromkeys(all_words)
def get_wn_synsets(lemma):
"""
Get all synsets for a word, return a list of [wordnet_label,definition, hypernym_string]
for all synsets returned.
"""
from nltk.corpus import wordnet as wn
synsets = wn.synsets(lemma)
out = []
for s in synsets:
# if not '.v.' in s.name(): continue # only verbs!
hyp = ''
for ii,ss in enumerate(s.hypernym_paths()):
try:
hyp+=(repr([hn.name() for hn in ss])+'\n')
except:
hyp+='FAILED for %dth hypernym\n'%ii
out.append(dict(synset=s.name(), definition=s.definition(),hypernyms=hyp))
return out
def get_wn_meaning(lemma):
"""get meaning of a word using wordNet labels"""
# from nltk.corpus import wordnet as wn
# return wn.synset(lemma).definition()
return None
for w in all_words:
if get_wn_synsets(w) == []:
word_dict.pop(w)
else:
word_dict[w] = [get_wn_synsets(w)[0]['synset']]
with open("../description_pp/word2wn.json", 'w') as f:
json.dump(word_dict, f)
with open("../description_pp/word2wn.json") as f:
translator = json.loads(f.read())
for w in translator:
translator[w] = translator[w][0]
print translator
with open("../description_pp/description.csv") as f:
storedictduration = OrderedDict()
storedictstop = OrderedDict()
ind = 0
for row in csv_description:
if ind == 0:
pass
else:
start, stop, duration = row[0], row[1], str(float(row[1])-float(row[0]))
raw = row[3].lower().split()
refined = [re.sub(r'\W+', '', w) for w in raw if re.sub(r'\W+', '', w) != ""]
refined = [translator[w] for w in refined if w in translator]
storedictduration[ind] = {"start":start, "duration":duration, "words":refined}
storedictstop[ind] = {"start":start, "stop":stop, "words":refined}
ind += 1
with open("../description_pp/wordnet_stop.json", 'w') as f:
json.dump(storedictstop.values(), f)
with open("../description_pp/wordnet_duration.json", 'w') as f:
json.dump(storedictduration.values(), f)
# print sum([len(e["words"]) for e in storedict])/float(len(storedict))
# df = pd.DataFrame(storedict.values()[1:], columns = ["start", "stop", "words"])
# df.to_csv("cleaned.csv")
| {
"repo_name": "rishizsinha/project-beta",
"path": "code/dataclean.py",
"copies": "3",
"size": "3411",
"license": "bsd-3-clause",
"hash": 5796022785475638000,
"line_mean": 29.185840708,
"line_max": 112,
"alpha_frac": 0.6493696863,
"autogenerated": false,
"ratio": 2.984251968503937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133621654803937,
"avg_score": null,
"num_lines": null
} |
# 10/29/15
# Cindy Mo
# clean data from translated text file
# remove stop words
from collections import OrderedDict
import csv
import numpy as np
import pandas as pd
import re
import json
import nltk
# data file
description = open("../description_pp/description.csv")
csv_description = csv.reader(description)
# stopwords file
stopwords = open("../description_pp/stopwords.txt")
stopwords_list = stopwords.read().splitlines()
# extract set of all significant words from text
with open("../description_pp/description.csv") as f:
uwords = set()
for row in f:
r = row.split(',')[3].lower().split()
print r
r = [re.sub(r'\W+', '', w) for w in r if re.sub(r'\W+', '', w) != ""]
r = [w for w in r if w not in stopwords_list]
uwords |= set(r)
print uwords
with open("../description_pp/uniquewords.json", 'w') as f:
json.dump(dict.fromkeys(uwords), f)
affordance_dict_fpath = "../description_pp/uniquewords.json" # loading the json file with all unique words as keys
with open(affordance_dict_fpath) as fid:
affordanceDict = json.loads(fid.readlines()[0])
fid.close()
all_words = affordanceDict.keys()
word_dict = dict.fromkeys(all_words)
def get_wn_synsets(lemma):
"""
Get all synsets for a word, return a list of [wordnet_label,definition, hypernym_string]
for all synsets returned.
"""
from nltk.corpus import wordnet as wn
synsets = wn.synsets(lemma)
out = []
for s in synsets:
# if not '.v.' in s.name(): continue # only verbs!
hyp = ''
for ii,ss in enumerate(s.hypernym_paths()):
try:
hyp+=(repr([hn.name() for hn in ss])+'\n')
except:
hyp+='FAILED for %dth hypernym\n'%ii
out.append(dict(synset=s.name(), definition=s.definition(),hypernyms=hyp))
return out
def get_wn_meaning(lemma):
"""get meaning of a word using wordNet labels"""
# from nltk.corpus import wordnet as wn
# return wn.synset(lemma).definition()
return None
for w in all_words:
if get_wn_synsets(w) == []:
word_dict.pop(w)
else:
word_dict[w] = [get_wn_synsets(w)[0]['synset']]
with open("../description_pp/word2wn.json", 'w') as f:
json.dump(word_dict, f)
with open("../description_pp/word2wn.json") as f:
translator = json.loads(f.read())
for w in translator:
translator[w] = translator[w][0]
print translator
with open("../description_pp/description.csv") as f:
storedictduration = OrderedDict()
storedictstop = OrderedDict()
ind = 0
for row in csv_description:
if ind == 0:
pass
else:
start, stop, duration = row[0], row[1], str(float(row[1])-float(row[0]))
raw = row[3].lower().split()
refined = [re.sub(r'\W+', '', w) for w in raw if re.sub(r'\W+', '', w) != ""]
refined = [translator[w] for w in refined if w in translator]
storedictduration[ind] = {"start":start, "duration":duration, "words":refined}
storedictstop[ind] = {"start":start, "stop":stop, "words":refined}
ind += 1
with open("../description_pp/wordnet_stop.json", 'w') as f:
json.dump(storedictstop.values(), f)
with open("../description_pp/wordnet_duration.json", 'w') as f:
json.dump(storedictduration.values(), f)
# print sum([len(e["words"]) for e in storedict])/float(len(storedict))
# df = pd.DataFrame(storedict.values()[1:], columns = ["start", "stop", "words"])
# df.to_csv("cleaned.csv")
| {
"repo_name": "cxmo/project-beta",
"path": "code/dataclean.py",
"copies": "1",
"size": "3376",
"license": "bsd-3-clause",
"hash": 1535760709174704600,
"line_mean": 29.1428571429,
"line_max": 112,
"alpha_frac": 0.6478080569,
"autogenerated": false,
"ratio": 2.9876106194690264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.900388706147865,
"avg_score": 0.026306322978075346,
"num_lines": 112
} |
"""102. Binary Tree Level Order Traversal
https://leetcode.com/problems/binary-tree-level-order-traversal/
Given a binary tree, return the level order traversal of its nodes' values.
(ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
"""
from typing import List
from common.tree_node import TreeNode
class Solution:
def level_order(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
ans = []
queue = [root]
while queue:
level_list = []
next_queue = []
for node in queue:
level_list.append(node.val)
if node.left:
next_queue.append(node.left)
if node.right:
next_queue.append(node.right)
ans.append(level_list)
queue = next_queue
return ans
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/binary_tree_level_order_traversal.py",
"copies": "1",
"size": "1039",
"license": "mit",
"hash": 7946465741639012000,
"line_mean": 19.8775510204,
"line_max": 75,
"alpha_frac": 0.550342131,
"autogenerated": false,
"ratio": 3.3106796116504853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43610217426504855,
"avg_score": null,
"num_lines": null
} |
#####10/2 Second Round Interview with Linkedin
__iter__()
next()
/** A reference to a file. */
public class TextFile implements Iterable<String>
{
public TextFile(String fileName) { // please implement this
}
/** Begin reading the file, line by line. The returned Iterator.next() will return a line. */
@Override
public Iterator<String> iterator() { // please implement this
}
}
class TextFile:
def __init__(self, fileName):
f = open(fileName)
self.file = f
class Iterator():
def __init__(self, file_inside)
self.file_inside = file_inside
self.index = -1
def next()
self.index += 1
line = self.file_inside.get_line_by_index(self.index)
if self.index == max
if '\eof' in line:
f.close()
return line
def __iter__(self):
return Iterator(self.file)
t1 = TextFile()
'\n'
eof
[1,2,3,4,5]
def f1(t):
i1 = t.__iter__()
i1.next()
i1.next()
i1.next()
def f2(t):
i2 = t.__iter__()
i2.next()
i2.next()
i2.next()
t1.next() # 1
t1.next() # 2
public interface PointsOnAPlane {
/**
* Stores a given point in an internal data structure
*/
void addPoint(Point point);
/**
* For given 'center' point returns a subset of 'p' stored points
* that are closer to the center than others.
*
* E.g.
* Stored:
* (0, 1)
* (0, 2)
* (0, 3)
* (0, 4)
* (0, 5)
*
* findNearest(new Point(0, 0), 3) -> (0, 1), (0, 2), (0, 3)
*/
Collection<Point> findNearest(Point center, int p);
class Point {
final int x;
final int y;
public Point(int x, int y) {
this.x = x;
this.y = y;
}
}
}
(key, value)
(distance_to_center, Point)
from heapq import heappush, heappop
class Find_Near():
def __init__(self):
self.point_list = []
def add_point(self, Point):
self.point_list.append(Point)
def find_nearest(self, center, p):
if len(self.point_list) < p:
            raise ValueError('Not enough points stored')
elif len(self.point_list) == p:
return self.point_list
heap = []
for point in self.point_list:
distance = self.calculate_distance(point, center)
heappush(heap, (distance, point))
nearest = []
for i in range(p):
nearest.append(heappop(heap)[1])
        return nearest
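
# An alternative sketch (added for illustration, not part of the original
# notes): heapq.nsmallest keeps only the p best candidates internally, here
# keyed on squared distance and assuming points behave like (x, y) pairs.
def _find_nearest_sketch(points, center, p):
    import heapq
    return heapq.nsmallest(
        p, points,
        key=lambda pt: (pt[0] - center[0]) ** 2 + (pt[1] - center[1]) ** 2)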
max_heap
p points into the max_heap
O(n*logn)
+
O(k*logn) | {
"repo_name": "cyandterry/Python-Study",
"path": "Interviews/Real_Life/Second_Round_Interview_with_Linkedin.py",
"copies": "2",
"size": "2781",
"license": "mit",
"hash": -4338085256662038000,
"line_mean": 17.7972972973,
"line_max": 95,
"alpha_frac": 0.5012585401,
"autogenerated": false,
"ratio": 3.4849624060150375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49862209461150375,
"avg_score": null,
"num_lines": null
} |
# 10.2 Write a program to read through the mbox-short.txt and figure out the distribution by hour of the day for each of the messages. You can pull the hour out from the 'From ' line by finding the time and then splitting the string a second time using a colon.
# From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008
#
# Once you have accumulated the counts for each hour, print out the counts, sorted by hour as shown below.
fname = raw_input("Enter file name: ")
if len(fname)<1 : fname = "mbox-short.txt"
try:
fhandle = open(fname)
except:
print "This is not a valid file name"
exit()
# select From line, split into words and split the 5th word at the colon, count hours in dictionary
sndrdict = dict()
for sndrline in fhandle :
if sndrline.startswith("From "):
sndrlist = sndrline.split()
colonpos = sndrlist[5].find(":")
hour = sndrlist[5][:colonpos]
sndrdict[hour] = sndrdict.get(hour,0)+1
# convert dict in list
srtlst = list()
for hr,c in sndrdict.items() :
srtlst.append ((hr,c))
# sort list
srtlst.sort()
for hr,c in srtlst :
print hr,c
| {
"repo_name": "jerrydeboer/crs-assignments",
"path": "assignement102.py",
"copies": "1",
"size": "1113",
"license": "cc0-1.0",
"hash": -239392148094853340,
"line_mean": 33.78125,
"line_max": 261,
"alpha_frac": 0.6909254268,
"autogenerated": false,
"ratio": 3.1709401709401708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9219513535195575,
"avg_score": 0.028470412508919296,
"num_lines": 32
} |
# 1036. Escape a Large Maze
# The difficulty is hard, but should be medium.
from collections import defaultdict
from typing import List


class Solution:
def isEscapePossible(
self, blocked: List[List[int]], source: List[int], target: List[int]
) -> bool:
# Calculates the mapping
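        # Coordinate compression: equal coordinates map together, consecutive
        # ones stay adjacent, and any larger gap collapses to a gap of exactly
        # one free cell, which preserves reachability on the 10**6 grid while
        # keeping the rebuilt board small.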
def calcMapping(z):
z.sort()
mapz = {z[0]: 0}
for i in range(1, len(z)):
if z[i] == z[i - 1]:
continue
elif z[i] == z[i - 1] + 1:
mapz[z[i]] = mapz[z[i - 1]] + 1
else:
mapz[z[i]] = mapz[z[i - 1]] + 2
return mapz
mapx = calcMapping(
[x for (x, y) in blocked] + [source[0], target[0], 0, 10 ** 6]
)
mapy = calcMapping(
[y for (x, y) in blocked] + [source[1], target[1], 0, 10 ** 6]
)
# Rebuilt the board
limitX = mapx[10 ** 6]
limitY = mapy[10 ** 6]
blocked = {(mapx[x], mapy[y]) for (x, y) in blocked}
source = (mapx[source[0]], mapy[source[1]])
target = (mapx[target[0]], mapy[target[1]])
# Remove source and target from blocked
blocked = {g for g in blocked if g != source and g != target}
# DFS
visited = defaultdict(bool)
def dfs(x, y):
if 0 <= x and x < limitX and 0 <= y and y < limitY:
if visited[(x, y)]:
return
if (x, y) in blocked:
return
visited[(x, y)] = True
dfs(x - 1, y)
dfs(x + 1, y)
dfs(x, y - 1)
dfs(x, y + 1)
dfs(source[0], source[1])
return visited[target]
| {
"repo_name": "digiter/Arena",
"path": "1036-escape-a-large-maze.py",
"copies": "1",
"size": "1714",
"license": "mit",
"hash": 255910287769141630,
"line_mean": 30.7407407407,
"line_max": 76,
"alpha_frac": 0.4299883314,
"autogenerated": false,
"ratio": 3.4556451612903225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9385633492690323,
"avg_score": 0,
"num_lines": 54
} |
# 1043. Partition Array for Maximum Sum - LeetCode Contest
# https://leetcode.com/contest/weekly-contest-136/problems/partition-array-for-maximum-sum/
# Runtime: 1784 ms
# Memory Usage: 97.4 MB
from functools import lru_cache
INT_MIN = 0
class Solution:
@lru_cache(maxsize=None)
def subarray_maxsum(self,start,end):
return max(self.A[start:end]) * (end - start)
@lru_cache(maxsize=None)
def msa_helper(self,start,end):
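        # Recurrence: try every length 1..K for the block starting at `start`;
        # its value is max(block) * len(block) plus the best partition of the
        # rest, with results memoised by the lru_cache decorator above.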
if end > len(self.A):
end = len(self.A)
if self.K >= end - start:
return max(self.A[start:end]) * (end - start)
current_max = INT_MIN
for i in range(start+1, start+self.K+1, 1):
current_max = max(current_max,self.subarray_maxsum(start,i)+self.msa_helper(i,end))
return current_max
def maxSumAfterPartitioning(self, A, K) -> int:
self.A = A
self.K = K
return self.msa_helper(0,len(A))
# A way of thinking about array boundary handling
ans = [
{"A": [1,15,7,9,2,5,10], "K": 3, "Output": 84},
{"A": [9,15,7,1,2,10], "K": 3, "Output": 75},
{"A": [1], "K": 100, "Output": 1}
]
for i in ans:
s = Solution()
r = s.maxSumAfterPartitioning(i["A"],i["K"])
print(r, r == i["Output"]) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/1043_partition-array-for-maximum-sum.py",
"copies": "1",
"size": "1240",
"license": "mit",
"hash": 7676276806878836000,
"line_mean": 27.4418604651,
"line_max": 95,
"alpha_frac": 0.5785597381,
"autogenerated": false,
"ratio": 2.7216035634743876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38001633015743874,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Libao Jin'
__date__ = 'December 13, 2015'
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def add_left_node(self, y):
self.left = y
def add_right_node(self, y):
self.right = y
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
max_depth = 0
if root is None:
max_depth = 0
elif root.left is not None and root.right is not None:
max_depth_left = self.maxDepth(root.left)
max_depth_right = self.maxDepth(root.right)
max_depth = max(max_depth_left, max_depth_right) + 1
elif root.left is None and root.right is not None:
max_depth = self.maxDepth(root.right) + 1
elif root.right is None and root.left is not None:
max_depth = self.maxDepth(root.left) + 1
else:
max_depth = 1
return max_depth
if __name__ == '__main__':
s = Solution()
a = TreeNode(1)
b = TreeNode(2)
c = TreeNode(3)
d = TreeNode(4)
e = TreeNode(5)
f = TreeNode(6)
g = TreeNode(7)
h = TreeNode(8)
i = TreeNode(9)
a.add_left_node(b)
b.add_right_node(e)
e.add_left_node(g)
b.add_left_node(d)
a.add_right_node(c)
c.add_right_node(f)
f.add_left_node(h)
h.add_left_node(i)
max_depth = s.maxDepth(a)
print(max_depth)
| {
"repo_name": "jinlibao/LeetCode-Solutions",
"path": "solutions/104_Maximum_Depth_Binary_Tree.py",
"copies": "2",
"size": "1560",
"license": "mit",
"hash": -3180895947209529000,
"line_mean": 24.5737704918,
"line_max": 64,
"alpha_frac": 0.5493589744,
"autogenerated": false,
"ratio": 3.1075697211155378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4656928695515538,
"avg_score": null,
"num_lines": null
} |
"""105. Construct Binary Tree from Preorder and Inorder Traversal
https://leetcode.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal
Given preorder and inorder traversal of a tree, construct the binary tree.
Note:
You may assume that duplicates do not exist in the tree.
For example, given
preorder = [3,9,20,15,7]
inorder = [9,3,15,20,7]
Return the following binary tree:
3
/ \
9 20
/ \
15 7
"""
from typing import List
from common.tree_node import TreeNode
class Solution:
def build_tree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not preorder:
return None
root = TreeNode(preorder[0])
index = inorder.index(preorder[0])
left_tree_inorder = inorder[:index]
right_tree_inorder = inorder[index + 1:]
left_tree_preorder = preorder[1: index + 1]
right_tree_preorder = preorder[index + 1:]
root.left = self.build_tree(left_tree_preorder, left_tree_inorder)
root.right = self.build_tree(right_tree_preorder, right_tree_inorder)
return root
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/construct_binary_tree_from_preorder_and_inorder_traversal.py",
"copies": "1",
"size": "1114",
"license": "mit",
"hash": 2206194709725937000,
"line_mean": 26.575,
"line_max": 87,
"alpha_frac": 0.6699909338,
"autogenerated": false,
"ratio": 3.089635854341737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42596267881417366,
"avg_score": null,
"num_lines": null
} |
# 107. Binary Tree Level Order Traversal II - LeetCode
# https://leetcode.com/problems/binary-tree-level-order-traversal-ii/description/
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if root is None:
return []
else:
res = []
next_level = [root]
while len(next_level) > 0:
current_level = next_level
current_output = []
next_level = []
for i in current_level:
if i:
current_output.append(i.val)
next_level += [ i.left, i.right ]
if len(current_output) > 0:
res.insert(0,current_output)
return res
tn = TreeNode(1)
tn.left = TreeNode(2)
tn.right = TreeNode(3)
tn.left.left = TreeNode(4)
s = Solution()
s.levelOrderBottom(tn) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/107_binary-tree-level-order-traversal-ii.py",
"copies": "1",
"size": "1140",
"license": "mit",
"hash": 4746788746615752000,
"line_mean": 27.525,
"line_max": 81,
"alpha_frac": 0.5096491228,
"autogenerated": false,
"ratio": 3.825503355704698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4835152478504698,
"avg_score": null,
"num_lines": null
} |
''' 10-angle_to_earth.py
=========================
AIM: Similar to 9-plot_flux.py, but shows angle to Earth limb instead of sl flux
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
- resources/moon_*.dat, sun_*.dat, orbits_*.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/maps/ : map with the following name: dist_earthlimb_%07d.png
CMD: python 10-angle_to_earth.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
######################################################################
import numpy as np
import pylab as plt
import time
from resources.constants import *
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import matplotlib.cm as cm
import resources.figures as figures
from matplotlib.ticker import MaxNLocator
#p1 = np.array([-86.67, 36.12])
#p2 = np.array([-118.40, 33.94])
#p1 *= np.pi / 180.
#p2 *= np.pi / 180.
#print vSphericalDistance(p1[0],p1[1],p2[0],p2[1])
######################################################################
# orbit_id
orbit_id = '6am_700_5_conf4e'
# Line of sight (LOS) to Earth's limb angle
sl_angle = 45
file_orbit = 'orbit_%s.dat' % orbit_id
file_sun = 'sun_%s.dat' % orbit_id
minute_ini = 4036
minute_end = 4136
n_alpha = param.resx
n_delta = param.resy
fancy = False
save = False
######################################################################
# Initialisation
file_flux = 'flux_'
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
folder_figures= '%s_figures/maps/' % (orbit_id)
if fancy:
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino'],'size':14})
rc('text', usetex=True)
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2 + i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2 + i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
distance = np.zeros([n_delta,n_alpha])
ra_grid, dec_grid = np.meshgrid(ras,decs)
angular_diameters={"sun":0.545/180.*np.pi,"moon":0.5685/180.*np.pi} # must be in rad
######################################################################
sys.stdout.write("Loading orbit file...\t\t\t")
sys.stdout.flush()
try:
sat = np.loadtxt('resources/'+file_orbit, delimiter='\t')
except ValueError:
sat = np.loadtxt('resources/'+file_orbit, delimiter=' ')
print "Done."
sys.stdout.write("Loading sun file...\t\t\t")
sys.stdout.flush()
try:
sun = np.loadtxt('resources/'+file_sun, delimiter='\t')
except ValueError:
sun = np.loadtxt('resources/'+file_sun, delimiter=' ')
print "Done."
alphas = right_ascension(sun[:,1],sun[:,2])
deltas = declination(sun[:,1],sun[:,2],sun[:,3])
rs = R_3d(sun[:,1],sun[:,2],sun[:,3])
sun = np.asarray([sun[:,0],alphas,deltas,rs]).T
minute = minute_ini
while ( minute < minute_end+1 ):
sys.stdout.write("Ploting minute "+str(minute)+'...\t\t')
sys.stdout.flush()
ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
id_sat = find_nearest(sat[:,0],minute)
x = -1*sat[id_sat,1]
y = -1*sat[id_sat,2]
z = -1*sat[id_sat,3]
ra_sat = rev( right_ascension(x,y) )
dec_sat= declination(x,y,z)
points = np.empty([np.shape(ra)[0],2])
points[:,0] = vrev(ra)
points[:,1] = dec
r = R_3d(x,y,z)
d_earthsat = R_3d(x,y,z)
psi0 = np.arcsin((atmosphere + R_Earth)/1e5/d_earthsat)
psi = sl_angle / 180. * np.pi
for ii in range(0, n_alpha):
for jj in range(0, n_delta):
distance[jj, ii] = vSphericalDistance(ra_sat,dec_sat,ras[ii],decs[jj])
alpha= np.arcsin((R_Earth+atmosphere)/r/1e5)
beta = alpha#np.pi/2. - alpha
gamma= np.abs(dec_sat) + beta
distance = (distance-beta)*180/np.pi
distance_t = (vSphericalDistance(ra_sat,dec_sat,ra,dec) - beta)*180/np.pi
LIMIT_e = limit_Earth(gamma,ra_sat,dec_sat)
beta = np.pi/2. - alpha + psi
gamma= np.abs(dec_sat) + beta
LIMIT = limit_Earth(gamma,ra_sat,dec_sat)
################### SUN
coord_body = sun[minute]
corr = angular_diameters['sun']
distance_sun = np.zeros_like(distance)
for ii in range(0, n_alpha):
for jj in range(0, n_delta):
distance_sun[jj, ii] = np.abs(vSphericalDistance(coord_body[1],coord_body[2],ras[ii],decs[jj])-corr)
id_bad = np.rad2deg(distance_sun) < 120
id_good = np.rad2deg(distance_sun) > 120
distance_sun[id_bad] = 1
distance_sun[id_good] = np.nan
#####################
plt.figure()#dpi=param.dpi)
ax = plt.subplot(111)
v = np.linspace(0, 180,10, endpoint=True)
CS=plt.contour(ra_grid*RAD,dec_grid*RAD,distance,v,colors='k')
plt.contourf(ra_grid*RAD,dec_grid*RAD,distance,v,cmap=cm.RdYlGn)
plt.clabel(CS, inline=1, fontsize=10)
cbar = plt.colorbar()
cbar.set_label(r'$\theta\ \mathrm{Angular}\ \mathrm{distance}\ \mathrm{to}\ \mathrm{limb}\ \mathrm{[deg]}$')
plt.scatter(LIMIT_e[:,0]*RAD,(LIMIT_e[:,1])*RAD,color="blue", s=8, edgecolor='none')
plt.scatter(LIMIT[:,0]*RAD,(LIMIT[:,1])*RAD,color="red", s=8, edgecolor='none')
plt.scatter(ra*RAD,dec*RAD,c='k')
plt.scatter(ra_sat*RAD,dec_sat*RAD,s=80)
plt.grid(True)
plt.xlabel(r'$\alpha$')
plt.ylabel(r'$\delta$')
plt.xlim([0, 360])
plt.ylim([-90,90])
CSun = plt.contourf(ra_grid*RAD,dec_grid*RAD,distance_sun)
plt.gca().xaxis.set_major_locator( MaxNLocator(nbins = 13) )
plt.gca().yaxis.set_major_locator( MaxNLocator(nbins = 7) )
current_date = minute * 60. + timestamp_2018_01_01
current_date = figures.format_second(time.gmtime(current_date))
plt.text(0.01, 0.96,current_date, transform = ax.transAxes)
plt.show()
if save:
fname = '%sdist_earthlimb_%07d' % (folder_figures, minute)
plt.savefig(fname+'.png', dpi=param.dpi)
if (fancy):
plt.savefig(fname+'.eps')
os.system("epstopdf "+fname+".eps")
os.system('pdfcrop '+fname+'.pdf')
os.system('mv '+fname+'-crop.pdf '+fname+'.pdf')
os.system('pdftocairo -png '+fname+'.pdf'+' '+fname)
minute += 1
plt.close()
del LIMIT, ra, dec, S_sl, points, LIMIT_e
print 'Done.'
print 'finished', minute, minute_end
| {
"repo_name": "kuntzer/SALSA-public",
"path": "10_angle_to_earth.py",
"copies": "1",
"size": "6383",
"license": "bsd-3-clause",
"hash": -4703785933075812000,
"line_mean": 27.2433628319,
"line_max": 109,
"alpha_frac": 0.6313645621,
"autogenerated": false,
"ratio": 2.5915550142103125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3722919576310313,
"avg_score": null,
"num_lines": null
} |
"""10 book samples."""
from store.book.models import Book
b1 = Book('9780439708180', "Harry Potter and the Philosopher's Stone",
"J. K. Rowling", "Scholastic", 1999, 10, 6.79, "paperback", "fantasy", "fantasy")
b2 = Book('9780439064873', "Harry Potter And The Chamber Of Secrets",
"J. K. Rowling", "Scholastic", 2000, 8, 6.70, "paperback", "fantasy", "fantasy")
b3 = Book('9780439136358', "Harry Potter And The Prisoner Of Azkaban",
"J. K. Rowling", "Scholastic", 1999, 11, 15.24, "hardcover", "fantasy", "fantasy")
b4 = Book('9780439139595', "Harry Potter And The Goblet Of Fire",
"J. K. Rowling", "Scholastic", 2000, 9, 18.28, "hardcover", "fantasy", "fantasy")
b5 = Book('9780439358071', "Harry Potter And The Order Of The Phoenix",
"J. K. Rowling", "Scholastic", 2004, 10, 7.86, "paperback", "fantasy", "fantasy")
b6 = Book('9780439784542', "Harry Potter and the Half-Blood Prince",
"J. K. Rowling", "Scholastic", 2005, 5, 16.94, "hardcover", "fantasy", "fantasy")
b7 = Book('9780545139700', "Harry Potter and the Deathly Hallows",
"J. K. Rowling", "Scholastic", 2007, 4, 9.14, "paperback", "fantasy", "fantasy")
b8 = Book('9780345803481', "Fifty Shades of Grey: Book One of the Fifty Shades Trilogy (Fifty Shades of Grey Series)",
"E L James", "Vintage Books", 2012, 7, 9.99, "paperback", "romance", "romance")
b9 = Book('9780345803498', "Fifty Shades Darker",
"E L James", "Vintage Books", 2012, 7, 10.99, "paperback", "romance", "romance")
b10 = Book('9780345803504', "Fifty Shades Freed: Book Three of the Fifty Shades Trilogy (Fifty Shades of Grey Series)",
"E L James", "Vintage Books", 2012, 7, 9.59, "paperback", "romance", "romance")
sample_list = []
sample_list.append(b1)
sample_list.append(b2)
sample_list.append(b3)
sample_list.append(b4)
sample_list.append(b5)
sample_list.append(b6)
sample_list.append(b7)
sample_list.append(b8)
sample_list.append(b9)
sample_list.append(b10)
sample_list.append(Book("9780199997329", "Classical Mythology", "Mark P. O. Morford", "Oxford University Press", 2002, 80, 7.5, "paperback", "history", "history"))
sample_list.append(Book("9781891389221", "Classical Mechanical", "John R. Taylor", "University Science Books", 2005, 70, 20.99, "hardcover", "academic", "academic"))
sample_list.append(Book("9781107179868", "Introduction to Quantum Mechanics", "David J. Griffiths", "Cambridge University Press", 2016, 55, 25.99, "paperback", "physics", "academic"))
sample_list.append(Book("9781420946338", "Relativity: The Special and General Theory", "Albert Eistein", "Digireads.com", 2012, 29, 17.95, "paperback", "physics", "academic"))
sample_list.append(Book("9781501127625", "Steve Jobs", "Walter Isaacson", "Simon & Schuster", 2015, 33, 12.9, "paperback", "biography", "biography"))
sample_list.append(Book("9789814385282", "The Wit and Wisdom of Lee Kuan Yew", "Kuan Yew Lee", "Didier Millet", 2013, 100, 19.9, "hardcover", "biography", "biography"))
sample_list.append(Book("9781537736495", "Bill Gates: The Life and Business Lessons of Bill Gates", "George llian", "CreateSpace Independent Publishing Platform", 2016, 89, 11.95, "paperback", "biography", "biography"))
sample_list.append(Book("9780374533557", "Thinking, Fast and Slow", "Daniel Kahneman", "Farrar, Straus and Giroux", 2013, 46, 9.99, "paperback", "selfhelp", "selfhelp"))
sample_list.append(Book("9780812981605", "The Power of Habit: Why We Do What We Do in Life and Business", "Charles Duhigg", "Random House Trade Paperbacks", 2014, 66, 6.99, "paperback", "selfhelp", "selfhelp"))
sample_list.append(Book("9780316017930", "Outliers: The Story of Success", "Malcolm Gladwell", "Back Bay Books", 2011, 33, 10.95, "hardcover", "selfhelp", "selfhelp"))
sample_list.append(Book("9783442217786", "Rich Dad, Poor Dad", "Sharon L. Letcher Robert T. Kiyosaki", "Goldmann TB", 2007, 50, 5.95, "paperback", "selfhelp", "selfhelp"))
sample_list.append(Book("9780060506070", "Clara Callan", "Richard Bruce Wright", "Harper Perennial", 2003, 77, 15.0, "hardcover", "non-fiction", "non-fiction"))
sample_list.append(Book("9781517309329", "Improve Your Social Skills", "Daniel Wendler", "CreateSpace Independent Publishing Platform", 2014, 22, 13.95, "hardcover", "selfhelp", "selfhelp"))
sample_list.append(Book("9781568522609", "Decision in Normandy", "Carlo D'Este", "Konecky & Konecky Military Books", 2000, 172, 6.9, "paperback", "history", "history"))
sample_list.append(Book("9780439023528", "The Hunger Games", "Suzanne Collins", "Scholastic Press", 2010, 60, 11.9, "paperback", "fantasy", "fantasy"))
sample_list.append(Book("9780545586177", "Catching Fire", "Suzanne Collins", "Scholastic Press", 2013, 23, 11.95, "hardcover", "fantasy", "fantasy"))
sample_list.append(Book("9780545663267", "MockingJay", "Suzanne Collins", "Scholastic Press", 2014, 44, 13.7, "paperback", "fantasy", "fantasy"))
sample_list.append(Book("9780062387240", "Divergent", "Veronica Roth", "Katherine Tegen Books", 2014, 12, 7.99, "paperback", "fantasy", "fantasy"))
sample_list.append(Book("9780062024053", "Insurgent", "Veronica Roth", "Katherine Tegen Books", 2015, 30, 18.99, "hardcover", "fantasy", "fantasy"))
sample_list.append(Book("9780062024077", "Allegiant", "Veronica Roth", "Katherine Tegen Books", 2016, 22, 12.99, "paperback", "fantasy", "fantasy"))
| {
"repo_name": "tankca/store",
"path": "store/dummy_data/books.py",
"copies": "4",
"size": "5374",
"license": "bsd-3-clause",
"hash": 3547542480651429400,
"line_mean": 80.4242424242,
"line_max": 219,
"alpha_frac": 0.6950130257,
"autogenerated": false,
"ratio": 2.620185275475378,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.5315198301175378,
"avg_score": null,
"num_lines": null
} |
# 10_email_notifier.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
# Based on Recipe 7.15 in The Raspberry Pi Cookbook by Simon Monk.
import RPi.GPIO as GPIO
import smtplib, time # smtp - Simple Mail Transport Protocol - library for sending email
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
red_pin = 18
green_pin = 23
switch_pin = 24
GPIO.setup(red_pin, GPIO.OUT)
GPIO.setup(green_pin, GPIO.OUT)
GPIO.setup(switch_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Use the smtp library to send an email
def send_email(username, password, recipient, subject, text):
print(username, password, recipient, subject, text)
smtpserver = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
smtpserver.ehlo()
smtpserver.starttls()
    smtpserver.ehlo()
smtpserver.login(username, password)
header = 'To:' + recipient + '\n' + 'From: ' + username
header = header + '\n' + 'Subject:' + subject + '\n'
msg = header + '\n' + text + ' \n\n'
smtpserver.sendmail(username, recipient, msg)
smtpserver.close()
# Utility function to turn the green LED on and the red off
def green():
GPIO.output(green_pin, True)
GPIO.output(red_pin, False)
# Utility function to turn the red LED on and the green off
def red():
GPIO.output(green_pin, False)
GPIO.output(red_pin, True)
# Prompt the user to enter their email details
# Should only have to do this once each time the program is started
username = raw_input("Sending gmail address? ")
password = raw_input("Sending gmail password? ")
recipient = raw_input("Send email to? ")
subject = raw_input("Subject? ")
message = raw_input("Message ? ")
print("Press the button to send the Email")
while True:
green() # green LED on
if GPIO.input(switch_pin) == False: # button pressed - led red
red()
send_email(username, password, recipient, subject, message)
time.sleep(3)
print("Press the button to send the Email")
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "10_mail.py",
"copies": "1",
"size": "2127",
"license": "mit",
"hash": 4129862322390031400,
"line_mean": 33.3225806452,
"line_max": 99,
"alpha_frac": 0.6779501646,
"autogenerated": false,
"ratio": 3.4141252006420544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45920753652420543,
"avg_score": null,
"num_lines": null
} |
#"10 Minutes to pandas" tutorial
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class ObjectCreator:
"""Object creation demo"""
def __init__(self):
self.data = []
def createSeries(self):
s = pd.Series( [1,3,5,np.nan,6,8] );
return s
def createDataFrame(self):
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=list('ABCD'))
return df
def createDataFrameFromDict(self):
data = {
'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1, index=list(range(4)), dtype='float32'),
'D' : np.array([3] * 4, dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo' }
df = pd.DataFrame(data);
return df
class Viewer:
"""Example of DataFrame data viewer methods"""
def __init__(self, df=None):
if df is None:
creator = ObjectCreator()
df = creator.createDataFrame();
self.df = df
def all(self):
"""View all data"""
print "All data: df"
print self.df
def head(self, lineCount=None):
print "Head: df.head({})".format(lineCount)
print self.df.head(lineCount)
def tail(self, lineCount=None):
print "Tail: df.tail({})".format(lineCount)
print self.df.tail(lineCount)
def getIndex(self, lineCount=None):
print "Data index: df.index"
print self.df.index
def getColumns(self, lineCount=None):
print "Data columns: df.columns"
print self.df.columns
def getValues(self, lineCount=None):
print "Data values: df.values"
print self.df.values
def describe(self, lineCount=None):
print "Describe data: df.describe()"
print self.df.describe()
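# A minimal usage sketch appended for illustration (not part of the original
# tutorial file): it builds the demo objects above and prints a few views of
# the randomly generated DataFrame.
if __name__ == '__main__':
    creator = ObjectCreator()
    print creator.createSeries()
    print creator.createDataFrameFromDict()
    viewer = Viewer()
    viewer.head(3)
    viewer.describe()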
| {
"repo_name": "mattmcd/PyAnalysis",
"path": "mda/tutorial/pandastut.py",
"copies": "1",
"size": "1920",
"license": "apache-2.0",
"hash": 9077875458698445000,
"line_mean": 27.6567164179,
"line_max": 80,
"alpha_frac": 0.5625,
"autogenerated": false,
"ratio": 3.699421965317919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4761921965317919,
"avg_score": null,
"num_lines": null
} |
"""10. Regular Expression Matching
https://leetcode.com/problems/regular-expression-matching/description/
Given an input string (s) and a pattern (p), implement regular expression
matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
Note:
s could be empty and contains only lowercase letters a-z.
p could be empty and contains only lowercase letters a-z, and characters like
. or *.
Example 1:
Input:
s = "aa"
p = "a"
Output: false
Explanation: "a" does not match the entire string "aa".
Example 2:
Input:
s = "aa"
p = "a*"
Output: true
Explanation: '*' means zero or more of the precedeng element, 'a'. Therefore,
by repeating 'a' once, it becomes "aa".
Example 3:
Input:
s = "ab"
p = ".*"
Output: true
Explanation: ".*" means "zero or more (*) of any character (.)".
Example 4:
Input:
s = "aab"
p = "c*a*b"
Output: true
Explanation: c can be repeated 0 times, a can be repeated 1 time. Therefore
it matches "aab".
Example 5:
Input:
s = "mississippi"
p = "mis*is*p*."
Output: false
"""
class Solution:
def is_match(self, s: str, p: str) -> bool:
s_len, p_len = len(s), len(p)
if p_len == 0:
return s_len == 0
if p_len == 1:
if p[0] == ".":
return s_len == 1
return s == p
if p[1] == "*":
if self.is_match(s, p[2:]):
return True
if s_len > 0 and (p[0] == "." or s[0] == p[0]):
return self.is_match(s[1:], p)
return False
else:
if s_len > 0 and (p[0] == "." or s[0] == p[0]):
return self.is_match(s[1:], p[1:])
return False
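# A minimal usage sketch (not part of the original solution file): it simply
# replays the examples from the problem statement above through the recursive
# matcher defined in this class.
if __name__ == "__main__":
    solution = Solution()
    print(solution.is_match("aa", "a"))                    # False
    print(solution.is_match("aa", "a*"))                   # True
    print(solution.is_match("ab", ".*"))                   # True
    print(solution.is_match("aab", "c*a*b"))               # True
    print(solution.is_match("mississippi", "mis*is*p*."))  # False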
| {
"repo_name": "isudox/leetcode-solution",
"path": "python-algorithm/leetcode/regular_expression_matching.py",
"copies": "1",
"size": "1801",
"license": "mit",
"hash": 2136845455099409200,
"line_mean": 18.2903225806,
"line_max": 77,
"alpha_frac": 0.5735785953,
"autogenerated": false,
"ratio": 3.130890052356021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9204468647656021,
"avg_score": 0,
"num_lines": 93
} |
# 10-regular-expression-matching.py
class Solution(object):
def isMatch_tle(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
# Correct, but TLE
# Same method ACed in C/C++, I've tried eliminate function call,
# which saved around 1.5s
ls, lp = len(s), len(p)
def iter(si, pi):
while si < ls and pi < lp:
if pi == lp - 1 or p[pi+1] != '*':
if p[pi] == '*' or s[si] == p[pi]:
si, pi = si+1, pi+1
else:
return False
else:
if p[pi] == '*' or s[si] == p[pi]:
return iter(si, pi+2) or iter(si+1, pi)
else:
pi += 2
while pi+1 < lp and p[pi+1] == '*': pi += 2
return si == ls and pi >= lp
return iter(0, 0)
def isMatch(self, s, p):
# table[p][s]
# table[0][0]: two empty string
# table[1][1]: is p[0] matching s[0]
table = [[False] * (len(s) + 1) for _ in range(len(p) + 1)]
table[0][0] = True
ls, lp = len(s), len(p)
for i in range(2, lp+1):
if p[i-1] == '*': table[i][0] = table[i-2][0]
for pi in range(1, lp+1):
for si in range(1, ls+1):
if p[pi-1] != '*':
table[pi][si] = table[pi-1][si-1] and \
(p[pi-1] == s[si-1] or p[pi-1] == '.')
else:
table[pi][si] = table[pi-1][si] or table[pi-2][si]
if p[pi-2] == s[si-1] or p[pi-2] == '.':
table[pi][si] |= table[pi][si-1]
return table[-1][-1]
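# Worked example added for clarity (not in the original file): for s = "ab",
# p = ".*" the DP table reads, with table[pi][si] meaning "p[:pi] matches s[:si]":
#
#               s: ""    "a"    "ab"
#   p = ""      True   False   False
#   p = "."     False  True    False
#   p = ".*"    True   True    True
#
# table[2][0] is True because ".*" can match the empty string, and the answer
# is the bottom-right cell, table[-1][-1] = True.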
s = Solution()
print(s.isMatch("abc", "abc"))
print(s.isMatch("abc", "ab*bc"))
print(s.isMatch("abbc", "ab*bc"))
print(s.isMatch("aaaaaaaaaaaaac", "a*a*a*a*a*a*a*a*a*a*a*b"))
| {
"repo_name": "daicang/Leetcode-solutions",
"path": "010-regular-expression-matching.py",
"copies": "1",
"size": "2047",
"license": "mit",
"hash": 6385574247902885000,
"line_mean": 30.4923076923,
"line_max": 74,
"alpha_frac": 0.3805569126,
"autogenerated": false,
"ratio": 3.2338072669826223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9056850730756176,
"avg_score": 0.011502689765289235,
"num_lines": 65
} |
# 10_RGB_LED.py
from tkinter import *
import RPi.GPIO as GPIO
import time
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
# Start Pulse Width Modulation (PWM) on the red, green and blue channels
pwmRed = GPIO.PWM(18, 500)
pwmRed.start(100)
pwmGreen = GPIO.PWM(23, 500)
pwmGreen.start(100)
pwmBlue = GPIO.PWM(24, 500)
pwmBlue.start(100)
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
# A frame holds the various GUI controls
frame = Frame(master)
frame.pack()
# Create the labels and position them in a grid layout
Label(frame, text='Red').grid(row=0, column=0)
Label(frame, text='Green').grid(row=1, column=0)
Label(frame, text='Blue').grid(row=2, column=0)
# Create the sliders and position them in a grid layout
        # the 'command' attribute specifies a method to call when
# a slider is moved
scaleRed = Scale(frame, from_=0, to=100,
orient=HORIZONTAL, command=self.updateRed)
scaleRed.grid(row=0, column=1)
scaleGreen = Scale(frame, from_=0, to=100,
orient=HORIZONTAL, command=self.updateGreen)
scaleGreen.grid(row=1, column=1)
scaleBlue = Scale(frame, from_=0, to=100,
orient=HORIZONTAL, command=self.updateBlue)
scaleBlue.grid(row=2, column=1)
# These methods called whenever a slider moves
def updateRed(self, duty):
# change the led brightness to match the slider
pwmRed.ChangeDutyCycle(float(duty))
def updateGreen(self, duty):
pwmGreen.ChangeDutyCycle(float(duty))
def updateBlue(self, duty):
pwmBlue.ChangeDutyCycle(float(duty))
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('RGB LED Control')
app = App(root)
root.geometry("200x150+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
| {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "10_RGB_LED.py",
"copies": "1",
"size": "2182",
"license": "mit",
"hash": 8000308267194469000,
"line_mean": 30.1714285714,
"line_max": 85,
"alpha_frac": 0.6539871677,
"autogenerated": false,
"ratio": 3.398753894080997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9487808580054495,
"avg_score": 0.012986496345300436,
"num_lines": 70
} |
# 1113, 15 Jan 2018 (NZDT)
# 1305, 9 Jan 2018 (NZDT)
#
# Nevil Brownlee, U Auckland
# imported as wp
'''
Elements not allowed in SVG 1.2:
https://www.ruby-forum.com/topic/144684
http://inkscape.13.x6.nabble.com/SVG-Tiny-td2881845.html
marker
clipPath
style properties come from the CSS, they are allowed in Tiny 1.2
DrawBerry produces attributes with inkscape:,
'http://www.w3.org/XML/1998/namespace', # wordle uses this
e.g. inkscape:label and inkscape:groupmode
DrawBerry and Inkscape seem to use this for layers; don't use it!
XML NameSpaces. Specified by xmlns attribute,
e.g. xmlns:inkscape="http://inkscape..." specifies inkscape elements
such elements are prefixed by the namespace identifier,
e.g. inkscape:label="Background" inkscape:groupmode="layer"
Attributes in elements{} 'bottom lines' added during chek.py testing
'''
elements = {
'svg': ('version', 'baseProfile', 'width', 'viewBox',
'preserveAspectRatio', 'snapshotTime',
'height', 'id', 'role',
'color-rendering', 'fill-rule', '<tbreak>'),
'desc': ( 'id', 'role',
'shape-rendering', 'text-rendering', 'buffered-rendering',
'visibility', '<tbreak>'),
'title': ( 'id', 'role',
'shape-rendering', 'text-rendering', 'buffered-rendering',
'visibility', '<tbreak>'),
'path': ('d', 'pathLength', 'stroke-miterlimit',
'id', 'role', 'fill', 'style', 'transform',
'font-size',
'fill-rule', '<tbreak>'),
'rect': ('x', 'y', 'width', 'height', 'rx', 'ry',
'stroke-miterlimit',
'id', 'role', 'fill', 'style','transform',
'fill-rule', '<tbreak>'),
'circle': ('cx', 'cy', 'r',
'id', 'role', 'fill', 'style', 'transform',
'fill-rule', '<tbreak>'),
'line': ('x1', 'y1', 'x2', 'y2',
'id', 'role', 'fill', 'transform',
'fill-rule', '<tbreak>'),
'ellipse': ('cx', 'cy', 'rx', 'ry',
'id', 'role', 'fill', 'style', 'transform',
'fill-rule', '<tbreak>'),
'polyline': ('points',
'id', 'role', 'fill', 'transform',
'fill-rule', '<tbreak>'),
'polygon': ('points',
'id', 'role', 'fill', 'style', 'transform',
'fill-rule', '<tbreak>'),
'solidColor': ( 'id', 'role', 'fill',
'fill-rule', '<tbreak>'),
'textArea': ('x', 'y', 'width', 'height', 'auto',
'id', 'role', 'fill', 'transform',
'fill-rule', '<tbreak>'),
'text': ('x', 'y', 'rotate', 'space',
'id', 'role', 'fill', 'style', 'transform',
'font-size',
'fill-rule', '<tbreak>'),
'g': ( 'label', 'class',
'id', 'role', 'fill', 'style', 'transform',
'fill-rule', 'visibility', '<tbreak>'),
'defs': ( 'id', 'role', 'fill',
'fill-rule', '<tbreak>'),
'use': ('x', 'y', 'href',
'id', 'role', 'fill', 'transform',
'fill-rule', '<tbreak>'),
'a': ( 'id', 'role', 'fill', 'transform', # Linking
'fill-rule', '<tbreak>'),
'tspan': ('x', 'y', 'id', 'role', 'fill',
'fill-rule', '<tbreak>'),
# 'linearGradient': ('gradientUnits', 'x1', 'y1', 'x2', 'y2',
# 'id', 'role', 'fill',
# '<tbreak>'),
# 'radialGradient': ('gradientUnits', 'cx', 'cy', 'r',
# 'id', 'role', 'fill',
# '<tbreak>'),
# 'stop': ( 'id', 'role', 'fill', # Gradients
# 'fill-rule', '<tbreak>'),
}
# Elements have a list of attributes (above),
# we need to know what attributes each can have ...
# Properties capture CSS info, they have lists of allowable values.
# Attributes have allowed values too;
# we also need to know which elements they're allowed in.
# if string or xsd:string is allowed, we don't check,
# but the 'syntax' values are shown as a comment below
properties = { # Attributes allowed in elements
'about': (), # Allowed values for element attributes,
'base': (), # including those listed in <tbreak>
'baseProfile': (),
'd': (),
'break': (),
'class': (),
'content': (),
'cx': ('<number>'),
'cy': ('<number>'),
'datatype': (),
'height': ('<number>'),
'href': (),
'id': (),
'label': (),
'lang': (),
'pathLength': (),
'points': (),
'preserveAspectRatio': (),
'property': (),
'r': ('<number>'),
'rel': (),
'resource': (),
'rev': (),
'role': (),
'rotate': (),
'rx': ('<number>'),
'ry': ('<number>'),
'space': (),
'snapshotTime': (),
'transform': (),
'typeof': (),
'version': (),
'width': ('<number>'),
'viewBox': ('<number>'),
'x': ('<number>'),
'x1': ('<number>'),
'x2': ('<number>'),
'y': ('<number>'),
'y1': ('<number>'),
'y2': ('<number>'),
'stroke': ('<paint>', 'none'), # Change from I-D
'stroke-width': (), # 'inherit'
'stroke-linecap': ('butt', 'round', 'square', 'inherit'),
'stroke-linejoin': ('miter', 'round', 'bevel', 'inherit'),
'stroke-miterlimit': (), # 'inherit'
'stroke-dasharray': (), # 'inherit', 'none'
'stroke-dashoffset': (), # 'inherit'
'stroke-opacity': (), # 'inherit'
'vector-effect': ('non-scaling-stroke', 'none', 'inherit'),
'viewport-fill': ('none', 'currentColor', '<color>'),
'display': ('inline', 'block', 'list-item', 'run-in', 'compact',
'marker', 'table', 'inline-table', 'table-row-group',
'table-header-group', 'table-footer-group',
                'table-row', 'table-column-group',
'table-column', 'table-cell', 'table-caption',
'none'),
'viewport-fill-opacity': (), # "inherit"
'visibility': ('visible', 'hidden', 'collapse', 'inherit'),
'image-rendering': ('auto', 'optimizeSpeed', 'optimizeQuality', 'inherit'),
'color-rendering': ('auto', 'optimizeSpeed', 'optimizeQuality', 'inherit'),
'shape-rendering': ('auto', 'optimizeSpeed', 'crispEdges',
'geometricPrecision', 'inherit'),
'text-rendering': ('auto', 'optimizeSpeed', 'optimizeLegibility',
'geometricPrecision', 'inherit'),
'buffered-rendering': ('auto', 'dynamic', 'static', 'inherit'),
'solid-opacity': (), # 'inherit'
'solid-color': ('currentColor', '<color>'),
'color': ('currentColor', '<color>'),
'stop-color': ('currentColor', '<color>'),
'stop-opacity': (), # 'inherit'
'line-increment': (''), # 'auto', 'inherit'
'text-align': ('start', 'end', 'center', 'inherit'),
'display-align': ('auto', 'before', 'center', 'after', 'inherit'),
'font-size': (), # 'inherit'
'font-family': ('serif', 'sans-serif', 'monospace', 'inherit'),
'font-weight': ('normal', 'bold', 'bolder', 'lighter',
'<hundreds>', 'inherit'),
'font-style': ('normal', 'italic', 'oblique', 'inherit'),
'font-variant': ('normal', 'small-caps', 'inherit'),
'direction': ('ltr', 'rtl', 'inherit'),
'unicode-bidi': ('normal', 'embed', 'bidi-override', 'inherit'),
'text-anchor': ('start', 'middle', 'end', 'inherit'),
'fill': ('none', '<color>'), # # = RGB val
'fill-rule': ('nonzero', 'evenodd', 'inherit'),
'fill-opacity': (), # 'inherit'
'style': () #'[style]'), # Check properties in [style]
# Not needed Jan 2018 versionq
}
basic_types = { # Lists of allowed values
'<color>': ('black', 'white', '#000000', '#ffffff', '#FFFFFF'),
# 'grey', 'darkgrey', 'dimgrey', 'lightgrey',
# 'gray', 'darkgray', 'dimgray', 'lightgray',
# '#808080', '#A9A9A9', '#696969', '#D3D3D3', ,
'<paint>': ('<color>', 'none', 'currentColor', 'inherit'),
# attributes allowed in the rnc. We check their names, but not their values
'<tbreak>': ('id', 'base', 'lang', 'class', 'rel', 'rev', 'typeof',
'content', 'datatype', 'resource', 'about',
'property', 'space', 'fill-rule'),
'<number>': ('+g'),
'<hundreds>': ('+h') ,
}
color_default = 'black'
#property_lists = { # Lists of properties to check (for Inkscape)
# Not needed Jan 2018 versionq
# '[style]': ('font-family', 'font-weight', 'font-style',
# 'font-variant', 'direction', 'unicode-bidi', 'text-anchor',
# 'fill', 'fill-rule'),
# }
# Elements allowed within other elements
svg_child = ('title', 'path', 'rect', 'circle', 'line', 'ellipse',
'polyline', 'polygon', 'solidColor', 'textArea',
'text', 'g', 'defs', 'use', 'a', 'tspan')
# 'stop', 'linearGradient', 'radialGradient'
text_child = ('desc', 'title', 'tspan', 'text', 'a')
element_children = { # Elements allowed within other elements
'svg': svg_child,
'desc': ('text'),
'title': ('text'),
'path': ('title'),
'rect': ('title'),
'circle': ('title'),
'line': ('title'),
'ellipse': ('title'),
'polyline': ('title'),
'polygon': ('title'),
'solidColor': ('title'),
'textArea': text_child,
'text': text_child,
'g': svg_child,
'defs': svg_child,
'use': ('title'),
'a': svg_child,
'tspan': text_child,
# 'linearGradient': ('title'),
# 'radialGradient': ('title'),
# 'stop': ('title'),
}
xmlns_urls = ( # Whitelist of allowed URLs
'http://www.w3.org/2000/svg', # Base namespace for SVG
'http://www.w3.org/1999/xlink', # svgwrite uses this
'http://www.w3.org/XML/1998/namespace', # imagebot uses this
)
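# A minimal sketch (added for illustration, not part of the original module) of
# how a checker such as chek.py might consume the tables above once this file
# is "imported as wp"; the helper name and the sample calls are only examples.
#
#   import word_properties as wp
#
#   def attribute_allowed(element, attribute):
#       allowed = wp.elements.get(element)
#       if allowed is None:
#           return False  # unknown element
#       if attribute in allowed:
#           return True
#       # names listed under '<tbreak>' are generic attributes allowed anywhere
#       return '<tbreak>' in allowed and attribute in wp.basic_types['<tbreak>']
#
#   attribute_allowed('circle', 'cx')      # True
#   attribute_allowed('circle', 'points')  # False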
| {
"repo_name": "nevil-brownlee/check_svg",
"path": "word_properties.py",
"copies": "1",
"size": "11930",
"license": "mit",
"hash": 5419376553509860000,
"line_mean": 44.0188679245,
"line_max": 85,
"alpha_frac": 0.4177703269,
"autogenerated": false,
"ratio": 3.95163961576681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48694099426668097,
"avg_score": null,
"num_lines": null
} |
# 1.1.13 Write a code fragment to print the transposition
# (rows and columns changed) of a two-dimensional array with
# M rows and N columns
def transpose(matrix):
'''
Transposes a 2-d array of lists
INPUT: 2-d list of any values
RETURNS: Transposed 2-d list of values
'''
num_rows = len(matrix)
num_cols = len(matrix[0])
output_matrix = list()
for j in range(num_cols):
new_row = list()
for i in range(num_rows):
new_row.append(matrix[i][j])
output_matrix.append(new_row)
return output_matrix
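# Illustrative note (not in the original file): for rectangular inputs the same
# transposition can be written with the zip idiom,
#   list(map(list, zip(*matrix)))
# which pairs up the i-th element of every row, exactly as the loops above do.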
if __name__ == '__main__':
# Create some test matrices to print out
    test_matrix_2x2 = [[1, 2], [3, 4]]
    test_matrix_3x3 = [[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]]
    test_matrix_4x4 = [[1, 2, 3, 4],
                       [5, 6, 7, 8],
                       [9, 10, 11, 12],
                       [13, 14, 15, 16]]
print(transpose(test_matrix_2x2))
print(transpose(test_matrix_3x3))
print(transpose(test_matrix_4x4))
| {
"repo_name": "timgasser/algorithms_4ed",
"path": "ch1_fundamentals/ex1.1.13.py",
"copies": "1",
"size": "1124",
"license": "mit",
"hash": 6568593141977688000,
"line_mean": 25.1395348837,
"line_max": 61,
"alpha_frac": 0.4973309609,
"autogenerated": false,
"ratio": 3.4164133738601823,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9097777279955301,
"avg_score": 0.06319341096097592,
"num_lines": 43
} |
""" 11 150 10 0.01 0.01 0.01
12 50 30 0.1 0.05 0.5
13 100 20 0.01 0.05 0.05
14 400 10 0.005 0.05 0.05
15 100 20 0.01 0.05 0.05
16 100 20 0.01 0.05 0.05
17 100 20 0.01 0.05 0.05
18 100 20 0.01 0.05 0.05
19 100 20 0.01 0.05 0.05
20 100 20 0.01 0.05 0.05
"""
from lib.niching_func import niching_func
from lib.count_goptima import count_goptima
from scipy.optimize import minimize
from bokeh.plotting import *
from scipy.spatial.distance import euclidean, pdist, cdist, squareform
import numpy as np
from deap import base
from deap import creator
from deap import tools
from deap import cma, algorithms
def plot_graph1D(xs,ys):
# plot them
x = np.arange(f.lb,f.ub,0.01)
y = [f(xi) for xi in x]
p = figure(plot_width=1024, plot_height=1024)
hold(True)
line(x,y)
circle(xs[:,0],ys, size=np.ones(ys.shape)*15, color="green")
for xi, yi in zip(xs[:,0],ys):
idx = np.argsort( (xi-xs[:,0])**2 )[1]
print xi, xs[idx,0]
line([xi,xs[idx,0]],[yi,ys[idx]], color="green", line_width=3)
p.ygrid.grid_line_color = "white"
p.ygrid.grid_line_width = 2
p.xgrid.grid_line_color = "white"
p.xgrid.grid_line_width = 2
p.axis.major_label_text_font_size = "18pt"
p.axis.major_label_text_font_style = "bold"
show()
def plot_graph2D(Xs,Ys):
# plot them
p = figure(x_range=[f.lb[0],f.ub[0]],y_range=[f.lb[1],f.ub[1]],plot_width=1024, plot_height=1024)
hold(True)
step = 200
x = np.linspace(f.lb[0],f.ub[0],step)
y = np.linspace(f.lb[1],f.ub[1],step)
X, Y = np.meshgrid(x,y)
Z = [[f(np.array([x,y])) for x,y in zip(xl,yl)] for xl,yl in zip(X,Y)]
image(
image=[Z], x=[f.lb[0]], y=[f.lb[1]], dw=[f.ub[0]-f.lb[0]],
dh=[f.ub[1]-f.lb[1]], palette="Spectral11")
X1 = Xs[:,0]
X2 = Xs[:,1]
#Y = [y-Ys.min()+3 for y in Ys]
Y = [15 for y in Ys]
circle(X1,X2, size=Y, color="green")
tabu = []
for i,xi in enumerate(Xs):
idx = np.argsort( ((xi-Xs)**2).sum(axis=1) )[1]
tabu.append(idx)
line([xi[0],Xs[idx,0]],[xi[1],Xs[idx,1]], color="green", line_width=3)
p.ygrid.grid_line_color = "white"
p.ygrid.grid_line_width = 2
p.xgrid.grid_line_color = "white"
p.xgrid.grid_line_width = 2
p.axis.major_label_text_font_size = "18pt"
p.axis.major_label_text_font_style = "bold"
show()
def tupleize(func):
"""A decorator that tuple-ize the result of a function. This is useful
when the evaluation function returns a single value.
"""
def wrapper(*args, **kargs):
return func(*args, **kargs),
return wrapper
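# Illustrative note (not in the original file): DEAP evaluation functions are
# expected to return a tuple of fitness values, so a scalar objective can be
# adapted without touching the objective itself, e.g.
#
#   evaluate = tupleize(f)
#   evaluate(x)   # returns (f(x),) instead of f(x)
#
# which is how it is applied below via toolbox.decorate("evaluate", tupleize).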
def checkBounds(lb, ub):
def decorator(func):
def wrapper(*args, **kargs):
offspring = func(*args, **kargs)
for child in offspring:
for i in xrange(len(child)):
if child[i] > ub[i]:
child[i] = ub[i]
elif child[i] < lb[i]:
child[i] = lb[i]
return offspring
return wrapper
return decorator
def CMAOpt(X,Y, Adj):
Xopt = np.zeros(X.shape)
Yopt = np.zeros(Y.shape)
fopt = lambda x: -f(x)
nevals = X.shape[0]*50*50 #10*X.shape[1]
for i in range(X.shape[0]):
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", np.ndarray, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("evaluate", f)
toolbox.decorate("evaluate", tupleize)
neigh = np.where(Adj[i,:])[0]
if neigh.shape[0] > 2:
sigma = 2.0*((X[i]-X[neigh])**2).max()
else:
sigma = 0.2
strategy = cma.Strategy(centroid=X[i], sigma=sigma, lambda_=50)#10*X.shape[1])
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
toolbox.decorate("generate", checkBounds(f.lb, f.ub))
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("max", np.max)
hof = tools.HallOfFame(1, similar=np.array_equal)
try:
algorithms.eaGenerateUpdate(toolbox, ngen=100, stats=stats, halloffame=hof, verbose=False)
#algorithms.eaGenerateUpdate(toolbox, ngen=50, stats=stats, halloffame=hof, verbose=False)
#algorithms.eaGenerateUpdate(toolbox, ngen=50, stats=stats, halloffame=hof, verbose=False)
Xopt[i,:] = hof[0]
Yopt[i] = f(hof[0])
except:
Xopt[i,:] = X[i,:]
Yopt[i] = Y[i]
return Xopt,Yopt, nevals
def SciOpt(X,Y):
Xopt = np.zeros(X.shape)
Yopt = np.zeros(Y.shape)
fopt = lambda x: -f(x)
nevals = 0
for i in range(X.shape[0]):
x = np.copy(X[i,:])
y = np.copy(Y[i])
dim = x.shape[0]
xstar = minimize(fopt,x,bounds=zip(f.lb[:dim],f.ub[:dim]))
Xopt[i,:] = xstar.x
Yopt[i] = f(xstar.x)
nevals += xstar.nfev
return Xopt,Yopt, nevals
def Start( idx, mute=False ):
params = {}
with open("parameters2") as f:
for i, line in enumerate(f):
max_it, npop, step, thr, thrL = map(float,line.rstrip().split())
params[i] = (int(max_it), int(npop), step, thr, thrL)
f.closed
nfuncs = [1,2,3,4,5,6,7,6,7,8,9,10,11,11,12,11,12,11,12,12]
dims = [1, 1, 1, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 5, 5, 10, 10, 20]
nopts = [2, 5, 1, 4, 2, 18, 36, 81, 216, 12, 6, 8, 6, 6, 8, 6, 8, 6, 8, 8]
max_fes = [5e4]*5 + [2e5]*2 + [4e5]*2 + [2e5]*4 + [4e5]*7
nea2 = [1.0, 1.0, 1.0, 1.0, 1.0, 0.963, 0.945, 0.241, 0.621, 1.0, 0.98,
0.852, 0.977, 0.83, 0.743, 0.673, 0.695, 0.667, 0.667, 0.362]
nf, dim, nopt = nfuncs[idx], dims[idx], nopts[idx]
f = niching_func[nf]
f()
if not mute:
print 'range: [{},{}], nopts: {}, maxFES: {}, dim: F{} - {}D'.format(f.lb[0], f.ub[0], nopt, max_fes[idx], nf, dim)
# results
cgopt1 = lambda x: count_goptima(x,idx,1e-1)[0]
cgopt2 = lambda x: count_goptima(x,idx,1e-2)[0]
cgopt3 = lambda x: count_goptima(x,idx,1e-3)[0]
cgopt4 = lambda x: count_goptima(x,idx,1e-4)[0]
cgopt5 = lambda x: count_goptima(x,idx,1e-5)[0]
optWhat = lambda x: count_goptima(x,idx,1e-1)[1]
cgopt = (cgopt1, cgopt2, cgopt3, cgopt4, cgopt5, optWhat)
return cgopt, params[idx], (f, dim, nopt, max_fes[idx], nea2[idx])
| {
"repo_name": "folivetti/LINKEDOPT",
"path": "SRC/Startup.py",
"copies": "1",
"size": "6468",
"license": "mit",
"hash": 7253949087926167000,
"line_mean": 32,
"line_max": 123,
"alpha_frac": 0.5632343847,
"autogenerated": false,
"ratio": 2.605962933118453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3669197317818453,
"avg_score": null,
"num_lines": null
} |
# 11.1 Define Apple with four
# instance variables that represent four attributes
# class Apple():
# def __init__(self, color, shape, weight, crunch):
# self.color = color
# self.shape = shape
# self.weight = weight
# self.crunch = crunch
#
# 11.2 Define Circle with method called area that returns the
# area. Create a circle, call area and print result.
# Us the pi of the math module
# import math
# import math
#
# class Circle():
# def __init__(self, radius):
# self.radius = radius
# def area(self):
# return self.radius * self.radius * math.pi
#
# circle = Circle(1)
# print(circle.area())
# 11.3 Create a Triangle class with a method called area
# that calculates and returns its area.
# Then create a Triangle object, call area on it, and
# print the result.
#
# class Triangle():
# def __init__(self,b,h):
# self.base = b
# self.height = h
# def area(self):
# return self.base * self.height / 2
#
# tri = Triangle(2,2)
# print(tri.area())
# 11.4 Make a Hexagon class with a method called calculate_perimeter that
# calculates and returns its perimeter. Then create a Hexagon object, call
# calculate_perimeter on it, and print the result.
class Hexagon():
def __init__(self, s):
self.side = s
def calculate_perimeter(self):
return self.side * 6
hexa = Hexagon(3)
print(hexa.calculate_perimeter())
| {
"repo_name": "Frikeer/LearnPython",
"path": "exc11/exc11.py",
"copies": "1",
"size": "1431",
"license": "unlicense",
"hash": 5325428921563553000,
"line_mean": 25.5,
"line_max": 74,
"alpha_frac": 0.642907058,
"autogenerated": false,
"ratio": 3.282110091743119,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4425017149743119,
"avg_score": null,
"num_lines": null
} |
#11/21/2012
#legacy-client application
"""
<Purpose>
Legacy application with built-in software updater
Application does:
- call built-in software updater for updates
- does its regular work (contact google server)
- quit
Built in software updater does the following:
- get manifest from server
- compare hashes of new manifest and old
- download files that need to be updated
legacy-client application options:
u: perform software update ->uses built-in software updater
w: perform regular work (sample work is connect and send of msg to google server)
q: quit client application
"""
import errno
import socket
import sys
import os
import urllib
import httplib
from check_hash import *
from random import choice
#server and mirrors are hardcoded
SERVER_ADDRESS = "127.0.0.1:8101"
MIRRORS = ["127.0.0.1:8000", "127.0.0.1:8001","127.0.0.1:8002"]
#basic method that gets contents of file from server or mirrors
def get_file_from_network(url,file_path):
try:
conn=httplib.HTTPConnection(url)
conn.request("GET", file_path)
rsp = conn.getresponse()
data_received=rsp.read()
conn.close()
return data_received
except:
return None
#built-in software updater
def perform_update():
manifest_contents = get_file_from_network(SERVER_ADDRESS,"/MANIFEST")
if manifest_contents is None:
print "Error getting MANIFEST from server"
return None
manifest = open("MANIFEST_temp", 'wb')
manifest.write(manifest_contents )
manifest.close()
#perform hash comparison
hash_diff = check_hashes(r'MANIFEST', r'MANIFEST_temp')
if hash_diff: #update manifest file
to_update = read_manifest("MANIFEST")
new_manifest = read_manifest("MANIFEST_temp")
# perform update on files all files that are out of date
for file_to_update in hash_diff:
file_basename = os.path.basename(file_to_update)
print "Updating "+ file_basename+ "\n"
try:
file_contents= get_file_from_mirror(file_basename)
#update files locally
print "File "+ file_basename +" updated\n"
file_handle = open(file_basename ,"w")
file_handle.write(file_contents)
file_handle.close()
to_update[file_to_update] = new_manifest[file_to_update]
except:
print "Could not update file: "+ file_basename
#update local manifest
handle = open("MANIFEST","w")
for key in to_update:
handle.write(str(key)+" "+to_update[key][0]+ " "+to_update[key][1]+"\n")
handle.close()
else: # no hash difference
print "All files are up to date"
os.remove(os.path.abspath("MANIFEST_temp"))
#use mirrors to download files
#TODO: Mechanism for mirror to sync file with server (tried rsync and PUT request.. none worked)
def get_file_from_mirror(file_to_update):
#mirrorlist = MIRRORS 12/06/2012. Bug found by David. Contents of MIRRORS are removed
mirrorlist =["127.0.0.1:8000", "127.0.0.1:8001","127.0.0.1:8002"]
while(True):
if len(mirrorlist) == 0:
            raise IOError(errno.ENOENT, "no mirrors left")  # imitate file not found; ideally a custom "out of mirrors" error
mirror = choice(mirrorlist)
mirrorlist.remove(mirror)
(ip, port) = mirror.split(':')
#perform file update
try:
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((ip,int(port)))
s.send("GET " + str(file_to_update) + " HTTP/0.9\n\n",0)
update = s.recv(8192)
s.close()
except:
continue #go onto the next IP
return update
#Purpose: simulate regular (network) work being done by legacy application
def legacy_work():
try:
sock =socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect(('173.194.75.99',80))
sock.send("GET /index.html HTTP/1.1\r\n\r\n")
sock.recv(1024)
sock.close()
print "success"
except:
print "error in doing work of legacy application"
#Purpose: quit application
def quit_application():
sys.exit(0)
def main():
print("Welcome to the legacy-app\nOptions:")
print("u: check server for updates and perform upddate")
print("w: perform regular work of legacy application")
print("q: quit the legacy application\n")
while(True):
print("\nWhat would you like to do ?(u,w or q)")
try:
op = str(raw_input())
if op == 'u':
perform_update()
elif op == 'q':
quit_application()
elif op == 'w':
legacy_work()
else:
print "Invalid options type"
except ValueError:
print "Invalid option type"
if __name__ == "__main__":
main()
| {
"repo_name": "monzum/tuf-legacy",
"path": "scripts/client/legacy-client.py",
"copies": "1",
"size": "4637",
"license": "mit",
"hash": 5968488940114300000,
"line_mean": 26.9337349398,
"line_max": 96,
"alpha_frac": 0.6560276041,
"autogenerated": false,
"ratio": 3.328786791098349,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8946998927140836,
"avg_score": 0.10756309361150274,
"num_lines": 166
} |
# 112. Path Sum - LeetCode
# https://leetcode.com/problems/path-sum/description/
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def has_sum_with_parent( self, root, current_sum, target_sum ):
if root is None:
return False
current_sum += root.val
if root.left is None and root.right is None:
if current_sum == target_sum:
return True
# Cause of WA1
# if current_sum > target_sum:
# return False
return ( self.has_sum_with_parent( root.left, current_sum, target_sum )
or self.has_sum_with_parent( root.right, current_sum, target_sum ) )
def hasPathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: bool
"""
return self.has_sum_with_parent( root, 0, sum )
s = Solution()
tn = TreeNode(1)
tn.left = TreeNode(2)
tn.right = TreeNode(3)
tn.left.left = TreeNode(4)
tn.left.right = TreeNode(5)
tn.left.left.left = TreeNode(6)
print s.hasPathSum(tn,13)
print s.hasPathSum(tn,12)
# WA1, [-2,null,-3]
tn = TreeNode(-2)
tn.right = TreeNode(-3)
print s.hasPathSum(tn,-5) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/112_path-sum.py",
"copies": "1",
"size": "1269",
"license": "mit",
"hash": 8305903028106875000,
"line_mean": 26.0212765957,
"line_max": 80,
"alpha_frac": 0.5831363278,
"autogenerated": false,
"ratio": 3.141089108910891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4224225436710891,
"avg_score": null,
"num_lines": null
} |
# 112. Path Sum
# Given a binary tree and a sum,
# determine if the tree has a root-to-leaf path
# such that adding up all the values along the path equals the given sum.
#
# For example:
# Given the below binary tree and sum = 22,
# 5
# / \
# 4 8
# / / \
# 11 13 4
# / \ \
# 7 2 1
#
# return true, as there exist a root-to-leaf path 5->4->11->2 which sum is 22.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def hasPathSum(self, root, sum):
if root is None:
return False
# leaf node
if root.left is None and root.right is None and root.val == sum:
return True
# reduce sum by node value at each recursion
return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val)
if __name__ == "__main__":
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)
root.left.left.right = TreeNode(2)
print Solution().hasPathSum(root, 22)
| {
"repo_name": "gengwg/leetcode",
"path": "112_path_sum.py",
"copies": "1",
"size": "1183",
"license": "apache-2.0",
"hash": -5073569330014601000,
"line_mean": 25.2888888889,
"line_max": 104,
"alpha_frac": 0.5562130178,
"autogenerated": false,
"ratio": 3.3994252873563218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4455638305156322,
"avg_score": null,
"num_lines": null
} |
# 113. Path Sum II
#
# Given a binary tree and a sum,
# find all root-to-leaf paths where each path's sum equals the given sum.
#
# For example:
# Given the below binary tree and sum = 22,
# 5
# / \
# 4 8
# / / \
# 11 13 4
# / \ / \
# 7 2 5 1
# return
# [
# [5,4,11,2],
# [5,8,4,5]
# ]
#
# http://www.tangjikai.com/algorithms/leetcode-112-path-sum
# dfs to track each path
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
res = []
self.dfs(root, sum, res, [])
return res
def dfs(self, root, sum, res, path):
if not root:
return[]
if not root.left and not root.right and sum == root.val:
res.append(path + [root.val])
self.dfs(root.left, sum - root.val, res, path + [root.val])
self.dfs(root.right, sum - root.val, res, path + [root.val])
if __name__ == "__main__":
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)
root.left.left.right = TreeNode(2)
root.right = TreeNode(8)
root.right.right = TreeNode(4)
root.right.right.left = TreeNode(5)
root.right.right.right = TreeNode(1)
print Solution().pathSum(root, 22)
| {
"repo_name": "gengwg/leetcode",
"path": "113_path_sum_ii.py",
"copies": "1",
"size": "1573",
"license": "apache-2.0",
"hash": 4854118787909974000,
"line_mean": 23.2,
"line_max": 74,
"alpha_frac": 0.5301970757,
"autogenerated": false,
"ratio": 3.0662768031189085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40964738788189087,
"avg_score": null,
"num_lines": null
} |
# 1.1.3 Write a program that takes three integer command-line arguments and
# prints equal if all three are equal, and not equal otherwise.
# Needed to get command line arguments
import sys
def main(argv=None):
'''
Function called to run main script including unit tests
INPUT: List of arguments from the command line
RETURNS: Exit code to be passed to sys.exit():
-1: Invalid input
0: Script completed successfully
'''
if argv is None:
argv = sys.argv
options = argv[1:]
return check_and_compare_opts(options)
def check_and_compare_opts(options):
'''
Checks options are integers, and compares them if so.
INPUT: List of options
RETURNS: None if options aren't 3 integers,
False/True otherwise depending on comparison
'''
int_options = convert_args(options)
if int_options is None or len(int_options) != 3:
print('Error - need 3 integer arguments, got {}'.format(options))
return -1
if check_equal(int_options):
print('equal')
return 0
else:
print('not equal')
return 0
def list_contains_ints(vals):
'''
Checks if all elements in a list contains ints
INPUT: list of arbitrary lengths
RETURNS: bool showing whether all values are ints
'''
for val in vals:
if type(val) is not int:
return False
return True
def convert_args(args):
'''
Convert all entries in list to integer
INPUT: List of arguments
RETURN: List of integers (if they can be converted)
'''
try:
arg_ints = [int(arg) for arg in args]
except:
# print('Error converting {} to integer'.format(args))
return None
return arg_ints
def check_equal(vals):
'''
Checks if all the values are equal (assumes list of ints)
INPUT: List of integer values
RETURNS: Boolean when all values are equal
'''
first_val = vals[0]
for val in vals:
if first_val != val:
return False
return True
def check_equal_recurse(vals):
    '''
    Recursively checks if all the values are equal (assumes list of ints)
    INPUT: List of integer values
    RETURNS: Boolean when all values are equal
    '''
    if len(vals) <= 1:
        return True
    return vals[0] == vals[1] and check_equal_recurse(vals[1:])
if __name__ == '__main__':
sys.exit(main())
| {
"repo_name": "timgasser/algorithms_4ed",
"path": "ch1_fundamentals/ex1.1.3.py",
"copies": "1",
"size": "2468",
"license": "mit",
"hash": 5104426533229425000,
"line_mean": 23.69,
"line_max": 76,
"alpha_frac": 0.6077795786,
"autogenerated": false,
"ratio": 4.154882154882155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5262661733482155,
"avg_score": null,
"num_lines": null
} |
# 116. Populating Next Right Pointers in Each Node
#
# Given a binary tree
#
# struct TreeLinkNode {
# TreeLinkNode *left;
# TreeLinkNode *right;
# TreeLinkNode *next;
# }
# Populate each next pointer to point to its next right node.
# If there is no next right node, the next pointer should be set to NULL.
#
# Initially, all next pointers are set to NULL.
#
# Note:
#
# You may only use constant extra space.
# You may assume that it is a perfect binary tree
# (ie, all leaves are at the same level, and every parent has two children).
#
# For example,
# Given the following perfect binary tree,
# 1
# / \
# 2 3
# / \ / \
# 4 5 6 7
# After calling your function, the tree should look like:
# 1 -> NULL
# / \
# 2 -> 3 -> NULL
# / \ / \
# 4->5->6->7 -> NULL
# Definition for binary tree with next pointer.
class TreeLinkNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
if root and root.left:
root.left.next = root.right
if root.next:
root.right.next = root.next.left
else:
root.right.next = None
self.connect(root.left)
self.connect(root.right)
    # Iterative, O(1)-space version; because it re-uses the same method name it
    # replaces the recursive connect defined above when the class body runs.
    def connect(self, root):
while root and root.left:
nxt = root.left
while root:
root.left.next = root.right
root.right.next = root.next and root.next.left
root = root.next
root = nxt
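# A minimal usage sketch (not part of the original file): build the perfect
# tree from the problem statement, connect it, then print each level by
# following the next pointers.
if __name__ == '__main__':
    root = TreeLinkNode(1)
    root.left, root.right = TreeLinkNode(2), TreeLinkNode(3)
    root.left.left, root.left.right = TreeLinkNode(4), TreeLinkNode(5)
    root.right.left, root.right.right = TreeLinkNode(6), TreeLinkNode(7)
    Solution().connect(root)
    node = root
    while node:
        level, cur = [], node
        while cur:               # walk right along the next pointers
            level.append(cur.val)
            cur = cur.next
        print level              # [1], then [2, 3], then [4, 5, 6, 7]
        node = node.left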
| {
"repo_name": "gengwg/leetcode",
"path": "116_populate_next_right_pointers_in_each_node.py",
"copies": "1",
"size": "1722",
"license": "apache-2.0",
"hash": 3474218222078998000,
"line_mean": 25.0909090909,
"line_max": 76,
"alpha_frac": 0.5569105691,
"autogenerated": false,
"ratio": 3.6176470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4674557627923529,
"avg_score": null,
"num_lines": null
} |
# 117. Populating Next Right Pointers in Each Node II
#
# Follow up for problem "Populating Next Right Pointers in Each Node".
#
# What if the given tree could be any binary tree? Would your previous solution still work?
#
# Note:
#
# You may only use constant extra space.
# For example,
# Given the following binary tree,
# 1
# / \
# 2 3
# / \ \
# 4 5 7
#
# After calling your function, the tree should look like:
# 1 -> NULL
# / \
# 2 -> 3 -> NULL
# / \ \
# 4-> 5 -> 7 -> NULL
# Definition for binary tree with next pointer.
class TreeLinkNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
# https://shenjie1993.gitbooks.io/leetcode-python/117%20Populating%20Next%20Right%20Pointers%20in%20Each%20Node%20II.html
def connect(self, root):
dummy = TreeLinkNode(-1)
node = dummy
while root:
while root:
node.next = root.left
node = node.next or node
node.next = root.right
node = node.next or node
root = root.next
root, node = dummy.next, dummy
| {
"repo_name": "gengwg/leetcode",
"path": "117_populate_next_right_pointers_in_each_node_ii.py",
"copies": "1",
"size": "1316",
"license": "apache-2.0",
"hash": -6016741793466921000,
"line_mean": 25.8571428571,
"line_max": 125,
"alpha_frac": 0.5607902736,
"autogenerated": false,
"ratio": 3.5663956639566394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46271859375566393,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Libao Jin'
__date__ = 'December 17, 2015'
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
tri = []
if numRows == 0:
tri = []
else:
tri = [[1]]
n = numRows - 1
for i in range(1, numRows):
tmp = []
for j in range(i+1):
tmp.append(self.nchoosek(i, j))
tri.append(tmp)
return tri
def nchoosek(self, n, k):
numerator = 1
denominator = 1
i = 1
while i <= k:
denominator *= i
i += 1
j = n
while j >= n - k + 1:
numerator *= j
j -= 1
frac = numerator / denominator
return int(frac)
if __name__ == '__main__':
s = Solution()
print(s.generate(0))
print(s.generate(1))
print(s.generate(2))
print(s.generate(3))
print(s.generate(4))
print(s.generate(5))
| {
"repo_name": "jinlibao/LeetCode-Solutions",
"path": "solutions/118_Pascals_Triangle.py",
"copies": "2",
"size": "1044",
"license": "mit",
"hash": -4623694493488764000,
"line_mean": 21.6956521739,
"line_max": 47,
"alpha_frac": 0.4444444444,
"autogenerated": false,
"ratio": 3.5753424657534247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017109500805152977,
"num_lines": 46
} |
"""
Copyright 2015 Ericsson AB
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import sys
sys.path.append('../OpenStreetMap')
from routeGenerator import coordinates_to_nearest_stops, get_route
from dbConnection import DB
from datetime import datetime
import datetime
from datetime import timedelta
from datetime import date
class Fitness():
# Main [class] variables
diffMinutes = 0
formatString = '%d-%m-%Y %H:%M'
formatTime = '%H:%M'
secondMinute = 60.0
firstMinute = "00:00"
lastMinute = "23:59"
requests = []
routes = []
request = []
requestIndex = []
requestOut = []
requestIndexOut = []
totalRequestsBusline = {}
# yesterday = date.today() - timedelta(13)
yesterdayDate = datetime.datetime.now() - timedelta(1)
yesterday = datetime.datetime(yesterdayDate.year, yesterdayDate.month, yesterdayDate.day)
# A decorator is a function that can accept another function as
# a parameter to be able to modify or extend it
def __init__(self):
self.runOnce()
def decorator(afunction):
# A wrapper function is used to wrap functionalites you want around the original function
def wrapper(*args):
# Checks whether or not the original function as been executed once
if not wrapper.has_run:
wrapper.has_run = True
return afunction(*args)
else:
pass
wrapper.has_run = False
return wrapper
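    # Illustrative note (not in the original code): applying @decorator to a
    # method makes its body run only on the first call, e.g.
    #
    #   fitness = Fitness()   # __init__ calls runOnce(), so the indexes are built
    #   fitness.runOnce()     # later calls fall through the 'else: pass' branch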
@decorator
def runOnce(self):
db = DB()
# Setting the start time boundary of request that we want
startTime = datetime.datetime.combine(Fitness.yesterday, datetime.datetime.strptime(Fitness.firstMinute, Fitness.formatTime).time())
endTime = datetime.datetime.combine(Fitness.yesterday, datetime.datetime.strptime(Fitness.lastMinute, Fitness.formatTime).time())
# Create index for the people going on the bus
Fitness.request = db.grpReqByBusstopAndTime(startTime, endTime)
self.createRequestIndex(Fitness.request)
# Create index for the people going down the bus
Fitness.requestOut = db.getReqCountByEndBusStop(startTime, endTime)
self.createRequestIndexOut(Fitness.requestOut)
'''
# Functions for new encoding including multiple line
busLines = set(db.busLine)
for line in busLines:
for x in db.timeSliceArray:
start = datetime.datetime.combine(Fitness.yesterday,datetime.time(x[0], 0, 0))
end = datetime.datetime.combine(Fitness.yesterday, datetime.time(x[1], 59, 59))
requestBetweenTimeSlices = db.getTravelRequestBetween(start, end, line)
for count in enumerate(requestBetweenTimeSlices, start=1):
countingNoOfRequest = (count[0])
finalNoReqBetweenTimeSlice = countingNoOfRequest
Fitness.totalRequestsBusline[(line, start, end)] = finalNoReqBetweenTimeSlice
print Fitness.totalRequestsBusline
'''
def search(self, initialTime, NextTime, BusStop, Line):
res = []
counting = 0
for match in Fitness.request:
if initialTime <= match["_id"]["RequestTime"] <= NextTime and match["_id"]["line"]==Line and match["_id"]["BusStop"] == BusStop:
counting+=1
res.append(match)
if match["total"] > 1:
counting += match["total"]-1
return res, counting
def search(self, initialTime, NextTime, BusStop, Line):
''' Return the requests for the given bus stop and line whose request time lies between initialTime and NextTime.
Note: this definition overrides the identically named search() defined just above.
'''
res = []
for match in Fitness.request:
if (initialTime <= match["_id"]["RequestTime"] <= NextTime) and (match["_id"]["line"] == Line and
match["_id"]["BusStop"] == BusStop):
res.append(match)
return res
def timeDiff(self, time1, time2):
''' Evaluates the difference between two times.
Args: time1 and time2 as strings in '%H:%M' format, time1 > time2
Returns: the timedelta between time1 and time2.
'''
return datetime.datetime.strptime(time1, Fitness.formatTime) - datetime.datetime.strptime(time2, Fitness.formatTime)
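# Helper: both functions below extract the minutes-within-the-hour component of a timedelta,
# e.g. a 125-second delta yields 2.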
def getMinutes(self, td):
return (td.seconds//Fitness.secondMinute) % Fitness.secondMinute
def getMinutesFromTimedelta(td):
return (td.seconds//60) % 60
def createRequestIndex(self, request):
''' Creates an index storing each distinct request time and its position in the request array
@param: request (array): Requests grouped by bus stop and request time, including a count ("total") column
'''
# requestTime = 0
Fitness.requestIndex.append([request[0]["_id"]["RequestTime"], 0])
requestTime = request[0]["_id"]["RequestTime"]
for i in range(1, len(request)):
if request[i]["_id"]["RequestTime"] != requestTime:
# Fitness.requestIndex.append([request[i]["_id"]["RequestTime"].day, request[i]["_id"]["RequestTime"].hour, request[i]["_id"]["RequestTime"].minute, i])
Fitness.requestIndex.append([request[i]["_id"]["RequestTime"], i])
requestTime = request[i]["_id"]["RequestTime"]
def searchRequest(self, initialTime, finalTime, busStop, line):
''' Search the request array based on an initial time, a final time and a particular bus stop
@param: initialTime (datetime): Initial time to perform the request's search
@param: finalTime (datetime): Final time to perform the request's search
@param: busStop (string): Bus stop name used on the request's search
'''
result = []
index = self.searchRequestIndex(Fitness.requestIndex, initialTime, finalTime)
if index != False:
request = Fitness.request[index[0]:index[1]]
for i in range(len(request)):
if request[i]["_id"]["BusStop"] == busStop and request[i]["_id"]["line"] == line:
result.append(request[i])
return result
def searchRequestIndex(self, index, initialDate, finalDate):
''' Search the index to get the position on the request array for a specific time frame
@param: index (array): Structure that stores each request time and its position in the request array
@param: initialDate (datetime): Start of the time frame to search over the index
@param: finalDate (datetime): End of the time frame to search over the index
'''
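# Worked example with assumed data: if the index holds [[08:00, 0], [08:05, 12], [08:10, 30]]
# and we search from 08:00 to 08:10, the first loop picks 0 and the reversed loop picks 30,
# so [0, 30] are the slice boundaries into the request array.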
result = []
position = 0
test = True
# Look for the first index on the search
for i in range(len(index)):
if index[i][0] >= initialDate and index[i][0] < finalDate:
result.append(index[i][1])
indexDate = index[i][0]
position = i
break
if index[i][0] >= finalDate:
break
if len(result) == 0:
test = False
# Evaluate if the first index was found
if test:
# If found, look for the second index, however the index has to go backwards
for j in reversed(range(position, len(index))):
if index[j][0] > indexDate and index[j][0] <= finalDate:
result.append(index[j][1])
break
if index[j][0] <= indexDate:
break
if len(result) == 1:
test = False
# Check if both values were generated, if not return an array with false values
if test:
return result
else:
return False
def createRequestIndexOut(self, request):
''' Creates an index storing each distinct end time and its position in the requestOut array
@param: request (array): Requests grouped by bus stop and end time, including a count column
'''
Fitness.requestIndexOut.append([request[0]["_id"]["endTime"], 0])
requestTime = request[0]["_id"]["endTime"]
for i in range(1, len(request)):
if request[i]["_id"]["endTime"] != requestTime:
Fitness.requestIndexOut.append([request[i]["_id"]["endTime"], i])
requestTime = request[i]["_id"]["endTime"]
def searchRequestOut(self, initialTime, finalTime, busStop, line):
''' Search the request array based on an initial time, a final time and a particular bus stop
@param: initialTime (datetime): Initial time to perform the request's search
@param: finalTime (datetime): Final time to perform the request's search
@param: busStop (string): Bus stop name used on the request's search
'''
result = []
index = self.searchRequestIndex(Fitness.requestIndexOut, initialTime, finalTime)
if index != False:
request = Fitness.requestOut[index[0]:index[1]]
for i in range(len(request)):
if request[i]["_id"]["busStop"] == busStop and request[i]["_id"]["line"] == line:
result.append(request[i])
return result
def calculateCost(self, individual, totalWaitingTime, penaltyOverCapacity):
''' Calculate cost for an individual in the population.
@param individual: individual in the population;
totalWaitingTime: total waiting time for that individual
penaltyOverCapacity: a positive integer representing a large cost added when the capacity cannot handle all requests of that trip
@return cost: a non-negative integer for this individual; -1 if any input parameter is out of range
A lower cost means a better individual.
Assume one minute of waiting per person costs 1 kr
'''
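# Worked example with assumed values: an individual with two trips on capacity-60 buses gives
# busCost = 2 * 1200 = 2400; totalWaitingTime = 500 minutes gives waitingCost = 500; with
# penaltyOverCapacity = 0 the full cost would be 2900 (but see the note at the return below).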
cost = 0
costOfBus = [[20, 1000], [60, 1200], [120, 1400]]
waitingCostPerMin = 1
busCost = 0
if penaltyOverCapacity < 0 or individual is None or totalWaitingTime < 0:
cost = -1
else:
for i in range(len(individual)):
busCapacity = individual[i][1]
for j in range(len(costOfBus)):
if busCapacity == costOfBus[j][0]:
busCost = busCost + costOfBus[j][1]
break
waitingCost = totalWaitingTime * waitingCostPerMin
cost = busCost + waitingCost + penaltyOverCapacity
# return cost
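# NOTE: only the waiting cost is returned; busCost and penaltyOverCapacity are computed above
# but are not part of the returned value.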
return waitingCost
def generateStartTimeBasedOnFreq(self, busLine, capacity, frequency, startTime):
""" Generate all the trips within a time slice given a single starting time
Args:
busLine: an integer representing the bus line ID
frequency: the headway in minutes between successive buses
startTime: a datetime object representing the start time within the time slice
Return:
an array containing all the starting times for the bus trips within the corresponding time slice.
"""
# Make sure the starting time lies between the lower and upper bounds of one of the time slices
startTimeArray = []
lineTimes = {}
for x in DB.timeSliceArray:
start = datetime.datetime.combine(Fitness.yesterday, datetime.time(x[0], 0, 0))
end = datetime.datetime.combine(Fitness.yesterday, datetime.time(x[1], 59, 59))
if start <= startTime <= end:
nextStartTime = startTime + datetime.timedelta(minutes=frequency)
nextStartTime2 = startTime - datetime.timedelta(minutes=frequency)
startTimeArray.append([startTime, capacity])
if nextStartTime <= end:
startTimeArray.append([nextStartTime, capacity])
if nextStartTime2 >= start:
startTimeArray.append([nextStartTime2, capacity])
while nextStartTime <= end:
nextStartTime = nextStartTime + datetime.timedelta(minutes=frequency)
if nextStartTime <= end:
startTimeArray.append([nextStartTime, capacity])
while nextStartTime2 >= start:
nextStartTime2 = nextStartTime2 - datetime.timedelta(minutes=frequency)
if nextStartTime2 >= start:
startTimeArray.append([nextStartTime2, capacity])
return sorted(startTimeArray)
def genTimetable(self, individual):
""" Generate a timetable for the whole day, for all the bus lines."""
timetable = {}
counter = 0
busLines = set([x[0] for x in individual])
for line in busLines:
ind = [y for y in individual if y[0] == line]
for i, val in enumerate(ind):
counter+=1
generate = self.generateStartTimeBasedOnFreq(line, val[1], val[2], val[3])
if line not in timetable:
timetable[line] = generate
else:
timetable[line] = timetable[line] + generate
timetable = sorted(timetable.items(), key = lambda e: e[0])
ttLines = []
for i, item in enumerate(timetable):
for trip in item[1]:
ttLines.append([item[0], trip[1], trip[0]])
#print trip
#print sorted(timetable.items(), key = lambda e: e[0])
return ttLines
def getTimeSlice(self, startTime):
''' Evaluate which time slice a given starting time in a gene belongs to.
@ param startTime datetime
@ return start (datetime.datetime): the beginning of the matching time slice, or None if no slice matches
'''
startTimeArray = []
for x in DB.timeSliceArray:
start = datetime.datetime.combine(Fitness.yesterday, datetime.time(x[0], 0, 0))
end = datetime.datetime.combine(Fitness.yesterday, datetime.time(x[1], 59, 59))
if start <= startTime <= end:
return start
| {
"repo_name": "EricssonResearch/monad",
"path": "LookAhead/fitness.py",
"copies": "2",
"size": "14794",
"license": "apache-2.0",
"hash": -7549709397258059000,
"line_mean": 44.2415902141,
"line_max": 168,
"alpha_frac": 0.6111261322,
"autogenerated": false,
"ratio": 4.4240430622009566,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005805850694822586,
"num_lines": 327
} |
''' 11-extract_long_periods.py
===============================================
AIM: Prepare cumulative plots (THIS SCRIPT IS WITHOUT STRAY LIGHT)
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : complicated name files depending on the case (handled by 12-<...>.py)
CMD: python 11-extract_long_periods.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: THIS SCRIPT IS WITHOUT STRAY LIGHT
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
import sys
import time
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
###########################################################################
### PARAMETERS
# altitude of the orbit in km
apogee=700
perigee=700
orbit_id = 301
# First minute in data set !
minute_ini = 0
# Last minute to look for
minute_end = 1440*365
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Minimum observable time for plots
threshold_obs_time = 50
# Time to acquire a target
t_acquisition = 3
# Show preview ?
show = True
# Include SAA ?
SAA = True
# File name for the output file
output_fname = 'TEST-data_%d' % (threshold_obs_time)
extension = '.dat'
# Factor in the SL post treatment correction ?
SL_post_treat = True
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee,perigee)
###########################################################################
### INITIALISATION
file_flux = 'flux_'
# raise the threshold by adding the acquisition time:
threshold_obs_time += t_acquisition
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if not os.path.isdir(folder_figures):
print '\tError: figure folder %s does not exist.' % (folder_figures)
exit()
sys.stdout.write("Loading list of computed orbits...\t\t")
sys.stdout.flush()
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
list_minutes = -1. * np.ones( ( np.shape(orbits)[0] + 2 ) * period )
id_min = 0
times = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
for ii, orbit_current in enumerate(orbits[:,0]):
t_ini, t_end, a_ini, a_end = fast_orbit2times(times,orbit_current,orbit_id)
for minute in range(a_ini, a_end+1):
list_minutes[id_min] = int(minute)
id_min += 1
list_minutes = list_minutes[list_minutes > -1]
# apply conditions
list_minutes = list_minutes[list_minutes >= minute_ini]
list_minutes = list_minutes[list_minutes <= minute_end]
print 'Done.'
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
visibility = np.zeros(np.shape(ra_grid))
visibility_save = np.zeros([np.shape(ra_grid)[0], np.shape(ra_grid)[1], int(period+2)])
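# visibility_save caches the visibility map of each minute of the current orbit so that, when a
# flux file is missing, the map computed earlier in the same orbit can be reused (see the except
# branch below).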
workspace = np.zeros(np.shape(ra_grid))
data = np.zeros(np.shape(ra_grid))
numberofminutes = minute_end+1 - minute_ini
minutes_orbit_iditude = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
if SAA:
SAA_data = np.loadtxt('resources/SAA_table_%d.dat' % orbit_id, delimiter=',')
SAA_data = SAA_data[SAA_data[:,0]>= minute_ini]
SAA_data = SAA_data[SAA_data[:,0]<= minute_end]
###########################################################################
### LOAD AND COMPUTE LARGEST OBSERVATION PERIOD
start = time.time()
lp = -1
try:
for minute in range(minute_ini,minute_end+1):
minute = int(minute)
if SAA and fast_SAA(SAA_data, minute): SAA_at_minute = True
else: SAA_at_minute = False
orbit_current = fast_minute2orbit(minutes_orbit_iditude, minute, orbit_id)
junk, junk, atc_ini, junk = fast_orbit2times(minutes_orbit_iditude, orbit_current, orbit_id)
if orbit_current > lp:
lp = orbit_current
message = "Loading stray light data orbit %d on %d...\t" % (lp, minutes_orbit_iditude[-1,0])
sys.stdout.write( '\r'*len(message) )
sys.stdout.write(message)
sys.stdout.flush()
try:
ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
# Apply the flux correction (SL post-treatment removal)
if SL_post_treat: S_sl *= (1.0 - param.SL_post_treat_reduction)
load = True
except IOError:
# If the flux file for this minute is missing, do nothing here and reuse values saved earlier in the same orbit
# in which orbit are we ?
# get the previous orbit computed and copy the stray light data of this orbit :
# orbit_previous = orbits[orbits[:,0] < orbit_current][-1,0]
# junk, junk, at_ini, junk = fast_orbit2times(minutes_orbit_iditude, orbit_previous, orbit_id)
minute_replacement = minute - atc_ini# + at_ini
load = False
# try: ra, dec, S_sl = fast_load_flux_file(minute_replacement, file_flux, folder=folder_flux)
# except IOError: ra, dec, S_sl = fast_load_flux_file(minute_replacement-1, file_flux, folder=folder_flux)
# pass
# populate the visibility matrix
if SAA_at_minute: visibility = np.zeros(np.shape(ra_grid))
elif load:
visibility_save[...,minute-atc_ini] = 0
for index, ra_ in enumerate(ra):
id_ra = find_nearest(ras,ra_)
id_dec = find_nearest(decs,dec[index])
visibility[id_dec,id_ra] = 1
visibility_save[id_dec,id_ra,minute-atc_ini] = 1
else: visibility = visibility_save[...,minute_replacement]
if minute == minute_ini: workspace=visibility.copy()
else :
# If visibility is interrupted at a grid point, reset its counter in workspace,
# but first save the accumulated value if it exceeds threshold_obs_time minutes
data[ (workspace>threshold_obs_time-1) & (visibility < 1) ] += \
workspace[(workspace>threshold_obs_time-1)&(visibility< 1)]
workspace[visibility < 1] = 0
# if the point existed already, then add one minute
workspace[visibility > 0] += 1
# Reset visibility from scratch to avoid carrying over stale values
del visibility
visibility = np.zeros(np.shape(ra_grid))
except KeyboardInterrupt: print hilite('\nWARNING! USER STOPPED LOADING AT MINUTE %d' % minute,False,False)
# Make sure we did not leave anything behind (wrapped in a try block to guard against mask shape mismatches)
try:
data[ (workspace>threshold_obs_time-1) ] += \
workspace[(workspace>threshold_obs_time-1)&(visibility< 1)]
except ValueError: pass
del workspace
end = time.time()
elapsed_time = round((end-start)/60.,1)
sys.stdout.write( '\r'*len(message) )
sys.stdout.flush()
print
print "Loaded stray light data\tTime needed: %2.2f min" % elapsed_time
if SAA: note = '_SAA'
else: note = ''
np.savetxt(folder_misc+output_fname+note+extension,data)
print "Data saved in %s%s" % (folder_misc,output_fname+note+extension)
if not show : exit()
plt.figure()
ax = plt.subplot(111)
extent = (-np.pi,np.pi,-np.pi/2.,np.pi/2.)
CS = ax.contour((ra_grid-np.pi)*180. / np.pi,dec_grid*180. / np.pi,data,colors='k',extent=extent)
CS = ax.contourf((ra_grid-np.pi)*180. / np.pi,dec_grid*180. / np.pi,data,cmap=plt.cm.jet,extent=extent)
plt.xlim([-180, 180])
plt.ylim([-90, 90])
plt.colorbar(CS)
ax.grid(True)
ax.set_xlabel(r'$\alpha$')
ax.set_ylabel(r'$\delta$')
plt.title('PREVIEW OF THE DATA [MINUTES]')
plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "11_extract_long_periods.py",
"copies": "1",
"size": "7874",
"license": "bsd-3-clause",
"hash": -8099464799526264000,
"line_mean": 29.8784313725,
"line_max": 117,
"alpha_frac": 0.6558293117,
"autogenerated": false,
"ratio": 2.9646084337349397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.89858837662757,
"avg_score": 0.026910795831848192,
"num_lines": 255
} |
import datetime
import hashlib
import time
from google.appengine.ext import db
# Used to normalize to UTC from wherever the app is running
UTC_DELTA = datetime.datetime.utcnow() - datetime.datetime.now()
class Profile(db.Model):
user = db.UserProperty()
username = db.StringProperty()
timezone_offset = db.IntegerProperty(default=-5)
digest_hour = db.IntegerProperty(default=9)
prompt_hour = db.IntegerProperty(default=17)
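# The helpers below convert the server clock to the user's local time: dates are first normalised
# to UTC via UTC_DELTA and then shifted by the per-user timezone_offset (in hours).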
def today(self):
today = datetime.datetime.fromtimestamp(time.mktime(datetime.date.today().timetuple()))
return today + UTC_DELTA + datetime.timedelta(hours=self.timezone_offset)
def yesterday(self):
return self.today() - datetime.timedelta(days=1)
def now(self):
return datetime.datetime.now() + UTC_DELTA + datetime.timedelta(hours=self.timezone_offset)
@property
def digest_now(self):
return self.now().hour == self.digest_hour and self.now().weekday() < 5
@property
def prompt_now(self):
return self.now().hour == self.prompt_hour and self.now().weekday() < 5
@property
def entry_today(self):
return Entry.all().filter('user =', self.user) \
.filter('created >=', self.today()).get()
@property
def entry_yesterday(self):
return Entry.all().filter('user =', self.user) \
.filter('created >=', self.yesterday()) \
.filter('created <', self.today()).get()
@property
def following(self):
return Profile.all().filter('user !=', self.user)
@property
def entries(self):
return Entry.all().filter('user =', self.user).order('-created')
@property
def gravatar_hash(self):
return hashlib.md5(self.user.email().strip().lower()).hexdigest()
@classmethod
def get_or_create(cls, user):
p = cls.all().filter('user =', user).get()
if not p:
p = cls(user=user, username=user.nickname().split('@')[0])
p.put()
if not p.username:
p.username = user.nickname().split('@')[0]
p.put()
return p
@classmethod
def get_by_username(cls, username):
return cls.all().filter('username =', username).get()
class Entry(db.Model):
body = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
modified = db.DateTimeProperty(auto_now=True)
user = db.UserProperty(auto_current_user_add=True)
@property
def date(self):
return datetime.datetime.fromtimestamp(time.mktime(self.created.date().timetuple()))
@property
def summary(self):
return self.body.split('\n\r', 1)[0]
@property
def details(self):
if '\n\r' in self.body:
return self.body.split('\n\r', 1)[-1]
else:
return None | {
"repo_name": "progrium/workjournal",
"path": "models.py",
"copies": "1",
"size": "2834",
"license": "bsd-3-clause",
"hash": -5283953797323795000,
"line_mean": 29.8152173913,
"line_max": 99,
"alpha_frac": 0.6139731828,
"autogenerated": false,
"ratio": 3.798927613941019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781961421748874,
"avg_score": 0.026187874998429044,
"num_lines": 92
} |
# 1.1 Life and Death
# Programmed by Rachel J Morris
import pygame, sys, math
from pygame.locals import *
maximumPeople = input( "HOW MANY PEOPLE SHOULD BE IN A CIRCLE? " )
pygame.init()
fpsClock = pygame.time.Clock()
window = pygame.display.set_mode( ( 768, 768 ) )
pygame.display.set_caption( "Life and Death" )
bgColor = pygame.Color( 140, 225, 255 )
txtColor = pygame.Color( 0, 0, 0 )
fontObj = pygame.font.Font( "content/cnr.otf", 20 )
people = []
#maximumPeople = 29
circleRatio = 360.0 / maximumPeople # float division so the people are spaced evenly around the circle
index = 0
w = 64
h = 96
radius = 300
offsetY = 350
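# Place each person evenly around a circle: person i sits at angle circleRatio * i degrees,
# converted to screen coordinates with cos/sin on a 300 px radius centred near (350, 350).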
for index in range( maximumPeople ):
x = math.cos( math.radians( circleRatio * index ) ) * radius + offsetY
y = math.sin( math.radians( circleRatio * index ) ) * radius + offsetY
imgNum = ( index % 16 ) + 1
txtObj = fontObj.render( str( index+1 ), False, txtColor )
person = { "number" : index, "alive" : True,
"x" : x, "y" : y, "w" : 64, "h" : 96,
"image" : pygame.image.load( "content/" + str( imgNum ) + ".png" ), "label" : txtObj,
"labelpos" : ( x, y ) }
people.append( person )
print( "Person ", index, circleRatio * index, " = ", x, ", ", y )
index += 1
def IsClicked( mouseX, mouseY, obj ):
return ( mouseX >= obj["x"]
and mouseX <= obj["x"] + obj["w"]
and mouseY >= obj["y"]
and mouseY <= obj["y"] + obj["h"] )
def ClickPerson( mouseX, mouseY ):
for person in people:
if ( IsClicked( mouseX, mouseY, person ) ):
person["alive"] = not person["alive"]
print( "Person ", person["number"], person["alive"] )
if ( person["alive"] ):
print( "Alive" )
person["image"] = pygame.transform.rotate( person["image"], 90 )
else:
person["image"] = pygame.transform.rotate( person["image"], -90 )
while True:
window.fill( bgColor )
for event in pygame.event.get():
if ( event.type == QUIT ):
pygame.quit()
sys.exit()
elif ( event.type == MOUSEBUTTONDOWN ):
mouseX, mouseY = event.pos
ClickPerson( mouseX, mouseY )
elif ( event.type == MOUSEBUTTONUP ):
mouseX, mouseY = event.pos
for person in people:
window.blit( person["image"], ( person["x"], person["y"] ) )
window.blit( person["label"], person["labelpos"] )
#window.blit( lblBottom, lblBottomPos )
pygame.display.update()
fpsClock.tick( 30 )
| {
"repo_name": "Rachels-Courses/CS210-Discrete-Structures",
"path": "Resources/Program Illustrations/1.1/1.1 Life and Death/main.py",
"copies": "1",
"size": "2594",
"license": "mit",
"hash": -6946892307444602000,
"line_mean": 28.816091954,
"line_max": 94,
"alpha_frac": 0.5447185813,
"autogenerated": false,
"ratio": 3.449468085106383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9242678656737184,
"avg_score": 0.05030160193383966,
"num_lines": 87
} |
# 1.1 Magic Trick Illustration
# Programmed by Rachel J Morris
import pygame, sys
from pygame.locals import *
pygame.init()
fpsClock = pygame.time.Clock()
window = pygame.display.set_mode( ( 1024, 768 ) )
pygame.display.set_caption( "Magic Trick" )
bgColor = pygame.Color( 140, 225, 255 )
txtColor = pygame.Color( 0, 0, 0 )
fontObj = pygame.font.Font( "content/cnr.otf", 32 )
lblTop = fontObj.render( "Top", False, txtColor )
lblTopPos = ( 800, 20 )
lblBottom = fontObj.render( "Bottom", False, txtColor )
lblBottomPos = ( 50, 20 )
cardImages = {
"heart" : pygame.image.load( "content/heartcard.png" ),
"club" : pygame.image.load( "content/clubcard.png" ),
"diamond" : pygame.image.load( "content/diamondcard.png" ),
"spade" : pygame.image.load( "content/spadecard.png" ),
"back" : pygame.image.load( "content/cardback.png" )
}
cards = [
{ "name" : "heart", "faceup" : True, "x" : 50, "y" : 50, "w" : 100, "h" : 125, "image" : cardImages["heart"] },
{ "name" : "club", "faceup" : True, "x" : 300, "y" : 50, "w" : 100, "h" : 125, "image" : cardImages["club"] },
{ "name" : "diamond", "faceup" : True, "x" : 550, "y" : 50, "w" : 100, "h" : 125, "image" : cardImages["diamond"] },
{ "name" : "spade", "faceup" : True, "x" : 800, "y" : 50, "w" : 100, "h" : 125, "image" : cardImages["spade"] }
]
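# Hit test for a card; note that it reads the module-level mouseX/mouseY set in the event loop
# rather than taking the mouse position as parameters.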
def IsCardClicked( card ):
return ( mouseX >= card["x"]
and mouseX <= card["x"] + card["w"]
and mouseY >= card["y"]
and mouseY <= card["y"] + card["h"] )
def FlipCard( mouseX, mouseY ):
for card in cards:
if ( IsCardClicked( card ) ):
# flip it
card["faceup"] = not card["faceup"]
if ( card["faceup"] ):
card["image"] = cardImages[ card["name"]]
else:
card["image"] = cardImages["back"]
def MoveCard( mouseX, mouseY, card ):
# Offset so the card is dragged by its center
card["x"] = mouseX - card["w"] / 2
card["y"] = mouseY - card["h"] / 2
mouseDown = False
movingCard = None
while True:
window.fill( bgColor )
for event in pygame.event.get():
if ( event.type == QUIT ):
pygame.quit()
sys.exit()
elif ( event.type == MOUSEMOTION ):
mouseX, mouseY = event.pos
if ( mouseDown and movingCard != None ):
MoveCard( mouseX, mouseY, movingCard )
elif ( event.type == MOUSEBUTTONDOWN ):
mouseX, mouseY = event.pos
mouseDown = True
for card in cards:
if ( IsCardClicked( card ) ):
movingCard = card
print( "Move card:", movingCard["name"] )
elif ( event.type == MOUSEBUTTONUP ):
mouseX, mouseY = event.pos
mouseDown = False
movingCard = None
if ( event.button == 3 ):
FlipCard( mouseX, mouseY )
for card in cards:
window.blit( card["image"], ( card["x"], card["y"] ) )
window.blit( lblTop, lblTopPos )
window.blit( lblBottom, lblBottomPos )
pygame.display.update()
fpsClock.tick( 30 )
| {
"repo_name": "Rachels-Courses/CS210-Discrete-Structures",
"path": "Resources/Program Illustrations/1.1/1.1 Magic Trick/main.py",
"copies": "1",
"size": "3305",
"license": "mit",
"hash": -758687320300613500,
"line_mean": 33.4270833333,
"line_max": 136,
"alpha_frac": 0.5167927383,
"autogenerated": false,
"ratio": 3.2593688362919133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42761615745919135,
"avg_score": null,
"num_lines": null
} |
#1.1
def isunique(s):
letters = []
for letter in s:
if letter in letters:
return False
else:
letters.append(letter)
return True
def isUniqueInPlace(s):
for i in xrange(0,len(s)):
for j in xrange(i+1,len(s)):
if s[j]==s[i]:
return False
return True
#1.2
def checkPermutation(s1,s2):
if sorted(list(s1)) == sorted(list(s2)): return True
return False
#1.3
def urlify(s):
return s.replace(' ','%20')
#1.4
def isPalindrome(s):
return s == s[::-1]
#1.5
def checkEdited(s1,s2):
#True if: - insert char
# - remove char
# - replace char
if len(s1) - len(s2) == 1 or len(s2) - len(s1) == 1: return True
elif len(s1) == len(s2):
return is1CharReplaced(s1,s2)
else:
return False
#Bad bad function
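# (It compares per-string letter-frequency dicts by zipping them together, but dict iteration
# order is arbitrary, so unrelated letters end up compared; the result is unreliable.)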
def is1CharReplaced(s1,s2):
letterdict1 = {}
letterdict2 = {}
for s11,s22 in zip(s1,s2):
if s11 not in letterdict1: letterdict1[s11] = 0
else: letterdict1[s11] += 1
if s22 not in letterdict2: letterdict2[s22] = 0
else: letterdict2[s22] += 1
haschanged = 0
print letterdict1,letterdict2
for ld1,ld2 in zip(letterdict1,letterdict2):
if letterdict1[ld1]-letterdict2[ld2]==1 or letterdict1[ld1]-letterdict2[ld2]==-1:
print haschanged
haschanged += 1
return True if haschanged == 1 else False
#1.6 meh
def stringCompression(s):
final = ''
repeat = 1
for i in xrange(0,len(s)):
if i < len(s)-1:
if s[i]==s[i+1]:
repeat+=1
else:
final += s[i]+str(repeat)
repeat=1
else:
final += s[i]+str(repeat)
return final
#1.7 Should return [[4,8,12,16],[3,7,11,15],[2,6,10,14],[1,5,9,13]] for [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
def rotate90(arr):
outer = []
for i in xrange(0,len(arr)):
inner = []
for j in xrange(0,len(arr[i])):
inner.append(arr[j][len(arr)-1-i])
outer.append(inner)
return outer
#1.8
def dozero(arr):
r0 = []
c0 = []
for i in xrange(0,len(arr)):
for j in xrange(0,len(arr[i])):
if arr[i][j] == 0:
if i not in r0: r0.append(i)
if j not in c0: c0.append(j)
for i in xrange(0,len(arr)):
for j in xrange(0,len(arr[i])):
if i in r0 or j in c0: arr[i][j]=0
return arr
def main():
s1 = 'helloo'
s2 = 'hellol'
arr = [[1,2,3,0],[5,6,7,8],[9,0,11,12],[13,0,15,16]]
print dozero(arr)
if __name__ == '__main__':
main() | {
"repo_name": "singhjaideep/Stuff",
"path": "stringsnarray.py",
"copies": "1",
"size": "2349",
"license": "mit",
"hash": -5602252068129803000,
"line_mean": 21.1698113208,
"line_max": 120,
"alpha_frac": 0.6083439762,
"autogenerated": false,
"ratio": 2.2223273415326394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33306713177326397,
"avg_score": null,
"num_lines": null
} |