'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import wallaby as w
# Start light threshold
startLightThresh = 2000
# TIME
startTime = -1
# Motor ports
LMOTOR = 0
RMOTOR = 3
# SERVO ports
frontArm = 0
frontClaw = 1
backArm = 2
backClaw = 3
# ANALOG ports
FRONT_TOPHAT = 0
REAR_TOPHAT = 1
STARTLIGHT = 2
# DIGITAL ports
CLONE_SWITCH = 9
RIGHT_BUTTON = 13
# PRIME servo positions
frontArmUp = 1200 #Arm at 90 degrees up
frontArmMidCube = 450 #Arm at 30 degrees up
frontArmGrabBot = 550 #grabs BotGuy well
frontArmMidPom = 700 #Arm at 30 degrees up
frontArmMidDown = 325 #added during comp for crease
frontArmDown = 100 #Arm forward on ground
frontClawOpen = 2000 #Claw open
frontClawMid = 1400 # claw cube grab
frontClawCube = 1150
frontClawClose = 830 # Claw closed
backClawOpen = 0
backClawOpenComp = 350
backClawMid = 1250
backClawMidSolar = 1500
backClawClose = 2000
backClawCompGrab = 1550
backArmDown = 250
backArmBinGrab = 560 #when the backArm pulls bin
backArmCompGrab = 580
backArmMid = 730
backArmUp = 1200
# PRIME analog sensor values
frontLineFollowerGrey = 1000
isClone = w.digital(CLONE_SWITCH)
isPrime = not isClone
if isPrime:
    print "running Prime"
else:
    print "running Clone"
    # CLONE servo positions (these override the PRIME values above)
    frontArmUp = 1900 # Arm at 90 degrees up
    frontArmMidCube = 1100 # Arm at 30 degrees up
    frontArmGrabBot = 1200 # grabs BotGuy well
    frontArmMidPom = 1300
    frontArmDown = 630 # Arm forward on ground
    frontArmMidDown = 1000 # added during comp for crease
    frontClawOpen = 1400 # Claw open
    frontClawMid = 600
    frontClawCube = 500
    frontClawClose = 200 # Claw closed
    backClawOpen = 100
    backClawOpenComp = 350
    backClawMid = 700
    backClawMidSolar = 1300
    backClawClose = 1275
    backClawCompGrab = 1550
    backArmDown = 440
    backArmBinGrab = 750
    backArmCompGrab = 780
    backArmMid = 1000
    backArmUp = 1400
| {
"repo_name": "gras/16-ValleyBot",
"path": "src/constants.py",
"copies": "1",
"size": "2072",
"license": "mit",
"hash": -3648507060537311700,
"line_mean": 20.0638297872,
"line_max": 56,
"alpha_frac": 0.6829150579,
"autogenerated": false,
"ratio": 2.8152173913043477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39981324492043474,
"avg_score": null,
"num_lines": null
} |
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import constants as c
# from sensors import DEBUG
from wallaby import motor
from wallaby import msleep
from wallaby import ao
from wallaby import seconds
from sensors import onBlackFront, onBlackBack
def driveTimed(left, right, time):
    drive(left, right)
    msleep(time)
    drive(0, 0)

def drive(left, right):
    if c.isPrime:
        # PRIME motor settings
        motor(c.LMOTOR, left)
        motor(c.RMOTOR, right)
    else:
        # CLONE motor settings
        motor(c.LMOTOR, left)
        motor(c.RMOTOR, right)

def testMotors():
    drive(-100, -100)
    while not onBlackFront(): # wait to see line
        pass
    stop()
    drive(100, 100)
    while not onBlackBack(): # wait to see line
        pass
    stop()
    driveTimed(-70, 0, 1000)
    driveTimed(70, 0, 1200)
    msleep(1000)

def timedLineFollowLeft(time):
    sec = seconds() + time
    while seconds() < sec:
        if onBlackFront():
            driveTimed(20, 90, 20)
        else:
            driveTimed(90, 20, 20)
        msleep(10)

# Follows black line on right for specified amount of time
def timedLineFollowRight(time):
    sec = seconds() + time
    while seconds() < sec:
        if not onBlackFront():
            driveTimed(20, 90, 20)
        else:
            driveTimed(90, 20, 20)
        msleep(10)

def timedLineFollowRightSmooth(time):
    sec = seconds() + time
    while seconds() < sec:
        if not onBlackFront():
            driveTimed(20, 40, 20)
        else:
            driveTimed(40, 20, 20)
        msleep(10)

def lineFollowRightSmoothCount(amount):
    count = 0
    while count < amount:
        if not onBlackFront():
            driveTimed(10, 30, 10)
            count = count + 1
        else:
            driveTimed(30, 10, 10)
            count = 0

def timedLineFollowLeftSmooth(time):
    sec = seconds() + time
    while seconds() < sec:
        if onBlackFront():
            driveTimed(20, 40, 20)
        else:
            driveTimed(40, 20, 20)
        msleep(10)

def timedLineFollowLeftBack(time): # follows on starboard side
    sec = seconds() + time
    while seconds() < sec:
        if onBlackBack():
            driveTimed(-90, -20, 20)
        else:
            driveTimed(-20, -90, 20)
        msleep(10)

def stop():
    ao()
| {
"repo_name": "gras/16-ValleyBot",
"path": "src/drive.py",
"copies": "1",
"size": "2528",
"license": "mit",
"hash": 595671205507470300,
"line_mean": 21.8490566038,
"line_max": 63,
"alpha_frac": 0.5415348101,
"autogenerated": false,
"ratio": 3.669085631349782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.962705553207064,
"avg_score": 0.01671298187582849,
"num_lines": 106
} |
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import constants as c
from wallaby import ao
from wallaby import msleep
from wallaby import analog
from wallaby import digital
from wallaby import seconds
from wallaby import a_button_clicked
from wallaby import b_button_clicked
def crossBlackFront():
    while not onBlackFront(): # wait for black
        pass
    while onBlackFront(): # wait for white
        pass
    ao()

def crossBlackBack():
    while not onBlackBack(): # wait for black
        pass
    while onBlackBack(): # wait for white
        pass
    ao()

def onBlackFront():
    return analog(c.FRONT_TOPHAT) > c.frontLineFollowerGrey

def onBlackBack():
    return analog(c.REAR_TOPHAT) > c.frontLineFollowerGrey

def waitForButton():
    print "Press Button..."
    while not digital(c.RIGHT_BUTTON):
        pass
    msleep(1)
    print "Pressed"
    msleep(1000)

def testSensors():
    if onBlackFront():
        print "Problem with front tophat."
        print "Check for unplugged tophat or bad robot setup"
        DEBUG()
    if onBlackBack():
        print "Problem with back tophat."
        print "Check for unplugged tophat or bad robot setup"
        DEBUG()

def DEBUG():
    ao()
    print 'Program stop for DEBUG\nSeconds: ', seconds() - c.startTime
    exit(0)
def wait4light():
    while not calibrate(c.STARTLIGHT):
        pass
    wait4(c.STARTLIGHT)

def calibrate(port):
    print "Press A button with light on"
    while not a_button_clicked():
        if digital(c.RIGHT_BUTTON):
            DEBUG()
    lightOn = analog(port)
    print "On value =", lightOn
    if lightOn > 200:
        print "Bad calibration"
        return False
    print "Press B button with light off"
    while not b_button_clicked():
        if digital(c.RIGHT_BUTTON):
            DEBUG()
    lightOff = analog(port)
    print "Off value =", lightOff
    if lightOff < 3000:
        print "Bad calibration"
        return False
    if (lightOff - lightOn) < 2000:
        print "Bad calibration"
        return False
    c.startLightThresh = (lightOff - lightOn) / 2
    print "Good calibration! ", c.startLightThresh
    return True
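
# Worked example (added comment, not in the original source): with readings
# lightOn = 150 and lightOff = 3500, all three checks pass and
# c.startLightThresh becomes (3500 - 150) / 2 = 1675; wait4() below then
# blocks until the reading drops below that threshold, i.e. the light turns on.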
def wait4(port):
    print "waiting for light!! "
    while analog(port) > c.startLightThresh:
        pass
| {
"repo_name": "gras/16-ValleyBot",
"path": "src/sensors.py",
"copies": "1",
"size": "2436",
"license": "mit",
"hash": 375815771757496960,
"line_mean": 22.8571428571,
"line_max": 70,
"alpha_frac": 0.6091954023,
"autogenerated": false,
"ratio": 3.7708978328173375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48800932351173376,
"avg_score": null,
"num_lines": null
} |
'''
Created on Mar 13, 2016
@author: Dead Robot Society
'''
import constants as c
from wallaby import set_servo_position
from wallaby import enable_servos
from wallaby import msleep
from wallaby import get_servo_position
from wallaby import ao
def testServos():
    set_servo_position(c.frontArm, c.frontArmUp)
    set_servo_position(c.frontClaw, c.frontClawClose)
    set_servo_position(c.backArm, c.backArmUp)
    set_servo_position(c.backClaw, c.backClawClose)
    enable_servos()
    msleep(1000)
    moveFrontArm(c.frontArmDown, 25)
    msleep(500)
    moveFrontArm(c.frontArmUp, 25)
    moveFrontClaw(c.frontClawOpen, 25)
    msleep(500)
    moveFrontClaw(c.frontClawClose, 25)
    moveBackArm(c.backArmDown, 25)
    msleep(500)
    moveBackArm(c.backArmUp, 25)
    moveBackClaw(c.backClawOpen, 25)
    msleep(500)
    moveBackClaw(c.backClawClose, 25)
    msleep(1000)

def moveFrontArm(endPos, speed=15):
    _moveServo(c.frontArm, endPos, speed)

def moveFrontClaw(endPos, speed=15):
    _moveServo(c.frontClaw, endPos, speed)

def moveBackArm(endPos, speed=15):
    _moveServo(c.backArm, endPos, speed)

def moveBackClaw(endPos, speed=15):
    _moveServo(c.backClaw, endPos, speed)

# Moves specified servo to specified position at specified speed
def _moveServo(servo, endPos, speed):
    # speed of 1 is slow
    # speed of 2000 is fast
    # speed of 10 is the default
    now = get_servo_position(servo)
    if now > 2048:
        PROGRAMMER_ERROR("Servo setting too large ", servo)
    if now < 0:
        PROGRAMMER_ERROR("Servo setting too small ", servo)
    if now > endPos:
        speed = -speed
    for i in range(now, endPos, speed):
        set_servo_position(servo, i)
        msleep(10)
    set_servo_position(servo, endPos)
    msleep(10)

def PROGRAMMER_ERROR(msg, value):
    ao()
    print msg, value
    exit()
| {
"repo_name": "gras/16-ValleyBot",
"path": "src/servos.py",
"copies": "1",
"size": "1963",
"license": "mit",
"hash": -5492893014358025000,
"line_mean": 26.0428571429,
"line_max": 64,
"alpha_frac": 0.6602139582,
"autogenerated": false,
"ratio": 2.9430284857571216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41032424439571213,
"avg_score": null,
"num_lines": null
} |
# 1703. Minimum Adjacent Swaps for K Consecutive Ones
# O(len(nums))
from typing import List

class Solution:
    def minMoves(self, nums: List[int], k: int) -> int:
        if sum(nums) < k:
            return 0
        # Records the positions of 1s.
        pos = []
        for i in range(len(nums)):
            if nums[i] == 1:
                pos.append(i)
        # Calculates prefix sums.
        preSum = [0]
        for p in pos:
            preSum.append(preSum[-1] + p)
        # Considers a sliding window on pos, the cost is equal to
        # sum(pos[x2:y2]) - sum(pos[x1:y1]) - extra.
        #
        # Examples:
        # when k = 5, the cost is (pos[3]-pos[1]) + (pos[4]-pos[0]) - extra =
        # sum(pos[3:5]) - sum(pos[0:2]) - extra
        #
        # when k = 4, the cost is (pos[2]-pos[1]) + (pos[3]-pos[0]) - extra =
        # sum(pos[2:4]) - sum(pos[0:2]) - extra
        #
        half = k // 2
        if k % 2 == 1:
            x1, y1 = 0, half
            x2, y2 = half + 1, k
            extra = (1 + half) * half
        else:
            x1, y1 = 0, half
            x2, y2 = half, k
            extra = half * half
        ans = len(nums) * len(nums)
        while y2 <= len(pos):
            left = preSum[y1] - preSum[x1]
            right = preSum[y2] - preSum[x2]
            ans = min(ans, right - left - extra)
            x1, y1 = x1 + 1, y1 + 1
            x2, y2 = x2 + 1, y2 + 1
        return ans
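
# Quick check of the window formula (added comment, not in the original):
# nums = [1,0,0,1,0,1], k = 2 gives pos = [0, 3, 5], half = 1, extra = 1;
# the window pos[1:3] = [3, 5] costs 5 - 3 - 1 = 1, which is the answer
# (shift the 1 at index 5 next to the 1 at index 3 with a single swap).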
| {
"repo_name": "digiter/Arena",
"path": "1703-minimum-adjacent-swaps-for-k-consecutive-ones.py",
"copies": "1",
"size": "1425",
"license": "mit",
"hash": 2631643709693587000,
"line_mean": 29.3191489362,
"line_max": 77,
"alpha_frac": 0.4385964912,
"autogenerated": false,
"ratio": 3.0319148936170213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8970511384817021,
"avg_score": 0,
"num_lines": 47
} |
# caselessList
# A case insensitive list that only permits strings as keys.
# Implemented for ConfigObj
# Requires Python 2.2 or above
# Copyright Michael Foord
# Not for use in commercial projects without permission. (Although permission will probably be given).
# If you use in a non-commercial project then please credit me and include a link back.
# If you release the project non-commercially then let me know (and include this message with my code !)
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail fuzzyman AT atlantibots DOT org DOT uk (or michael AT foord DOT me DO
class caselessList(list):
    """A case insensitive lists that has some caseless methods. Only allows strings as list members.
    Most methods that would normally return a list, return a caselessList. (Except list() and lowercopy())
    Sequence Methods implemented are :
    __contains__, remove, count, index, append, extend, insert,
    __getitem__, __setitem__, __getslice__, __setslice__
    __add__, __radd__, __iadd__, __mul__, __rmul__
    Plus Extra methods:
    findentry, copy , lowercopy, list
    Inherited methods :
    __imul__, __len__, __iter__, pop, reverse, sort
    """
    def __init__(self, inlist=[]):
        list.__init__(self)
        for entry in inlist:
            if not isinstance(entry, str): raise TypeError('Members of this object must be strings. You supplied \"%s\" which is \"%s\"' % (entry, type(entry)))
            self.append(entry)

    def findentry(self, item):
        """A caseless way of checking if an item is in the list or not.
        It returns None or the entry."""
        if not isinstance(item, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(item))
        for entry in self:
            if item.lower() == entry.lower(): return entry
        return None

    def __contains__(self, item):
        """A caseless way of checking if a list has a member in it or not."""
        for entry in self:
            if item.lower() == entry.lower(): return True
        return False

    def remove(self, item):
        """Remove the first occurence of an item, the caseless way."""
        for entry in self:
            if item.lower() == entry.lower():
                list.remove(self, entry)
                return
        raise ValueError(': list.remove(x): x not in list')

    def copy(self):
        """Return a caselessList copy of self."""
        return caselessList(self)

    def list(self):
        """Return a normal list version of self."""
        return list(self)

    def lowercopy(self):
        """Return a lowercase (list) copy of self."""
        return [entry.lower() for entry in self]

    def append(self, item):
        """Adds an item to the list and checks it's a string."""
        if not isinstance(item, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(item))
        list.append(self, item)

    def extend(self, item):
        """Extend the list with another list. Each member of the list must be a string."""
        if not isinstance(item, list): raise TypeError('You can only extend lists with lists. You supplied \"%s\"' % type(item))
        for entry in item:
            if not isinstance(entry, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(entry))
            list.append(self, entry)

    def count(self, item):
        """Counts references to 'item' in a caseless manner.
        If item is not a string it will always return 0."""
        if not isinstance(item, str): return 0
        count = 0
        for entry in self:
            if item.lower() == entry.lower():
                count += 1
        return count

    def index(self, item, minindex=0, maxindex=None):
        """Provide an index of first occurence of item in the list. (or raise a ValueError if item not present)
        If item is not a string, will raise a TypeError.
        minindex and maxindex are also optional arguments
        s.index(x[, i[, j]]) return smallest k such that s[k] == x and i <= k < j
        """
        if maxindex == None: maxindex = len(self)
        minindex = max(0, minindex)
        maxindex = min(len(self), maxindex)
        if not isinstance(item, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(item))
        # scan the half-open range [minindex, maxindex); the original walked one
        # index too far and could raise IndexError instead of ValueError
        for index in range(minindex, maxindex):
            if item.lower() == self[index].lower():
                return index
        raise ValueError(': list.index(x): x not in list')

    def insert(self, i, x):
        """s.insert(i, x) same as s[i:i] = [x]
        Raises TypeError if x isn't a string."""
        if not isinstance(x, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(x))
        list.insert(self, i, x)

    def __setitem__(self, index, value):
        """For setting values in the list.
        index must be an integer or (extended) slice object. (__setslice__ used for simple slices)
        If index is an integer then value must be a string.
        If index is a slice object then value must be a list of strings - with the same length as the slice object requires.
        """
        if isinstance(index, int):
            if not isinstance(value, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(value))
            list.__setitem__(self, index, value)
        elif isinstance(index, slice):
            if not hasattr(value, '__len__'): raise TypeError('Value given to set slice is not a sequence object.')
            for entry in value:
                if not isinstance(entry, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(entry))
            list.__setitem__(self, index, value)
        else:
            raise TypeError('Indexes must be integers or slice objects.')

    def __setslice__(self, i, j, sequence):
        """Called to implement assignment to self[i:j]."""
        for entry in sequence:
            if not isinstance(entry, str): raise TypeError('Members of this object must be strings. You supplied \"%s\"' % type(entry))
        list.__setslice__(self, i, j, sequence)

    def __getslice__(self, i, j):
        """Called to implement evaluation of self[i:j].
        Although the manual says this method is deprecated - if I don't define it the list one is called.
        (Which returns a list - this returns a caselessList)"""
        return caselessList(list.__getslice__(self, i, j))

    def __getitem__(self, index):
        """For fetching indexes.
        If a slice is fetched then the list returned is a caselessList."""
        if not isinstance(index, slice):
            return list.__getitem__(self, index)
        else:
            return caselessList(list.__getitem__(self, index))

    def __add__(self, item):
        """To add a list, and return a caselessList.
        Every element of item must be a string."""
        return caselessList(list.__add__(self, item))

    def __radd__(self, item):
        """To add a list, and return a caselessList.
        Every element of item must be a string."""
        return caselessList(list.__add__(self, item))

    def __iadd__(self, item):
        """To add a list in place."""
        for entry in item: self.append(entry)
        return self  # the original omitted this return, which made "a += b" rebind a to None

    def __mul__(self, item):
        """To multiply itself, and return a caselessList.
        Every element of item must be a string."""
        return caselessList(list.__mul__(self, item))

    def __rmul__(self, item):
        """To multiply itself, and return a caselessList.
        Every element of item must be a string."""
        return caselessList(list.__rmul__(self, item))

####################################################################################
# brief test stuff
if __name__ == '__main__':
    print
    print 'caselessList Tests :'
    a = caselessList(['hello', 'HELLO', 'HellO'])
    print 'A caselessList : ', a
    print 'a.findentry(\'hELLO\') = ', a.findentry('hELLO')
    print '(prints the first entry that matches this)', '\n'
    print '\'HeLLo\' in a : ', 'HeLLo' in a, '\n' # tests __contains__
    a.remove('HeLlO')
    print 'a.remove(\'HeLlO\'), print a : ', a
    print 'type(a.copy()) : ', type(a.copy())
    print 'type(a.list()) : ', type(a.list())
    print 'a.lowercopy() : ', a.lowercopy()
    a.append('HeLlO')
    print 'a.append(\'HeLlO\'), print a : ', a
    a.extend([char for char in 'AaAaA'])
    print 'a.extend([char for char in \'AaAaA\']), print a, type(a) : '
    print a, ',', type(a)
    print 'a.count(\'A\') : ', a.count('A')
    print 'a.index(\'A\') : ', a.index('a')
    a.insert(1, 'WisH')
    print 'a.insert(1, \'WisH\') : ', a
    print
    print 'The __setitem__ method is only novel for extended slice operations.'
    a[0:10:3] = ['Fish', 'fIsh', 'fiSh']
    print "a[0:10:3] = ['Fish', 'fIsh', 'fiSh'] : ", a
    print
    print 'Most interesting thing about __getitem__ is that if you ask for a slice - it will be an instance of caselessList'
    print 'type(a[0:4:1]) : ', type(a[0:4:1])
"""
15-05-04 Version 1.1.0
Added caselessList a caseless List implementation.
Lot more work than dict actually - more methods to implement for a sequence object.
Changed module name from caselessDict to caseless.
"""
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/284569_caselessList/recipe-284569.py",
"copies": "1",
"size": "9584",
"license": "mit",
"hash": -8931909770362768000,
"line_mean": 42.9633027523,
"line_max": 160,
"alpha_frac": 0.596515025,
"autogenerated": false,
"ratio": 3.9359342915811086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9979538241257581,
"avg_score": 0.0105822150647055,
"num_lines": 218
} |
# 1707. Maximum XOR With an Element From Array
# O(len(nums)*30 + len(queries)*30)
from typing import List

class Solution:
    def maximizeXor(self, nums: List[int], queries: List[List[int]]) -> List[int]:
        # The length of 10**9 in binary format.
        LEN = 30
        # Contains prefixes with length i, 0 <= i < LEN.
        prefix = [set() for _ in range(LEN)]

        # Adds prefixes of num (in binary format).
        def add(num):
            for i in range(LEN):
                prefix[i].add(num >> i)

        def findMaxXor(num):
            want = ((1 << LEN) - 1) ^ num
            for i in range(LEN - 1, -1, -1):
                if (want >> i) not in prefix[i]:
                    want ^= 1 << i
            return want ^ num

        # Processes queries.
        for i in range(len(queries)):
            queries[i].append(i)
        queries.sort(key=lambda q: q[1])
        nums.sort()
        j = 0
        # The -2 is an invalid output which would indicate bugs if -2 occurs.
        ans = [-2] * len(queries)
        for (x, m, index) in queries:
            while j < len(nums) and nums[j] <= m:
                add(nums[j])
                j += 1
            if j == 0:
                # All elements in nums are larger than m.
                ans[index] = -1
            else:
                # Looks for max(nums[0:j] XOR x).
                ans[index] = findMaxXor(x)
        return ans
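
# Illustration (added comment, not in the original): after add()-ing 0, 1, 2 and 3,
# findMaxXor(5) starts from the ideal companion ~5 and, walking bits high to low,
# clears every bit of "want" whose prefix is not among the stored ones; it ends at
# want = 2 and returns 2 ^ 5 = 7, indeed the best XOR of 5 against {0, 1, 2, 3}.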
| {
"repo_name": "digiter/Arena",
"path": "1707-maximum-xor-with-an-element-from-array.py",
"copies": "1",
"size": "1402",
"license": "mit",
"hash": -320253503161872800,
"line_mean": 30.8636363636,
"line_max": 82,
"alpha_frac": 0.4721825963,
"autogenerated": false,
"ratio": 3.660574412532637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.963138789601774,
"avg_score": 0.0002738225629791895,
"num_lines": 44
} |
# 17.09.14
import collections
from datetime import datetime
def thr_output(output_file, processed_data):
    print("Output _ for _ thr")
    with open(output_file, "wt") as fp:
        for item in processed_data:
            fp.write(str(item) + ",")
        fp.write("\n")

def thr_data_gathering(output_file, real_data, quote_list):
    print("Data_Gathering")
    with open(output_file, 'at') as fp:
        fp.write("time" + "," + "now" + ",")
        for q in quote_list:
            fp.write(str(q) + ",")  # order price
        fp.write("\n")
        fp.write(str(real_data['time']) + "," + str(real_data['now']) + ",")
        for q in quote_list:
            fp.write(str(real_data[q][0]) + ",")  # outstanding order volume
        fp.write("\n,,")
        for q in quote_list:
            fp.write(str(real_data[q][1]) + ",")  # additional order volume
        fp.write("\n")

def dict_output_batch_new(output_file, data, order, data_bat, order_bat, bat_size, quote_list):
    print("NEW OUTPUT BATCH FUNCTION")
    d = collections.OrderedDict(data)
    data_bat.append(d)
    o = collections.OrderedDict(order)
    order_bat.append(o)
    for i in order:
        order[i] = 0  # when the tradable quote levels change, avoid keeping order volume on quotes that can no longer be traded
    if len(data_bat) >= bat_size:  # once bat_size records have accumulated
        with open(output_file, "at") as fp:
            for i in range(0, len(data_bat)):
                fp.write("time" + "," + "now" + ",")
                for j in range(0, len(quote_list)):
                    fp.write(str(quote_list[j]) + ",")
                fp.write("\n")
                fp.write(str(data_bat[i]['time']) + ',' + str(data_bat[i]["now"]) + ',')
                for q in quote_list:
                    fp.write(str(data_bat[i][q]) + ',')
                fp.write("\n,,")
                for q in quote_list:
                    fp.write(str(order_bat[i][q]) + ',')
                fp.write("\n")
        print(" ** NEW DICT output complete ** ")
        del data_bat[:]
        del order_bat[:]

def dict_output_batch(output_file, data, order, data_bat, order_bat, bat_size):
    d = collections.OrderedDict(data)
    data_bat.append(d)
    o = collections.OrderedDict(order)
    order_bat.append(o)
    for i in order:
        order[i] = 0  # when the tradable quote levels change, avoid keeping order volume on quotes that can no longer be traded
    if len(data_bat) >= bat_size:  # once bat_size records have accumulated
        with open(output_file, "at") as fp:
            for i in range(0, len(data_bat)):
                for key in data_bat[i].keys():
                    fp.write(str(key) + ",")  # quote price
                fp.write("\n")
                for key in data_bat[i].keys():
                    fp.write(str(data_bat[i][key]) + ",")  # time, current price, outstanding order volume
                fp.write("\n,,")
                for key in order_bat[i].keys():
                    fp.write(str(order_bat[i][key]) + ",")  # additional order volume
                fp.write("\n")
        print(" ** DICT output complete ** ")
        del data_bat[:]
        del order_bat[:]

""" ======================================================================================== """

def output_batch(output_file, data, data_bat, bat_size):
    data_seq = [58, 52, 46, 40, 34, 28, 22, 16, 10, 4, 1, 7, 13, 19, 25, 31, 37, 43, 49, 55]
    # +0: quote prices in ascending order, +1: outstanding volume, +2: additional order volume
    data_bat.append(data)
    try:
        if len(data_bat) >= bat_size:  # once bat_size records have accumulated
            with open(output_file, "at") as fp:
                for i in range(0, len(data_bat)):
                    fp.write((str(data_bat[i][0]) + ","))
                    for k in data_seq:
                        fp.write((str(data_bat[i][k]) + ","))
                    fp.write("\n,")
                    for k in data_seq:
                        fp.write((str(data_bat[i][k + 1]) + ","))  # outstanding order volume
                    fp.write("\n,")
                    for k in data_seq:
                        fp.write((str(data_bat[i][k + 2]) + ","))  # additional order volume
                    fp.write("\n")
            del data_bat[:]
    except:
        # with open(output_file+"error.csv", "at") as fp:
        #     fp.write(str(datetime.now())+"\n")
        #     fp.close()
        print("Permission Error - {}".format(datetime.now()))
        with open(output_file + "err.csv", "at") as fp:
            for i in range(0, len(data_bat)):
                fp.write((str(data_bat[i][0]) + ","))
                for k in data_seq:
                    fp.write((str(data_bat[i][k]) + ","))
                fp.write("\n,")
                for k in data_seq:
                    fp.write((str(data_bat[i][k + 1]) + ","))  # outstanding order volume
                fp.write("\n,")
                for k in data_seq:
                    fp.write((str(data_bat[i][k + 2]) + ","))  # additional order volume
                fp.write("\n")
        del data_bat[:]
    print("* batch data output complete")

def output_result(output_file, data):
    """
    Write the given data out immediately.
    """
    data_seq = [58, 52, 46, 40, 34, 28, 22, 16, 10, 4, 1, 7, 13, 19, 25, 31, 37, 43, 49,
                55]  # quote prices ascending; +1: outstanding volume, +2: additional order volume
    try:
        with open(output_file, "at") as fp:
            fp.write(str(data[0]) + ",")
            for i in data_seq:
                fp.write(str(data[i]) + ",")  # order price
            fp.write("\n,")
            for i in data_seq:
                fp.write(str(data[i + 1]) + ",")  # outstanding order volume
            fp.write("\n,")
            for i in data_seq:
                fp.write(str(data[i + 2]) + ",")  # additional order volume
            fp.write("\n")
    except PermissionError:
        print("************ PermissionError ************")
        with open(output_file + "_error.csv", "at") as fp:
            fp.write(str(data[0]) + ",")
            for i in data_seq:
                fp.write(str(data[i]) + ",")
            fp.write("\n,")
            for i in data_seq:
                fp.write(str(data[i + 1]) + ",")
            fp.write("\n,")
            for i in data_seq:
                fp.write(str(data[i + 2]) + ",")
            fp.write("\n")
    print("* real-time data output complete")
| {
"repo_name": "Aishelre/untitled",
"path": "Output_data.py",
"copies": "1",
"size": "6731",
"license": "mit",
"hash": 7092547808309403000,
"line_mean": 35.0411764706,
"line_max": 96,
"alpha_frac": 0.424011434,
"autogenerated": false,
"ratio": 2.697943444730077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8615076593563756,
"avg_score": 0.0013756570332641809,
"num_lines": 170
} |
# 17134.1.amd64fre.rs4_release.180410-1804
import os
from collections import defaultdict
from csv import DictReader
import math, time as t
from datetime import datetime
'''
# Binary Feats
```
117 -----> 'fe_guy_didnt_update',
118 -----> 'fst_public_ver_still_users',
137 -----> 'fe_EngineVersion_2_less_13701',
145 -----> 'fe_AvSigVersion_more_275',
156 -----> 'fe_hghdec_cnt1',
157 -----> 'fe_hghdec_cnt_3',
158 -----> 'fe_hghdec_cnt_5',
159 -----> 'fe_hghdec_cnt_8',
160 -----> 'fe_hghdec_cnt_9',
161 -----> 'fe_hghdec_cnt_10',
```
# # Numerical Feats Name
```
83 -----> 'fe_EngineVersion_factor',
84 -----> 'fe_AppVersion_factor',
114 -----> 'temp_x1',
115 -----> 'temp_x2',
116 -----> 'temp_diffs',
120 -----> 'DateASYear',
121 -----> 'DateASMonth',
122 -----> 'DateASWeek',
123 -----> 'DateASDay',
124 -----> 'DateASDayofweek',
125 -----> 'DateASDayofyear',
126 -----> 'DateOSYear',
127 -----> 'DateOSMonth',
128 -----> 'DateOSWeek',
129 -----> 'DateOSDay',
130 -----> 'DateOSDayofweek',
131 -----> 'DateOSDayofyear',
132 -----> 'fe_aspect_ratio',
133 -----> 'fe_ResolutionRatio',
134 -----> 'fe_dpi',
135 -----> 'fe_MegaPixels',
138 -----> 'fe_one_less_AVproductInstalled',
139 -----> 'diff_AV_Products',
140 -----> 'sun_AV_Products',
147 -----> 'fe_AvSigVersion_sum',
148 -----> 'fe_AppVersion_sum',
149 -----> 'fe_EngineVersion_sum',
150 -----> 'fe_magic_4',
151 -----> 'fe_primary_drive_c_ratio',
152 -----> 'fe_Census_SystemVolumeTotalCapacity_GB',
153 -----> 'fe_non_primary_drive_MB',
154 -----> 'fe_ram_per_processor',
155 -----> 'fe_physical_cores',
```
# GroupBy Feats
```
90 -----> 'fe_avsig_org_freq',
91 -----> 'fe_avsig_cty_freq',
92 -----> 'fe_avsig_gamer_freq',
93 -----> 'fe_cpucores_region_freq',
94 -----> 'fe_cpucores_oemname_freq',
95 -----> 'fe_geoname_oemname_freq',
96 -----> 'fe_cntiden_oemname_freq',
97 -----> 'fe_cty_oemname_freq',
98 -----> 'fe_orgs_oemname_freq',
99 -----> 'fe_avsig_org_touch_freq',
100 -----> 'fe_avsig_cty_touch_freq',
101 -----> 'fe_avsig_gamer_touch_freq',
102 -----> 'fe_cpucores_region_touch_freq',
103 -----> 'fe_cpucores_oemname_touch_freq',
104 -----> 'fe_geoname_oemname_touch_freq',
105 -----> 'fe_cntiden_oemname_touch_freq',
106 -----> 'fe_cty_oemname_touch_freq',
107 -----> 'fe_orgs_oemname_touch_freq',
108 -----> 'fe_cpucores_oemname_bridenti_freq',
109 -----> 'fe_geoname_oemname_bridenti_freq',
110 -----> 'fe_cntiden_oemname_bridenti_freq',
111 -----> 'fe_cty_oemname_bridenti_freq',
112 -----> 'fe_orgs_oemname_bridenti_freq',
119 -----> 'fe_orgs_cnt_cty_ring_freq',
```
# Count Feats
```
85 -----> cnt_cnt_org_os
86 -----> cnt_cnt_org_city_os_x
87 -----> cnt_cnt_org_city_os_y
88 -----> cnt_cnt_org_build_osbld_type
89 -----> cnt_cnt_org_city_frwar
162 -----> fe_count_Census_FirmwareManufacturerIdentifier
163 -----> fe_count_Census_OSBuildNumber
164 -----> fe_count_Census_OSBuildRevision
165 -----> fe_count_Census_OEMModelIdentifier
166 -----> fe_count_Wdft_IsGamer
167 -----> fe_count_CountryIdentifier
168 -----> fe_count_OrganizationIdentifier
169 -----> fe_count_Census_FirmwareVersionIdentifier
170 -----> fe_count_Wdft_RegionIdentifier
```
'''
binary_feats = ['fe_guy_didnt_update','fst_public_ver_still_users','fe_EngineVersion_2_less_13701','fe_AvSigVersion_more_275',
'fe_hghdec_cnt1','fe_hghdec_cnt_3','fe_hghdec_cnt_5','fe_hghdec_cnt_8','fe_hghdec_cnt_9','fe_hghdec_cnt_10',]
num_feats = ['fe_EngineVersion_factor','fe_AppVersion_factor','temp_x1','temp_x2','temp_diffs','DateASYear','DateASMonth','DateASWeek',
'DateASDay','DateASDayofweek','DateASDayofyear','DateOSYear','DateOSMonth','DateOSWeek','DateOSDay','DateOSDayofweek','DateOSDayofyear',
'fe_aspect_ratio','fe_ResolutionRatio','fe_dpi','fe_MegaPixels','fe_one_less_AVproductInstalled','diff_AV_Products','sun_AV_Products','fe_AvSigVersion_sum',
'fe_AppVersion_sum','fe_EngineVersion_sum','fe_magic_4','fe_primary_drive_c_ratio','fe_Census_SystemVolumeTotalCapacity_GB',
'fe_non_primary_drive_MB','fe_ram_per_processor','fe_physical_cores',]
grp_feats=['fe_avsig_org_freq','fe_avsig_cty_freq','fe_avsig_gamer_freq','fe_cpucores_region_freq','fe_cpucores_oemname_freq',
'fe_geoname_oemname_freq','fe_cntiden_oemname_freq','fe_cty_oemname_freq','fe_orgs_oemname_freq','fe_avsig_org_touch_freq',
'fe_avsig_cty_touch_freq','fe_avsig_gamer_touch_freq','fe_cpucores_region_touch_freq','fe_cpucores_oemname_touch_freq',
'fe_geoname_oemname_touch_freq','fe_cntiden_oemname_touch_freq','fe_cty_oemname_touch_freq','fe_orgs_oemname_touch_freq',
'fe_cpucores_oemname_bridenti_freq','fe_geoname_oemname_bridenti_freq','fe_cntiden_oemname_bridenti_freq',
'fe_cty_oemname_bridenti_freq','fe_orgs_oemname_bridenti_freq','fe_orgs_cnt_cty_ring_freq',]
cnt_feats = [
'cnt_cnt_org_os','cnt_cnt_org_city_os_x',
'cnt_cnt_org_city_os_y','cnt_cnt_org_build_osbld_type','cnt_cnt_org_city_frwar','fe_count_Census_FirmwareManufacturerIdentifier',
'fe_count_Census_OSBuildNumber','fe_count_Census_OSBuildRevision','fe_count_Census_OEMModelIdentifier','fe_count_Wdft_IsGamer',
'fe_count_CountryIdentifier','fe_count_OrganizationIdentifier','fe_count_Census_FirmwareVersionIdentifier','fe_count_Wdft_RegionIdentifier',]
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16'
}
dont_use = ['MachineIdentifier', 'Census_FirmwareVersionIdentifier', 'Census_OEMModelIdentifier', 'CityIdentifier']
num_cols = [
'Census_ProcessorCoreCount',
'Census_PrimaryDiskTotalCapacity',
'Census_SystemVolumeTotalCapacity',
'Census_TotalPhysicalRAM',
'Census_InternalPrimaryDiagonalDisplaySizeInInches',
'Census_InternalPrimaryDisplayResolutionHorizontal',
'Census_InternalPrimaryDisplayResolutionVertical',
'Census_InternalBatteryNumberOfCharges']
too_many_vals = ["Census_PrimaryDiskTotalCapacity","Census_SystemVolumeTotalCapacity",
"Census_TotalPhysicalRAM",
"Census_InternalPrimaryDiagonalDisplaySizeInInches",
"Census_InternalPrimaryDisplayResolutionHorizontal",
"Census_InternalPrimaryDisplayResolutionVertical",
"Census_InternalBatteryNumberOfCharges"
]
categories = [k for k, v in dtypes.items() if k not in dont_use]
categories_index = dict(zip(categories, range(len(categories))))
field_features = defaultdict()
def csv_to_vw(loc_csv, file_name, train=True):
    start = datetime.now()
    print('Converting .csv to .vw')
    cnt, imp = 0, 1
    print("\nTurning %s into %s Is_train_set? %s" % (loc_csv, file_name, train))
    with open(file_name, 'a') as the_file:
        # read from loc_csv; the original read the global train_path here,
        # which silently ignored the argument being passed in
        for t, row in enumerate(DictReader(open(loc_csv))):
            if t % 1e5 == 0:
                print("%s\t%s" % (t, str(datetime.now() - start)))
            numerical_feats = ""
            cnt_feats = ""
            boolean_feats = ""
            categorical_feats = ""
            for field in categories:
                if field == 'HasDetections':
                    continue
                feature = row[field].strip()
                if feature == '':
                    feature = 'unk'
                if field not in num_cols:
                    feature = feature.replace(':', '').replace("|", "")
                    categorical_feats += " %s" % (feature)
                else:
                    if feature == "unk" or float(feature) == -1:
                        categorical_feats += " %s" % (str(0))
                    else:
                        if field in too_many_vals:
                            numerical_feats += " %s:%s" % (field, (int(round(math.log(1 + float(feature))))))
                        else:
                            numerical_feats += " %s:%s" % (field, (int(float(feature))))
            if train:
                if row['HasDetections'] == "1":
                    label = 1
                    imp = 1
                elif row['HasDetections'] == "0":
                    label = -1
                    imp = 1  # str imp
                the_file.write("%s %s |num %s |cat %s\n" % (label, imp, numerical_feats, categorical_feats))
            else:
                # test set
                label = "1"
                the_file.write("%s |num %s |cat %s\n" % (label, numerical_feats, categorical_feats))
#csv_to_vw(train_path, train=True)
train_path = './train/train.csv'
print(train_path)
csv_to_vw(train_path, file_name='train_2.vw', train=True)
train_path = './test/test.csv'
print(train_path)
csv_to_vw(train_path, file_name='test_2.vw', train=False)
'''
From Kaggle discussions:
vw --compressed x0_f0_v6.vw.gz -c -k -l 0.1 -b 22 -q cc -q nn -q cn --loss_function logistic --passes 8 --holdout_off -f model_f0.vw
'''
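# (Added note, not from the original: in that command -b 22 sets the width of the
# feature hash table in bits, -q cc / -q nn / -q cn add quadratic interactions within
# and across the namespaces whose names start with "c" and "n" (the |cat and |num
# namespaces above), -c -k build a fresh cache so that --passes 8 can rescan the data,
# --holdout_off disables the holdout split, and -f writes the trained model.)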
###################
def to_vw(loc_csv, file_name, categ_column_ids, binary_feats_ids, num_column_ids, cnt_columns_ids, grpby_column_ids, column_names, train=True):
    """
    Converts a csv file to VW format, one output line per input row.
    :param loc_csv: path to the comma-separated input file, str
    :param categ_column_ids: ids of categorical features, list
    :param num_column_ids: ids of numeric features, list
    :param column_names: column (or feature) names to use (both categorical and numeric), list
    :param train: whether the file belongs to a training set
    """
    start = datetime.now()
    with open(file_name, 'a') as the_file:
        # iterate over raw csv lines; the original looped over DictReader(open(train_path))
        # but then treated each row as a plain string, which cannot work
        for t, line in enumerate(open(loc_csv)):
            if t == 0:
                continue  # skip the csv header row (DictReader used to consume it)
            if t % 1e5 == 0:
                print("%s\t%s" % (t, str(datetime.now() - start)))
            values = line.strip().split(',')
            # VW treats '|' and ':' as special symbols, so just in case we'll replace them
            for i in range(len(values)):
                values[i] = values[i].replace('|', '').replace(':', '')
            label = '-1'
            if train:
                label, values = values[-1], values[:-1]  # last item is the label, the rest are the row values
                # in case of binary classification, VW eats labels 1 and -1, so 1 -> 1, 0 -> -1
                label = str(2 * int(label) - 1)
            # for categorical features, we fill in missing values with 'unk'
            for i in categ_column_ids:
                if not values[i]:
                    values[i] = 'unk'
            # for numeric features, we fill in missing values with '-1'
            for i in num_column_ids:
                if values[i] == '':
                    values[i] = '-1'
            for i in cnt_columns_ids:
                if values[i] == '':
                    values[i] = '-1'
            for i in grpby_column_ids:
                if values[i] == '':
                    values[i] = '-1'
            categ_vw = ' '.join(['{}={}'.format(column_names[i], values[i]) for i in categ_column_ids])
            # we apply log1p transformation to numeric features
            numeric_vw = ' '.join(['{}:{}'.format(column_names[i], round(math.log(1 + float(values[i]) + 1e-7))) for i in num_column_ids])
            binary_vw = ' '.join(['{}:{}'.format(column_names[i], values[i]) for i in binary_feats_ids])
            cnt_vw = ' '.join(['{}:{}'.format(column_names[i], values[i]) for i in cnt_columns_ids])
            grp_vw = ' '.join(['{}:{}'.format(column_names[i], values[i]) for i in grpby_column_ids])
            new_line = label + ' |num ' + numeric_vw + ' |cat ' + categ_vw + ' |bi ' + binary_vw + ' |cnt ' + cnt_vw + ' |grp ' + grp_vw
            # write the converted line (the original returned it instead, a leftover
            # from an earlier per-line version of this helper)
            the_file.write(new_line + '\n')
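
# For illustration only (added, not in the original): with train=True each emitted
# line looks roughly like
#   1 |num fe_dpi:2 fe_MegaPixels:1 |cat EngineVersion=1.1.15100.1 |bi fe_hghdec_cnt1:1 |cnt fe_count_Wdft_IsGamer:12 |grp fe_avsig_org_freq:3
# i.e. the VW label followed by one namespace per feature family; the feature names
# are taken from the lists above, the values are made up.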
# def csv_to_vw(loc_csv, loc_output, train=True):
# """
# Munges a CSV file (loc_csv) to a VW file (loc_output). Set "train"
# to False when munging a test set.
# TODO: Too slow for a daily cron job. Try optimize, Pandas or Go.
# """
# start = datetime.now()
# print("\nTurning %s into %s. Is_train_set? %s"%(loc_csv,loc_output,train))
# cnt = 0
# with open(loc_output,"wb") as outfile:
# for e, row in enumerate( DictReader(open(loc_csv)) ):
# #Creating the features
# numerical_features = ""
# categorical_features = ""
# for k,v in row.items():
# if k not in ["HasDetections","MachineIdentifier"]:
# if "I" in k: # numerical feature, example: I5
# if len(str(v)) > 0: #check for empty values
# numerical_features += " %s:%s" % (k,(float(v)))
# if "C" in k: # categorical feature, example: C2
# if len(str(v)) > 0:
# categorical_features += " %s" % v
# #Creating the labels
# if train: #we care about labels
# if row['HasDetections'] == "1":
# label = 1
# else:
# label = -1 #we set negative label to -1
# outfile.write( "%s '%s |i%s |c%s\n" % (label,row['MachineIdentifier'],numerical_features,categorical_features) )
# else: #we dont care about labels
# outfile.write( "1 '%s |i%s |c%s\n" % (row['MachineIdentifier'],numerical_features,categorical_features) )
# #Reporting progress
# if e % 1e6 == 0:
# print("%s\t%s"%(e, str(datetime.now() - start)))
# cnt += 1
# if cnt %2 ==0:
# exit()
# print("\n %s Task execution time:\n\t%s"%(e, str(datetime.now() - start)))
# csv_to_vw(train_path, "click.train.vw",train=True)
# csv_to_vw('./test/test.csv', "click.test.vw",train=False)
| {
"repo_name": "AdityaSoni19031997/Machine-Learning",
"path": "kaggle/microsoft_malware_competition/csv_to_vw.py",
"copies": "1",
"size": "19477",
"license": "mit",
"hash": -7316423496030462000,
"line_mean": 46.6210268949,
"line_max": 156,
"alpha_frac": 0.5136314628,
"autogenerated": false,
"ratio": 3.5548457747764193,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45684772375764193,
"avg_score": null,
"num_lines": null
} |
# 1728. Cat and Mouse II
from functools import cache
from typing import List

class Solution:
    def canMouseWin(self, grid: List[str], catJump: int, mouseJump: int) -> bool:
        rowCnt = len(grid)
        colCnt = len(grid[0])
        index = lambda x, y: x * colCnt + y

        def buildGraph(jumpCnt):
            g = [list() for _ in range(rowCnt * colCnt)]
            for x in range(rowCnt):
                for y in range(colCnt):
                    if grid[x][y] != "#":
                        node = g[index(x, y)]
                        node.append(index(x, y))
                        for d in range(1, jumpCnt + 1):
                            if not (0 <= x - d and grid[x - d][y] != "#"):
                                break
                            node.append(index(x - d, y))
                        for d in range(1, jumpCnt + 1):
                            if not (x + d < rowCnt and grid[x + d][y] != "#"):
                                break
                            node.append(index(x + d, y))
                        for d in range(1, jumpCnt + 1):
                            if not (0 <= y - d and grid[x][y - d] != "#"):
                                break
                            node.append(index(x, y - d))
                        for d in range(1, jumpCnt + 1):
                            if not (y + d < colCnt and grid[x][y + d] != "#"):
                                break
                            node.append(index(x, y + d))
            return g

        mouseGraph = buildGraph(mouseJump)
        catGraph = buildGraph(catJump)
        food, mouse, cat = None, None, None
        for x in range(rowCnt):
            for y in range(colCnt):
                if grid[x][y] == "F":
                    food = index(x, y)
                elif grid[x][y] == "M":
                    mouse = index(x, y)
                elif grid[x][y] == "C":
                    cat = index(x, y)
        MOUSE_WIN, CAT_WIN = 1, 2

        @cache
        def play(step, mouse, cat):
            if step == 1000:
                return CAT_WIN
            if step == 2 * rowCnt * colCnt:
                return CAT_WIN
            if mouse == cat:
                return CAT_WIN
            if cat == food:
                return CAT_WIN
            if mouse == food:
                return MOUSE_WIN
            if step % 2 == 0:
                for nextMouse in mouseGraph[mouse]:
                    if play(step + 1, nextMouse, cat) == MOUSE_WIN:
                        return MOUSE_WIN
                return CAT_WIN
            else:
                for nextCat in catGraph[cat]:
                    if play(step + 1, mouse, nextCat) == CAT_WIN:
                        return CAT_WIN
                return MOUSE_WIN

        return play(0, mouse, cat) == MOUSE_WIN
| {
"repo_name": "digiter/Arena",
"path": "1728-cat-and-mouse-ii.py",
"copies": "1",
"size": "2772",
"license": "mit",
"hash": 5641895441421040000,
"line_mean": 36.4594594595,
"line_max": 81,
"alpha_frac": 0.3795093795,
"autogenerated": false,
"ratio": 4.1066666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4986176046166666,
"avg_score": null,
"num_lines": null
} |
''' 17b-observation_regions.py
=========================
AIM: Determines the time spent in one region of the sky.
INPUT: files: - <orbit_id>_<SL_angle>misc/ephemerids_obs<transit_duration>h_<max_interruptions>inter_V<mag_max><_SAA?>.npz (from 17a...py
variables: see section PARAMETERS (below)
OUTPUT: 'skycoverage_region_%dmin_V%3.1f%s.txt' % (min_t_obs_per_orbit,mag_max,note)
CMD: python 17b-observation_regions.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import os
import matplotlib.cm as cm
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
from resources.targets import *
import resources.geometry as geometry
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Orbit id
alt = 700
orbit_id = '6am_%d_5_conf4e' % alt
apogee=alt
perigee=alt
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Minimum observable time for plots [h]
transit_duration = None
# Maximum interruption time tolerated [min]
max_interruptions = None
# Maximum visible magnitude
mag_max = 12.
# Take SAA into account?
SAA = True
# Print much information ?
verbose = False
# If set to True, then it will be observations of at least (period - max_interruptions)
# If set to False, then it is minimum (period - max_interruptions) minutes per orbit,
# not necesseraly consecutive.
consecutive = False
# Factor in the SL post treatment correction ?
SL_post_treat = True
# Stop before last reshaping of data and saving (if consecutive == False)
early_stop = True
# Minimal # of days of obs (if consecutive == False), must be a list
nb_obs_days = [13]#range(10,17,1)#[13]#range(10,110,10)#
# Minimal minutes to be observed per orbit (if consecutive == False)
min_t_obs_per_orbit = 79
# This is a way to vary the results by multiplying the whole pst by a number.
# This is very easy as if the pst is multiplied by a constant, it can be taken out of the
# integral and only multplying the flux is equivalent to re-running all the simulations
pst_factor=1.
#Examples:
#observation_region = geometry.Polygon(points=[(25, 2), (15, 3), (15, 7), (45, 7), (45, 2)])
#observation_region = geometry.Interval(axis='delta',min_val=-30,max_val=30])
#observation_region = geometry.Interval(axis='alpha',min_val=3.14,max_val=6.28],unit='rad')
#The points are in degree by default.
observation_region=geometry.Interval(axis="delta",max_val=0) # Southern sky
# File name for the input file (in a compressed binary Python format)
if SAA: note = '_SAA'
else: note = ''
if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor
if SL_post_treat: note+= '_%4.3fSLreduction' % param.SL_post_treat_reduction
if not consecutive: note += '_cumul_'
skycoverage_fname = 'skycoverage_region_%dmin_V%3.1f%s.txt' % (min_t_obs_per_orbit,mag_max,note)
### INITIALISATION
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = -np.pi
ra_f = np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
data_grid = np.zeros(np.shape(ra_grid))*np.nan
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
output=open(os.path.join(folder_misc,skycoverage_fname),"a")
print >> output, observation_region
output.close()
for nb_obs_day in nb_obs_days:
    # File name for the input file (in a compressed binary Python format)
    if consecutive:
        input_fname = 'ephemerids_obs%dh_%dinter_V%3.1f%s.npz' % (transit_duration,max_interruptions,mag_max,note)
    else:
        input_fname = 'ephemerids_%ddays_%dmin_V%3.1f%s.npz' % (nb_obs_day,min_t_obs_per_orbit,mag_max,note)

    #####################################################################################################################
    # CONSTANTS AND PHYSICAL PARAMETERS
    period = altitude2period(apogee, perigee)

    ###########################################################################
    ### INITIALISATION
    data_grid *= np.nan
    output = open(os.path.join(folder_misc,skycoverage_fname),"a")

    print
    print 'ORBIT ID:\t\t%s\nPST factor:\t\t%d\nMin Days of Coverage:\t%d\nmin_t_obs_per_orbit\t%d (%1.4g%%)\nMAGNITUDE:\t\t%02.1f' % (orbit_id,pst_factor,nb_obs_day,min_t_obs_per_orbit, min_t_obs_per_orbit/period*100., mag_max)
    print "Loading from %s" % input_fname

    # loading data
    sys.stdout.write("Loading worthy targets...\t")
    sys.stdout.flush()
    worthy_targets = np.load(folder_misc+input_fname)
    obs_tot = worthy_targets['obs_tot']
    worthy_targets = worthy_targets['worthy_targets']

    count_T = 0
    count_all = 0
    coords = []
    obs_sky_region = 0
    obs_sky = 0
    # cc=[]
    for tgt, obs in zip(worthy_targets, obs_tot):
        if obs == 0.: continue
        count_all += 1
        rat, dect = tgt.Coordinates()
        obs_sky += 0.5/param.resx/param.resy*np.pi*np.cos(dect)
        if observation_region.is_inside(tgt.Coordinates()):
            count_T += 1
            obs_sky_region += 0.5/param.resx/param.resy*np.pi*np.cos(dect)
            id_ra, id_dec = find_nearest(ras+np.pi, tgt.Coordinates()[0]), find_nearest(decs, tgt.Coordinates()[1])
            data_grid[id_dec,id_ra] = obs
            # coords.append(np.asarray(tgt.Coordinates()))

    print observation_region
    print '%d cells on %d tot' % (count_T,count_all)
    print 'Percentage of observation in region: %2.1f' % ((obs_sky_region/obs_sky)*100.), '%'
    print >> output, nb_obs_day,'\t',(obs_sky_region/obs_sky)*100.,'\t\%'
    output.close()

    import pylab as plt
    plt.figure()
    plt.contourf(np.degrees(ra_grid), np.degrees(dec_grid),data_grid)
    plt.grid()
    plt.xlim([-180,180])
    plt.ylim([-90,90])
    plt.title(input_fname)
    # coords=np.asarray(coords)
    # plt.scatter(coords[:,0],coords[:,1])
    plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "17b_observation_regions.py",
"copies": "1",
"size": "6325",
"license": "bsd-3-clause",
"hash": 1637545244907618300,
"line_mean": 30.157635468,
"line_max": 225,
"alpha_frac": 0.6652964427,
"autogenerated": false,
"ratio": 2.8236607142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39889571569857146,
"avg_score": null,
"num_lines": null
} |
#17 choices
#STICKERS = ['SPY', 'QQQ', 'IYR','XLF', 'XLV', 'XLI', 'XLY', 'XLP', 'XLB', 'XLK', 'XLU', 'XLE', 'USO', 'GLD', 'TLT', 'ITA']
FUND_STICKERS = {'lucas' :['SPY', 'QQQ', 'IYR','XLF', 'XLV', 'XLI', 'XLY', 'XLP', 'XLB', 'XLK', 'XLU', 'XLE', 'USO', 'GLD', 'TLT', 'ITA'],
'winwin' :['SPY', 'QQQ', 'IYR', 'EWZ', 'FXI', 'EWJ', 'RSX', 'EWQ', 'EEM', 'VGK', 'TLT', 'GLD', 'USO', 'EZA', 'EWG','EWW', 'IBB', 'XLF', 'EIS']}
#TOP = len(STICKERS)/10 +1
N_DAYS_THREDHOLD = 0.9
OUT_FILE_NAME = 'trade.txt'
OUT_FOLDER = './../results/'
OUT_FOLDER2 = './../results_indicator/'
OUT_LOG_NAME = 'log.txt'
START_DATE = '2005-01-01'
END_DATE = '2017-08-01'
RUN_START_DATE = '2017-08-01'
MIN_HOLD_DAYS = 5
MAX_HOLD_DAYS= 35
# For feature extraction thread...
# Define daily features, drop if necessary
FEATURES = {'_20_SMA' : '_daily_20_SMA',
'_50_SMA' : '_daily_50_SMA',
'_100_SMA' : '_daily_100_SMA',
'_HIST_MOM' : '_daily_HIST_MOM', #20,50,100SMA average
'_ROC' : '_daily_ROC', #Rate of change in the last 10 days
'_K' : '_daily_K',
'_D' : '_daily_D',
'_KD' : '_daily_KD', #K,D average
'_MFI' : '_daily_MFI',
'_KD_SLOPE' : '_daily_KD_SLOPE',
'_MFI_SLOPE': '_daily_MFI_SLOPE'
}
RANK_FEATURES = {'_KD_RANK' : '_daily_KD_RANK', # Slope rank
'_MFI_RANK' : '_daily_MFI_RANK'} # MFI slope rank
FUND_FEATURES = {'FUND_PERF' : 'daily_FUND_PERF', #Fund overall features
'FUND_MONEY_FLOW' : 'daily_FUND_MONEY_FLOW',
'FUND_PRICE_FLOW' : 'daily_FUND_PRICE_FLOW',
'FUND_HIST' : 'daily_FUND_HIST'}
DROP_OFF_COLUMN_NAMES = ['_daily_20_SMA','_daily_50_SMA','_daily_100_SMA','_daily_K','_daily_D']
#'_open', '_high', '_low', '_close','_volume',
# Define weekly features, use or not is another story
WEEKLY_FEATURES = {
'_ROC' : '_weekly_ROC',
'_K' : '_weekly_K',
'_D' : '_weekly_D',
'_KD' : '_weekly_KD',
'_MFI' : '_weekly_MFI',
'_KD_SLOPE' : '_weekly_KD_SLOPE',
'_MFI_SLOPE': '_weekly_MFI_SLOPE'
}
WEEKLY_RANK_FEATURES = {'_KD_RANK' : '_weekly_KD_RANK', # KD Slope rank
'_MFI_RANK' : '_weekly_MFI_RANK'} # MFI Slope rank
#Need to drop some features.
WEEKLY_FUND_FEATURES = {'FUND_PERF' : 'weekly_FUND_PERF',
'FUND_MONEY_FLOW': 'weekly_FUND_MONEY_FLOW',
'FUND_PRICE_FLOW': 'weekly_FUND_PRICE_FLOW',
'FUND_HIST' : 'weekly_FUND_HIST'}
WEEKLY_DROP_OFF_COLUMN_NAMES = ['_open', '_high', '_low', '_close','_volume','_weekly_K','_weekly_D', \
'_weekly_ROC', '_weekly_KD_SLOPE','_weekly_MFI_SLOPE']
# Define daily features, use or not is another story
MONTHLY_FEATURES = {
'_ROC' : '_monthly_ROC',
'_K' : '_monthly_K',
'_D' : '_monthly_D',
'_KD' : '_monthly_KD',
'_MFI' : '_monthly_MFI',
'_KD_SLOPE' : '_monthly_KD_SLOPE',
'_MFI_SLOPE': '_monthly_MFI_SLOPE'
}
MONTHLY_RANK_FEATURES = {'_KD_RANK' : '_monthly_KD_RANK',
'_MFI_RANK' : '_monthly_MFI_RANK'}
'''
MONTHLY_FUND_FEATURES={
'FUND_PERF' : 'monthly_FUND_PERF',
'FUND_MONEY_FLOW': 'monthly_FUND_MONEY_FLOW',
'FUND_PRICE_FLOW': 'monthly_FUND_PRICE_FLOW',
'FUND_HIST' : 'monthly_FUND_HIST'}
'''
MONTHLY_DROP_OFF_COLUMN_NAMES = ['_monthly_K','_monthly_D', '_monthly_ROC']
#Features status
#Daily keep kd, hist_mom, kd_slope, mfi, mfi_slope and roc
STAT_FEATURES = {'_KD' : '_daily_KD',
'_MFI' : '_daily_MFI',
'_HIST_MOM' : '_daily_HIST_MOM', #contains min and max, two values
'_KD_SLOPE' : '_daily_KD_SLOPE',
'_MFI_SLOPE': '_daily_MFI_SLOPE',
'_ROC' : '_daily_ROC',
}
#Weekly stat features: kd, mfi, roc
WEEKLY_STAT_FEATURES = {'_KD' : '_weekly_KD',
'_MFI' : '_weekly_MFI',
'_ROC' : '_weekly_ROC',
}
#Monthly stat features:
MONTHLY_STAT_FEATURES = {
'_ROC' : '_monthly_ROC',
}
def getNameList(dict):
    temp = []
    for key in sorted(dict):
        temp.append(dict[key])
    return temp
DF_COLUMN_NAMES = getNameList(FEATURES)
QUANT_DF_COLUMN_NAMES = ['kd_mom', 'kd_tr', 'mfi_mom', 'mfi_tr', 'price_mom', 'kd_rank', 'mfi_rank', 'cash_kd_rank', 'cash_mfi_rank', 'kd_compare_cash', 'mfi_compare_cash', 'fund_perf', 'fund_money_flow', 'fund_price_flow', 'fund_hist']
MARKET_GENERAL_COLUMN_NAMES = ['cash_kd_rank', 'cash_mfi_rank']
STATICS= {
'fund_perf': ['fund_strong', 'fund_good', 'fund_bad', 'fund_weak'], # 4 levels'
'fund_money_flow':['fund_money_in', 'fund_money_out'], #2 levels, +1 is overbrought, +0.5 if is bull, -0.5
'fund_price_flow':['fund_kd_down', 'fund_kd_up'], #2 levels,
'fund_hist': ['fund_hist_strong', 'fund_hist_good', 'fund_hist_bad', 'fund_hist_weak'],
'kd_momentum':['kd_oversold', 'kd_bear', 'kd_bull','kd_overbrought', 'kd_unquantify'], #5 levels, from small to large
'kd_slope': ['price_downfast', 'price_down', 'price_up', 'price_upfast'], #4 levels
'mfi': ['mfi_weak', 'mfi_bad', 'mfi_good', 'mfi_strong'], # 4 levels
'mfi_slope': ['money_outfast', 'money_out', 'money_in', 'money_infast'], #4 levels
'history_momenton' : ['hist_weak', 'hist_bad', 'hist_good', 'hist_strong'], # 4 levels
'kd_rank': ['kdrank_weak', 'kdrank_bad', 'kdrank_good', 'kdrank_strong'], # 4 levels
'mfi_rank': ['mfirank_weak', 'mfirank_bad', 'mfirank_good', 'mfirank_strong'], # 4 levels
'cash_kd_rank': ['cash_kdrank_weak', 'cash_kdrank_ok', 'cash_kdrank_strong'], #3 levels
'cash_mfi_rank': ['cash_mfirank_weak', 'cash_mfirank_ok','cash_mfirank_strong'], #3 levels
'kd_compare_cash': ['kd>cash', 'kd<cash'], #2 levels
'mfi_compare_cash': ['mfi>cash', 'mfi<cash'] #2 levels
}
MASSIVE_DROP = 0
MODERATE_DROP = 1
MODERATE_UP = 2
MASSIVE_UP = 3
WEAK = 0
MODERATE = 1
STRONG = 2
OVERALL_BEAR = 0
OVERALL_MODERATE = 1
OVERALL_BULL = 2
BETTER_THAN_CASH = 0
WORSE_THAN_CASH = 1
MUCH_HIGHER = 0
AROUND = 1
MUCH_WORSE = 2
QUANT_BUY_PICKLE = "./../pickle/quant_buy_df.pick"
QUANT_SELL_PICKLE = "./../pickle/quant_sell_df.pick"
BUY_STATICS = './../pickle/buy_statics.json'
SELL_STATICS = './../pickle/sell_statics.json'
HOLD_QUANT = './../pickle/hold_quant.json'
TRADE_DF_PICKLE = '/pickle/trade_df.pick'
QUANT_DF_PICKLE = '/pickle/quant_df.pick'
TRADE_QUANT = '/pickle/trade_quant.json'
SHORT_TRADE_QUANT = '/pickle/short_trade_quant.json'
#Initiate the trade strategy dict
TRADE_STRATEGY = {'buy':[],
'hold':[],
'sell':[]}
QUANT_CODE = {'VERY_STRONG' : '0',
'STRONG' :'1',
'BEAR' :'2',
'VERY_BEAR' :'3'}
QUANT_MFISLOPE_CODE = { 'VERY_STRONG' :0,
'STRONG' :1,
'BEAR' :2,
'VERY_BEAR' :3}
ALPHA_KEY= {'lucas': '15XTFIWMEJA8UMWW',
'winwin': '4LZS44C4NSWB80VG'}
| {
"repo_name": "martinggww/lucasenlights",
"path": "ETF/my_config.py",
"copies": "1",
"size": "7647",
"license": "cc0-1.0",
"hash": -387642500161463900,
"line_mean": 38.828125,
"line_max": 236,
"alpha_frac": 0.5049038839,
"autogenerated": false,
"ratio": 2.7457809694793536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8665588534954862,
"avg_score": 0.017019263684898252,
"num_lines": 192
} |
# 17 - Collect more digits - python
answer();
# ask for a single digit
result = ask( "Hello. Please enter any single digit", { 'choices' : "[1 DIGIT]" })
if result.name == 'choice' :
say( "Great, you said " + result.value )
# ask for a 5 digit long ZIP code
result = ask( "Hello. Please enter your 5 digit ZIP code", { 'choices' : "[5 DIGITS]" })
if result.name == 'choice' :
say( "Great, you said " + result.value )
# Digits work with speech or touch-tone input...
result = ask( "Hello. Please say or enter your 5 digit ZIP code", { 'choices' : "[5 DIGITS]" })
if result.name == 'choice' :
say( "Great, you said " + result.value )
# ask for 1 to 6 digit long an account ID
result = ask( "Please enter your account ID followed by the pound key.", { 'choices' : "[1-6 DIGITS]" })
if result.name == 'choice' :
say( "Great, you said " + result.value )
# ask for a US phone number (7 digits without area code, 10 digits with)
result = ask( "Please enter your 7 to 10 digit U.S. phone number", { 'choices' : "[7-10 DIGITS]" })
if result.name == 'choice' :
say( "Great, you said " + result.value )
# digit collection also supports all other prompt properties and event handlers
while result.name != "hangup" :
# collect 3 digits. Reprompt up to 3 times. Use a 7 second timeout...
result = ask( "Now please enter a number between 1 and 999",
{ 'choices' : "[1-3 DIGITS]", 'repeat' : 3, 'timeout' : 7,
'onTimeout' : lambda : say( "I'm sorry, I didn't hear anything." ),
'onBadChoice' : lambda : say( "I'm sorry, I did not understand your response." )})
log( "result name " + result.name );
log( "number is " + result.value );
if result.name == 'choice' :
say( "Great, you said " + result.value )
hangup();
| {
"repo_name": "tropo/tropo-samples",
"path": "python/tutorial/17-collectmoredigits.py",
"copies": "3",
"size": "1846",
"license": "mit",
"hash": 28721770616323384,
"line_mean": 29.7666666667,
"line_max": 105,
"alpha_frac": 0.6067172264,
"autogenerated": false,
"ratio": 3.2964285714285713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02164985773425414,
"num_lines": 60
} |
# 17. Consider a function which, for a given whole number n, returns the number of ones required when writing out all numbers between 0 and n.
# For example, f(13)=6. Notice that f(1)=1. What is the next largest n such that f(n)=n?
import csv, os # for CSV export and for invoking gnuplot
def einser(zahl): # number of "1" digits in a number
return list(str(zahl)).count("1")
# the computation always starts at 0
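# Quick illustrative check of the statement above (uses the built-in sum,
# which is shadowed by a variable further below): f(13) = 6
assert sum(einser(k) for k in range(14)) == 6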
# def f(n): # Berechnung von f(n)
# a = 0
# for x in range(0, n + 1):
# a += einser(x)
# return a
# x = 0
# while True:
# b = f(x)
# if(b == x):
# print(b)
# x += 1
# print(x, b)
# print(f(200000))
n = 0
sum = 0 # running total of f(n); note: this shadows the built-in sum()
liste = [] # list of the f(n) values
try:
while True:
b = einser(n)
sum += b
liste.append(sum) # append the value to the list
if(n == sum): # if f(n) == n ...
print(n) # ... print the number
n += 1
except: # if the program is interrupted (e.g. with Ctrl-C) ...
print("The program was stopped; the data will be saved and plotted.")
out = csv.writer(open("daten.csv", "w"), delimiter = '\n')
out.writerow(liste)
os.system("gnuplot gnuplot.plt")
print("Fertig") | {
"repo_name": "Findus23/mathe_python",
"path": "einser.py",
"copies": "1",
"size": "1116",
"license": "mit",
"hash": 5514299444299266000,
"line_mean": 26.1951219512,
"line_max": 142,
"alpha_frac": 0.6328545781,
"autogenerated": false,
"ratio": 2.3112033195020745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34440578976020747,
"avg_score": null,
"num_lines": null
} |
# 17 Nov 2011
import time
import os
import sys
import numpy
import h5py
from PnSC_ui import *
from PnSC_dataimport import *
from PnSC_SCui import *
from PnSC_math import *
from PnSC_h5io import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#class MainMenu(QMainWindow):
# def __init__(self, TreeWidg):
# super(MainMenu, self).__init__(None)
#
# self.setObjectName("MainMenu")
# self.bodywidget = QWidget(self)
# self.bodywidget.setObjectName("bodywidget")
# self.tasklistLabel = QLabel(self.bodywidget)
# self.tasklistLabel.setGeometry(QRect(9, 10, 1006, 16))
# self.tasklistLabel.setObjectName("tasklistLabel")
# self.setCentralWidget(self.bodywidget)
# self.main_menu_pulldown = QMenuBar(self)
# self.main_menu_pulldown.setGeometry(QRect(0, 0, 1025, 27))
# self.main_menu_pulldown.setObjectName("main_menu_pulldown")
# self.menuExit = QMenu(self.main_menu_pulldown)
# self.menuExit.setObjectName("menuExit")
# self.menuExit.setTitle('EXIT')
# self.setMenuBar(self.main_menu_pulldown)
# self.statusbar = QStatusBar(self)
# self.statusbar.setEnabled(False)
# self.statusbar.setObjectName("statusbar")
# self.setStatusBar(self.statusbar)
# self.actionExit = QAction(self)
# self.actionExit.setObjectName("actionExit")
# self.actionExit.setText('exit')
# self.menuExit.addAction(self.actionExit)
# self.main_menu_pulldown.addAction(self.menuExit.menuAction())
#
# QMetaObject.connectSlotsByName(self)
#
# @pyqtSignature("")
# def on_actionExit_triggered(self):
# print 'init h5'
class MainMenu(QMainWindow):
def __init__(self, previousmm):#, TreeWidg):
super(MainMenu, self).__init__(None)
#self.setupUi(self)
self.setWindowTitle('Vlassak Group PnSC Analysis')
#self.treeWidget=TreeWidg
self.h5path="%s" % os.getcwd()
self.bodywidget = QWidget(self)
self.bodywidget.setObjectName("bodywidget")
self.treeWidget=QTreeWidget(self.bodywidget)
QObject.connect(self.treeWidget,SIGNAL("itemSelectionChanged()"),self.processtreeselection)
self.setupmenu()
self.setCentralWidget(self.bodywidget)
self.statusdict={'h5open':False}
self.actionenable()
self.resize(820, 620)
self.treeWidget.setGeometry(QRect(10, 10, 800, 520))
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
#sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().hasHeightForWidth())
self.treeWidget.setSizePolicy(sizePolicy)
self.redrawPushButton = QPushButton(self.bodywidget)
self.redrawPushButton.setGeometry(QRect(10, 550, 200, 25))
self.redrawPushButton.setText('Draw h5 Tree')
QObject.connect(self.redrawPushButton, SIGNAL("pressed()"), self.redraw)
self.expandPushButton = QPushButton(self.bodywidget)
self.expandPushButton.setGeometry(QRect(210, 550, 200, 25))
self.expandPushButton.setText('Expand h5 Tree')
QObject.connect(self.expandPushButton, SIGNAL("pressed()"), self.expandtree)
self.expandexceptPushButton = QPushButton(self.bodywidget)
self.expandexceptPushButton.setGeometry(QRect(410, 550, 200, 25))
self.expandexceptPushButton.setText('Expand Groups')
QObject.connect(self.expandexceptPushButton, SIGNAL("pressed()"), self.expandgrouptree)
self.sortattrLineEdit = QLineEdit(self.bodywidget)
self.sortattrLineEdit.setGeometry(QRect(610, 550, 200, 25))
self.sortattrLineEdit.setText('epoch')
if previousmm is None:
self.on_action_openh5_triggered()
else:
oldselection=previousmm.geth5selectionpath(liststyle=True, removeformatting=False)
self.h5path=previousmm.h5path
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
self.statusdict['h5open']=True
self.actionenable()
def setupmenu(self):
self.setObjectName("MainMenu")
self.main_menu_pulldown = QMenuBar(self)
self.main_menu_pulldown.setObjectName("main_menu_pulldown")
self.ActionDict={}
#setup a menu section
self.menufileio = QMenu(self.main_menu_pulldown)
self.menufileio.setObjectName("menufileio")
self.menufileio.setTitle('File IO')
self.main_menu_pulldown.addAction(self.menufileio.menuAction())
#end of menu head
#setup a menu item in a menu section: self.<NAME>=MainMenuQAction(self, <NAME>, <menu text>, <host menu, e.g. self.menufileio>, <list of (requirement name, list of acceptable values) tuples>, self.ActionDict); keep the last argument the same
self.action_openh5=MainMenuQAction(self,'action_openh5', 'open h5 file', self.menufileio, [], self.ActionDict)
self.action_importscdata=MainMenuQAction(self,'action_importscdata', 'import calorimetry data', self.menufileio, [('h5open', [True])], self.ActionDict)
self.action_batchimportscdata=MainMenuQAction(self,'action_batchimportscdata', 'batch import calorimetry data setup', self.menufileio, [('h5open', [True])], self.ActionDict)
self.action_batchimportdatafixedmsma=MainMenuQAction(self,'action_batchimportdatafixedmsma', 'batch import calorimetry data using segment info from selected HeatProgram', self.menufileio, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_batchimportdatadfltmsma=MainMenuQAction(self,'action_batchimportdatadfltmsma', 'batch import calorimetry data with no segment info', self.menufileio, [('h5open', [True])], self.ActionDict)
self.action_copymsma=MainMenuQAction(self,'action_copymsma', 'copy selected segment info to all Heat Programs in experiment group', self.menufileio, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_createh5=MainMenuQAction(self,'action_createh5', 'new h5 file', self.menufileio, [], self.ActionDict)
self.action_createexpgrp=MainMenuQAction(self,'action_createexpgrp', 'new experiment group', self.menufileio, [('h5open', [True])], self.ActionDict)
self.action_delh5grp=MainMenuQAction(self,'action_delh5grp', 'DELETE selected group', self.menufileio, [('h5open', [True]), ('selectiontype', ['Group'])], self.ActionDict)
self.action_deldataset=MainMenuQAction(self,'action_deldataset', 'DELETE dataset', self.menufileio, [('h5open', [True]), ('selectiontype', ['Dataset'])], self.ActionDict)
self.action_copydataset=MainMenuQAction(self,'action_copydataset', 'Copy (and rename) dataset', self.menufileio, [('h5open', [True]), ('selectiontype', ['Dataset'])], self.ActionDict)
#self.action_delexpgrp=MainMenuQAction(self,'action_delexpgrp', 'DELETE experiment group', self.menufileio, [('h5open', [True])], self.ActionDict)
self.action_editattrs=MainMenuQAction(self,'action_editattrs', 'Edit import attrs (select a heat program)', self.menufileio, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
#setup a menu section
self.menuplot = QMenu(self.main_menu_pulldown)
self.menuplot.setObjectName("menuplot")
self.menuplot.setTitle('Visualization')
self.main_menu_pulldown.addAction(self.menuplot.menuAction())
#end of menu head
#setup a menu item in a menu section.
self.action_plotraw=MainMenuQAction(self,'action_plotraw', 'plot Dataset values (select dataset)', self.menuplot, [('h5open', [True]), ('selectiontype', ['Dataset'])], self.ActionDict)
self.action_printdata=MainMenuQAction(self,'action_printdata', 'print Dataset values (select dataset or attribute)', self.menuplot, [('h5open', [True]), ('selectiontype', ['Dataset', 'Attr'])], self.ActionDict)
self.action_plotmetadata=MainMenuQAction(self,'action_plotmetadata', 'Plot Heat Program MetaData(select heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_getsegd=MainMenuQAction(self,'action_getsegd', 'send SegDict to data (select a heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_plotsegs=MainMenuQAction(self,'action_plotsegs', 'plot Segs by color (select a heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_viewSCanalysis=MainMenuQAction(self,'action_viewSCanalysis', 'SC data viewer (select a heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_viewACharmanalysis=MainMenuQAction(self,'action_viewACharmanalysis', 'AC harmonics data viewer (select a heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_viewFit=MainMenuQAction(self,'action_viewFit', 'Fit data viewer (select a heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_printattrs=MainMenuQAction(self,'action_printattrs', 'print Table of attrs (select dataset or group)', self.menuplot, [('h5open', [True]), ('selectiontype', ['Dataset', 'Group'])], self.ActionDict)
self.action_exporttext=MainMenuQAction(self,'action_exporttext', 'Export text data (select a heat program)', self.menuplot, [('h5open', [True]), ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
#setup a menu section
self.calprep = QMenu(self.main_menu_pulldown)
self.calprep.setObjectName("calprep")
self.calprep.setTitle('Calibration Prep')
self.main_menu_pulldown.addAction(self.calprep.menuAction())
#end of menu head
#setup a menu item in a menu section.
self.action_calcresistance=MainMenuQAction(self,'action_calcresistance', 'Calc cell Res (select heat program or experiment)', self.calprep, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)
self.action_setuprescal=MainMenuQAction(self,'action_setuprescal', 'Setup R(T) cal', self.calprep, [('h5open', [True])], self.ActionDict)
self.action_assignrescal=MainMenuQAction(self,'action_assignrescal', 'Assign R(T) cal (select experiment)', self.calprep, [('h5open', [True]), ('selectiongrouptype', ['experiment'])], self.ActionDict)
self.action_calcresextraptoTo=MainMenuQAction(self,'action_calcresextraptoTo', 'Calc Res that gives To (select heat program or experiment)', self.calprep, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)
self.action_calcresbycycle=MainMenuQAction(self,'action_calcresbycycle', 'Calc Res for each cycle using first soak segment (select heat program or experiment)', self.calprep, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)
self.action_entertwopointres=MainMenuQAction(self,'action_entertwopointres', 'Enter list of 2 points R values', self.calprep, [('h5open', [True])], self.ActionDict)
#end of actions
#setup a menu section
self.anmenu = QMenu(self.main_menu_pulldown)
self.anmenu.setObjectName("anmenu")
self.anmenu.setTitle('Calorimetry Analysis')
self.main_menu_pulldown.addAction(self.anmenu.menuAction())
#end of menu head
#setup a menu item in a menu section.
self.action_delan=MainMenuQAction(self,'action_delan', 'Delete analysis Group (select analysis group)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['analysis'])], self.ActionDict)
self.action_screcipe=MainMenuQAction(self,'action_screcipe', 'Build SC analysis recipe (select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_fitlossrecipe=MainMenuQAction(self,'action_fitlossrecipe', 'Build heat loss fit model recipe (select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram']), ('samplepowerperrateexists', [True])], self.ActionDict)
self.action_heatcaprecipe=MainMenuQAction(self,'action_heatcaprecipe', 'Build heat capacity recipe (select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram']), ('samplepowerperrateexists', [True])], self.ActionDict)
self.action_heatcappeaksrecipe=MainMenuQAction(self,'action_heatcappeaksrecipe', 'Build C(T) peak search+fit recipe (select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram']), ('samplepowerperrateexists', [True])], self.ActionDict)
self.action_acrecipe=MainMenuQAction(self,'action_acrecipe', 'Build AC freq analysis recipe (select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_acheatcaprecipe=MainMenuQAction(self,'action_acheatcaprecipe', 'Build AC Cp analysis recipe (select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_acheatcapallrecipe=MainMenuQAction(self,'action_acheatcapallrecipe', 'Build AC recipe for calculating mCp multiple ways(select heat program or experiment)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['heatprogram', 'experiment'])], self.ActionDict)#, ('selectiongrouptype', ['heatprogram'])], self.ActionDict)
self.action_applyscrecipe=MainMenuQAction(self,'action_applyscrecipe', 'Apply analysis recipe (select experiment or heat program)', self.anmenu, [('h5open', [True]), ('selectiongrouptype', ['experiment', 'heatprogram'])], self.ActionDict)
self.setMenuBar(self.main_menu_pulldown)
QMetaObject.connectSlotsByName(self)
def redraw(self):
if os.path.exists(self.h5path) and self.h5path.endswith('.h5'):
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
self.statusdict['h5open']=True
self.actionenable()
def expandgrouptree(self):
self.expandtree(groupsonly=True)
def expandtree(self, groupsonly=False):
def expandchildren(item):#recursive
for i in range(item.childCount()):
child=item.child(i)
if not groupsonly or True in [not ('(' in child.child(j).text(0) or str(child.child(j).text(0)).startswith("'")) for j in range(child.childCount())]:
child.setExpanded(True)
expandchildren(child)
for i in range(self.treeWidget.topLevelItemCount()):
item=self.treeWidget.topLevelItem(i)
item.setExpanded(True)
expandchildren(item)
def settreeselection_list(self, selectionpathlist):
item=self.treeWidget.topLevelItem(0)
for itemname in selectionpathlist:
chn=[item.child(i).text(0) for i in range(item.childCount())]
if itemname in chn:
item.setExpanded(True)
item=item.child(chn.index(itemname))
else:
break
self.treeWidget.setCurrentItem(item)
def actionenable(self):
for aname, ad in self.ActionDict.iteritems():
ad['ref'].setDisabled(False in [(k in self.statusdict.keys()) and (self.statusdict[k] in vals) for k, vals in ad['enable_reqs']])
# if aname=='action_calcresistance':
# print [(k in self.statusdict.keys()) and (self.statusdict[k] in vals) for k, vals in ad['enable_reqs']]
# print [(k, vals) for k, vals in ad['enable_reqs']]
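# Illustrative evaluation (hypothetical state): with
# self.statusdict == {'h5open': True, 'selectiontype': 'Dataset'}
# an action whose enable_reqs is [('h5open', [True]), ('selectiontype', ['Dataset', 'Group'])]
# stays enabled, while one requiring ('selectiongrouptype', ['heatprogram'])
# is disabled because that key is not yet in statusdict.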
def h5nodename_treeitem(self, treeitem, removeformatting=True):
if removeformatting:
return ((str(treeitem.text(0)).partition(':')[0]).partition('(')[0]).strip("'")
else:
return str(treeitem.text(0))
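# e.g. (illustrative tree labels, assumed): "samplecurrent(2, 5000)" -> 'samplecurrent'
# and "'epoch': 1234567890" -> 'epoch' when removeformatting is True.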
def geth5selectionpath(self, liststyle=False, removeformatting=True):
try:
treeitem=self.currenttreeitem
except:
return '/'
attrname=None
if self.statusdict['selectiontype']=='Attr':
attrname=self.h5nodename_treeitem(treeitem, removeformatting=removeformatting)
treeitem=treeitem.parent()
s=[]
while not treeitem.parent() is None:
s=[self.h5nodename_treeitem(treeitem, removeformatting=removeformatting)]+s
treeitem=treeitem.parent()
if not liststyle:
s='/'.join((s))
if not attrname is None:
return s, attrname
return s
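# Illustrative (node names assumed from elsewhere in this file): with a
# dataset selected under Calorimetry/heat1/measurement/HeatProgram/run1,
# liststyle=False returns the '/'-joined path string, liststyle=True returns
# the list of node names, and an Attr selection returns (path, attrname).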
def processtreeselection(self):
treeitem=self.treeWidget.currentItem()
self.currenttreeitem=treeitem
print 'selection changed to ', treeitem.text(0)
if treeitem.parent() is None:
self.statusdict['selectiontype']='File'
elif str(treeitem.text(0)).startswith("'"):
self.statusdict['selectiontype']='Attr'
elif '(' in treeitem.text(0):
self.statusdict['selectiontype']='Dataset'
else:
self.statusdict['selectiontype']='Group'
self.statusdict['selectionname']=self.h5nodename_treeitem(treeitem)
if self.statusdict['selectiontype']=='File':
self.statusdict['selectionparentname']=''
else:
self.statusdict['selectionparentname']=self.h5nodename_treeitem(treeitem.parent())
self.statusdict['samplepowerperrateexists']=True#TODO: write code for checking on existence of analysis data arrays
if self.statusdict['selectiontype']=='Group':
if self.statusdict['selectionparentname']=='HeatProgram':
self.statusdict['selectiongrouptype']='heatprogram'
elif self.statusdict['selectionparentname']=='Calorimetry':
self.statusdict['selectiongrouptype']='experiment'
elif self.statusdict['selectionparentname']=='analysis':
self.statusdict['selectiongrouptype']='analysis'
else:
self.statusdict['selectiongrouptype']='other'
else:
self.statusdict['selectiongrouptype']=''
print self.statusdict
self.actionenable()
@pyqtSignature("")
def on_action_createh5_triggered(self):
temp=mygetsavefile(parent=self, xpath=self.h5path,markstr='Enter name of new h5 file', filename='.h5')
if temp=='':
return
self.h5path=str(temp)
createh5file(self.h5path)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
self.statusdict['h5open']=True
self.actionenable()
@pyqtSignature("")
def on_action_openh5_triggered(self):
self.statusdict['h5open']=False
self.actionenable()
temp=mygetopenfile(parent=self, xpath=self.h5path, markstr='h5 file with calorimetry data', filename='.h5' )
if temp=='':
return
self.h5path=str(temp)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
self.statusdict['h5open']=True
self.actionenable()
@pyqtSignature("")
def on_action_createexpgrp_triggered(self):
idialog=lineeditDialog(self, title='Enter name for the new h5 experiment', deftext='')
if not idialog.exec_():
return
h5expname=idialog.text
h5file=h5py.File(self.h5path, mode='r')
if h5expname in h5file['Calorimetry']:
h5file.close()
QMessageBox.warning(self,"FAILED", "Experiment Group Exists - must first delete")
return
h5file.close()
create_exp_grp(self.h5path, h5expname)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_editattrs_triggered(self):
path=self.geth5selectionpath(liststyle=False)
editattrs(self, self.h5path, path)
@pyqtSignature("")
def on_action_copydataset_triggered(self):
nam=self.geth5selectionpath(liststyle=True)[-1]
idialog=lineeditDialog(self, title='Select new dataset name', deftext=nam)
if not idialog.exec_():
return
newnam=idialog.text
h5file=h5py.File(self.h5path, mode='r+')
srcds=h5file[self.geth5selectionpath(liststyle=False)]
g=srcds.parent
if newnam in g:
h5file.close()
QMessageBox.warning(self,"Aborting", "entered name already exists")
return
newds=g.create_dataset(newnam, data=readh5pyarray(srcds))
for k, v in srcds.attrs.items():
newds.attrs[k]=v
h5file.close()
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_deldataset_triggered(self):
idialog=messageDialog(self, "Continue with DELETE? there's no 'undo'")
if not idialog.exec_():
return
h5file=h5py.File(self.h5path, mode='r+')
del h5file[self.geth5selectionpath(liststyle=False)]
h5file.close()
print 'DELETED:', self.geth5selectionpath(liststyle=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_delh5grp_triggered(self):
idialog=messageDialog(self, "Continue with DELETE? there's no 'undo'")
if not idialog.exec_():
return
h5file=h5py.File(self.h5path, mode='r+')
del h5file[self.geth5selectionpath(liststyle=False)]
h5file.close()
print 'DELETED:', self.geth5selectionpath(liststyle=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_delexpgrp_triggered(self):
h5file=h5py.File(self.h5path, mode='r+')
idialog=selectgroupDialog(self, h5file['Calorimetry'], title='Select h5 experiment group to DELETE')
if not idialog:
h5file.close()
return
del h5file[idialog.grp.name]
h5file.close()
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
def batchrun_files(self, folder, startsendswith=('', ''), skiperrors=False):
for fn in os.listdir(folder):
if not (fn.startswith(startsendswith[0]) and fn.endswith(startsendswith[1])):
continue
p=os.path.join(folder, fn)
self.batchattrdict['path']=p
print 'running ', p
if skiperrors:
try:
self.batchfcn(batchattrdict=self.batchattrdict)
except:
print 'ERROR IMPORTING ', p
else:
self.batchfcn(batchattrdict=self.batchattrdict)
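# Hypothetical usage sketch (folder and filename filters invented):
# self.batchfcn = self.on_action_importscdata_triggered
# self.batchattrdict = getemptybatchattrdict()
# self.batchrun_files('/data/runs', startsendswith=('2010', '.dat'), skiperrors=True)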
@pyqtSignature("")
def on_action_copymsma_triggered(self):
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
pathlist=self.geth5selectionpath(liststyle=True)
h5file, h5hpgrp=gethpgroup(self.h5path, pathlist[1], pathlist[4])
segms=h5hpgrp.attrs['segment_ms'][:]
segmA=h5hpgrp.attrs['segment_mA'][:]
h5file.close()
assign_segmsma(self.h5path, pathlist[1], segms, segmA)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_batchimportdatafixedmsma_triggered(self):
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
pathlist=self.geth5selectionpath(liststyle=True)
h5file, h5hpgrp=gethpgroup(self.h5path, pathlist[1], pathlist[4])
segms=h5hpgrp.attrs['segment_ms'][:]
segmA=h5hpgrp.attrs['segment_mA'][:]
sgd=selectgroupDialog(self, h5file['Calorimetry'], title='Select h5 experiment group for import')
if not sgd:
h5file.close()
return
h5file.close()
h5expname=sgd.grpname
idialog=selectorDialog(self, FileFormatFunctionLibrary.keys(), title='Select data import protocol')
if not idialog.exec_():
return
protname=idialog.name
batchattrdict=getemptybatchattrdict()
batchattrdict['grpname']=h5expname
batchattrdict['protname']=protname
plist=mygetopenfiles(parent=self, xpath=os.path.split(self.h5path)[0], markstr='Select ONE data file for each experiment to be imported', filename='.h5' )
for p in plist:
batchattrdict['path']=p
ans=FileImport(self, protname, batchattrdict=batchattrdict)
#print ans
if not ans:
continue
AttrDict, DataSetDict, SegmentData=ans
grpname=os.path.splitext(os.path.split(AttrDict['importpath'])[1])[0]
writenewh5heatprogram(self.h5path, h5expname, grpname, AttrDict, DataSetDict, (segms, segmA))
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_batchimportdatadfltmsma_triggered(self):
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
#pathlist=self.geth5selectionpath(liststyle=True)
h5file=h5py.File(self.h5path, mode='r')
sgd=selectgroupDialog(self, h5file['Calorimetry'], title='Select h5 experiment group for import')
if not sgd:
h5file.close()
return
h5file.close()
h5expname=sgd.grpname
idialog=selectorDialog(self, FileFormatFunctionLibrary.keys(), title='Select data import protocol')
if not idialog.exec_():
return
protname=idialog.name
batchattrdict=getemptybatchattrdict()
batchattrdict['grpname']=h5expname
batchattrdict['protname']=protname
plist=mygetopenfiles(parent=self, xpath=os.path.split(self.h5path)[0], markstr='Select ONE data file for each experiment to be imported', filename='.h5' )
for p in plist:
batchattrdict['path']=p
ans=FileImport(self, protname, batchattrdict=batchattrdict)
#print ans
if not ans:
continue
AttrDict, DataSetDict, SegmentData=ans
mA=DataSetDict['samplecurrent'][1][0]*DataSetDict['samplecurrent'][0]['Aunit']*1000.
ms=1000.*numpy.float32(range(len(mA)))/AttrDict['daqHz']
segms=[0., ms[-1]]
segmA=[0., 0.]
grpname=os.path.splitext(os.path.split(AttrDict['importpath'])[1])[0]
writenewh5heatprogram(self.h5path, h5expname, grpname, AttrDict, DataSetDict, (segms, segmA))
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_batchimportscdata_triggered(self, batchattrdict=None):
self.batchfcn=self.on_action_importscdata_triggered
self.batchattrdict=getemptybatchattrdict()
#NOTE: this batch-import setup appears unfinished -- batchfcn and batchattrdict are set but batchrun_files is never called here
@pyqtSignature("")
def on_action_importscdata_triggered(self, batchattrdict=None):
if batchattrdict is None:
h5file=h5py.File(self.h5path, mode='r')
sgd=selectgroupDialog(self, h5file['Calorimetry'], title='Select h5 experiment group for import')
if not sgd:
h5file.close()
return
h5file.close()
h5expname=sgd.grpname
idialog=selectorDialog(self, FileFormatFunctionLibrary.keys(), title='Select data import protocol')
if not idialog.exec_():
return
protname=idialog.name
else:
h5expname=batchattrdict['grpname']
protname=batchattrdict['protname']
ans=FileImport(self, protname, batchattrdict=batchattrdict)
#print ans
if not ans:
return
AttrDict, DataSetDict, SegmentData=ans
mA=DataSetDict['samplecurrent'][1][0]*DataSetDict['samplecurrent'][0]['Aunit']*1000.
ms=1000.*numpy.float32(range(len(mA)))/AttrDict['daqHz']
idialog=SegmentEditor(self, SegmentData, cycledata=(ms, mA))
if batchattrdict is None:
if not idialog.exec_():
return
else:
for sb, k in [(idialog.firstderptsSpinBox, 'firstderptsSpinBox'), (idialog.secderptsSpinBox, 'secderptsSpinBox'), (idialog.secdervalSpinBox, 'secdervalSpinBox')]:
if k in batchattrdict and not (batchattrdict[k] is None):
sb.setValue(batchattrdict[k])
idialog.findsegs()
idialog.ExitRoutine()
SegmentData=idialog.SegmentData
idialog=lineeditDialog(self, title='Enter name for h5 group', deftext=os.path.splitext(os.path.split(AttrDict['importpath'])[1])[0])
if batchattrdict is None:
if not idialog.exec_():
return
grpname=idialog.text
else:
grpname=os.path.splitext(os.path.split(AttrDict['importpath'])[1])[0]
if 'savegrpname' in batchattrdict and not batchattrdict['savegrpname'] is None:
grpname=batchattrdict['savegrpname']
writenewh5heatprogram(self.h5path, h5expname, grpname, AttrDict, DataSetDict, SegmentData)
oldselection=['Calorimetry', h5expname, 'measurement', 'HeatProgram', grpname]
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_calcresistance_triggered(self, critms_step=1., critmAperms_constmA=0.01, critdelmA_constmA=10., critmA_zero=0.1):
pathlist=self.geth5selectionpath(liststyle=True)
if self.statusdict['selectiongrouptype']=='experiment':
h5file, hplist=experimenthppaths(self.h5path, pathlist[1])
h5file.close()
hplist=[hpp.rpartition('/')[2] for hpp in hplist]
else:
hplist=[pathlist[4]]
for hp in hplist:
#print hp
dlist=CreateHeatProgSegDictList(self.h5path, pathlist[1], hp, critms_step=critms_step, critmAperms_constmA=critmAperms_constmA, critdelmA_constmA=critdelmA_constmA, critmA_zero=critmA_zero)
segtypelist=[d['segmenttype'] for d in dlist]
if segtypelist.count('soak')==1:
dsoak=dlist[segtypelist.index('soak')]
elif segtypelist.count('soak')>1:
print 'ERROR - MORE THAN ONE SOAK SEGMENT WAS FOUND - THIS IS UNEXPECTED FOR AN Ro HEAT PROGRAM'
return
else:
print 'ERROR - NO SOAK SEGMENTS WERE FOUND - ONE IS REQUIRED FOR AN Ro HEAT PROGRAM'
return
if segtypelist.count('zero')>0:
if segtypelist[segtypelist.index('soak')-1]=='zero':
dzero=dlist[segtypelist.index('soak')-1]#take the preceding zero if possible so that the user can segment the initial transients separately and avoid this issue
else:
dzero=dlist[segtypelist.index('zero')]
else:
dzero=None
# vals=[]
# vals+=[CalcR0_segdict(dsoak, AveBeforeDivision=True, dzero=dzero)]
# vals+=[CalcR0_segdict(dsoak, AveBeforeDivision=False, dzero=dzero)]
# vals+=[(vals[0]+vals[1])/2.]
# desc=['ratio of the means', 'mean of the ratios', 'Ave of these 2 values']
# choices=['%.4f : %s' %(v, d) for v, d in zip(vals, desc)]
# idialog=selectorDialog(self, choices, title='select value of R0 to use')
# if not idialog.exec_():
# return
# R0=vals[idialog.index]
R0=CalcR0_segdict(dsoak, AveBeforeDivision=True, dzero=dzero)
writecellres(self.h5path, pathlist[1], hp, R0)
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_calcresextraptoTo_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
if self.statusdict['selectiongrouptype']=='experiment':
h5file, hplist=experimenthppaths(self.h5path, pathlist[1])
h5file.close()
hplist=[hpp.rpartition('/')[2] for hpp in hplist]
else:
hplist=[pathlist[4]]
pardict={}
pardict['h5path']=self.h5path
pardict['h5expname']=pathlist[1]
for i, hp in enumerate(hplist):
pardict['h5hpname']=hp
if i==0:
idialog=rescal_ExtraptoToDialog(self, pardict)
idialog.exec_()
Ro=idialog.Ro
self.data=idialog.calcd
else:
Ro, self.data=calcRo_extraptoTo(**pardict)
writecellres_calc(self.h5path, pathlist[1], hp, Ro)
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_calcresbycycle_triggered(self, critms_step=1., critmAperms_constmA=0.01, critdelmA_constmA=10., critmA_zero=0.1):
pathlist=self.geth5selectionpath(liststyle=True)
if self.statusdict['selectiongrouptype']=='experiment':
h5file, hplist=experimenthppaths(self.h5path, pathlist[1])
h5file.close()
hplist=[hpp.rpartition('/')[2] for hpp in hplist]
else:
hplist=[pathlist[4]]
for hp in hplist:
#print hp
dlist=CreateHeatProgSegDictList(self.h5path, pathlist[1], hp, critms_step=critms_step, critmAperms_constmA=critmAperms_constmA, critdelmA_constmA=critdelmA_constmA, critmA_zero=critmA_zero)
segtypelist=[d['segmenttype'] for d in dlist]
if 'soak' in segtypelist:
dsoak=dlist[segtypelist.index('soak')]
else:
print 'ERROR - NO SOAK SEGMENTS WERE FOUND - ONE IS REQUIRED'
return
if segtypelist.count('zero')>0:
if segtypelist[segtypelist.index('soak')-1]=='zero':
dzero=dlist[segtypelist.index('soak')-1]#take the preceding zero if possible so that the user can segment the initial transients separately and avoid this issue
else:
dzero=dlist[segtypelist.index('zero')]
else:
dzero=None
R0=numpy.array([CalcR0_segdict(extractcycle_SegDict(dsoak, i), AveBeforeDivision=True, dzero=extractcycle_SegDict(dzero, i)) for i in range(dsoak['cycletime'].shape[0])])
writecellres_calc(self.h5path, pathlist[1], hp, R0)
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_setuprescal_triggered(self):
idialog=rescalDialog(self, self.h5path)
idialog.exec_()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_assignrescal_triggered(self):
rescalpath_getorassign(self.h5path, self.statusdict['selectionname'], parent=self, forceassign=True)
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
self.actionenable()
@pyqtSignature("")
def on_action_entertwopointres_triggered(self):
idialog=TwoPointResTableDialog(self, self.h5path)
idialog.exec_()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_plotraw_triggered(self):
h5file=h5py.File(self.h5path, mode='r')
path=self.geth5selectionpath()
self.data=readh5pyarray(h5file[path])
h5file.close()
if self.data.ndim==1:
idialog=simpleplotDialog(self, self.data)
else:
plotdata=self.data.swapaxes(self.data.ndim-1,numpy.argmax(self.data.shape))#assume plot should be vs the longest dimension
plotdata=[plotdata[ind] for ind in numpy.ndindex(*plotdata.shape[:-1])]
idialog=simpleplotDialog(self, plotdata)
idialog.exec_()
@pyqtSignature("")
def on_action_plotmetadata_triggered(self):
fcndict=heatprogrammetadatafcns
idialog=selectorDialog(self, fcndict.keys(), title='select type of metadata to plot')
if not idialog.exec_():
return
fcn=fcndict[idialog.name]
pathlist=self.geth5selectionpath(liststyle=True)
self.data=fcn(self.h5path, pathlist[1], pathlist[4])
idialog=simpleplotDialog(self, self.data[1], xdata=self.data[0])
idialog.exec_()
@pyqtSignature("")
def on_action_printdata_triggered(self):
h5file=h5py.File(self.h5path, mode='r')
if self.statusdict['selectiontype']=='Attr':
path, attrname=self.geth5selectionpath()
self.data=h5file[path].attrs[attrname]
print attrname, ': ', self.data
elif self.statusdict['selectiontype']=='Dataset':
path=self.geth5selectionpath()
self.data=readh5pyarray(h5file[path])
print path.rpartition('/')[2], ': ', self.data
h5file.close()
@pyqtSignature("")
def on_action_printattrs_triggered(self):
h5file=h5py.File(self.h5path, mode='r')
path=self.geth5selectionpath()
self.data=h5file[path].attrs.items()
for k, v in self.data:
print k, '\t', v
h5file.close()
@pyqtSignature("")
def on_action_getsegd_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
self.data=CreateHeatProgSegDictList(self.h5path, pathlist[1], pathlist[4])
@pyqtSignature("")
def on_action_exporttext_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=textexportDialog(self, self.h5path, pathlist[1], pathlist[4])
idialog.exec_()
@pyqtSignature("")
def on_action_plotsegs_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
self.data=CreateHeatProgSegDictList(self.h5path, pathlist[1], pathlist[4])
idialog=SegmentCyclePlot(self, self.data)
idialog.show()
return idialog
@pyqtSignature("")
def on_action_viewSCanalysis_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
self.data=CreateHeatProgSegDictList(self.h5path, pathlist[1], pathlist[4])
idialog=analysisviewerDialog(self, self.data, pathlist[1])
idialog.show()
@pyqtSignature("")
def on_action_viewACharmanalysis_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=acharmonicsDialog(self, self.h5path, pathlist[1], pathlist[4])
idialog.show()
@pyqtSignature("")
def on_action_viewFit_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
self.data=getfitdictlist_hp(self.h5path, pathlist[1], pathlist[4])
hpsdl=CreateHeatProgSegDictList(self.h5path, pathlist[1], pathlist[4])
h5file, filterdict=getfilterdict(self.h5path, pathlist[1])
h5file.close()
fitviewer(self, hpsdl, self.data, filterdict)
@pyqtSignature("")
def on_action_delan_triggered(self):
path=self.geth5selectionpath(liststyle=False)
g, garb, p=path.strip('/').rpartition('/')
h5file=h5py.File(self.h5path, mode='r+')
h5g=h5file[g]
del h5g[p]
h5file.close()
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
self.actionenable()
@pyqtSignature("")
def on_action_screcipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1])
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_fitlossrecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1], calctype='FitPS')
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_heatcaprecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1], calctype='QUC')
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_heatcappeaksrecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1], calctype='CTpk')
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_acrecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1], calctype='AC')
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_acheatcaprecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1], calctype='AC_RTPSD')
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_acheatcapallrecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
idialog=SCrecipeDialog(self, self.h5path, pathlist[1], calctype='AC_mCpall')
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
@pyqtSignature("")
def on_action_applyscrecipe_triggered(self):
pathlist=self.geth5selectionpath(liststyle=True)
if self.statusdict['selectiongrouptype']=='heatprogram':
h5hpdflt=pathlist[4]
else:
h5hpdflt=None
idialog=SCanalysisDialog(self, self.h5path, pathlist[1], h5hpdflt=h5hpdflt)
idialog.show()
oldselection=self.geth5selectionpath(liststyle=True, removeformatting=False)
h5file=h5py.File(self.h5path, mode='r')
fillh5tree(self.treeWidget, h5file, selectionpathlist=oldselection, hpsortattr=str(self.sortattrLineEdit.text()))
h5file.close()
class MainMenuQAction(QAction):
def __init__(self, parent, actionname, actiontext, hostmenu, reqs, adict):
super(MainMenuQAction, self).__init__(parent)
self.setObjectName(actionname)
self.setText(actiontext)
hostmenu.addAction(self)
adict[actionname]={'ref':self, 'enable_reqs':reqs}
#class TreeWindow(QDialog):#Creates a side window for displaying the contents of an .h5 file in tree format
# def __init__(self):
# super(TreeWindow, self).__init__(None)
# self.setWindowTitle('h5 File contents')
# self.treeWidget=QTreeWidget()
# mainlayout=QGridLayout()
# mainlayout.addWidget(self.treeWidget, 0, 0)
# self.setLayout(mainlayout)
def start(previousmm=None):
mainapp=QApplication(sys.argv)
form=MainMenu(previousmm)
form.show()
form.setFocus()
global PARENT
PARENT=form
mainapp.exec_()
return form
mm=None
mm=start()
print 'done'
#fitd=getfitdictlist_hp(mm.h5path, 'heat1a','2010Nov27_Cell2_61mA_50ms_500ms_cool_1C')[0]
#segd=CreateHeatProgSegDictList(mm.h5path, 'heat1a', '2010Nov27_Cell2_61mA_50ms_500ms_cool_1C')[2]
#
#
#fild={}
#
#fild['reggrid']={'gridinterval':0.2}
#fild['peaksearch']={'pkfcn':'GaussHalfLorentz', 'critpeakheight':5.e-7, 'critsep':20., 'firstdernpts':10, 'firstderorder':1, 'secdernpts':20, 'secderorder':1, 'critcurve':None, 'pospeaks':1, 'negpeaks':1}
#def Cpk_secder(segd, fild, C, T, h5path=None, h5expname=None, h5hpname=None):
# C=('sampleheatcapacity', 'peaksearch')
# T=('sampletemperature', 'reggrid')
#for tup in [C, T]:
# (segkey, filkey)=tup
# if not '~'.join(tup) in segd.keys():
# segd['~'.join(tup)]=performgenericfilter(segd[segkey], fild[filkey])
# #if True in [v>0 for k, v in fild[filkey].iteritems() if 'deriv' in k]: #this handle deriv filters other than SG but if the deriv is not wrt dt something needs to change
# # segd['~'.join(tup)]/=dt
#C_=segd['~'.join(C)]
#T_=segd['~'.join(T)]
#(segkey, filkey)=T
#dx=fild[filkey]['gridinterval']
#(segkey, filkey)=C
#ch=fild[filkey]['critpeakheight']
#cs=fild[filkey]['critsep']/dx
#fp=fild[filkey]['firstdernpts']
#fo=fild[filkey]['firstderorder']
#sp=fild[filkey]['secdernpts']
#so=fild[filkey]['secderorder']
#
#X=T_[0]
#Y=C_[0]
#Xgrid=numpy.linspace(X.min(), X.max(), (X.max()-X.min())/dx+1)
#Ygrid=numpy.empty(Xgrid.shape, dtype='float32')
#gridind=[numpy.argmin((x-Xgrid)**2) for x in X]
#indsgot=numpy.sort(numpy.uint32(list(set(gridind))))
#indsinterp=numpy.sort(numpy.uint32(list(set(range(len(Xgrid)))-set(gridind))))
#gridind=numpy.uint32(gridind)
#for i in indsgot:
# Ygrid[i]=Y[gridind==i].mean()
#Ygrid[indsinterp]=numpy.float32(scipy.interpolate.interp1d(indsgot, Ygrid[indsgot])(indsinterp))
#
#import pylab
#pylab.plot(X, Y, 'b.', markersize=1)
#pylab.plot(Xgrid, Ygrid, 'k-', lw=1)
#
##pkind=peaksearch1dSG(Ygrid, dx=dx, critcounts=ch, critsepind=cs, critcurve=None, max_withincritsep=False, firstdernpts=fp, firstderorder=fo, secdernpts=sp, secderorder=so)
#x=Ygrid
#dx=dx
#critcounts=ch
#critsepind=cs
#critcurve=None
#max_withincritsep=False
#firstdernpts=fp
#firstderorder=fo
#secdernpts=sp
#secderorder=so
#ifirstder=savgolsmooth(x, nptsoneside=firstdernpts, order=firstderorder, dx=dx, deriv=1)
#zeroind=arrayzeroind1d(ifirstder, postoneg=True)
#temp=numpy.where(x[(numpy.uint32(numpy.round(zeroind)),)]>critcounts)
#fullpkind=zeroind[temp]
#if fullpkind.size==0:
# print '#$%^#$%^#%&$%^&'
#pkind=clustercoordsbymax1d(x, numpy.uint32(numpy.round(fullpkind)), critsepind)
#if critcurve is not None:
# isecder=savgolsmooth(x, nptsoneside=secdernpts, order=secderorder, dx=dx, deriv=2)
# temp=numpy.where(isecder[(numpy.uint32(numpy.round(pkind)),)]<(-1*critcurve))
# pkind=numpy.array(pkind)[temp]
## pkind=list(pkind)
## pkind.reverse()#highest to smallest for pairing below
#pkind=numpy.array(pkind, dtype=numpy.float32)
#pkht=Ygrid[numpy.uint32(numpy.round(pkind))]
#pkposn=Xgrid[numpy.uint32(numpy.round(pkind))]
#iarr=numpy.uint32(range(len(Xgrid)))
#hwposns1=[(numpy.any((Ygrid<(h/2.))&(iarr>i)) and (numpy.where((Ygrid<(h/2.))&(iarr>i))[0][0],) or (i,))[0] for i, h in zip(pkind, pkht)]
#hwposns0=[(numpy.any((Ygrid<(h/2.))&(iarr<i)) and (numpy.where((Ygrid<(h/2.))&(iarr<i))[0][0],) or (i,))[0] for i, h in zip(pkind, pkht)]
#pkhw=dx*(numpy.float32(hwposns1)+numpy.float32(hwposns0))/2.
#pylab.plot(Xgrid[numpy.uint32(numpy.round(pkind))], Ygrid[numpy.uint32(numpy.round(pkind))], 'ro')
#
#pks=numpy.float32([pkposn, pkhw, pkht]).T#, numpy.ones(pkht.shape, dtype='float32')*.5]).T
#
#pkfcn=PeakFcnLibrary[fild[filkey]['pkfcn']]
#
#pars, sigs, resid=fitpeakset(X, Y, pks, GaussHalfLorentz)
#
#
#
#fitY=numpy.float32([GaussHalfLorentz(p, X) for p in pars]).sum(axis=0)
#gridfitY=numpy.float32([GaussHalfLorentz(p, Xgrid) for p in pars]).sum(axis=0)
#igridfitY=numpy.float32([gridfitY[:i+1].sum() for i in range(len(Xgrid))])*dx
#print igridfitY[-1]
#critval0_iY=igridfitY[-1]/25.
#critval1_iY=igridfitY[-1]/5.
#critval2_iY=igridfitY[-1]/2.
#print Xgrid[igridfitY>critval0_iY][0]
#print Xgrid[igridfitY>critval1_iY][0]
#print Xgrid[igridfitY>critval2_iY][0]
#
#
#pylab.plot(X, fitY, 'r-')
#pylab.show()
| {
"repo_name": "johnmgregoire/NanoCalorimetry",
"path": "PnSC_main.py",
"copies": "1",
"size": "52510",
"license": "bsd-3-clause",
"hash": -5818774641615642000,
"line_mean": 50.0301263362,
"line_max": 369,
"alpha_frac": 0.6577794706,
"autogenerated": false,
"ratio": 3.3675367151927147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9414614082026866,
"avg_score": 0.022140420753169805,
"num_lines": 1029
} |
''' 17-treat-ephemerids.py
=========================
AIM: Using the ephemerids computed by 16-compute-ephemerids.py and observational constraints
(period of the planet, transit time), calculates the observation periods.
To be used by the next two scripts (18, 19) for treatment and plotting.
INPUT: files: - <orbit_id>_misc/ephemerids_inter_<max_interruptions>_mag_<mag_max><_SAA?>.npz
variables: see section PARAMETERS (below)
OUTPUT: <orbit_id>_<SL_angle>misc/ephemerids_obs<transit_duration>h_<max_interruptions>inter_V<mag_max><_SAA?>.npz
CMD: python 17-treat-ephemerids.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: Not with real catalogue.
'''
###########################################################################
### INCLUDES
import numpy as np
import os
import sys
import matplotlib.cm as cm
import time
import pylab as plt
from resources.routines import *
from resources.TimeStepping import *
import resources.constants as const
import parameters as param
from resources.coordinates import ecliptic2equatorial
import resources.figures as figures
from resources.targets import *
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Orbit id
orbit_id = '6am_700_25_conf4'
apogee=700
perigee=700
# superseded by the 2018 date list below:
# ddate = [{'name':'1st January', 'dayofyear':1}, {'name':'5th April', 'dayofyear':95}, {'name':'21st June', 'dayofyear':172}, {'name':'5th September', 'dayofyear':248}, {'name':'21st December', 'dayofyear':355}]
ddate = [{'name':'21 January 2018', 'dayofyear':20},
{'name':'21 February 2018', 'dayofyear':51},
{'name':'21 March 2018', 'dayofyear':79},
{'name':'21 April 2018', 'dayofyear':110},
{'name':'21 May 2018', 'dayofyear':140},
{'name':'21 June 2018', 'dayofyear':171},
{'name':'21 July 2018', 'dayofyear':201},
{'name':'19 August 2018', 'dayofyear':230},
{'name':'18 September 2018', 'dayofyear':260},
{'name':'17 October 2018', 'dayofyear':289},
{'name':'16 November 2018', 'dayofyear':309},
{'name':'15 December 2018', 'dayofyear':343},
]
# Maximum interruption time tolerated [min]
max_interruptions = 97
# Maximum visible magnitude
mag_max = 9.
# Take SAA into account?
SAA = True
# Show plot ?
show = False
# Save plot ?
save = True
fancy = True
# min of scale
min_val=0
# max of scale
max_val=24
# step of scale
step_scale=2
# Print much information ?
verbose = False
# Factor in the SL post treatment correction ?
SL_post_treat = True
# Stop before saving results to file.
early_stop = False
# Minimal minutes to be observed per orbit (if consecutive == False); used here as a single scalar value
min_t_obs_per_orbit = 0 # 79
# This is a way to vary the results by multiplying the whole PST by a number.
# This is very easy: if the PST is multiplied by a constant, the constant can be
# taken out of the integral, so multiplying only the flux is equivalent to re-running all the simulations
pst_factor=1.
for iddate in range(len(ddate)):
date_looked_for = ddate[iddate]['name']
# First minute in data set !
minute_ini = (ddate[iddate]['dayofyear'] - 1) * 1440
# Last minute to look for
minute_end = (ddate[iddate]['dayofyear']) * 1440
print '*'*30, 'min_t_obs_per_orbit %1.1f' % min_t_obs_per_orbit
# File name for the input file (in a compressed binary Python format)
if SAA: note = '_SAA'
else: note = ''
if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor
if SL_post_treat: note+= '_%4.3fSLreduction' % param.SL_post_treat_reduction
input_fname = 'ephemerids_inter_%d_mag_%3.1f%s.npz' % (max_interruptions,mag_max,note)
skycoverage_fname = 'skycoverage_%dmin_V%3.1f%s.txt' % (min_t_obs_per_orbit,mag_max,note)
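# Example of the resulting input name (illustrative; assumes
# param.SL_post_treat_reduction == 0.95, which is defined elsewhere):
# 'ephemerids_inter_97_mag_9.0_SAA_0.950SLreduction.npz'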
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee, perigee)
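# For apogee = perigee = 700 km this is roughly T = 2*pi*sqrt(a**3/mu) with
# a = R_Earth + 700 km, i.e. about 98.8 minutes, consistent with the
# 97 min interruption cap set above.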
###########################################################################
### INITIALISATION
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
sky_coverage=0.
print 'ORBIT ID:\t\t%s\nPST factor:\t\t%d\nmin_t_obs_per_orbit\t%d (%.1f%%)\nMAGNITUDE:\t\t%02.1f\nSAA :\t%g' % (orbit_id,pst_factor,min_t_obs_per_orbit,min_t_obs_per_orbit/period*100., mag_max, SAA)
# loading data
sys.stdout.write("Loading worthy targets from %s ...\t" % input_fname)
sys.stdout.flush()
worthy_targets = np.load(folder_misc+input_fname)
worthy_targets = worthy_targets['worthy_targets']
max_len = 0
for k in range(0, len(worthy_targets)):
if max_len < np.shape(worthy_targets[k].Visibility())[0]:
max_len = np.shape(worthy_targets[k].Visibility())[0]
# an optimistic upper bound, so the arrays below may be over-allocated
max_len = int(max_len)
start_obs = np.empty([len(worthy_targets),max_len])
stop_obs = np.empty([len(worthy_targets),max_len])
interruptions_obs = np.empty([len(worthy_targets),max_len])
print 'Done.\n%d targets loaded' % len(worthy_targets)
###########################################################################
### COMPUTATIONS
###########################################################################
count = 0
period = altitude2period(apogee,perigee)
check=np.zeros(len(worthy_targets))
sky_coverage=0.
for ii in range(len(worthy_targets)):
y = float(ii)
message = '\r%3.1f %%' % (y/float(len(worthy_targets))*100.)
sys.stdout.write(message)
sys.stdout.flush()
visi = worthy_targets[ii].Visibility()
invi = worthy_targets[ii].Invisibility()
inter = worthy_targets[ii].get_interruption_time()
# *much* slower than a broadcast version, but far simpler given the conditions below
for a, d, i in zip(visi, invi, inter) :
# keep only visibility windows [a, d] that overlap [minute_ini, minute_end]
if d >= minute_ini and a <= minute_end :
o = d - a -i
if o < min_t_obs_per_orbit : continue
# Some observations can start before the ini time: clip them and scale the
# interruption time by the fraction of the window that is kept
if a < minute_ini :
effective_percentage = (d - minute_ini) / (d - a)
a = minute_ini
i = effective_percentage * i
o = d - a -i
# Some observations can continue after the end time: clip them and scale the interruption time by the fraction of the window that is kept
if d > minute_end:
effective_percentage = (minute_end - a) / (d - a)
d = minute_end
i = effective_percentage * i
o = d - a -i
check[ii] += o
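# Worked example (illustrative numbers): a window a=-30, d=60 with i=9 min of
# interruptions, clipped at minute_ini=0, keeps (60-0)/(60-(-30)) = 2/3 of the
# window, so i -> 6 and o = 60 - 0 - 6 = 54 observable minutes.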
'''exit()
ids_in_range = np.where(np.logical_and(np.logical_or(visi >= minute_ini, visi >= minute_ini), visi <= minute_end))
print ids_in_range
ids_in_range = ids_in_range[0]
if len(ids_in_range) == 0: continue
visi = visi[ids_in_range]
invi = invi[ids_in_range]
inter = inter[ids_in_range]
observations = invi - visi - inter
validated_ids = observations>=min_t_obs_per_orbit
validated_observations = observations[validated_ids]
vinter = inter[validated_ids]
vvis = visi[validated_ids]
vinvi = invi[validated_ids]
# Some of the observations can start before the ini time, stop them and compensate the interruption time by reducing the amount of interruption by the fraction of time spent observing
before_end_obs_id = np.where(vvis < minute_ini)[0]
effective_percentage = (vinvi[before_end_obs_id] - minute_ini) / (vinvi[before_end_obs_id] - vvis[before_end_obs_id])
vinter[before_end_obs_id] = effective_percentage * vinter[before_end_obs_id]
vvis[before_end_obs_id] = minute_ini
validated_observations = vinvi - vvis - vinter
# Some of the observations can continue after the end time, stop them and compensate the interruption time by reducing the amount of interruption by the fraction of time spent observing
after_end_obs_id = np.where(vinvi > minute_end)[0]
effective_percentage = (minute_end - vvis[after_end_obs_id]) / (vinvi[after_end_obs_id] - vvis[after_end_obs_id])
vinter[after_end_obs_id] = effective_percentage * vinter[after_end_obs_id]
vinvi[after_end_obs_id] = minute_end
validated_observations = vinvi - vvis - vinter
if np.size(validated_observations)>0:
if validated_observations.sum() > 1440:
for i, vo in enumerate(validated_observations):
print
print vo, vvis[i], vinvi[i], vinter[i], vinter[i]/(vinvi[i] - vvis[i])
exit()
check[ii] += validated_observations.sum()
"""#print validated_observations;
obs_efficiency_in_orbit = validated_observations/period
time_lost = np.ceil(obs_efficiency_in_orbit) - obs_efficiency_in_orbit
#print obs_efficiency_in_orbit
#print validated_visi; print validated_ids
print
for i, vo in enumerate(validated_observations):
if i == len(validated_observations) : continue
print (vvis[i+1] - vinvi[i]) / period, vinter[i] / period
print vinvi[i] - vvis[i], vinvi[i] - vvis[i]-vinter[i], vo, vo >=min_t_obs_per_orbit
if i == len(validated_visi) - 1 : continue
dt_between_obs = (validated_visi[i + 1] - vs)/period
if dt_between_obs < 0.9 :
print
print dt_between_obs
print (validated_visi[i + 1] - vs)
print vs
print validated_visi[i + 1]
print validated_observations[i]
exit()"""
'''
rat, dect = worthy_targets[ii].Coordinates()
sky_coverage+=0.5/param.resx/param.resy*np.pi*np.cos(dect)
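# The increment above is the fractional solid angle of one grid cell at
# declination dect: (2*pi/resx) * (pi/resy) * cos(dect) / (4*pi)
# = 0.5 * pi * cos(dect) / (resx * resy).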
message = '\rComputations done.'
sys.stdout.write(message)
sys.stdout.flush()
print '\nSky coverage for', ddate[iddate]['name']
print '\t***', round(sky_coverage*100.,3), ' % ***'
obs_tot=check
###########################################################################
### INITIALISATION
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = -np.pi
ra_f = np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
data_grid = np.zeros(np.shape(ra_grid))
data_grid_days = np.zeros(np.shape(ra_grid))
###########################################################################
# cycling through the targets:
obs_time = np.zeros(len(worthy_targets))
totxtdata = []
for index_target, target in enumerate(worthy_targets):
# tar_start = start_obs[index_target,:]
# tar_stop = stop_obs[index_target,:]
#print target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
#if verbose: print index_target, target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
if obs_tot[index_target]>0.:
obs_time[index_target]=obs_tot[index_target]/60.##/1440. * 100.
# Associate the density to a grid point
if target.Coordinates()[0] < np.pi:
id_ra = np.where(np.abs(ras-target.Coordinates()[0]) < 0.05)[0]
else:
id_ra = np.where(np.abs(ras-(target.Coordinates()[0]-2.*np.pi)) < 0.05)[0]
id_dec= np.where(np.abs(decs-target.Coordinates()[1]) < 0.05)[0]
if data_grid[id_dec, id_ra] == 0 and obs_tot[index_target]>0.:
data_grid_days[id_dec, id_ra] = 1
data_grid[id_dec, id_ra] = obs_tot[index_target]/60.#1440. * 100.
if verbose: print target.Coordinates()[0]*180./np.pi,'\t',target.Coordinates()[1]*180./np.pi,'\t', obs_tot[index_target]/60.##/1440. * 100.
totxtdata.append([target.Coordinates()[0], target.Coordinates()[1], obs_tot[index_target]])
if verbose: print 'obs start | obs end | hours of obs'
print np.amin(data_grid), np.amax(data_grid)
print np.shape(obs_tot)
totxtdata = np.asarray(totxtdata)
###########################################################################
### Plotting
# transform 0 into no plotting in the data matrix
mag_min= np.amin(data_grid[data_grid>0])
data_grid[data_grid < mag_min] = np.nan
mag_min= np.amin(data_grid_days[data_grid_days>0])
data_grid_days[data_grid_days < mag_min] = np.nan
if fancy: figures.set_fancy()
fig = plt.figure()
ax=plt.subplot(111)
ax.set_aspect(2.)
min_nb_obs_day = np.nanmin(data_grid)
max_nb_obs_day = np.nanmax(data_grid)
plt.grid()
ra_grid *= const.RAD
dec_grid *= const.RAD
data_grid[data_grid<min_nb_obs_day]=0
v = np.arange(min_val,max_val+step_scale, step_scale)
CS = plt.contour(ra_grid,dec_grid,data_grid,colors='k',levels=v)
plt.clabel(CS, inline=1,fmt='%d',colors='red', fontsize=12)
CS = plt.contourf(ra_grid, dec_grid, data_grid, levels=v, cmap=plt.cm.winter)
plt.yticks(np.arange(-80, 100, 20.))
#print v
#print np.nanmin(data_grid)
#print np.nanmax(data_grid)
#v = np.arange(0,1440, 60)
cbar = plt.colorbar(CS, ticks=v)
#cbar.ax.set_yticklabels([r'$%g\%%$' % tv for tv in v])
cbar.set_label(r'$\mathrm{Observation\ time\ [h]}$')
plt.xlabel('RA [deg]')
plt.ylabel('Dec [deg]')
plt.title(date_looked_for)
###########################################################################
if not SAA: note = '_noSAA'
else: note = '_SAA'
if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor
# Save plot
if save:
fname = '%s-sky_map-%d-mag%d_onday%d%s' % (orbit_id,min_t_obs_per_orbit,mag_max,ddate[iddate]['dayofyear'],note)
figures.savefig(folder_figures+fname, fig, fancy)
print 'figure saved as %s' % fname
np.savetxt(folder_misc+fname+'.dat', totxtdata, fmt='%1.3f, %1.3f, %d')#, header='ra [rad], dec [rad], obstime [min]')
print 'ASCII file saved as %s' % fname
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "17d_single_day_obs.py",
"copies": "1",
"size": "13567",
"license": "bsd-3-clause",
"hash": -4629968037601565000,
"line_mean": 31.770531401,
"line_max": 210,
"alpha_frac": 0.6350703914,
"autogenerated": false,
"ratio": 2.8118134715025906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39468838629025904,
"avg_score": null,
"num_lines": null
} |
# 187. Repeated DNA Sequences
#
# All DNA is composed of a series of nucleotides abbreviated as A, C, G, and T, for example: "ACGAATTCCG".
# When studying DNA, it is sometimes useful to identify repeated sequences within the DNA.
#
# Write a function to find all the 10-letter-long sequences (substrings)
# that occur more than once in a DNA molecule.
#
# For example,
#
# Given s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT",
#
# Return:
# ["AAAAACCCCC", "CCCCCAAAAA"].
#
# This problem can be solved in O(n) time and O(n) space with hashing:
# http://blog.csdn.net/hyperbolechi/article/details/44302991
class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
        counts = {}
        for i in range(len(s) - 9):
            if s[i:i + 10] not in counts:
                counts[s[i:i + 10]] = 1
            else:
                counts[s[i:i + 10]] += 1
        res = []
        for elem in counts:
            if counts[elem] > 1:
                res.append(elem)
        return res
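
# A hedged space-saving variant (not in the original file): encode each
# 10-letter window as a 20-bit integer (2 bits per base) so the hash keys
# are small ints instead of strings. The class name Solution2 is illustrative.
class Solution2(object):
    def findRepeatedDnaSequences(self, s):
        code = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
        seen, repeated = set(), set()
        key = 0
        for i, ch in enumerate(s):
            # shift in 2 bits per base; keep only the lowest 20 bits (10 bases)
            key = ((key << 2) | code[ch]) & 0xFFFFF
            if i >= 9:
                if key in seen:
                    repeated.add(s[i - 9:i + 1])
                else:
                    seen.add(key)
        return list(repeated)
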
if __name__ == '__main__':
print Solution().findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT")
| {
"repo_name": "gengwg/leetcode",
"path": "187_repeated_dna_sequences.py",
"copies": "1",
"size": "1163",
"license": "apache-2.0",
"hash": -6084029767162964000,
"line_mean": 28.075,
"line_max": 106,
"alpha_frac": 0.6018916595,
"autogenerated": false,
"ratio": 3.2486033519553073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9346428770538713,
"avg_score": 0.0008132481833188467,
"num_lines": 40
} |
# 188. Best Time to Buy and Sell Stock IV
# The split-or-add approach.
# O(nlogn), 40 ms.
#
# Critical test case:
# Case #1: 1 [1, 10, 8, 9, 6, 12]
# Case #2: 3 [1, 10, 8, 9, 6, 12]
# Case #3: 1 [1, 5, 2, 4, 3, 6]
from typing import List


class Trade:
def __init__(self, low, high):
self.low = low
self.high = high
def profit(self):
return self.high - self.low
class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
# Saves all the profitable dayTrades.
dayTrades = []
for x in range(1, len(prices)):
if prices[x - 1] < prices[x]:
dayTrades.append(Trade(prices[x - 1], prices[x]))
def merge(x, y):
return Trade(x.low, y.high)
# Overall strategy: tries to merge as many consecutive dayTrades as possible.
# A mergedTrade has the characteristic of being better than any of its containing dayTrades.
mergedTrades = []
# A splitGain records how much profit will be gained when spliting two consecutive dayTrades inside a mergedTrade.
splitGains = []
stack = []
for t in dayTrades:
stack.append(t)
# While merging stack[-2] is not better than stack[-1].
# (Below condition is equivalent to `stack[-2].low >= stack[-1].low`)
while (
len(stack) >= 2
and merge(stack[-2], stack[-1]).profit() <= stack[-1].profit()
):
# Why we can safely remove stack[-1] without worrying breaking consecutiveness?
# If later stack[before] is merged with t, the splitGain is always >= stack[-1].
# So that if we need to add stack[-1] to the final result, the splitting between stack[before] and t must already happened.
# See test case #2 for an example.
mergedTrades.append(stack[-2])
stack[-2] = stack[-1]
stack.pop()
# While we should merge stack[-2] and stack[-1].
# See test case #3 for an example.
while len(stack) >= 2:
merged = merge(stack[-2], stack[-1])
# (Below condition is equivalent to `stack[-2].low < stack[-1].low and stack[-2].high < stack[-1].high)
if (
merged.profit() > stack[-2].profit()
and merged.profit() > stack[-1].profit()
):
splitGains.append(
stack[-2].profit() + stack[-1].profit() - merged.profit()
)
stack[-2] = merged
stack.pop()
else:
break
while stack:
mergedTrades.append(stack.pop())
# Greedily chooses the largest k gains.
# There are two type of gains:
# - add a mergedTrade (which is always safe, see test case #2 for an example)
# - split two merged trades
gains = [t.profit() for t in mergedTrades] + splitGains
gains.sort(reverse=True)
return sum(gains[:k])
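

# A small hedged check harness (not part of the original solution), exercising
# the critical test cases listed above; the expected values follow from the
# problem statement (max profit with at most k transactions).
if __name__ == "__main__":
    sol = Solution()
    assert sol.maxProfit(1, [1, 10, 8, 9, 6, 12]) == 11  # buy 1, sell 12
    assert sol.maxProfit(3, [1, 10, 8, 9, 6, 12]) == 16  # 1->10, 8->9, 6->12
    assert sol.maxProfit(1, [1, 5, 2, 4, 3, 6]) == 5  # buy 1, sell 6
    print("all cases passed")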
| {
"repo_name": "digiter/Arena",
"path": "188-best-time-to-buy-and-sell-stock-iv.py",
"copies": "1",
"size": "3105",
"license": "mit",
"hash": -626532666136625900,
"line_mean": 38.3037974684,
"line_max": 139,
"alpha_frac": 0.5272141707,
"autogenerated": false,
"ratio": 3.781973203410475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48091873741104746,
"avg_score": null,
"num_lines": null
} |
# 189. Rotate Array - LeetCode
# https://leetcode.com/problems/rotate-array/description/
# Rotate an array of n elements to the right by k steps.
# For example, with n = 7 and k = 3, the array [1,2,3,4,5,6,7] is rotated to [5,6,7,1,2,3,4].
class Solution(object):
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
k = k % len(nums)
for j in range(k):
i = nums.pop()
nums.insert(0,i)
# # WA,
# i = 0
# j = nums[0]
# k = k % len(nums) # TLE, [1],1
# while True:
# next_i = ( i - k ) % len(nums)
# nums[i] = nums[next_i]
# i = next_i
# if i == k:
# nums[k] = j
# break
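
# A hedged O(1)-extra-space alternative (not in the original file): rotate by
# reversing the whole list, then the first k elements, then the remainder.
# The class name Solution2 is illustrative only.
class Solution2(object):
    def rotate(self, nums, k):
        if not nums:
            return
        def reverse(lo, hi):  # reverse nums[lo..hi] in place
            while lo < hi:
                nums[lo], nums[hi] = nums[hi], nums[lo]
                lo += 1
                hi -= 1
        n = len(nums)
        k = k % n
        reverse(0, n - 1)
        reverse(0, k - 1)
        reverse(k, n - 1)
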
test = [
([1],1,[1]),
([1,2,3,4],7,[2,3,4,1]),
([1,2,3,4,5,6],2,[5,6,1,2,3,4]),
([1,2,3,4,5,6,7],3,[5,6,7,1,2,3,4]),
([1,2,3,4,5,6,7,8],3,[6,7,8,1,2,3,4,5]),
([1,2,3,4,5,6,7,8],4,[5,6,7,8,1,2,3,4]),
(range(9),3,[6,7,8,0,1,2,3,4,5]),
]
s = Solution()
for i in test:
a = i[0]
    s.rotate(a, i[1])
    print a, a == i[2] | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/189_rotate-array.py",
"copies": "1",
"size": "1252",
"license": "mit",
"hash": 7448828536861209000,
"line_mean": 26.2391304348,
"line_max": 93,
"alpha_frac": 0.4273162939,
"autogenerated": false,
"ratio": 2.3802281368821294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3307544430782129,
"avg_score": null,
"num_lines": null
} |
# -*- coding: utf-8 -*-
import traceback
from time import time
from twisted.internet import defer
from twisted.words.protocols.jabber.jid import JID
import tornado.web
#import txmongo
import bnw.core.base
import bnw.core.bnw_objects as objs
from bnw.formatting import linkify, thumbify
from widgets import widgets
from bnw.core.base import config
from bnw.core import statsd
class BnwWebRequest(object):
def __init__(self, user=None):
self.body = None
self.to = None
self.jid = JID(user['jid']) if user else None
self.user = user
def get_defargs(handler=None):
args = {
'linkify': linkify,
'thumbify': thumbify,
'config': config,
'w': widgets,
}
if handler:
args['auth_user'] = getattr(handler, 'user', None)
args['secure'] = handler.request.protocol=="https"
return args
class BnwWebHandler(tornado.web.RequestHandler):
errortemplate = '500.html'
    # Twisted quirk: how do we run a callback chain without passing a result?
def passargs(self, f, *args, **kwargs):
return f(*args, **kwargs)
@tornado.web.asynchronous
def get(self, *args, **kwargs):
d = defer.Deferred()
d.addCallback(self.passargs, *args, **kwargs)
d.addCallbacks(self.writeandfinish, self.errorfinish)
self.start_time = self.render_time = time()
statsd.send('web-gets', 1, 'c')
d.callback(self.respond)
@tornado.web.asynchronous
def post(self, *args, **kwargs):
d = defer.Deferred()
d.addCallback(self.passargs, *args, **kwargs)
d.addCallbacks(self.writeandfinish, self.errorfinish)
self.start_time = self.render_time = time()
statsd.send('web-posts', 1, 'c')
d.callback(self.respond_post)
def respond(self, *args, **kwargs):
"""Default GET response."""
self.set_status(500)
return 'No GET handler'
def respond_post(self, *args, **kwargs):
"""Default POST response."""
self.set_status(500)
return 'No POST handler'
def render(self, templatename, **kwargs):
args = get_defargs(self)
args.update(kwargs)
return super(BnwWebHandler, self).render(templatename, **args)
def writeandfinish(self, text):
self.render_time = time()
if isinstance(text, dict):
try:
self.render(self.templatename, **text)
except Exception:
handlerclass = self.__class__.__name__
self.set_status(500)
self.render(self.errortemplate, text=traceback.format_exc(),
handlerclass=handlerclass)
else:
# TODO: We shouldn't use private variables.
if not self._finished:
self.write(text)
self.finish()
self.logperformance()
def errorfinish(self, text):
self.render_time = time()
text = text.getTraceback()
handlerclass = self.__class__.__name__
self.set_status(500)
self.render(self.errortemplate, text=text, handlerclass=handlerclass)
self.logperformance()
def logperformance(self):
end_time = time()
statsd.send('web-reqtime', (end_time - self.start_time)*1000, 'ms')
print 'PERFORMANCE',self.render_time-self.start_time, end_time-self.render_time, self.request.uri
def static_url(self, path, include_host=None):
if self.request.host in (config.webui_base, 'www.'+config.webui_base):
path = tornado.web.RequestHandler.static_url(self, path, False)
return self.request.protocol + "://" + config.webui_static + path
return tornado.web.RequestHandler.static_url(self, path, include_host)
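

# A minimal hedged usage sketch (not part of the original module): a handler
# subclass only needs a template name and a respond() that returns the
# template context; writeandfinish() takes care of the rendering. The template
# file name 'example.html' is illustrative, not a real template in this repo.
class ExampleHandler(BnwWebHandler):
    templatename = 'example.html'

    def respond(self):
        return {'greeting': 'hello'}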
| {
"repo_name": "ojab/bnw",
"path": "bnw/web/base.py",
"copies": "1",
"size": "3765",
"license": "bsd-2-clause",
"hash": -4909122210413813000,
"line_mean": 33.2272727273,
"line_max": 105,
"alpha_frac": 0.6156706507,
"autogenerated": false,
"ratio": 3.75,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9858706650747424,
"avg_score": 0.0013927999905151867,
"num_lines": 110
} |
# 18 continued. gt/pm/tree
# New documentation says parameter name is 'taxa'.
# Examples in old documentation are wrong, fixed below.
import sys, unittest, json
sys.path.append('./')
sys.path.append('../')
import webapp
from test_gt_ot_get_tree import GtTreeTester
service = webapp.get_service(5004, 'gt/pm/tree')
class TestGtPmTree(GtTreeTester):
@classmethod
def get_service(self):
return service
@classmethod
def http_method(cls):
return 'POST'
# Insert here: edge case tests
# Insert here: inputs out of range, leading to error or long delay
# Insert here: error-generating conditions
# (See ../README.md)
def test_example_42(self):
x = self.start_request_tests(example_42)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
def test_example_43(self):
x = self.start_request_tests(example_43)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
null=None; false=False; true=True
example_42 = service.get_request('POST', {u'taxa': [u'Setophaga striata', u'Setophaga magnolia', u'Setophaga angelae', u'Setophaga plumbea', u'Setophaga virens']})
example_43 = service.get_request('POST', {u'taxa': [u'Helianthus annuus', u'Passiflora edulis', u'Rosa arkansana', u'Saccharomyces cerevisiae']})
if __name__ == '__main__':
webapp.main()
| {
"repo_name": "jar398/tryphy",
"path": "tests/test_gt_pm_tree.py",
"copies": "1",
"size": "1424",
"license": "bsd-2-clause",
"hash": -3134316180386459000,
"line_mean": 31.3636363636,
"line_max": 163,
"alpha_frac": 0.6818820225,
"autogenerated": false,
"ratio": 3.2072072072072073,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4389089229707207,
"avg_score": null,
"num_lines": null
} |
# 18.
print_log('\n18. Prover gets Credentials for Proof Request\n')
proof_request = {
'nonce': '123432421212',
'name': 'proof_req_1',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {
'name': 'name',
"restrictions": {
"issuer_did": trust_anchor_did,
"schema_id": issuer_schema_id
}
}
},
'requested_predicates': {
'predicate1_referent': {
'name': 'age',
'p_type': '>=',
'p_value': 18,
"restrictions": {
"issuer_did": trust_anchor_did
}
}
}
}
print_log('Proof Request: ')
pprint.pprint(proof_request)
# 19.
        print_log('\n19. Prover gets Credentials for attr1_referent and predicate1_referent\n')
proof_req_json = json.dumps(proof_request)
prover_cred_search_handle = \
await anoncreds.prover_search_credentials_for_proof_req(prover_wallet_handle, proof_req_json, None)
creds_for_attr1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover_cred_search_handle,
'attr1_referent', 1)
prover_cred_for_attr1 = json.loads(creds_for_attr1)[0]['cred_info']
print_log('Prover credential for attr1_referent: ')
pprint.pprint(prover_cred_for_attr1)
creds_for_predicate1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover_cred_search_handle,
'predicate1_referent', 1)
prover_cred_for_predicate1 = json.loads(creds_for_predicate1)[0]['cred_info']
print_log('Prover credential for predicate1_referent: ')
pprint.pprint(prover_cred_for_predicate1)
await anoncreds.prover_close_credentials_search_for_proof_req(prover_cred_search_handle)
| {
"repo_name": "peacekeeper/indy-sdk",
"path": "docs/how-tos/negotiate-proof/python/step3.py",
"copies": "2",
"size": "2179",
"license": "apache-2.0",
"hash": 7759627374575975000,
"line_mean": 43.4897959184,
"line_max": 112,
"alpha_frac": 0.4947223497,
"autogenerated": false,
"ratio": 3.9762773722627736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5470999721962774,
"avg_score": null,
"num_lines": null
} |
''' 18-plot-transit-proba.py
=========================
AIM: Plots transit probabilities according to 17-treat-ephemerids.py.
A probability of 100% corresponds to being able to observe the target for
its whole period.
INPUT: files: - <orbit_id>_misc/ephemerids_obs<transit_duration>h_<max_interruptions>inter_V<mag_max><_SAA?>.npz
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ with the following format:
proba_<orbit_id>_<exoplanet_period>obs_<max_interruptions>inter_V%3.1f.png/.eps/.pdf
CMD: python 18-plot-transit-proba.py
ISSUES: <none known>
REQUIRES:
- Latex
- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: Not with real catalogue.
'''
###########################################################################
### INCLUDES
import sys
import numpy as np
import pylab as plt
import matplotlib.cm as cm
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
from resources.targets import *
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
from mpl_toolkits.basemap import Basemap
###########################################################################
### PARAMETERS
# orbit ID, and altitude of the orbit in km
orbit_id = 702
apogee = 700
perigee = 700
# First minute in data set !
minute_ini = 0
# Last minute to look for
minute_end = 1440*365/12
# File name for the list of orbit file
orbits_file = 'orbits.dat'
typep = 'Neptunes'
if typep == 'Super-Earths':
# Orbit period [d]
exoplanet_period = 50
# Minimum observable time for plots [h]
transit_duration = 6
elif typep == 'Neptunes':
# Orbit period [d]
exoplanet_period = 13
# Minimum observable time for plots [h]
transit_duration = 3
# Maximum visible magnitude
mag_max = 10.
# Plot a few stars as well ?
stars= False
targets_exo=False
# Maximum interruption time tolerated [min]
max_interruptions = 20
# Take SAA into account?
SAA = True
# File name for the input file (in a compressed binary Python format)
if SAA: note = '_SAA'
else: note = ''
# File name for the input file (in a compressed binary Python format)
input_fname = 'ephemerids_obs%dh_%dinter_V%3.1f%s.npz' % (transit_duration,max_interruptions,mag_max,note)
# Print much information ?
verbose = False
# Nice plots?
fancy=True
# Save plots?
save = True
# Show figures ?
show = True
#####################################################################################################################
# for every region in the sky/worthy target:
# >> Find when you can look with transit_duration [h] with maximal max_interruptions [min]
# >>>> return start and end time of observations with duration of interruptions [min]
# >> can we observe a transit ?
# >>>> Vary the start of transit time by transit_duration [h] until exoplanet_period [h]
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee, perigee)
number_of_transit = exoplanet_period * 24. / transit_duration
###########################################################################
### INITIALISATION
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
data_grid = np.zeros(np.shape(ra_grid))
if stars:
ra_stars=[101.2833, 95.9875, 213.9167, 219.9, 279.2333, 78.6333, 114.8250, 88.7917]
dec_stars=[-16.7161, -52.6956, 19.1822, -60.8339, 38.7836, -8.2014, 5.2250, 7.4069]
y_offset=[0.5e6,0.5e6,-0.8e6,0.5e6,0.5e6,0.5e6,-0.8e6,0.5e6]
labels = ['Sirius','Canopus','Arcturus',r'$\alpha\mathrm{Centauri}$','Vega','Rigel','Procyon','Betelgeuse']
if targets_exo: ra_tar, dec_tar, magn = np.loadtxt('resources/defined-exo.csv', delimiter=';', unpack=True)
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
# loading data
print 'loading %s' % input_fname
sys.stdout.write("Loading worthy targets...\t")
sys.stdout.flush()
data = np.load(folder_misc+input_fname)
worthy_targets = data['worthy_targets']
start_obs=data['start_obs']
stop_obs=data['stop_obs']
interruptions_obs=data['interruptions_obs']
print 'Done, %d targets loaded' % len(worthy_targets)
###########################################################################
# cycling through the targets:
density = np.zeros(len(worthy_targets))
for index_target, target in enumerate(worthy_targets):
tar_start = start_obs[index_target,:]
tar_stop = stop_obs[index_target,:]
# print target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
if verbose: print index_target, target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
for i, f in zip(tar_start, tar_stop):
if i >= 0 and f > 0:
if verbose: print i/60/24, f/60/24, (f-i)/60
density[index_target]+=np.floor((f-i)/60 / transit_duration)
if verbose: print '-'*30
density[index_target]=float(density[index_target]) / number_of_transit * 100.
# Associate the density to a grid point
id_ra = np.where(np.abs(ras-target.Coordinates()[0]) < 0.05)[0]
id_dec= np.where(np.abs(decs-target.Coordinates()[1]) < 0.05)[0]
# Transform density in prob of transit:
data_grid[id_dec, id_ra] = density[index_target]
if verbose: print 'obs start | obs end | hours of obs'
#plt.figure()
#for index_target, target in enumerate(worthy_targets):
# c = density[index_target]
# plt.scatter(target.Coordinates()[0]*180./np.pi,target.Coordinates()[1]*180./np.pi,c=c, cmap=cm.jet, vmin=np.amin(density), vmax=np.amax(density), edgecolor='none', s=50)
#plt.xlim([0,360])
#plt.ylim([-90,90])
#plt.grid()
#cb=plt.colorbar()
#cb.set_label('Probability of transit of min. %d hours' % transit_duration)
###########################################################################
### Plotting
# transform 0 into no plotting in the data matrix
if fancy: figures.set_fancy()
fig = plt.figure()
m = Basemap(projection='moll',lon_0=180,resolution='c')
extent = (-np.pi,np.pi,-np.pi/2.,np.pi/2.)
ra_grid *= const.RAD
#ra_grid -= 180.
#ra_grid = ra_grid - 180 #= (ra_grid-np.pi) #*180. / np.pi
dec_grid *= const.RAD
m.contour( ra_grid,dec_grid,data_grid,10,colors='k',latlon=True)
CS = m.contourf( ra_grid ,dec_grid,data_grid,100,cmap=plt.cm.gist_stern,latlon=True,vmin=0)
#m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-60.,90.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,360.,30.))
ra__ = np.arange(0., 360., 30.)
#print ra__
x, y = m(ra__,ra__*0)
for x,y,ra in zip(x,y,ra__):
plt.text(x, y, figures.format_degree(ra), color='black', ha='center', weight='black', size='small') ##93c6ed
t = np.linspace(0., np.amax(density),5)
tick_labels = ['%3.1f\%%' % a for a in t]
cbar = plt.colorbar(CS, orientation='horizontal',shrink=.8, ticks=t)
cbar.ax.set_xticklabels(tick_labels)
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = cbar.ax.get_position().bounds
cbar.ax.set_position([ll, bb+0.1, ww, hh])
cbar.set_label('Probability of seeing a transit of %d hours for V=%3.1f' % (transit_duration,mag_max))
if stars:
x,y = m(ra_stars, dec_stars)
m.plot(x,y, 'w*', markersize=10)
for label, xpt, ypt, y_offset in zip(labels, x, y,y_offset):
plt.text(xpt, ypt+y_offset, label, color='white', size='x-small', ha='center', weight='black') # #93a4ed
if targets_exo:
x,y = m(ra_tar*180./np.pi, dec_tar*180./np.pi)
x,y = m(ra_tar, dec_tar)
m.scatter(x,y, c='white', edgecolor='k', marker='+', s=20,zorder=10, lw=0.5)
# Save plot
if save:
fname = 'proba_%d_%dobs_%dinter_V%3.1f' % (orbit_id, transit_duration, max_interruptions, mag_max)
figures.savefig(folder_figures+fname, fig, fancy)
print 'saved as %s' % folder_figures+fname
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "18_plot_transit_proba.py",
"copies": "1",
"size": "8321",
"license": "bsd-3-clause",
"hash": -5888129707187427000,
"line_mean": 30.0485074627,
"line_max": 171,
"alpha_frac": 0.6434322798,
"autogenerated": false,
"ratio": 2.7589522546419096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39023845344419095,
"avg_score": null,
"num_lines": null
} |
# 18 septembre 2017
# astro.py
# projet S3
"""equation de la ligne B3V pour un graphique u-g vs g-r : 0.9909 * x - 0.8901"""
import re
def lire_fichier(fichier):
"""
:param fichier: nom du fichier en chaine de caractere. Le fichier est trie par colonnes
:return: retourne liste de listes qui correspondent chacune a une colonne du fichier
"""
nombre = re.compile('-?[0-9]+\.?[0-9]*')
fo = open(fichier, 'r')
data = fo.read()
fo.close()
nb_colonne = 1
for char in data:
if char == "\n":
break
if char == "|":
nb_colonne += 1
tableau = []
for i in range(nb_colonne):
tableau.append([])
colonne_actuelle = 0
chaine = ""
for char in data:
print(colonne_actuelle)
if char == "|":
if nombre.fullmatch(chaine) is not None:
tableau[colonne_actuelle].append(float(chaine))
else:
tableau[colonne_actuelle].append(chaine)
chaine = ""
colonne_actuelle += 1
elif char == "\n":
if nombre.fullmatch(chaine) is not None:
tableau[colonne_actuelle].append(float(chaine))
else:
tableau[colonne_actuelle].append(chaine)
chaine = ""
colonne_actuelle = 0
elif char != " ":
chaine += char
return tableau
def B3V_fo(x, a, b):
"""
:param x: abcsisse du point de la ligne B3V dont on veut obtenir l'ordonnée
:param a: equation de la ligne B3V = a*x+b
:param b: equation de la ligne B3V = a*x+b
:return: ordonnée du point de la ligne B3V correspondant à l'abscisse x (dans un graphique u-g vs g-r)
"""
return a * x + b
def find_hot_stars(u_g, g_r, a, b):
"""
:param u_g: liste des valeurs de u-g
:param g_r: liste des valeurs de g-r
la valeur numero i de u_g et celle de g_r doivent correspondre a la meme etoile (celle qui est a la ligne i dans le fichier texte
:param a: equation de la ligne B3V = a*x+b
:param b: equation de la ligne B3V = a*x+b
:return: listes des numeros des lignes a garder (correspondant aux etoiles chaudes), la premiere ligne est numerote 0
"""
to_keep = []
for i, x in enumerate(g_r): #la y a moyen d ameliorer la rapidite je pense
if u_g[i] != "" and x[i] != "" and u_g[i] < B3V_fo(x, a, b):
to_keep.append(i)
return to_keep
| {
"repo_name": "anthonygi13/Recherche_etoiles_chaudes",
"path": "astro.py",
"copies": "1",
"size": "2449",
"license": "apache-2.0",
"hash": -5172307262663984000,
"line_mean": 30.358974359,
"line_max": 133,
"alpha_frac": 0.5744071954,
"autogenerated": false,
"ratio": 2.850815850815851,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3925223046215851,
"avg_score": null,
"num_lines": null
} |
# 198 - House Robber (Easy)
# https://leetcode.com/problems/house-robber/
from typing import List, Tuple


class Solution:
def rob(self, nums: List[int]) -> int:
# From a list of houses, rob houses that are not adjacent.
# It's not enough with just robbing the odd houses or the even
# houses, there may be a case where in 4 houses it's better to
# rob the first and last one, but none of the in-between.
# Store the result of being at a given house and deciding whether
# to rob it or not how it impacts the long term result.
cache = {}
# Recursively at each position branch deciding whether to rob it
# or not, unless you robbed the previous house which means can't
# branch at current position.
        def recursively_rob(house_idx: int, nums: List[int]) -> Tuple[int, int]:
# If we had already found this house previously...
if cache.get(house_idx):
return cache.get(house_idx)
# If it's the last house, just return what if we robbed it or not.
if house_idx + 1 == len(nums):
last_rob = nums[house_idx]
last_not_rob = 0
return (last_rob, last_not_rob)
# Get the values of the next house onwards.
next_rob, next_not_rob = recursively_rob(house_idx + 1, nums)
# Get the value of the money in the current house.
curr_house_money = nums[house_idx]
# Now, if we choose to rob the current house, we just return as rob
# the value of the current house + the value onwards without robbing
# the next house.
curr_rob = curr_house_money + next_not_rob
# Then, if we choose to not rob the current house, we return the best
# option of robbing or not robbing the next house.
curr_not_rob = max(next_not_rob, next_rob)
cache[house_idx] = (curr_rob, curr_not_rob)
return cache[house_idx]
# This solution apparently is efficient but gets a stack overflow because
# there's cases in Leetcode with more than 1000 length so it goes really deep.
return max(recursively_rob(0, nums))
# This is a solution that instead of using recursion, uses the problem similar
# to fibonacci.
class Solution:
def rob(self, nums: List[int]) -> int:
# From a list of houses, rob houses that are not adjacent.
# It's not enough with just robbing the odd houses or the even
# houses, there may be a case where in 4 houses it's better to
# rob the first and last one, but none of the in-between.
# For no elements, the result is 0.
if len(nums) == 0:
return 0
# For one element, the result is the element.
if len(nums) == 1:
return sum(nums)
# For two elements, the result is the max of both elements.
if len(nums) == 2:
return max(nums)
# Initialize a lookup table.
table = [0 for _ in range(len(nums))]
# The base cases are, for the initial house, it's own value.
table[0] = nums[0]
# For the second house, the max of it's value and the initial house.
table[1] = max(nums[0], nums[1])
# Now, for every house onwards, compute what's best, if taking the value
# if the previous house without robbing at current, or rob at current and
# take the value of the 2nd previous house.
for idx in range(2, len(nums)):
rob = table[idx - 2] + nums[idx]
not_rob = table[idx - 1]
table[idx] = max(rob, not_rob)
return table[-1]
# Here's also a solution that does not use extra O(N) space but O(1):
class Solution:
def rob(self, nums: List[int]) -> int:
if len(nums) == 0:
return 0
if len(nums) == 1:
return sum(nums)
if len(nums) == 2:
return max(nums)
maxPrev2 = nums[0]
maxPrev1 = max(nums[0], nums[1])
# We do not need to keep the state for every position, we just need to keep
# track of the previous two positions visited.
for idx in range(2, len(nums)):
rob = maxPrev2 + nums[idx]
not_rob = maxPrev1
maxPrev2 = maxPrev1
maxPrev1 = max(rob, not_rob)
return maxPrev1
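

# A hedged sanity check (not part of the original file), using the canonical
# LeetCode examples for this problem.
if __name__ == "__main__":
    sol = Solution()
    assert sol.rob([1, 2, 3, 1]) == 4  # rob houses with 1 and 3
    assert sol.rob([2, 7, 9, 3, 1]) == 12  # rob houses with 2, 9, 1
    assert sol.rob([]) == 0
    print("house robber checks passed")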
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_198.py",
"copies": "1",
"size": "4390",
"license": "mit",
"hash": 1040088376132992000,
"line_mean": 36.5213675214,
"line_max": 86,
"alpha_frac": 0.5881548975,
"autogenerated": false,
"ratio": 3.7779690189328745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9855959610760174,
"avg_score": 0.002032861134540018,
"num_lines": 117
} |
# 198. House Robber - LeetCode
# https://leetcode.com/problems/house-robber/description/
class Solution(object):
def rob(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
if len(nums) == 1:
return nums[0]
if len(nums) == 2:
return max(nums[0],nums[1])
i = 2
prev_prev = nums[0]
prev = max(nums[0],nums[1])
while i < len(nums):
current_max = max( nums[i] + prev_prev, prev )
i += 1
prev_prev = prev
prev = current_max
return current_max
# TLE
# class Solution(object):
# def rob(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
# if len(nums) == 0:
# return 0
# if len(nums) == 1:
# return nums[0]
# if len(nums) == 2:
# return max(nums[0],nums[1])
# return max( nums[-1] + self.rob(nums[:-2]), nums[-2] + self.rob(nums[:-3]) )
ans = [
([],0),
([1],1),
([2,3],3),
([1,2,3],4),
([1,2,3,4],6),
([1,5,10,10,5,1],16),
([2,3,1],3),
([2,3,1,0,2],5),
([2,3,1,3,5],8),
]
s = Solution()
for i in ans:
r = s.rob(i[0])
print r, r == i[1] | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/198_house-robber.py",
"copies": "1",
"size": "1335",
"license": "mit",
"hash": -9088809548364366000,
"line_mean": 21.6440677966,
"line_max": 86,
"alpha_frac": 0.4172284644,
"autogenerated": false,
"ratio": 2.877155172413793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37943836368137934,
"avg_score": null,
"num_lines": null
} |
# 1 9 h
# 2 8 a g
# 3 7 b f
# 4 6 c e
# 5 d
class Solution:
def convert(self, s:str, num_rows: int) -> str:
if num_rows == 1:
return s
chars = [None] * len(s)
loop_size = 1 if num_rows == 1 else (num_rows - 1) * 2
loop_cnt, loop_rem = len(s) // loop_size, len(s) % loop_size
start_point = 0
for loop_idx in range(0, loop_cnt):
chars[loop_idx] = s[loop_size * loop_idx]
start_point += loop_cnt
if loop_rem > 0:
chars[loop_cnt] = s[loop_size * loop_cnt]
start_point += 1
for row_idx in range(1, num_rows-1):
for loop_idx in range(0, loop_cnt):
chars[start_point + 2 * loop_idx] = s[loop_size * loop_idx + row_idx]
chars[start_point + 2 * loop_idx + 1] = s[loop_size * loop_idx + loop_size - row_idx]
start_point += 2 * loop_cnt
if loop_rem > row_idx:
chars[start_point] = s[loop_size * loop_cnt + row_idx]
start_point += 1
if loop_rem >= 2 * num_rows - row_idx - 1:
chars[start_point] = s[loop_size * loop_cnt + 2 * num_rows - row_idx - 2]
start_point += 1
for loop_idx in range(0, loop_cnt):
chars[start_point + loop_idx] = s[loop_size * loop_idx + num_rows - 1]
start_point += loop_cnt
if loop_rem >= num_rows:
chars[start_point] = s[loop_size * loop_cnt + num_rows - 1]
return ''.join(chars)
    # Note: the algorithm below is inverted. Given the input laid out row by
    # row, it outputs the zigzag-reordered string.
def _convert(self, s: str, numRows: int) -> str:
if not s:
return s
num_rows = numRows
chars = [None] * len(s)
loop_size = 1 if num_rows == 1 else (num_rows-1) * 2
loop_cnt, loop_rem = len(s) // loop_size, len(s) % loop_size
fst_row_size = loop_cnt + (1 if loop_rem else 0)
for idx, ch in enumerate(s):
if idx >= fst_row_size:
break
chars[idx * loop_size] = ch
for row_idx in range(1, num_rows-1):
mid_row_size = loop_cnt * 2
start_point = fst_row_size + mid_row_size * (row_idx - 1)
if loop_rem >= 2 * num_rows - row_idx - 1:
mid_row_size += 2
elif loop_rem > row_idx:
mid_row_size += 1
if mid_row_size % 2 == 0:
for idx in range(0, mid_row_size, 2):
loop_idx = idx // 2
chars[loop_idx * loop_size + row_idx] = s[start_point + idx]
chars[loop_idx * loop_size + 2 * num_rows - row_idx - 2] = s[start_point + idx + 1]
else:
for idx in range(0, mid_row_size - 1, 2):
loop_idx = idx // 2
chars[loop_idx * loop_size + row_idx] = s[start_point + idx]
chars[loop_idx * loop_size + 2 * num_rows - row_idx - 2] = s[start_point + idx + 1]
loop_idx = idx // 2
chars[loop_idx * loop_size + row_idx] = s[start_point + idx]
if num_rows > 1:
start_point = fst_row_size + mid_row_size * row_idx
for idx in range(0, len(s) - start_point):
chars[idx * loop_size + num_rows - 1] = s[start_point + idx]
print(chars)
return ''.join(chars)
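
    # A hedged reference implementation (not in the original file): the
    # straightforward row-bucket walk, handy as a cross-check for convert().
    def convert_simple(self, s: str, num_rows: int) -> str:
        if num_rows == 1 or num_rows >= len(s):
            return s
        rows = [''] * num_rows
        row, step = 0, 1
        for ch in s:
            rows[row] += ch
            # bounce between the top and bottom rows
            if row == 0:
                step = 1
            elif row == num_rows - 1:
                step = -1
            row += step
        return ''.join(rows)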
if __name__ == '__main__':
sol = Solution()
# s = "19h28ag37bf46ce5d"
# print(sol._convert(s, 5))
# s = "PAYPALISHIRING"
# print(sol.convert(s, 3))
# s = "PAYPALISHIRING"
# print(sol.convert(s, 4))
# s = "ABCDEFGHIJKL"
# print(sol.convert(s, 3))
s = "ABC"
print(sol.convert(s, 3))
| {
"repo_name": "y-usuzumi/survive-the-course",
"path": "leetcode/6.ZigZag_Conversion/main.py",
"copies": "1",
"size": "3817",
"license": "bsd-3-clause",
"hash": -4860767147748007000,
"line_mean": 38.6105263158,
"line_max": 103,
"alpha_frac": 0.4820621844,
"autogenerated": false,
"ratio": 3.1022258862324814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40842880706324813,
"avg_score": null,
"num_lines": null
} |
''' 19-plot-transit-proba-mag-limits.py
=========================
AIM: Plots deepest achiveable magnitude according to 17-treat-ephemerids.py.
A minimum detection capability of 100% means that a whole orbit must be observable
INPUT: files: - <orbit_id>_misc/ephemerids_obs<transit_duration>h_<max_interruptions>inter_V<mag_max><_SAA?>.npz
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ with the following format:
<orbit_id>-inter<max_interruptions>-mag-<typep>-<detection_rate>.png/.eps/.pdf
CMD: python 19-plot-transit-proba-mag-limits.py
ISSUES: <none known>
REQUIRES:
- Latex
- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/maps/ --> figures
* <orbit_id>_misc/ --> storages of data
REMARKS: Not with real catalogue.
'''
###########################################################################
### INCLUDES
import sys
import numpy as np
import pylab as plt
import matplotlib.cm as cm
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.constants as const
import resources.figures as figures
from resources.targets import *
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
from mpl_toolkits.basemap import Basemap
###########################################################################
### PARAMETERS
# orbit_id
orbit_id = 702
apogee = 700
perigee = 700
# First minute in data set !
minute_ini = 0
# Last minute to look for
minute_end = 1440*365/12
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Maximum interruption time tolerated [min]
max_interruptions = 97
# Planet type
typep = 'Super-Earths'
if typep == 'coverage':
# Orbit period [d]
exoplanet_period = 50
# Minimum observable time for plots [h]
transit_duration = 0.0001
min_detection_rate = 100
elif typep == 'Super-Earths':
# Orbit period [d]
exoplanet_period = 50
# Minimum observable time for plots [h]
transit_duration = 6
min_detection_rate = 50
elif typep == 'Neptunes':
# Orbit period [d]
exoplanet_period = 13
# Minimum observable time for plots [h]
transit_duration = 3
min_detection_rate = 200
# Maximum visible magnitude
mag_max = 6.
# Minimum visible magnitude
mag_min = 6.
# Magnitude step
mag_sep = 1.
# Plot a few stars as well ?
stars= False
targets_exo=False
# Take SAA into account?
SAA = True
# Print much information ?
verbose = False
# Nice plots?
fancy=True
# Save plots?
save = True
# Show figures ?
show = True
# If set to True, then it will be observations of at least (period - max_interruptions)
# If set to False, then it is minimum (period - max_interruptions) minutes per orbit,
# not necessarily consecutive.
consecutive = False
# Minimal # of days of obs (if consecutive = False)
nb_obs_day = 50
# Minimal minutes to be observed per orbit (if consecutive = False)
min_t_obs_per_orbit = 50
if SAA: note = '_SAA'
else: note = ''
if not consecutive: note += '_cumul_'
#####################################################################################################################
# for every region in the sky/worthy target:
# >> Find when you can look with transit_duration [h] with maximal max_interruptions [min]
# >>>> return start and end time of observations with duration of interruptions [min]
# >> can we observe a transit ?
# >>>> Vary the start of transit time by transit_duration [h] until exoplanet_period [h]
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee, perigee)
number_of_transit = exoplanet_period * 24. / transit_duration
###########################################################################
### INITIALISATION
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
## Prepare grid
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
data_grid = np.zeros(np.shape(ra_grid))
if stars:
ra_stars=[101.2833, 95.9875, 213.9167, 219.9, 279.2333, 78.6333, 114.8250, 88.7917]
dec_stars=[-16.7161, -52.6956, 19.1822, -60.8339, 38.7836, -8.2014, 5.2250, 7.4069]
y_offset=[0.5e6,0.5e6,-0.8e6,0.5e6,0.5e6,0.5e6,-0.8e6,0.5e6]
labels = ['Sirius','Canopus','Arcturus',r'$\alpha\mathrm{Centauri}$','Vega','Rigel','Procyon','Betelgeuse']
if targets_exo: ra_tar, dec_tar, magn = np.loadtxt('resources/defined-exo.csv', delimiter=';', unpack=True)
for current_mag in np.arange(mag_max, mag_min-mag_sep, -1.*mag_sep):
# File name for the input file (in a compressed binary Python format)
if consecutive:
input_fname = 'ephemerids_obs%dh_%dinter_V%3.1f%s.npz' % (transit_duration,max_interruptions,current_mag,note)
else:
input_fname = 'ephemerids_%ddays_%dmin_V%3.1f%s.npz' % (nb_obs_day,min_t_obs_per_orbit,current_mag,note)
# loading data
print 'loading %s' % input_fname
sys.stdout.write("Loading worthy targets...\t")
sys.stdout.flush()
data = np.load(folder_misc+input_fname)
worthy_targets = data['worthy_targets']
start_obs=data['start_obs']
stop_obs=data['stop_obs']
interruptions_obs=data['interruptions_obs']
print 'Done, %d targets loaded for mag %3.1f' % (len(worthy_targets), current_mag)
###########################################################################
# cycling through the targets:
density = np.zeros(len(worthy_targets))
for index_target, target in enumerate(worthy_targets):
tar_start = start_obs[index_target,:]
tar_stop = stop_obs[index_target,:]
# print target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
if verbose: print index_target, target.Coordinates()[0]*180./np.pi, target.Coordinates()[1]*180./np.pi
for i, f in zip(tar_start, tar_stop):
if i >= 0 and f > 0:
if verbose: print i, f, (f-i)/60
density[index_target]+=np.floor((f-i)/60 / transit_duration)
if verbose: print '-'*30
density[index_target]=float(density[index_target]) / number_of_transit * 1.e2
if density[index_target] < min_detection_rate : density[index_target] = 0
# Associate the density to a grid point
id_ra = np.where(np.abs(ras-target.Coordinates()[0]) < 0.05)[0]
id_dec= np.where(np.abs(decs-target.Coordinates()[1]) < 0.05)[0]
# Transform density in prob of transit:
if data_grid[id_dec, id_ra] == 0 and density[index_target] > 0:
data_grid[id_dec, id_ra] = current_mag
if verbose: print 'obs start | obs end | hours of obs'
co = np.size(data_grid[np.where(data_grid>0)])
print 'coverage', float(co)/float(np.size(data_grid))*100, '%'
if typep == 'coverage': exit()
#plt.figure()
#for index_target, target in enumerate(worthy_targets):
# c = density[index_target]
# plt.scatter(target.Coordinates()[0]*180./np.pi,target.Coordinates()[1]*180./np.pi,c=c, cmap=cm.jet, vmin=np.amin(density), vmax=np.amax(density), edgecolor='none', s=50)
#plt.xlim([0,360])
#plt.ylim([-90,90])
#plt.grid()
#cb=plt.colorbar()
#cb.set_label('Probabilty of transit of min. %d hours' % transit_duration)
###########################################################################
### Plotting
# transform 0 into no plotting in the data matrix
mag_min= np.amin(data_grid[data_grid>0])
data_grid[data_grid < mag_min] = np.nan
if fancy: figures.set_fancy()
fig = plt.figure()
axes=fig.add_subplot(1,1,1, axisbg='black')
m = Basemap(projection='moll',lon_0=180,resolution='c')
extent = (-np.pi,np.pi,-np.pi/2.,np.pi/2.)
ra_grid *= const.RAD
#ra_grid -= 180.
#ra_grid = ra_grid - 180 #= (ra_grid-np.pi) #*180. / np.pi
dec_grid *= const.RAD
m.contour( ra_grid,dec_grid,data_grid,10,colors='k',latlon=True)
CS = m.contourf( ra_grid ,dec_grid,data_grid,int((mag_max-mag_min)/mag_sep+1),cmap=plt.cm.gist_rainbow,latlon=True)
#m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-60.,90.,30.),labels=[1,0,0,0])
m.drawmeridians(np.arange(0.,360.,30.))
ra__ = np.arange(0., 360., 30.)
#print ra__
x, y = m(ra__,ra__*0)
for x,y,ra in zip(x,y,ra__):
plt.text(x, y, figures.format_degree(ra), color='black', ha='center', weight='black', size='small') ##93c6ed
v = np.linspace(mag_min,mag_max, (mag_max-mag_min+1), endpoint=True)
t = map(figures.format_mag, v)
cbar = plt.colorbar(CS, ticks=v, orientation='horizontal',shrink=.8)
cbar.set_ticklabels(t)
#cbar = plt.colorbar(CS, orientation='horizontal',shrink=.8, ticks=t)
#cbar.ax.set_xticklabels(labels)
l,b,w,h = plt.gca().get_position().bounds
ll,bb,ww,hh = cbar.ax.get_position().bounds
cbar.ax.set_position([ll, bb+0.1, ww, hh])
cbar.set_label(r'$\mathrm{faintest}\ V\ \mathrm{magnitude\ for\ %s\ (%d\%%\ detection)}$' % (typep,min_detection_rate))
if stars:
x,y = m(ra_stars, dec_stars)
m.plot(x,y, 'w*', markersize=10)
for label, xpt, ypt, y_offset in zip(labels, x, y,y_offset):
plt.text(xpt, ypt+y_offset, label, color='white', size='x-small', ha='center', weight='black') # #93a4ed
if targets_exo:
x,y = m(ra_tar*180./np.pi, dec_tar*180./np.pi)
x,y = m(ra_tar, dec_tar)
m.scatter(x,y, c='white', edgecolor='k', marker='+', s=20,zorder=10, lw=0.5)
# Save plot
if save:
fname = '%d-inter%d-mag-%s-%d%s' % (orbit_id,max_interruptions,typep,min_detection_rate,note)
figures.savefig(folder_figures+fname, fig, fancy)
print 'saved as %s' % folder_figures+fname
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "19_plot_transit_proba_mag_limits.py",
"copies": "1",
"size": "9783",
"license": "bsd-3-clause",
"hash": -3155017024147321000,
"line_mean": 30.3557692308,
"line_max": 171,
"alpha_frac": 0.6493918021,
"autogenerated": false,
"ratio": 2.744949494949495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38943412970494945,
"avg_score": null,
"num_lines": null
} |
"""19. Remove Nth Node From End of List
Medium
URL: https://leetcode.com/problems/remove-nth-node-from-end-of-list/
Given a linked list, remove the n-th node from the end of list and return
its head.
Example:
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Follow up:
Could you do this in one pass?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class SolutionTwoPassesGetSize(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
Time complexity: O(m), where m is the length of list.
Space complexity: O(1).
"""
# If no node will be removed.
if n == 0:
return head
# Get the size of the linked list.
size = 0
current = head
while current:
size += 1
current = current.next
# If the head is removed.
if n == size:
return head.next
# Arrive at the (N-1)th node.
pos = -1
previous = None
current = head
while pos < size - n - 1:
pos += 1
previous = current
current = current.next
# If the Nth node exists, replace the Nth node by the (N+1)th,
# and return head.
if current:
previous.next = previous.next.next
return head
class SolutionOnePass(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
Time complexity: O(m), where m is the length of list.
Space complexity: O(1).
"""
# If no node will be removed.
if n == 0:
return head
        # Apply Two Pointers: fast and slow.
        fast = head
        slow = head
        # Move fast n nodes into the list.
        for i in range(n):
            fast = fast.next
        # If fast ran off the end, the head itself is the nth node from the end.
        if fast is None:
            return head.next
        # Move both pointers at the same pace; slow stops just before the
        # nth node from the end.
        while fast.next:
            fast = fast.next
            slow = slow.next
        # Remove the nth node.
        slow.next = slow.next.next
        return head
def main():
# Solution = SolutionTwoPassesGetSize
Solution = SolutionOnePass
# Given linked list: 1->2->3->4->5, and n = 2
# Remove the 2nd node from the end: 1->2->3->5.
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
print Solution().removeNthFromEnd(head, 2).next.next.next.val
if __name__ == '__main__':
main()
| {
"repo_name": "bowen0701/algorithms_data_structures",
"path": "lc0019_remove_nth_node_from_end_of_list.py",
"copies": "1",
"size": "2892",
"license": "bsd-2-clause",
"hash": -1116133990393282000,
"line_mean": 23.3025210084,
"line_max": 80,
"alpha_frac": 0.5605117566,
"autogenerated": false,
"ratio": 3.7268041237113403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.478731588031134,
"avg_score": null,
"num_lines": null
} |
#1,a 2,b 3,c
class Node(object):
nextPointer = None
number = None
def removeNext(self):
if(self.nextPointer != None):
self.nextPointer = self.nextPointer.nextPointer
testList1 = Node()
testList1.number = 1
testList2 = Node()
testList2.number = 1
testList3 = Node()
testList3.number = 2
testList4 = Node()
testList4.number = 3
testList5 = Node()
testList5.number = 5
testList6 = Node()
testList6.number = 8
testList1.nextPointer = testList2
testList2.nextPointer = testList3
testList3.nextPointer = testList4
testList4.nextPointer = testList5
testList5.nextPointer = testList6
#node = Node()
#node.nextPointer = 1
#print(node.nextPointer)
def verifyListEquals(A, B):
if len(A) != len(B):
return False
for i in range(0, len(A)):
if (A[i] != B[i]):
return False
return True
# given a linked list by its head node, given a new node to append, append it.
def append(head, node):
currentNode = head
if currentNode == None:
return node
else:
while (currentNode.nextPointer != None):
currentNode = currentNode.nextPointer
currentNode.nextPointer = node
return head
# given a linked list by its head node, print all its data, one per line
def printList(head):
while (head != None):
print(head.number)
head = head.nextPointer
print("--")
# insert node after a node
def insert(node,newNode):
oldNodePointer = node.nextPointer
node.nextPointer = newNode
newNode.nextPointer = oldNodePointer
def reverse(node):
#### REPLACEMENT METHOD overwrites the list and returns the last (now first) element of the linked list
# BUG: passing a non-head node causes issues
prevNode = None
while(node != None):
nextNode = node.nextPointer
node.nextPointer = prevNode
prevNode = node
node = nextNode
return prevNode
def deepReverse2(node):
if(node == None):
return None
else:
newHead = Node()
newHead.number = node.number
newHead.nextPointer = node.nextPointer
return newHead
def deepReverse(node):
if(node == None):
print("Called deepReverse on None")
print("deepReverse is returning from being called on None")
return None
else:
print("Called deepReverse on " +str(node.number))
deepReverseOfRemainder = deepReverse(node.nextPointer)
if node.nextPointer == None:
print("deepReverse returned from being called on None")
else:
print("deepReverse returned from being called on "+ str(node.nextPointer.number))
newAppend = Node()
newAppend.number = node.number
newAppend.nextPointer = None
        deepReverseOfRemainder = append(deepReverseOfRemainder, newAppend)
print("deepReverse is returning from being called on "+ str(node.number))
printList(deepReverseOfRemainder)
return deepReverseOfRemainder
# return deepReverse(node)
testList4.removeNext()
testList6.removeNext()
#printList(testList1)
newHead = reverse(testList1)
#printList(newHead)
def runTests():
null = None
printList(deepReverse(null))
printList(null)
singleton = Node()
singleton.number = 17
deepReversedSingleton = deepReverse(singleton)
copyOfSingleton = deepReverse(deepReversedSingleton)
#if verifyListEquals(singleton, copyOfSingleton):
printList(deepReversedSingleton)
deepReversedSingleton.number = 11000000000
printList(singleton)
middleton = Node()
middleton.number = 5
middleton.nextPointer = singleton
deepReversedSingleton = deepReverse(singleton)
deepReversedMiddleton = deepReverse(middleton, deepReversedSingleton)
#deepReversedMiddleton = deepReverse(middleton)
printList(deepReversedMiddleton)
deepReversedMiddleton.number = 11000000000
printList(middleton)
talisker = Node()
talisker.number = "scotch"
talisker.nextPointer = middleton
#deepReversedSingleton = deepReverse(singleton)
#deepReversedMiddleton = deepReverseHelper(middleton, deepReversedSingleton)
deepReversedTalisker = deepReverseHelper(talisker, deepReversedMiddleton)
printList(deepReversedTalisker)
deepReversedTalisker.number = 11111
printList(talisker)
#runTests()
singleton = Node()
singleton.number = "singleton"
middleton = Node()
middleton.number = "middleton"
middleton.nextPointer = singleton
talisker = Node()
talisker.number = "talisker"
talisker.nextPointer = middleton
deepReversedTalisker = deepReverse(talisker)
printList(deepReversedTalisker)
deepReversedTalisker.number = 11111
printList(talisker)
| {
"repo_name": "petersrinivasan/neopeng",
"path": "scratch.py",
"copies": "1",
"size": "4681",
"license": "unlicense",
"hash": -5926890737106559000,
"line_mean": 27.5426829268,
"line_max": 103,
"alpha_frac": 0.6964323862,
"autogenerated": false,
"ratio": 3.682926829268293,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9761125287119503,
"avg_score": 0.02364678566975803,
"num_lines": 164
} |
# 1. ABBA
# 2. AABB
# 3. ABAABAAAAAAAAA
# First idea: if consecutive positions of A are an even number of positions apart, the word is bubbly; but that alone does not work for example #3, hence the extra B check below.
import pdb
def isBubbly(word):
arr_A = []
arr_B = []
for i in range(len(word)):
if word[i] == 'A':
arr_A.append(i)
if word[i] == 'B':
arr_B.append(i)
if len(arr_A) == 0:
return True
if len(arr_A)%2 != 0:
return False
prev = -1
check_B = False
for i in range(0, len(arr_A)):
curr = arr_A[i]
if prev == -1:
prev = arr_A[i]
continue
if (curr-prev)%2 != 0:
prev = -1
continue
else:
check_B = True
break
prev = -1
if(not check_B):
return True
for i in range(0, len(arr_B)):
curr = arr_B[i]
if prev == -1:
prev = arr_B[i]
continue
if (curr-prev)%2 != 0:
prev = -1
continue
else:
return False
return True
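
# A hedged stack-based cross-check (not part of the original submission):
# a word is bubbly iff repeatedly cancelling adjacent equal letters empties
# it, which matches the non-crossing "arches" picture of the problem.
def isBubblyStack(word):
    stack = []
    for ch in word:
        if stack and stack[-1] == ch:
            stack.pop()
        else:
            stack.append(ch)
    return not stack
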
if __name__ == "__main__":
M = int(raw_input())
#M = 1
num_bubs = 0
for i in range(M):
word = raw_input()
#word = "ABAABAAAAAAAAA"
if len(word)%2 != 0:
continue
if(isBubbly(word)):
num_bubs += 1
print word, "True"
else:
print word, "False"
print num_bubs
| {
"repo_name": "atishbits/101",
"path": "bubblyArray.py",
"copies": "1",
"size": "1453",
"license": "mit",
"hash": -3488390448324114400,
"line_mean": 22.8196721311,
"line_max": 137,
"alpha_frac": 0.4528561597,
"autogenerated": false,
"ratio": 3.3790697674418606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9286601668029136,
"avg_score": 0.009064851822544896,
"num_lines": 61
} |
# 1a finished
def decimals(n):
temp = list(divmod(10,n))
yield temp[0]
while temp[1]:
temp = list(divmod(temp[1]*10,n))
yield temp[0]
return
# 1b finished
def genlimit(g, limit):
for i in range(limit):
yield next(g)
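
# A quick hedged usage example (not part of the assignment): the first five
# decimal digits of 1/7, produced by decimals() and bounded by genlimit().
# >>> list(genlimit(decimals(7), 5))
# [1, 4, 2, 8, 5]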
# 2 finished
def decimals2(n):
    # Yield the decimal digits of 1/n one at a time; when the expansion
    # starts repeating, yield the repeating block as a list and stop.
    remainder = 1
    digits = []
    seen = {}  # remainder -> index of the digit it is about to produce
    while remainder != 0:
        if remainder in seen:
            yield digits[seen[remainder]:]
            return
        seen[remainder] = len(digits)
        remainder *= 10
        digits.append(remainder // n)
        yield digits[-1]
        remainder %= n
# 3a finished
def select(l, selectors):
    # Keep l[i] whenever selectors[i] is truthy (like itertools.compress).
    return [item for item, keep in zip(l, selectors) if keep]
# 3b finished
def nbitIntToDigits(i, n):
result=[]
count = 0
while i > 0:
result.append(i&1)
i = i // 2
count += 1
if count < n:
for i in range(n - count):
result.append(0)
return result[::-1]
# 3c finished
def powerSet(l):
result = []
def powerset(l):
if len(l) > 0:
head = l[0]
for tail in powerSet(l[1:]):
yield [head] + tail
yield tail
else:
yield []
for item in powerset(l):
result.append(item)
return result
# 4 finished
# when testing this function, change the path to your own file path
def oil(path):
f = open(path)
count = 1
data = []
leftside = True
for i in range(7):
next(f)
while True:
data = []
x = next(f)
years = x.split()
for n in f:
temp = n.split()
data.append(temp)
if not (count % 13):
break
count += 1
count += 1
if len(years)==0:
return
yield report(years, data, leftside)
yield report(years, data, not leftside)
def report(years, data, leftside):
price = []
if leftside:
year = years[0]
total = data[0][1]
for i in range(1,len(data)):
price.append(float(data[i][5]))
mean = sum(price)/float(len(data)-1)
maxPrice = float(max(price))
minPrice = float(min(price))
else:
year = years[1]
total = data[0][8]
for i in range(1,len(data)):
price.append(float(data[i][12]))
mean = sum(price)/float(len(data)-1)
maxPrice = max(price)
minPrice = min(price)
return year + ": quan: total=" + total + " prices: max = " + str(maxPrice) + " min = " + str(minPrice) + " avg = " + str(mean)
# 5a finished
def countBases(dna):
result = []
counta = 0
countc = 0
countg = 0
countt = 0
for i in range(len(dna)):
if dna[i] == 'A':
counta += 1
elif dna[i] == 'C':
countc += 1
elif dna[i] == 'G':
countg += 1
elif dna[i] == 'T':
countt += 1
result = [counta, countc, countg, countt]
return result
# 5b finished
def reverseComplement(dna):
result = ''
for i in range(len(dna)):
if dna[i] == 'A':
result += 'T'
elif dna[i] == 'C':
result += 'G'
elif dna[i] == 'G':
result += 'C'
elif dna[i] == 'T':
result += 'A'
return result[::-1]
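# Quick illustrative checks of the two helpers above (inputs assumed, not part
# of the assignment):
assert countBases('ACGTA') == [2, 1, 1, 1]
assert reverseComplement('ACGT') == 'ACGT' # ACGT is its own reverse complement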
| {
"repo_name": "iamacewhite/COMSW3101",
"path": "hw2.py",
"copies": "1",
"size": "3211",
"license": "mit",
"hash": -1390300911266678000,
"line_mean": 19.7161290323,
"line_max": 127,
"alpha_frac": 0.5471815634,
"autogenerated": false,
"ratio": 2.821616871704745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3868798435104745,
"avg_score": null,
"num_lines": null
} |
"""[-1-]Attribution-Share Alike 3.0 License, copyright (c) 2010 Pieter Hintjens, modified @alpaca-tc
[-2-]=====================================================================
[-3-]kvmsg - key-value message class for example applications
[-4-]
[-5-]Author: Min RK <benjaminrk@gmail.com>
[-6-]
"""
import struct #[-9-] for packing integers
import sys
from uuid import uuid4
import zmq
import zmq.utils.jsonapi as json
class KVMsg(object):
"""[-17-]
[-18-]Message is formatted on wire as 5 frames:
"""
key = None
sequence = 0
uuid=None
properties = None
body = None
def __init__(self, sequence, uuid=None, key=None, properties=None, body=None):
assert isinstance(sequence, int)
self.sequence = sequence
if uuid is None:
uuid = uuid4().bytes
self.uuid = uuid
self.key = key
self.properties = {} if properties is None else properties
self.body = body
def __getitem__(self, k):
return self.properties[k]
def __setitem__(self, k, v):
self.properties[k] = v
def get(self, k, default=None):
return self.properties.get(k, default)
def store(self, dikt):
"""[-46-] Store me in a dict if I have anything to store
[-47-] else delete me from the dict.[-end-]"""
if self.key is not None and self.body is not None:
dikt[self.key] = self
elif self.key in dikt:
del dikt[self.key]
def send(self, socket):
key = '' if self.key is None else self.key
seq_s = struct.pack('!q', self.sequence)
body = '' if self.body is None else self.body
prop_s = json.dumps(self.properties)
socket.send_multipart([ key, seq_s, self.uuid, prop_s, body ])
@classmethod
def recv(cls, socket):
return cls.from_msg(socket.recv_multipart())
@classmethod
def from_msg(cls, msg):
key, seq_s, uuid, prop_s, body = msg
key = key if key else None
seq = struct.unpack('!q',seq_s)[0]
body = body if body else None
prop = json.loads(prop_s)
return cls(seq, uuid=uuid, key=key, properties=prop, body=body)
def dump(self):
if self.body is None:
size = 0
data='NULL'
else:
size = len(self.body)
data=repr(self.body)
print >> sys.stderr, "[seq:{seq}][key:{key}][size:{size}] {props} {data}".format(
seq=self.sequence,
key=self.key,
size=size,
props=json.dumps(self.properties),
data=data,
)
#[-91-] Runs self test of class
def test_kvmsg (verbose):
print " * kvmsg: ",
ctx = zmq.Context()
output = ctx.socket(zmq.DEALER)
output.bind("ipc://kvmsg_selftest.ipc")
input = ctx.socket(zmq.DEALER)
input.connect("ipc://kvmsg_selftest.ipc")
kvmap = {}
kvmsg = KVMsg(1)
kvmsg.key = "key"
kvmsg.body = "body"
if verbose:
kvmsg.dump()
kvmsg.send(output)
kvmsg.store(kvmap)
kvmsg2 = KVMsg.recv(input)
if verbose:
kvmsg2.dump()
assert kvmsg2.key == "key"
kvmsg2.store(kvmap)
assert len(kvmap) == 1
kvmsg = KVMsg(2, key="key", body="body")
kvmsg["prop1"] = "value1"
kvmsg["prop2"] = "value2"
kvmsg["prop3"] = "value3"
assert kvmsg["prop1"] == "value1"
if verbose:
kvmsg.dump()
kvmsg.send(output)
kvmsg2 = KVMsg.recv(input)
if verbose:
kvmsg2.dump()
#[-130-] ensure properties were preserved
assert kvmsg2.key == kvmsg.key
assert kvmsg2.body == kvmsg.body
assert kvmsg2.properties == kvmsg.properties
assert kvmsg2["prop2"] == kvmsg["prop2"]
print "OK"
if __name__ == '__main__':
test_kvmsg('-v' in sys.argv)
| {
"repo_name": "alpaca-tc/comment_extractor",
"path": "spec/assets/source_code/python.py",
"copies": "1",
"size": "3781",
"license": "mit",
"hash": 4323281875604764700,
"line_mean": 26.2014388489,
"line_max": 100,
"alpha_frac": 0.5681036763,
"autogenerated": false,
"ratio": 3.3371579876434248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9378040894277342,
"avg_score": 0.005444153933216746,
"num_lines": 139
} |
#!/bin/python
from deap import base
def _maturePopulation(population):
for W in range(len(population)):
try:
assert (population[W].Age)
except:
population[W].Age = 0
population[W].Age += 1
def _checkRetirement(individue, statistics, ageBoundary):
# (Minetti, 2005)
indscore = individue.fitness.values[0]
N = (ageBoundary[1] - ageBoundary[0]) / 2
aptitude = indscore - statistics['avg']
if aptitude > 0:
ABC = sum(ageBoundary) / 2
RSB = statistics['max'] - statistics['avg']
else:
ABC = ageBoundary[0]
RSB = statistics['avg'] - statistics['min']
RSB = max(1, RSB)
survival = ABC + (N * aptitude / RSB)
# oldenough = individue.Age > ageBoundary[0]
#relativeAge = (individue.Age-ageBoundary[0]) / (ageBoundary[1]-ageBoundary[0])
retires = individue.Age - survival > ageBoundary[1]
# print(survival)
return retires
def _killElders(population, statistics, ageBoundary):
for I in range(len(population)):
if _checkRetirement(population[I], statistics, ageBoundary):
population[I] = None
population = [x for x in population if x]
return population
def ageZero(population):
for q in range(len(population)):
population[q].Age = 0
def populationAges(ageBoundary, population, averageScore):
_maturePopulation(population)
population = _killElders(population, averageScore, ageBoundary)
return population
| {
"repo_name": "Gab0/gekkoJaponicus",
"path": "promoterz/supplement/age.py",
"copies": "1",
"size": "1494",
"license": "mit",
"hash": -7418057726121785000,
"line_mean": 28.88,
"line_max": 83,
"alpha_frac": 0.6445783133,
"autogenerated": false,
"ratio": 3.5319148936170213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4676493206917021,
"avg_score": null,
"num_lines": null
} |
# 1.) Calculate lowest stats needed to win
# -Cheapest gear correlates to least stat bonuses
# a.) Most possible rounds is min(playerHP, bossHP)
# b.) # of rounds is min(boss_HP / (player_DMG - boss_AMR), player_HP / (boss_DMG - player_AMR))
# c.) Player wins if rounds * (player_DMG - boss_AMR) >= boss_HP
# AND if (rounds - 1) * (boss_DMG - player_AMR) < player_HP
# 2.) Calculate cheapest gear set to achieve calculated stats
#After viewing reddit solution, probably simpler to calculate all possible gear combos and pick most expensive losing one
from math import ceil
from itertools import product
import sys
def win(player_MAXHP, player_DMG, player_AMR, boss_MAXHP, boss_DMG, boss_AMR):
if ceil(boss_MAXHP / (max(player_DMG - boss_AMR, 1))) <= ceil(player_MAXHP / (max(boss_DMG - player_AMR, 1))):
#player wins
return True
else:
return False
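# Sanity check of win() with assumed example stats (not from the puzzle input):
# the player needs ceil(100/(8-2)) = 17 rounds, the boss ceil(100/(8-5)) = 34,
# so the player wins.
assert win(100, 8, 5, 100, 8, 2)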
def set_stats(equipment_set):
cost = sum([item[1] for item in equipment_set])
dam = sum([item[2] for item in equipment_set])
amr = sum([item[3] for item in equipment_set])
return (cost, dam, amr)
#key: (cost, dmg, amr)
weapon = [
('Dagger', 8 , 4, 0),
('Shortsword', 10 , 5, 0),
('Warhammer', 25 , 6, 0),
('Longsword', 40 , 7, 0),
('Greataxe', 74 , 8, 0)
]
armor = [
('No Armor', 0 , 0, 0),
('Leather', 13 , 0, 1),
('Chainmail', 31 , 0, 2),
('Splintmail', 53 , 0, 3),
('Bandedmail', 75 , 0, 4),
('Platemail', 102, 0, 5)
]
ring = [
('Damage +1', 25 , 1, 0),
('Damage +2', 50 , 2, 0),
('Damage +3', 100, 3, 0),
('Defense +1', 20 , 0, 1),
('Defense +2', 40 , 0, 2),
('Defense +3', 80 , 0, 3),
('Bare R Hand', 0 , 0, 0),
('Bare L Hand', 0 , 0, 0)
]
MAX_player_DMG = 13
MAX_player_AMR = 10
player_MAXHP = 100
parsed_input = [item.split(': ') for item in sys.stdin.read().split('\n')]
boss_MAXHP = int(parsed_input[0][1])
boss_DMG = int(parsed_input[1][1])
boss_AMR = int(parsed_input[2][1])
#generate all possible armor sets, with right hand ring not equal to left hand ring
equipment_sets = [x for x in list(product(weapon, armor, ring, ring)) if x[2] != x[3]]
max_set_cost = 0
for equipment_set in equipment_sets:
set_cost, player_DMG, player_AMR = set_stats(equipment_set)
if not win(player_MAXHP, player_DMG, player_AMR, boss_MAXHP, boss_DMG, boss_AMR):
if set_cost > max_set_cost:
max_set_cost = set_cost
print(max_set_cost) | {
"repo_name": "twrightsman/advent-of-code-2015",
"path": "advent_day21_pt2.py",
"copies": "1",
"size": "2536",
"license": "unlicense",
"hash": -2349327138498958300,
"line_mean": 31.1139240506,
"line_max": 121,
"alpha_frac": 0.5871451104,
"autogenerated": false,
"ratio": 2.715203426124197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3802348536524197,
"avg_score": null,
"num_lines": null
} |
#1. call the default constructor: variable = NewsAPI.NewsAPI(2,1,2015,4,11,2016,'BP','35b806d26e76f895fe31669dea30f528c36c94e6')
#2. try to get data : variable.startGetData(), it will returns success if it is works or error (Check API key if it returns error)
#3. get the sentiment score in list format double: variable.getSentiment()
#4. get the Difference price percentage in list format double: variable.getDifferencePercentage(). For now, difference price will return 0 if the news date is on the weekend or holiday
#5. install yahoo-finance API: pip install yahoo-finance
#6. another API Key for Alchemy: 56283d7d6075b9d30773e1ceb440e1b2d029f438
# Final Output
#1. BPData = variable = NewsAPI.NewsAPI(2,1,2015,4,11,2016,'BP','35b806d26e76f895fe31669dea30f528c36c94e6')
#2. BPData.startGetData()
#3. Make the condition statement to check if it is returning 'success' or not
#4. BPData.output()
#Verison 1.0: Only do the NewsAPI (Get only the sentimentscore and timestamp)
#Version 1.1: Get the stock price
#Version 1.2: Get the sentiment score and type
import requests
import time
import datetime
from yahoo_finance import Share
from pprint import pprint
import numpy as np
import csv
class NewsAPI:
#Default Constructor
def __init__(self,ms,ds,ys,me,de,ye,company,apikey):
#Start Date in int (Month, Day, Year)
self.ms = ms
self.ds = ds
self.ys = ys
#End Date in int (Month, Day, Year)
self.me = me
self.de = de
self.ye = ye
#Company Name and AlchemyAPI key
self.company = company
self.apikey = apikey
#Results global variable
self.rawdata = ''
self.rawjson = ''
self.results = ''
self.times = ''
self.timeString = ''
self.sentimentScore = ''
self.differencePercentage = ''
#Try to get the data
def startGetData(self):
start = datetime.date(self.ys,self.ms,self.ds)
startunix = str(time.mktime(start.timetuple()))[0:10]
end = datetime.date(self.ye,self.me,self.de)
endunix = str(time.mktime(end.timetuple()))[0:10]
#Sent the API address
url = 'https://access.alchemyapi.com/calls/data/GetNews?apikey=' + self.apikey + '&return=enriched.url.enrichedTitle.docSentiment&start=' + startunix + '&end=' + endunix + '&q.enriched.url.enrichedTitle.entities.entity=|text=' + self.company + ',type=company|&q.enriched.url.enrichedTitle.taxonomy.taxonomy_.label=finance&count=200&outputMode=json'
#Request the data
self.raw = requests.get(url)
#Error handling
        # retry the request until the API responds with HTTP 200
        while self.raw.status_code != 200:
            time.sleep(1)
            self.raw = requests.get(url)
self.rawjson = self.raw.json()
if (self.rawjson['status'] == 'ERROR'):
return 'cannot fetch data'
else:
if (self.rawjson['result'].has_key('docs') == True):
self.results = self.rawjson['result']['docs']
return 'success'
else:
return 'empty data'
#Extract the timestamp from the alchemyapi
def getTimeStamp(self):
times = []
timesString = []
for time in self.results:
timeszzz = time['timestamp']
times.append(timeszzz)
timeStr = str(datetime.datetime.fromtimestamp(timeszzz).strftime('%Y-%m-%d'))
timesString.append(timeStr)
self.times = times
self.timeString = timesString
return timesString
#Get the sentiment score from the alchemyapi [-1,1]
def getSentimentScore(self):
sentiment = []
for sent in self.results:
sentimentzzz = sent['source']['enriched']['url']['enrichedTitle']['docSentiment']['score']
sentiment.append(sentimentzzz)
self.sentimentScore = sentiment
return sentiment
    #Get the sentiment type: whether it is positive, neutral, or negative
def getSentimentType(self):
sentiment = []
for sent in self.results:
sentimentzzz = sent['source']['enriched']['url']['enrichedTitle']['docSentiment']['type']
sentiment.append(sentimentzzz)
return sentiment
#Get the difference percentage of closing price from yahoofinanceapi
def getDifferencePercentage(self):
closing = []
shareName = Share(self.company)
self.getTimeStamp()
startDate=''
endDate=''
hist=''
for t in self.times:
todayTimeStamp = t
yesterdayTimeStamp = t-86400
startDate = str(datetime.datetime.fromtimestamp(todayTimeStamp).strftime('%Y-%m-%d'))
yesterdayDate=str(datetime.datetime.fromtimestamp(yesterdayTimeStamp).strftime('%Y-%m-%d'))
todayHist = shareName.get_historical(startDate, startDate)
yesterdayHist = shareName.get_historical(yesterdayDate,yesterdayDate)
while(len(todayHist)==0):
todayTimeStamp = todayTimeStamp+86400
startDate = str(datetime.datetime.fromtimestamp(todayTimeStamp).strftime('%Y-%m-%d'))
todayHist = shareName.get_historical(startDate, startDate)
while(len(yesterdayHist)==0):
yesterdayTimeStamp= yesterdayTimeStamp-86400
yesterdayDate=str(datetime.datetime.fromtimestamp(yesterdayTimeStamp).strftime('%Y-%m-%d'))
yesterdayHist = shareName.get_historical(yesterdayDate,yesterdayDate)
closingPriceToday = float(todayHist[0]['Close'])
closingPriceYesterday = float(yesterdayHist[0]['Close'])
difference = (float(closingPriceYesterday) - float(closingPriceToday))*100.0/float(closingPriceYesterday)
diff2 = float(format(difference, '.3f'))
closing.append(diff2)
self.differencePercentage = closing
return closing
#call the overall function and return the csv file of preprocessed data for training the machine learning
def output(self):
csvFile = csv.writer(open("OutputFile.csv","w"))
a = self.getTimeStamp()
b = self.getSentimentScore()
c = self.getDifferencePercentage()
timeStamp = np.array(a)
d = np.array(b)
sentiment = np.around(d,decimals = 3)
difference = np.array(c)
uniqueTimeStamp, indices = np.unique(timeStamp,return_index = True)
finalArray = []
#Find each unique value
for i in uniqueTimeStamp:
index = timeStamp==i
sent = np.average(sentiment[index])
diff = difference[index]
arrayTemp = [round(diff[0],2),np.around(sent,decimals = 3)]
csvFile.writerow([round(diff[0],2),sent])
finalArray.append(arrayTemp)
return finalArray
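# Usage sketch following the notes at the top of this file (company symbol and
# API key are placeholders here):
#   BPData = NewsAPI(2, 1, 2015, 4, 11, 2016, 'BP', '<alchemy-api-key>')
#   if BPData.startGetData() == 'success':
#       BPData.output() # writes OutputFile.csv with (price change, sentiment) rows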
| {
"repo_name": "ryanstrat/stock-predictions",
"path": "NewsAPI.py",
"copies": "1",
"size": "5991",
"license": "apache-2.0",
"hash": -2555928798839856000,
"line_mean": 38.1568627451,
"line_max": 350,
"alpha_frac": 0.723251544,
"autogenerated": false,
"ratio": 3.0566326530612247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42798841970612245,
"avg_score": null,
"num_lines": null
} |
# 1. How to define a class
class PublicClass(object):
    # 2. How to define an attribute of type int
    __privateInt=0 #private
    # 6. How to define a constructor
    def __init__(self):
        # 3. How to define a string attribute and fill it with an initial value
        self.publicString="Hello"
        # 5. How to define a constant
        self.PI=3.14
    # 7a. How to define a method with a parameter and no return value
    # 7b. How to write documentation for method 7a
    def setPrivateInt(self, privateInt):
        # 8. How to set an attribute from inside the class itself
        self.__privateInt=privateInt
    # 9a. How to define a method with a return value
    # 9b. How to write documentation for method 9a
    def getPrivateInt(self):
        # 10. How to return a value
        return self.__privateInt
# 11. How to derive a class
class PublicSubClass(PublicClass):
    # 12. How to override a method
    def getPrivateInt(self):
        # 13. How to access a method on the superclass
        return 2 * super(PublicSubClass, self).getPrivateInt()
class LanguagesExploration():
    #def main(self):
    # 14. How to instantiate a new class
    publicClass=PublicClass()
    # 15. How to call a method with a parameter
    publicClass.setPrivateInt(13)
    # 16. How to call a method and get its return value
    print("[13] publicClass.getPrivateInt() = %s" % publicClass.getPrivateInt())
    # 17. How to change a public attribute from outside the class
    publicClass.publicString = "World!"
    # 18. How to get a public attribute from outside the class
    print("[World!] publicClass.publicString = %s" % publicClass.publicString)
    # 19. How to set the value of a static attribute
    publicClass.staticInt=10
    # 20. How to get the value of a static attribute
    print("[10] publicClass.staticInt = %s"%publicClass.staticInt)
    # 21. How to get the value of a constant
    print("[3.14] publicClass.PI = %s"%publicClass.PI)
    # 22. How to instantiate a class through its subclass
    publicSubClass = PublicSubClass();
    publicSubClass.setPrivateInt(13);
    #23. Making sure method overriding works
    print("[26] publicSubClass.getPrivateInt= %s"%publicSubClass.getPrivateInt())
| {
"repo_name": "pascalalfadian/LanguagesExploration",
"path": "B/LanguagesExploration.py",
"copies": "2",
"size": "2151",
"license": "mit",
"hash": -6863973044293738000,
"line_mean": 38.1090909091,
"line_max": 82,
"alpha_frac": 0.7475592748,
"autogenerated": false,
"ratio": 2.950617283950617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4698176558750617,
"avg_score": null,
"num_lines": null
} |
# 1. Claim a message
# 2. Grab HTML
# 3. Delete message
# 4. Push more URLs
from bs4 import BeautifulSoup
import sys
from helpers import client
import requests
import urlparse
def scrape_generator(url):
parent = urlparse.urlsplit(url)
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
for tag in soup.findAll('a'):
        link = tag.get('href')
        if not link:
            continue  # skip anchors without an href attribute
        if '#' in link:
            link = ''.join(link.split('#')[:-1])
if link.startswith('mailto:'):
continue
elif urlparse.urlsplit(link).scheme in ('http', 'https'):
yield link
elif link.startswith('/'):
yield parent.scheme + '://' + parent.netloc + link
elif parent.path.endswith('/'):
yield parent.scheme + '://' + parent.netloc + parent.path + link
else:
yield parent.scheme + '://' + parent.netloc + parent.path + '/' + link
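# Note: the manual resolution above mirrors urlparse.urljoin; illustrative
# values (not from the crawl):
#   urlparse.urljoin('http://example.com/a/b', 'c')  # 'http://example.com/a/c'
#   urlparse.urljoin('http://example.com/a/', 'c')   # 'http://example.com/a/c'
#   urlparse.urljoin('http://example.com/a', '/c')   # 'http://example.com/c'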
seen = {}
def scrapable(uri):
if not uri.startswith(sys.argv[1]):
return False
if seen.get(uri):
return False
seen[uri] = True
return True
if __name__ == '__main__':
scrape = client.queue('scrape')
ingest = client.queue('ingest')
complete = client.queue('completed')
while True:
claimed = scrape.claim(ttl=180, grace=60, limit=1)
for msg in claimed:
messages = [
{'body': u, 'ttl': 180}
for u in scrape_generator(msg.body)
if scrapable(u)
]
if len(messages):
ingest.post(messages)
complete.post({'body': msg.body, 'ttl': 300})
msg.delete()
| {
"repo_name": "ryansb/zaqar-webscraper-demo",
"path": "scraper.py",
"copies": "1",
"size": "1654",
"license": "mit",
"hash": -9025912232424904000,
"line_mean": 27.0338983051,
"line_max": 82,
"alpha_frac": 0.5507859734,
"autogenerated": false,
"ratio": 3.919431279620853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9961820199647284,
"avg_score": 0.0016794106747139216,
"num_lines": 59
} |
# 1. Clear result High word (Bytes 2 and 3)
# 2. Load Loop counter with 16.
# 3. Shift multiplier right
# 4. If carry (previous bit 0 of multiplier Low byte) set, add multiplicand to result High word.
# 5. Shift right result High word into result Low word/multiplier.
# 6. Shift right Low word/multiplier.
# 7. Decrement Loop counter.
# 8. If Loop counter not zero, go to Step 4.
#
# Heavily rewritten !
#
import random
class Multiplier:
def __init__(self):
random.seed(42)
def multiply(self,m1,m2):
result = 0 # step 1. clear result
for i in range(0,16):
carry = m1 & 1 # step 3. shift multiplier right
m1 = m1 >> 1
if carry != 0: # if carry set, add multiplicand to result
result = result + m2
result = (result >> 1) | ((result & 1) << 15) # rotate result right.
self.result = result
def checkMultiply(self,m1,m2):
self.multiply(m1,m2)
result = (m1 * m2) & 0xFFFF
#print("{0} x {1} = {2} ({3})".format(m1,m2,self.result,result))
errorMsg = "{0} * {1}".format(m1,m2)
assert result == self.result,errorMsg
def checkRandom(self):
ok = False
while not ok:
r1 = random.randint(0,65535)
r2 = random.randint(0,65535)
if random.randint(0,1) == 0:
r1 = int(r1 / 1000)
if random.randint(0,1) == 0:
r2 = int(r2 / 1000)
ok = (r1 * r2) <= 65535
self.checkMultiply(r1,r2)
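# Worked example (values assumed for illustration): 5 * 3 under the
# shift-add-rotate scheme above leaves 15 in the 16-bit accumulator.
m = Multiplier()
m.multiply(5, 3)
assert m.result == 15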
d = Multiplier()
d.checkMultiply(133,8)
d.checkMultiply(44,328)
d.checkMultiply(0,12)
d.checkMultiply(123,0)
for i in range(0,1000*1000*10):
if i % 10000 == 0:
print("Done ",i)
d.checkRandom() | {
"repo_name": "paulscottrobson/vtl-1802",
"path": "expression/multiply.py",
"copies": "1",
"size": "1599",
"license": "mit",
"hash": -130821361926656580,
"line_mean": 26.1186440678,
"line_max": 113,
"alpha_frac": 0.6203877423,
"autogenerated": false,
"ratio": 2.5831987075928917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37035864498928917,
"avg_score": null,
"num_lines": null
} |
## 1. Computer components ##
print('Hello World!')
## 2. Data storage ##
my_int = 6
int_addr = id(my_int)
my_str = 'Alien'
str_addr = id(my_str)
## 4. Data storage in Python ##
import sys
my_int = 200
size_of_my_int = sys.getsizeof(my_int)
int1 = 10
int2 = 100000
str1 = "Hello"
str2 = "Hi"
int_diff = sys.getsizeof(int1)- sys.getsizeof(int2)
print(int_diff)
str_diff = sys.getsizeof(str1)- sys.getsizeof(str2)
print(str_diff)
## 6. Disk storage ##
import time
import csv
f = open("list.csv", "r")
before = time.clock()
list_from_file = list(csv.reader(f))
after = time.clock()
file_time = after - before
print(file_time)
a = time.clock()
list_from_RAM = "1,2,3,4,5,6,7,8,9,10".split(",")
b = time.clock()
RAM_time = b-a
print(RAM_time)
## 9. Binary ##
num1 = 6
num2 = 9
num3 = 36
## 10. Computation and control flow ##
a = 5
b = 10
print("On line 3")
if a == 5:
print("On line 5")
else:
print("On line 7")
if b < a:
print("On line 9")
elif b == a:
print("On line 11")
else:
for i in range(3):
print("On line 14")
printed_lines = [3]
printed_lines = [3, 5, 14, 14, 14]
## 11. Functions in memory ##
def my_func():
print("On line 2")
a = 5
b = 10
print("On line 5")
my_func()
print("On line 7")
my_func()
printed_lines = []
printed_lines = [5, 2, 7, 2] | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Advanced/Introduction to computer architecture-170.py",
"copies": "1",
"size": "1301",
"license": "mit",
"hash": -799057504789401200,
"line_mean": 14.686746988,
"line_max": 51,
"alpha_frac": 0.6087624904,
"autogenerated": false,
"ratio": 2.459357277882798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.851686814893552,
"avg_score": 0.010250323869455569,
"num_lines": 83
} |
# 1. convert tables to use 'tabu'
# Based on Wagner Macedo's filter.py posted at
# https://groups.google.com/forum/#!msg/pandoc-discuss/RUC-tuu_qf0/h-H3RRVt1coJ
import pandocfilters as pf
def latex(s):
return pf.RawBlock('latex', s)
def inlatex(s):
return pf.RawInline('latex', s)
def tbl_caption(s):
return pf.Para([inlatex(r'\caption{')] + s + [inlatex(r'}')])
def tbl_alignment(a):
aligns = {
"AlignDefault": 'l',
"AlignLeft": 'l',
"AlignCenter": 'c',
"AlignRight": 'r',
}
s = ''.join(map(lambda al: aligns[al['t']], a[:-1]))
s += 'X[1,'+aligns[a[-1]['t']]+']'
return s
def tbl_headers(s):
result = s[0][0]['c'][:]
for i in range(1, len(s)):
result.append(inlatex(' & '))
result.extend(s[i][0]['c'])
result.append(inlatex(r'\\' '\n'))
return pf.Para(result)
def tbl_contents(s):
result = []
for row in s:
para = []
for col in row:
if col:
para.extend(col[0]['c'])
para.append(inlatex(' & '))
result.extend(para)
result[-1] = inlatex(r'\\' '\n')
return pf.Para(result)
def do_filter(k, v, f, m):
if k == "Table":
w = v[2]
if sum(w) == 0:
w = [1 for e in w]
wd = ''
ha = r'\centering'
else:
wd = '*'
ha = r'\raggedright'
return [latex(r'\begin{table'+wd+'}[!h]'),
tbl_caption(v[0]),
latex(ha),
latex(r'\begin{tabu}{' + tbl_alignment(v[1]) + '}'),
latex(r'\toprule'),
tbl_headers(v[3]),
latex(r'\midrule'),
tbl_contents(v[4]),
latex(r'\bottomrule' '\n' r'\end{tabu}'),
latex(r'\end{table'+wd+'}')]
if __name__ == "__main__":
pf.toJSONFilter(do_filter)
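# Typical invocation (file names assumed):
#   pandoc input.md --filter ./filter-latex.py -o output.tex
# pandocfilters walks the pandoc AST and calls do_filter on each element;
# returning a list of blocks replaces the matched Table node with the
# raw-LaTeX tabu environment built above.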
| {
"repo_name": "wilsonCernWq/ospray",
"path": "doc/filter-latex.py",
"copies": "2",
"size": "1884",
"license": "apache-2.0",
"hash": -6274346409928951000,
"line_mean": 26.7058823529,
"line_max": 79,
"alpha_frac": 0.4729299363,
"autogenerated": false,
"ratio": 3.0436187399030694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9487057776307759,
"avg_score": 0.005898179979062331,
"num_lines": 68
} |
# 1. copy this file to settings_secret.py
# 2. make sure the settings_secret.py file is ignored (should be listed in .gitignore)
# 3. EITHER...
#      a) add your TWITTER secrets directly to this file in the fallback block below
# **OR**
# b) set your environment variables to contain your keys (see below for env var names)
# 4. `twyth = Twython(settings_secret.TWITTER_API_KEY, settings_secret.TWITTER_API_SECRET)`
# TWITTER_API_KEY and _SECRET are required to download tweets
import os
TWITTER_API_KEY = os.getenv('TWITTER_API_KEY')
TWITTER_API_SECRET = os.getenv('TWITTER_API_SECRET')
if not TWITTER_API_KEY or not TWITTER_API_SECRET:
    # fall back to hard-coded placeholders when the env vars are unset
    TWITTER_API_KEY = '25AlphaNumericCharactersX'
    TWITTER_API_SECRET = '50AlphaNumericCharactersX50AlphaNumericCharactersX'
# Optional account identifying information that doesn't need to be protected
TWITTER_API_OWNER = 'hobsonlane' # the app owner's @username
TWITTER_API_OWNER_ID = 59275999 # see the app
# This is how to use these keys to instantiate a Twython connection to the twitter API
# twyth = Twython(TWITTER_API_KEY, TWITTER_API_SECRET)
| {
"repo_name": "totalgood/twip",
"path": "twip/settings_template.py",
"copies": "1",
"size": "1074",
"license": "mit",
"hash": 8207027805120538000,
"line_mean": 43.75,
"line_max": 91,
"alpha_frac": 0.7458100559,
"autogenerated": false,
"ratio": 3.0685714285714285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9276435188889807,
"avg_score": 0.007589259116324243,
"num_lines": 24
} |
## 1. Counting in Python ##
import sqlite3
conn = sqlite3.connect('factbook.db')
facts = conn.cursor().execute('select * from facts;').fetchall()
print(facts)
facts_count = len(facts)
## 2. Counting in SQL ##
conn = sqlite3.connect("factbook.db")
birth_rate_count = conn.cursor().execute('select count(birth_rate) from facts;').fetchall()
print(birth_rate_count)
## 3. Min and max in SQL ##
conn = sqlite3.connect("factbook.db")
min_population_growth = conn.cursor().execute('select min(population_growth) from facts;').fetchall()
print(min_population_growth)
max_death_rate = conn.cursor().execute('select max(death_rate) from facts;').fetchall()
print(max_death_rate)
## 4. Sum and average in SQL ##
conn = sqlite3.connect("factbook.db")
total_land_area = conn.cursor().execute('select sum(area_land) from facts;').fetchall()
print(total_land_area)
avg_water_area = conn.cursor().execute('select avg(area_water) from facts;').fetchall()
print(avg_water_area)
## 5. Multiple aggregation functions ##
conn = sqlite3.connect("factbook.db")
facts_stats = conn.cursor().execute('select avg(population),sum(population),max(birth_rate) from facts;').fetchall()
print(facts_stats)
## 6. Conditional aggregation ##
conn = sqlite3.connect("factbook.db")
population_growth = conn.cursor().execute('select avg(population_growth) from facts where population > 10000000;').fetchall()
print(population_growth)
## 7. Selecting unique rows ##
conn = sqlite3.connect("factbook.db")
unique_birth_rates = conn.cursor().execute('select distinct birth_rate from facts;').fetchall()
print(unique_birth_rates)
## 8. Distinct aggregations ##
conn = sqlite3.connect("factbook.db")
average_birth_rate = conn.cursor().execute('select avg(distinct birth_rate) from facts where population > 20000000;').fetchall()
print(average_birth_rate)
sum_population = conn.cursor().execute('select sum(distinct population) from facts where area_land > 1000000;').fetchall()
print(sum_population)
## 9. Arithmetic in SQL ##
conn = sqlite3.connect("factbook.db")
population_growth_millions = conn.cursor().execute('select population_growth/1000000.0 from facts;').fetchall()
print(population_growth_millions)
## 10. Arithmetic between columns ##
conn = sqlite3.connect("factbook.db")
next_year_population = conn.cursor().execute('select population + (population_growth*population) from facts;').fetchall()
print(next_year_population) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "SQL and Databases Begineer/SQL Summary Statistics-181.py",
"copies": "1",
"size": "2414",
"license": "mit",
"hash": 6153737363980810000,
"line_mean": 34.5147058824,
"line_max": 128,
"alpha_frac": 0.7377796189,
"autogenerated": false,
"ratio": 3.352777777777778,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9555401141604225,
"avg_score": 0.007031251014710558,
"num_lines": 68
} |
# 1. Create a dictionary that connects the numbers 1-12 with each
# number's corresponding month. 1 --> January, for example.
months = {1:'January',
2:'February',
3:'March',
4:'April',
5:'May',
6:'June',
7:'July',
8:'August',
9:'September',
10:'October',
11:'November',
12:'December'}
# 2. Write a function called write_date() that takes a date written in
# the format "6/28/1983" and prints it out in the format "28 of June, 1983".
# Hint: The split function can work with more than just spaces!
def write_date(date):
month_day_year = date.split('/')
return '{} of {}, {}'.format(month_day_year[1],
months[int(month_day_year[0])],
month_day_year[2])
# 3. Write a dictionary that contains at least five words as keys,
# and choose a value for each that will replace the word. For example,
# you may have the word "Handelman" as a key and "Batman" as its value.
# Shhh...don't tell anyone.
words_dict = {'Handelman':'Batman',
'is':'isn\'t',
'I':'you',
'will':'won\'t',
'can':'can\'t'}
# 4. Write a function called replace_words() that takes a sentence and
# returns the same sentence, but with any of the keys in your dictionary
# from problem 3 replaced with their respective values. Note: if more
# than one of the words appears in the sentence, then all words should
# be correctly replaced!
# Ex: replace_words('Mr. Handelman is my programming teacher')
# 'Mr. Batman is my programming teacher.'
#This is pretty finicky and sensitive to punctuation, but it works
def replace_words(sentence):
word_list = sentence.split()
new_string = []
for word in word_list:
if word in words_dict:
new_string.append(words_dict[word])
else:
new_string.append(word)
return ' '.join(new_string).capitalize()
# 5. Write a function called count_words() that takes a string (possibly
# a long string!) as input and counts the number of time each word
# appears in the string. It then returns the count of all words as a
# dictionary.
# Ex: count_words('So patient a doctor to doctor a patient so')
# {'so':2, 'patient': 2, 'a': 2, 'doctor': 2, 'to': 1}
def count_words(string):
count_words = {}
for word in string.lower().split():
if word in count_words:
count_words[word] += 1
else:
count_words[word] = 1
return count_words
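# Quick illustrative checks of the functions above (inputs assumed):
assert write_date('6/28/1983') == '28 of June, 1983'
assert count_words('So patient a doctor to doctor a patient so') == \
    {'so': 2, 'patient': 2, 'a': 2, 'doctor': 2, 'to': 1}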
| {
"repo_name": "Nethermaker/school-projects",
"path": "intro/dictionary_assignment.py",
"copies": "1",
"size": "2734",
"license": "mit",
"hash": -975624272765644500,
"line_mean": 22.4107142857,
"line_max": 76,
"alpha_frac": 0.5716898317,
"autogenerated": false,
"ratio": 3.6211920529801325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46928818846801323,
"avg_score": null,
"num_lines": null
} |
# 1) Create an empty stack S.
# 2) Initialize current node as root
# 3) Push the current node to S and set current = current->left until current is NULL
# 4) If current is NULL and stack is not empty then
# a) Pop the top item from stack.
# b) Print the popped item, set current = popped_item->right
# c) Go to step 3.
# 5) If current is NULL and stack is empty then we are done.
from typing import List # so the List[int] annotation resolves outside LeetCode

# Minimal TreeNode definition, as provided by the LeetCode harness:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
current = root
solution = []
stack = []
# Go down till the left most node.
while True:
while current != None:
stack.append(current)
current = current.left
            # Exit the infinite loop once the stack is empty and there is
            # no node left to visit.
            if len(stack) == 0 and current is None:
                return solution
            # Pop until we find a node with a right subtree to descend into.
            while current is None and len(stack) > 0:
                popped_item = stack.pop()
                solution.append(popped_item.val)
current = popped_item.right | {
"repo_name": "saisankargochhayat/algo_quest",
"path": "leetcode/94. Inorder/iterative_solution.py",
"copies": "1",
"size": "1033",
"license": "apache-2.0",
"hash": 6044954042956955000,
"line_mean": 40.36,
"line_max": 85,
"alpha_frac": 0.577928364,
"autogenerated": false,
"ratio": 4.115537848605578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193466212605578,
"avg_score": null,
"num_lines": null
} |
# 1.create projects skeleton based on defined scaffolds
#
# Project: https://github.com/molee1905/ShenMa
# License: MIT
#
import sublime, sublime_plugin
import os
import tempfile
import shutil
import re
import subprocess
import datetime, time
import json
SHORTCUTS_PATH_RE = re.compile(r'sc[/|\\]shortcuts', re.I)
INPUT_SC_NAME = 'please input sc name: '
INPUT_SC_PATH = 'please input shortcuts path(e.g. xxx/sc/shortcuts): '
INVALID_SC_PATH = '''please input correct shortcuts path\r(e.g. $HOME/sc/shortcuts) '''
ALREADY_EXISTED_ERROR = 'The shortcut “{}” already exists.'
COPY_ERROR = 'An error occurred while copying the template: {}'
SETTINGS_FILE = 'ShenMa.sublime-settings'
def open_directory(path):
cmd = (get_subl_executable_path(),'-a',path)
subprocess.Popen(cmd)
def get_subl_executable_path():
executable_path = sublime.executable_path()
if sublime.platform() == 'osx':
suffix = '.app/'
app_path = executable_path[:executable_path.rfind(suffix) + len(suffix)]
executable_path = app_path + 'Contents/SharedSupport/bin/subl'
return executable_path
class CreateScCommand(sublime_plugin.WindowCommand):
"""A command that creates a new sc """
def run(self):
self.settings = sublime.load_settings(SETTINGS_FILE)
path = self.settings.get('shortcuts')
if not path:
self.window.show_input_panel(
INPUT_SC_PATH,
'',
on_done=self.checkPath,
on_change=None,
on_cancel=None)
else:
self.checkPath(path)
def checkPath(self, path):
if self.isScPath(path):
self.window.show_input_panel(
INPUT_SC_NAME,
'',
on_done=self.render,
on_change=None,
on_cancel=None)
else:
if not sublime.ok_cancel_dialog(INVALID_SC_PATH):
return
self.window.show_input_panel(
INPUT_SC_PATH,
'',
on_done=self.checkPath,
on_change=None,
on_cancel=None)
def isScPath(self, path):
match = SHORTCUTS_PATH_RE.search(path);
if match:
index = path.index('shortcuts')
scpath = path[0:index]
if os.path.exists(scpath):
self.settings.set('shortcuts', path)
sublime.save_settings(SETTINGS_FILE)
return True
else:
self.settings.erase('shortcuts')
return False
def render(self, scpath):
self.scpath = scpath
self.name = os.path.basename(scpath) or os.path.dirname(scpath)
self.author = os.getlogin()
self.id = 'sc_{}'.format(self.name)
self.clzName = ('' if self.name.endswith('_lg') else 'card ') + self.id
self.cssPath = 'sc_advanced_{}.css'.format(self.name)
self.jsPath = 'sc_{}.js'.format(self.name)
self.dest = os.path.join(self.settings.get('shortcuts'), self.scpath)
if os.path.exists(self.dest):
sublime.error_message(ALREADY_EXISTED_ERROR.format(self.scpath))
return
else:
os.makedirs(self.dest)
src = os.path.join(sublime.packages_path(), os.path.dirname(__file__), 'template')
self.temp_dir = None
try:
self.temp_dir = tempfile.mkdtemp()
self.temp_dest = os.path.join(self.temp_dir, self.scpath)
shutil.copytree(src, self.temp_dest)
os.mkdir(os.path.join(self.temp_dest, 'data'))
os.mkdir(os.path.join(self.temp_dest, 'img'))
os.mkdir(os.path.join(self.temp_dest, 'res'))
os.mkdir(os.path.join(self.temp_dest, 'tmpl'))
if not self.fill_template(self.temp_dir, self.name):
return
for f in os.listdir(self.temp_dest):
shutil.move(os.path.join(self.temp_dest, f), self.dest)
open_directory(self.dest)
except Exception as ex:
if self.temp_dir and os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
sublime.error_message(COPY_ERROR.format(str(ex)))
def fill_template(self, template_dir, name):
placeholders = {
'__path__': self.scpath,
'__author__': self.author,
'__id__': self.id,
'__name__': self.name,
'__clz__': self.clzName,
'__csspath__': self.cssPath,
'__jspath__': self.jsPath,
'__version__': '0.0.1'
}
for dirpath, dirnames, filenames in os.walk(template_dir):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in ('.scss', '.js', '.html', '.md'):
path = os.path.join(dirpath, filename)
with open(path, encoding='utf-8') as f:
text = f.read()
for placeholder, value in placeholders.items():
text = text.replace(placeholder, value)
with open(path, mode='w', encoding='utf-8') as f:
f.write(text)
if extension in ('.scss', '.js'):
os.rename(path, os.path.join(dirpath, filename.format(name)))
return True
| {
"repo_name": "molee1905/ShenMa",
"path": "sm.py",
"copies": "1",
"size": "5493",
"license": "mit",
"hash": -1436819093862397400,
"line_mean": 30.3657142857,
"line_max": 90,
"alpha_frac": 0.5452723629,
"autogenerated": false,
"ratio": 3.795988934993084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9787150233251558,
"avg_score": 0.010822212928305175,
"num_lines": 175
} |
# 1. Creates png files with ellipsis of various widths, heights,
# and blue intensity.
# 2. Creates user.json with gallery configuration with file
# list in fullfiles node and launches browser. Note that file
# must be named user.json.
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import os as os
import json
import webbrowser
if os.path.isdir('thumb') == False:
os.mkdir('thumb')
if os.path.isdir('full') == False:
os.mkdir('full')
def plot_ellipse(w,h,b):
fig = plt.figure(0)
fig.clf()
ax = fig.add_subplot(111, aspect='equal')
e = Ellipse((0.,0.),w,h)
e.set_facecolor((0.,0.,b))
ax.add_artist(e)
ax.set_xlim(-1., 1.)
ax.set_ylim(-1., 1.)
N = 3;
i = 0
files = []
for b in range(N):
for h in range(N):
for w in range(N):
W = 2.*(w+1.)/3.
H = 2.*(h+1.)/3.
B = (b+1.)/3.
plot_ellipse(W,H,B)
file = 'Ellipse_%04d.png' % i
print "Saving {full/thumb}/" + file
plt.savefig('full/' + file,dpi=100)
plt.savefig('thumb/' + file,dpi=30)
# List of files and their attributes
files.append([file,float("%.2f" % W),float("%.2f" % H),float("%.2f" % B)])
i = i + 1;
print files
# Gallery configuration object
attributes = ['filename','width','height','blue level']
filters = \
[ \
[{}],\
[{"name": "All widths", "value": "width > 0"},{"name": "Width greater than 1.5", "value": "width > 1.5"}], \
[{}],\
[{}],\
]
data = [
{ 'id':'Ellipses', \
'attributes': attributes, \
'filters': filters, \
'fulldir': 'demos/full/', 'thumbdir': 'demos/thumb/', \
'files': files } ]
dataj = json.dumps(data)
# File must be named user.json.
print "Saving ../catalogs/user.json"
file = open("../catalogs/user.json", "w")
file.write('VIVIZ["catalogs"]["User"] =' + dataj)
file.close()
cwd = os.path.dirname(os.path.realpath(__file__))
url = "file://" + cwd + "/../index.htm#catalog=User&mode=thumb"
print "Attempting to open " + url
webbrowser.open(url,new=2) | {
"repo_name": "rweigel/viviz",
"path": "demos/create_ellipse_gallery.py",
"copies": "1",
"size": "2087",
"license": "mit",
"hash": 7028335680115412000,
"line_mean": 27.602739726,
"line_max": 112,
"alpha_frac": 0.5706756109,
"autogenerated": false,
"ratio": 2.9644886363636362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8875738164078049,
"avg_score": 0.03188521663711768,
"num_lines": 73
} |
# 1) curve of big order by 30m, 60m, dayly
# 2) Alert at threshold
# 3) list possible watch list
#-*-coding:utf-8-*-
#!/usr/bin/python
# coding: UTF-8
"""
This script parse stock info
"""
import pandas as pd
import tushare as ts
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mpdates
import sys,getopt,argparse
import datetime as dt
import time
from myutils import *
from matplotlib import animation
def usage():
print (sys.argv[0] + ' -i stock list file')
print (sys.argv[0] + ' -h #get help info')
print (sys.argv[0] + ' -t show data today')
print (sys.argv[0] + ' -v threshold, for example 800')
print (sys.argv[0] + ' -a check all list')
print (sys.argv[0] + ' -d set date')
if __name__ == '__main__':
    opts, args = getopt.getopt(sys.argv[1:], "ht:i:o:v:d:s:", ["help", "input=", "output="])
    stock_list= r'.\stock.txt'
    date='2017-05-19'
    volume=800
    n=1 # look-back window used with -t (assumed default so -s works without -t)
    single_stock=False
for op, value in opts:
if op == '-i':
stock_list = value
elif op == '-t':
#date=n_days_ago(float(value))
n=float(value)
elif op == '-d':
date=value
elif op == '-v':
volume=float(value)
elif op == '-s':
single_stock=True
stock_selected = value
elif op == '-h':
usage()
sys.exit()
if single_stock == True:
df1,df2,df3,df4=get_big_order_data_v2(stock_selected,n,volume)
print(df1)
df1.to_csv("data.csv")
if len(df2.volume)==0:
print ("There was no big order")
sys.exit()
fig = plt.figure(figsize=(9.8,6.18))
ax0 = fig.add_subplot(3,1,1, ylim=(min(df1.price[1:]), max(df1.price[1:])))
ax0.set_title('Real Time Price')
ax0.grid(True)
ax1 = fig.add_subplot(3,1,2, ylim=(min(df1.volume), max(df1.volume)))
ax1.set_title('Net Volume of Big Order')
ax1.grid(True)
if min(df2.volume)+ max(df2.volume) >0:
ax2 = fig.add_subplot(3,1,3, ylim=(0-max(df2.volume), max(df2.volume)))
else:
ax2 = fig.add_subplot(3,1,3, ylim=(min(df2.volume), 0-min(df2.volume)))
ax2.set_title('Buy/Sell Volume of Big Order')
ax2.grid(True)
line0, = ax0.plot_date(df1.time[1:], df1.price[1:], '-',lw=2)
line0.set_color('red')
line1, = ax1.plot_date(df1.time, df1.volume, '-',lw=2)
line1.set_color('red')
line2, = ax2.plot_date(df3.time, df3.volume, lw=2)
line2.set_color('red')
line3, = ax2.plot_date(df4.time, df4.volume, lw=2)
line3.set_color('green')
plt.show()
| {
"repo_name": "yunfeiz/py_learnt",
"path": "quant/big_order_monitor.py",
"copies": "1",
"size": "2376",
"license": "apache-2.0",
"hash": 5277615488317966000,
"line_mean": 21.4150943396,
"line_max": 88,
"alpha_frac": 0.6296296296,
"autogenerated": false,
"ratio": 2.4444444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8217502303161122,
"avg_score": 0.0713143541766644,
"num_lines": 106
} |
# 1d approixmation to beta binomial model
# https://github.com/aloctavodia/BAP
import pymc3 as pm
import numpy as np
import seaborn as sns
import scipy.stats as stats
import matplotlib.pyplot as plt
import arviz as az
import math
#data = np.repeat([0, 1], (10, 3))
data = np.repeat([0, 1], (10, 1))
h = data.sum()
t = len(data) - h
# Exact
plt.figure()
x = np.linspace(0, 1, 100)
xs = x #grid
dx_exact = xs[1]-xs[0]
post_exact = stats.beta.pdf(xs, h+1, t+1)
post_exact = post_exact / np.sum(post_exact)
plt.plot(xs, post_exact)
plt.yticks([])
plt.title('exact posterior')
plt.savefig('../figures/bb_exact.pdf')
# Grid
def posterior_grid(heads, tails, grid_points=100):
grid = np.linspace(0, 1, grid_points)
prior = np.repeat(1/grid_points, grid_points) # uniform prior
likelihood = stats.binom.pmf(heads, heads+tails, grid)
posterior = likelihood * prior
posterior /= posterior.sum()
#posterior = posterior * grid_points
return grid, posterior
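# Sanity check (illustrative): the grid posterior returned above is normalized.
_g, _p = posterior_grid(2, 2, 11)
assert abs(_p.sum() - 1.0) < 1e-9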
n = 20
grid, posterior = posterior_grid(h, t, n)
dx_grid = grid[1] - grid[0]
sf = dx_grid / dx_exact # Jacobian scale factor
plt.figure()
#plt.stem(grid, posterior, use_line_collection=True)
plt.bar(grid, posterior, width=1/n, alpha=0.2)
plt.plot(xs, post_exact*sf)
plt.title('grid approximation')
plt.yticks([])
plt.xlabel('θ');
plt.savefig('../figures/bb_grid.pdf')
# Laplace
with pm.Model() as normal_aproximation:
theta = pm.Beta('theta', 1., 1.)
y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
mean_q = pm.find_MAP()
std_q = ((1/pm.find_hessian(mean_q, vars=[theta]))**0.5)[0]
mu = mean_q['theta']
print([mu, std_q])
plt.figure()
plt.plot(xs, stats.norm.pdf(xs, mu, std_q), '--', label='Laplace')
post_exact = stats.beta.pdf(xs, h+1, t+1)
plt.plot(xs, post_exact, label='exact')
plt.title('Quadratic approximation')
plt.xlabel('θ', fontsize=14)
plt.yticks([])
plt.legend()
plt.savefig('../figures/bb_laplace.pdf');
# HMC
with pm.Model() as hmc_model:
theta = pm.Beta('theta', 1., 1.)
y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
trace = pm.sample(1000, random_seed=42)
thetas = trace['theta']
axes = az.plot_posterior(thetas, credible_interval=0.95)
plt.savefig('../figures/bb_hmc.pdf');
az.plot_trace(trace)
plt.savefig('../figures/bb_hmc_trace.pdf', dpi=300)
# ADVI
with pm.Model() as mf_model:
theta = pm.Beta('theta', 1., 1.)
y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
mean_field = pm.fit(method='advi')
trace_mf = mean_field.sample(1000)
thetas = trace_mf['theta']
axes = az.plot_posterior(thetas, credible_interval=0.95)
plt.savefig('../figures/bb_mf.pdf');
# track mean and std
with pm.Model() as mf_model:
theta = pm.Beta('theta', 1., 1.)
y = pm.Binomial('y', n=1, p=theta, observed=data) # Bernoulli
advi = pm.ADVI()
tracker = pm.callbacks.Tracker(
mean=advi.approx.mean.eval, # callable that returns mean
std=advi.approx.std.eval # callable that returns std
)
approx = advi.fit(callbacks=[tracker])
trace_approx = approx.sample(1000)
thetas = trace_approx['theta']
plt.figure()
plt.plot(tracker['mean'])
plt.title('Mean')
plt.savefig('../figures/bb_mf_mean.pdf');
plt.figure()
plt.plot(tracker['std'])
plt.title('Std ')
plt.savefig('../figures/bb_mf_std.pdf');
plt.figure()
plt.plot(advi.hist)
plt.title('Negative ELBO');
plt.savefig('../figures/bb_mf_elbo.pdf');
plt.figure()
sns.kdeplot(thetas);
plt.title('KDE of posterior samples')
plt.savefig('../figures/bb_mf_kde.pdf');
fig,axs = plt.subplots(1,4, figsize=(30,10))
mu_ax = axs[0]
std_ax = axs[1]
elbo_ax = axs[2]
kde_ax = axs[3]
mu_ax.plot(tracker['mean'])
mu_ax.set_title('Mean')
std_ax.plot(tracker['std'])
std_ax.set_title('Std ')
elbo_ax.plot(advi.hist)
elbo_ax.set_title('Negative ELBO');
kde_ax = sns.kdeplot(thetas);
kde_ax.set_title('KDE of posterior samples')
plt.savefig('../figures/bb_mf_panel.pdf');
fig = plt.figure(figsize=(16, 9))
mu_ax = fig.add_subplot(221)
std_ax = fig.add_subplot(222)
hist_ax = fig.add_subplot(212)
mu_ax.plot(tracker['mean'])
mu_ax.set_title('Mean track')
std_ax.plot(tracker['std'])
std_ax.set_title('Std track')
hist_ax.plot(advi.hist)
hist_ax.set_title('Negative ELBO track');
plt.savefig('../figures/bb_mf_tracker.pdf');
trace_approx = approx.sample(1000)
thetas = trace_approx['theta']
axes = az.plot_posterior(thetas, credible_interval=0.95)
| {
"repo_name": "probml/pyprobml",
"path": "scripts/beta_binom_approx_post_pymc3.py",
"copies": "1",
"size": "4414",
"license": "mit",
"hash": -5357475146503276000,
"line_mean": 25.1065088757,
"line_max": 66,
"alpha_frac": 0.6672710789,
"autogenerated": false,
"ratio": 2.616844602609727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3784115681509727,
"avg_score": null,
"num_lines": null
} |
""" 1d array of prediction values with properties (labels, reference to the predictor)
"""
import numpy
from pySPACE.resources.data_types import base
class PredictionVector(base.BaseData):
""" Represents a prediction vector
It contains a label, a prediction and a reference to the predictor.
I doesn't matter if it uses one or multiple predictions.
The object might be even used for regression, where no label is needed.
In contrast to :class:`~pySPACE.resources.data_types.time_series.TimeSeries`
or :class:`~pySPACE.resources.data_types.feature_vector.FeatureVector`
objects, prediction vectors are currently generated in a node chain
with classifiers for example and not loaded.
For evaluation the
:class:`~pySPACE.missions.nodes.sink.classification_performance_sink.PerformanceSinkNode`
can be used to evaluate the predictions.
For multiple predictions, nodes from the
:mod:`~pySPACE.missions.nodes.classification.ensemble`
module can be used.
For creating a prediction vector, there are four
**Parameters**
:input_array:
The prediction vector is (for historical reasons) a 2d numpy array
with some additional (mode important parameters).
The content of the input_array should be/is the same
as used in the *prediction* parameter.
If you do not specify this parameter, it is generated from
the *prediction* and vice versa.
Any object, which can be converted to a 2d-numpy array can be
used to specify this parameter.
:label:
The label normally gives a semantic meaning to the prediction value
and is a string, e.g., "ill" or "Target".
For regression this parameter can be ignored and is set to None.
For multiple predictions, it is a list.
:prediction:
For regression, this is the regression value and
for binary classification it is the prediction value.
For SVMs it can be any real value and for algorithms
with probabilistic output it should be the probability
of the respective data belonging to the second and not the first
class or vice versa.
For multiple predictions this is not a single number,
but a list of floats.
The prediction value is used to generate the *input_array*
parameter or vice versa.
:predictor:
For accessing special parameters of the decision algorithm,
this parameter is used (default: None).
It is typically a pointer to the Node, which created the vector.
For multiple predictions, a list might be used, which might be
replaced during the processing by an ensemble classifier.
One main usage is when reading out additional metrics in the
evaluation process like convergence behaviour or weights of
a linear classifier.
    The last 3 parameters map directly to object variables with the same name.
Currently, the object is by default like an array, with access to
the different other parameters.
For future developments, only these parameters should be used.
.. todo:: Implement a method _generate_tag for BaseData type (if desired)
.. todo:: Eliminate 2d-array behaviour incl. modifications in some nodes
:Author: Mario Micheal Krell
:Created: 2010/07/28
"""
def __new__(subtype, input_array=None, label=None, prediction=None,
predictor=None, tag=None, **kwargs):
""" Create the object including several type mappings """
# Input array is not an already formed ndarray instance
# We first cast to be our class type
if input_array is None:
if type(prediction) == list:
input_array = [prediction]
elif type(prediction) == numpy.ndarray:
input_array = numpy.atleast_2d(prediction)
elif prediction is None:
raise TypeError(
"You should at least give a prediction value " +
"of 1 or -1 in the input array or the prediction component")
else:
if type(prediction) == numpy.float64:
pass
elif type(prediction) == float:
prediction = numpy.float64(prediction)
elif type(prediction) == int or type(prediction) == numpy.int64:
prediction *= 1.0
else:
import warnings
warnings.warn("Type mismatch in Prediction Vector: %s!"%type(prediction))
prediction = float(prediction)
input_array = [[prediction]]
if not numpy.isfinite(input_array).all():
if type(prediction) == list:
input_array = [0 for i in range(len(prediction))]
elif prediction > 0:
prediction = 10**9
input_array = [[float(prediction)]]
else:
prediction = -10**9
input_array = [[float(prediction)]]
obj = base.BaseData.__new__(subtype, input_array)
# add subclasses attributes to the created instance
# obj.feature_names = ["prediction value"]
obj.label = label
obj.predictor = predictor
# using the input array is not necessary any more
if prediction is None:
l = list(input_array[0])
if len(l) == 1:
obj.prediction = l[0]
else:
obj.prediction = l
else:
obj.prediction = prediction
if not tag is None:
obj.tag = tag
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
super(PredictionVector, self).__array_finalize__(obj)
# set default values for attributes, since normally they are not needed
# when taking just the values
if not (obj is None) and not (type(obj) == numpy.ndarray):
# reset the attributes from passed original object
self.label = getattr(obj, 'label', None)
self.predictor = getattr(obj, 'predictor', None)
self.prediction = getattr(obj, 'prediction', None)
else:
self.label = None
self.predictor = None
self.prediction = None
# which is a good printing format? "label, value"?
def __str__(self):
str_repr = ""
if hasattr(self.label, "__iter__"):
for label, prediction in zip(self.label, self.prediction):
str_repr += "%s : %.4f \t" % (label, prediction)
else:
str_repr += "%s : %.4f \t" % (self.label, self.prediction)
return str_repr
def __reduce__(self):
""" Refer to
http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html#
for infos about pickling ndarray subclasses
"""
object_state = list(super(PredictionVector, self).__reduce__())
subclass_state = (self.label, self.predictor, self.prediction)
object_state[2].append(subclass_state)
object_state[2] = tuple(object_state[2])
return tuple(object_state)
def __setstate__(self, state):
nd_state, base_state, own_state = state
super(PredictionVector, self).__setstate__((nd_state, base_state))
(self.label, self.predictor, self.prediction) = own_state
def __eq__(self, other):
""" Same label and prediction value """
if type(other) != type(self):
return False
return (self.label == other.label and
numpy.allclose(self.prediction, other.prediction))
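# Minimal usage sketch (values assumed; needs the pySPACE base data type):
#   pv = PredictionVector(prediction=0.7, label="Target")
#   pv.prediction  # 0.7
#   pv.label       # "Target"
#   str(pv)        # 'Target : 0.7000 \t'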
| {
"repo_name": "pyspace/pyspace",
"path": "pySPACE/resources/data_types/prediction_vector.py",
"copies": "3",
"size": "7879",
"license": "bsd-3-clause",
"hash": 3188084685319099400,
"line_mean": 41.5891891892,
"line_max": 93,
"alpha_frac": 0.6062952151,
"autogenerated": false,
"ratio": 4.692674210839786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6798969425939786,
"avg_score": null,
"num_lines": null
} |
" 1D cylindrical PB, modelling crossection of pore "
# Comment: this is a prime example of simplicity and flexibility :)
# the file solves a real PDE with precise specifications but depends ONLY on core library functions!
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- create 1D geometry ---
R = 1. # [nm] pore radius
domain = Interval(0., R)
domain.addsubdomain(domain, "fluid")
domain.addboundaries(
wall = domain.boundary("right"),
center = domain.boundary("left")
)
domain.params["lscale"] = 1e9
domain.synonymes = dict(
water = "fluid",
)
geo = domain.create_geometry(lc=.01)
print geo
# --- define physical parameters ---
phys_params = dict(
surfcharge = dict(wall = -0.1*qq/nm**2),
bulkcon = 300,
#volcharge = -.01*qq/nm**3, # to account for \partial_{zz}\phi?
)
phys = Physics("electrolyte", geo, **phys_params)
print geo
# --- solve pdes ---
# nonlinear PB
pb = solve_pde(SimplePBProblem, geo, phys, cyl=True, iterative=False, tolnewton=1e-10)
# linear PB -- imax=1 implies only one Newton iteration
pblin = solve_pde(SimplePBProblem, geo, phys, cyl=True, iterative=False, imax=1)
u0 = pb.solution
u1 = pblin.solution
plot1D({"linear PB":u1, "nonlinear PB":u0},
(0., R, 101), "x", dim=1, axlabels=("r [nm]", "potential [V]"))
showplots()
# PNP doesn't converge and probably doesn't make sense, since bulk concentration of ions can not be assigned
#pnps = solve_pde(SimplePNPProblem, geo, phys, cyl=True, visualize=True)
| {
"repo_name": "mitschabaude/nanopores",
"path": "scripts/toy_models/pnp1Dcyl.py",
"copies": "1",
"size": "1522",
"license": "mit",
"hash": -2864117952657960400,
"line_mean": 28.2692307692,
"line_max": 108,
"alpha_frac": 0.6931668857,
"autogenerated": false,
"ratio": 2.955339805825243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4148506691525243,
"avg_score": null,
"num_lines": null
} |
# # 1. Define a function max() that takes two numbers as arguments and returns the largest of them.
# # Use the if-then-else construct available in Python.
# # (It is true that Python has the max() function built in, but writing it yourself is nevertheless a good exercise.)
#
# def max (a, b):
# if a>b:
# return a
# else:
# return b
#
# print(max(8, 11))
#
# # 6. Define a function sum() and a function multiply() that sums and multiplies (respectively) all the numbers in a list of numbers.
# # For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.
#
# # n+=x means store n + x in n (means n = n + x)
# # n*=x means store n * x in n
# # = is not equals to it is store in
#
# NumList=[1, 2, 3, 4]
#
# def sum (list):
# n=0
# for element in list:
# n+= element
# return n
# print (sum(NumList))
#
# def mult (list):
# n=1
# for element in list:
# n*= element
# return n
# print (mult(NumList))
# 7. Define a function reverse() that computes the reversal of a string (a string is a list of characters).
# For example, reverse("I am testing") should return the string "gnitset ma I". (Strings never need to be reversed, dumb question;
# to do so, however, use "snake kobra"[::-1].)
print ("snake kobra" [::-1])
pokemon = "snake kobra"
print (pokemon [::-1])
#the following is a more complicated method to teach what each individual thing in it means
def reverse(list):
    length = len(list) # len gets the length of a list
    newList = [] # creates a new, empty list
    for element in range(0, length): # range(x, y) counts from the start number x up to, but not including, the end number y
        newList.append(list[(length-1) - element]) # "for containerName in" is a loop method
        # .append adds to newList. A list is x long, but Python counts starting from 0,
        # so length-1 is the position of the last element.
        # The loop builds the new list starting at element 0:
        # the ending position minus the loop counter puts the ending element first,
        # then the next position, and so on, so it works backwards.
    return "".join(newList) # join the characters in newList into a single string, e.g. turn ["q","w","x"] into "qwx"
# PList= "snake kobra"
#
# print (PList.reverse ())
#
# # def reverse(list):
# # length = len(list) #len will get the length as a number
# # RevList = [] #Creates a new, empty list
# # for element in range(0, length): #creates a new list (x, y) from x to y
# # tempIndex = (length-1) - element
# # RevList.append(list[tempIndex])
# # return "".join(RevList)
#Splitting Practise
| {
"repo_name": "openUniverse/singularity",
"path": "BensPractice/Practise2.py",
"copies": "1",
"size": "2825",
"license": "mit",
"hash": -4883824510394490000,
"line_mean": 36.1710526316,
"line_max": 134,
"alpha_frac": 0.5886725664,
"autogenerated": false,
"ratio": 3.491965389369592,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9475705807751131,
"avg_score": 0.020986429603692083,
"num_lines": 76
} |
import re

# dsusort (decorate-sort-undecorate sorting) is assumed to be provided elsewhere in the project.
def shuffle2(rules, datalines):
    """An alternative way to code shuffle().
    Instead of writing files at every step of the way, collects lines in a dictionary structure.
    Initial tests on 2011-03-30 suggest that this might actually be _slower_.
    Takes as arguments a list of rules and a list of data lines as a starting point.
    For the first rule only:
        writes data lines matching a regular expression to the target file,
        writes data lines not matching the regular expression to the source file.
    For each subsequent rule:
        reads data lines from source file,
        writes lines matching a regular expression to the target file,
        writes lines not matching a regular expression to the source file, overwriting the source file."""
    rulenumber = 0
    all = {}
    for rule in rules:
        rulenumber += 1
        field = rule[0]
        searchkey = rule[1]
        source = rule[2]
        target = rule[3]
        sortorder = rule[4]
        sourcelines = []
        targetlines = []
        if sortorder:
            print '%s [%s] "%s" to "%s", sorted by field %s' % (field, searchkey, source, target, sortorder)
        else:
            print '%s [%s] "%s" to "%s"' % (field, searchkey, source, target)
        if rulenumber > 1:
            #datalines = list(open(source))
            datalines = list(all[source])
        if field == 0:
            if searchkey == ".":
                sourcelines = []
                targetlines = [ line for line in datalines ]
            else:
                sourcelines = [ line for line in datalines if not re.search(searchkey, line) ]
                targetlines = [ line for line in datalines if re.search(searchkey, line) ]
        else:
            ethfield = field - 1
            for line in datalines:
                if field > len(line.split()):
                    sourcelines.append(line)
                else:
                    if re.search(searchkey, line.split()[ethfield]):
                        targetlines.append(line)
                    else:
                        sourcelines.append(line)
        if sortorder:
            targetlines = dsusort(targetlines, sortorder)
        if sourcelines:
            all[source] = sourcelines
        else:
            all[source] = []
        if not all.has_key(target):
            all[target] = []
        if all[target]:
            all[target] = all[target] + targetlines
        else:
            all[target] = targetlines
    for key in all:
        newfile = open(key, 'w')
        newfile.writelines(all[key])
        newfile.close()
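if __name__ == '__main__':
    # Usage sketch with hypothetical data; the rule format is inferred from the
    # indexing above: (field, searchkey, source, target, sortorder), where
    # field 0 matches anywhere in the line and field n matches the n-th
    # whitespace-separated field.
    demo_rules = [(0, 'NOW', 'agenda.txt', 'now.txt', None)]
    demo_lines = ['NOW call home\n', 'LATER file taxes\n']
    shuffle2(demo_rules, demo_lines)  # writes now.txt, and the remainder to agenda.txt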
| {
"repo_name": "harsha-mudi/shawkle",
"path": "shawkle-alt.py",
"copies": "1",
"size": "3154",
"license": "apache-2.0",
"hash": 972171087484712300,
"line_mean": 48.28125,
"line_max": 116,
"alpha_frac": 0.496829423,
"autogenerated": false,
"ratio": 4.332417582417582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004904468840474273,
"num_lines": 64
} |
#1. the def keyword
#2. the function name
#3. the parentheses ()
#4. the function body
#5. the return value
'''
def send(x,n):
    try:
        import smtplib
        from email.mime.text import MIMEText
        from email.header import Header
        sender = '18761515328@163.com'
        receiver = x
        subject = 'python email test'
        smtpserver = 'smtp.163.com'
        username = '18761515328@163.com'
        password = 'a845219'
        msg = MIMEText( n, 'text', 'utf-8' )
        msg['Subject'] = "Subject"
        smtp = smtplib.SMTP()
        smtp.connect( smtpserver )
        smtp.login( username, password )
        smtp.sendmail( sender, receiver, msg.as_string() )
        smtp.quit()
    except:
        return "failure"
    else:
        return "success"
flag = True
while flag:
    y = input("Please enter the recipient's email address:")
    re = send(y, "SB")
    if re == "success":
        print("send succeeded")
    else:
        print("send failed")
'''
# positional parameters
# default parameters
# keyword (named) parameters
#*  by default packs all the passed-in positional arguments into a tuple: f1(*[1,2,2,3,])
#** by default packs all the passed-in keyword arguments into a dict: f1(**{"1":2, "3":4})
# catch-all parameters: *args, **kwargs
'''
def f1(**args):
    print(args,type(args))
#a={"1":"2","2":"3"}
f1(k1=1,k2=2) # assigning keyword values directly
'''
def y(*args,**arg1s):
print(args,arg1s)
#l=["qwdsad","ddwqeq",3]
l={"das":"dasd","weqw":"xcv"}
y(1,2,3,**l)
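# A short check of the packing/unpacking rules described above (demo values only):
def demo(a, b=2, *args, **kwargs):
    print(a, b, args, kwargs)
demo(1)                       # positional + default: 1 2 () {}
demo(1, 3, 4, 5, k=6)         # extras are packed:    1 3 (4, 5) {'k': 6}
demo(*[1, 2, 3], **{"k": 4})  # * unpacks a list, ** unpacks a dict: 1 2 (3,) {'k': 4}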
| {
"repo_name": "xiaoyongaa/ALL",
"path": "函数和常用模块/第一阶段/第一课.py",
"copies": "1",
"size": "1425",
"license": "apache-2.0",
"hash": -7724437380874339000,
"line_mean": 8.7559055118,
"line_max": 58,
"alpha_frac": 0.5052461663,
"autogenerated": false,
"ratio": 2.252727272727273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.814369665506545,
"avg_score": 0.022855356792364667,
"num_lines": 127
} |
# Part 1: functions
#gender: female = 2, male = 0
def calculate_score_for_gender(gender):
if gender == "male":
return 0
else: return 2
#age: 0-100; age < 10 --> 0, 12-20 or 37-50 --> 5, 21-35 --> 2, otherwise --> 1
def calculate_score_for_age(age):
if (age > 11 and age <= 20) or (age > 36 and age <= 50):
return 5
elif age > 20 and age <= 35:
return 2
elif age < 10:
return 0
else:
return 1
#status: single = 0, in a relationship = 1, in an open relationship = 2, it's complicated = 3, I'm a pizza = 0, anything else = 5
def calculate_score_for_status(status):
if status == "single":
return 0
elif status == "in a relationship":
return 1
elif status == "in an open relationship":
return 2
elif status == "it's complicated":
return 3
elif status == "I'm a pizza":
return 0
else:
return 5
# ignorance: problem is my challenge = 0, who gives a fuck = 2, I'm an angel = 4
def calculate_score_for_ignorance(ignorance):
    # the answers are validated before scoring, so these three cases are exhaustive
    if ignorance == "problem is my challenge":
        return 0
    elif ignorance == "who gives a fuck":
        return 2
    elif ignorance == "I'm an angel":
        return 4
# money_have: <= -10000 --> 8, -10000..-5000 --> 5, -5000..0 --> 4, 0..500 --> 3, 500..3000 --> 2, above 3000 --> 0
def calculate_score_for_money_have(money_have):
if money_have <= (-10000.0):
return 8.0
elif money_have > (-10000.0) and money_have <= (-5000.0):
return 5.0
elif money_have > (-5000.0) and money_have <= 0.0:
return 4.0
elif money_have > 0.0 and money_have <= 500.0:
return 3.0
elif money_have > 500.0 and money_have <= 3000.0:
return 2.0
else:
return 0.0
# ---WHY DOESN'T IT RECOGNIZE POSITIVE FLOATING-POINT NUMBERS, OR ANY NEGATIVE NUMBER (INTEGER OR FLOAT), AS A NUMBER?
# -->YOU HAVE TO WRAP raw_input IN float() IF THE VALUE ISN'T A WHOLE NUMBER, AND REMOVE .isdigit(), BECAUSE .isdigit() IS ONLY FOR WHOLE NUMBERS!
# money_want: 0 = 0, 0-1000 = 1, 1000-5000 = 3, 5000-10000 = 4, 10000+ = 5
def caluculate_score_for_money_want(money_want):
if money_want == 0:
return 0
elif money_want > 0.0 and money_want <= 1000.0:
return 1
elif money_want > 1000.0 and money_want <= 5000.0:
return 3
elif money_want > 5000.0 and money_want <= 10000.0:
return 4
else:
return 5
#real friends: 0 = 5, 1-3 = 1, 4-6 = 2, 7-9 = 3, 10+ = 4
def calculate_score_for_rl_friends(rl_friends):
if rl_friends == 0:
return 5
elif rl_friends >= 1 and rl_friends <= 3:
return 1
elif rl_friends >= 4 and rl_friends <= 6:
return 2
elif rl_friends >= 7 and rl_friends <= 9:
return 3
else:
return 4
#children: 0 = 1, 1-2 = 2, 3 = 3, 4 = 4, 5+ = 5
def calculate_score_for_children(children):
if children == 0:
return 1
    elif children == 1 or children == 2:
return 2
elif children == 3:
return 3
elif children == 4:
return 4
else:
return 5
# Part 2: summing the individual scores
def calculate_score(gender, age, status, ignorance, money_have, money_want, rl_friends, children):
result = calculate_score_for_gender(gender)
result += calculate_score_for_age(age)
result += calculate_score_for_status(status)
result += calculate_score_for_ignorance(ignorance)
result += calculate_score_for_money_have(money_have)
result += caluculate_score_for_money_want(money_want)
result += calculate_score_for_rl_friends(rl_friends)
result += calculate_score_for_children(children)
return result
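# Quick self-check with hypothetical inputs; the expected total is
# 0 + 2 + 0 + 4 + 2.0 + 3 + 2 + 1 = 14.0
assert calculate_score("male", 25, "single", "I'm an angel", 1000.0, 5000.0, 5, 0) == 14.0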
# Part 3: ------------- output for the user
#gender
print "Are you male or female?"
gender = raw_input(">> ")
#note to self: "while" keeps checking in a loop, "if" checks only once
while (gender != "male") and (gender != "female"):
gender = raw_input("Check your gender again: ")
#age
print "How old are you?"
age = raw_input(">> ")
while not age.isdigit():
age = raw_input("Admit it, you're old. Now write your real age: ")
#status
print "What is your marital status?"
status = raw_input(">> ")
while (status != "single") and (status != "in a relationship") and (status != "in an open relationship") and (status != "it's complicated") and (status != "I'm a pizza"):
status = raw_input("Yeah, right... Think again: ")
#ignorance
print "How ignorant are you?"
ignorance = raw_input(">> ")
while (ignorance != "problem is my challenge") and (ignorance != "who gives a fuck") and (ignorance != "I'm an angel"):
ignorance = raw_input("You can't be that ignorant. Try again: ")
#money_have
print "How much money have you got?"
money_have = float(raw_input(">> "))
while not money_have:
money_have = float(raw_input("We aren't tax collectors, so be honest: "))
# WRAP raw_input IN float() IF THE VALUE ISN'T A WHOLE NUMBER, AND REMOVE .isdigit(), BECAUSE .isdigit() IS ONLY FOR WHOLE NUMBERS!
#money_want
print "In addition to the money you've got, how much money do you want to have?"
money_want = float(raw_input(">> "))
while money_want < 0: # ----> so that it is a non-negative number!
money_want = float(raw_input("I didn't ask for apples and peaches. So, how much money do you want? "))
#rl_friends
print "How many real friends have you got?"
rl_friends = raw_input(">> ")
while not rl_friends.isdigit():
rl_friends = raw_input("Spock doesn't count. Think again - how many? ")
#children
print "How many children have you got?"
children = raw_input(">> ")
while not children.isdigit():
children = raw_input("No aliens, just humans, please: ")
# Part 4: the total
print "On a scale from 0 to 40, your life complication is : ", calculate_score(gender, int(age), status, ignorance, money_have, money_want, rl_friends, children)
| {
"repo_name": "CodeCatz/litterbox",
"path": "ajda/complicajda.py",
"copies": "1",
"size": "5477",
"license": "mit",
"hash": 8154240385631336000,
"line_mean": 24.2396313364,
"line_max": 170,
"alpha_frac": 0.6605806098,
"autogenerated": false,
"ratio": 2.7412412412412412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3901821851041241,
"avg_score": null,
"num_lines": null
} |
import scipy.optimize as opt
from numpy import sqrt, log
#when solving numerically, all Mach numbers are assumed to lie between these bounds
MAX_M = 100
MIN_M = 1e-6
#thermo properties
def getT0(T, gamma, M):
return T*(1 + (gamma-1)*M**2/2)
def getP0(P, gamma, M):
return P*(1 + (gamma-1)*M**2/2)**(gamma/(gamma-1))
def getcstar(gamma, Rgas, T0):
return sqrt(((gamma+1)/2)**((gamma+1)/(gamma-1))*Rgas*T0/gamma)
#ideal nozzle
def getIdealCf(gamma, Pe, P0, Pa, Ae, Astar):
return sqrt(2*gamma**2/(gamma-1)* \
(2/(gamma+1))**((gamma+1)/(gamma-1))* \
(1-(Pe/P0)**((gamma-1)/gamma))) + (Pe-Pa)/P0*Ae/Astar
#isentropic flow functions
def isentropicT(T0, gamma, M):
return T0/(1 + (gamma-1)*M**2/2)
def isentropicP(P0, gamma, M):
return P0/(1 + (gamma-1)*M**2/2)**(gamma/(gamma-1))
def isentropicRho(rho0, gamma, M):
return rho0/(1 + (gamma-1)*M**2/2)**(1/(gamma-1))
def isentropicmdot(A, P0, T0, Rgas, gamma, M):
return M*P0*A*sqrt(gamma/Rgas/T0)/ \
(1 + (gamma-1)*M**2/2)**((gamma+1)/2/(gamma-1))
def isentropicA(Astar, gamma, M):
return (Astar/M)*(2/(gamma+1)* \
(1 + (gamma-1)*M**2/2))**((gamma+1)/2/(gamma-1))
def getIsentropicMs(Astar, A, gamma):
f = lambda M: isentropicA(Astar, gamma, M) - A
subsonicM = opt.brentq(f, MIN_M, 1)
supersonicM = opt.brentq(f, 1, MAX_M)
return subsonicM, supersonicM
def getIsentropicMFromP(P0, P, gamma):
return sqrt(2*((P0/P)**((gamma-1)/gamma) - 1)/(gamma-1))
#Fanno flow functions
def fannoLstar(Cf, D, M, gamma):
return (D/4/Cf)*((1-M**2)/gamma/M**2 + \
(gamma+1)*log((gamma+1)*M**2/2/(1+(gamma-1)*M**2/2))/2/gamma)
#returns subsonic M
def fannoMafter(Cf, D, Min, gamma, L):
Lstar1 = fannoLstar(Cf, D, Min, gamma)
Lstar2 = Lstar1 - L
f = lambda M: Lstar2 - fannoLstar(Cf, D, M, gamma)
return opt.brentq(f, Min, 1)
def fannoPstar(P, M, gamma):
return P*M/sqrt((gamma+1)/(2+(gamma-1)*M**2))
def fannoP0star(P0, M, gamma):
return P0*M/((2/(gamma+1))*(1+(gamma-1)*M**2/2)) \
**((gamma+1)/2/(gamma-1))
def fannoP0(P0star, M, gamma):
return P0star* \
((2/(gamma+1))*(1+(gamma-1)*M**2/2))**((gamma+1)/2/(gamma-1))/M
#Rayleigh Flow
def rayleighP0toP01(M, M1, gamma):
return ((1+gamma*M1**2)/(1+gamma*M**2))* \
((1+(gamma-1)*M**2/2)/(1+(gamma-1)*M1**2/2))
def rayleighT0toT01(M, M1, gamma):
return ((1+gamma*M1**2)/(1+gamma*M**2))**2*(M/M1)**2 \
*((1+(gamma-1)*M**2/2)/(1+(gamma-1)*M1**2/2))
## Returns the subsonic Mach number for a given ratio of T0s
# @return subsonic M
def rayleighM(T0, T01, M, gamma):
f = lambda M1: T0/T01 - rayleighT0toT01(M, M1, gamma)
return opt.brentq(f, MIN_M, 1)
## Calculates conditions after a normal shock, given conditions before the shock
#@return P2, M2, T2
def normalShockP(P1, M1, T1, gamma):
M2 = sqrt((M1**2 + 2/(gamma-1))/(2*M1**2*gamma/(gamma-1) - 1))
P2 = P1*(1+gamma*M1**2)/(1+gamma*M2**2)
T2 = T1*(M2*P2/M1/P1)**2
return P2, M2, T2
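if __name__ == '__main__':
    # Quick sanity check with assumed values: for an area ratio A/Astar = 2 and
    # gamma = 1.4, the isentropic solutions are M ~ 0.306 and M ~ 2.197.
    gamma = 1.4
    Msub, Msup = getIsentropicMs(1.0, 2.0, gamma)
    print('subsonic M = %.4f, supersonic M = %.4f' % (Msub, Msup))
    assert abs(isentropicA(1.0, gamma, Msup) - 2.0) < 1e-8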
| {
"repo_name": "USCLiquidPropulsionLaboratory/Engine-sizing-snake",
"path": "Flows1D.py",
"copies": "1",
"size": "3251",
"license": "mit",
"hash": 2254067939418139600,
"line_mean": 30.8585858586,
"line_max": 81,
"alpha_frac": 0.580129191,
"autogenerated": false,
"ratio": 2.3439077144917086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34240369054917086,
"avg_score": null,
"num_lines": null
} |
""" 1D Function plotter.
This example creates a simple 1D function examiner, illustrating the use of
ChacoPlotEditors for displaying simple plot relations, as well as TraitsUI
integration. Any 1D numpy/scipy.special function should work in the function
text box.
- Left-drag pans the plot.
- Mousewheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import linspace, pi
# Enthought library imports
from traits.api import Array, Dict, Enum, HasTraits, Str
from traitsui.api import Item, View
# Chaco imports
from chaco.chaco_plot_editor import ChacoPlotEditor, \
ChacoPlotItem
class Foo(HasTraits):
# Public Traits
xdata = Array
plot_type = Enum("scatter", "line")
eq = Str("sin(x)")
# Default TraitsUI view
traits_view = View(
ChacoPlotItem("xdata", "_ydata",
type_trait="plot_type",
# Basic axis and label properties
show_label=False,
resizable=True,
orientation="h",
x_label = "Index data",
y_label = "Value data",
# Plot properties
color = "green",
bgcolor = "white",
# Specific to scatter plot
marker = "circle",
marker_size = 2,
outline_color = "none",
# Border, padding properties
border_visible=True,
border_width=1,
padding_bg_color = "lightgray"),
Item("plot_type"),
Item("eq"),
resizable=True,
width=500, height=500)
# Private Traits
_d = Dict
_ydata = Array
def __init__(self, **kwtraits):
super(Foo, self).__init__(**kwtraits)
self._d = dict(x=self.xdata)
exec "from scipy import *" in self._d
exec "from scipy.special import *" in self._d
self._ydata = eval(self.eq, self._d)
def _eq_changed(self, old, new):
try:
self._ydata = eval(new, self._d)
except:
pass
#===============================================================================
# # demo object that is used by the demo.py application.
#===============================================================================
demo = Foo(xdata=linspace(-2*pi, 2*pi ,100), eq="sin(x)")
if __name__ == "__main__":
demo.edit_traits(kind="modal")
| {
"repo_name": "tommy-u/chaco",
"path": "examples/demo/basic/traits_editor.py",
"copies": "3",
"size": "3151",
"license": "bsd-3-clause",
"hash": 4105821228618536400,
"line_mean": 34.8068181818,
"line_max": 80,
"alpha_frac": 0.4642970486,
"autogenerated": false,
"ratio": 4.759818731117825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6724115779717825,
"avg_score": null,
"num_lines": null
} |
"""1-D Gaussian Processes for Regression and Bayesian Optimization"""
# Author: Charles Franzen
# License: MIT
from functools import partial
import numpy as np
import numpy.linalg as LA
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import dillinger.kernel_functions as kern
# main Gaussian Process class, with sample and fit methods.
class GaussianProcess:
'''Gaussian Processes for Regression.
Args:
domain (ndarray): Points in the domain.
kernel_function (string or Kernel):
Kernel to be used for covariance matrices. Can be a string
specifying a kernel function or a Kernel instance.
kernel_args (dict):
Dictionary of keyword arguments to pass the the kernel function.
noise (float): Gaussan noise assumed in observations.
'''
def __init__(self, domain: np.ndarray, kernel_function,
kernel_args=None, noise=1.):
self.domain = domain
self.domain.shape = -1, 1
self.n = np.prod(domain.shape)
self.noise = noise
if type(kernel_function) is str:
kernel_function = kern._kernel_dict[kernel_function]
self.kernel_obj = None
if kernel_args:
self.kernel_args = kernel_args
self.kernel = partial(kernel_function, **kernel_args)
else:
self.kernel = kernel_function
else: # for Kernel objects
if kernel_args:
self.kernel_args = kernel_args
self.kernel_obj = kernel_function(**kernel_args)
else:
self.kernel_obj = kernel_function()
self.kernel = self.kernel_obj.covariance
self.K = kern.cov_mat(self.kernel, self.domain, self.domain)
self.μ = np.zeros(self.n)
self.μ.shape = -1, 1
self.obs = None
def expected_improvement(self):
'''Expected improvement over the domain.
Returns:
ei (ndarray):
Expected improvement values at each point in the domain.
'''
obs_df = pd.DataFrame(self.obs, columns=['x', 'y'])
# mean observation at each point in the domain
mean_obs = obs_df.groupby('x').mean()
best_val = mean_obs.y.max()
# EI calculation
sigma = np.sqrt(np.diag(self.K))
gamma = (self.μ.flatten() - best_val) / sigma
ei = sigma * (gamma * stats.norm.cdf(gamma) + stats.norm.pdf(gamma))
return ei
def fit(self, x, y, clear_obs=False, **optimizer_args):
'''Fit the GP to observed data
Args:
x (ndarray): domain values
y (ndarray): noisy observations
clear_obs (bool): if True, clears previously stored observations
optimizer_args: args to be passed to Kernel.optimize_params()
'''
if clear_obs:
self.obs = None
if self.obs is not None:
self.obs = np.concatenate((self.obs, np.concatenate((x, y),
axis=1)))
else:
self.obs = np.concatenate((x, y), axis=1)
x = self.obs[:, 0]
x.shape = -1, 1
y = self.obs[:, 1]
y.shape = -1, 1
n_obs = x.shape[0]
if self.kernel_obj:
self.kernel_obj.optimize_params(x, y, **optimizer_args)
self.set_kernel_args(self.kernel_obj.params)
# create block matrix
K_X_X = kern.cov_mat(self.kernel, x, x)
K_X_Xt = kern.cov_mat(self.kernel, x, self.domain)
K_Xt_X = kern.cov_mat(self.kernel, self.domain, x)
K_Xt_Xt = kern.cov_mat(self.kernel, self.domain, self.domain)
# get means
shared_term = K_Xt_X.dot(LA.inv(K_X_X + self.noise**2 * np.eye(n_obs)))
self.μ = shared_term.dot(y) # see equation (4)
# get covariances
self.K = K_Xt_Xt - shared_term.dot(K_X_Xt) # see equation (3)
def plot(self, n_samples=0):
# get sigmas
sigmas = np.sqrt(np.diag(self.K))
sigmas.shape = -1, 1
# get confidence intervals
upper_ci = self.μ + 1.96 * sigmas
lower_ci = self.μ - 1.96 * sigmas
x = self.domain.flatten()
upper_ci = upper_ci.flatten()
lower_ci = lower_ci.flatten()
samples = self.sample(n_samples)
for sample in samples:
plt.plot(x, sample, alpha=.5)
plt.plot(x, self.μ, color='k', label='GP mean estimate')
plt.fill_between(x, lower_ci, upper_ci, alpha=.5, color='m')
if self.obs is not None:
plt.scatter(self.obs[:, 0],
self.obs[:, 1],
color='r',
marker='.',
linewidths=2,
s=100,
label='Observations')
plt.legend()
def plot_expected_improvement(self, objective=None):
# get sigmas
sigmas = np.sqrt(np.diag(self.K))
sigmas.shape = -1, 1
# get confidence intervals
upper_ci = self.μ + 1.96 * sigmas
lower_ci = self.μ - 1.96 * sigmas
x = self.domain.flatten()
upper_ci = upper_ci.flatten()
lower_ci = lower_ci.flatten()
# get expected improvement
ei = self.expected_improvement()
ei_max = ei.max()
ei_arg = np.argmax(ei)
# plotting
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
# plot the GP
ax1.plot(x, self.μ, color='k', label='GP mean estimate')
ax1.fill_between(x, lower_ci, upper_ci, alpha=.5, color='m')
ax1.scatter(self.obs[:, 0],
self.obs[:, 1],
color='r',
marker='.',
linewidths=2,
s=100,
label='Observations')
if objective is not None:
if type(objective) == np.ndarray:
ax1.plot(self.domain,
objective,
linestyle='dashed',
linewidth=3,
c='g',
label='True objective')
else:
obj_x = np.linspace(np.min(self.domain),
np.max(self.domain),
100)
obj_y = objective(obj_x)
ax1.plot(obj_x,
obj_y,
linestyle='dashed',
linewidth=3,
c='y',
label='True objective')
ax1.set_title('GP')
ax1.legend(bbox_to_anchor=(1.1, 1.05))
# plot EI
ax2.plot(x, ei, linewidth=3)
ax2.vlines(x[ei_arg], 0, ei_max,
color='r', linestyles='dashed', label='max EI')
ax2.set_title('Expected improvement')
ax2.set_xlabel('$x$')
ax2.set_ylabel('$a(x)$')
ax2.legend()
def sample(self, n_samples):
# uses the cholesky decomp of the covariance matrix to draw samples
samples = np.zeros((n_samples, self.n))
try:
L = LA.cholesky(self.K + 1e-5 * np.eye(self.n))
except LA.LinAlgError:
# attempt rank 1 update if not positive definite
print('attempting rank 1 update')
e, v = LA.eig(self.K)
v1 = v[:, 0]
v1.shape = -1, 1
e1 = e[0]
print(f'negative eigenvalue: {e1:.4f}')
perturbed_K = self.K + v1.dot(v1.T).dot(np.spacing(e1) - e1)
L = LA.cholesky(perturbed_K)
for i in range(n_samples):
# draw samples
u = np.random.randn(self.n, 1)
z = L.dot(u)
z += self.μ
z = z.reshape(1, self.n)
samples[i] = z
return samples
def set_kernel_args(self, kernel_args):
self.kernel_args = kernel_args
if self.kernel_obj:
self.kernel_obj.set_params(**kernel_args)
self.kernel = self.kernel_obj.covariance
else:
self.kernel = partial(self.kernel, **kernel_args)
def x_next(self):
# get next point to sample
ei = self.expected_improvement()
return np.argmax(ei)
| {
"repo_name": "chipfranzen/dillinger",
"path": "dillinger/gaussian_process.py",
"copies": "1",
"size": "8314",
"license": "mit",
"hash": -4726010513304370000,
"line_mean": 33.5958333333,
"line_max": 79,
"alpha_frac": 0.5169215946,
"autogenerated": false,
"ratio": 3.7638259292837715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9780747523883772,
"avg_score": 0,
"num_lines": 240
} |
# 1d grid approximation to the beta-binomial model
# https://github.com/aloctavodia/BAP
import pymc3 as pm
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import arviz as az
def posterior_grid(heads, tails, grid_points=100):
grid = np.linspace(0, 1, grid_points)
prior = np.repeat(1/grid_points, grid_points) # uniform prior
likelihood = stats.binom.pmf(heads, heads+tails, grid)
posterior = likelihood * prior
posterior /= posterior.sum()
return grid, posterior
data = np.repeat([0, 1], (10, 3))
h = data.sum()
t = len(data) - h
grid, posterior = posterior_grid(h, t, 20)
plt.stem(grid, posterior, use_line_collection=True)
plt.title('grid approximation')
plt.yticks([])
plt.xlabel('θ');
plt.savefig('../figures/bb_grid.pdf')
plt.figure()
x = np.linspace(0, 1, 100)
xs = x #grid
post_exact = stats.beta.pdf(xs, h+1, t+1)
post_exact = post_exact / np.sum(post_exact)
plt.plot(xs, post_exact)
plt.title('exact posterior')
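# Sanity check: the grid-posterior mean should approximate the exact
# Beta(h+1, t+1) mean, (h+1)/(h+t+2).
print('grid mean = %.4f, exact mean = %.4f'
      % (np.sum(grid * posterior), (h + 1) / (h + t + 2)))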
plt.savefig('../figures/bb_exact.pdf') | {
"repo_name": "probml/pyprobml",
"path": "scripts/bb_grid_approx.py",
"copies": "1",
"size": "1039",
"license": "mit",
"hash": -2323872741914506000,
"line_mean": 26.3421052632,
"line_max": 66,
"alpha_frac": 0.7003853565,
"autogenerated": false,
"ratio": 2.867403314917127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4067788671417127,
"avg_score": null,
"num_lines": null
} |
'''1D linear advection-diffusion (AD) equation.'''
import math
import numpy as np
import numpy.fft as fft
import pfasst.imex
class LinearAD(pfasst.imex.IMEXFEval):
Lx = 1.0
nu = 0.02
acst = 1.0
t0 = 1.0
def __init__(self, size, Lx=1.0, acst=1.0, nu=0.02, t0=1.0, **kwargs):
super(LinearAD, self).__init__()
self.shape = (size,)
self.size = size
self.Lx = Lx
self.acst = acst
self.nu = nu
self.t0 = t0
self.wave_numbers = 2*math.pi/Lx * fft.fftfreq(size) * size
self.laplacian = -(2*math.pi/Lx * fft.fftfreq(size) * size)**2
def f1_evaluate(self, y, t, f1, **kwargs):
n = y.shape[0]
z = fft.fft(y)
z_x = z * self.wave_numbers * 1j
y_x = np.real(fft.ifft(z_x))
f1[:] = -self.acst * y_x
def f2_evaluate(self, y, t, f2, **kwargs):
n = y.shape[0]
z = fft.fft(y)
op = self.nu * self.laplacian
z = op * z
f2[:] = np.real(fft.ifft(z))
def f2_solve(self, rhs, y, t, dt, f2, **kwargs):
n = y.shape[0]
z = fft.fft(rhs)
invop = 1.0 / (1.0 - self.nu*dt*self.laplacian)
z = invop * z
y[:] = np.real(fft.ifft(z))
op = self.nu * self.laplacian
z = op * z
f2[:] = np.real(fft.ifft(z))
def exact(self, t, q):
size = self.size
Lx = self.Lx
acst = self.acst
nu = self.nu
t0 = self.t0
q[:] = 0.0
for ii in range(-5, 6):
for i in range(size):
x = Lx*(i-size/2)/size + ii*Lx - acst*t
q[i] += (4.0*math.pi*nu*(t+t0))**(-0.5) * math.exp(-x**2/(4.0*nu*(t+t0)))
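        # The sums above evaluate the exact solution: a periodically wrapped,
        # advected heat kernel,
        #   q(x, t) = sum_i (4*pi*nu*(t+t0))**(-1/2)
        #                 * exp(-(x - acst*t + i*Lx)**2 / (4*nu*(t+t0))),
        # truncated to the image terms i = -5..5.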
| {
"repo_name": "memmett/PyPFASST",
"path": "tests/linearad.py",
"copies": "1",
"size": "1716",
"license": "bsd-2-clause",
"hash": 6755589115639598000,
"line_mean": 20.45,
"line_max": 89,
"alpha_frac": 0.4656177156,
"autogenerated": false,
"ratio": 2.6359447004608296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36015624160608295,
"avg_score": null,
"num_lines": null
} |
" 1D PNP, modelling reservoirs and membrane far away from pore "
from dolfin import *
from nanopores import *
from nanopores.physics.simplepnps import *
# --- create 1D geometry ---
h = 20.
hmem = 3.
domain = Interval(-h/2, h/2)
membrane = Interval(-hmem/2, hmem/2)
lowerb = domain.boundary("left")
upperb = domain.boundary("right")
domain.addsubdomains(
fluid = domain - membrane,
membrane = membrane
)
domain.addboundaries(
lowerb = lowerb,
upperb = upperb,
chargedmembraneb = membrane.boundary(),
)
domain.params["lscale"] = 1e9
domain.synonymes = dict(
solid = "membrane",
bulkfluid = "fluid",
pore = set()
)
geo = domain.create_geometry(lc=.01)
print geo
# --- define physical parameters ---
phys_params = dict(
Membraneqs = -0.0,
bulkcon = 300.,
bV = -.1,
)
phys = Physics("pore", geo, **phys_params)
print geo # this tests how the default synonymes in physics/pore are incorporated
# --- define and solve PDE ---
pnps = solve_pde(SimplePNPProblem, geo, phys)
v, cp, cm = pnps.solutions()
plot1D({"potential": v}, (-5., 5., 101), "x", dim=1, axlabels=("z [nm]", "potential [V]"))
plot1D({"c+": cp, "c-":cm}, (hmem/2, h/2, 101), "x", dim=1, axlabels=("z [nm]", "concentrations [mol/m^3]"))
showplots()
| {
"repo_name": "mitschabaude/nanopores",
"path": "scripts/toy_models/pnp1D.py",
"copies": "1",
"size": "1272",
"license": "mit",
"hash": -4181582711238732000,
"line_mean": 23,
"line_max": 116,
"alpha_frac": 0.6422955975,
"autogenerated": false,
"ratio": 2.735483870967742,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.3877779468467742,
"avg_score": null,
"num_lines": null
} |
"""1D quantum particle in a box."""
from __future__ import print_function, division
from sympy import Symbol, pi, sqrt, sin, Interval, S
from sympy.physics.quantum.operator import HermitianOperator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.constants import hbar
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.physics.quantum.hilbert import L2
m = Symbol('m')
L = Symbol('L')
__all__ = [
'PIABHamiltonian',
'PIABKet',
'PIABBra'
]
class PIABHamiltonian(HermitianOperator):
"""Particle in a box Hamiltonian operator."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PIABKet(self, ket, **options):
n = ket.label[0]
return (n**2*pi**2*hbar**2)/(2*m*L**2)*ket
class PIABKet(Ket):
"""Particle in a box eigenket."""
@classmethod
def _eval_hilbert_space(cls, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
@classmethod
def dual_class(self):
return PIABBra
def _represent_default_basis(self, **options):
return self._represent_XOp(None, **options)
def _represent_XOp(self, basis, **options):
x = Symbol('x')
n = Symbol('n')
subs_info = options.get('subs', {})
return sqrt(2/L)*sin(n*pi*x/L).subs(subs_info)
def _eval_innerproduct_PIABBra(self, bra):
return KroneckerDelta(bra.label[0], self.label[0])
class PIABBra(Bra):
"""Particle in a box eigenbra."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity, S.Infinity))
@classmethod
def dual_class(self):
return PIABKet
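if __name__ == '__main__':
    # Minimal usage sketch: apply the Hamiltonian to an eigenket and evaluate
    # an inner product (standard sympy.physics.quantum calls).
    from sympy.physics.quantum import qapply
    n, k = Symbol('n'), Symbol('k')
    print(qapply(PIABHamiltonian('H')*PIABKet(n)))  # pi**2*hbar**2*n**2/(2*L**2*m)*|n>
    print((PIABBra(k)*PIABKet(n)).doit())           # KroneckerDelta(k, n)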
| {
"repo_name": "lidavidm/sympy",
"path": "sympy/physics/quantum/piab.py",
"copies": "124",
"size": "1756",
"license": "bsd-3-clause",
"hash": 8298416204019598000,
"line_mean": 24.4492753623,
"line_max": 67,
"alpha_frac": 0.6577448747,
"autogenerated": false,
"ratio": 3.251851851851852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 69
} |
"""1D quantum particle in a box."""
from sympy import Symbol, pi, sqrt, sin, conjugate, Interval, S
from sympy.physics.quantum.operator import HermitianOperator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.kronecker import KroneckerDelta
from sympy.physics.quantum.hilbert import L2
m = Symbol('m')
L = Symbol('L')
__all__ = [
'PIABHamiltonian',
'PIABKet',
'PIABBra'
]
class PIABHamiltonian(HermitianOperator):
"""Particle in a box Hamiltonian operator."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity,S.Infinity))
def _apply_operator_PIABKet(self, ket, **options):
n = ket.label[0]
return (n**2*pi**2*hbar**2)/(2*m*L**2)*ket
class PIABKet(Ket):
"""Particle in a box eigenket."""
@classmethod
def _eval_hilbert_space(cls, args):
return L2(Interval(S.NegativeInfinity,S.Infinity))
@property
def dual_class(self):
return PIABBra
def _represent_default_basis(self, **options):
return self._represent_XOp(None, **options)
def _represent_XOp(self, basis, **options):
x = Symbol('x')
n = Symbol('n')
subs_info = options.get('subs',{})
return sqrt(2/L)*sin(n*pi*x/L).subs(subs_info)
def _eval_innerproduct_PIABBra(self, bra):
return KroneckerDelta(bra.label[0], self.label[0])
class PIABBra(Bra):
"""Particle in a box eigenbra."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity,S.Infinity))
@property
def dual_class(self):
return PIABKet
| {
"repo_name": "tarballs-are-good/sympy",
"path": "sympy/physics/quantum/piab.py",
"copies": "1",
"size": "1700",
"license": "bsd-3-clause",
"hash": 3383580177551355400,
"line_mean": 24,
"line_max": 63,
"alpha_frac": 0.6547058824,
"autogenerated": false,
"ratio": 3.2015065913371,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9277234292003231,
"avg_score": 0.015795636346773632,
"num_lines": 68
} |
"""1D quantum particle in a box."""
from sympy import Symbol, pi, sqrt, sin, Interval, S
from sympy.physics.quantum.operator import HermitianOperator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.constants import hbar
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.physics.quantum.hilbert import L2
m = Symbol('m')
L = Symbol('L')
__all__ = [
'PIABHamiltonian',
'PIABKet',
'PIABBra'
]
class PIABHamiltonian(HermitianOperator):
"""Particle in a box Hamiltonian operator."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity,S.Infinity))
def _apply_operator_PIABKet(self, ket, **options):
n = ket.label[0]
return (n**2*pi**2*hbar**2)/(2*m*L**2)*ket
class PIABKet(Ket):
"""Particle in a box eigenket."""
@classmethod
def _eval_hilbert_space(cls, args):
return L2(Interval(S.NegativeInfinity,S.Infinity))
@classmethod
def dual_class(self):
return PIABBra
def _represent_default_basis(self, **options):
return self._represent_XOp(None, **options)
def _represent_XOp(self, basis, **options):
x = Symbol('x')
n = Symbol('n')
subs_info = options.get('subs',{})
return sqrt(2/L)*sin(n*pi*x/L).subs(subs_info)
def _eval_innerproduct_PIABBra(self, bra):
return KroneckerDelta(bra.label[0], self.label[0])
class PIABBra(Bra):
"""Particle in a box eigenbra."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity,S.Infinity))
@classmethod
def dual_class(self):
return PIABKet
| {
"repo_name": "flacjacket/sympy",
"path": "sympy/physics/quantum/piab.py",
"copies": "2",
"size": "1703",
"license": "bsd-3-clause",
"hash": 8265089972198359000,
"line_mean": 24.4179104478,
"line_max": 67,
"alpha_frac": 0.6564885496,
"autogenerated": false,
"ratio": 3.219281663516068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9870240118221993,
"avg_score": 0.00110601897881503,
"num_lines": 67
} |
# 1d walk, FPT process
import random
import numpy as np
import matplotlib.pyplot as plt
# get simulation parameter values
f = open( 'input.txt', 'r+')
content = [x.strip('\n') for x in f.readlines()]
f.close()
runTotal = int(content[1])
N = float(content[2])
L = float(content[3])
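# Assumed input.txt layout (inferred from the indices above): the first line
# is a header/comment, followed by runTotal, N and L, one value per line.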
vi = 0.40;
vf = 0.45;
dv = 0.05;
nv = int(round( (vf-vi)/dv )) + 1
v = [ (vi + float(x)*dv) for x in range(nv) ];
for vRun in v:
nList = []
FPTmean = []
FPTstdv = []
pRight = 0.5 + vRun;
pLeft = 0.5 - vRun;
for i in range(1,int(N)+1,2):
nList.append(i)
x = []
tList = []
for iRun in range(runTotal):
# print ' n = ' + str(i) + ' run # '+ str(iRun)
# FPT process
x = [ float(j) for j in range(i)]
xCOMi = np.mean(x)
dCOM = 0.0
t = 0
while dCOM < L:
iList = [ii for ii in range(i)]
# random.shuffle(iList)
for j in iList:
r = random.random()
if r < pRight:
if ((j != (i-1)) and (i != 0)):
if (x[j]+1.0) != x[j+1]:
x[j] += 1.0
else:
x[j] += 1.0
else:
if ((j != 0) and (i != 0)):
if (x[j]-1.0) != x[j-1]:
x[j] += -1.0
else:
x[j] += -1.0
xCOM = np.mean(x)
dCOM = xCOM - xCOMi
t += 1
tList.append(t)
FPTmean.append(np.mean(tList))
FPTstdv.append(np.std(tList))
# re-scale data
# print nList
nList = [ n/L for n in nList]
vStr = '%.3f' %(vRun*2.0)
tScale = 2.0*vRun/L
FPTmean = [ mean*tScale for mean in FPTmean]
FPTstdv = [ stdv*tScale for stdv in FPTstdv]
plt.errorbar( nList, FPTmean, yerr=FPTstdv, label=vStr)
plt.legend(loc=2)
# plt.ylim([1.0, 2.2])
# plt.xlim([min(nList)-0.01, max(nList)+0.01])
# plt.xscale('log')
# plt.yscale('log')
plt.xlabel(r'$N/L$')
plt.ylabel(r'$<\tau>v/L$')
plt.title('Mean FPT for different Drift Velocity (python version)')
# plt.savefig('fig/mfptPY_n300_1.png')
plt.show()
| {
"repo_name": "varennes/1dwalk",
"path": "1dwalk.py",
"copies": "1",
"size": "2329",
"license": "mit",
"hash": -9053713721988728000,
"line_mean": 24.5934065934,
"line_max": 67,
"alpha_frac": 0.4401030485,
"autogenerated": false,
"ratio": 2.9593392630241424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.885672006672308,
"avg_score": 0.008544448960212446,
"num_lines": 91
} |
# --------------- 1. Explicit Logging
def info(msg):
print("INFO - {}".format(msg))
# some business logic with logging
def do_something1(n):
info("do_something1 called with: n={}".format(n))
return n + 1
# --------------- 2 a) Logging with self-made decorator
def with_logging1(fun):
def wrapper(*args, **kwargs):
info("{} called with : {},{}".format(fun.__name__, args, kwargs))
return fun(*args, **kwargs)
return wrapper
# -- hand-made
def do_something2(n):
return n + 2
do_something2 = with_logging1(do_something2)
# -- with @
@with_logging1  # just a short way of saying: do_something3 = with_logging1(do_something3)
def do_something3(n):
"""
some docstring for do_something3
:param n: number
:return: n + 3
"""
return n + 3
# --------------- 2 b) Logging with self-made decorator, complete
def with_logging2(fun):
def wrapper(*args, **kwargs):
info("{} called with : {},{}".format(fun.__name__, args, kwargs))
return fun(*args, **kwargs)
wrapper.__name__ = fun.__name__
wrapper.__doc__ = fun.__doc__
return wrapper
@with_logging2
def do_something4(n):
"""
some docstring for do_something4
:param n: number
:return: n + 4
"""
return n + 4
# --------------- 2 c) Logging with decorator and functools-support
from functools import wraps
def with_logging3(fun):
@wraps(fun)
def wrapper(*args, **kwargs):
info("{} called with : {},{}".format(fun.__name__, args, kwargs))
return fun(*args, **kwargs)
return wrapper
@with_logging3
def do_something5(n):
"""
some docstring for do_something5
:param n: number
:return: n + 5
"""
return n + 5
# -------------------------------------------------------------------
if __name__ == '__main__':
print('{} 1. Explicit Logging\n'.format('-' * 20))
print(do_something1(4))
# print('{} 2 a) Logging with self-made decorator\n'.format('-' * 20))
#
# print(do_something2(4))
# print(do_something3(n=4))
#
# print("do_something3.__name__: {}".format(do_something3.__name__))
# print("do_something3.__doc__ : {}".format(do_something3.__doc__))
#
# print('{} 2 b) Logging with self-made decorator, complete\n'.format('-' * 20))
#
# print(do_something4(n=4))
# print("do_something4.__name__: {}".format(do_something4.__name__))
# print("do_something4.__doc__ : {}".format(do_something4.__doc__))
#
# print('{} 2 c) Logging with decorator and functools-support\n'.format('-' * 20))
#
# print(do_something5(4))
# print("do_something5.__name__: {}".format(do_something5.__name__))
# print("do_something5.__doc__ : {}".format(do_something5.__doc__))
# ... further reading
# - http://book.pythontips.com/en/testing/decorators.html
# - http://jamescooke.info/things-to-remember-about-decorators.html
| {
"repo_name": "plipp/Python-Coding-Dojos",
"path": "katas/XX-Primers/decorator_sample.py",
"copies": "1",
"size": "2916",
"license": "mit",
"hash": 1063078293232352000,
"line_mean": 22.9016393443,
"line_max": 89,
"alpha_frac": 0.5665294925,
"autogenerated": false,
"ratio": 3.4549763033175354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45215057958175353,
"avg_score": null,
"num_lines": null
} |
## 1. Exploring the data ##
# The first 5 rows of the data.
print(income.head())
lowest_income_county = income["county"][income["median_income"].idxmin()]
high_pop = income[income["pop_over_25"] > 500000]
lowest_income_high_pop_county = high_pop["county"][high_pop["median_income"].idxmin()]
## 2. Random numbers ##
import random
# Returns a random integer between the numbers 0 and 10, inclusive.
num = random.randint(0, 10)
# Generate a sequence of 10 random numbers between the values of 0 and 10.
random_sequence = [random.randint(0, 10) for _ in range(10)]
# Sometimes, when we generate a random sequence, we want it to be the same sequence whenever the program is run.
# An example is when you use random numbers to select a subset of the data, and you want other people
# looking at the same data to get the same subset.
# We can ensure this by setting a random seed.
# A random seed is an integer that is used to "seed" a random number generator.
# After a random seed is set, the numbers generated after will follow the same sequence.
random.seed(10)
print([random.randint(0,10) for _ in range(5)])
random.seed(10)
# Same sequence as above.
print([random.randint(0,10) for _ in range(5)])
random.seed(11)
# Different seed means different sequence.
print([random.randint(0,10) for _ in range(5)])
random.seed(20)
new_sequence = [random.randint(0,10) for _ in range(10)]
## 3. Selecting items from a list ##
# Let's say that we have some data on how much shoppers spend in a store.
shopping = [300, 200, 100, 600, 20]
# We want to sample the data, and only select 4 elements.
random.seed(1)
shopping_sample = random.sample(shopping, 4)
# 4 random items from the shopping list.
print(shopping_sample)
## 4. Population vs sample ##
import matplotlib.pyplot as plt
# A function that returns the result of a die roll.
def roll():
return random.randint(1, 6)
random.seed(1)
small_sample = [roll() for _ in range(10)]
# Plot a histogram with 6 bins (1 for each possible outcome of the die roll)
plt.hist(small_sample, 6)
plt.show()
random.seed(1)
medium_sample = [roll() for _ in range(100)]
plt.hist(medium_sample, 6)
plt.show()
random.seed(1)
large_sample = [roll() for _ in range(10000)]
plt.hist(large_sample, 6)
plt.show()
## 5. Finding the right sample size ##
def probability_of_one(num_trials, num_rolls):
"""
This function will take in the number of trials, and the number of rolls per trial.
Then it will conduct each trial, and record the probability of rolling a one.
"""
probabilities = []
for i in range(num_trials):
die_rolls = [roll() for _ in range(num_rolls)]
one_prob = len([d for d in die_rolls if d==1]) / num_rolls
probabilities.append(one_prob)
return probabilities
random.seed(1)
small_sample = probability_of_one(300, 50)
plt.hist(small_sample, 20)
plt.show()
random.seed(1)
small_sample = probability_of_one(300, 100)
plt.hist(small_sample, 20)
plt.show()
random.seed(1)
small_sample = probability_of_one(300,1000)
plt.hist(small_sample, 20)
plt.show()
## 6. What are the odds? ##
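# Note: each "## n. ... ##" block in this file is a separate exercise screen,
# so variables can be screen-local; large_sample here is assumed to hold the
# per-trial probabilities from probability_of_one, not the raw die rolls above.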
import numpy
large_sample_std = numpy.std(large_sample)
avg = numpy.mean(large_sample)
deviations_from_mean = (.18 - avg) / large_sample_std
over_18_count = len([p for p in large_sample if p >= .18])
## 7. Sampling counties ##
# This is the mean median income in any US county.
mean_median_income = income["median_income"].mean()
print(mean_median_income)
def get_sample_mean(start, end):
return income["median_income"][start:end].mean()
def find_mean_incomes(row_step):
mean_median_sample_incomes = []
# Iterate over the indices of the income rows
# Starting at 0, and counting in blocks of row_step (0, row_step, row_step * 2, etc).
for i in range(0, income.shape[0], row_step):
# Find the mean median for the row_step counties from i to i+row_step.
mean_median_sample_incomes.append(get_sample_mean(i, i+row_step))
return mean_median_sample_incomes
nonrandom_sample = find_mean_incomes(100)
plt.hist(nonrandom_sample, 20)
plt.show()
# What you're seeing above is the result of biased sampling.
# Instead of selecting randomly, we selected counties that were next to each other in the data.
# This picked counties in the same state more often that not, and created means that didn't represent the whole country.
# This is the danger of not using random sampling -- you end up with samples that don't reflect the entire population.
# This gives you a distribution that isn't normal.
import random
def select_random_sample(count):
random_indices = random.sample(range(0, income.shape[0]), count)
return income.iloc[random_indices]
random.seed(1)
random_sample = [select_random_sample(100)["median_income"].mean() for _ in range(1000)]
plt.hist(random_sample, 20)
plt.show()
## 8. An experiment ##
def select_random_sample(count):
random_indices = random.sample(range(0, income.shape[0]), count)
return income.iloc[random_indices]
random.seed(1)
mean_ratios = []
for i in range(1000):
sample = select_random_sample(100)
ratios = sample["median_income_hs"] / sample["median_income_college"]
mean_ratios.append(ratios.mean())
plt.hist(mean_ratios, 20)
plt.show()
## 9. Statistical significance ##
significance_value = None
a = [v for v in mean_ratios if v >=0.675]
significance_value = len(a)/len(mean_ratios)
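# significance_value is an empirical p-value: the fraction of sampled mean
# ratios at least as large as the 0.675 threshold.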
## 10. Final result ##
# This is "steeper" than the graph from before, because it has 500 items in each sample.
random.seed(1)
mean_ratios = []
for i in range(1000):
sample = select_random_sample(500)
ratios = sample["median_income_hs"] / sample["median_income_college"]
mean_ratios.append(ratios.mean())
plt.hist(mean_ratios, 20)
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Beginner/Distributions and sampling-28.py",
"copies": "1",
"size": "5749",
"license": "mit",
"hash": -9019339273218168000,
"line_mean": 29.4232804233,
"line_max": 120,
"alpha_frac": 0.7072534354,
"autogenerated": false,
"ratio": 3.162266226622662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9319600445228339,
"avg_score": 0.009983843358864604,
"num_lines": 189
} |
# [1] "Feature Pyramid Networks for Object Detection" - Tsung-Yi Lin, Piotr Dollár,
# Ross Girshick, Kaiming He, Bharath Hariharan, Serge Belongie, arxiv 2016
# https://arxiv.org/abs/1612.03144
#
# [2] "DSSD : Deconvolutional Single Shot Detector" - Cheng-Yang Fu, Wei Liu, Ananth Ranga,
# Ambrish Tyagi, Alexander C. Berg, arxiv 2017
#
# [3] "Aggregated Residual Transformations for Deep Neural Networks" - Saining Xie,
# Ross Girshick, Piotr Dollár, Zhuowen Tu, Kaiming He, arxiv 2016
# https://github.com/D-X-Y/ResNeXt/blob/master/models/resnext.py
#
# [4] "Is object localization for free? – Weakly-supervised learning with convolutional neural networks" -
# Maxime Oquab, Léon Bottou, Ivan Laptev, Josef Sivic, cvpr 2015
#
#
from net.util import *
#----- helper functions --------
def make_linear_bn_prelu(in_channels, out_channels):
return [
nn.Linear(in_channels, out_channels, bias=False),
nn.BatchNorm1d(out_channels),
nn.PReLU(out_channels),
]
def make_conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=1, groups=1):
return [
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
def make_linear_bn_relu(in_channels, out_channels):
return [
nn.Linear(in_channels, out_channels, bias=False),
nn.BatchNorm1d(out_channels),
nn.ReLU(inplace=True),
]
def make_max_flat(out):
flat = F.adaptive_max_pool2d(out,output_size=1) ##nn.AdaptiveMaxPool2d(1)(out)
flat = flat.view(flat.size(0), -1)
return flat
def make_avg_flat(out):
flat = F.adaptive_avg_pool2d(out,output_size=1)
flat = flat.view(flat.size(0), -1)
return flat
def make_shortcut(out, modifier):
if modifier is None:
return out
else:
return modifier(out)
def make_flat(out):
#out = F.adaptive_avg_pool2d(out,output_size=4)
#out = F.avg_pool2d(out,kernel_size=3, stride=1, padding=0)
out = F.adaptive_max_pool2d(out,output_size=1)
flat = out.view(out.size(0), -1)
return flat
#############################################################################
class PyNet_10(nn.Module):
def __init__(self, in_shape, num_classes):
super(PyNet_10, self).__init__()
in_channels, height, width = in_shape
self.preprocess = nn.Sequential(
*make_conv_bn_relu(in_channels, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
) # 128
self.conv1d = nn.Sequential(
*make_conv_bn_relu(16,32, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(32,32, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(32,64, kernel_size=1, stride=1, padding=0 ),
) # 128
        self.short1d = nn.Conv2d(16, 64, kernel_size=1, stride=1, padding=0, bias=False) # defined but not used in forward()
self.conv2d = nn.Sequential(
*make_conv_bn_relu(64,64, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(64,64, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(64,128, kernel_size=1, stride=1, padding=0 ),
) # 64
self.short2d = nn.Conv2d(64, 128, kernel_size=1, stride=1, padding=0, bias=False)
self.conv3d = nn.Sequential(
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,256, kernel_size=1, stride=1, padding=0 ),
) # 32
self.short3d = nn.Conv2d(128, 256, kernel_size=1, stride=1, padding=0, bias=False)
self.conv4d = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 16
self.short4d = None #nn.Identity()
self.conv5d = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 8
self.short5d = None # nn.Identity()
self.conv4u = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 16
self.conv3u = nn.Sequential(
*make_conv_bn_relu(256,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
) # 32
self.conv2u = nn.Sequential(
*make_conv_bn_relu(128,64, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu( 64,64, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu( 64,64, kernel_size=1, stride=1, padding=0 ),
) # 64
self.conv1u = nn.Sequential(
*make_conv_bn_relu(64,64, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(64,64, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(64,64, kernel_size=1, stride=1, padding=0 ),
) # 128
self.cls2d = nn.Sequential(
*make_linear_bn_relu(128, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls3d = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls4d = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls5d = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls1u = nn.Sequential(
*make_linear_bn_relu(64, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls2u = nn.Sequential(
*make_linear_bn_relu( 64, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls3u = nn.Sequential(
*make_linear_bn_relu(128, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls4u = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
def forward(self, x):
out = self.preprocess(x) #128
conv1d = self.conv1d(out) #128
out = F.max_pool2d(conv1d, kernel_size=2, stride=2) # 64
conv2d = self.conv2d(out) + make_shortcut(out, self.short2d) # 64
out = F.max_pool2d(conv2d, kernel_size=2, stride=2) # 32
flat2d = make_max_flat(out)
conv3d = self.conv3d(out) + make_shortcut(out, self.short3d) # 32
out = F.max_pool2d(conv3d, kernel_size=2, stride=2) # 16
flat3d = make_max_flat(out)
conv4d = self.conv4d(out) + make_shortcut(out, self.short4d) # 16
out = F.max_pool2d(conv4d, kernel_size=2, stride=2) # 8
flat4d = make_max_flat(out)
conv5d = self.conv5d(out) + make_shortcut(out, self.short5d) # 8
out = conv5d # 4
flat5d = make_max_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 16
out = out + conv4d
out = self.conv4u(out)
flat4u = make_max_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 32
out = out + conv3d
out = self.conv3u(out)
flat3u = make_max_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 64
out = out + conv2d
out = self.conv2u(out)
flat2u = make_max_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) #128
out = out + conv1d
out = self.conv1u(out)
flat1u = make_max_flat(out)
logit2d = self.cls2d(flat2d).unsqueeze(2)
logit3d = self.cls3d(flat3d).unsqueeze(2)
logit4d = self.cls4d(flat4d).unsqueeze(2)
logit5d = self.cls5d(flat5d).unsqueeze(2)
logit1u = self.cls1u(flat1u).unsqueeze(2)
logit2u = self.cls2u(flat2u).unsqueeze(2)
logit3u = self.cls3u(flat3u).unsqueeze(2)
logit4u = self.cls4u(flat4u).unsqueeze(2)
logit = torch.cat((
logit2d,logit3d,logit4d,logit5d,
logit1u,logit2u,logit3u,logit4u,
),dim=2)
logit = F.dropout(logit, p=0.15,training=self.training)
logit = logit.sum(2)
logit = logit.view(logit.size(0),logit.size(1)) #unsqueeze(2)
prob = F.sigmoid(logit)
return logit,prob
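if __name__ == '__main__':
    # Smoke test: a sketch with an assumed 3-channel 128x128 input and 17 labels.
    # The forward pass fuses per-scale logits by summation, with dropout over the
    # scale axis, in the spirit of the multi-scale weak supervision in [4].
    from torch.autograd import Variable
    net = PyNet_10(in_shape=(3, 128, 128), num_classes=17)
    logit, prob = net(Variable(torch.randn(2, 3, 128, 128)))
    print(logit.size(), prob.size())  # expect torch.Size([2, 17]) for each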
#############################################################################
class PyNet_11(nn.Module):
def __init__(self, in_shape, num_classes):
super(PyNet_11, self).__init__()
in_channels, height, width = in_shape
self.preprocess = nn.Sequential(
*make_conv_bn_relu(in_channels, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
) # 128
self.conv1d = nn.Sequential(
*make_conv_bn_relu(16,32, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(32,32, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(32,64, kernel_size=1, stride=1, padding=0 ),
) # 128
        self.short1d = nn.Conv2d(16, 64, kernel_size=1, stride=1, padding=0, bias=False) # defined but not used in forward()
self.conv2d = nn.Sequential(
*make_conv_bn_relu(64,64, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(64,64, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(64,128, kernel_size=1, stride=1, padding=0 ),
) # 64
self.short2d = nn.Conv2d(64, 128, kernel_size=1, stride=1, padding=0, bias=False)
self.conv3d = nn.Sequential(
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,256, kernel_size=1, stride=1, padding=0 ),
) # 32
self.short3d = nn.Conv2d(128, 256, kernel_size=1, stride=1, padding=0, bias=False)
self.conv4d = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 16
self.short4d = None #nn.Identity()
self.conv5d = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 8
self.short5d = None # nn.Identity()
self.conv4u = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 16
self.conv3u = nn.Sequential(
*make_conv_bn_relu(256,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
) # 32
self.conv2u = nn.Sequential(
*make_conv_bn_relu(128,64, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu( 64,64, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu( 64,64, kernel_size=1, stride=1, padding=0 ),
) # 64
self.conv1u = nn.Sequential(
*make_conv_bn_relu(64,64, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(64,64, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(64,64, kernel_size=1, stride=1, padding=0 ),
) # 128
self.cls2d = nn.Sequential(
*make_linear_bn_relu(128, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls3d = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls4d = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls5d = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls1u = nn.Sequential(
*make_linear_bn_relu(64, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls2u = nn.Sequential(
*make_linear_bn_relu( 64, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls3u = nn.Sequential(
*make_linear_bn_relu(128, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
self.cls4u = nn.Sequential(
*make_linear_bn_relu(256, 512),
*make_linear_bn_relu(512, 512),
nn.Linear(512, num_classes)
)
def forward(self, x):
out = self.preprocess(x) #128
conv1d = self.conv1d(out) #128
out = F.max_pool2d(conv1d, kernel_size=2, stride=2) # 64
conv2d = self.conv2d(out) + make_shortcut(out, self.short2d) # 64
out = F.max_pool2d(conv2d, kernel_size=2, stride=2) # 32
flat2d = make_flat(out)
conv3d = self.conv3d(out) + make_shortcut(out, self.short3d) # 32
out = F.max_pool2d(conv3d, kernel_size=2, stride=2) # 16
flat3d = make_flat(out)
conv4d = self.conv4d(out) + make_shortcut(out, self.short4d) # 16
out = F.max_pool2d(conv4d, kernel_size=2, stride=2) # 8
flat4d = make_flat(out)
conv5d = self.conv5d(out) + make_shortcut(out, self.short5d) # 8
        out = conv5d # 8
flat5d = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 16
out = out + conv4d
out = self.conv4u(out)
flat4u = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 32
out = out + conv3d
out = self.conv3u(out)
flat3u = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 64
out = out + conv2d
out = self.conv2u(out)
flat2u = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) #128
out = out + conv1d
out = self.conv1u(out)
flat1u = make_flat(out)
logit2d = self.cls2d(flat2d).unsqueeze(2)
logit3d = self.cls3d(flat3d).unsqueeze(2)
logit4d = self.cls4d(flat4d).unsqueeze(2)
logit5d = self.cls5d(flat5d).unsqueeze(2)
logit1u = self.cls1u(flat1u).unsqueeze(2)
logit2u = self.cls2u(flat2u).unsqueeze(2)
logit3u = self.cls3u(flat3u).unsqueeze(2)
logit4u = self.cls4u(flat4u).unsqueeze(2)
logit = torch.cat((
logit2d,logit3d,logit4d,logit5d,
logit1u,logit2u,logit3u,logit4u,
),dim=2)
logit = F.dropout(logit, p=0.15,training=self.training)
logit = logit.sum(2)
logit = logit.view(logit.size(0),logit.size(1)) #unsqueeze(2)
prob = F.sigmoid(logit)
return logit,prob
#############################################################################
class PyNet_12(nn.Module):
def __init__(self, in_shape, num_classes):
super(PyNet_12, self).__init__()
in_channels, height, width = in_shape
self.preprocess = nn.Sequential(
*make_conv_bn_relu(in_channels, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(16, 16, kernel_size=1, stride=1, padding=0 ),
) # 128
self.conv1d = nn.Sequential(
*make_conv_bn_relu(16, 16, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(16, 16, kernel_size=3, stride=1, padding=1 ),
*make_conv_bn_relu(16,128, kernel_size=1, stride=1, padding=0 ),
) # 128
        self.short1d = nn.Conv2d(16, 128, kernel_size=1, stride=1, padding=0, bias=False)
self.conv2d = nn.Sequential(
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
) # 64
self.short2d = None #nn.Conv2d(64, 128, kernel_size=1, stride=1, padding=0, bias=False)
self.conv3d = nn.Sequential(
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,256, kernel_size=1, stride=1, padding=0 ),
) # 32
self.short3d = nn.Conv2d(128, 256, kernel_size=1, stride=1, padding=0, bias=False)
self.conv4d = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 16
self.short4d = None #nn.Identity()
self.conv5d = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
) # 8
self.short5d = None # nn.Identity()
self.conv4u = nn.Sequential(
*make_conv_bn_relu(256,256, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(256,256, kernel_size=3, stride=1, padding=1, groups=16 ),
) # 16
self.conv3u = nn.Sequential(
*make_conv_bn_relu(256,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
) # 32
self.conv2u = nn.Sequential(
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
) # 64
self.conv1u = nn.Sequential(
*make_conv_bn_relu(128,128, kernel_size=1, stride=1, padding=0 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
*make_conv_bn_relu(128,128, kernel_size=3, stride=1, padding=1, groups=16 ),
) # 128
self.cls2d = nn.Sequential(
*make_linear_bn_relu(128, 512),
nn.Linear(512, num_classes)
)
self.cls3d = nn.Sequential(
*make_linear_bn_relu(256, 512),
nn.Linear(512, num_classes)
)
self.cls4d = nn.Sequential(
*make_linear_bn_relu(256, 512),
nn.Linear(512, num_classes)
)
self.cls5d = nn.Sequential(
*make_linear_bn_relu(256, 512),
nn.Linear(512, num_classes)
)
self.cls1u = nn.Sequential(
*make_linear_bn_relu(128, 512),
nn.Linear(512, num_classes)
)
self.cls2u = nn.Sequential(
*make_linear_bn_relu(128, 512),
nn.Linear(512, num_classes)
)
self.cls3u = nn.Sequential(
*make_linear_bn_relu(128, 512),
nn.Linear(512, num_classes)
)
self.cls4u = nn.Sequential(
*make_linear_bn_relu(256, 512),
nn.Linear(512, num_classes)
)
def forward(self, x):
out = self.preprocess(x) #128
conv1d = self.conv1d(out) #128
out = F.max_pool2d(conv1d, kernel_size=2, stride=2) # 64
conv2d = self.conv2d(out) + make_shortcut(out, self.short2d) # 64
out = F.max_pool2d(conv2d, kernel_size=2, stride=2) # 32
flat2d = make_flat(out)
conv3d = self.conv3d(out) + make_shortcut(out, self.short3d) # 32
out = F.max_pool2d(conv3d, kernel_size=2, stride=2) # 16
flat3d = make_flat(out)
conv4d = self.conv4d(out) + make_shortcut(out, self.short4d) # 16
out = F.max_pool2d(conv4d, kernel_size=2, stride=2) # 8
flat4d = make_flat(out)
conv5d = self.conv5d(out) + make_shortcut(out, self.short5d) # 8
        out = conv5d # 8
flat5d = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 16
out = out + conv4d
out = self.conv4u(out)
flat4u = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 32
out = out + conv3d
out = self.conv3u(out)
flat3u = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) # 64
out = out + conv2d
out = self.conv2u(out)
flat2u = make_flat(out)
out = F.upsample_bilinear(out,scale_factor=2) #128
out = out + conv1d
out = self.conv1u(out)
flat1u = make_flat(out)
logit2d = self.cls2d(flat2d)
logit3d = self.cls3d(flat3d)
logit4d = self.cls4d(flat4d)
logit5d = self.cls5d(flat5d)
logit1u = self.cls1u(flat1u)
logit2u = self.cls2u(flat2u)
logit3u = self.cls3u(flat3u)
logit4u = self.cls4u(flat4u)
p=0.15
logit2d = F.dropout(logit2d, p=p, training=self.training)
logit3d = F.dropout(logit3d, p=p, training=self.training)
logit4d = F.dropout(logit4d, p=p, training=self.training)
logit5d = F.dropout(logit5d, p=p, training=self.training)
logit1u = F.dropout(logit1u, p=p, training=self.training)
logit2u = F.dropout(logit2u, p=p, training=self.training)
logit3u = F.dropout(logit3u, p=p, training=self.training)
logit4u = F.dropout(logit4u, p=p, training=self.training)
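        # deep-supervision style head: each scale's logits are regularized
        # with dropout independently, then summed into a single prediction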
logit = logit2d + logit3d + logit4d + logit5d + logit1u + logit2u + logit3u + logit4u
prob = F.sigmoid(logit)
return logit,prob
# main #################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
# https://discuss.pytorch.org/t/print-autograd-graph/692/8
#inputs = torch.randn(96,3,128,128)
inputs = torch.randn(96,3,112,112)
#inputs = torch.randn(96,3,96,96)
in_shape = inputs.size()[1:]
num_classes = 17
if 1:
net = PyNet_12(in_shape,num_classes).cuda().train()
x = Variable(inputs).cuda()
start = timer()
logit,prob = net.forward(x)
end = timer()
print ('cuda(): end-start=%0.0f ms'%((end - start)*1000))
#dot = make_dot(y)
#dot.view()
print(type(net))
print(net)
    print(prob)
| {
"repo_name": "chicm/carvana",
"path": "08-02/software/car-segment/net/imagenet/pyramidnet.py",
"copies": "2",
"size": "25198",
"license": "apache-2.0",
"hash": -7466655524780501000,
"line_mean": 36.2692307692,
"line_max": 129,
"alpha_frac": 0.5488826261,
"autogenerated": false,
"ratio": 2.945516193148603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9428908580073345,
"avg_score": 0.013098047835051583,
"num_lines": 676
} |
# 1. fn/names_url
import sys, unittest, json
sys.path.append('./')
sys.path.append('../')
import webapp
service = webapp.get_service(5004, 'fn/names_url')
class TestFnNamesUrl(webapp.WebappTestCase):
@classmethod
def get_service(self):
return service
def test_no_parameter(self):
"""See how a call with no parameters is handled.
This returns a 500 (as of 2017-10-30). More http-like would be for it to give a 400.
But the error message is not informative. TBD: issue."""
request = service.get_request('GET', {})
x = self.start_request_tests(request)
self.assertTrue(x.status_code >= 400)
self.assertTrue(u'url' in x.json()[u'message'], #informative?
'no "url" in "%s"' % x.json()[u'message'])
def test_non_http_uri(self):
"""Try a non-HTTP URI.
This returns a 500 (as of 2017-10-30). More http-like would be for it
to give a 400. But the error message is not informative. TBD: issue."""
request = service.get_request('GET', {u'url': u'urn:nbn:de:bvb:19-146642'})
x = self.start_request_tests(request)
self.assertTrue(x.status_code >= 400)
def test_nonexistent(self):
"""Nonexistent target.
This returns a 500 (as of 2017-10-30). More http-like would be for it
to give a 400. In any case, the error message is not informative. TBD: issue."""
request = service.get_request('GET', {u'url': u'https://example.com/nonexistent.txt'})
x = self.start_request_tests(request)
self.assertTrue(x.status_code >= 400)
self.assertTrue(x.status_code < 500)
# TBD: test for informative message
def test_bad_type(self):
"""See what happens when you give it a file of an unrecognized type.
It succeeds with no results. I would think that GNRD should have told
us that the file couldn't be processed, but it doesn't.
There is no way to distinguish in the GNRD response the absence of names
from an invalid file, so not sure what we should expect here.
TBD: GNRD issue.
NOTE: If this URL stops working, simply replace it by any other
similar URL on the web - a zip or tarball, or even an image."""
request = service.get_request('GET', {u'url': u'http://files.opentreeoflife.org/silva/silva-115/silva-115.tgz'})
x = self.start_request_tests(request)
self.assertEqual(x.json()[u'scientificNames'], [])
def test_large_input(self):
"""Test large input.
This takes 240 seconds (4 minutes) on a 10 Mb input file - but it works."""
# TBD: where should this file be located? Github?
request = service.get_request('GET', {u'url': u'https://github.com/jar398/tryphy/raw/master/some-names.txt'})
print >>sys.stderr, '\nBe patient, takes four minutes'
x = self.start_request_tests(request)
self.assert_success(x)
self.assertTrue(len(x.json()[u'scientificNames']) > 1000)
# TBD: test what happens if extraneous parameter supplied
# Insert here: edge case tests
# Insert here: inputs out of range, leading to error or long delay
# Insert here: error-generating conditions
# (See ../README.md)
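    # A sketch of the extraneous-parameter test mentioned above; that unknown
    # parameters are silently ignored is an assumption, not documented:
    def test_extra_parameter(self):
        request = service.get_request('GET', {u'url': u'http://en.wikipedia.org/wiki/Ant',
                                              u'bogus': u'1'})
        x = self.start_request_tests(request)
        self.assert_success(x)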
def test_example_1(self):
"""Try example from the documentation"""
x = self.start_request_tests(example_1)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
self.assertTrue(len(x.json()[u'scientificNames']) > 20)
self.assertTrue(u'Odontomachus bauri' in x.json()[u'scientificNames'])
def test_example_2(self):
x = self.start_request_tests(example_2)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
self.assertTrue(len(x.json()[u'scientificNames']) > 5)
self.assertTrue(u'Patagioenas inornata' in x.json()[u'scientificNames'])
def test_example_3(self):
x = self.start_request_tests(example_3)
self.assert_success(x)
# Insert: whether result is what it should be according to docs
self.assertTrue(len(x.json()[u'scientificNames']) > 20)
self.assertTrue(u'Lolium perenne' in x.json()[u'scientificNames'])
null=None; false=False; true=True
example_1 = service.get_request('GET', {u'url': u'http://en.wikipedia.org/wiki/Ant'})
example_2 = service.get_request('GET', {u'engine': u'1', u'url': u'https://en.wikipedia.org/wiki/Plain_pigeon'})
example_3 = service.get_request('GET', {u'url': u'http://www.fws.gov/westvirginiafieldoffice/PDF/beechridgehcp/Appendix_D_Table_D-1.pdf'})
if __name__ == '__main__':
webapp.main()
| {
"repo_name": "jar398/tryphy",
"path": "tests/test_fn_names_url.py",
"copies": "1",
"size": "4728",
"license": "bsd-2-clause",
"hash": 7092956164174308000,
"line_mean": 42.7777777778,
"line_max": 138,
"alpha_frac": 0.6429780034,
"autogenerated": false,
"ratio": 3.4815905743740796,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4624568577774079,
"avg_score": null,
"num_lines": null
} |
## 1. Geographic Data ##
import pandas as pd
airlines = pd.read_csv('airlines.csv')
airports = pd.read_csv('airports.csv')
routes = pd.read_csv('routes.csv')
print(airlines.iloc[0])
print(airports.iloc[0])
print(routes.iloc[0])
#What's the best way to link the data from these 3 different datasets together?
# We can link all tables using ids
#What are the formats of the latitude and longitude values? Float
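# For example, routes can be joined to airport coordinates with a pandas
# merge; a sketch -- the key column names 'source_id' and 'airport_id' are
# assumptions and may differ in these csv files:
# merged = routes.merge(airports, left_on='source_id', right_on='airport_id')
# print(merged.head())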
## 4. Workflow With Basemap ##
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180)
## 5. Converting From Spherical to Cartesian Coordinates ##
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180)
longitudes = airports["longitude"].tolist()
latitudes = airports["latitude"].tolist()
x, y = m(longitudes, latitudes)
## 6. Generating A Scatter Plot ##
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180)
x, y = m(longitudes, latitudes)
m.scatter(x,y,s=1)
## 7. Customizing The Plot Using Basemap ##
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180)
longitudes = airports["longitude"].tolist()
latitudes = airports["latitude"].tolist()
x, y = m(longitudes, latitudes)
m.scatter(x, y, s=1)
m.drawcoastlines()
## 8. Customizing The Plot Using Matplotlib ##
# Add code here, before creating the Basemap instance.
fig, ax = plt.subplots(figsize=(20, 15))
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180)
longitudes = airports["longitude"].tolist()
latitudes = airports["latitude"].tolist()
x, y = m(longitudes, latitudes)
m.scatter(x, y, s=1)
m.drawcoastlines()
ax.set_title('Scaled Up Earth With Coastlines')
plt.show()
## 9. Introduction to Great Circles ##
geo_routes = pd.read_csv('geo_routes.csv')
print(geo_routes.info())
print(geo_routes.head(5))
## 10. Displaying Great Circles ##
fig, ax = plt.subplots(figsize=(15,20))
m = Basemap(projection='merc', llcrnrlat=-80, urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180)
m.drawcoastlines()
def create_great_circles(df):
for index, row in df.iterrows():
end_lat, start_lat = row['end_lat'], row['start_lat']
end_lon, start_lon = row['end_lon'], row['start_lon']
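        # skip routes that span 180 degrees or more: drawgreatcircle draws a
        # broken line when the path wraps around the edge of the map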
if abs(end_lat - start_lat) < 180:
if abs(end_lon - start_lon) < 180:
m.drawgreatcircle(start_lon, start_lat, end_lon, end_lat)
dfw = geo_routes[geo_routes['source'] == "DFW"]
create_great_circles(dfw)
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Storytelling Data Visualization/Visualizing Geographic Data-223.py",
"copies": "1",
"size": "2587",
"license": "mit",
"hash": -5117652022457479000,
"line_mean": 32.1794871795,
"line_max": 102,
"alpha_frac": 0.6992655586,
"autogenerated": false,
"ratio": 2.6947916666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38940572252666666,
"avg_score": null,
"num_lines": null
} |
def get_url(a_board, a_num):
result = []
a = 0
while a < a_num:
a_board_n = a_board[a]
a_href = a_board_n['href'].split('/')[0]
print('a_href = ', a_href)
if len(a_href) == 0:
board_href = a_board_n['href']
board_url = 'https://www.ptt.cc' + board_href
result.append(board_url)
a += 1
else:
break
print('result = ', result)
return result
def get_title(a_board, a_num):
result = []
a = 0
while a < a_num:
a_board_n = a_board[a]
a_href = a_board_n['href'].split('/')[0]
if len(a_href) == 0:
board_title = a_board_n.find('div', 'board-title').string.strip()
result.append(board_title)
a += 1
else:
break
return result
def get_title3(a_board, a_num):
result = []
a = 0
while a < a_num:
a_board_n = a_board[a]
a_href = a_board_n['href'].split('/')[0]
if len(a_href) == 0:
board_title = a_board_n.find('div', 'board-title').string
result.append(board_title)
a += 1
else:
break
return result
def get_class(a_board, a_num):
result = []
a = 0
while a < a_num:
a_board_n = a_board[a]
a_href = a_board_n['href'].split('/')[0]
if len(a_href) == 0:
board_class = a_board_n.find('div', 'board-class').string.strip()
result.append(board_class)
a += 1
else:
break
return result
def get_nuser(a_board, a_num):
result = []
a = 0
while a < a_num:
a_board_n = a_board[a]
a_href = a_board_n['href'].split('/')[0]
if len(a_href) == 0:
board_nuser = a_board_n.find('div', 'board-nuser').string.strip()
result.append(board_nuser)
a += 1
else:
break
return result
def get_boardname(a_board, a_num):
result = []
a = 0
while a < a_num:
a_board_n = a_board[a]
a_href = a_board_n['href'].split('/')[0]
if len(a_href) == 0:
board_nuser = a_board_n.find('div', 'board-name').string.strip()
result.append(board_nuser)
a += 1
else:
break
return result
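# The get_title/get_title3/get_class/get_nuser/get_boardname helpers above
# differ only in the board-* div they read; a generic version (a sketch, not
# used elsewhere in this file) could be:
def get_board_field(a_board, a_num, field):
    result = []
    for a_board_n in a_board[:a_num]:
        if len(a_board_n['href'].split('/')[0]) != 0:
            break
        result.append(a_board_n.find('div', 'board-' + field).string.strip())
    return result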
##### Get article :
def get_a_board(url):
import requests
from bs4 import BeautifulSoup
    # send the request
# ex: r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
req = requests.get(url)
if req.status_code == 200:
# get page text
content = req.text
        # parse the page
soup = BeautifulSoup(content, "html.parser")
a_board = soup.find_all('a', 'board')
return a_board
else:
print('status_code != 200')
# get the first page of each popular board:
def get_into_board(board_name):
import requests
import urllib3
# from requests.packages.urllib3.exceptions import InsecureRequestWarning
url = 'https://www.ptt.cc/bbs/' + board_name + '/index.html'
load = {
'from': '/bbs/' + board_name + '/index.html',
'yes': 'yes'
}
# For GET:
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0',
'Host': 'www.ptt.cc',
'Connection': 'keep-alive',
}
rs = requests.session()
urllib3.disable_warnings()
# requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
res = rs.post('https://www.ptt.cc/ask/over18', verify=False, data=load)
res = rs.get(url,headers=headers)
if res.status_code == 200:
# get page text
content = res.text
return content
else:
print('status_code != 200')
def ptt_content_to_url(content):
from bs4 import BeautifulSoup
    # parse the page
soup = BeautifulSoup(content, "html.parser")
# get next page :
next_page = soup.find_all('a', 'btn wide')[1]['href']
next_page_url = 'https://www.ptt.cc' + next_page
return next_page_url
def ptt_url_to_content(url):
import requests
import urllib3
board_name = url.split('/', 3)[3]
load = {
'from': '/bbs/' + board_name,
'yes': 'yes'
}
rs = requests.session()
urllib3.disable_warnings()
res = rs.post('https://www.ptt.cc/ask/over18', verify=False, data=load)
    res = rs.get(url)
    if res.status_code == 200:
        return res.text
    else:
        print('status_code != 200')
# get the article titles of each board:
def ptt_content_to_title(content):
from bs4 import BeautifulSoup
import pandas as pd
import time
    # parse the page
soup = BeautifulSoup(content, "html.parser")
rent_soup = soup.find_all('div', 'r-ent')
# get next page :
# next_page = soup.find_all('a', 'btn wide')[1]['href']
# get board name :
board_name = soup.find('a', 'board')['href'].split('/')[2]
# get nrec :
nrec_lists = []
for nrec in rent_soup:
nrec_lists.append(nrec.find('div', 'nrec').string)
# get mark :
mark_lists = []
for mark in rent_soup:
mark_lists.append(mark.find('div', 'mark').string)
# get title & href:
title_lists = []
href_lists = []
for title in rent_soup:
if title.find('div', 'title').a != None:
title_lists.append(title.find('div', 'title').a.string)
href_lists.append(title.find('div', 'title').a['href'])
else:
title_lists.append(title.find('div', 'title').string.strip())
href_lists.append('None')
# get date :
date_lists = []
for md in rent_soup:
date_lists.append(md.find('div', 'date').string)
# get author :
author_lists = []
for author in rent_soup:
author_lists.append(author.find('div', 'author').string)
# get info time :
timenow = time.localtime()
get_time = time.strftime("%Y-%m-%d %H:%M:%S", timenow)
# get r-ent info :
r_ent_df = pd.DataFrame({ 'board': board_name,
'nrec': nrec_lists,
'mark': mark_lists,
'title': title_lists,
'href': href_lists,
'dates': date_lists,
'author': author_lists,
'get_time': get_time})
return r_ent_df
| {
"repo_name": "AmberFu/ptt_crawler",
"path": "ptt_board_url.py",
"copies": "1",
"size": "8265",
"license": "mit",
"hash": -6821261631350505000,
"line_mean": 28.0640569395,
"line_max": 101,
"alpha_frac": 0.5238153545,
"autogenerated": false,
"ratio": 3.2615814696485623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42853968241485624,
"avg_score": null,
"num_lines": null
} |
### 1. Get board Page html:
def get_js_page(url):
from bs4 import BeautifulSoup
from selenium import webdriver
# driver = webdriver.Firefox()
driver = webdriver.PhantomJS()
    driver.get(url)  # hand the URL to the browser
    pagesource = driver.page_source  # grab the rendered page source
soup = BeautifulSoup(pagesource, "html.parser")
a_board = soup.find_all('a', 'board')
return a_board
### 2. Get board dataframe:
""" 範例:
<div class="b-ent">
<a class="board" href="/bbs/Gossiping/index.html">
<div class="board-name">Gossiping</div>
<div class="board-nuser"><span class="hl f6">12533</span></div>
<div class="board-class">綜合</div>
<div class="board-title">◎[八卦板] 快來支持你喜歡的候選人</div>
</a>
</div>
"""
def get_hotboard_df(hot_board):
import pandas as pd
import time
url_list = []
board_list = []
user_num_list = []
class_list = []
title_list = []
getTime_list = []
for a in hot_board:
# Url:
board_href = a['href']
board_url = 'https://www.ptt.cc' + board_href
url_list.append(board_url)
# board-name:
board_name = a.find('div', 'board-name').string.strip()
board_list.append(board_name)
# board-nuser:
board_nuser = a.find('div', 'board-nuser').string.strip()
user_num_list.append(board_nuser)
# board-class:
board_class = a.find('div', 'board-class').string.strip()
class_list.append(board_class)
# board-title:
board_title = a.find('div', 'board-title').string.strip()
title_list.append(board_title)
# get info time :
timenow = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
getTime_list.append(timenow)
# Combine as a DataFrame:
columns_order = ['board', 'nuser', 'class', 'title', 'href', 'get_time']
boards_df = pd.DataFrame({'board': board_list,
'class': class_list,
'nuser': user_num_list,
'title': title_list,
'href': url_list,
'get_time': getTime_list},
columns = columns_order)
return boards_df
### main():
import time
import sys
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
hoturl = 'https://www.ptt.cc/bbs/hotboards.html'
hot_board = get_js_page(hoturl)
df = get_hotboard_df(hot_board)
end_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print('start time : ', start_time)
print('end time : ', end_time)
try:
outputFile = '~/Documents/ptt_hotboards_' + str(time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())) + '.csv'
df.to_csv(outputFile, sep=',', encoding='utf-8')
except Exception:
print("Unexpected error:", sys.exc_info()[0])
| {
"repo_name": "AmberFu/ptt_crawler",
"path": "ptt_hotboards_v1.py",
"copies": "1",
"size": "2948",
"license": "mit",
"hash": -4770832903510172000,
"line_mean": 32.488372093,
"line_max": 114,
"alpha_frac": 0.5493055556,
"autogenerated": false,
"ratio": 3.1101511879049677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9121317963376172,
"avg_score": 0.007627756025759058,
"num_lines": 86
} |
# 1 Gold Star
# The built-in <string>.split() procedure works
# okay, but fails to find all the words on a page
# because it only uses whitespace to split the
# string. To do better, we should also use punctuation
# marks to split the page into words.
# Define a procedure, split_string, that takes two
# inputs: the string to split and a string containing
# all of the characters considered separators. The
# procedure should return a list of strings that break
# the source string up by the characters in the
# splitlist.
def split_string(source, splitlist):
    if len(splitlist) == 1:
        source = source.split(splitlist)
    else:
        for element in splitlist:
            source = source.split(element)
            source = '|'.join(source)
        source = source.split('|')
    # drop empty strings in both branches (e.g. the '' that '//' produces
    # when splitting a URL on '/')
    while '' in source:
        source.remove('')
    return source
out = split_string("This is a test-of the,string separation-code!", " ,!-")
print(out)
# >>> ['This', 'is', 'a', 'test', 'of', 'the', 'string', 'separation', 'code']
out = split_string("After the flood ... all the colors came out.", " .")
print(out)
# >>> ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']
out = split_string("First Name,Last Name,Street Address,City,State,Zip Code", ",")
print(out)
# >>>['First Name', 'Last Name', 'Street Address', 'City', 'State', 'Zip Code']
out = split_string("http://this.domain.com/here/there/everywhere.html", '/')
print(out)
# >>>['http:', 'this.domain.com', 'here', 'there', 'everywhere.html']
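# For reference, the same splitting can be done with the standard-library re
# module (a sketch, not part of the original exercise):
import re
def split_string_re(source, splitlist):
    return [s for s in re.split('[' + re.escape(splitlist) + ']', source) if s]
print(split_string_re("After the flood ... all the colors came out.", " ."))
# >>> ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']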
| {
"repo_name": "JoseALermaIII/python-tutorials",
"path": "pythontutorials/Udacity/CS101/Lesson 16 - Problem Set/Q4-Better Splitting.py",
"copies": "1",
"size": "1524",
"license": "mit",
"hash": -6791508595602956000,
"line_mean": 33.6363636364,
"line_max": 82,
"alpha_frac": 0.6456692913,
"autogenerated": false,
"ratio": 3.4093959731543624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4555065264454362,
"avg_score": null,
"num_lines": null
} |
# 1. go to the page
# 2. collect all links
# 3. check the contents against the keyword dictionary
# 4. rank the pages according to keyword contents
# 5. group external links in a separate dictionary
# sites to crawl:
# http://hackaday.com/
# http://hackaday.io/
# http://dangerousprototypes.com/
# http://www.theledart.com/
# http://www.instructables.com/
# http://arduino.cc/
# https://github.com/
# http://people.ece.cornell.edu/land/courses/ece4760/
# http://fabacademy.org/archive/
# http://seeedstudio.com
# http://www.weirdlab.fr/
# http://fw.hardijzer.nl/
# other sites of interest:
# kickstarter projects:
# https://www.kickstarter.com/projects/mossmann/hackrf-an-open-source-sdr-platform
# partlists:
# http://dangerousprototypes.com/docs/Bus_Pirate_v3.6
# https://greatscottgadgets.com/tc13badge/tc13badge-kit-parts.pdf
# BOM's
# https://github.com/mossmann/hackrf/blob/master/doc/hardware/hackrf-one-bom.csv
# https://docs.google.com/spreadsheets/d/11RTZvoxy8NDVNH0rqRNsO86LkCeCrhs6zsusw9pf6KE/edit#gid=46576701
# KiCad .sch documents with BOM's
# https://github.com/greatscottgadgets/ubertooth/blob/master/hardware/broccoli/broccoli.sch
import requests
import os
import time
import json
from bs4 import BeautifulSoup
url = 'hackaday.com'
large_cache = {}
def get_cache_age(cachepath):
current_time = time.time()
last_updated = os.path.getmtime(cachepath)
return current_time - last_updated
def scrape_page(url):
    data = requests.get(url).text
    return data
def update_cache(url):
cache = {}
data = requests.get("http://" + url).text
soup = BeautifulSoup(data)
for link in soup.find_all('a'):
cache[link.get('href')] = '<<<nothing here yet>>>'
return cache
def get_cache(url):
max_age = 300
cache_path = url + '.cache.json'
try:
cache_file = open(cache_path, 'r')
cache_age = get_cache_age(cache_path)
except IOError:
cache_age = max_age + 1
if cache_age > max_age:
print "+++ REFRESHING CACHE +++"
cache_file = open(cache_path, 'w')
cache = update_cache(url)
json.dump(cache, cache_file)
else:
cache = json.load(cache_file)
return cache
def get_all_links(content):
links = []
soup = BeautifulSoup(content)
for link in soup.find_all('a'):
        links.append(link)
return links
# def add_page_to_index(index, url, content):
# words = content.split()
# for word in words:
# add_to_index(index, word, url)
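# crawl_web below calls union(), which is not defined in this file (it comes
# from the course code this crawler is based on); a minimal version:
def union(a, b):
    for e in b:
        if e not in a:
            a.append(e)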
def crawl_web(seed): # returns index, graph of inlinks
tocrawl = [seed]
crawled = []
graph = {} # <url>, [list of pages it links to]
index = {}
while tocrawl:
page = tocrawl.pop()
if page not in crawled:
# content = get_page(page)
content = requests.get(page).text
# add_page_to_index(index, page, content)
outlinks = get_all_links(content)
graph[page] = outlinks
union(tocrawl, outlinks)
crawled.append(page)
return index, graph
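# Steps 3 and 4 of the outline at the top (checking contents against the
# keyword dictionary and ranking pages) are not implemented below; a minimal
# term-frequency sketch, with the keyword list left as an assumption:
def rank_pages(pages, keywords):
    # pages: {url: page text}, keywords: list of terms to score against
    scores = {}
    for page_url, content in pages.items():
        text = content.lower()
        scores[page_url] = sum(text.count(k.lower()) for k in keywords)
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)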
cache = get_cache(url)
for i in cache:
if 'http://hackaday.com' in i:
        print i
| {
"repo_name": "s8/octopart-cpl-gallery",
"path": "code/octopart_crawler.py",
"copies": "1",
"size": "3017",
"license": "mit",
"hash": 6987369205041670000,
"line_mean": 23.3387096774,
"line_max": 103,
"alpha_frac": 0.6771627444,
"autogenerated": false,
"ratio": 2.8275538894095593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4004716633809559,
"avg_score": null,
"num_lines": null
} |
# 1gram, line
# '# 1574 1 1 1'
def read_line(line, n=1, version='20090715'):
parts = line.strip().split('\t')
return parts[0], int(parts[2])
def merge_lines(lines):
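    # merge counts for consecutive runs of the same token; assumes the
    # (token, count) pairs arrive sorted/grouped by token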
tokens = []
cur_token = None
cur_token_count = 0
for line in lines:
        if cur_token is None:
cur_token = line[0]
cur_token_count = line[1]
else:
if cur_token == line[0]:
cur_token_count += line[1]
else:
tokens.append((cur_token, cur_token_count))
cur_token = line[0]
cur_token_count = line[1]
tokens.append((cur_token, cur_token_count))
return tokens
if __name__ == '__main__':
l1 = '# 1574 1 1 1'
lr = read_line(l1)
assert len(lr) == 2
assert lr[0] == '#'
assert lr[1] == 1
# multilines
lines = [('#', 5), ('#', 9), ('#', 10), ('$1380', 3), ('$2195', 10), ('$2195', 20)]
line_tokens = merge_lines(lines)
assert len(line_tokens) == 3
assert line_tokens[0][0] == '#'
assert line_tokens[0][1] == 24
assert line_tokens[1][0] == '$1380'
assert line_tokens[1][1] == 3
assert line_tokens[2][0] == '$2195'
assert line_tokens[2][1] == 30
| {
"repo_name": "anderscui/spellchecker",
"path": "ngrams/read_ngram_tests.py",
"copies": "1",
"size": "1225",
"license": "mit",
"hash": -8556453844077711000,
"line_mean": 23.0196078431,
"line_max": 87,
"alpha_frac": 0.4995918367,
"autogenerated": false,
"ratio": 3.0625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9057709780847356,
"avg_score": 0.0008764111705288175,
"num_lines": 51
} |
"""1H-13C(methyl) - Multiple Quantum CPMG (2-state)
Analyzes HyCx methyl group multiple quantum CPMG measured on site-specific
13CH3-labeled methyl groups in a highly deuterated background. This is a
simplified basis set, which assumes you are on-resonance for 13C (ie, off-
resonance effects are not taken into account) as described in the reference:
[HxCx(a), HyCx(a), HxCy(a), HyCy(a),
HxCx(b), HyCx(b), HxCy(b), HyCy(b)]
Note
----
This calculation is designed specifically to analyze data from the experiment
found in the reference and can be run with either small_protein_flag='y' or 'n'.
Lewis Kay experiment: hmqc_CH3_exchange_bigprotein_*00_lek_v2
Reference
---------
Journal of the American Chemical Society (2004), 126, 3964-73
"""
import numpy as np
from numpy import linalg as la
from chemex.experiments.cpmg.base_cpmg import ProfileCPMG1
_EXP_DETAILS = {"small_protein": {"type": str, "default": "False"}}
class ProfileCPMGCH3MQ(ProfileCPMG1):
"""TODO: class docstring."""
EXP_DETAILS = dict(**ProfileCPMG1.EXP_DETAILS, **_EXP_DETAILS)
SPIN_SYSTEM = "ixysxy"
def __init__(self, name, data, exp_details, model):
super().__init__(name, data, exp_details, model)
self.small_protein = self.get_bool(self.exp_details["small_protein"])
# Set the delays in the experiments
self.t_zeta = 1.0 / (8.0 * 125.3)
self.delays = [self.t_zeta] + list(self.tau_cps.values())
# Set the row vector for detection
self.detect = self.liouv.detect["2iysx_a"]
# Set the varying parameters by default
for name, full_name in self.map_names.items():
if name.startswith(("dw", "r2_mq_a")):
self.params[full_name].set(vary=True)
def _calculate_unscaled_profile(self, params_local, **kwargs):
"""TODO: class docstring."""
self.liouv.update(params_local)
# Calculation of the propagators corresponding to all the delays
delays = dict(zip(self.delays, self.liouv.delays(self.delays)))
d_zeta = delays[self.t_zeta]
# Calculation of the propagators corresponding to all the pulses
p180_sx = self.liouv.perfect180["sx"]
p180_ix = self.liouv.perfect180["ix"]
p180_iy = self.liouv.perfect180["iy"]
# Calculate starting magnetization vector
mag0 = self.liouv.compute_mag_eq(params_local, term="2iysx")
if self.small_protein:
mag0 = d_zeta @ p180_sx @ p180_ix @ d_zeta @ mag0
# Calculating the cpmg trains
cp = {0: self.liouv.identity}
for ncyc in set(self.data["ncycs"][~self.reference]):
tau_cp = delays[self.tau_cps[ncyc]]
echo = tau_cp @ p180_iy @ tau_cp
cp_train = la.matrix_power(echo, int(ncyc))
cp[ncyc] = cp_train @ p180_sx @ cp_train
profile = [
self.liouv.collapse(self.detect @ cp[ncyc] @ mag0)
for ncyc in self.data["ncycs"]
]
return np.asarray(profile)
| {
"repo_name": "gbouvignies/chemex",
"path": "chemex/experiments/cpmg/ch3_mq.py",
"copies": "1",
"size": "3018",
"license": "bsd-3-clause",
"hash": 2170835028613479200,
"line_mean": 32.9101123596,
"line_max": 80,
"alpha_frac": 0.640490391,
"autogenerated": false,
"ratio": 3.0985626283367558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4239053019336756,
"avg_score": null,
"num_lines": null
} |
# 1: have scripts which extract from .pbit to .pbit.extract - gitignore .pbit (and .pbix), AND creates .pbix.chksum (which is only useful for versioning purposes - one can confirm the state of their pbix)
# - script basically extracts .pbit to new folder .pbit.extract, but a) also extracts double-zipped content, and b) formats stuff nicely so it's readable/diffable/mergeable.
# 2: have git hooks which check, before a commit:
# - checks that the .pbit.extract folder is up to date with the latest .pbit (i.e. they match exactly - and the .pbit hasn't been exported but user forgot to run the extract script)
# - adds a warning (with y/n continue feedback) if the .pbix has been updated *after* the latest .pbit.extract is updated. (I.e. they maybe forgot to export the latest .pbit and extract, or exported .pbit but forgot to extract.) Note that this will be obvious in the case of only a single change (as it were) - since .pbix aren't tracked, they'll see no changes to git tracked files.
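# A minimal sketch of the staleness check described in (2); the file names
# are placeholders and a real version would live in a .git/hooks/pre-commit
# script rather than in this module:
#
#   import os, sys
#   pbit, extract = 'report.pbit', 'report.pbit.extract'
#   if os.path.getmtime(pbit) > os.path.getmtime(extract):
#       sys.exit('%s is newer than %s - run the extract script first' % (pbit, extract))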
import zipfile
import os
import shutil
import fnmatch
import converters
CONVERTERS = [
('DataModelSchema', converters.JSONConverter('utf-16-le')),
('DiagramState', converters.JSONConverter('utf-16-le')),
('Report/Layout', converters.JSONConverter('utf-16-le')),
('Report/LinguisticSchema', converters.XMLConverter('utf-16-le', False)),
('[Content_Types].xml', converters.XMLConverter('utf-8-sig', True)),
('SecurityBindings', converters.NoopConverter()),
('Settings', converters.NoopConverter()),
('Version', converters.NoopConverter()),
('Report/StaticResources/', converters.NoopConverter()),
('DataMashup', converters.DataMashupConverter()),
('Metadata', converters.MetadataConverter()),
('*.json', converters.JSONConverter('utf-8'))
]
def find_converter(path):
for pattern, converter in CONVERTERS:
if fnmatch.fnmatch(path, pattern):
conv = converter
break
else:
conv = converters.NoopConverter()
return conv
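# e.g. find_converter('Report/Layout') picks the utf-16-le JSONConverter,
# while any path that matches no pattern falls through to NoopConverter.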
def extract_pbit(pbit_path, outdir, overwrite):
"""
Convert a pbit to vcs format
"""
# TODO: check ends in pbit
# TODO: check all expected files are present (in the right order)
# wipe output directory and create:
if os.path.exists(outdir):
if overwrite:
shutil.rmtree(outdir)
else:
raise Exception('Output path "{0}" already exists'.format(outdir))
os.mkdir(outdir)
order = []
with zipfile.ZipFile(pbit_path, compression=zipfile.ZIP_DEFLATED) as zd:
# read items (in the order they appear in the archive)
for name in zd.namelist():
order.append(name)
outpath = os.path.join(outdir, name)
# get converter:
conv = find_converter(name)
# convert
conv.write_raw_to_vcs(zd.read(name), outpath)
# write order files:
open(os.path.join(outdir, ".zo"), 'w').write("\n".join(order))
def compress_pbit(extracted_path, compressed_path, overwrite):
"""Convert a vcs store to valid pbit."""
# TODO: check all paths exists
if os.path.exists(compressed_path):
if overwrite:
os.remove(compressed_path)
else:
raise Exception('Output path "{0}" already exists'.format(compressed_path))
# get order
with open(os.path.join(extracted_path, ".zo")) as f:
order = f.read().split("\n")
with zipfile.ZipFile(compressed_path, mode='w',
compression=zipfile.ZIP_DEFLATED) as zd:
for name in order:
# get converter:
conv = find_converter(name)
# convert
with zd.open(name, 'w') as z:
conv.write_vcs_to_raw(os.path.join(extracted_path, name), z)
def _find_confs(path):
    """
    Find all .pbivcs.conf files (if any) in the directories along the path,
    ordered from the root of the path down to its deepest directory, e.g.
    '/path/.pbivcs.conf' would come before '/path/to/.pbivcs.conf'
    """
    # os.path.split only separates the last component, so walk up the tree
    # with dirname instead, visiting every ancestor directory:
    confs = []
    current = os.path.dirname(os.path.abspath(os.path.normpath(path)))
    while True:
        confpath = os.path.join(current, '.pbivcs.conf')
        if os.path.exists(confpath):
            confs.append(confpath)
        parent = os.path.dirname(current)
        if parent == current:
            break
        current = parent
    confs.reverse()  # shallowest first, matching the original ordering
    return confs
if __name__ == '__main__':
import configargparse
parser = configargparse.ArgumentParser(description="A utility for converting *.pbit files to and from a VCS-friendly format")
parser.add_argument('input', type=str, help="the input path")
parser.add_argument('output', type=str, help="the output path")
parser.add_argument('-x', action='store_true', dest="extract", default=True, help="extract pbit at INPUT to VCS-friendly format at OUTPUT")
parser.add_argument('-c', action='store_false', dest="extract", default=True, help="compress VCS-friendly format at INPUT to pbit at OUTPUT")
parser.add_argument('--over-write', action='store_true', dest="overwrite", default=False, help="if present, allow overwriting of OUTPUT. If not, will fail if OUTPUT exists")
# parse args first to get input path:
input_path = parser.parse_args().input
# now set config files for parser:
parser._default_config_files = _find_confs(input_path)
# now parse again to get final args:
args = parser.parse_args()
if args.input == args.output:
parser.error('Error! Input and output paths cannot be same')
if args.extract:
extract_pbit(args.input, args.output, args.overwrite)
else:
compress_pbit(args.input, args.output, args.overwrite)
| {
"repo_name": "kodonnell/powerbi-vcs",
"path": "pbivcs.py",
"copies": "1",
"size": "5644",
"license": "mit",
"hash": 6026769030615471000,
"line_mean": 40.8074074074,
"line_max": 387,
"alpha_frac": 0.6576895819,
"autogenerated": false,
"ratio": 3.7551563539587494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9908044966546968,
"avg_score": 0.0009601938623562889,
"num_lines": 135
} |
# --1.-- hello
from django.http import HttpResponse
# --2.-- hello_template
# helper function that uses settings.py to find templates and get them
from django.template.loader import get_template
# templates have a Context object to insert data we have generated to the template
from django.template import Context
# --3.-- HelloTemplate
from django.views.generic.base import TemplateView
# --4.-- hello_template_simple
from django.shortcuts import render_to_response
def hello(request):
name = "Mike"
html = "<html><body>Hi %s welcome! </body></html>" % name
return HttpResponse(html)
def hello_template(request):
name = "Mike"
t = get_template('hello.html')
html = t.render(Context({'name': name}))
return HttpResponse(html)
def hello_template_simple(request):
name = "Mike"
return render_to_response('hello.html', {'name': name})
class HelloTemplate(TemplateView):
template_name = 'hello_class.html'
def get_context_data(self, **kwargs):
context = super(HelloTemplate, self).get_context_data(**kwargs)
context['name'] = 'Mike'
        return context
| {
"repo_name": "pyjosh/djangoprojects",
"path": "django_test/article/tut1_views.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": -8881926080553042000,
"line_mean": 28.5789473684,
"line_max": 82,
"alpha_frac": 0.696349065,
"autogenerated": false,
"ratio": 3.6699346405228757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9840187034049781,
"avg_score": 0.005219334294619101,
"num_lines": 38
} |
"""1H - Pure Anti-phase Proton CPMG
Analyzes amide proton chemical exchange that is maintained as anti-phase
magnetization throughout the CPMG block. This results in lower intrinsic
relaxation rates and therefore better sensitivity. The calculations use a 12x12,
2-spin exchange matrix:
[ Hx(a), Hy(a), Hz(a), 2HxNz(a), 2HyNz(a), 2HzNz(a),
Hx(b), Hy(b), Hz(b), 2HxNz(b), 2HyNz(b), 2HzNz(b)]
Note
----
Off resonance effects are taken into account. The calculation is designed
explicitly for analyzing the Lewis Kay pulse sequence:
H1_CPMG_Rex_hsqc_lek_x00
with antiphase_flg set to 'y'
Journal of Biomolecular NMR (2011) 50, 13-8
"""
import numpy as np
from numpy import linalg as la
from chemex.experiments.cpmg.base_cpmg import ProfileCPMG2
class ProfileCPMGHNAP(ProfileCPMG2):
"""TODO: class docstring."""
SPIN_SYSTEM = "ixyzsz"
CONSTRAINTS = "hn_ap"
def __init__(self, name, data, exp_details, model):
super().__init__(name, data, exp_details, model)
# Set the row vector for detection
self.detect = self.liouv.detect["2izsz_a"]
# Set the varying parameters by default
for name, full_name in self.map_names.items():
if name.startswith(("dw", "r2_i_a")):
self.params[full_name].set(vary=True)
def _calculate_unscaled_profile(self, params_local, **kwargs):
"""TODO: class docstring."""
self.liouv.update(params_local)
# Calculation of the propagators corresponding to all the delays
delays = dict(zip(self.delays, self.liouv.delays(self.delays)))
d_neg = delays[self.t_neg]
d_eq = delays[self.time_eq]
# Calculation of the propagators corresponding to all the pulses
pulses = self.liouv.pulses_90_180_i()
p90 = np.array([pulses[name] for name in ["90px", "90py", "90mx", "90my"]])
p180 = np.array([pulses[name] for name in ["180px", "180py", "180mx", "180my"]])
p180pmx = 0.5 * (p180[0] + p180[2]) # +/- phase cycling
# Calculate starting magnetization vector
mag0 = self.liouv.compute_mag_eq(params_local, term="2izsz")
# Calculating the cpmg trains
cp1 = {0: self.liouv.identity}
cp2 = {0: self.liouv.identity}
for ncyc in set(self.data["ncycs"][~self.reference]):
tau_cp = delays[self.tau_cps[ncyc]]
echo = tau_cp @ p180[1] @ tau_cp
cp_train = la.matrix_power(echo, int(ncyc))
cp1[ncyc] = cp_train @ d_neg
cp2[ncyc] = d_neg @ cp_train
profile = [
self.liouv.collapse(
self.detect
@ d_eq
@ p90[0]
@ cp2[ncyc]
@ p180pmx
@ cp1[ncyc]
@ p90[0]
@ mag0
)
for ncyc in self.data["ncycs"]
]
return np.asarray(profile)
| {
"repo_name": "gbouvignies/chemex",
"path": "chemex/experiments/cpmg/hn_ap.py",
"copies": "1",
"size": "2909",
"license": "bsd-3-clause",
"hash": 8403258207461491000,
"line_mean": 31.6853932584,
"line_max": 88,
"alpha_frac": 0.5971124098,
"autogenerated": false,
"ratio": 3.127956989247312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4225069399047312,
"avg_score": null,
"num_lines": null
} |
""" 1) implementing __call__ method in BingoCage class
which gives it a function like properties"""
import random
words = ['pandas', 'numpy', 'matplotlib', 'seaborn', 'Tenserflow', 'Theano']
def reverse(x):
"""Reverse a letter"""
return x[::-1]
def key_len(x):
"""Sorting a list by their length"""
return sorted(x, key=len)
def key_reverse(x):
"""Sorting a list by reverse spelling here reverse() f(x) is defined by us
and key should be a function without execution"""
return sorted(x, key=reverse)
print(key_reverse(words))
print(sorted(words, key = lambda y:y[::-1]))
# EVEN CLASS INSTANCE CAN BE TREATED AS FUNCTION BY IMPLEMENTING __call__ METHOD
class BingoCage:
"""Giving a random element"""
def __init__(self, items):
self.item = list(items)
random.shuffle(self.item)
def pick(self):
try:
return self.item.pop()
        except IndexError:
            raise LookupError("Picking from empty BingoCage")
def __call__(self, *args, **kwargs):
return self.pick()
if __name__ == '__main__':
foo = BingoCage(range(2, 20, 2))
print(foo)
print("see that foo() and foo.pick() act in similar manner")
print(foo.pick())
print(foo())
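    # because __call__ is implemented, instances also pass the callable()
    # test and can be used anywhere a zero-argument function is expected:
    print(callable(foo))
    print([foo() for _ in range(3)])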
| {
"repo_name": "Aneesh540/python-projects",
"path": "NEW/one.py",
"copies": "1",
"size": "1237",
"license": "apache-2.0",
"hash": -674820254150940300,
"line_mean": 21.9074074074,
"line_max": 80,
"alpha_frac": 0.6127728375,
"autogenerated": false,
"ratio": 3.494350282485876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9590590769734391,
"avg_score": 0.0033064700502972106,
"num_lines": 54
} |
# 1 - Import library
import pygame
from pygame.locals import *
import math
import random
# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
keys = [False, False, False, False]
playerpos=[100,100]
acc=[0,0]
arrows=[]
badtimer=100
badtimer1=0
badguys=[[640,100]]
healthvalue=194
pygame.mixer.init()
# 3 - Load image
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/bgmain.jpg")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
badguyimg=badguyimg1
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
# 3.1 - Load audio
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.mp3')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
# 4 - keep looping through
running = 1
exitcode = 0
while running:
badtimer-=1
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the player on the screen at X:100, Y:100
for x in range(width/grass.get_width()+1):
for y in range(height/grass.get_height()+1):
            screen.blit(grass,(x*grass.get_width(),y*grass.get_height()))
screen.blit(castle,(0,30))
screen.blit(castle,(0,135))
screen.blit(castle,(0,240))
    screen.blit(castle,(0,345))
# 6.1 - Set player position and rotation
position = pygame.mouse.get_pos()
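    # atan2 gives the player-to-cursor angle in radians; multiplying by
    # 57.29 (~180/pi) converts to degrees, and the 360- flips the sign to
    # match pygame's rotation convention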
angle = math.atan2(position[1]-(playerpos[1]+32),position[0]-(playerpos[0]+26))
playerrot = pygame.transform.rotate(player, 360-angle*57.29)
playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
screen.blit(playerrot, playerpos1)
# 6.2 - Draw arrows
for bullet in arrows:
index=0
velx=math.cos(bullet[0])*10
vely=math.sin(bullet[0])*10
bullet[1]+=velx
bullet[2]+=vely
if bullet[1]<-64 or bullet[1]>640 or bullet[2]<-64 or bullet[2]>480:
arrows.pop(index)
index+=1
for projectile in arrows:
arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
# 6.3 - Draw badgers
if badtimer==0:
badguys.append([640, random.randint(50,430)])
badtimer=100-(badtimer1*2)
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for badguy in badguys:
if badguy[0]<-64:
badguys.pop(index)
badguy[0]-=7
# 6.3.1 - Attack castle
badrect=pygame.Rect(badguyimg.get_rect())
badrect.top=badguy[1]
badrect.left=badguy[0]
if badrect.left<64:
hit.play()
healthvalue -= random.randint(5,20)
badguys.pop(index)
#6.3.2 - Check for collisions
index1=0
for bullet in arrows:
bullrect=pygame.Rect(arrow.get_rect())
bullrect.left=bullet[1]
bullrect.top=bullet[2]
if badrect.colliderect(bullrect):
enemy.play()
acc[0]+=1
badguys.pop(index)
arrows.pop(index1)
index1+=1
# 6.3.3 - Next bad guy
index+=1
for badguy in badguys:
screen.blit(badguyimg, badguy)
# 6.4 - Draw clock
font = pygame.font.Font(None, 24)
survivedtext = font.render(str((90000-pygame.time.get_ticks())/60000)+":"+str((90000-pygame.time.get_ticks())/1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright=[635,5]
screen.blit(survivedtext, textRect)
# 6.5 - Draw health bar
screen.blit(healthbar, (5,5))
for health1 in range(healthvalue):
screen.blit(health, (health1+8,8))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
for event in pygame.event.get():
# check if the event is the X button
if event.type==pygame.QUIT:
# if it is quit the game
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key==K_w:
keys[0]=True
elif event.key==K_a:
keys[1]=True
elif event.key==K_s:
keys[2]=True
elif event.key==K_d:
keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
keys[0]=False
elif event.key==pygame.K_a:
keys[1]=False
elif event.key==pygame.K_s:
keys[2]=False
elif event.key==pygame.K_d:
keys[3]=False
if event.type==pygame.MOUSEBUTTONDOWN:
shoot.play()
position=pygame.mouse.get_pos()
acc[1]+=1
arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),playerpos1[0]+32,playerpos1[1]+32])
# 9 - Move player
if keys[0]:
playerpos[1]-=5
elif keys[2]:
playerpos[1]+=5
if keys[1]:
playerpos[0]-=5
elif keys[3]:
playerpos[0]+=5
#10 - Win/Lose check
if pygame.time.get_ticks()>=90000:
running=0
exitcode=1
if healthvalue<=0:
running=0
exitcode=0
if acc[1]!=0:
accuracy=acc[0]*1.0/acc[1]*100
else:
accuracy=0
# 11 - Win/lose display
if exitcode==0:
pygame.font.init()
font = pygame.font.Font(None, 24)
text = font.render("Accuracy: "+str(accuracy)+"%", True, (255,0,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(gameover, (0,0))
screen.blit(text, textRect)
else:
pygame.font.init()
font = pygame.font.Font(None, 24)
text = font.render("Accuracy: "+str(accuracy)+"%", True, (0,255,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(youwin, (0,0))
screen.blit(text, textRect)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
| {
"repo_name": "kp96/Galaxy-Wars",
"path": "game.py",
"copies": "1",
"size": "6735",
"license": "apache-2.0",
"hash": -2329969878622506000,
"line_mean": 32.1773399015,
"line_max": 147,
"alpha_frac": 0.5988121752,
"autogenerated": false,
"ratio": 3.0965517241379312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9070069017260309,
"avg_score": 0.0250589764155246,
"num_lines": 203
} |
import pygame
import math
import random
import sys
from pygame.locals import *
__author__ = 'piratf'
__blog__ = 'http://piratf.github.io/'
# thank to Julian Meyer
# https://plus.google.com/u/0/117404693911977592313?rel=author
# DIY
class gameConfig(object):
def __init__(self):
        self.castleCount = 4
self.width = 640
self.height = 480
self.keys = [False, False, False, False]
self.playerpos = [100, 100] # init position of player
self.vol = 0.3
self.running = 1
self.exitcode = 0
class protector(object):
def __init__(self):
self.acc = [0, 0]
self.arrows = []
self.speed = 5
self.healthvalue = 194
class Enemy(object):
def __init__(self):
self.damage = 10
self.badtime = 100
self.badtimeAdd = 0
self.badguys = [[game.width, 100]]
self.speed = 7
# 2 -Initialize the game
pygame.init()
pygame.mixer.init()
# class above #
game = gameConfig()
rabbit = protector()
badger = Enemy()
screen = pygame.display.set_mode((game.width, game.height))
# 3 - Load images
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
badguyimg=badguyimg1
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
# 3.1 - Load audio
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(game.vol)
enemy.set_volume(game.vol)
shoot.set_volume(game.vol)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(game.vol / 2)
# 4 - keep looping through
while game.running:
badger.badtime -= 1
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the screen elements
for x in range(game.width / grass.get_width() + 1):
for y in range(game.height / grass.get_height() + 1):
screen.blit(grass, (x*grass.get_rect().width, y*grass.get_rect().height))
### global x y
castleHeight = castle.get_rect().height
pos = (game.height - castleHeight * game.castleCount) / 2
for i in xrange(game.castleCount):
screen.blit(castle, (0, pos + castleHeight * i))
# screen.blit(player, playerpos)
# 6.1 - Set player position and rotation
position = pygame.mouse.get_pos()
angle = math.atan2(position[1] - (game.playerpos[1]+32), position[0] - (game.playerpos[0] + 26))
playerrot = pygame.transform.rotate(player, 360 - angle*57.29)
playerposRecorrect = (game.playerpos[0] - playerrot.get_rect().width/2, game.playerpos[1]-playerrot.get_rect().height/2)
screen.blit(playerrot, playerposRecorrect)
# 6.2 - Draw rabbit.arrows
for bullet in rabbit.arrows:
index = 0
velx = math.cos(bullet[0]) * 10
vely = math.sin(bullet[0]) * 10
bullet[1] += velx
bullet[2] += vely
if bullet[1] < -64 or bullet[1] > game.width or bullet[2] < -64 or bullet[2] > game.height:
rabbit.arrows.pop(index)
index += 1
for projectile in rabbit.arrows:
arrow1 = pygame.transform.rotate(arrow, 360 - projectile[0] * 57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
# 6.3 Draw enemys
if badger.badtime == 0:
badger.badguys.append([game.width, random.randint(50, game.height - 50)])
badger.badtime = 100 - (badger.badtimeAdd * 2)
if(badger.badtimeAdd >= 35):
badger.badtimeAdd = 35
else:
badger.badtimeAdd += 5
index = 0
for badguy in badger.badguys:
if( badguy[0] < -64):
badger.badguys.pop(index)
badguy[0] -= badger.speed
# 6.3.1 - Attack castle
badrect = pygame.Rect(badguyimg.get_rect())
badrect.top = badguy[1]
badrect.left = badguy[0]
deadline = castle.get_rect().width / 2 # when cross the line will get damage
if badrect.left < deadline:
# section 6.3.1 after if badrect.left<64:
hit.play()
rabbit.healthvalue -= random.randint(badger.damage / 2, badger.damage)
badger.badguys.pop(index)
# 6.3.2 - Check for collisions
indexarrow = 0
for bullet in rabbit.arrows:
bullrect = pygame.Rect(arrow.get_rect())
bullrect.left = bullet[1]
bullrect.top = bullet[2]
if badrect.colliderect(bullrect):
# section 6.3.2 after if badrect.colliderect(bullrect):
enemy.play()
rabbit.acc[0] += 1
badger.badguys.pop(index)
rabbit.arrows.pop(indexarrow)
indexarrow += 1
# 6.3.3 - Next bad guy
index += 1
for badguy in badger.badguys:
screen.blit(badguyimg, badguy)
# 6.4 - Draw clock
font = pygame.font.Font("resources\YaHei.Consolas.1.11b.ttf", 24)
survivedtext = font.render(str((90000 - pygame.time.get_ticks()) / 60000) + ":"+str((90000-pygame.time.get_ticks())/1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright = [635, 5]
screen.blit(survivedtext, textRect)
# 6.5 - Draw health bar
screen.blit(healthbar, (5, 5))
for healthadd in range(rabbit.healthvalue):
screen.blit(health, (healthadd + 8, 8))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
for event in pygame.event.get():
# check if the event is the X button
if event.type == pygame.QUIT:
# if it is quit the game
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key == K_w:
game.keys[0] = True
elif event.key==K_a:
game.keys[1]=True
elif event.key==K_s:
game.keys[2]=True
elif event.key==K_d:
game.keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
game.keys[0]=False
elif event.key==pygame.K_a:
game.keys[1]=False
elif event.key==pygame.K_s:
game.keys[2]=False
elif event.key==pygame.K_d:
game.keys[3]=False
if event.type == pygame.MOUSEBUTTONDOWN:
# section 8, play shooting sound
shoot.play()
position = pygame.mouse.get_pos()
rabbit.acc[1] += 1
rabbit.arrows.append([math.atan2(position[1] - (playerposRecorrect[1]+32), position[0] - (playerposRecorrect[0]+26)), playerposRecorrect[0]+32, playerposRecorrect[1]+32])
# 9 - Move player
if game.keys[0]:
game.playerpos[1] -= rabbit.speed
elif game.keys[2]:
game.playerpos[1] += rabbit.speed
if game.keys[1]:
game.playerpos[0] -= rabbit.speed
elif game.keys[3]:
game.playerpos[0] += rabbit.speed
# 10 - Win/Lose check
if pygame.time.get_ticks() >= 90000:
game.running = 0
game.exitcode = 1
if rabbit.healthvalue <= 0:
game.running = 0
game.exitcode = 0
if rabbit.acc[1] != 0:
rabbit.accuracy = rabbit.acc[0] * 1.0 / rabbit.acc[1] * 100
else:
rabbit.accuracy = 0
# 11 - Win/lose display
if game.exitcode == 0:
pygame.font.init()
font = pygame.font.Font("resources\YaHei.Consolas.1.11b.ttf", 24)
text = font.render("Accuracy: " + str(rabbit.accuracy) + "%", True, (255, 0, 0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery + 24
screen.blit(gameover, (0, 0))
screen.blit(text, textRect)
else:
pygame.font.init()
font = pygame.font.Font("resources\YaHei.Consolas.1.11b.ttf", 24)
text = font.render("Accuracy:" + str(rabbit.accuracy) + "%", True, (255, 0, 0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery + 24
screen.blit(youwin, (0, 0))
screen.blit(text, textRect)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
pygame.quit()
sys.exit() | {
"repo_name": "piratf/python",
"path": "myCastle/myCastle.py",
"copies": "1",
"size": "8724",
"license": "mpl-2.0",
"hash": -6953928184163962000,
"line_mean": 35.8143459916,
"line_max": 183,
"alpha_frac": 0.6035075653,
"autogenerated": false,
"ratio": 3.1415196254951385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4245027190795138,
"avg_score": null,
"num_lines": null
} |
############1. import modules
import math
import tkinter
############2a. def classes
class Board:
def __init__(self, state,row_length):
self.state = state
self.row_length = row_length
self.cells = [Cells(self,x) for x in range(row_length**2)]
def advance_state(self):
self.state = cycle(self.state, self.row_length)
def get_state(self):
return self.state
def get_row_length(self):
return int(self.row_length)
def get_cell(self,index):
return self.state[index]
def update_cells(self):
for cell in self.cells:
cell.update_cell()
class Cells:
def __init__(self, board, index):
self.x = index % board.get_row_length()
self.y = index // board.get_row_length()
self.index = index
self.board = board
self.state = board.get_state()[index]
self.colour = self.state_colour()
        self.cell = canvas.create_rectangle(self.x * cell_size, self.y * cell_size, (self.x + 1) * cell_size, (self.y + 1) * cell_size, fill = self.colour)
def __str__(self):
return str(self.state)
def state_colour(self):
if self.state == '0':
return 'black'
else:
return 'red'
def update_cell(self):
self.state = self.board.get_cell(self.index)
self.colour = self.state_colour()
self.colouring()
def colouring(self):
canvas.itemconfigure(self.cell, fill = self.colour)
############2b. set Global Variables
state = '0000000000000000000000000000000000000011000001100000000011000110000000100101010100100001110110110111000001010101010100000001110001110000000000000000000000000111000111000000010101010101000001110110110111000010010101010010000000110001100000000011000001100000000000000000000000000000000000000'
#state = '0001000000000111000000000010000000000011000001100000000011000110000000100001010100100001110110110111000001010101010100000001110001110000000000000010000000000111000111000000010101010101001001110110110111000010010101010010000000110001100000000011000001100000000000000000000000000000000000000'
row_length = int(math.sqrt(len(state)))
cell_size = 10
############3. define helper functions
def life(neighbors, cell):
    '''(int, str) -> str
    neighbors is the number of neighbors a cell has; evaluates the rules and
    returns '1' or '0' depending on life or death
    >>> life(4, '1')
    '0'
    >>> life(3, '0')
    '1'
    >>> life(2, '0')
    '0'
'''
if neighbors == 3:
life = '1'
elif neighbors == 2 and cell == '1':
life = '1'
else:
life = '0'
return life
def count_neighbors(index, state, row_length):
    ''' (int, str, int) -> int
    index is the position of the cell in the state; state is a string
    of 1s and 0s representing living or dead cells.
    Returns the number of live cells adjacent or diagonal to the cell.
'''
neighbors = []
left = index - 1
right = index + 1
top = index - row_length
bottom = index + row_length
upper_right = index - row_length + 1
upper_left = index - row_length - 1
lower_right = index + row_length + 1
lower_left = index + row_length - 1
left_side = False
right_side = False
if index % row_length != 0:
left_side = True
neighbors.append(int(state[left]))
if index % row_length != row_length-1:
right_side = True
neighbors.append(int(state[right]))
if index//row_length != 0:
neighbors.append(int(state[top]))
if left_side:
neighbors.append(int(state[upper_left]))
if right_side:
neighbors.append(int(state[upper_right]))
if index//row_length != row_length-1:
neighbors.append(int(state[bottom]))
if left_side:
neighbors.append(int(state[lower_left]))
if right_side:
neighbors.append(int(state[lower_right]))
return sum(neighbors)
def cycle(state, row_length):
    ''' (str, int) -> str
    state is the current game state, row_length is the board dimension - returns the updated state string
'''
new_state = ''
for index in range(row_length**2):
neighbors = count_neighbors(index,state,row_length)
cell = life(neighbors,state[index])
new_state = new_state + cell
return new_state
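# Hedged sanity check (an addition, not part of the original script): a
# horizontal blinker on a 5x5 board must flip to a vertical one after a
# single cycle; the expected string was worked out by hand from the rules above.
assert cycle('0000000000011100000000000', 5) == '0000000100001000010000000'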
############4. Event Handlers
def tick(board):
board.advance_state()
board.update_cells()
frame.after(500, tick, board)
############5. create frame & canvas
frame = tkinter.Tk()
canvas = tkinter.Canvas(frame, width= row_length*cell_size, height=row_length*cell_size)
############6. register event handlers
board = Board(state,row_length)
canvas.pack()
############7. initialize
tick(board)
frame.mainloop()  # bug fix: without entering the Tk event loop, the window never appears when run as a script
| {
"repo_name": "Trafire/gameoflife",
"path": "game_of_life3.py",
"copies": "1",
"size": "4795",
"license": "artistic-2.0",
"hash": -8820360692040151000,
"line_mean": 27.0409356725,
"line_max": 300,
"alpha_frac": 0.6229405631,
"autogenerated": false,
"ratio": 3.705564142194745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4828504705294745,
"avg_score": null,
"num_lines": null
} |
# 1 Imports
import serial
import numpy as np
import matplotlib.pyplot as plt
import sys
from select import select
from time import sleep
from mpl_toolkits.mplot3d import Axes3D
# 2 GLOBAL FUNCTIONS
# Make a unit sphere mesh (for plotting)
def sphere():
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x1 = 1 * np.outer(np.cos(u), np.sin(v))
y1 = 1 * np.outer(np.sin(u), np.sin(v))
z1 = 1 * np.outer(np.ones(np.size(u)), np.cos(v))
return x1, y1, z1
# Read values from serial port
def read_values():
values = []
temp = (ser.readline()).decode()
print(temp[:-1])
values = list(map(float, temp.split('\t')))
return values[0], values[1], values[2], values[3]
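# Hedged offline check (an addition): the tab-separated parsing used above,
# exercised without a serial port attached.
assert list(map(float, "0.5\t1.0\t2.0\t3.0".split('\t'))) == [0.5, 1.0, 2.0, 3.0]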
# Redraw Axis
def Axis():
ax.set_xlim3d([0.0, 5.0])
ax.set_xlabel('X')
ax.set_ylim3d([0.0, 5.0])
ax.set_ylabel('Y')
ax.set_zlim3d([0.0, 5.0])
ax.set_zlabel('Z')
ax.set_title('CapSense')
# ReadWrite reads from or writes to the serial buffer
def ReadWrite():
isdata = False
inp, outp, err = select([sys.stdin, ser], [], [], 0.1)
if sys.stdin in inp:
line = sys.stdin.readline()
ser.write(line.encode())
if ser in inp:
line = ser.readline().decode()
print(line[:-1])
isdata = line[:-2].replace('\t', '').isdigit() and not line[:-2].isdigit()
return isdata
# Main program
# Serial Port Initialization
ser = serial.Serial('/dev/ttyACM0', 115200)
ser.flush()
sleep(0.5)
# Figure Generation
fig = plt.figure()
plt.ion()
ax = fig.add_subplot(111, projection='3d')
x, y, z = sphere()
# loop
while True:
Axis()
isData = ReadWrite()
if isData:
t, x0, y0, z0 = read_values()
# Plot the surface
plt.gca().invert_yaxis()
ax.plot_surface(x+(x0-0.5), y+(y0-0.5), z+(z0-0.5), color='r',rcount = 20, ccount=20)
plt.pause(0.01)
ax.clear()
| {
"repo_name": "vpetrog/CapSens_3d",
"path": "Python/CapSens_3d.py",
"copies": "1",
"size": "1866",
"license": "mit",
"hash": -1674483276459868400,
"line_mean": 21.756097561,
"line_max": 93,
"alpha_frac": 0.5927116827,
"autogenerated": false,
"ratio": 2.7934131736526946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38861248563526946,
"avg_score": null,
"num_lines": null
} |
import sys as s
import subprocess as sb
from time import time
from parsingInfo import parseInfo
from actions import userNodeSelectionAct,randomSubSamplingAct,parseList
from featuresVector import featuresCreate
from misc import mergeList
from preformat import process
#/!\ The list of samples ID is supposed to be the same as the list of .match files! Each .match file must correspond to one single sample!
def main():
iMatrix = raw_input("Write down the CSV file name of the data matrix in the folder \"meta\" [ without the extension .csv ]\n")
if (iMatrix == ""):
iMatrix = "Info"
fastaFileName = raw_input("Write down the FASTA file names in the folder \"meta\" [ without the extension .fasta ]\n")
if (fastaFileName == ""):
fastaFileName = "GREENGENES_gg16S_unaligned_10022015"
print "/!\ Data getting parsed..."
try:
samplesInfoList,infoList = parseInfo(iMatrix)
filenames = [sample[0] for sample in samplesInfoList]
except IOError:
print "\nERROR: Maybe the filename",iMatrix,".csv does not exist in \"meta\" folder.\n"
s.exit(0)
print "-- End of parsing\n"
sb.call("ls ./meta/match > sampleidlist",shell=True)
sampleidlist = sb.check_output("sed 's/.match//g' sampleidlist | sed 's/testfiles//g' | sed '/^$/d'",shell=True).split()
sb.call("rm -f sampleidlist",shell=True)
result = sb.check_output("ls ./meta/match/testfiles",shell=True)
if not result:
print "/!\ Pre-processing files for parsing..."
process(sampleidlist)
print "/!\ Pre-processing done."
print "/!\ Constructing the features vectors..."
sampleList = mergeList(sampleidlist,filenames)
try:
matchingNodes,idSequences,_,_ = featuresCreate(sampleList,fastaFileName)
except ValueError:
print "/!\ ERROR: Please look at the line above."
print "/!\ ERROR: If the line above is blank, it may be an uncatched ValueError.\n"
s.exit(0)
print "-- End of construction\n"
dataArray = [samplesInfoList,infoList,idSequences,sampleList,matchingNodes]
answer = ""
while not ((answer == "exit") or (answer == "exit()") or (answer == "quit")):
try:
print "What do you want to do?"
print "[Write down the number matching with the action required. Details are in README file]"
print " 1: User node selection"
print " 2: Random sub-sampling"
print "[To quit, write down exit]"
answer = raw_input("Your answer?\n")
if (answer =="1"):
userNodeSelectionAct(dataArray)
print "-- End \n"
elif (answer == "2"):
randomSubSamplingAct(dataArray)
print "-- End \n"
elif not ((answer == "exit") or (answer == "exit()") or (answer == "quit")):
print "\n/!\ ERROR: Please enter a number between 1 and 2 included, or 'exit' if you want to quit."
raise ValueError
except ValueError:
print "/!\ ERROR: Please look at the line above."
print "/!\ ERROR: If the line above is blank, it may be an uncatched ValueError.\n"
| {
"repo_name": "kuredatan/taxoclassifier",
"path": "main.py",
"copies": "1",
"size": "3192",
"license": "mit",
"hash": -4673505485289917000,
"line_mean": 47.3636363636,
"line_max": 138,
"alpha_frac": 0.6284461153,
"autogenerated": false,
"ratio": 3.9850187265917603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013065035104214974,
"num_lines": 66
} |
## 1. Introduction ##
import matplotlib.pyplot as plt
import pandas as pd
movie_reviews = pd.read_csv("fandango_score_comparison.csv")
fig = plt.figure(figsize=(5,12))
ax1 = fig.add_subplot(4,1,1)
ax2 = fig.add_subplot(4,1,2)
ax3 = fig.add_subplot(4,1,3)
ax4 = fig.add_subplot(4,1,4)
ax1.set_xlim(0,5.0)
ax2.set_xlim(0,5.0)
ax3.set_xlim(0,5.0)
ax4.set_xlim(0,5.0)
movie_reviews["RT_user_norm"].hist(ax=ax1)
movie_reviews["Metacritic_user_nom"].hist(ax=ax2)
movie_reviews["Fandango_Ratingvalue"].hist(ax=ax3)
movie_reviews["IMDB_norm"].hist(ax=ax4)
## 2. Mean ##
def calc_mean(data):
data = data.values
m = sum(data)/len(data)
return(m)
user_reviews = movie_reviews[['RT_user_norm','Metacritic_user_nom','Fandango_Ratingvalue','IMDB_norm']]
reviews = user_reviews.apply(calc_mean)
rt_mean = reviews['RT_user_norm']
mc_mean = reviews["Metacritic_user_nom"]
fg_mean = reviews["Fandango_Ratingvalue"]
id_mean = reviews["IMDB_norm"]
print("Rotten Tomatoes (mean):", rt_mean)
print("Metacritic (mean):", mc_mean)
print("Fandango (mean):",fg_mean)
print("IMDB (mean):",id_mean)
## 3. Variance and standard deviation ##
def calc_mean(series):
vals = series.values
mean = sum(vals) / len(vals)
return mean
def calc_variance(series):
vals = series.values
mean = sum(vals)/len(vals)
v = [(i-mean)**2 for i in vals]
v = sum(v)/len(v)
return(v)
def calc_std(v):
return(v**(1/2))
mean_reviews = user_reviews.apply(calc_mean)
var_reviews = user_reviews.apply(calc_variance)
std_reviews = var_reviews.apply(calc_std)
rt_var = var_reviews['RT_user_norm']
mc_var = var_reviews["Metacritic_user_nom"]
fg_var = var_reviews["Fandango_Ratingvalue"]
id_var = var_reviews["IMDB_norm"]
rt_stdev = std_reviews['RT_user_norm']
mc_stdev = std_reviews["Metacritic_user_nom"]
fg_stdev = std_reviews["Fandango_Ratingvalue"]
id_stdev = std_reviews["IMDB_norm"]
print("Rotten Tomatoes (mean):", rt_mean)
print("Metacritic (mean):", mc_mean)
print("Fandango (mean):",fg_mean)
print("IMDB (mean):",id_mean)
print("Rotten Tomatoes (variance):", rt_var)
print("Metacritic (variance):", mc_var)
print("Fandango (variance):",fg_var)
print("IMDB (variance):",id_var)
print("Rotten Tomatoes (Standard Deviation):", rt_stdev)
print("Metacritic (Standard Deviation):", mc_stdev)
print("Fandango (Standard Deviation):",fg_stdev)
print("IMDB (Standard Deviation):",id_stdev)
## 4. Scatter plots ##
fig = plt.figure(figsize=(4,8))
ax1 = fig.add_subplot(3,1,1)
ax2 = fig.add_subplot(3,1,2)
ax3 = fig.add_subplot(3,1,3)
ax1.set_xlim(0,5.0)
ax2.set_xlim(0,5.0)
ax3.set_xlim(0,5.0)
ax1.scatter(movie_reviews["RT_user_norm"], movie_reviews["Fandango_Ratingvalue"])
ax2.scatter(movie_reviews["Metacritic_user_nom"], movie_reviews["Fandango_Ratingvalue"])
ax3.scatter(movie_reviews["IMDB_norm"], movie_reviews["Fandango_Ratingvalue"])
## 5. Covariance ##
def calc_mean(series):
vals = series.values
mean = sum(vals) / len(vals)
return mean
def calc_covariance(x,y):
x_mean = calc_mean(x)
y_mean = calc_mean(y)
xval = [(i-x_mean) for i in x]
yval = [(i-y_mean) for i in y]
codeviates = [xval[i] * yval[i] for i in range(len(x))]
return sum(codeviates) / len(codeviates)
rt_fg_covar = calc_covariance(movie_reviews['RT_user_norm'],movie_reviews['Fandango_Ratingvalue'])
mc_fg_covar = calc_covariance(movie_reviews['Metacritic_user_nom'],movie_reviews['Fandango_Ratingvalue'])
id_fg_covar = calc_covariance(movie_reviews['IMDB_norm'],movie_reviews['Fandango_Ratingvalue'])
print("Covariance between Rotten Tomatoes and Fandango:", rt_fg_covar)
print("Covariance between Metacritic and Fandango", mc_fg_covar)
print("Covariance between IMDB and Fandango", id_fg_covar)
## 6. Correlation ##
def calc_mean(series):
vals = series.values
mean = sum(vals) / len(vals)
return mean
def calc_variance(series):
mean = calc_mean(series)
squared_deviations = (series - mean)**2
mean_squared_deviations = calc_mean(squared_deviations)
return mean_squared_deviations
def calc_covariance(series_one, series_two):
x = series_one.values
y = series_two.values
x_mean = calc_mean(series_one)
y_mean = calc_mean(series_two)
x_diffs = [i - x_mean for i in x]
y_diffs = [i - y_mean for i in y]
codeviates = [x_diffs[i] * y_diffs[i] for i in range(len(x))]
return sum(codeviates) / len(codeviates)
def calc_correlation(s1,s2):
co = calc_covariance(s1,s2)
std1 = calc_variance(s1)**(1/2)
std2 = calc_variance(s2)**(1/2)
return (co/(std1*std2))
rt_fg_covar = calc_covariance(movie_reviews["RT_user_norm"], movie_reviews["Fandango_Ratingvalue"])
mc_fg_covar = calc_covariance(movie_reviews["Metacritic_user_nom"], movie_reviews["Fandango_Ratingvalue"])
id_fg_covar = calc_covariance(movie_reviews["IMDB_norm"], movie_reviews["Fandango_Ratingvalue"])
rt_fg_corr = calc_correlation(movie_reviews["RT_user_norm"], movie_reviews["Fandango_Ratingvalue"])
mc_fg_corr = calc_correlation(movie_reviews["Metacritic_user_nom"], movie_reviews["Fandango_Ratingvalue"])
id_fg_corr = calc_correlation(movie_reviews["IMDB_norm"], movie_reviews["Fandango_Ratingvalue"])
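# Hedged cross-check (an addition, not part of the exercise): numpy's built-in
# should agree with the hand-rolled correlation, since the population/sample
# normalization factors cancel in the correlation coefficient.
import numpy as np
assert abs(rt_fg_corr - np.corrcoef(movie_reviews["RT_user_norm"], movie_reviews["Fandango_Ratingvalue"])[0, 1]) < 1e-6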
print("Correlation between Rotten Tomatoes and Fandango:", rt_fg_covar)
print("Correlation between Metacritic and Fandango", mc_fg_covar)
print("Correlation between IMDB and Fandango", id_fg_covar) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Beginner/Challenge_ Descriptive Statistics-199.py",
"copies": "1",
"size": "5360",
"license": "mit",
"hash": -8992975822006001000,
"line_mean": 34.0392156863,
"line_max": 106,
"alpha_frac": 0.6972014925,
"autogenerated": false,
"ratio": 2.6666666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3863868159166667,
"avg_score": null,
"num_lines": null
} |
## 1. Introduction ##
import pandas as pd
import matplotlib.pyplot as plt
women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')
major_cats = ['Biology', 'Computer Science', 'Engineering', 'Math and Statistics']
fig = plt.figure(figsize=(12, 12))
for sp in range(0,4):
ax = fig.add_subplot(2,2,sp+1)
ax.plot(women_degrees['Year'], women_degrees[major_cats[sp]], c='blue', label='Women')
ax.plot(women_degrees['Year'], 100-women_degrees[major_cats[sp]], c='green', label='Men')
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
ax.set_title(major_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off")
plt.legend(loc='upper right')
plt.show()
## 3. Setting Line Color Using RGB ##
fig = plt.figure(figsize=(12, 12))
for sp in range(0,4):
ax = fig.add_subplot(2,2,sp+1)
# The color for each line is assigned here.
ax.plot(women_degrees['Year'], women_degrees[major_cats[sp]], c=(0/255,107/255,164/255), label='Women')
ax.plot(women_degrees['Year'], 100-women_degrees[major_cats[sp]], c=(255/255,128/255,14/255), label='Men')
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
ax.set_title(major_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off")
plt.legend(loc='upper right')
plt.show()
## 4. Setting Line Width ##
cb_dark_blue = (0/255, 107/255, 164/255)
cb_orange = (255/255, 128/255, 14/255)
fig = plt.figure(figsize=(12, 12))
for sp in range(0,4):
ax = fig.add_subplot(2,2,sp+1)
# Set the line width when specifying how each line should look.
ax.plot(women_degrees['Year'], women_degrees[major_cats[sp]], c=cb_dark_blue, label='Women',linewidth = 3)
ax.plot(women_degrees['Year'], 100-women_degrees[major_cats[sp]], c=cb_orange, label='Men',linewidth = 3)
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
ax.set_title(major_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off")
plt.legend(loc='upper right')
plt.show()
## 5. Improve the Layout and Ordering ##
stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']
fig = plt.figure(figsize=(18, 3))
for sp in range(0,6):
ax = fig.add_subplot(1,6,sp+1)
ax.plot(women_degrees['Year'], women_degrees[stem_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
ax.plot(women_degrees['Year'], 100-women_degrees[stem_cats[sp]], c=cb_orange, label='Men', linewidth=3)
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
ax.set_title(stem_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off")
plt.legend(loc='upper right')
plt.show()
## 7. Annotating in Matplotlib ##
fig = plt.figure(figsize=(18, 3))
for sp in range(0,6):
ax = fig.add_subplot(1,6,sp+1)
ax.plot(women_degrees['Year'], women_degrees[stem_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
ax.plot(women_degrees['Year'], 100-women_degrees[stem_cats[sp]], c=cb_orange, label='Men', linewidth=3)
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
ax.set_title(stem_cats[sp])
if sp == 0:
ax.text(2005, 87, 'Men')
ax.text(2002, 8, 'Women')
elif sp == 5:
ax.text(2005, 62, 'Men')
ax.text(2001, 35, 'Women')
ax.tick_params(bottom="off", top="off", left="off", right="off")
plt.legend(loc='upper right')
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Storytelling Data Visualization/Color, Layout, and Annotations-221.py",
"copies": "1",
"size": "3726",
"license": "mit",
"hash": 1087989128212456200,
"line_mean": 33.8317757009,
"line_max": 116,
"alpha_frac": 0.6425120773,
"autogenerated": false,
"ratio": 2.6614285714285715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8732774336750004,
"avg_score": 0.014233262395713337,
"num_lines": 107
} |
## 1. Introduction ##
import pandas as pd
titanic_survival = pd.read_csv("titanic_survival.csv")
## 2. Finding the Missing Data ##
age = titanic_survival["age"]
print(age.loc[10:20])
age_is_null = pd.isnull(age)
age_null_true = age[age_is_null]
age_null_count = len(age_null_true)
print(age_null_count)
## 3. Whats the big deal with missing data? ##
age_is_null = pd.isnull(titanic_survival["age"])
good_ages = titanic_survival["age"][age_is_null == False]
correct_mean_age = sum(good_ages) / len(good_ages)
## 4. Easier Ways to Do Math ##
correct_mean_age = titanic_survival["age"].mean()
correct_mean_fare = titanic_survival["fare"].mean()
## 5. Calculating Summary Statistics ##
passenger_classes = [1, 2, 3]
fares_by_class = {}
for this_class in passenger_classes:
pclass_rows = titanic_survival[titanic_survival["pclass"] == this_class]
pclass_fares = pclass_rows["fare"]
fare_for_class = pclass_fares.mean()
fares_by_class[this_class] = fare_for_class
## 6. Making Pivot Tables ##
passenger_survival = titanic_survival.pivot_table(index="pclass", values="survived")
passenger_age = titanic_survival.pivot_table(index="pclass", values="age")
print(passenger_age)
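# Hedged aside (an addition): pivot_table's default aggregation is the mean,
# so the same per-class figures fall out of a groupby.
print(titanic_survival.groupby("pclass")["age"].mean())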
## 7. More Complex Pivot Tables ##
import numpy as np
port_stats = titanic_survival.pivot_table(index="embarked", values=["fare","survived"], aggfunc=np.sum)
print(port_stats)
## 8. Drop Missing Values ##
drop_na_rows = titanic_survival.dropna(axis=0)
drop_na_columns = titanic_survival.dropna(axis=1)
new_titanic_survival = titanic_survival.dropna(axis=0,subset=["age", "sex"])
## 9. Using iloc to Access Rows by Position ##
# We have already sorted new_titanic_survival by age
first_five_rows = new_titanic_survival.iloc[0:5]
first_ten_rows = new_titanic_survival.iloc[0:10]
row_index_25 = new_titanic_survival.loc[25]
row_position_fifth = new_titanic_survival.iloc[4]
## 10. Using Column Indexes ##
first_row_first_column = new_titanic_survival.iloc[0,0]
all_rows_first_three_columns = new_titanic_survival.iloc[:,0:3]
row_index_83_age = new_titanic_survival.loc[83,"age"]
row_index_1000_pclass = new_titanic_survival.loc[766,"pclass"]
row_index_1100_age = new_titanic_survival.loc[1100, "age"]
row_index_25_survived = new_titanic_survival.loc[25, "survived"]
five_rows_three_cols = new_titanic_survival.iloc[0:5,0:3]
## 11. Reindexing Rows ##
titanic_reindexed = new_titanic_survival.reset_index(drop=True)
print(titanic_reindexed.iloc[0:5,0:3])
## 12. Apply Functions Over a DataFrame ##
def hundredth_row(column):
hundredth_item = column.iloc[99]
return hundredth_item
hundredth_row = titanic_survival.apply(hundredth_row)
def not_null_count(column):  # note: despite its name, this returns the count of null values
column_null = pd.isnull(column)
null = column[column_null]
return len(null)
column_null_count = titanic_survival.apply(not_null_count)
## 13. Applying a Function to a Row ##
def is_minor(row):
if row["age"] < 18:
return True
else:
return False
minors = titanic_survival.apply(is_minor, axis=1)
def generate_age_label(row):
age = row["age"]
if pd.isnull(age):
return "unknown"
elif age < 18:
return "minor"
else:
return "adult"
age_labels = titanic_survival.apply(generate_age_label, axis=1)
## 14. Calculating Survival Percentage by Age Group ##
titanic_survival["age_labels"] = age_labels  # bug fix: the pivot index must exist as a column
age_group_survival = titanic_survival.pivot_table(index="age_labels", values="survived") | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Analysis with Pandas Intermediate/Working with Missing Data-12.py",
"copies": "1",
"size": "3414",
"license": "mit",
"hash": -5665321346692120000,
"line_mean": 28.188034188,
"line_max": 103,
"alpha_frac": 0.7059168131,
"autogenerated": false,
"ratio": 2.5844057532172595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8750281320994808,
"avg_score": 0.008008249064490066,
"num_lines": 117
} |
## 1. Introduction ##
import sqlite3
conn = sqlite3.connect("factbook.db")
query_plan_one = conn.execute("explain query plan select * from facts where population > 1000000 and population_growth < 0.05;").fetchall()
print(query_plan_one)
## 2. Query plan for multi-column queries ##
conn = sqlite3.connect("factbook.db")
conn.execute("create index if not exists pop_idx on facts(population);").fetchall()
conn.execute("create index if not exists pop_growth_idx on facts(population_growth);").fetchall()
query_plan_two = conn.execute("explain query plan select * from facts where population > 1000000 and population_growth < 0.05;").fetchall()
print(query_plan_two)
## 5. Creating a multi-column index ##
conn = sqlite3.connect("factbook.db")
conn.execute("create index if not exists pop_pop_growth_idx on facts(population, population_growth);")
query_plan_three = conn.execute("explain query plan select * from facts where population > 1000000 and population_growth < 0.05;").fetchall()
print(query_plan_three)
## 6. Covering index ##
conn = sqlite3.connect("factbook.db")
conn.execute("create index if not exists pop_pop_growth_idx on facts(population, population_growth);")
query_plan_four = conn.execute("explain query plan select population, population_growth from facts where population > 1000000 and population_growth < 0.05;").fetchall()
print(query_plan_four)
## 7. Covering index for single column ##
conn = sqlite3.connect("factbook.db")
conn.execute("create index if not exists pop_pop_growth_idx on facts(population, population_growth);")
query_plan_five = conn.execute("explain query plan select population from facts where population > 1000000;").fetchall()
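# Hedged addition (not part of the original lesson): PRAGMA index_list shows
# which indexes now exist on the facts table.
print(conn.execute("pragma index_list(facts);").fetchall())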
print(query_plan_five) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "SQL and Databases Advanced/Multi-column indexing-192.py",
"copies": "1",
"size": "1702",
"license": "mit",
"hash": -3558964067178076000,
"line_mean": 47.6571428571,
"line_max": 168,
"alpha_frac": 0.7555816686,
"autogenerated": false,
"ratio": 3.5458333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9759844548110727,
"avg_score": 0.008314090764521406,
"num_lines": 35
} |
## 1. Introduction ##
import sqlite3
conn = sqlite3.connect('factbook.db')
schema = conn.cursor().execute('pragma table_info(facts);').fetchall()
for item in schema:
print(item)
## 3. Explain query plan ##
conn = sqlite3.connect("factbook.db")
query_plan_one = conn.execute("explain query plan select * from facts where area > 40000;").fetchall()
query_plan_two = conn.execute("explain query plan select area from facts where area > 40000;").fetchall()
query_plan_three = conn.execute("explain query plan select * from facts where name = 'Czech Republic';").fetchall()
print(query_plan_one)
print(query_plan_two)
print(query_plan_three)
## 5. Time complexity ##
conn = sqlite3.connect("factbook.db")
query_plan_four = conn.execute("explain query plan select * from facts where id = 20;").fetchall()
print(query_plan_four)
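# Aside (an addition, hedged): a lookup on the integer primary key walks the
# table's rowid b-tree -- a binary search -- which is why the plan above avoids
# a full table scan even without a user-created index.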
## 9. All together now ##
conn = sqlite3.connect("factbook.db")
query_plan_six = conn.execute("explain query plan select * from facts where population > 10000 ;").fetchall()
print(query_plan_six)
conn.execute("create index if not exists pop_idx on facts(population)")
query_plan_seven = conn.execute("explain query plan select * from facts where population > 10000 ;").fetchall()
print(query_plan_seven) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "SQL and Databases Advanced/Introduction to Indexing-191.py",
"copies": "1",
"size": "1237",
"license": "mit",
"hash": 7707703663825764000,
"line_mean": 36.5151515152,
"line_max": 115,
"alpha_frac": 0.7299919159,
"autogenerated": false,
"ratio": 3.4265927977839334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46565847136839333,
"avg_score": null,
"num_lines": null
} |
## 1. Introduction ##
strings = ["data science", "big data", "metadata"]
regex = "data"
## 2. Wildcards in Regular Expressions ##
strings = ["bat", "robotics", "megabyte"]
regex = "b.t"
## 3. Searching the Beginnings And Endings Of Strings ##
strings = ["better not put too much", "butter in the", "batter"]
bad_string = "We also wouldn't want it to be bitter"
regex = "^b.tter"
## 5. Reading and Printing the Data Set ##
import csv
posts_with_header = list(csv.reader(open('askreddit_2015.csv','r')))
posts = posts_with_header[1:]
for item in posts[:10]:
print(item)
## 6. Counting Simple Matches in the Data Set with re() ##
import re
of_reddit_count = 0
for item in posts:
if re.search('of Reddit', item[0]) is not None:
of_reddit_count +=1
## 7. Using Square Brackets to Match Multiple Characters ##
import re
of_reddit_count_old = 0
of_reddit_count = 0
for row in posts:
if re.search("of Reddit", row[0]) is not None:
of_reddit_count_old += 1
if re.search("of [rR]eddit", row[0]) is not None:
of_reddit_count += 1
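# Hedged aside (an addition, not part of the exercise): the re.IGNORECASE flag
# is an alternative to spelling out character classes by hand; note it also
# matches variants such as "OF REDDIT".
of_reddit_count_flag = 0
for row in posts:
    if re.search("of reddit", row[0], re.IGNORECASE) is not None:
        of_reddit_count_flag += 1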
## 8. Escaping Special Characters ##
import re
serious_count = 0
for item in posts:
if re.search('\[Serious\]',item[0]) is not None:
serious_count +=1
## 9. Combining Escaped Characters and Multiple Matches ##
import re
serious_count_old = 0
serious_count = 0
for row in posts:
if re.search("\[Serious\]", row[0]) is not None:
serious_count_old += 1
if re.search("\[[sS]erious\]", row[0]) is not None:
serious_count += 1
## 10. Adding More Complexity to Your Regular Expression ##
import re
serious_count_old,serious_count = 0,0
for row in posts:
if re.search("\[[Ss]erious\]", row[0]) is not None:
serious_count_old += 1
if re.search("[\[\(][Ss]erious[\]\)]", row[0]) is not None:
serious_count += 1
## 11. Combining Multiple Regular Expressions ##
import re
serious_start_count = 0
serious_end_count = 0
serious_count_final = 0
for item in posts:
if re.search('^[\[\(][Ss]erious[\]\)]',item[0]) is not None:
serious_start_count +=1
if re.search('[\[\(][Ss]erious[\]\)]$',item[0]) is not None:
serious_end_count +=1
if re.search('^[\[\(][Ss]erious[\]\)]|[\[\(][Ss]erious[\]\)]$',item[0]) is not None:
serious_count_final +=1
## 12. Using Regular Expressions to Substitute Strings ##
import re
posts_new = []
for item in posts:
item[0] = re.sub('[\[\(][Ss]erious[\]\)]','[Serious]',item[0])
posts_new.append(item)
## 13. Matching Years with Regular Expressions ##
import re
year_strings = []
for item in strings:
    match = re.search('[0-9][0-9][0-9][0-9]', item)
    # bug fix: check the match before calling group(); the original raised
    # AttributeError on items without four digits (and int() never returns None)
    if match is not None:
        val = int(match.group(0))
        if val > 999 and val < 3000:
            year_strings.append(item)
## 14. Repeating Characters in Regular Expressions ##
import re
year_strings = []
for item in strings:
if re.search('[1-2][0-9]{3}',item) is not None:
year_strings.append(item)
## 15. Challenge: Extracting all Years ##
import re
years = re.findall('[1-2][0-9]{3}',years_string) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Intermediate/Regular Expressions-164.py",
"copies": "1",
"size": "3090",
"license": "mit",
"hash": -6379589411207080000,
"line_mean": 24.1300813008,
"line_max": 88,
"alpha_frac": 0.6187702265,
"autogenerated": false,
"ratio": 2.898686679174484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8919714657777179,
"avg_score": 0.01954844957946094,
"num_lines": 123
} |
## 1. Introduction to the data ##
import pandas as pd
cars = pd.read_csv("auto.csv")
unique_regions = cars['origin'].unique()
print(unique_regions)
## 2. Dummy variables ##
dummy_cylinders = pd.get_dummies(cars["cylinders"], prefix="cyl")
cars = pd.concat([cars, dummy_cylinders], axis=1)
print(cars.head())
dummy_years = pd.get_dummies(cars["year"], prefix="year")
cars = pd.concat([cars, dummy_years], axis=1)
cars = cars.drop("year", axis=1)
cars = cars.drop("cylinders", axis=1)
print(cars.head())
## 3. Multiclass classification ##
import numpy as np  # bug fix: numpy is used below but was never imported here
shuffled_rows = np.random.permutation(cars.index)
shuffled_cars = cars.iloc[shuffled_rows]
highest_train_row = int(cars.shape[0] * .70)
train = shuffled_cars.iloc[0:highest_train_row]
test = shuffled_cars.iloc[highest_train_row:]
## 4. Training a multiclass logistic regression model ##
from sklearn.linear_model import LogisticRegression
unique_origins = cars["origin"].unique()
unique_origins.sort()
models = {}
X = train[[x for x in train.columns if x.startswith("cyl") or x.startswith("year")]]
print(X.shape)
for origin in unique_origins:
y = (train["origin"] == origin)
lr = LogisticRegression()
lr.fit(X, y)
models[origin] = lr
print(models)
## 5. Testing the models ##
testing_probs = pd.DataFrame(columns=unique_origins)
test = test[[x for x in test.columns if x.startswith("cyl") or x.startswith("year")]]
print(test.shape)
for origin in unique_origins:
    # bug fix: `features` was never defined; after the filtering above, `test`
    # already holds only the cyl_/year_ dummy columns
    testing_probs[origin] = models[origin].predict_proba(test)[:,1]
## 6. Choose the origin ##
predicted_origins = testing_probs.idxmax(axis = 1)
print(predicted_origins) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Intermediate/Multiclass classification-75.py",
"copies": "1",
"size": "1641",
"license": "mit",
"hash": 1720536649932301600,
"line_mean": 27.3103448276,
"line_max": 85,
"alpha_frac": 0.704448507,
"autogenerated": false,
"ratio": 2.894179894179894,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9061898243852781,
"avg_score": 0.007346031465422648,
"num_lines": 58
} |
## 1. Introduction to the Data ##
import pandas
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython notebook magic; commented out so the file also runs as a plain script)
pisa = pandas.DataFrame({"year": range(1975, 1988),
"lean": [2.9642, 2.9644, 2.9656, 2.9667, 2.9673, 2.9688, 2.9696,
2.9698, 2.9713, 2.9717, 2.9725, 2.9742, 2.9757]})
print(pisa)
plt.scatter(pisa["year"], pisa["lean"])
## 2. Fit the Linear Model ##
import statsmodels.api as sm
y = pisa.lean # target
X = pisa.year # features
X = sm.add_constant(X) # add a column of 1's as the constant term
# OLS -- Ordinary Least Squares Fit
linear = sm.OLS(y, X)
# fit model
linearfit = linear.fit()
linearfit.summary()
## 3. Define a Basic Linear Model ##
# Our predicted values of y
yhat = linearfit.predict(X)
print(yhat)
residuals = y - yhat
## 4. Histogram of Residuals ##
# The variable residuals is in memory
residuals.hist(bins = 5)
## 6. Sum of Squares ##
import numpy as np
# sum the (predicted - observed) squared
SSE = np.sum((y.values-yhat)**2)
RSS = np.sum((np.mean(y.values) - yhat)**2)
TSS = np.sum((y.values - np.mean(y.values))**2)
## 7. R-Squared ##
SSE = np.sum((y.values-yhat)**2)
ybar = np.mean(y.values)
RSS = np.sum((ybar-yhat)**2)
TSS = np.sum((y.values-ybar)**2)
R2 = RSS / TSS
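# Hedged cross-check (an addition): statsmodels reports the same statistic.
print("R2:", R2, "statsmodels:", linearfit.rsquared)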
## 9. Coefficients of the Linear Model ##
# Print the models summary
#print(linearfit.summary())
#The models parameters
print("\n",linearfit.params)
delta = linearfit.params[1] * 15
## 10. Variance of Coefficients ##
# Enter your code here.
SSE = np.sum((y.values - yhat)**2)
# Compute variance in X
xvar = np.sum((pisa.year - pisa.year.mean())**2)
# Compute variance in b1
s2b1 = (SSE / (y.shape[0] - 2)) / xvar
## 11. T-Distribution ##
from scipy.stats import t
# 100 values between -3 and 3
x = np.linspace(-3,3,100)
# Compute the pdf with 3 degrees of freedom
print(t.pdf(x=x, df=3))
# Pdf with 3 degrees of freedom
tdist3 = t.pdf(x=x, df=3)
# Pdf with 30 degrees of freedom
tdist30 = t.pdf(x=x, df=30)
# Plot pdfs
plt.plot(x, tdist3)
plt.plot(x, tdist30)
## 12. Statistical Significance of Coefficients ##
# The variable s2b1 is in memory. The variance of beta_1
tstat = linearfit.params["year"] / ((s2b1) ** (1/2))
## 13. The P-Value ##
# At the 95% confidence interval for a two-sided t-test we must use a p-value of 0.975
pval = 0.975
# The degrees of freedom
df = pisa.shape[0] - 2
# The probability to test against
p = t.cdf(tstat, df=df)
beta1_test = p > pval   # bug fix: test the statistic instead of hard-coding True | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Intermediate/Intermediate linear regression-91.py",
"copies": "1",
"size": "2453",
"license": "mit",
"hash": -8755501218436172000,
"line_mean": 21.7222222222,
"line_max": 90,
"alpha_frac": 0.6526701998,
"autogenerated": false,
"ratio": 2.6123535676251333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3765023767425133,
"avg_score": null,
"num_lines": null
} |
## 1. Introduction to the data ##
import pandas
import matplotlib.pyplot as plt
# Read data from csv
pga = pandas.read_csv("pga.csv")
# Normalize the data
pga.distance = (pga.distance - pga.distance.mean()) / pga.distance.std()
pga.accuracy = (pga.accuracy - pga.accuracy.mean()) / pga.accuracy.std()
print(pga.head())
plt.scatter(pga.distance, pga.accuracy)
plt.xlabel('normalized distance')
plt.ylabel('normalized accuracy')
plt.show()
## 2. Linear model ##
from sklearn.linear_model import LinearRegression
import numpy as np
# We can add a dimension to an array by using np.newaxis
print("Shape of the series:", pga.distance.shape)
print("Shape with newaxis:", pga.distance[:, np.newaxis].shape)
# The X variable in LinearRegression.fit() must have 2 dimensions
linear_model = LinearRegression()
linear_model.fit(pga.distance[:,np.newaxis], pga.accuracy)
theta1 = linear_model.coef_[0]
## 3. Cost function, introduction ##
# The cost function of a single variable linear model
def cost(theta0, theta1, x, y):
# Initialize cost
J = 0
# The number of observations
m = len(x)
# Loop through each observation
for i in range(m):
# Compute the hypothesis
h = theta1 * x[i] + theta0
# Add to cost
J += (h - y[i])**2
# Average and normalize cost
J /= (2*m)
return J
# The cost for theta0=0 and theta1=1
print(cost(0, 1, pga.distance, pga.accuracy))
theta0 = 100
theta1s = np.linspace(-3,2,100)
costs = []
for theta1 in theta1s:
costs.append(cost(theta0, theta1, pga.distance, pga.accuracy))
plt.plot(theta1s, costs)
## 4. Cost function, continued ##
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
# Example of a Surface Plot using Matplotlib
# Create x an y variables
x = np.linspace(-10,10,100)
y = np.linspace(-10,10,100)
# We must create variables to represent each possible pair of points in x and y
# ie. (-10, 10), (-10, -9.8), ... (0, 0), ... ,(10, 9.8), (10,9.8)
# x and y need to be transformed to 100x100 matrices to represent these coordinates
# np.meshgrid will build a coordinate matrices of x and y
X, Y = np.meshgrid(x,y)
print(X[:5,:5],"\n",Y[:5,:5])
# Compute a 3D parabola
Z = X**2 + Y**2
# Open a figure to place the plot on
fig = plt.figure()
# Initialize 3D plot
ax = fig.gca(projection='3d')
# Plot the surface
ax.plot_surface(X=X,Y=Y,Z=Z)
plt.show()
# Use these for your excerise
theta0s = np.linspace(-2,2,100)
theta1s = np.linspace(-2,2, 100)
COST = np.empty(shape=(100,100))
T0s,T1s = np.meshgrid(theta0s,theta1s)
for i in range(100):
for j in range(100):
COST[i,j] = cost(T0s[0,i], T1s[j,0], pga.distance,pga.accuracy)
fig = plt.figure()
ax = fig.gca(projection='3d')
# bug fix: plot the cost over the theta grids, not the parabola's X, Y mesh
ax.plot_surface(X=T0s, Y=T1s, Z=COST)
plt.show()
## 5. Cost function, slopes ##
def partial_cost_theta1(theta0, theta1, x, y):
# Hypothesis
h = theta0 + theta1*x
# Hypothesis minus observed times x
diff = (h - y) * x
# Average to compute partial derivative
partial = diff.sum() / (x.shape[0])
return partial
def partial_cost_theta0(theta0,theta1,x,y):
h = theta0 + theta1 * x
diff = h - y
partial = diff.sum() / (x.shape[0])
return partial
partial1 = partial_cost_theta1(0, 5, pga.distance, pga.accuracy)
print("partial1 =", partial1)
partial0 = partial_cost_theta0(1,1,pga.distance,pga.accuracy)
print("partial0 = ", partial0)
## 6. Gradient descent algorithm ##
# x is our feature vector -- distance
# y is our target variable -- accuracy
# alpha is the learning rate
# theta0 is the intial theta0
# theta1 is the intial theta1
def gradient_descent(x, y, alpha=0.1, theta0=0, theta1=0):
max_epochs = 1000 # Maximum number of iterations
counter = 0 # Intialize a counter
    c = cost(theta0, theta1, x, y) ## Initial cost (bug fix: the arguments were swapped and the pga globals were used instead of the x, y parameters)
costs = [c] # Lets store each update
# Set a convergence threshold to find where the cost function in minimized
# When the difference between the previous cost and current cost
# is less than this value we will say the parameters converged
convergence_thres = 0.000001
cprev = c + 10
theta0s = [theta0]
theta1s = [theta1]
# When the costs converge or we hit a large number of iterations will we stop updating
while (np.abs(cprev - c) > convergence_thres) and (counter < max_epochs):
cprev = c
# Alpha times the partial deriviative is our updated
update0 = alpha * partial_cost_theta0(theta0, theta1, x, y)
update1 = alpha * partial_cost_theta1(theta0, theta1, x, y)
# Update theta0 and theta1 at the same time
# We want to compute the slopes at the same set of hypothesised parameters
# so we update after finding the partial derivatives
theta0 -= update0
theta1 -= update1
# Store thetas
theta0s.append(theta0)
theta1s.append(theta1)
# Compute the new cost
        c = cost(theta0, theta1, x, y)  # bug fix: use the x, y parameters, not the pga globals
# Store updates
costs.append(c)
counter += 1 # Count
return {'theta0': theta0, 'theta1': theta1, "costs": costs}
print("Theta1 =", gradient_descent(pga.distance, pga.accuracy)['theta1'])
gd = gradient_descent(pga.distance,pga.accuracy, 0.01)
plt.scatter(np.arange(0,len(gd["costs"])),gd["costs"])
plt.show() | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Machine learning Intermediate/Gradient descent-120.py",
"copies": "1",
"size": "5374",
"license": "mit",
"hash": -4489831073113787000,
"line_mean": 29.3672316384,
"line_max": 90,
"alpha_frac": 0.6635653145,
"autogenerated": false,
"ratio": 3.100980957876515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4264546272376515,
"avg_score": null,
"num_lines": null
} |
# -1 is an invalid value: it stands for an empty spot and is used to differentiate a full queue from an empty one
class MyCircularQueue:
def __init__(self, k: int):
self.queue = k*[-1]
self.front = 0 # index of frontmost spot in the queue (unless empty)
self.back = 0 # index of first empty spot behind the queue
def enQueue(self, value: int) -> bool:
if self.isFull():
return False
self.queue[self.back] = value
self.back = (self.back + 1) % len(self.queue)
return True
def deQueue(self) -> bool:
if self.isEmpty():
return False
self.queue[self.front] = -1
self.front = (self.front + 1) % len(self.queue)
return True
def Front(self) -> int:
return self.queue[self.front]
def Rear(self) -> int:
return self.queue[(self.back - 1) % len(self.queue)]
def isEmpty(self) -> bool:
return self.front == self.back and self.queue[self.front] == -1
def isFull(self) -> bool:
return self.front == self.back and self.queue[self.front] != -1
# Your MyCircularQueue object will be instantiated and called as such:
# obj = MyCircularQueue(k)
# param_1 = obj.enQueue(value)
# param_2 = obj.deQueue()
# param_3 = obj.Front()
# param_4 = obj.Rear()
# param_5 = obj.isEmpty()
# param_6 = obj.isFull()
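# A minimal sanity check (an addition, not part of the LeetCode template):
if __name__ == "__main__":
    q = MyCircularQueue(3)
    assert q.enQueue(1) and q.enQueue(2) and q.enQueue(3)
    assert q.isFull() and not q.enQueue(4)
    assert q.Front() == 1 and q.Rear() == 3
    assert q.deQueue() and q.Front() == 2 and not q.isFull()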
| {
"repo_name": "SelvorWhim/competitive",
"path": "LeetCode/DesignCircularQueue.py",
"copies": "1",
"size": "1355",
"license": "unlicense",
"hash": -925041307391984500,
"line_mean": 29.7954545455,
"line_max": 96,
"alpha_frac": 0.6007380074,
"autogenerated": false,
"ratio": 3.3374384236453203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44381764310453203,
"avg_score": null,
"num_lines": null
} |
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
# day-of-week of the 1st of the current month: 0 = Monday ... 6 = Sunday
# (bug fixes: the original drifted its "Sunday date" by adding 6 instead of 7,
# started counting in 1900 instead of 1901, and its leap test
# "(year%4==0 or year%400==0) and (year != 1900 or year != 2000)" contained a
# tautology that wrongly made 1900 a leap year)
day = 0   # 1 Jan 1900 was a Monday
sundays = 0
month = 1
year = 1900
while(year <= 2000):
    while (month <= 12):
        if (day == 6 and year >= 1901):
            sundays += 1
        if (month == 4 or month == 6 or month == 9 or month == 11):
            day = (day + 30) % 7
        elif(month == 2):
            if(year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)):
                day = (day + 29) % 7
            else:
                day = (day + 28) % 7
        else:
            day = (day + 31) % 7
        month += 1
        print("Sundays : " , sundays)
        print("Day : ", day)
        print(month)
    year+=1
    month=1
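# Hedged cross-check (an addition, not part of the original solution): the
# standard library's datetime agrees on the count for 1901-2000.
import datetime
check = sum(1 for y in range(1901, 2001) for m in range(1, 13)
            if datetime.date(y, m, 1).weekday() == 6)
print("datetime cross-check (1901-2000):", check)   # known Project Euler answer: 171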
print("Sundays : " , sundays)
print("Day : ", day)
print(month)
print(year) | {
"repo_name": "Chane-O/CloudComputing",
"path": "Lab3/Euler19.py",
"copies": "1",
"size": "1320",
"license": "mit",
"hash": 6118242797865544000,
"line_mean": 21.0166666667,
"line_max": 109,
"alpha_frac": 0.5174242424,
"autogenerated": false,
"ratio": 3.1578947368421053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4175318979242105,
"avg_score": null,
"num_lines": null
} |
# 1KHz_SW_OSX.py
#
# A mono _pure_ sinewave generator using STANDARD text mode Python 2.6.7 to at least 2.7.3.
# This DEMO kids level 1KHz generator is mainly for a MacBook Pro, (13 inch in my case), OSX 10.7.5 and above.
# It is another simple piece of testgear for the young amateur electronics enthusiast and
# requires pyaudio to be fully installed in order to work. Enjoy... ;o)
# PyAudio can be obtained from here:- http://people.csail.mit.edu/hubert/pyaudio/
#
# It also works on Windows Vista, 32 bit, on various machines with Python 2.6.x to 2.7.3.
# It also works on Debian 6.0.0 using Python 2.6.6 on an HP dv2036ea notebook.
# It also works on "Ubuntu 12.04, Python 2.7, Dell built-in soundcard", with many thanks to Hubert Pham, author
# of pyaudio itself, for testing...
#
# The hardware modifications can be found here:-
# http://code.activestate.com/recipes/578282-for-macbook_pro-heads-only-simple-lf-audio-oscillo/?in=lang-python
#
# Ensure the sound is enabled and the volume is turned up. Use the volume control to vary the amplitude...
#
# Copy the file to a folder/drawer/directory of your choice as "1KHz_SW_OSX.py" without the quotes.
#
# Start the Python interpreter from a Terminal/CLI window.
#
# To run the sinewave generator, (depending upon the platform), just use at the ">>>" prompt:-
#
# >>> execfile("/full/path/to/1KHz_SW_OSX.py")<CR>
#
# And away you go...
#
# This code is issued as GPL3...
#
# Connect an oscilloscope to the earphone socket(s) to see the sinewave waveform(s) being generated.
#
# $VER: 1KHz_SW_OSX.py_Version_0.00.10_(C)2012_B.Walker_G0LCU.
# The only import required...
import pyaudio
# Initialise the only _variable_ in use...
n=0
# Set up a basic user screen...
# This assumes the minimum default 80x24 Terminal window size...
print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n$VER: 1KHz_SW_OSX.py_Version_0.00.10_(C)2012_B.Walker_G0LCU.\n")
print("A DEMO kids level, platform independent, 1KHz _pure_ sinewave generator.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
# Open the stream required, mono mode only...
stream=pyaudio.PyAudio().open(format=pyaudio.paInt8,channels=1,rate=16000,output=True)
# Now generate the 1KHz signal at the speakers/headphone output for about 10 seconds...
# Sine wave, to 8 bit depth only...
for n in range(0,10000,1): stream.write("\x00\x30\x5a\x76\x7f\x76\x5a\x30\x00\xd0\xa6\x8a\x80\x8a\xa6\xd0")
# Close the open _channel(s)_...
stream.close()
pyaudio.PyAudio().terminate()
# End of 1KHz_SW_OSX.py program...
# Enjoy finding simple solutions to often very difficult problems... ;o)
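# Hedged addition (not part of the original program): the 16 hex bytes written
# above encode one cycle of a signed 8-bit sine wave. A sketch of how such a
# table could be derived (values land within 1 LSB of the ones used above):
#
# import math
# table = "".join(chr(int(round(127.5 * math.sin(2 * math.pi * i / 16))) & 0xff) for i in range(16))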
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578301_Platform_Independent_1KHz_Pure_Audio_Sinewave/recipe-578301.py",
"copies": "1",
"size": "2570",
"license": "mit",
"hash": 3911356146682183700,
"line_mean": 43.3103448276,
"line_max": 121,
"alpha_frac": 0.7214007782,
"autogenerated": false,
"ratio": 2.7024185068349107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3923819285034911,
"avg_score": null,
"num_lines": null
} |
# 1 max() function
def max(a,b):
    if a >= b:   # bug fix: the original returned None when a == b
        return a
    else:
        return b
# 2 max_of_three function
def max_of_three(a,b,c):
myList = []
myList.append(a)
myList.append(b)
myList.append(c)
myList.sort()
return myList[len(myList)-1]
# 3 length of string/list
def length(thing):
counter = 0
for i in thing:
counter += 1
return counter
# 4 True if vowel, False if not
def isVowel(char):
if char.lower() in vowels:
return True
else:
return False
# 5 rövarspråket Translator
def swedish_translate(string):
translated = ""
for i in string:
if i.lower() not in vowels:
translated += i + "o" + i
else:
translated += i
return translated.capitalize()
# 6 Sum() and multiply()
def sum(sumList):
result = 0
for i in sumList:
result += i
return result
def multiply(multiplyList):
result = 1
for i in multiplyList:
result *= i
return result
# 7 Reversal of string
def reverse(string):
return string[::-1]
vowels = ["a","e","i","o","u"] #This is used in #4 and #5
# 8 is_palindrome()
def is_palindrome(string):
    if string.lower() == string[::-1].lower():
        return True
    return False   # bug fix: the original returned None for non-palindromes
# 9 is_member()
def is_member(x,list):
for i in list:
if x == i:
return True
return False
# 10 overlapping()
def overlapping(list0,list1):
for x in list0:
for y in list1:
if x == y:
return True
return False
# 11 generate_n_chars()
def generate_n_chars(n,c):
    string = ""
    for i in range(0,n):
        string += c
    return string
# 12 Histogram
def histogram(list):
for i in list:
print(i * "*")
# 13 Max of list
def max_in_list(list):
list.sort()
return list[len(list)-1]
# 14 length in words
def length_of_words(list):
lenWords = []
for i in list:
lenWords.append(len(i))
return lenWords
'''
_______________________________________________________________________________
'''
# 1
print("1.")
print ("{0} is the biggest number\n".format(max(100,101)))
# 2
print ("2.")
print ("{0} is the biggest number\n".format(max_of_three(164,143,201000)))
# 3
print ("3.")
print ("This string has {0} elements".format(length("Hello Boy")))
print ("This list has {0} elements\n".format(length([1,54,3,63,23,64,3])))
# 4
charForExcercise4 = "B"
print("4.")
print ("Its {0} that \"{1}\" is a vowel\n".format\
(isVowel(charForExcercise4),charForExcercise4))
# 5
stringForExcercise5 = "Otorrinolaringologo"
print ("5.")
print ("{0} in rövarspråket is {1}\n".format\
(stringForExcercise5,swedish_translate(stringForExcercise5)))
# 6
sumListForExcercise6 = [1,2,3,4]
multiplyListForExcercise6 = [1,2,3,4]
print("6.")
print ("The sum of {0} is {1}".format\
(sumListForExcercise6,sum(sumListForExcercise6)))
print ("The multiplication of {0} is {1}\n".\
format(multiplyListForExcercise6,multiply(multiplyListForExcercise6)))
# 7
stringForExcercise7 = "Good bye"
print ("7.")
print ("{0} reversed is:\n{1}\n".format\
(stringForExcercise7,reverse(stringForExcercise7)))
# 8
print ("8.")
if (is_palindrome("sanas")):
print ("Sanas is a word palindrome")
print ()
print ("9.")
print ("Is {0} that {1} is in {2}".format\
(is_member(5,[1,2,3,4,5]),5,[1,2,3,4,5]))
print()
print ("10.")
print ([1,2,3,4],[532,34,12,1,356],overlapping([1,2,3,4],[532,34,12,1,356]))
print ()
print ("11.")
print (generate_n_chars(10,'z'))
print ()
print ("12.")
histogram([5,1,25,6])
print ()
print ("13.")
print ([1,2,4,6,3,5,6,3,6,8,54,643,443,4,21,14,23,121],max_in_list\
([1,2,4,6,3,5,6,3,6,8,54,643,443,4,21,14,23,121]))
print()
print ("14.")
print (["Hola","Como","Te","Va"],length_of_words(["Hola","Como","Te","Va"]))
print ()
print ("15.")
print (["Apple", "Banana", "Juan", "Trash", "Car"], max_in_list(length_of_words\
(["Apple", "Banana", "Juan", "Trash", "Car"])))
print ()
| {
"repo_name": "QuirinoC/Python",
"path": "verySimple1-15.py",
"copies": "1",
"size": "4026",
"license": "apache-2.0",
"hash": 7025417077723305000,
"line_mean": 21.0989010989,
"line_max": 80,
"alpha_frac": 0.5808055694,
"autogenerated": false,
"ratio": 2.8106219426974146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38914275120974146,
"avg_score": null,
"num_lines": null
} |
##############################################
# combine reads over run over multiple lanes
##############################################
import os, sys, re
import collections
import glob
# script for linking files
scriptsdir = "/ifs/projects/proj029/src"
# first link to data in working directory
os.system("python %s/map_samples_rna.py" % scriptsdir)
# to remove at the end
to_remove = glob.glob("*.fastq*")
# iterate over files and combine those that have
# the same index i.e. condition
file_list = collections.defaultdict(list)
for inf in to_remove:
name_split = inf.split("-")
index = name_split[1]
file_list[index].append(inf)
reps = ["R1", "R2", "R3", "R4"]
for condition, files in file_list.iteritems():
for rep in reps:
outprefix = "stool-" + condition + "-%s" % rep
p1 = [inf for inf in files if inf.endswith(".1.gz") and inf.find(rep) != -1]
p1.sort()
p1 = " ".join(p1)
outname1 = outprefix + ".fastq.1.gz"
if os.path.exists(outname1): continue
statement = "zcat %(p1)s | gzip > %(outname1)s" % locals()
os.system(statement)
p2 = [inf for inf in files if inf.endswith(".2.gz") and inf.find(rep) != -1]
p2.sort()
p2 = " ".join(p2)
outname2 = outprefix + ".fastq.2.gz"
if os.path.exists(outname2): continue
statement = "zcat %(p2)s | gzip > %(outname2)s" % locals()
os.system(statement)
to_remove = " ".join(to_remove)
#os.system("rm -rf %s" % to_remove)
| {
"repo_name": "CGATOxford/proj029",
"path": "scripts/combine_lanes_rna.py",
"copies": "1",
"size": "1520",
"license": "bsd-3-clause",
"hash": -4403520974888712000,
"line_mean": 30.6666666667,
"line_max": 84,
"alpha_frac": 0.5730263158,
"autogenerated": false,
"ratio": 3.19327731092437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.426630362672437,
"avg_score": null,
"num_lines": null
} |
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from sys import platform as _platform
import sys
import glob
import os
from distutils.core import setup
from distutils.extension import Extension
from distutils.util import get_platform
from glob import glob
# monkey-patch for parallel compilation
import multiprocessing
import multiprocessing.pool
def parallelCCompile(self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
N = 2 * multiprocessing.cpu_count() # number of parallel compilations
try:
# On Unix-like platforms attempt to obtain the total memory in the
# machine and limit the number of parallel jobs to the number of Gbs
# of RAM (to avoid killing smaller platforms like the Pi)
mem = os.sysconf('SC_PHYS_PAGES') * os.sysconf('SC_PAGE_SIZE') # bytes
except (AttributeError, ValueError):
# Couldn't query RAM; don't limit parallelism (it's probably a well
# equipped Windows / Mac OS X box)
pass
else:
mem = max(1, int(round(mem / 1024 ** 3))) # convert to Gb
N = min(mem, N)
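  # worked example of the cap above (hardware figures assumed): a 4-core box
  # with 8 GiB of RAM allows N = min(8, 2 * 4) = 8 parallel jobs, while a
  # 1 GiB Raspberry Pi is throttled to N = min(1, 8) = 1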
def _single_compile(obj):
try:
src, ext = build[obj]
except KeyError:
return
newcc_args = cc_args
if _platform == "darwin":
if src.endswith('.cpp') or src.endswith('.cc'):
newcc_args = cc_args + ["-mmacosx-version-min=10.15", "-std=c++17", "-stdlib=libc++"]
self._compile(obj, src, ext, newcc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
pool = multiprocessing.pool.ThreadPool(N)
list(pool.imap(_single_compile, objects))
return objects
import distutils.ccompiler
distutils.ccompiler.CCompiler.compile = parallelCCompile
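# from this point on every compiler distutils instantiates uses the patched
# method, so the Extension modules defined below have their C/C++ sources
# compiled concurrently instead of one file at a time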
# see http://stackoverflow.com/a/8719066/295157
import os
platform = get_platform()
print(platform)
CXX_FLAGS = ''
CXX_FLAGS += '-fpermissive '
CXX_FLAGS += '-D_USE_MATH_DEFINES '
CXX_FLAGS += '-DUSE_EIGEN '
# libraries += [current_python]
libraries = []
include_dirs = ['src', '.','python', 'third_party/eigen3', 'third_party/tinyxml2/include', 'third_party/pybind11/include']
pytinyopengl3_libraries = []
pytinyopengl3_include_dirs = ['src', 'third_party/tinyobjloader']
try:
import numpy
NP_DIRS = [numpy.get_include()]
except ImportError:
  print("numpy is disabled. getCameraImage may be slower.")
else:
print("numpy is enabled.")
CXX_FLAGS += '-DPYBULLET_USE_NUMPY '
for d in NP_DIRS:
print("numpy_include_dirs = %s" % d)
include_dirs += NP_DIRS
sources = ["third_party/tinyxml2/tinyxml2.cpp"]
pytinyopengl3_sources = ["python/pytinyopengl3.cc",\
"src/visualizer/opengl/tiny_camera.cpp",\
"src/visualizer/opengl/tiny_font_stash.cpp",\
"src/visualizer/opengl/tiny_fonts.cpp",\
"src/visualizer/opengl/tiny_gl_instancing_renderer.cpp",\
"src/visualizer/opengl/tiny_gl_primitive_renderer.cpp",\
"src/visualizer/opengl/tiny_gl_render_to_texture.cpp",\
"src/visualizer/opengl/tiny_glfw_opengl_window.cpp",\
"src/visualizer/opengl/tiny_load_shader.cpp",\
"src/visualizer/opengl/tiny_open_sans.cpp",\
"src/visualizer/opengl/tiny_opengl_fontstashcallbacks.cpp",\
"src/visualizer/opengl/tiny_opengl3_app.cpp",\
"third_party/stb_image/stb_image_write.cpp",\
"third_party/glad/gl.c",\
"third_party/stb_image/stb_image.cpp",\
"third_party/tinyobjloader/tiny_obj_loader.cc",\
]
if _platform == "linux" or _platform == "linux2":
print("linux")
libraries = ['dl', 'pthread', 'stdc++fs']
CXX_FLAGS += '-D_LINUX '
CXX_FLAGS += '-DGLEW_STATIC '
CXX_FLAGS += '-DGLEW_INIT_OPENGL11_FUNCTIONS=1 '
CXX_FLAGS += '-DGLEW_DYNAMIC_LOAD_ALL_GLX_FUNCTIONS=1 '
CXX_FLAGS += '-DDYNAMIC_LOAD_X11_FUNCTIONS '
CXX_FLAGS += '-DHAS_SOCKLEN_T '
CXX_FLAGS += '-fno-inline-functions-called-once '
CXX_FLAGS += '-fvisibility=hidden '
CXX_FLAGS += '-fvisibility-inlines-hidden '
CXX_FLAGS += '-std=c++1z '
CXX_FLAGS += '-Wno-sign-compare '
CXX_FLAGS += '-Wno-reorder '
CXX_FLAGS += '-Wno-unused-local-typedefs '
CXX_FLAGS += '-Wno-unused-variable '
CXX_FLAGS += '-Wno-unused-but-set-variable '
pytinyopengl3_libraries += ['dl','pthread']
pytinyopengl3_sources += ["src/visualizer/opengl/tiny_x11_opengl_window.cpp",\
"third_party/glad/glx.c"]
elif _platform == "win32":
print("win32!")
libraries = ['User32', 'kernel32']
CXX_FLAGS += '-DWIN32 '
CXX_FLAGS += '-DGLEW_STATIC '
CXX_FLAGS += '/std:c++17 '
pytinyopengl3_libraries = ['Ws2_32', 'Winmm', 'User32', 'Opengl32', 'kernel32', 'glu32', 'Gdi32', 'Comdlg32']
pytinyopengl3_sources += ["src/visualizer/opengl/tiny_win32_opengl_window.cpp",\
"src/visualizer/opengl/tiny_win32_window.cpp"]
elif _platform == "darwin":
print("darwin!")
os.environ['LDFLAGS'] = '-framework Cocoa -mmacosx-version-min=10.15 -stdlib=libc++ -framework OpenGL'
CXX_FLAGS += '-DB3_NO_PYTHON_FRAMEWORK '
CXX_FLAGS += '-DHAS_SOCKLEN_T '
CXX_FLAGS += '-D_DARWIN '
CXX_FLAGS += '-mmacosx-version-min=10.15 '
# CXX_FLAGS += '-framework Cocoa '
pytinyopengl3_sources += ["src/visualizer/opengl/tiny_mac_opengl_window.cpp",\
"src/visualizer/opengl/tiny_mac_opengl_window_objc.m"]
else:
print("bsd!")
libraries = ['GL', 'GLEW', 'pthread']
os.environ['LDFLAGS'] = '-L/usr/X11R6/lib'
CXX_FLAGS += '-D_BSD '
CXX_FLAGS += '-I/usr/X11R6/include '
CXX_FLAGS += '-DHAS_SOCKLEN_T '
CXX_FLAGS += '-fno-inline-functions-called-once'
setup_py_dir = os.path.dirname(os.path.realpath(__file__))
need_files = []
datadir = "diffphys_data"
hh = setup_py_dir + "/" + datadir
for root, dirs, files in os.walk(hh):
for fn in files:
ext = os.path.splitext(fn)[1][1:]
if ext and ext in 'yaml index meta data-00000-of-00001 png gif jpg urdf sdf obj txt mtl dae off stl STL xml '.split(
):
fn = root + "/" + fn
need_files.append(fn[1 + len(hh):])
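      # the slice strips the absolute data-dir prefix plus its trailing "/",
      # e.g. (hypothetical path) ".../diffphys_data/robots/a.urdf" is stored
      # as "robots/a.urdf"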
print("found resource files: %i" % len(need_files))
for n in need_files:
print("-- %s" % n)
print("packages")
print(find_packages('examples/pybullet'))
print("-----")
extensions = []
CXX_FLAGS_TDS = CXX_FLAGS + '-DENABLE_TEST_ENVS ' + '-DNOMINMAX '
pytinydiffsim_ext = Extension(
"pytinydiffsim",
sources=sources+["python/pytinydiffsim.cc"],
libraries=libraries,
extra_compile_args=CXX_FLAGS_TDS.split(),
include_dirs=include_dirs + ["."])
extensions.append(pytinydiffsim_ext)
pytinydiffsim_dual_ext = Extension(
"pytinydiffsim_dual",
sources=sources+["python/pytinydiffsim_dual.cc"],
libraries=libraries,
extra_compile_args=CXX_FLAGS.split(),
include_dirs=include_dirs + ["."])
extensions.append(pytinydiffsim_dual_ext)
if os.path.exists("third_party/CppAD/include"):
platform_include_dirs = []
if _platform == "win32":
platform_include_dirs=["third_party/patches/CppADCodeGenWindows/include"]
if _platform == "linux" or _platform == "linux2":
platform_include_dirs=["third_party/patches/CppADCodeGenLinux/include"]
if _platform == "darwin":
platform_include_dirs=["third_party/patches/CppADCodeGenOSXIntel/include"]
pytinydiffsim_ad_ext = Extension(
"pytinydiffsim_ad",
sources=sources+["python/pytinydiffsim_ad.cc"],
libraries=libraries,
extra_compile_args=CXX_FLAGS.split(),
include_dirs=include_dirs + platform_include_dirs + [".",
"third_party/CppADCodeGen/include",
"third_party/CppAD/include" ])
extensions.append(pytinydiffsim_ad_ext)
else:
print("Skipping pytinydiffsim_ad extension since CppAD is missing.")
pytinyopengl3_ext = Extension(
"pytinyopengl3",
sources=pytinyopengl3_sources,
libraries=pytinyopengl3_libraries,
extra_compile_args=CXX_FLAGS.split(),
include_dirs=pytinyopengl3_include_dirs + [
".", "third_party/pybind11/include",
"third_party/optionalX11",
"third_party/glad",
"third_party",
])
extensions.append(pytinyopengl3_ext)
setup(
name='pytinydiffsim',
version='0.0.6',
description=
'Tiny Differentiable Physics Library for Robotics Simulation and Reinforcement Learning',
long_description=
'Tiny Differentiable Physics Library for Robotics Simulation and Reinforcement Learning',
url='https://github.com/google-research/tiny-differentiable-simulator',
author='Eric Heiden, David Millard, Erwin Coumans',
author_email='erwincoumans@google.com',
license='Apache License 2.0',
platforms='any',
keywords=[
'physics simulation', 'optimal control',
'robotics', 'collision detection', 'opengl',
'reinforcement learning'
],
install_requires=[
'numpy',
],
ext_modules=extensions,
classifiers=[
'Development Status :: 5 - Production/Stable',
    'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux',
'Operating System :: MacOS', 'Intended Audience :: Science/Research',
"Programming Language :: Python", 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8', 'Topic :: Games/Entertainment :: Simulation',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Framework :: Robot Framework'
],
package_dir={'': 'python'},
packages=[x for x in find_packages('python')],
package_data={'pytinydiffsim_data': need_files})
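# typical local builds (invocation assumed, standard distutils/pip usage):
#   python setup.py build_ext --inplace   # compile the extensions in place
#   pip install .                         # build and install the package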
| {
"repo_name": "google-research/tiny-differentiable-simulator",
"path": "setup.py",
"copies": "1",
"size": "10828",
"license": "apache-2.0",
"hash": 996629850098382500,
"line_mean": 34.1558441558,
"line_max": 124,
"alpha_frac": 0.6535833025,
"autogenerated": false,
"ratio": 3.3637775706741224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9446299443803378,
"avg_score": 0.014212285874148916,
"num_lines": 308
} |