text
stringlengths 0
1.05M
| meta
dict |
---|---|
# 20140106
# Jan Mojzis
# Public domain.
import nacl.raw as nacl
from util import fromhex, flip_bit
def verify_32_test():
    """Round-trip test for nacl.crypto_verify_32.

    Verifies that two equal 32-byte strings compare successfully, and that
    flipping a single bit in one of them makes verification raise ValueError.
    """
    # fixed: the original bound the loop counter to `x` and immediately
    # overwrote it; use `_` for the throwaway counter
    for _ in range(10):
        x = nacl.randombytes(nacl.crypto_verify_32_BYTES)
        y = x
        # equal inputs must verify without raising
        nacl.crypto_verify_32(x, y)
        # a one-bit forgery must be rejected with ValueError
        y1 = flip_bit(y)
        try:
            nacl.crypto_verify_32(x, y1)
        except ValueError:
            pass
        else:
            raise ValueError("forgery")
def verify_32_constant_test():
    """Sanity-check the crypto_verify_32 constants and exported attributes."""
    if nacl.crypto_verify_32_BYTES != 32:
        raise ValueError("invalid crypto_verify_32_BYTES")
    # Touch each exported attribute; a missing one raises AttributeError,
    # exactly as the original's throwaway assignments did.
    for attr in ('crypto_verify_32',
                 'crypto_verify_32_BYTES',
                 'crypto_verify_32_IMPLEMENTATION',
                 'crypto_verify_32_VERSION'):
        getattr(nacl, attr)
def run():
    """Run all crypto_verify_32 self-tests."""
    # fixed: the original "docstring" was two stray "'" string literals
    verify_32_test()
    verify_32_constant_test()
if __name__ == '__main__':
run()
| {
"repo_name": "warner/python-tweetnacl",
"path": "test/test_verify_32.py",
"copies": "1",
"size": "1052",
"license": "mit",
"hash": -8473345606953848000,
"line_mean": 20.4693877551,
"line_max": 66,
"alpha_frac": 0.4657794677,
"autogenerated": false,
"ratio": 3.8254545454545457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9628207473510295,
"avg_score": 0.03260530792885004,
"num_lines": 49
} |
# 2014-01 Jason Roebuck
# Product of work for GEOG 590 @ Portland State University
# May be used for whatever!
# github.com/jtroe/GEOG-590 - Fork me on github!
def main():
    """Demonstrate basic string and list operations (Python 2 script)."""
    # Declare a good, old fashioned greeting.
    greeting = 'Hello, Portland!'
    print greeting
    # print a separator
    print '======'
    # prints every character from 'Hello, Portland!' on its very own line
    for char in greeting:
        print char
    print '======'
    # slices: greeting[0:4] is 'Hell', greeting[7:] is 'Portland!',
    # so this prints 'Hell Portland!'
    print greeting[0:4], greeting[7:]
    print '======'
    # declare a list of smurf strings
    mySmurfList = ['Papa', 'Smurfette', 'Hefty', 'Brainy', 'Grouchy', 'Clumsy']
    for smurf in mySmurfList:
        # if string length is greater than 4, print it! Sorry, papa.
        if len(smurf) > 4:
            print smurf
    print '======'
    # equivalent of the more traditional index-based for loop:
    # for(int i = 0; i < mySmurfList.Length; i++) <= C# equivalent
    for i in range(len(mySmurfList)):
        print mySmurfList[i]
if __name__ == "__main__":
main() | {
"repo_name": "jtroe/GEOG-590",
"path": "Assignment1/helloworld.py",
"copies": "1",
"size": "1136",
"license": "unlicense",
"hash": -78463452423160820,
"line_mean": 27.425,
"line_max": 79,
"alpha_frac": 0.6126760563,
"autogenerated": false,
"ratio": 3.2737752161383287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43864512724383287,
"avg_score": null,
"num_lines": null
} |
# 2014-01 Jason Roebuck
# Product of work for GEOG 590 @ Portland State University
# May be used for whatever!
# github.com/jtroe/GEOG-590 - Fork me on github!
import arcpy
import os
def main():
    """Describe every shapefile found under this script's directory.

    Python 2 / ArcGIS script: uses arcpy.Describe to print each shapefile's
    name and geometry type, the field names when there are more than 6
    fields, and the feature type of polygon feature classes.
    """
    for shp in AllShapefiles():
        shpDesc = arcpy.Describe(shp)
        print shpDesc.name, 'is a' ,shpDesc.shapeType # x.shp is a Polygon/Point/etc.
        if len(shpDesc.fields) > 6:
            print shpDesc.name,'has',str(len(shpDesc.fields)),'fields:' # x.shp has n fields:
            for f in shpDesc.fields: # and they are:
                print f.name
        if shpDesc.shapeType == 'Polygon':
            print
            print 'Life is', shpDesc.featureType # hopefully this polygon feature class is simple
# walks the path of this script and subdirectories to get all the shapefiles
# walks a directory tree and collects the full path of every shapefile
def AllShapefiles(root=None):
    """Return the full paths of all .shp files under *root*.

    Args:
        root: directory to walk; defaults to this script's own directory,
              which was the original hard-coded behavior.

    Returns:
        list of shapefile path strings (directory + filename).
    """
    if root is None:
        root = os.path.dirname(os.path.abspath(__file__))
    result = []  # empty list to add shapefile path strings to
    for (new_path, dirs, files) in os.walk(root):
        for f in files:
            # fixed: compare case-insensitively so '.SHP' files
            # (common on Windows) are also found
            if os.path.splitext(f)[1].lower() == '.shp':
                result.append(os.path.join(new_path, f))
    return result
# nerdism
if __name__ == "__main__":
main() | {
"repo_name": "jtroe/GEOG-590",
"path": "Assignment1/shapefile.py",
"copies": "1",
"size": "1316",
"license": "unlicense",
"hash": -6873687935475607000,
"line_mean": 37.7352941176,
"line_max": 136,
"alpha_frac": 0.6253799392,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46253799391999995,
"avg_score": null,
"num_lines": null
} |
# 2014.04.29
# S.Rodney
# HST Filter transmission curves: plotting and such
import numpy as np
from matplotlib import pylab as pl
import os
# Remember the starting directory; it is restored in the finally block below.
topdir = os.path.abspath( '.' )
try :
    # Load filter transmission curves as module-level arrays: wavelength
    # (w...) and throughput (f...) columns from $SNDATA_ROOT.  If the
    # SNDATA_ROOT environment variable is not set, the KeyError is swallowed
    # and these names are simply never defined (plotting functions that use
    # them will then fail with NameError).
    sndataroot = os.environ['SNDATA_ROOT']
    os.chdir( sndataroot+'/filters/HST_CANDELS')
    w435, f435 = np.loadtxt( 'ACS_WFC_F435W.dat', unpack=True )
    w606, f606 = np.loadtxt( 'ACS_WFC_F606W.dat', unpack=True )
    w625, f625 = np.loadtxt( 'ACS_WFC_F625W.dat', unpack=True )
    w814, f814 = np.loadtxt( 'ACS_WFC_F814W.dat', unpack=True )
    w350, f350 = np.loadtxt( 'WFC3_UVIS_F350LP.dat', unpack=True )
    w606u, f606u = np.loadtxt( 'WFC3_UVIS_F606W.dat', unpack=True )
    w763u, f763u = np.loadtxt( 'WFC3_UVIS_F763M.dat', unpack=True )
    w845u, f845u = np.loadtxt( 'WFC3_UVIS_F845M.dat', unpack=True )
    w127, f127 = np.loadtxt( 'WFC3_IR_F127M.dat', unpack=True )
    w125, f125 = np.loadtxt( 'WFC3_IR_F125W.dat', unpack=True )
    w160, f160 = np.loadtxt( 'WFC3_IR_F160W.dat', unpack=True )
    w153, f153 = np.loadtxt( 'WFC3_IR_F153M.dat', unpack=True )
    w139, f139 = np.loadtxt( 'WFC3_IR_F139M.dat', unpack=True )
    w140, f140 = np.loadtxt( 'WFC3_IR_F140W.dat', unpack=True )
    # Bessell (1990) ground-based BVRI bands, for comparison plots
    os.chdir( sndataroot+'/filters/Bessell90')
    wB, fB = np.loadtxt( 'Bessell90_B.dat', unpack=True )
    wV, fV = np.loadtxt( 'Bessell90_V.dat', unpack=True )
    wR, fR = np.loadtxt( 'Bessell90_R.dat', unpack=True )
    wI, fI = np.loadtxt( 'Bessell90_I.dat', unpack=True )
except KeyError :
    pass
finally :
    os.chdir(topdir)
def filtername2datfile( filtername, camera=None):
    """ Given an abbreviated filter name, returns the name of the .dat file
    containing the transmission curve.

    Parameters:
        filtername : abbreviated filter name, e.g. 'F160W', 'f350lp';
                     case-insensitive.
        camera : camera string (e.g. 'UVIS', 'ACS-WFC'); required when the
                 filter name alone is ambiguous.

    Returns the .dat filename, or None (after printing a message) when a
    camera is needed but was not given.
    """
    fname = filtername.upper()
    if fname.startswith('F1') :
        # all F1xx filters here are WFC3-IR
        return( 'WFC3_IR_%s.dat'%fname )
    elif fname=='F350LP' :
        # fixed: checked before the camera tests -- the original reached
        # camera.upper() first and raised AttributeError when camera was None
        return( 'WFC3_UVIS_%s.dat'%fname )
    elif camera is not None and 'UV' in camera.upper():
        return( 'WFC3_UVIS_%s.dat'%fname )
    elif camera is not None and 'ACS' in camera.upper():
        return( 'ACS_WFC_%s.dat'%fname )
    else :
        print("Must specify a camera for filter %s."%fname)
        return(None)
def computeScaling( filt1, filt2, camera1=None, camera2=None ) :
    """determine the flux scaling factor that should be multiplied to
    filt1 to match the throughput of filt2.  This returns just a
    single number, effectively assuming the source SED is flat across
    the bandpass, so that we just need to correct for total
    throughput, not for the shape of the filter.

    filt1, filt2 : abbreviated filter names (e.g. 'F160W') or .dat filenames
    camera1, camera2 : passed to filtername2datfile when resolving names
    Returns int2/int1, or None when the inputs could not be resolved.
    """
    from scipy import integrate as scint
    # resolve abbreviated names like 'F160W' into .dat filenames
    if filt1.lower().startswith('f') :
        filt1 = filtername2datfile( filt1, camera=camera1 )
    if filt2.lower().startswith('f') :
        filt2 = filtername2datfile( filt2, camera=camera2 )
    # fixed: also bail out on None (filtername2datfile's failure value)
    # instead of crashing on None.endswith
    if ( filt1 is None or filt2 is None
         or not filt1.endswith('.dat') or not filt2.endswith('.dat') ):
        print("Must specify a filter name (e.g. F160W) or a .dat file.")
        return( None )
    # read in the transmission curves for filters 1 and 2
    topdir = os.path.abspath( '.' )
    sndataroot = os.environ['SNDATA_ROOT']
    os.chdir( sndataroot+'/filters/HST')
    try :
        w1, f1 = np.loadtxt( filt1, unpack=True )
        w2, f2 = np.loadtxt( filt2, unpack=True )
    finally :
        # fixed: restore the working directory even when loadtxt raises
        os.chdir( topdir )
    # integrate each throughput curve (Simpson's rule)
    int1 = scint.simps( f1, w1 )
    int2 = scint.simps( f2, w2 )
    # ratio of total throughputs
    return( int2 / int1 )
def computeScaling2to1( filt1, filt2, filt3,
                        camera1=None, camera2=None, camera3=None) :
    """Determine the flux scaling factor for matching the sum of filt1+filt2
    to filt3. This returns the value that should be multiplied to
    (filt1+filt2) to match the throughput of filt3. This returns just a
    single number, effectively assuming the source SED is flat across
    the bandpass, so that we just need to correct for total
    throughput, not for the shape of the filter.

    filt1..filt3 : abbreviated filter names (e.g. 'F160W') or .dat filenames
    camera1..camera3 : passed to filtername2datfile when resolving names
    Returns int3/(int1+int2), or None when the inputs could not be resolved.
    """
    from scipy import integrate as scint
    # resolve abbreviated names like 'F160W' into .dat filenames
    if filt1.lower().startswith('f') :
        filt1 = filtername2datfile( filt1, camera=camera1 )
    if filt2.lower().startswith('f') :
        filt2 = filtername2datfile( filt2, camera=camera2 )
    if filt3.lower().startswith('f') :
        filt3 = filtername2datfile( filt3, camera=camera3 )
    # fixed: also bail out on None (filtername2datfile's failure value)
    if ( filt1 is None or filt2 is None or filt3 is None
         or not (filt1.endswith('.dat') and filt2.endswith('.dat')
                 and filt3.endswith('.dat')) ):
        print("Must specify a filter name (e.g. F160W) or a .dat file.")
        return( None )
    # read in the transmission curves for all three filters
    topdir = os.path.abspath( '.' )
    sndataroot = os.environ['SNDATA_ROOT']
    os.chdir( sndataroot+'/filters/HST')
    try :
        w1, f1 = np.loadtxt( filt1, unpack=True )
        w2, f2 = np.loadtxt( filt2, unpack=True )
        w3, f3 = np.loadtxt( filt3, unpack=True )
    finally :
        # fixed: restore the working directory even when loadtxt raises
        os.chdir( topdir )
    # integrate each throughput curve (Simpson's rule)
    int1 = scint.simps( f1, w1 )
    int2 = scint.simps( f2, w2 )
    int3 = scint.simps( f3, w3 )
    # sum and divide
    return( int3 / (int1+int2) )
def plotmedbands( z = 2, day=5 ):
    """Plot the WFC3-IR medium bands against their wide-band counterparts,
    overlaid on a redshifted SNIa template spectrum.

    z : redshift applied to the template SED
    day : rest-frame epoch (days) passed to snana.snsed.getsed
    """
    from hstsntools import snana
    w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.dat', day=day )
    w1az = w1a * (1+z)
    f1az = f1a / f1a.max() / 2.
    # NOTE(review): this module imports pylab only as 'pl', so the original's
    # bare clf()/subplot()/plot()/setp()/gcf() calls raised NameError; they
    # are qualified with 'pl.' here.
    pl.clf()
    ax1 = pl.subplot(3,1,1)
    pl.plot(w125, f125, 'b--', label='F125W')
    pl.plot(w127, f127, 'b-', label='F127M')
    pl.plot(w1az, f1az, 'r-', label='_nolegend_')
    ax1.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
    ax1.set_xlim( 9000, 20000 )
    ax1.text(9500,0.2, 'SNIa\nz=%.1f\nt=%i'%(z,day), color='r',ha='left',va='bottom')
    pl.setp(ax1.get_xticklabels(), visible=False)
    pl.setp(ax1.get_yticklabels(), visible=False)
    ax2 = pl.subplot(3,1,2, sharex=ax1, sharey=ax1)
    pl.plot(w140, f140, 'g--',label='F140W')
    pl.plot(w139, f139, 'g-',label='F139M')
    pl.plot(w1az, f1az, 'r-', label='_nolegend_')
    ax2.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
    ax2.set_xlim( 9000, 20000 )
    pl.setp(ax2.get_xticklabels(), visible=False)
    pl.setp(ax2.get_yticklabels(), visible=False)
    ax2.set_ylabel('Flux / Transmission (arbitrary units)')
    ax3= pl.subplot(3,1,3, sharex=ax1, sharey=ax1)
    pl.plot(w160, f160, 'm--',label='F160W')
    pl.plot(w153, f153, 'm-',label='F153M')
    pl.plot(w1az, f1az, 'r-',label='_nolegend_')
    ax3.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
    pl.setp(ax3.get_yticklabels(), visible=False)
    ax1.set_xlim( 9000, 20000 )
    # fixed: this call was split mid-identifier across two source lines
    # ('ax1.set_xlabe' / "l('observed wavelength (Angstroms)')")
    ax1.set_xlabel('observed wavelength (Angstroms)')
    fig = pl.gcf()
    fig.subplots_adjust( wspace=0, hspace=0, left=0.05, bottom=0.12, right=0.95, top=0.95)
def plotbroadbandz( zvals=(1, 1.5, 2.0), day=0 ):
    """Show how the broad HST bands (W, J, H) cover the SNIa SED at high z.

    zvals : redshifts, one per subplot row (default unchanged; now a tuple
            to avoid a mutable default argument)
    day : rest-frame epoch (days) passed to snana.snsed.getsed
    """
    from hstsnpipe import tools
    from tools import snana
    w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.extrap.dat', day=day )
    print("SALT2")
    # NOTE(review): this module imports pylab only as 'pl', so the original's
    # bare clf()/subplot()/plot()/setp()/gcf() calls raised NameError; they
    # are qualified with 'pl.' here.
    pl.clf()
    i = 0
    for z in zvals:
        i+=1
        w1az = w1a * (1+z)
        f1az = f1a / f1a.max() / 2.
        ax = pl.subplot(3,1,i)
        pl.plot(w350, f350, 'b--', label='F350LP(W)')
        pl.plot(w125, f125, 'g--', label='F125W(J)')
        pl.plot(w160, f160, 'r--', label='F160W(H)')
        pl.plot(w1az, f1az, 'k-', label='_nolegend_')
        ax.set_xlim( 3000, 20000 )
        ax.text(0.98,0.95, 'z=%.1f'%(z), color='k',ha='right',va='top',transform=ax.transAxes)
        pl.setp(ax.get_yticklabels(), visible=False)
        if i==1 :
            # label the bands once, above the top panel
            top = ax.get_ylim()[1]
            ax.text(16000,top, 'F160W(H)', color='r',ha='center',va='bottom')
            ax.text(12500,top, 'F125W(J)', color='g',ha='center',va='bottom')
            ax.text(3500,top, 'F350LP(W)', color='b',ha='left',va='bottom')
        if i<3 :
            pl.setp(ax.get_xticklabels(), visible=False)
        if i==2 :
            ax.set_ylabel('Flux or Transmission (arbitrary units)')
        if i==3 :
            ax.set_xlabel('observed wavelength (Angstroms)')
    fig = pl.gcf()
    fig.subplots_adjust( wspace=0, hspace=0, left=0.05, bottom=0.12, right=0.95, top=0.95)
def plotBVRI( ):
    """Compare the Bessell BVRI ground-based bands with the matching ACS
    broad bands, overlaid on a day-0 SNIa template spectrum."""
    from hstsnpipe import tools
    from tools import snana
    w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.extrap.dat', day=0 )
    # NOTE(review): this module imports pylab only as 'pl', so the original's
    # bare clf()/plot()/gca() calls raised NameError; qualified here.
    pl.clf()
    f1a = f1a / f1a.max()
    pl.plot(wB, fB, 'b--', label='B')
    pl.plot(wV, fV, 'g--', label='V')
    pl.plot(wR, fR, 'r--', label='R')
    pl.plot(wI, fI, 'k--', label='I')
    pl.plot(w435, f435, 'b-', label='F435W')
    pl.plot(w606, f606, 'g-', label='F606W')
    pl.plot(w625, f625, 'r-', label='F625W')
    pl.plot(w814, f814, 'k-', label='F814W')
    pl.plot(w1a, f1a, 'k-', label='_nolegend_')
    ax = pl.gca()
    ax.set_xlim( 3000, 10000 )
| {
"repo_name": "srodney/hstsntools",
"path": "filters.py",
"copies": "1",
"size": "9454",
"license": "mit",
"hash": -2819375483928643600,
"line_mean": 36.6653386454,
"line_max": 132,
"alpha_frac": 0.6180452718,
"autogenerated": false,
"ratio": 2.697289586305278,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8693512025765242,
"avg_score": 0.024364566468007106,
"num_lines": 251
} |
# 2014/07/01
####
class Solution:
    """LeetCode 'Valid Palindrome': check whether a string reads the same
    forwards and backwards, considering only ASCII alphanumeric characters
    and ignoring case."""

    # @param s, a string
    # @return a boolean
    def isPalindrome(self, s):
        left, right = 0, len(s) - 1
        while left < right:
            # advance both cursors past non-alphanumeric characters
            while left < right and not Solution.isAlphaNumeric(s[left]):
                left += 1
            while left < right and not Solution.isAlphaNumeric(s[right]):
                right -= 1
            # case-insensitive comparison of the two ends
            if s[left].upper() != s[right].upper():
                return False
            left += 1
            right -= 1
        return True

    # ASCII-only alphanumeric test (digits, upper- and lower-case letters)
    # @param s single character
    # @return boolean
    @staticmethod
    def isAlphaNumeric(s):
        return ('0' <= s <= '9') or ('A' <= s <= 'Z') or ('a' <= s <= 'z')
####
s = Solution()
assert s.isPalindrome("A man, a plan, a canal: Panama")
assert not s.isPalindrome("race a car") | {
"repo_name": "ldamewood/leetcode",
"path": "python/valid-palindrome.py",
"copies": "1",
"size": "1044",
"license": "mit",
"hash": 201068602755902370,
"line_mean": 27.2432432432,
"line_max": 62,
"alpha_frac": 0.5114942529,
"autogenerated": false,
"ratio": 3.7419354838709675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4753429736770967,
"avg_score": null,
"num_lines": null
} |
# 2014/07/01
####
class Solution:
    """LeetCode 'Two Sum': return the 1-based indices (i1, i2), i1 < i2, of
    the two entries of `num` that sum to `target`."""

    # @return a tuple, (index1, index2)
    def twoSum(self, num, target):
        # sort (value, original_index) pairs: O(N log N)
        idx = sorted((val, i) for i, val in enumerate(num))
        # Ignore entries larger than (target - min): even paired with the
        # minimum they would overshoot the target.
        end = 0
        while end < len(num) and idx[end][0] <= target - idx[0][0]:
            end += 1
        idx = idx[:end]
        # keep only the original indices, still ordered by value
        idx = [pair[1] for pair in idx]
        for i in idx:
            check = target - num[i]
            # binary search for the complement
            j = Solution.search(num, idx, check)
            if j > -1:
                return tuple(sorted([i + 1, j + 1]))

    # @param num  the original list
    # @param idx  indices into num, sorted by num[index]
    # @param val  value to search for
    # @return an index into num holding val, or -1
    @staticmethod
    def search(num, idx, val):
        if len(idx) < 1:
            return -1
        # fixed: '//' (floor division) keeps the midpoint an int on
        # Python 3; plain '/' produced a float and broke list indexing
        half = len(idx) // 2
        if num[idx[half]] == val:
            return idx[half]
        elif num[idx[half]] < val:
            return Solution.search(num, idx[half + 1:], val)
        else:
            return Solution.search(num, idx[:half], val)
####
s = Solution()
assert s.twoSum([2, 7, 11, 15], 9) == (1,2)
assert s.twoSum([5,75,25], 100) == (2,3)
assert s.twoSum([0,4,3,0], 0) == (1,4)
assert s.twoSum([-3,4,3,90], 0) == (1,3) | {
"repo_name": "ldamewood/leetcode",
"path": "python/two-sum.py",
"copies": "1",
"size": "1183",
"license": "mit",
"hash": -7263006440049146000,
"line_mean": 28.6,
"line_max": 71,
"alpha_frac": 0.5012679628,
"autogenerated": false,
"ratio": 3.10498687664042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8962557017615632,
"avg_score": 0.028739564364957504,
"num_lines": 40
} |
# 2014 07 02 I.Zliobaite
# visualization of events over time based on grid ("heatmaps")
# place a grid over the city, count events in each square
import data_processing
import plotting
#define parameters
param_grid_width = 1 #km
param_discretization_step = 30 #in minutes
param_map_alpha = 0.97 #fading factor for previous
param_hour_range = range(7,18) #first inclusive, last exclusive, maximum range
param_number_of_days = 3
param_file_name = 'events.csv'
param_movie_name = 'events_movie.mp4'
#create a synthetic demo dataset in param_file_name
data_processing.create_data(param_file_name,param_number_of_days)
print('done creating demo data')
#prepare data: map stop ids to coordinates and order trips chronologically
coordinates_look_up,stop_sequence,times_start_trip = data_processing.make_coordinate_dictionary(param_file_name)
times_start_trip,stop_sequence = data_processing.sort_by_time(times_start_trip,stop_sequence)
#snap the stop coordinates onto a square grid of param_grid_width km cells
coordinates_grid, stops_grid = data_processing.coordinates_to_grid(coordinates_look_up,param_grid_width)
stop_sequence_grid = data_processing.convert_stop_sequence_grid(stop_sequence,stops_grid)
print('done coordinate extraction')
#bin events into param_discretization_step minute slots within the hour range,
#once per stop and once per grid cell; fading copies are for the video frames
time_discrete,demand_true_discrete,stops_unique = data_processing.discretize_observations(times_start_trip,stop_sequence,param_discretization_step,param_hour_range)
demand_fading_discrete = data_processing.fade_for_video(demand_true_discrete,param_map_alpha)
time_discrete_grid,demand_true_discrete_grid,stops_unique_grid = data_processing.discretize_observations(times_start_trip,stop_sequence_grid,param_discretization_step,param_hour_range)
demand_fading_discrete_grid = data_processing.fade_for_video(demand_true_discrete_grid,param_map_alpha)
print('done discretization')
#make a movie
plotting.make_movie_with_heatmap(demand_true_discrete,stops_unique,time_discrete,coordinates_look_up,demand_fading_discrete_grid,stops_unique_grid,coordinates_grid,param_movie_name)
print('done movie')
| {
"repo_name": "zliobaite/Heatmaps",
"path": "run_make_heatmap_movie.py",
"copies": "1",
"size": "1873",
"license": "mit",
"hash": -8190499199132564000,
"line_mean": 51.0277777778,
"line_max": 184,
"alpha_frac": 0.8051254672,
"autogenerated": false,
"ratio": 3.1908006814310053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44959261486310054,
"avg_score": null,
"num_lines": null
} |
# 2014-07-10/DN - attempting to implement argparse so that user, pwd & IP can be
# passed in from CLI.
# 2014-07-10/DN - Works with my ASA but I had to add an enable option as the enable pwd
# is different. Might be nice to default the enable password to the user password if
# that was supplied.
# NOTE(review): Python 2 script (print statements, file() builtin).
# Logs into a Cisco ASA over ssh via pexpect, enters enable mode, and writes
# the 'show version' output to <device-ip>.log.
import pexpect #module for logging into the ASA
import sys #module for writing files to log/linux shell
import argparse #parsing command line arguments
# 2014-07-10/DN - debugging to clear the screen with each run
#import os #operating system options
#os.system('cls' if os.name == 'nt' else 'clear')
parser = argparse.ArgumentParser(description='Get "show version" from a Cisco ASA.')
parser.add_argument('-u', '--user', default='cisco', help='user name to login with (default=cisco)')
parser.add_argument('-p', '--password', default='cisco', help='password to login with (default=cisco)')
parser.add_argument('-e', '--enable', default='cisco', help='password for enable (default=cisco)')
parser.add_argument('-d', '--device', default='192.168.120.160', help='device to login to (default=192.168.120.160)')
args = parser.parse_args()
#child becomes the object to send/receive commands from the ASA
child = pexpect.spawn('ssh '+args.user+'@'+args.device)
#for debugging we send the input and output to the linux shell
child.logfile_read = sys.stdout
child.logfile_send = sys.stdout
#familiar process of logging into a cisco device
#expect waits for response from the console
#some special characters here like:
# . means any character
# + means the previous character 1 or more times
# * means the previous character 0 or more times
#the print commands are here in case you run into trouble and will give you an idea where the script stopped
print 'expecting password'
child.expect('.*password: ')
print 'sending password'
child.sendline(args.password)
print 'expecting login'
#expecting the hostname> prompt
child.expect('.*> ')
child.sendline('enable')
#expecting the enable password prompt
child.expect('Password.*')
print 'sending password'
child.sendline(args.enable)
print 'expecting exec'
#expecting a login prompt of hostname#
child.expect('.*#.*')
#setting the terminal length to infinity so we don't need to press space or enter to continue the prompt
child.sendline('terminal pager 0')
#setting a new file for output so we can write output from the screen to a file for later
#NOTE(review): file() is the Python 2 spelling of open()
fout = file(args.device+'.log','w')
child.expect('.*#.*')
#setting the show version output to a file: only reads (device output) are logged
child.logfile_read = fout
child.sendline('show version')
#expecting the hostname# prompt
child.expect('.*#.*')
fout.close() #closing the file for best practice
child.sendline('exit') # logout of the ASA
exit()
| {
"repo_name": "SivagnanamCiena/asa-capture",
"path": "asa-capture.py",
"copies": "2",
"size": "2741",
"license": "mit",
"hash": -7041156504277109000,
"line_mean": 36.5479452055,
"line_max": 119,
"alpha_frac": 0.7362276541,
"autogenerated": false,
"ratio": 3.6304635761589403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5366691230258941,
"avg_score": null,
"num_lines": null
} |
# 2014 10 21 I.Zliobaite
# extracts cansat data
#RUN: python run_extract_distribution.py
file_data = "2014-10-18 14-30-47.txt"
from collections import defaultdict
import math
import numpy as np
import time, datetime
import matplotlib.pyplot as plt
#param_filter = 'mavlink_global_position_int_t'
param_filter = 'mavlink_gps_raw_int_t'
do_filter = 0
ind0 = 2558
ind1 = 3844
param_plot_what = 'alt'
out_file_name = 'data.csv'
#print geo_distance(lat[0],lon[0],lat[108000],lon[108000]) - veikia
def geo_distance(lat1,lon1,lat2,lon2):
    """Great-circle distance in km between two (lat, lon) points in degrees.

    Uses the spherical law of cosines with an Earth radius of 6371 km:
    d = R * acos(cos(la1)cos(la2)cos(lo2-lo1) + sin(la1)sin(la2))
    """
    rearth = 6371 #Earth Radius in km
    inner1 = math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.cos(math.radians(lon2-lon1))
    inner2 = math.sin(math.radians(lat1)) * math.sin(math.radians(lat2))
    # fixed: clamp to [-1, 1] -- the original clamped only the upper bound,
    # so floating-point error below -1.0 raised a math domain error in acos
    insum = max(-1.0, min(1.0, inner1 + inner2))
    return rearth * math.acos(insum)
def radians(degrees):
    """Convert an angle from degrees to radians."""
    return degrees * math.pi / 180
#read file
time_stamp = []
time_raw = []
lat = []
lon = []
alt = []
f = open(file_data)
lines = f.readlines()
for ln in lines:
parts = ln.strip().split(' ')
sk = 0
found_gps = 0
for pt in parts:
if pt==param_filter:
found_gps=1
ts = datetime.datetime.strptime(parts[1], "%H:%M:%S")
if len(time_stamp)==0:
ts0 = ts #+ datetime.timedelta(seconds = 1000) #iki 607
time_stamp.append(10000)
time_raw.append('na')
else:
delta_t = ts - ts0
time_stamp.append(int(delta_t.seconds))
time_raw.append(ts)
#print time_stamp[-1], parts[1]
if found_gps==1:
if pt=='lat':
lat.append(float(parts[sk+1])*1.0/10000000)
if pt=='lon':
lon.append(float(parts[sk+1])*1.0/10000000)
if pt=='alt':
alt.append(float(parts[sk+1])*1.0/1000000)
sk += 1
#if len(alt)>0:
#print lon[-1], lat[-1], alt[-1]
f.close()
lat = np.array(lat)
lon = np.array(lon)
alt = np.array(alt)
time_stamp = np.array(time_stamp)
print np.shape(lat), np.shape(lon), np.shape(alt), np.shape(time_stamp)
#ind = np.nonzero(lat > 0)
if do_filter:
lat = lat[ind0:ind1+1]
lon = lon[ind0:ind1+1]
alt = alt[ind0:ind1+1]
time_stamp = time_stamp[ind0:ind1+1]
time_stamp[0] = time_stamp[1]
time_stamp = time_stamp - time_stamp[0]
time_stamp = time_stamp*1.0/60 #min
#for aa in alt:
#print aa
print time_raw[-35],time_raw[-1]
print time_stamp[-35],time_stamp[-1]
alt_speed = []
ground_speed = []
for sk in range(len(lat)):
if sk==0:
alt_speed_now = 0
ground_speed_now = 0
alt_before = alt[0]
time_before = time_stamp[0]
lat_before = lat[0]
lon_before = lon[0]
else:
d_time = time_stamp[sk] - time_before
if d_time>0:
d_alt = -alt[sk] + alt_before
d_ground = geo_distance(lat_before,lon_before,lat[sk],lon[sk])
alt_before = alt[sk]
time_before = time_stamp[sk]
lat_before = lat[sk]
lon_before = lon[sk]
d_time = d_time*60.0 # in s
d_alt = d_alt*1000.0 #in m
d_ground = d_ground*1000.0 #in m
print d_alt
alt_speed_now = d_alt/d_time
ground_speed_now = d_ground/d_time
alt_speed.append(alt_speed_now)
ground_speed.append(ground_speed_now)
f = open(out_file_name,'w')
for sk in range(len(lat)):
f.write(str(time_stamp[sk])+' '+str(lat[sk])+' '+str(lon[sk])+' '+str(alt[sk])+' '+str(alt_speed[sk])+' '+str(ground_speed[sk])+'\n')
f.close()
#plt.plot(time_stamp, alt)
#plt.axis([xmin, xmax, ymin, ymax])
#plt.plot(time_stamp, alt, 'ro')
plt.plot(time_stamp, lon, 'ro')
#plt.plot(lat, lon, 'ro')
plt.xlabel('Time (min)')
plt.ylabel('Altitude (km)')
plt.title(param_filter)
#plt.savefig('altitude__'+param_filter+'.png')
plt.savefig('test.png')
| {
"repo_name": "zliobaite/cansat2014_analysis",
"path": "run_extract_data.py",
"copies": "1",
"size": "3601",
"license": "mit",
"hash": 9087469836820177000,
"line_mean": 23.6643835616,
"line_max": 134,
"alpha_frac": 0.6475978895,
"autogenerated": false,
"ratio": 2.37532981530343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.352292770480343,
"avg_score": null,
"num_lines": null
} |
import os, sys, re
from optparse import OptionParser
import matplotlib
# If you want to use a different backend, replace Agg with
# Cairo, PS, SVG, GD, Paint etc.
# Agg stands for "antigrain rendering" and produces PNG files
matplotlib.use('Agg')
from pylab import *
##**Phillip Kent** 2014-12-10: Commented out
# Avoid name collisions with min and max functions from numarray module
##min = __builtins__.min
##max = __builtins__.max
class Sparkplot:
"""
Creates sparkline graphics, as described by Edward Tufte. Uses the matplotlib library.
The 2 styles of plots implemented so far are: 'line' and 'bars'
"""
    def __init__(self, type='line', data=[], input_file="data.txt", output_file="",
                 plot_first=True, plot_last=True,
                 label_first_value=False, label_last_value=False,
                 plot_min=False, plot_max=False,
                 label_min=False, label_max=False,
                 draw_hspan=False, hspan_min=-1, hspan_max=0,
                 label_format="", currency='$', transparency=False, verbose=0):
        """
        Store the plotting options.  Each parameter mirrors a command-line
        option parsed in process_args; see the option help strings there for
        the meaning of each flag.
        NOTE(review): data=[] is a mutable default argument -- harmless here
        only because it is rebound (never mutated) by plot_sparkline.
        """
        self.type = type
        self.data = data
        self.input_file = input_file
        self.output_file = output_file
        self.plot_first = plot_first
        self.plot_last = plot_last
        self.label_first_value = label_first_value
        self.label_last_value = label_last_value
        self.plot_min = plot_min
        self.plot_max = plot_max
        self.label_min = label_min
        self.label_max = label_max
        self.draw_hspan = draw_hspan
        self.hspan_min = hspan_min
        self.hspan_max = hspan_max
        self.label_format = label_format
        self.currency = currency
        self.transparency = transparency
        self.verbose = verbose
def process_args(self):
parser = OptionParser()
parser.add_option("-m", "--type", dest="type",
default="line", help="graphic type (can be 'line' [default], 'bars')")
parser.add_option("-i", "--input", dest="input_file",
default="data.txt", help="input data file (default is data.txt)")
parser.add_option("-o", "--output", dest="output_file",
default="", help="output data file (default is data.png)")
parser.add_option("--noplot_first", action="store_false", dest="plot_first",
default=True, help="do not plot first data point in different color")
parser.add_option("--noplot_last", action="store_false", dest="plot_last",
default=True, help="do not plot last data point in different color")
parser.add_option("--label_first", action="store_true", dest="label_first_value",
default=False, help="label first data value (default=False)")
parser.add_option("--label_last", action="store_true", dest="label_last_value",
default=False, help="label last data value (default=False)")
parser.add_option("--plot_min", action="store_true", dest="plot_min",
default=False, help="plot min data point in different color (default=False)")
parser.add_option("--plot_max", action="store_true", dest="plot_max",
default=False, help="plot max data point in different color (default=False)")
parser.add_option("--label_min", action="store_true", dest="label_min",
default=False, help="label min data value (default=False)")
parser.add_option("--label_max", action="store_true", dest="label_max",
default=False, help="label max data value (default=False)")
parser.add_option("--draw_hspan", action="store_true", dest="draw_hspan",
default=False, help="draw a horizontal band along the x axis (default=False)")
parser.add_option("--hspan_min", dest="hspan_min", type="int",
default=-1, help="specify the min y value for the hspan (default=-1)")
parser.add_option("--hspan_max", dest="hspan_max", type="int",
default=0, help="specify the max y value for the hspan (default=0)")
parser.add_option("--format", dest="label_format", metavar="FORMAT",
default="", help="format for the value labels (can be empty [default], 'comma', 'currency')")
parser.add_option("--currency", dest="currency",
default="$", help="currency symbol (default='$')")
parser.add_option("-t", "--transparency", action="store_true", dest="transparency",
default=False, help="set transparency for the image background (default=False)")
parser.add_option("--verbose", action="store_true", dest="verbose",
default=False, help="show diagnostic messages (default=False)")
(options, args) = parser.parse_args()
self.type = options.type
self.input_file = options.input_file
self.output_file = options.output_file
self.plot_first = options.plot_first
self.plot_last = options.plot_last
self.label_first_value = options.label_first_value
self.label_last_value = options.label_last_value
self.plot_min = options.plot_min
self.plot_max = options.plot_max
self.label_min = options.label_min
self.label_max = options.label_max
self.draw_hspan = options.draw_hspan
self.hspan_min = options.hspan_min
self.hspan_max = options.hspan_max
self.label_format = options.label_format
self.verbose = options.verbose
self.currency = options.currency
self.transparency = options.transparency
    def get_input_data(self):
        """
        Read input file and fill data list.
        Data file is assumed to contain one column of numbers which will
        be plotted as a timeseries.
        Exits the process (status 1) if the file cannot be opened.
        """
        try:
            f = open(self.input_file)
        except:
            print "Input file %s could not be opened" % self.input_file
            sys.exit(1)
        # keep only lines that contain at least one digit, parsed as floats
        data = [float(line.rstrip('\n')) for line in f.readlines() if re.search('\d+', line)]
        f.close()
        return data
def plot_sparkline(self):
"""
Plot sparkline graphic by using various matplotlib functions.
"""
if len(self.data) == 0:
self.data = self.get_input_data()
num_points = len(self.data)
min_data = min(self.data)
max_data = max(self.data)
sum_data = sum(self.data)
avg_data = sum(self.data) / num_points
min_index = self.data.index(min_data)
max_index = self.data.index(max_data)
if self.verbose:
print "Plotting %d data points" % num_points
print "Min", min_index, min_data
print "Max", max_index, max_data
print "Avg", avg_data
print "Sum", sum_data
# last_value_len is used for dynamically adjusting the width of the axes
# in the axes_position list
if self.label_last_value:
last_value_len = len(self.format_text(self.data[num_points-1]))
elif self.label_max:
last_value_len = len(self.format_text(max_data))
else:
last_value_len = 1
# delta_height is used for dynamically adjusting the height of the axes
# in the axes_position list
if self.plot_max or self.label_max or self.label_last_value:
delta_height = 0.32
else:
delta_height = 0.1
axes_position = [0.02,0.02,1-0.035*last_value_len,1-delta_height]
# Width of the figure is dynamically adjusted depending on num_points
fig_width = min(5, max(1.5, 0.03 * num_points))
# Height of the figure is set differently depending on plot type
if self.type.startswith('line'):
fig_height = 0.3
elif self.type.startswith('bar'):
if self.label_max:
fig_height = 0.5
else:
fig_height = 0.1
if self.verbose:
print "Figure width:", fig_width
print "Figure height:", fig_height
print "Axes position:", axes_position
# Create a figure with the given width, height and dpi
fig = figure(figsize=(fig_width, fig_height), dpi=150)
if self.type.startswith('line'):
# For 'line' plots, simply plot the line
plot(range(num_points), self.data, color='gray')
elif self.type.startswith('bar'):
# For 'bars' plots, simulate bars by plotting vertical lines
for i in range(num_points):
if self.data[i] < 0:
color = 'r'
else:
color = 'b' # Use color = '#003163' for a dark blue
plot((i, i), (0, self.data[i]), color=color, linewidth=1.25)
if self.draw_hspan:
axhspan(ymin=self.hspan_min, ymax=self.hspan_max, xmin=0, xmax=1, linewidth=0.5, edgecolor='gray', facecolor='gray')
if self.type == 'line':
# Plotting the first, last, min and max data points in a different color only makes sense for 'line' plots
if self.plot_first:
plot([0,0], [self.data[0], self.data[0]], 'r.')
if self.plot_last:
plot([num_points-1, num_points-1], [self.data[num_points-1], self.data[num_points-1]], 'r.')
if self.plot_min:
plot([min_index, min_index], [self.data[min_index], self.data[min_index]], 'b.')
if self.plot_max:
plot([max_index, max_index], [self.data[max_index], self.data[max_index]], 'b.')
if self.label_first_value:
text(0, self.data[0], self.format_text(self.data[0]), size=6)
if self.label_last_value:
text(num_points-1, self.data[num_points-1], self.format_text(self.data[num_points-1]), size=6)
if self.label_min:
text(min_index*1.05, self.data[min_index]*1.05, self.format_text(min_data), size=8)
if self.label_max:
text(max_index*1.05, self.data[max_index]*1.05, self.format_text(max_data), size=8)
# IMPORTANT: commands affecting the axes need to be issued AFTER the plot commands
# Set the axis limits instead of letting them be computed automatically by matplotlib
# We leave some space around the data points so that the plot points for
# the first/last/min/max points are displayed
axis([-1, num_points, min_data - (abs(min_data)*0.1), max_data + (abs(max_data)*0.1) ])
# Turn off all axis display elements (frame, ticks, tick labels)
axis('off')
# Note that these elements can also be turned off via the following calls,
# but I had problems setting the axis limits AND settings the ticks to empty lists
#a.set_xticks([])
#a.set_yticks([])
#a.set_frame_on(False)
# Set the position for the current axis so that the data labels fit in the figure
a = gca()
a.set_position(axes_position)
if self.transparency:
fig.figurePatch.set_alpha(0.5)
a.axesPatch.set_alpha(0.5)
# Save the plotted figure to a data file
self.generate_output_file()
# Delete the fig
close()
    def generate_output_file(self):
        """
        Save plotted figure to output file.
        The AGG backend will automatically append .PNG to the file name
        """
        # Default the output name to the input file name minus its extension
        if not self.output_file:
            self.output_file = os.path.splitext(self.input_file)[0]
        if self.verbose:
            print "Generating output file " + self.output_file + '.png'
        # savefig comes from the pylab/matplotlib star import (AGG backend)
        savefig(self.output_file)
def format_text(self, data):
"""
Format text for displaying data values.
The only 2 formats implemented so far are:
'currency' (e.g. $12,249)
'comma' (e.g. 34,256,798)
"""
if self.label_format == 'currency' or self.label_format == 'comma':
t = str(int(data))
text = ""
if self.label_format == 'currency':
text += self.currency
l = len(t)
if l > 3:
quot = l / 3
rem = l % 3
text += t[:rem]
for i in range(quot):
text += ',' + t[rem:rem+3]
rem += 3
else:
text += t
else:
text = str(data)
return text
# Command-line entry point: parse arguments, plot the sparkline, save the PNG
if __name__ == '__main__':
    sparkplot = Sparkplot()
    sparkplot.process_args()
    sparkplot.plot_sparkline()
| {
"repo_name": "Interoute/API-fun-and-education",
"path": "Sparkplot.py",
"copies": "1",
"size": "13056",
"license": "apache-2.0",
"hash": -8528301233139460000,
"line_mean": 42.3754152824,
"line_max": 128,
"alpha_frac": 0.5736060049,
"autogenerated": false,
"ratio": 3.858156028368794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9885344544718042,
"avg_score": 0.009283497710150542,
"num_lines": 301
} |
# 2014-12-17
# build by qianqians
# deletenote
def deletenote(filestr):
    """Strip C/C++ comments from a list of source lines.

    Handles '//' line comments and '/* ... */' block comments (which may
    span multiple lines).  Lines that become empty are dropped.

    filestr -- list of source-line strings
    Returns the list of comment-free lines.
    Raises Exception when a '/*' block comment is never terminated.

    Fixed: literal comparisons used identity ('is not -1', 'is ""',
    'is 1') which only worked by CPython interning accident and raises
    SyntaxWarning on modern Python; the builtin name 'str' was shadowed.
    """
    genfilestr = []
    inside_block = 0   # 1 while between an unmatched '/*' and its '*/'
    errornote = ""     # remembers the line that opened the dangling '/*'
    for line in filestr:
        while True:
            if inside_block == 1:
                indexafter = line.find("*/")
                if indexafter != -1:
                    # block comment closes on this line: keep the remainder
                    line = line[indexafter + 2:]
                    inside_block = 0
                else:
                    # still inside the block comment: discard this line
                    break
            index = line.find('//')
            if index != -1:
                # drop the line comment and rescan what is left
                line = line[0:index]
            else:
                indexbegin = line.find("/*")
                if indexbegin != -1:
                    errornote = line
                    indexafter = line.find("*/")
                    if indexafter != -1:
                        # inline /* ... */ : splice it out
                        line = line[0:indexbegin] + line[indexafter + 2:]
                    else:
                        inside_block = 1
                        break
                if line != "":
                    genfilestr.append(line)
                break
    if inside_block == 1:
        raise Exception("c/c++ coding error unpaired /* ", errornote)
    return genfilestr
| {
"repo_name": "yinchunlong/abelkhan-1",
"path": "juggle/parser/deletenote.py",
"copies": "2",
"size": "1136",
"license": "mit",
"hash": -6030613838607477000,
"line_mean": 24.8181818182,
"line_max": 69,
"alpha_frac": 0.4093309859,
"autogenerated": false,
"ratio": 4.47244094488189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.588177193078189,
"avg_score": null,
"num_lines": null
} |
# 2014-12-18
# build by qianqians
# statemachine
#module name{
# void func();
# int func(int);
# array func(int, string)
# map func(int, array, bool, float, string)
#}
from deletenonespacelstrip import deleteNoneSpacelstrip
class struct(object):
    """Character-fed parser for one 'struct <name>{ ... };' block.

    Collects each field definition (a token list) into self.struct.
    NOTE(review): self.keyworld is never reset on spaces or ';', so
    tokens accumulate embedded whitespace -- confirm against the
    grammar the code generator expects.
    """
    def __init__(self):
        self.keyworld = ''   # raw character accumulator for the current token
        self.name = ''       # struct name, captured when '{' arrives
        self.struct = []     # completed field definitions (one list per ';')
        self.argvdef = []    # tokens of the field currently being read
    def push(self, ch):
        """Feed one character; returns True when the closing '}' is seen."""
        if ch == ' ' or ch == '\0':
            if self.keyworld != '':
                self.argvdef.append(self.keyworld)
        if ch == '{':
            self.name = deleteNoneSpacelstrip(self.keyworld)
            self.keyworld = ''
            return False
        if ch == ';':
            # one field definition complete
            self.struct.append(self.argvdef)
            self.argvdef = []
        if ch == '}':
            return True
        self.keyworld += ch
        return False
class func(object):
    """Character-fed parser for one function declaration.

    Accumulates [return_type, name, arg tokens...] into self.func.
    """
    def __init__(self):
        self.keyworld = ''     # current token accumulator
        self.func = []         # parsed pieces of the declaration
        self.argvtuple = None  # argument token list; None until '(' is seen
    def clear(self):
        # reset between declarations (reused by the enclosing module parser)
        self.keyworld = ''
        self.func = []
        self.argvtuple = None
    def push(self, ch):
        """Feed one character; returns True at the terminating ';'."""
        if ch == ' ' or ch == '\0':
            self.keyworld = deleteNoneSpacelstrip(self.keyworld)
            if self.keyworld != '':
                if self.argvtuple is None:
                    # before '(' : return type / function name token
                    self.func.append(self.keyworld)
                else:
                    self.argvtuple.append(self.keyworld)
            self.keyworld = ''
            return False
        if ch == ',':
            if self.keyworld != '':
                self.argvtuple.append(deleteNoneSpacelstrip(self.keyworld))
            # NOTE(review): appending argvtuple to func on every ',' (and
            # starting a fresh list) splits the argument list per comma;
            # the sibling parser in the other juggle module does not do
            # this -- confirm which shape the code generator expects.
            self.func.append(self.argvtuple)
            self.keyworld = ''
            self.argvtuple = []
            return False
        if ch == '(':
            self.func.append(deleteNoneSpacelstrip(self.keyworld))
            self.argvtuple = []
            self.keyworld = ''
            return False
        if ch == ')':
            if self.keyworld != '':
                self.argvtuple.append(deleteNoneSpacelstrip(self.keyworld))
            self.func.append(self.argvtuple)
            self.keyworld = ''
            return False
        if ch == ';':
            return True
        self.keyworld += ch
        return False
class module(object):
    """Parser state for one 'module <name>{ ... }' section.

    Each completed function declaration (a func.func list) is collected
    into self.module; the module's name is captured into self.name.
    """
    def __init__(self):
        self.keyworld = ''
        self.name = ''
        self.module = []
        self.machine = None

    def push(self, ch):
        """Feed one character; returns True when the closing '}' is seen."""
        if ch == '}':
            # end of the module block
            self.machine = None
            return True
        if self.machine is None:
            # still reading the module header ("module <name> {")
            if ch == '{':
                self.name = deleteNoneSpacelstrip(self.keyworld)
                self.keyworld = ''
                self.machine = func()
            else:
                self.keyworld += ch
            return False
        # inside the block: delegate to the function-declaration parser
        if self.machine.push(ch):
            self.module.append(self.machine.func)
            self.machine.clear()
        return False
class statemachine(object):
    """Top-level parser: scans characters for the 'module' / 'struct'
    keywords and hands the following block to the matching sub-parser.

    Results land in self.module / self.struct keyed by block name.
    """
    Moduledefine = 0
    Funcdefine = 1

    def __init__(self):
        self.keyworld = ''
        self.module = {}
        self.struct = {}
        self.machine = None

    def push(self, ch):
        """Feed one character into the active sub-parser, if any."""
        if self.machine is None:
            # accumulate until a known keyword is spelled out exactly
            self.keyworld += ch
            if self.keyworld == 'module':
                self.machine = module()
                self.keyworld = ''
            if self.keyworld == 'struct':
                self.machine = struct()
                self.keyworld = ''
            return
        if not self.machine.push(ch):
            return
        # sub-parser finished its block: record the result and detach
        if isinstance(self.machine, module):
            self.module[self.machine.name] = self.machine.module
            self.machine = None
        if isinstance(self.machine, struct):
            self.struct[self.machine.name] = self.machine.struct
            self.machine = None

    def getmodule(self):
        return self.module

    def getstruct(self):
        return self.struct

    def syntaxanalysis(self, genfilestr):
        """Run every character of every (comment-free) line through push."""
        for line in genfilestr:
            for ch in line:
                self.push(ch)
self.push(ch) | {
"repo_name": "darklost/darkforce",
"path": "juggle/codegen/statemachine.py",
"copies": "1",
"size": "4132",
"license": "bsd-3-clause",
"hash": -5010113497845450000,
"line_mean": 25.1582278481,
"line_max": 75,
"alpha_frac": 0.488625363,
"autogenerated": false,
"ratio": 4.25979381443299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003897898987379564,
"num_lines": 158
} |
# 2014-12-18
# build by qianqians
# statemachine
#module name{
# void func();
# int func(int);
# array func(int, string)
# table func(int, array, bool, float, string)
#}
from deletenonespacelstrip import deleteNoneSpacelstrip
class func(object):
    """Parses one declaration like 'int name(int a, string b);'
    character by character into self.func = [rettype, name, [arg ...]].
    """

    # IDL type names accepted inside an argument list
    TYPES = ('table', 'array', 'int', 'string', 'float', 'bool')

    def __init__(self):
        self.keyworld = ''
        self.func = []
        self.argvtuple = None

    def clear(self):
        """Reset so the same instance can parse the next declaration."""
        self.keyworld = ''
        self.func = []
        self.argvtuple = None

    def push(self, ch):
        """Feed one character; returns True at the terminating ';'."""
        if ch == ';':
            return True
        if ch == ' ' or ch == '\0':
            self.keyworld = deleteNoneSpacelstrip(self.keyworld)
            if self.keyworld != '':
                if self.argvtuple is None:
                    # before '(' : return type / function name token
                    self.func.append(deleteNoneSpacelstrip(self.keyworld))
                elif self.keyworld in self.TYPES:
                    self.argvtuple.append(deleteNoneSpacelstrip(self.keyworld))
            self.keyworld = ''
            return False
        if ch == ',':
            if self.keyworld != '' and self.keyworld in self.TYPES:
                self.argvtuple.append(deleteNoneSpacelstrip(self.keyworld))
            self.keyworld = ''
            return False
        if ch == '(':
            self.keyworld = deleteNoneSpacelstrip(self.keyworld)
            if self.keyworld != '':
                self.func.append(deleteNoneSpacelstrip(self.keyworld))
            self.argvtuple = []
            self.keyworld = ''
            return False
        if ch == ')':
            if self.keyworld != '' and self.keyworld in self.TYPES:
                self.argvtuple.append(deleteNoneSpacelstrip(self.keyworld))
            if self.argvtuple is None:
                # '()' never seen: record an empty argument list
                self.func.append([])
            else:
                self.func.append(self.argvtuple)
            self.keyworld = ''
            return False
        self.keyworld += ch
        return False
class module(object):
    """Collects function declarations inside one 'module <name>{ ... }'."""
    def __init__(self):
        self.keyworld = ''
        self.name = ''
        self.module = []
        self.machine = None

    def push(self, ch):
        """Consume one character; True signals the closing '}'."""
        if ch == '}':
            self.machine = None
            return True
        if self.machine is not None:
            finished = self.machine.push(ch)
            if finished:
                # one declaration done: keep it, reuse the sub-parser
                self.module.append(self.machine.func)
                self.machine.clear()
            return False
        if ch == '{':
            # header complete: capture the name, start parsing functions
            self.name = deleteNoneSpacelstrip(self.keyworld)
            self.keyworld = ''
            self.machine = func()
            return False
        self.keyworld += ch
        return False
class statemachine(object):
    """Scans characters for the 'module' keyword and parses each
    following block; results land in self.module keyed by module name.
    """
    def __init__(self):
        self.keyworld = ''
        self.module = {}
        self.machine = None

    def push(self, ch):
        """Feed one character; route into the open module parser if any."""
        if self.machine is not None:
            if self.machine.push(ch):
                # module block finished: record it, drop the sub-parser
                if isinstance(self.machine, module):
                    self.module[self.machine.name] = self.machine.module
                self.machine = None
            return
        # no module open yet: tokens are delimited by spaces / NULs
        if ch == ' ' or ch == '\0':
            if deleteNoneSpacelstrip(self.keyworld) == 'module':
                self.machine = module()
            self.keyworld = ''
        else:
            self.keyworld += ch

    def getmodule(self):
        return self.module

    def syntaxanalysis(self, genfilestr):
        """Run every character of every (comment-free) line through push."""
        for line in genfilestr:
            for ch in line:
                self.push(ch)
| {
"repo_name": "yinchunlong/abelkhan-1",
"path": "juggle/parser/statemachine.py",
"copies": "2",
"size": "3589",
"license": "mit",
"hash": -6355696387092756000,
"line_mean": 27.9435483871,
"line_max": 109,
"alpha_frac": 0.502925606,
"autogenerated": false,
"ratio": 4.182983682983683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5685909288983683,
"avg_score": null,
"num_lines": null
} |
# 2014-12-24
# build by qianqians
# codegenclient
import os
import argvs
from gentools import maketypetocpptype, makeret
def codegencaller(filelist):
    """Generate the C++ caller-side proxy header for every parsed idl file.

    filelist maps an idl file name to {'struct': ..., 'module': ...} as
    produced by the parser.  For each module a sync and an async caller
    class is emitted into <build_path>caller/<filename>caller.h, prefixed
    with the contents of notes.txt.

    Raises Exception when the same module name is defined twice.

    Fixes: file handles are now closed (output was never flushed/closed),
    the string exception (`raise '...'`) is a real Exception, and the
    output path no longer hard-codes a '\\' separator.
    """
    if not os.path.isdir(argvs.build_path):
        os.mkdir(argvs.build_path)
    if not os.path.isdir(argvs.build_path + 'caller'):
        os.mkdir(argvs.build_path + 'caller')
    # read the shared notes/license header once; 'with' closes the handle
    with open('notes.txt', 'r') as notefile:
        note = notefile.read()
    defmodulelist = []
    for filename, filespec in filelist.items():
        code = '#include <juggle.h>\n#include <boost/make_shared.hpp>\n\n'
        structs = filespec['struct']
        modules = filespec['module']
        if len(structs) > 0:
            code += '#include \"../struct/' + filename + 'struct.h' + '\"'
        for k, v in modules.items():
            if k in defmodulelist:
                # was `raise 'redefined module %s' % k` -- string
                # exceptions are illegal; raise a real Exception
                raise Exception('redefined module %s' % k)
            # ---- synchronous caller class ----
            code += 'namespace sync{\n\n'
            code += 'class ' + k + ': public ' + 'Fossilizid::juggle::caller' + '{\n' + 'public:\n'
            code += '    ' + k + '(boost::shared_ptr<Fossilizid::juggle::process> __process, boost::shared_ptr<Fossilizid::juggle::channel> ch) : caller(__process, ch, \"' + k + '\"){\n' + '    }\n\n'
            code += '    ~' + k + '(){\n' + '    }\n\n'
            for func in v:
                code += '    ' + maketypetocpptype(func[0]) + ' ' + func[1] + '('
                if len(func) > 2:
                    code += maketypetocpptype(func[2][0]) + ' ' + func[2][1]
                    for argv in func[3:]:
                        code += ',' + maketypetocpptype(argv[0]) + ' ' + argv[1]
                code += '){\n'
                code += '        boost::shared_ptr<boost::unordered_map<std::string, boost::any> > v = boost::make_shared<boost::unordered_map<std::string, boost::any> >();\n'
                for argv in func[2:]:
                    code += '        (*v)[\"' + argv[1] + '\"] = ' + argv[1] + ';\n'
                code += '        boost::shared_ptr<boost::unordered_map<std::string, boost::any> > r = call_module_method_sync(\"' + k + '_' + func[1] + '\", v);\n'
                code += '        return ' + makeret(func[0], structs) + ';\n'
                code += '    }\n\n'
            code += '};\n\n'
            code += '}\n\n'
            # ---- asynchronous caller class (extra callback argument) ----
            code += 'namespace async{\n\n'
            code += 'class ' + k + ': public ' + 'Fossilizid::juggle::caller' + '{\n' + 'public:\n'
            code += '    ' + k + '(boost::shared_ptr<Fossilizid::juggle::process> __process, boost::shared_ptr<Fossilizid::juggle::channel> ch) : caller(__process, ch, \"' + k + '\"' + '){\n    }\n\n'
            code += '    ~' + k + '(){\n    }\n\n'
            for func in v:
                code += '    ' + maketypetocpptype(func[0]) + ' ' + func[1] + '('
                for argv in func[2:]:
                    code += maketypetocpptype(argv[0]) + ' ' + argv[1] + ', '
                code += 'boost::function<void(' + maketypetocpptype(func[0]) + ')> callback){\n'
                code += '        boost::shared_ptr<boost::unordered_map<std::string, boost::any> > v = boost::make_shared<boost::unordered_map<std::string, boost::any> >();\n'
                for argv in func[2:]:
                    code += '        (*v)[\"' + argv[1] + '\"] = ' + argv[1] + ';\n'
                code += '        auto cb = [this, callback](boost::shared_ptr<boost::unordered_map<std::string, boost::any> > r){\n'
                if func[0] != 'void':
                    code += '            ' + maketypetocpptype(func[0]) + ' ret = ' + makeret(func[0], structs) + '\n'
                    code += '            callback(ret);\n        };\n'
                else:
                    code += '        };\n'
                code += '        call_module_method_async(\"' + k + '_' + func[1] + '\", v, cb' + ');\n'
                code += '    }\n\n'
            code += '};\n\n'
            code += '}\n\n'
            defmodulelist.append(k)
        # only write a header when at least one module was generated
        if code != '#include <juggle.h>\n#include <boost/make_shared.hpp>\n\n':
            out_path = os.path.join(argvs.build_path + 'caller', filename + 'caller.h')
            with open(out_path, 'w') as out:
                out.write(note + code)
"repo_name": "darklost/darkforce",
"path": "juggle/codegen/codegencaller.py",
"copies": "1",
"size": "3991",
"license": "bsd-3-clause",
"hash": -1620573965216761000,
"line_mean": 50.1794871795,
"line_max": 194,
"alpha_frac": 0.4645452268,
"autogenerated": false,
"ratio": 3.1155347384855583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9068933434242306,
"avg_score": 0.002229306208650624,
"num_lines": 78
} |
# 2014-12-24
# build by qianqians
# codegenclient
import os
import argvs
def maketypegetvalue(type):
    """Map an IDL scalar type name to the C++ type used in boost::any_cast.

    Returns None for type names it does not know (container types etc.),
    which callers must guard against.

    Fixed: the 'string' branch returned 'std::sstring' (typo), producing
    C++ that does not compile.
    """
    if type == 'int':
        return 'int64_t'
    if type == 'float':
        return 'double'
    if type == 'bool':
        return 'bool'
    if type == 'string' or type == 'std::string':
        return 'std::string'
    return None
def maketype(type, name):
    """Return C++ code assigning struct member n.<name> from (*r)["ret"].

    NOTE(review): the generated index `[' + name + ']` emits the bare
    identifier rather than a quoted string key (other generators quote
    it) -- confirm against the generated context.
    NOTE(review): the 'array' branch passes `type` (== 'array') to
    maketypegetvalue, which returns None, so the concatenation raises
    TypeError if this branch is ever reached.
    Returns None for unknown types.
    """
    if type == 'int':
        return '    n.' + name + ' = ' + 'boost::any_cast<int64_t>((*r)[\"ret\"][' + name + ']);'
    if type == 'float':
        return '    n.' + name + ' = ' + 'boost::any_cast<double>((*r)[\"ret\"][' + name + ']);'
    if type == 'bool':
        return '    n.' + name + ' = ' + 'boost::any_cast<bool>((*r)[\"ret\"][' + name + ']);'
    if type == 'string' or type == 'std::string':
        return '    n.' + name + ' = ' + 'boost::any_cast<std::string>((*r)[\"ret\"][' + name + ']);'
    if type == 'array':
        code = '    for(int i = 0; i < (*r)[\"ret\"].size(); i++){\n'
        code += '        n.' + name + '.push_back(boost::any_cast<' + maketypegetvalue(type) + '>((*r)[\"ret\"][i]));'
        code += '}\n'
        return code
def unpackstruct(type, struct):
    """Generate C++ that rebuilds a user-defined struct from (*r)["ret"].

    NOTE(review): `struct` comes from statemachine.getstruct() and is a
    dict, so `for k, v in struct:` raises at runtime -- probably needs
    `.items()`.  The literal '    name n;' also hard-codes 'name'
    instead of the struct's type name.  Returns None when nothing
    matches `type`.
    """
    for k, v in struct:
        for name, define in v:
            if type == name:
                code = '    name n;\n'
                for argv in define:
                    code += maketype(argv[0], argv[1])
                code += '    return n;'
                return code
def makearray(type):
    """Return C++ code that copies (*r)["ret"] into a std::vector.

    `type` is an IDL name like 'array<int>'; the element type between
    the angle brackets selects the boost::any_cast target.  Returns None
    for element types that are not recognized.

    Fixed: the old 'string' branch compared `type` instead of
    `templatetype` against 'std::string', so 'array<std::string>'
    silently fell through and returned None; the four copy-pasted
    branches are collapsed into one table lookup.
    """
    indexb = type.find('<')
    indexe = type.find('>')
    templatetype = type[indexb + 1:indexe]
    cpptypes = {'int': 'int64_t', 'float': 'double', 'bool': 'bool',
                'string': 'std::string', 'std::string': 'std::string'}
    if templatetype not in cpptypes:
        return None
    cpptype = cpptypes[templatetype]
    return ('std::vector<%s> v;\n'
            'for(int i = 0; i < (*r)["ret"].size(); i++){\n'
            '    v.push_back(boost::any_cast<%s>((*r)["ret"][i]));\n'
            '}\n'
            'return v;') % (cpptype, cpptype)
def makeret(type, struct):
    """Return the C++ expression that extracts a call's return value
    from the response map (*r)["ret"].

    Scalars are boost::any_cast expressions, 'array<...>' defers to
    makearray, 'map' passes the raw value through, 'void' is empty, and
    anything else is treated as a user struct via unpackstruct.
    """
    simple = {
        'int': 'boost::any_cast<int64_t>((*r)["ret"]);',
        'float': 'boost::any_cast<double>((*r)["ret"]);',
        'bool': 'boost::any_cast<bool>((*r)["ret"]);',
        'string': 'boost::any_cast<std::string>((*r)["ret"]);',
        'std::string': 'boost::any_cast<std::string>((*r)["ret"]);',
        'map': '(*r)["ret"];',
        'void': '',
    }
    if type in simple:
        return simple[type]
    if type.find('array') != -1:
        return makearray(type)
    return unpackstruct(type, struct)
def maketypevalue(type, name):
    """Return C++ assigning struct member n.<name> from the argument map (*v).

    NOTE(review): the generated `(*v)[' + name + ']` indexes with the
    bare identifier rather than the quoted key used by makevalue --
    confirm which form the generated context expects.
    Returns None for unknown types.
    """
    if type == 'int':
        return '    n.' + name + ' = ' + 'boost::any_cast<int64_t>((*v)[' + name + ']' + ');'
    if type == 'float':
        return '    n.' + name + ' = ' + 'boost::any_cast<double>((*v)[' + name + ']' + ');'
    if type == 'bool':
        return '    n.' + name + ' = ' + 'boost::any_cast<bool>((*v)[' + name + ']' + ');'
    if type == 'string' or type == 'std::string':
        return '    n.' + name + ' = ' + 'boost::any_cast<std::string>((*v)[' + name + ']' + ');'
    if type.find('array') != -1:
        indexb = type.find('<')
        indexe = type.find('>')
        templatetype = type[indexb + 1 : indexe]
        code = '    n.' + name + ';\n'
        code += '    for(int i = 0; i < (*v).size(); i++){\n'
        # relies on maketypegetvalue knowing `templatetype`; for unknown
        # element types it returns None and this concatenation raises
        code += '        n.' + name + '.push_back(boost::any_cast<' + maketypegetvalue(templatetype) + '>((*v)[i]));'
        code += '}\n'
        return code
def unpackstructvalue(type, name, struct):
    """Generate C++ declaring a struct local `name` and filling it from (*v).

    NOTE(review): same iteration problem as unpackstruct -- `for k, v in
    struct:` fails when `struct` is a dict (needs .items()) -- and the
    literal 'typename' is emitted verbatim instead of the struct's type.
    Returns None when `type` matches nothing.
    """
    for k, v in struct:
        for typename, define in v:
            if type == typename:
                code = '    typename ' + name + ';\n'
                for argv in define:
                    code += maketypevalue(argv[0], argv[1])
                return code
def makearrayvalue(type, name):
    """Return C++ that declares std::vector `name` and fills it from
    (*v)["<name>"].

    `type` is an IDL name like 'array<int>'.  Returns None for element
    types that are not recognized.

    Fixes two generation bugs: elements were pushed into `v` (never
    declared in the emitted snippet) instead of the declared vector
    `name`, and the bool branch dropped the `[i]` element index.
    """
    indexb = type.find('<')
    indexe = type.find('>')
    templatetype = type[indexb + 1:indexe]
    cpptypes = {'int': 'int64_t', 'float': 'double', 'bool': 'bool',
                'string': 'std::string', 'std::string': 'std::string'}
    if templatetype not in cpptypes:
        return None
    cpptype = cpptypes[templatetype]
    return ('    std::vector<%s> %s;\n'
            '    for(int i = 0; i < (*v)["%s"].size(); i++){\n'
            '        %s.push_back(boost::any_cast<%s>((*v)["%s"][i]));\n'
            '    }\n') % (cpptype, name, name, name, cpptype, name)
def makevalue(type, name, struct):
    """Return C++ that extracts argument `name` from the request map (*v).

    Scalars use boost::any_cast, 'array<...>' defers to makearrayvalue,
    'map' passes the value through, anything else is treated as a user
    struct via unpackstructvalue.
    """
    casts = {'int': 'int64_t', 'float': 'double', 'bool': 'bool',
             'string': 'std::string', 'std::string': 'std::string'}
    if type in casts:
        return '    auto ' + name + ' = boost::any_cast<' + casts[type] + '>((*v)["' + name + '"]);\n'
    if type.find('array') != -1:
        return makearrayvalue(type, name)
    if type == 'map':
        return '    auto ' + name + ' = (*v)["' + name + '"];\n'
    return unpackstructvalue(type, name, struct)
def typetocpptype(type):
    """Map an IDL scalar type name to its C++ spelling.

    Unknown names (user struct types) pass through unchanged.
    """
    mapping = {'int': 'int64_t', 'float': 'double', 'string': 'std::string'}
    return mapping.get(type, type)
def makearraytocpp(type, name):
    """Return an indented C++ member declaration for an IDL array<...> field."""
    opening = type.find('<')
    closing = type.find('>')
    element = type[opening + 1:closing]
    return '    std::vector<' + typetocpptype(element) + '> ' + name + ';'
def maketypetocpptype(type):
    """Map an IDL parameter/return type (including array<...>) to C++.

    Unknown names (user struct types) pass through unchanged.
    """
    if type.find('array') != -1:
        opening = type.find('<')
        closing = type.find('>')
        element = type[opening + 1:closing]
        # trailing space kept: callers concatenate the parameter name
        return 'std::vector<' + typetocpptype(element) + '> '
    scalars = {'int': 'int64_t', 'float': 'double', 'string': 'std::string'}
    return scalars.get(type, type)
"repo_name": "darklost/darkforce",
"path": "juggle/codegen/gentools.py",
"copies": "1",
"size": "7307",
"license": "bsd-3-clause",
"hash": -1414411826315958000,
"line_mean": 38.2903225806,
"line_max": 112,
"alpha_frac": 0.448610921,
"autogenerated": false,
"ratio": 3.0144389438943895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39630498648943896,
"avg_score": null,
"num_lines": null
} |
# 2014. 12. 9 by Hans Roh hansroh@gmail.com
__version__ = "0.32.8"
# e.g. (0, 32, 8); non-numeric version parts are kept as strings
version_info = tuple (map (lambda x: not x.isdigit () and x or int (x), __version__.split (".")))
# server identification string, e.g. "Skitai/0.32"
NAME = "Skitai/%s.%s" % version_info [:2]
import aquests # should be first for psycopg2 compat
from rs4 import deco, importer
from rs4.psutil import service
from rs4.attrdict import AttrDict
import threading
import sys, os
import h2
from aquests.dbapi import (
DB_PGSQL, DB_POSTGRESQL, DB_SQLITE3, DB_REDIS, DB_MONGODB,
DB_SYN_PGSQL, DB_SYN_REDIS, DB_SYN_MONGODB
)
import warnings
from aquests.protocols.smtp import composer
import tempfile
from rs4 import argopt
from .backbone import lifetime
from . import mounted
from .corequest import corequest
from functools import wraps
import copy
import rs4
from rs4.termcolor import tc
# register skitai's built-in command line options
argopt.add_option ('-d', desc = "start as daemon, equivalant with using `start` command") # lower version compatible
argopt.add_option (None, '---profile', desc = "log for performance profiling")
argopt.add_option (None, '---gc', desc = "enable manual GC")
argopt.add_option (None, '---memtrack', desc = "show memory status")
argopt.add_option (None, '--production', desc = "run as production mode")
argopt.add_option (None, '--smtpda', desc = "run SMTPDA if not started")
argopt.add_option (None, '--port=PORT_NUMBER', desc = "http/https port number")
argopt.add_option (None, '--quic=UDP_PORT_NUMBER', desc = "http3/quic port number")

# --production flips the runtime into production mode (see is_devel ())
if "--production" in sys.argv:
    os.environ ["SKITAI_ENV"] = "PRODUCTION"

# --smtpda launches the SMTP delivery agent daemon once per process
SMTP_STARTED = False
if "--smtpda" in sys.argv:
    os.system ("{} -m skitai.bin.skitai smtpda -d".format (sys.executable))
    SMTP_STARTED = True
def set_smtp (server, user = None, password = None, ssl = False, start_service = True):
    """Register the default SMTP relay and, unless disabled or already
    running, launch the SMTP delivery agent daemon."""
    composer.set_default_smtp (server, user, password, ssl)
    if start_service and not SMTP_STARTED:
        os.system ("{} -m skitai.bin.skitai smtpda -d".format (sys.executable))
def test_client (*args, **kargs):
    """Create a testutil Launcher (imported lazily) for client-side testing."""
    from .testutil.launcher import Launcher
    return Launcher (*args, **kargs)
# set by detect_atila () once the optional Atila framework is importable
HAS_ATILA = None

# backend (outbound) connection pool tuning defaults (seconds)
DEFAULT_BACKEND_KEEP_ALIVE = 300
DEFAULT_BACKEND_OBJECT_TIMEOUT = 600
DEFAULT_BACKEND_MAINTAIN_INTERVAL = 30
# frontend defaults (seconds)
DEFAULT_KEEP_ALIVE = 2
DEFAULT_NETWORK_TIMEOUT = 30
DEFAULT_BACKGROUND_TASK_TIMEOUT = 300

# protocol identifiers used when declaring clusters / endpoints
PROTO_HTTP = "http"
PROTO_HTTPS = "https"
PROTO_SYN_HTTP = "http_syn"
PROTO_SYN_HTTPS = "https_syn"
PROTO_WS = "ws"
PROTO_WSS = "wss"
DJANGO = "django"

# corequest result status codes (aliased with and without STA_ prefix)
STA_REQFAIL = REQFAIL = -1
STA_UNSENT = UNSENT = 0
STA_TIMEOUT = TIMEOUT = 1
STA_NETERR = NETERR = 2
STA_NORMAL = NORMAL = 3

# websocket design specs
WEBSOCKET_SIMPLE = 1
WEBSOCKET_GROUPCHAT = 5
WS_CHANNEL = WS_SIMPLE = 1
WS_GROUPCHAT = 5
WS_THREADSAFE_DEPRECATED = 7
# optional executing ways
WS_THREAD = 0
WS_NOTHREAD = WS_NQ = 128
WS_SESSION = 256
WS_THREADSAFE = 134

# websocket lifecycle event names
WS_EVT_INIT = "init"
WS_EVT_OPEN = "open"
WS_EVT_CLOSE = "close"
WS_EVT_NONE = None

# websocket message codecs
WS_MSG_JSON = "json"
WS_MSG_XMLRPC = "xmlrpc"
WS_MSG_GRPC = "grpc"
WS_MSG_TEXT = "text"
WS_MSG_DEFAULT = "text"

# RFC 6455 websocket frame opcodes
WS_OPCODE_TEXT = 0x1
WS_OPCODE_BINARY = 0x2
WS_OPCODE_CLOSE = 0x8
WS_OPCODE_PING = 0x9
WS_OPCODE_PONG = 0xa
class _WASPool:
    """Thread-local pool of `was` (application context) objects.

    Attribute reads are proxied to the calling thread's instance,
    created on demand from the class registered via _start ().
    Attribute writes are broadcast: set on the registered class and on
    every live per-thread instance.  Names starting with the mangled
    prefix "_WASPool__" are the pool's own private state.

    Fixed: __delattr__ called delattr () with three arguments including
    an undefined name `value`, raising NameError whenever an attribute
    was deleted while per-thread instances existed.
    """
    def __init__ (self):
        self.__wasc = None   # the was class registered by _start ()
        self.__p = {}        # thread id -> per-thread was instance
        self.__kargs = {}    # kwargs forwarded to the was constructor

    def __get_id (self):
        # the pool is keyed by the identity of the current thread
        return id (threading.currentThread ())

    def __repr__ (self):
        return "<class skitai.WASPool at %x, was class: %s>" % (id (self), self.__wasc)

    def __getattr__ (self, attr):
        # delegate unknown attributes to this thread's was instance
        return getattr (self._get (), attr)

    def __setattr__ (self, attr, value):
        if attr.startswith ("_WASPool__"):
            # private pool state stays on the pool object itself
            self.__dict__[attr] = value
        else:
            # broadcast: set on the class and on every live instance
            setattr (self.__wasc, attr, value)
            for _id in self.__p:
                setattr (self.__p [_id], attr, value)

    def __delattr__ (self, attr):
        delattr (self.__wasc, attr)
        for _id in self.__p:
            # delattr () takes exactly two arguments
            delattr (self.__p [_id], attr)

    def _start (self, wasc, **kargs):
        # register the was class; instances are created lazily per thread
        self.__wasc = wasc
        self.__kargs = kargs

    def _started (self):
        return self.__wasc

    def _del (self):
        # drop the current thread's instance, if any
        _id = self.__get_id ()
        try:
            del self.__p [_id]
        except KeyError:
            pass

    def _get (self):
        # return (creating on first use) this thread's was instance
        _id = self.__get_id ()
        try:
            return self.__p [_id]
        except KeyError:
            _was = self.__wasc (**self.__kargs)
            self.__p [_id] = _was
            return _was

was = _WASPool ()
def start_was (wasc, **kargs):
    """Register the was class with the global pool (called at server boot)."""
    global was
    detect_atila ()
    was._start (wasc, **kargs)
def detect_atila ():
    """Set HAS_ATILA to the Atila application class when importable."""
    # imported inside the function to avoid recursive importing
    try:
        import atila
    except ImportError:
        pass
    else:
        global HAS_ATILA
        HAS_ATILA = atila.Atila
def websocket (varname = 60, timeout = 60, onopen = None, onclose = None):
    """Decorator turning a request handler into a websocket endpoint
    (for non-atila apps).

    Calling convention shim: when the first positional argument is an
    int it is taken as the timeout and the endpoint runs in WS_SESSION
    mode with no message variable name.
    """
    global was
    if isinstance (varname, int):
        assert not onopen and not onclose, 'skitai.WS_SESSION cannot have onopen or onclose handler'
        # websocket (timeout) form: shift arguments
        timeout, varname = varname, None
    # for non-atila app
    def decorator(f):
        @wraps(f)
        def wrapper (*args, **kwargs):
            was_ = was._get ()
            if not was_.wshasevent ():
                # ordinary in-session message: run the wrapped handler
                return f (*args, **kwargs)
            if was_.wsinit ():
                # handshake: declare design spec, timeout and param name
                return was_.wsconfig (varname and 1 or 1|WS_SESSION, timeout, [varname,], not varname and f (*args, **kwargs) or None)
            elif was_.wsopened ():
                return onopen and onopen () or ''
            elif was_.wsclosed ():
                return onclose and onclose () or ''
        return wrapper
    return decorator
#------------------------------------------------
# Configure
#------------------------------------------------
# module-level default configuration; mutated by the helper functions
# below and consumed when the server boots
dconf = dict (
    mount = {"default": []},   # virtual host -> list of mount specs
    clusters = {},             # cluster alias -> constructor args
    max_ages = {},             # url path -> cache-control max-age
    log_off = [],              # url paths excluded from access logging
    dns_protocol = 'tcp',
    models_keys = set (),      # reserved shared-state (luwatcher) keys
    wasc_options = {},         # extra kwargs for the was class
)
def add_wasc_option (k, v):
    """Pass an extra keyword option to the was class."""
    global dconf
    dconf ['wasc_options'][k] = v
def disable_aquests ():
    """Force synchronous outbound connections (use_syn_conn)."""
    global dconf
    dconf ['wasc_options']['use_syn_conn'] = True
def manual_gc (interval = 60.0):
    """Enable periodic manual garbage collection every `interval` seconds
    (delegates to lifetime.manual_gc)."""
    lifetime.manual_gc (interval)
def set_worker_critical_point (cpu_percent = 90.0, continuous = 3, interval = 20):
    """Set CPU-overload thresholds on both http and https server classes,
    sampled every `interval` seconds for `continuous` consecutive checks."""
    from .backbone.http_server import http_server
    from .backbone.https_server import https_server
    http_server.critical_point_cpu_overload = https_server.critical_point_cpu_overload = cpu_percent
    http_server.critical_point_continuous = https_server.critical_point_continuous = continuous
    http_server.maintern_interval = https_server.maintern_interval = interval
class Preference (AttrDict):
    """Per-app settings container handed to a mounted application.

    Behaves as an attribute dict (AttrDict); `mountables` is written
    straight into __dict__ so it bypasses AttrDict's attribute handling.
    """
    def __init__ (self, path = None):
        super ().__init__ ()
        self.__path = path
        self.__dict__ ["mountables"] = []
    def __enter__ (self):
        # as a context manager: make the app's own path importable
        if self.__path:
            sys.path.insert (0, joinpath (self.__path))
        return self
    def __exit__ (self, *args):
        pass
    def mount (self, *args, **kargs):
        # record mount requests; presumably applied later at server boot
        self.__dict__ ["mountables"].append ((args, kargs))
    def copy (self):
        return copy.deepcopy (self)
def preference (preset = False, path = None):
    """Create a Preference carrying a fresh app Config (optionally preset)."""
    from .wastuff.wsgi_apps import Config
    d = Preference (path)
    d.config = Config (preset)
    return d
# short alias widely used in app scripts
pref = preference
PROCESS_NAME = None
def get_proc_title ():
    """Return the process title "skitai/<dir>[-<script>]", computed once
    from the startup script's location and cached."""
    global PROCESS_NAME
    if PROCESS_NAME is None:
        full_path = os.path.join (os.getcwd (), sys.argv [0])
        dirname, basename = os.path.split (full_path)
        script = basename.split (".") [0]
        # scripts named "app" get no suffix
        suffix = "" if script == "app" else "-" + script
        PROCESS_NAME = "skitai/%s%s" % (os.path.basename (dirname), suffix)
    return PROCESS_NAME
SWD = None
def getswd ():
    """Return the directory containing the startup script (cached)."""
    global SWD
    if SWD is not None:
        return SWD
    startup_script = os.path.join (os.getcwd (), sys.argv [0])
    SWD = os.path.dirname (startup_script)
    return SWD
def is_devel ():
    """True unless SKITAI_ENV is PRODUCTION (set by --production)."""
    return os.environ.get ('SKITAI_ENV') != "PRODUCTION"
def joinpath (*pathes):
    """Join path components relative to the startup script's directory."""
    return os.path.normpath (os.path.join (getswd (), *pathes))
# historical alias
abspath = joinpath
# service wrapper class registered by the host script via set_service ()
Win32Service = None
def set_service (service_class):
    """Register the win32 service wrapper class."""
    global Win32Service
    Win32Service = service_class
def log_off (*path):
    """Register URL paths that should be excluded from access logging."""
    global dconf
    dconf ['log_off'].extend (path)
def add_http_rpc_proto (name, class_):
    """Register an additional http rpc protocol class under `name`
    (the name must end with 'rpc')."""
    assert name.endswith ("rpc"), "protocol name must be end with 'rpc'"
    # relative import: the absolute `from corequest...` raises
    # ImportError inside the skitai package (the relative form is what
    # this module uses everywhere else)
    from .corequest.httpbase import task
    task.Task.add_proto (name, class_)
def add_database_interface (name, class_):
    """Register an additional database interface class under `name`
    (the name must start with '*')."""
    assert name.startswith ("*"), "database interface name must be start with '*'"
    # relative import: the absolute `from corequest...` raises
    # ImportError inside the skitai package (the relative form is what
    # this module uses everywhere else)
    from .corequest.dbi import cluster_manager
    cluster_manager.ClusterManager.add_class (name, class_)
def set_dns_protocol (protocol = 'tcp'):
    """Select the DNS lookup transport ('tcp' or 'udp')."""
    global dconf
    dconf ['dns_protocol'] = protocol
def set_max_age (path, max_age):
    """Set cache-control max-age (seconds) for a URL path."""
    global dconf
    dconf ["max_ages"][path] = max_age
def set_max_rcache (objmax):
    """Limit the number of objects held in the result cache."""
    global dconf
    dconf ["rcache_objmax"] = objmax
def set_keep_alive (timeout):
    """Set the frontend HTTP keep-alive timeout (seconds)."""
    global dconf
    dconf ["keep_alive"] = timeout
def config_executors (workers = None, zombie_timeout = DEFAULT_BACKGROUND_TASK_TIMEOUT):
    """Configure the background-task executor pool size and zombie timeout."""
    global dconf
    dconf ["executors_workers"] = workers
    dconf ["executors_zombie_timeout"] = zombie_timeout
def set_backend (timeout, object_timeout = DEFAULT_BACKEND_OBJECT_TIMEOUT, maintain_interval = DEFAULT_BACKEND_MAINTAIN_INTERVAL):
    """Tune backend connection keep-alive, object timeout and the pool
    maintenance interval (all in seconds)."""
    global dconf
    dconf ["backend_keep_alive"] = timeout
    dconf ["backend_object_timeout"] = object_timeout
    dconf ["backend_maintain_interval"] = maintain_interval
def set_backend_keep_alive (timeout):
    """Backward-compatible shortcut for set_backend (timeout)."""
    set_backend (timeout)
def set_proxy_keep_alive (channel = 60, tunnel = 600):
    """Set proxy channel / tunnel keep-alive timeouts (seconds)."""
    from .handlers import proxy
    proxy.PROXY_KEEP_ALIVE = channel
    proxy.PROXY_TUNNEL_KEEP_ALIVE = tunnel
def set_request_timeout (timeout):
    """Set the network (request) timeout in seconds."""
    global dconf
    dconf ["network_timeout"] = timeout
# historical alias
set_network_timeout = set_request_timeout
def set_was_class (was_class):
    """Override the was (context) class instantiated per thread."""
    global dconf
    dconf ["wasc"] = was_class
def _reserve_states (*names):
    """Reserve shared-state (luwatcher) keys, before or after server start.

    Accepts either several name arguments or a single list/tuple.
    """
    if isinstance (names [0], (list, tuple)):
        names = list (names [0])
    if was._started ():
        # server already running: register directly with the watcher
        was._luwatcher.add (names)
    else:
        for k in names:
            dconf ["models_keys"].add (k)
# historical aliases
addlu = trackers = lukeys = deflu = _reserve_states
def register_states (*names):
    """Class-decorator form of _reserve_states; the class is unchanged."""
    _reserve_states (names)
    def decorator (cls):
        return cls
    return decorator
def maybe_django (wsgi_path, appname):
    """Detect a Django project from its wsgi path.

    When `wsgi_path` is a string, `appname` is "application" and a
    sibling settings.py exists, the project root is pushed onto
    sys.path and returned; otherwise returns None.
    """
    if not isinstance (wsgi_path, str) or appname != "application":
        return
    settings = os.path.join (os.path.dirname (wsgi_path), 'settings.py')
    if not os.path.exists (settings):
        return
    root = os.path.dirname (os.path.dirname (wsgi_path))
    sys.path.insert (0, root)
    return root
def mount (point, target, appname = "app", pref = pref (True), host = "default", path = None):
    """Mount a static directory, wsgi app file/module or Django project
    at URL `point` for virtual host `host`.

    NOTE(review): the default `pref = pref (True)` is evaluated once at
    import time, so every call that omits `pref` shares (and may
    mutate) the same Preference instance -- confirm this is intended.
    """
    global dconf
    if isinstance (appname, Preference):
        # mount (point, target, pref) calling convention
        pref, appname = appname, "app"
    def init_app (modpath, pref):
        # run the app package's optional bootstrap (pref) hook
        srvice_root = os.path.dirname (modpath)
        # IMP: MUST pathing because reloading module
        sys.path.append (srvice_root)
        modinit = os.path.join (srvice_root, "__init__.py")
        if os.path.isfile (modinit):
            mod = importer.from_file ("temp", modinit)
            hasattr (mod, "bootstrap") and mod.bootstrap (pref)
    maybe_django (target, appname)
    if path:
        # extra import paths, highest priority first
        if isinstance (path, str):
            path = [path]
        path.reverse ()
        for each in path:
            sys.path.insert (0, abspath (each))
    if hasattr (target, "__file__"):
        # a module object was given: mount its export/skitai app file
        target = (target, '__export__.py')
    if type (target) is tuple:
        module, appfile = target
        target = os.path.join (os.path.dirname (module.__file__), "export", "skitai", appfile)
    if type (target) is not str:
        # app instance, find app location
        target = os.path.normpath (os.path.join (os.getcwd (), sys.argv [0]))
    else:
        if target [0] == "@":
            # "@name" spec: no app to import
            appname = None
        else:
            # "file.py:appname" form overrides appname
            tmp = os.path.basename (target).split (":", 1)
            if len (tmp) == 2:
                target, appname = os.path.join (os.path.dirname (target), tmp [0]), tmp [1]
        target = joinpath (target)
    if host not in dconf ['mount']:
        dconf ['mount'][host] = []
    if os.path.isdir (target) or not appname:
        # static directory or raw "@" spec
        dconf ['mount'][host].append ((point, target, None))
    else:
        init_app (target, pref)
        dconf ['mount'][host].append ((point, (target, appname), pref))
# Django projects mount the same way
mount_django = mount
def enable_forward (port = 80, forward_port = 443, forward_domain = None, ip = ""):
    """Configure the forwarding front server (typically port 80 -> 443)."""
    global dconf
    dconf ['fws_address'] = ip
    dconf ['fws_port'] = port
    dconf ['fws_to'] = forward_port
    dconf ['fws_domain'] = forward_domain
def enable_gateway (enable_auth = False, secure_key = None, realm = "Skitai API Gateway"):
    """Enable the API gateway, optionally with authentication.

    Fixed: `gw_auth` and `gw_realm` were assigned with trailing commas,
    storing 1-tuples like (False,) instead of the plain values.
    """
    global dconf
    dconf ["enable_gw"] = True
    dconf ["gw_auth"] = enable_auth
    dconf ["gw_realm"] = realm
    dconf ["gw_secret_key"] = secure_key
def _get_django_settings (settings_path):
    """Import a Django settings module by file path and return DATABASES.

    Also sets DJANGO_SETTINGS_MODULE and extends sys.path when the
    environment variable is not already set.
    """
    import importlib
    import django
    ap = abspath (settings_path)
    django_main, settings_file = os.path.split (ap)
    django_root, django_main_dir = os.path.split (django_main)
    # e.g. "mysite.settings" from .../mysite/settings.py
    settings_mod = "{}.{}".format (django_main_dir, settings_file.split (".")[0])
    if not os.environ.get ("DJANGO_SETTINGS_MODULE"):
        sys.path.insert (0, django_root)
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_mod)
    return importlib.import_module(settings_mod).DATABASES
def _alias_django (name, settings_path):
    """Create a cluster alias from a Django settings file's default DB.

    Supports sqlite3 and postgresql engines; returns None for anything
    else (the caller raises SystemError in that case).
    """
    dbsettings = _get_django_settings (settings_path)
    default = dbsettings ['default']
    if default ['ENGINE'].endswith ('sqlite3'):
        return alias (name, DB_SQLITE3, default ['NAME'])
    if default ['ENGINE'].find ("postgresql") != -1:
        # fill in missing connection fields with defaults
        if not default.get ("PORT"):
            default ["PORT"] = 5432
        if not default.get ("HOST"):
            default ["HOST"] = "127.0.0.1"
        if not default.get ("USER"):
            default ["USER"] = ""
        if not default.get ("PASSWORD"):
            default ["PASSWORD"] = ""
        return alias (name, DB_PGSQL, "%(HOST)s:%(PORT)s/%(NAME)s/%(USER)s/%(PASSWORD)s" % default)
def alias (name, ctype, members, role = "", source = "", ssl = False, max_coons = 100):
    """Register a named backend cluster (http or database) for was calls.

    A leading '@' in `name` is stripped.  Re-registering an existing
    name returns the existing entry.  Returns (name, args).
    """
    from .corequest.httpbase.cluster_manager import AccessPolicy
    global dconf
    if name [0] == "@":
        name = name [1:]
    if dconf ["clusters"].get (name):
        # already registered: keep the first definition
        return name, dconf ["clusters"][name]
    if ctype == DJANGO:
        # translate the Django DATABASES setting into a native alias
        alias = _alias_django (name, members)
        if alias is None:
            raise SystemError ("Database engine is not compatible")
        return alias
    policy = AccessPolicy (role, source)
    args = (ctype, members, policy, ssl, max_coons)
    dconf ["clusters"][name] = args
    return name, args
def enable_cachefs (memmax = 0, diskmax = 0, path = None):
    # Configure the cache file system: memory limit, disk limit and the
    # directory used for the on-disk cache.
    global dconf
    dconf ["cachefs_dir"] = path
    dconf ["cachefs_diskmax"] = diskmax
    dconf ["cachefs_memmax"] = memmax
def enable_proxy (unsecure_https = False):
    # Enable proxy / load-balancing mode.  On POSIX systems DNS resolution
    # is switched to the UDP protocol.
    global dconf
    dconf ["proxy_unsecure_https"] = unsecure_https
    dconf ["proxy"] = True
    if os.name == "posix":
        dconf ['dns_protocol'] = 'udp'
def enable_file_logging (path = None):
    # Write server logs to files under `path` (None lets run() derive one).
    global dconf
    dconf ['logpath'] = path

def enable_blacklist (path):
    # Directory holding the IP blacklist files.
    global dconf
    dconf ["blacklist_dir"] = path

def enable_ssl (certfile, keyfile = None, passphrase = None):
    # Serve HTTPS using the given certificate material.
    global dconf
    dconf ["passphrase"] = passphrase
    dconf ["keyfile"] = keyfile
    dconf ["certfile"] = certfile
def get_varpath (name):
    # Derive a filesystem-safe runtime directory from a process title:
    # drop the leading "skitai/" part and replace ':' and ' ' with '-'.
    safe = name.split ("/", 1)[-1].replace (":", "-").replace (" ", "-")
    if os.name == "posix":
        return '/var/tmp/skitai/%s' % safe
    return os.path.join (tempfile.gettempdir (), safe)
def get_logpath (name):
    # Derive a filesystem-safe log directory from a process title:
    # drop the leading "skitai/" part and replace ':' and ' ' with '-'.
    safe = name.split ("/", 1)[-1].replace (":", "-").replace (" ", "-")
    if os.name == "posix":
        return '/var/log/skitai/%s' % safe
    return os.path.join (tempfile.gettempdir (), safe)
options = None
def add_option (sopt, lopt = None, desc = None):
    # Declare one command-line option (short and/or long form, with an
    # optional description) and refresh the module-level parsed-options cache.
    global options
    argopt.add_option (sopt, lopt, desc)
    options = argopt.options ()
def add_options (*lnames):
    """Declare several command-line options at once.

    Deprecated: use add_option() when a description is needed.  Each name
    must look like "-x" or "--long"; "-d" is reserved by skitai itself.
    """
    global options
    for lname in lnames:
        # BUGFIX (messages only): the original assertion texts contained the
        # typos "Aurgument" and "in ussed".
        assert lname and lname [0] == "-", "Argument should start with '-' or '--'"
        assert lname != "-d" and lname != "-d=", "Argument -d is in use"
        if lname.startswith ("--"):
            argopt.add_option (None, lname [2:])
        else:
            argopt.add_option (lname [1:])
    options = argopt.options ()
def getopt (sopt = "", lopt = []):
    # getopt.getopt-style option declaration: `sopt` is a short-option spec
    # string where ':' marks the *preceding* character as value-taking, and
    # `lopt` is a list of long option names.  Returns (opts, args) with
    # skitai-internal entries filtered out.
    # NOTE(review): the mutable default lopt=[] is only iterated, never
    # mutated, so it is harmless here.
    global options
    # argopt.getopt style
    if "d" in sopt:
        raise SystemError ("-d is used by skitai, please change")
    for each in lopt:
        argopt.add_option (None, each)
    # split on ':' so the last character of each chunk (except the final
    # chunk) is registered as a value-taking option ("x:"), the rest as flags
    grps = sopt.split (":")
    for idx, grp in enumerate (grps):
        for idx2, each in enumerate (grp):
            if idx2 == len (grp) - 1 and len (grps) > idx + 1:
                argopt.add_option (each + ":")
            else:
                argopt.add_option (each)
    options = argopt.options ()
    # drop skitai's own "-d" and internal "---name" worker options
    opts_ = []
    for k, v in options.items ():
        if k == "-d":
            continue
        elif k.startswith ("---"):
            continue
        opts_.append ((k, v))
    # drop the daemon-control commands from the positional arguments
    aopt_ = []
    for arg in options.argv:
        if arg in ("start", "stop", "status", "restart"):
            continue
        aopt_.append (arg)
    return opts_, aopt_
def get_command ():
    # Determine the daemon-control command from the command line.
    # Prints usage and exits on '--help'; '-d' implies 'start'; otherwise
    # the first of start/stop/status/restart found in argv wins.
    # Returns the command string or None (foreground run).
    global options
    options = argopt.options ()
    if '--help' in options:
        print ("{}: {} [OPTION]... [COMMAND]...".format (tc.white ("Usage"), sys.argv [0]))
        print ("COMMAND can be one of [status|start|stop|restart]")
        argopt.usage ()
        sys.exit ()
    cmd = None
    if "-d" in options:
        # -d means daemonize, which is a 'start'
        cmd = "start"
    else:
        for cmd_ in ("start", "stop", "status", "restart"):
            if cmd_ in options.argv:
                cmd = cmd_
                break
    return cmd
def getsysopt (name, default = None):
    """Return the value following a '---name' marker on sys.argv.

    Triple-dash options are skitai-internal (passed from the master process
    to workers).  Returns `default` when the marker is absent or has no
    following value.
    """
    try:
        return sys.argv [sys.argv.index ("---{}".format (name)) + 1]
    except (ValueError, IndexError):
        # ValueError: marker not present.
        # IndexError (BUGFIX, previously uncaught): marker is the last
        # argument, i.e. no value was supplied after it.
        return default
def hassysopt (name):
    # True when the skitai-internal '---name' marker is on the command line.
    marker = "---{}".format (name)
    return marker in sys.argv
def sched (interval, func):
    # Schedule `func` to run every `interval` seconds on lifetime's
    # maintenance loop.
    lifetime.maintern.sched (interval, func)
def run (**conf):
    """Configure and start a skitai server instance.

    Keyword arguments override the values collected into the module-wide
    `dconf` by the enable_*/alias/mount helpers.  Depending on the command
    line (start/stop/status/restart or none) this daemonizes, signals a
    running daemon, or runs in the foreground.
    """
    import os, sys, time
    from . import Skitai
    from rs4.psutil import flock
    from rs4 import pathtool

    class SkitaiServer (Skitai.Loader):
        # Concrete server loader: wires `conf` into Skitai.Loader and adds
        # file-lock based signal handling for the Windows service wrapper.
        NAME = 'instance'

        def __init__ (self, conf):
            self.conf = conf
            self.flock = None
            Skitai.Loader.__init__ (self, 'config', conf.get ('logpath'), conf.get ('varpath'), conf.get ("wasc"))

        def close (self):
            if self.wasc.httpserver.worker_ident == "master":
                pass
            Skitai.Loader.close (self)

        def config_logger (self, path):
            # log to file when a path is configured; add screen logging in
            # verbose mode, or as a fallback when no medium is active
            media = []
            if path is not None:
                media.append ("file")
            if self.conf.get ('verbose', "no") in ("yes", "1", 1):
                media.append ("screen")
                self.conf ['verbose'] = "yes"
            if not media:
                media.append ("screen")
                self.conf ['verbose'] = "yes"
            Skitai.Loader.config_logger (self, path, media, self.conf ["log_off"])

        def master_jobs (self):
            # runs once in the master after the web server is up
            if os.environ.get ("SKITAI_ENV") == "PRODUCTION":
                self.wasc.logger ("server", "[info] running for production mode")
            else:
                self.wasc.logger ("server", "[info] running for development mode")
            self.wasc.logger ("server", "[info] engine tmp path: %s" % self.varpath)
            if self.logpath:
                self.wasc.logger ("server", "[info] engine log path: %s" % self.logpath)
            self.set_model_keys (self.conf ["models_keys"])

        def maintern_shutdown_request (self, now):
            # Windows only: poll the lock file for control signals written
            # by the service wrapper (terminate/restart/kill/rotate).
            req = self.flock.lockread ("signal")
            if not req: return
            self.wasc.logger ("server", "[info] got signal - %s" % req)
            if req == "terminate":
                lifetime.shutdown (0, 30.0)
            elif req == "restart":
                lifetime.shutdown (3, 30.0)
            elif req == "kill":
                lifetime.shutdown (0, 1.0)
            elif req == "rotate":
                self.wasc.logger.rotate ()
            else:
                self.wasc.logger ("server", "[error] unknown signal - %s" % req)
            self.flock.unlock ("signal")

        def configure (self):
            options = argopt.options ()
            conf = self.conf
            self.set_num_worker (conf.get ('workers', 1))
            if conf.get ("certfile"):
                self.config_certification (conf.get ("certfile"), conf.get ("keyfile"), conf.get ("passphrase"))
            self.config_wasc (**dconf ['wasc_options'])
            self.config_dns (dconf ['dns_protocol'])
            if conf.get ("cachefs_diskmax", 0) and not conf.get ("cachefs_dir"):
                conf ["cachefs_dir"] = os.path.join (self.varpath, "cachefs")
            self.config_cachefs (
                conf.get ("cachefs_dir", None),
                conf.get ("cachefs_memmax", 0),
                conf.get ("cachefs_diskmax", 0)
            )
            self.config_rcache (conf.get ("rcache_objmax", 100))
            if conf.get ('fws_to'):
                self.config_forward_server (
                    conf.get ('fws_address', '0.0.0.0'),
                    conf.get ('fws_port', 80), conf.get ('fws_to', 443)
                )
            # command line --port/--quic override configured values
            port = int (options.get ('--port') or conf.get ('port', 5000))
            quic = int (options.get ('--quic') or conf.get ('quic', 0))
            self.config_webserver (
                port, conf.get ('address', '0.0.0.0'),
                NAME, conf.get ("certfile") is not None,
                conf.get ('keep_alive', DEFAULT_KEEP_ALIVE),
                conf.get ('network_timeout', DEFAULT_NETWORK_TIMEOUT),
                conf.get ('fws_domain'),
                quic = quic,
                thunks = [self.master_jobs]
            )
            if os.name == "posix" and self.wasc.httpserver.worker_ident == "master":
                # master does not serve
                return
            self.config_executors (conf.get ('executors_workers'), dconf.get ("executors_zombie_timeout", DEFAULT_BACKGROUND_TASK_TIMEOUT))
            self.config_threads (conf.get ('threads', 4))
            self.config_backends (
                conf.get ('backend_keep_alive', DEFAULT_BACKEND_KEEP_ALIVE),
                conf.get ('backend_object_timeout', DEFAULT_BACKEND_OBJECT_TIMEOUT),
                conf.get ('backend_maintain_interval', DEFAULT_BACKEND_MAINTAIN_INTERVAL)
            )
            for name, args in conf.get ("clusters", {}).items ():
                ctype, members, policy, ssl, max_conns = args
                self.add_cluster (ctype, name, members, ssl, policy, max_conns)
            self.install_handler (
                conf.get ("mount"),
                conf.get ("proxy", False),
                conf.get ("max_ages", {}),
                conf.get ("blacklist_dir"), # blacklist_dir
                conf.get ("proxy_unsecure_https", False), # disable unsecure https
                conf.get ("enable_gw", False), # API gateway
                conf.get ("gw_auth", False),
                conf.get ("gw_realm", "API Gateway"),
                conf.get ("gw_secret_key", None)
            )
            lifetime.init (logger = self.wasc.logger.get ("server"))
            if os.name == "nt":
                # poll for service-wrapper signals (see maintern_shutdown_request)
                lifetime.maintern.sched (11.0, self.maintern_shutdown_request)
                self.flock = flock.Lock (os.path.join (self.varpath, ".%s" % self.NAME))
    #----------------------------------------------------------------------
    global dconf, PROCESS_NAME, Win32Service

    # explicit keyword arguments win over values collected in dconf
    for k, v in dconf.items ():
        if k not in conf:
            conf [k] = v
    if conf.get ("name"):
        PROCESS_NAME = 'skitai/{}'.format (conf ["name"])
    if not conf.get ('mount'):
        # BUGFIX: was `raise systemError` (lowercase), a NameError at runtime
        raise SystemError ('No mount point')
    conf ["varpath"] = get_varpath (get_proc_title ())
    pathtool.mkdir (conf ["varpath"])
    if "logpath" in conf and not conf ["logpath"]:
        conf ["logpath"] = get_logpath (get_proc_title ())

    cmd = get_command ()
    working_dir = getswd ()
    lockpath = conf ["varpath"]
    servicer = service.Service (get_proc_title(), working_dir, lockpath, Win32Service)
    if cmd and not servicer.execute (cmd):
        # the command was fully handled by the service layer (e.g. stop)
        return
    if not cmd:
        if servicer.status (False):
            raise SystemError ("daemon is running")
        conf ['verbose'] = 'yes'
    elif cmd in ("start", "restart"):
        sys.stderr = open (os.path.join (conf.get ('varpath'), "stderr.engine"), "a")

    server = SkitaiServer (conf)
    # timeout for fast keyboard interrupt on win32
    try:
        try:
            server.run (conf.get ('verbose') and 1.0 or 30.0)
        except KeyboardInterrupt:
            pass
    finally:
        _exit_code = server.get_exit_code ()
        if _exit_code is not None: # master process
            sys.exit (_exit_code)
        else:
            # worker process
            # for avoiding multiprocessing.manager process's join error
            os._exit (lifetime._exit_code)
| {
"repo_name": "hansroh/skitai",
"path": "skitai/__init__.py",
"copies": "1",
"size": "25472",
"license": "mit",
"hash": -5078284102318700000,
"line_mean": 31.080604534,
"line_max": 139,
"alpha_frac": 0.5772220477,
"autogenerated": false,
"ratio": 3.552084785943383,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.952473006099509,
"avg_score": 0.020915354529658408,
"num_lines": 794
} |
# 2014 — Loïc Sander
# Group spacing in Robofont
from vanilla import FloatingWindow, CheckBox
from mojo.events import addObserver, removeObserver
from mojo.UI import CurrentSpaceCenter
metricsPrefix = '.mtrx'
leftIndicator = '_L_'
rightIndicator = '_R_'
class spacingObserver(object):
    # RoboFont observer that propagates sidebearing edits made in the Space
    # Center to every glyph of the same metrics group (groups whose name
    # contains the '.mtrx' prefix plus a '_L_' or '_R_' side indicator).

    def __init__(self):
        # group spacing is off until the user enables it in the popup window
        self.enableGroupSpacing = False
        self.popupOpen = False
        addObserver(self, 'glyphEditCallback', 'spaceCenterKeyDown')
        addObserver(self, 'glyphEditedCallback', 'spaceCenterKeyUp')
        addObserver(self, 'spaceCenterOpenCallback', 'spaceCenterDidOpen')
        addObserver(self, 'fontOpenCallback', 'fontDidOpen')
        # margins captured before an edit, used to detect which side changed
        self.previousMargins = {'left': 0, 'right': 0}

    def processMetricsGroups(self, baseGlyph=None):
        # Apply group spacing for every metrics group containing `baseGlyph`.
        # Without an explicit glyph, the first member of each group is used.
        for groupName in self.metricsGroups:
            if (baseGlyph is None) and len(self.font.groups[groupName]) > 0:
                baseGlyph = self.font.groups[groupName][0]
                self.previousMargins['left'] = self.font[baseGlyph].angledLeftMargin
                self.previousMargins['right'] = self.font[baseGlyph].angledRightMargin
            if (metricsPrefix in groupName) and (baseGlyph in self.font.groups[groupName]):
                # only the side whose margin actually changed is propagated
                if (leftIndicator in groupName) and (self.previousMargins['left'] != self.font[baseGlyph].angledLeftMargin):
                    self.setGroupSpacing(baseGlyph, self.font.groups[groupName], 'Left')
                elif (rightIndicator in groupName) and (self.previousMargins['right'] != self.font[baseGlyph].angledRightMargin):
                    self.setGroupSpacing(baseGlyph, self.font.groups[groupName], 'Right')

    def setGroupSpacing(self, baseGlyphName, group, side):
        # Copy the base glyph's sidebearing (given side) to every other glyph
        # of the group; components referencing group members are shifted back
        # so their drawn position is preserved on left-margin changes.
        for glyphName in group:
            baseGlyph = self.font[baseGlyphName]
            targetGlyph = self.font[glyphName]
            if glyphName is not baseGlyphName:
                if (len(targetGlyph.components) > 0) and (side == 'Left'):
                    for component in targetGlyph.components:
                        if component.baseGlyph in group:
                            component.move((self.previousMargins['left']-baseGlyph.angledLeftMargin, 0))
                self.setSidebearing(baseGlyph, targetGlyph, side)
            elif glyphName is baseGlyphName:
                if (len(baseGlyph.components) > 0) and (side == 'Left'):
                    for component in baseGlyph.components:
                        if component.baseGlyph in group:
                            component.move((self.previousMargins['left']-baseGlyph.angledLeftMargin, 0))
            targetGlyph.update()

    def setSidebearing(self, baseGlyph, targetGlyph, side):
        # copy angledLeftMargin/angledRightMargin from base to target
        baseMargin = getattr(baseGlyph, 'angled' + side + 'Margin')
        targetMargin = getattr(targetGlyph, 'angled' + side + 'Margin')
        if targetMargin != baseMargin:
            setattr(targetGlyph, 'angled' + side + 'Margin', baseMargin)

    def getMetricsGroups(self, notification=None):
        # (Re)collect metrics group names from the current font.
        # NOTE(review): `A in g and B in g or C in g` parses as
        # `(A and B) or C` — groups with only the right indicator match even
        # without the '.mtrx' prefix; confirm whether that is intended.
        self.font = CurrentFont()
        if self.font is not None:
            self.metricsGroups = [group for group in self.font.groups.keys() if metricsPrefix in group and leftIndicator in group or rightIndicator in group]
            if (notification is not None) and (self.enableGroupSpacing == True):
                self.processMetricsGroups()

    def enableGroupSpacingCallback(self, sender):
        # checkbox toggled in the popup window
        self.enableGroupSpacing = sender.get()

    def glyphEditCallback(self, notification):
        # key down in Space Center: remember margins before the edit lands
        edGlyph = notification['glyph']
        self.previousMargins = {'width': edGlyph.width, 'left': edGlyph.angledLeftMargin, 'right': edGlyph.angledRightMargin}

    def glyphEditedCallback(self, notification):
        # key up in Space Center: propagate the change to the glyph's groups
        if self.enableGroupSpacing == True:
            edGlyph = notification['glyph']
            if self.font != CurrentFont():
                self.getMetricsGroups()
            self.processMetricsGroups(edGlyph.name)

    def spaceCenterOpenCallback(self, notification):
        # show the enable/disable popup once, when a Space Center opens and
        # the font actually has metrics groups
        if (not self.popupOpen) and (len(self.metricsGroups) > 0):
            self.w = FloatingWindow((160, 36), 'Group Spacing')
            self.w.activateGroups = CheckBox((9, -27, 151, 18), "Activate Group spacing", value=self.enableGroupSpacing, callback=self.enableGroupSpacingCallback, sizeStyle="small")
            self.w.bind('close', self.windowCloseCallback)
            self.w.open()
            self.popupOpen = True

    def windowCloseCallback(self, notification):
        self.popupOpen = False

    def fontOpenCallback(self, notification):
        # refresh group data whenever a font opens or its groups change
        font = notification['font']
        font.groups.addObserver(self, 'getMetricsGroups', 'Groups.Changed')
        self.getMetricsGroups(notification)
spacingObserver() | {
"repo_name": "miguelsousa/Robofont-scripts",
"path": "SpacingObserver/spacing-observer.py",
"copies": "2",
"size": "4965",
"license": "mit",
"hash": 8432294947806881000,
"line_mean": 41.4188034188,
"line_max": 181,
"alpha_frac": 0.6187021362,
"autogenerated": false,
"ratio": 4.367957746478873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023335347447695862,
"num_lines": 117
} |
"""2015-01-06, mlr:
A new RSS feed has been added: http://www.cafc.uscourts.gov/rss-opinions.php
"""
from juriscraper.OpinionSite import OpinionSite
import time
from datetime import date
from juriscraper.lib.string_utils import clean_string, titlecase
class Site(OpinionSite):
    # Scraper for the U.S. Court of Appeals for the Federal Circuit
    # opinions/orders listing (table with id "searchResults").

    def __init__(self):
        super(Site, self).__init__()
        self.url = 'http://www.cafc.uscourts.gov/opinions-orders/7/all'
        self.back_scrape_iterable = range(0, 185)
        self.court_id = self.__module__

    def _get_case_names(self):
        # column 4 holds the case-name link text
        case_names = []
        for case_string in self.html.xpath('//table[@id = "searchResults"]/tr[position() >= 3]/td[4]/a/text()'):
            # Takes care of things like [ERRATA] that are often on the end of
            # case names.
            case_names.append(titlecase(case_string.split('[')[0]))
        return case_names

    def _get_download_urls(self):
        # PDF links live on the same anchors as the case names
        return [e for e in self.html.xpath('//table[@id = "searchResults"]/tr[position() >= 3]/td[4]/a/@href')]

    def _get_case_dates(self):
        # column 1 holds ISO dates; one known-bad value is patched by hand
        dates = []
        for date_string in self.html.xpath('//table[@id = "searchResults"]/tr[position() >= 3]/td[1]/text()'):
            if clean_string(date_string) == '2011-09-00':
                date_string = '2011-09-02'
            dates.append(date.fromtimestamp(time.mktime(time.strptime(clean_string(date_string), '%Y-%m-%d'))))
        return dates

    def _get_docket_numbers(self):
        # column 2: docket number, optionally followed by '|'-separated extras
        return [e.split('|')[0] for e in self.html.xpath('//table[@id = "searchResults"]/tr[position() >= 3]/td[2]/text()')]

    def _get_precedential_statuses(self):
        # column 5: map the site's wording onto Published/Unpublished.
        # 'nonprecedential' must be tested first since it contains
        # 'precedential' as a substring.
        statuses = []
        for status in self.html.xpath('//table[@id = "searchResults"]/tr[position() >= 3]/td[5]/text()'):
            if 'nonprecedential' in status.lower():
                statuses.append('Unpublished')
            elif 'precedential' in status.lower():
                statuses.append('Published')
            else:
                statuses.append('Unknown')
        return statuses

    def _download_backwards(self, page):
        # Sample URLs for page 2 and 3 (as of 2011-02-09)
        # http://www.cafc.uscourts.gov/opinions-orders/0/50/all/page-11-5.html
        # http://www.cafc.uscourts.gov/opinions-orders/0/100/all/page-21-5.html
        if page == 0:
            self.url = "http://www.cafc.uscourts.gov/opinions-orders/0/all"
        else:
            self.url = "http://www.cafc.uscourts.gov/opinions-orders/0/%s/all/page-%s1-5.html" % ((page * 50), page)
        self.html = self._download()
| {
"repo_name": "brianwc/juriscraper",
"path": "opinions/united_states/federal_appellate/cafc.py",
"copies": "1",
"size": "2555",
"license": "bsd-2-clause",
"hash": -9006571003247090000,
"line_mean": 41.5833333333,
"line_max": 124,
"alpha_frac": 0.5953033268,
"autogenerated": false,
"ratio": 3.246505717916137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9335638848115155,
"avg_score": 0.0012340393201963487,
"num_lines": 60
} |
# 2015.03.10 14:42:13 UTC
import sys
import os
path = os.path.split(os.path.realpath(__file__))[0]
sys.path.append(path + '/utils/Python_Utils')
sys.path.append(path + '../utils/Python_Utils')
sys.path.append(path + '/utils/liac-arff')
sys.path.append(path + '../utils/liac-arff')
import numpy as np
import scipy as sc
import scipy.sparse as sp
import logging,Logger
label_flag = u'multi_label_';
class SvmWriter:
    # Writes sparse multi-label data in a libsvm-like text format:
    # a "#num_feature=N num_label=M" header, then one
    # "l1,l2 f1:v1 f2:v2 ..." line per instance.  Python 2 code (xrange).

    def __init__(self, filename, num_feature, num_label):
        self.file = open(filename, "w")
        line = "#num_feature=%d num_label=%d\n"%(num_feature,num_label)
        self.file.write(line)

    def write(self, x, y):
        # x: sparse feature matrix (m x num_feature); y: sparse label
        # indicator matrix (m x num_label).  Both must support .nonzero()
        # and 2-d indexing (scipy sparse).
        m,n = x.shape
        labels = [[] for r in xrange(m)]
        features = [[] for r in xrange(m)]
        ij = x.nonzero()
        for k in xrange(len(ij[0])):
            i = ij[0][k]
            j = ij[1][k]
            features[i].append("%d:%f"%(j,x[i,j]))
        ij = y.nonzero()
        for k in xrange(len(ij[0])):
            i = ij[0][k]
            j = ij[1][k]
            labels[i].append("%d"%j)
        for i in xrange(m):
            #print features[i]
            line = ",".join(labels[i]) + " " + " ".join(features[i]) + "\n"
            #print line
            self.file.write(line)

    def close(self):
        self.file.close()
class SvmReader:
    # Reads the format produced by SvmWriter, either whole-file
    # (full_read) or in batches with one-batch read-ahead (read).
    # Python 2 code (xrange).

    def __init__(self, filename, batch = 50):
        self.file = open(filename)
        self.batch = batch
        self.num_label = 0
        self.num_feature = 0
        # read-ahead buffers for read(); None means nothing buffered yet
        self.next_x = None
        self.next_y = None
        ##read the comment line
        ## the comment line should give num_feature and num_labels
        ## for example '#num_feature=6\tnum_label=10'
        line = self.file.readline()
        line = line.strip()
        line = line.replace("#", "")
        eles = line.split(" ")
        #print "eles",eles
        #print "eles[0].split('=')",eles[0].split("=")
        #print "int((eles[0].split('='))[0])", int((eles[0].split("="))[1])
        self.num_feature = int((eles[0].split("="))[1])
        self.num_label = int((eles[1].split("="))[1])

    def parse(self,lines):
        # Turn raw text lines into a pair of CSR matrices (x, y).
        # Lines whose first token contains ':' have no label part.
        # Returns (None, None) for an empty batch.
        num_ins = len(lines)
        if num_ins == 0:
            return None, None
        #x = sp.lil_matrix((num_ins, self.num_feature))
        #y = sp.lil_matrix((num_ins, self.num_label))
        xr = []
        xc = []
        xd = []
        yr = []
        yc = []
        yd = []
        for i in xrange(len(lines)):
            line = lines[i]
            line = line.strip()
            eles = line.split(" ")
            if ":" not in eles[0]:
                # first token is the comma-separated label list
                for j in xrange(1,len(eles)):
                    kv = eles[j].split(":")
                    #x[i,int(kv[0])] = float(kv[1])
                    xr.append(i)
                    xc.append(int(kv[0]))
                    xd.append(float(kv[1]))
                labels = eles[0].strip().split(",")
                #print "xxx",line,labels
                for j in xrange(len(labels)):
                    #y[i,int(labels[j])] = 1
                    yr.append(i)
                    yc.append(int(labels[j]))
                    yd.append(1)
            else:
                # no labels on this line: every token is feature:value
                for j in xrange(0,len(eles)):
                    kv = eles[j].split(":")
                    #x[i,int(kv[0])] = float(kv[1])
                    xr.append(i)
                    xc.append(int(kv[0]))
                    xd.append(float(kv[1]))
        xi = sp.csr_matrix((xd,(xr,xc)),(num_ins,self.num_feature))
        yi = sp.csr_matrix((yd,(yr,yc)),(num_ins,self.num_label))
        return xi, yi

    def full_read(self):
        # Read and parse everything up to the first blank line / EOF.
        lines = []
        for line in self.file:
            if line is None or len(line.strip()) == 0: break
            #print "full_read",line
            lines.append(line.strip())
        return self.parse(lines)

    def read(self):
        # Batched read with one-batch look-ahead so has_next is accurate.
        # NOTE(review): `None == self.next_x` compares a (possibly sparse)
        # matrix with == ; `self.next_x is None` would be the safe spelling —
        # verify scipy's behavior before relying on this.
        if None == self.next_x:
            lines = []
            for i in xrange(self.batch):
                line = self.file.readline()
                if line is None or len(line.strip()) == 0: break
                lines.append(line)
            self.next_x, self.next_y = self.parse(lines)
        x = self.next_x
        y = self.next_y
        # pre-fetch the following batch to decide has_next
        lines = []
        for i in xrange(self.batch):
            line = self.file.readline()
            if line is None or len(line.strip()) == 0: break
            lines.append(line)
        self.next_x, self.next_y = self.parse(lines)
        has_next = not (self.next_x is None);
        return x, y, has_next;

    def close(self):
        self.file.close()
| {
"repo_name": "rustle1314/latent_factor_multi_label",
"path": "arffio.py",
"copies": "2",
"size": "4722",
"license": "mit",
"hash": -6347312248906894000,
"line_mean": 30.9054054054,
"line_max": 75,
"alpha_frac": 0.458068615,
"autogenerated": false,
"ratio": 3.434181818181818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48922504331818184,
"avg_score": null,
"num_lines": null
} |
# 2015-03-23 LLB remove 1s wait time between snapshots
import corr, adc5g, httplib
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
import sys, time
# connect to the R2DBE board over KATCP and wait for the link
r2 = corr.katcp_wrapper.FpgaClient('r2dbe-1')
r2.wait_connected()

# number of snapshot repetitions: optional first CLI argument, default 30
if len(sys.argv) == 2:
    rpt = int(sys.argv[1])
else:
    rpt = 30
def gaussian(x, a, mu, sig):
    # Unnormalized Gaussian: amplitude a, mean mu, standard deviation sig.
    return a * np.exp(-(x - mu) ** 2 / (2. * sig ** 2))

def chisq(par, x, y, yerr):
    # Chi-square of the Gaussian model `par` = (amplitude, mean, sigma)
    # against data y with per-point uncertainties yerr.
    (amplitude, mean, sigma) = par
    residuals = gaussian(x, amplitude, mean, sigma) - y
    return np.sum(residuals ** 2 / yerr ** 2)
# histogram bins: 2 IFs x 4 interleaved ADC cores x 256 8-bit codes
counts = np.zeros((2,4,256))
x = np.arange(-128, 128, 1)

for r in range(rpt):
    # time.sleep(1)
    # grab one 8-bit snapshot per IF and histogram each of the 4 cores
    x0 = np.array(adc5g.get_snapshot(r2, 'r2dbe_snap_8bit_0_data'))
    x1 = np.array(adc5g.get_snapshot(r2, 'r2dbe_snap_8bit_1_data'))
    for j in range(4):
        # samples j, j+4, j+8, ... belong to core j; shift codes to 0..255
        bc0 = np.bincount((x0[j::4] + 128))
        bc1 = np.bincount((x1[j::4] + 128))
        counts[0,j,:len(bc0)] += bc0
        counts[1,j,:len(bc1)] += bc1

np.save('counts.npy', counts)

# fit a Gaussian to each core's histogram and report mean/std per core
for i in [0,1]:
    for j in [0,1,2,3]:
        y = counts[i,j]
        # approximate per-bin uncertainty (Poisson + 10% systematic)
        yerr = np.sqrt(1+y+.10*y**2)
        p0=(np.max(y), 0., 30.)  # NOTE(review): p0 is unused; fmin gets its own start point below
        ret = scipy.optimize.fmin(chisq, (np.max(y), 0, 40), args=(x, y, yerr), disp=False)
        print "IF%d Core %d: mean %5.2f std %5.2f" % (i, j, ret[1], ret[2])
| {
"repo_name": "sao-eht/lmtscripts",
"path": "2017/corestats.py",
"copies": "1",
"size": "1243",
"license": "mit",
"hash": -1473173349834098400,
"line_mean": 27.25,
"line_max": 91,
"alpha_frac": 0.5792437651,
"autogenerated": false,
"ratio": 2.3147113594040967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33939551245040966,
"avg_score": null,
"num_lines": null
} |
# 2015.05.05 18:39:54 CST
# Embedded file name: easycard.py
import sys
import datetime
import hashlib
import urllib
import urllib2
import json
from Crypto.Cipher import DES3
import pytz
version = '0.3'
copyright = 'Copyright (C) 2015 Zhi-Wei Cai.'
key = 'EasyCardToKingay23456789'
iv = '01234567'
salt = 'L0CalKing'
const = 8544
def getID(data, isEncrypt, key, iv, encode):
    # 3DES-CBC encrypt `data` and encode it (e.g. base64), or decode then
    # decrypt when isEncrypt is falsy.
    # NOTE(review): padding goes to a multiple of 16 with 0x06 bytes even
    # though DES3's block size is 8; a 16-multiple is also an 8-multiple so
    # encryption works, but this is not PKCS#5/7 — presumably it mirrors
    # what the EasyCard server expects; verify before changing.
    size = len(data)
    if size % 16 != 0:
        data += '\x06' * (16 - size % 16)
    des3 = DES3.new(key, DES3.MODE_CBC, iv)
    if isEncrypt:
        result = des3.encrypt(data).encode(encode).rstrip()
    else:
        result = des3.decrypt(data.decode(encode))
    return result
def getVerify(const, seed, salt):
    """Return the upper-cased MD5 hex digest of str(seed * const) + salt.

    Used as the `verify` request parameter of the balance-query API.
    The message is encoded before hashing so the same code works on
    Python 2 (a no-op for ASCII byte strings) and Python 3 (where md5
    requires bytes).
    """
    hash = hashlib.md5()
    hash.update((str(seed * const) + salt).encode())
    return hash.hexdigest().upper()
def proc(data):
    # Query the EasyCard wallet API for the card number in `data` and print
    # the balance plus the last 30 days of transactions.  The escaped byte
    # strings below are UTF-8 Chinese text (decompiled source); they are
    # runtime output and must not be altered.
    e = getID(data, 1, key, iv, 'base64')
    cardID = urllib.quote_plus(e)
    date = datetime.datetime.now(pytz.timezone('Asia/Taipei'))
    # server-side checksum seed is derived from the Taipei-local date/hour
    seed = date.month + date.day + date.hour
    begin = '{:%Y-%m-%d}'.format(date - datetime.timedelta(days=30))
    end = '{:%Y-%m-%d}'.format(date)
    verify = getVerify(const, seed, salt)
    url = 'https://wallet.easycard.com.tw/EasyWallet/QueryManager/V3/GetTXNThinDataInfo?verify={}&cardID={}&begin={}&end={}&ev=1'.format(verify, cardID, begin, end)
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    content = response.read()
    dict = json.loads(content)  # NOTE(review): shadows the builtin `dict`
    try:
        if dict[-1]['B'] != '--':
            # last record carries the current balance in key 'B'
            print '{: ^90}'.format('\xe5\x8d\xa1\xe8\x99\x9f "{} {} {}"\xef\xbc\x8c\xe9\xa4\x98\xe9\xa1\x8d\xef\xbc\x9a{} \xe5\x85\x83'.format(data[0:3], data[3:9], data[-1], dict[-1]['B']))
        if len(dict) > 1:
            if dict[0]['T'].encode('utf-8') != '\xe6\x9f\xa5\xe7\x84\xa1\xe4\xba\xa4\xe6\x98\x93\xe8\xb3\x87\xe6\x96\x99':
                # header says transactions exist; print each one
                print '\n{:=^90}\n'.format('[ \xe4\xba\xa4\xe6\x98\x93\xe6\x98\x8e\xe7\xb4\xb0 ]')
                i = 1
                for item in dict:
                    try:
                        if item['T']:
                            # 'T' == 'D' marks a debit, anything else a top-up
                            if item['T'] == 'D':
                                action = '\xe6\x89\xa3\xe6\xac\xbe'
                            else:
                                action = '\xe5\x84\xb2\xe5\x80\xbc'
                            print '#{:>4} [{}] {} {:>5} \xe5\x85\x83\xef\xbc\x8c\xe9\xa4\x98\xe9\xa1\x8d {:>5} \xe5\x85\x83\xef\xbc\x8c\xe5\x9c\xb0\xe9\xbb\x9e\xef\xbc\x9a{}'.format(i, item['D'], action, item['Q'], item['A'], item['L'].encode('utf-8').replace('<BR>', '-'))
                            i += 1
                    except KeyError as err:
                        pass
    except KeyError as err:
        print '\xe5\x8d\xa1\xe8\x99\x9f "{}" \xe4\xb8\x8d\xe5\xad\x98\xe5\x9c\xa8\xef\xbc\x81'.format(data)
    except ValueError as err:
        print '\xe5\x8d\xa1\xe8\x99\x9f "{}" \xe4\xb8\x8d\xe5\xad\x98\xe5\x9c\xa8\xef\xbc\x81'.format(data)
    print '\n{:=^90}\n\n'.format('[ \xe6\x9f\xa5\xe8\xa9\xa2\xe7\xb5\x90\xe6\x9d\x9f ]')

if __name__ == '__main__':
    # banner, then either a single query from argv or an interactive loop
    print '\n\xe6\x82\xa0\xe9\x81\x8a\xe5\x8d\xa1\xe9\xa4\x98\xe9\xa1\x8d\xe6\x98\x8e\xe7\xb4\xb0\xe6\x9f\xa5\xe8\xa9\xa2 v{}'.format(version)
    print '{}\n'.format(copyright)
    if len(sys.argv) > 1:
        try:
            print '\n{:=^90}\n'.format('[ \xe6\x9f\xa5\xe8\xa9\xa2\xe9\x96\x8b\xe5\xa7\x8b ]')
            proc(str(sys.argv[1]))
        except ValueError as err:
            pass
    else:
        # interactive: empty input exits the loop
        while 1:
            try:
                data = raw_input('\xe8\xab\x8b\xe8\xbc\xb8\xe5\x85\xa5\xe5\x8d\xa1\xe7\x89\x87\xe8\x99\x9f\xe7\xa2\xbc\xef\xbc\x9a').replace(' ', '')
                if len(data):
                    print '\n{:=^90}\n'.format('[ \xe6\x9f\xa5\xe8\xa9\xa2\xe9\x96\x8b\xe5\xa7\x8b ]')
                    proc(data)
                else:
                    break
            except ValueError as err:
                pass
# okay decompyling easycard.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.05.05 18:39:54 CST
| {
"repo_name": "akpotter/Easy-Card",
"path": "bin/easycard.py",
"copies": "1",
"size": "4031",
"license": "mit",
"hash": 6244177657748126000,
"line_mean": 39.7171717172,
"line_max": 273,
"alpha_frac": 0.5472587447,
"autogenerated": false,
"ratio": 2.590616966580977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8630365456219531,
"avg_score": 0.001502051012289174,
"num_lines": 99
} |
# 20:15:05:11:32:44 - itead - radar
# 30:14:11:21:15:80 - HC-06 - redbot
import time
from bluetooth import *
import matplotlib.pyplot as plt
# Generate the master table
# masterTable = [[0 for x in range(10)] for x in range(10)]
all_results_x = list()
all_results_y = list()
WAIT_TIME = 10
RADAR_MAC = "20:15:05:11:32:44"
REDBOT_MAC = "30:14:11:21:15:80"
ITERATIONS_AMOUNT = 3
def scan():
    # Ask the radar unit (over Bluetooth RFCOMM) for a sweep and parse the
    # reply into a list of {'degrees', 'x', 'y'} dicts.
    # Connect
    print "Connecting to Radar"
    sock=BluetoothSocket( RFCOMM )
    sock.connect((RADAR_MAC, 1))
    # Send data and wait for output
    res = sock.send('r')
    data = ""
    while True:
        # accumulate until the device sends its 'end' terminator
        data += sock.recv(1024)
        if 'end' in data :
            break
    # Parse result: one "degrees,x,y" record per line; malformed lines
    # (including the trailing 'end' marker) are skipped by the except
    table = list()
    for row in data.split("\n"):
        rowToInsert = list()
        for column in row.split(","):
            rowToInsert.append(column)
        try:
            table.append({
                "degrees": float(rowToInsert[0]),
                "x" : float(rowToInsert[1]),
                "y": float(rowToInsert[2])
            })
        except:
            pass #print "Table processing done."
    # Output result and close connection
    print(table)
    sock.close()
    return table
def move(direction):
    # Tell the RedBot to move by sending "go" over Bluetooth.
    # NOTE(review): `direction` is currently unused — the same "go" command
    # is sent regardless; confirm intended protocol.
    print "Connecting to Redbot"
    sock=BluetoothSocket( RFCOMM )
    sock.connect((REDBOT_MAC, 1))
    # Send data and wait for output
    res = sock.send("go")
    # data = ""
    # while True:
    #     data += sock.recv(1024)
    #     if data is "1" or data is "0" :
    #         break
    # Parse result
    # Output result and close connection
    sock.close()

def stopRedBot():
    # Send the "stop" command to the RedBot.
    sock=BluetoothSocket( RFCOMM )
    sock.connect((REDBOT_MAC, 1))
    # Send data and wait for output
    res = sock.send("stop")
    sock.close()

def calculateMove(coordinates):
    # TODO: decide the next move from scanned coordinates (unimplemented).
    pass

def wait():
    # Pause between iterations (WAIT_TIME seconds).
    print "Waiting..."
    time.sleep(WAIT_TIME)

def saveToMasterList(x,y):
    # Accumulate one scanned point into the module-level result lists.
    all_results_x.append(x)
    all_results_y.append(y)

def generateGraph():
    # Scatter-plot every accumulated point (fixed marker area, 50% alpha).
    area = 3.1416 * 10 ** 2
    plt.scatter(all_results_x, all_results_y, s=area, alpha=0.5)
    plt.show()

# Main iteration: scan, keep the first returned point, then plot everything
for item in range(0,ITERATIONS_AMOUNT):
    # stopRedBot()
    res = scan()
    if res:
        print res[0]
        saveToMasterList(res[0]["x"], res[0]["y"])
    # move()
    # wait()
generateGraph()
| {
"repo_name": "oneasteriskone/IA-Project",
"path": "Core/core.py",
"copies": "1",
"size": "2044",
"license": "apache-2.0",
"hash": -8713766114074854000,
"line_mean": 17.9259259259,
"line_max": 61,
"alpha_frac": 0.6550880626,
"autogenerated": false,
"ratio": 2.590621039290241,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37457091018902405,
"avg_score": null,
"num_lines": null
} |
""" 2015-07-23
Perform coordinate conversions from the command line.
Uses
"""
import argparse
import pyperclip
# p1 = argparse.ArgumentParser()
# p1.add_argument('x')
# print p1.parse_args(['123'])
#
# p2 = argparse.ArgumentParser()
# p2.add_argument('-d', action='store_const',const='dak')
# print p2.parse_args(['-d'])
#
# p3 = argparse.ArgumentParser()
# p3.add_argument('-d', action='store_const',const='dak')
# p3.add_argument('x')
# p3.add_argument('y')
# print p3.parse_args(['-d','1','2'])
#p1.add_argument(
from Coordinate_Transform import DCcoordinate_projector
# #
# # parser = argparse.ArgumentParser()
# # parser.add_argument("coord_1")
# # parser.add_argument("coord_2")
# # args = parser.parse_args()
# # x,y = args.coord_1, args.coord_2
#
def coord_convert():
    # Parse "x y" coordinates from the command line, project them via
    # DCcoordinate_projector, and place the result on the clipboard.
    # -d/--dak selects Dakota County coords, -u/--utm selects UTM; the
    # default is whatever handle_unspecified_coords() returns.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d','--dak', action='store_const', const='dak', help="return Dakota County coords on clipboard")
    parser.add_argument('-u','--utm', action='store_const', const='utm', help="return UTM NAD 83, Zone 15 coords on clipboard")
    parser.add_argument('x')
    parser.add_argument('y')
    args = parser.parse_args()
    print 'args=',args
    coordtext = '%s,%s'%( args.x, args.y)
    Cprojector = DCcoordinate_projector()
    cliptext = Cprojector.handle_unspecified_coords(coordtext)
    #print outtext
    # NOTE(review): the bare except below silently keeps the default
    # cliptext when the projector lacks dakx/utmx attributes (e.g. for
    # unrecognized input) — a narrower AttributeError would be safer; verify.
    try:
        if args.dak:
            cliptext = '%4.2f,%4.2f'%(Cprojector.dakx,Cprojector.daky)
            #print 'returning dakx,daky to clipboard "%s"'%cliptext
        elif args.utm:
            cliptext = '%4.2f,%4.2f'%(Cprojector.utmx,Cprojector.utmy)
            #print 'returning utmx,utmy to clipboard "%s"'%cliptext
    except:
        pass
    pyperclip.copy(cliptext)
    pyperclip.paste()
    return cliptext
def test_parse_args():
    # Manual smoke test: fake sys.argv and run coord_convert twice
    # (Dakota County form, then UTM form), printing each result.
    import sys
    sys.argv = ["prog", '-d', "93.0444", "44.5926"]
    rv = coord_convert()
    print '>>\n'+ str(rv) +'\n================'
    sys.argv = ["prog", '--utm', "93.0444", "44.5926"]
    rv = coord_convert()
    print '>>\n'+ str(rv) +'\n================'

if __name__ == '__main__':
    #test_parse_args()
    coord_convert()
'''
ERROR coordinates not recognized or not within Dakota County
"570931,1441"
496475.91,4937695.85
Dakota Co: 570931, 144108
Dakota Co: 570931.0, 144108.0
UTM : 496475.91, 4937695.85
D.d : -93.044399765, 44.592598646
D M.m : -93 2.663986, 44 35.555919
D M S.s : -93 2 39.839", 44 35 33.355"''' | {
"repo_name": "aembillo/MNWellRecordGui",
"path": "src/Coord_cmd.py",
"copies": "1",
"size": "2521",
"license": "bsd-3-clause",
"hash": -3486733237548961300,
"line_mean": 27.6590909091,
"line_max": 127,
"alpha_frac": 0.6049186831,
"autogenerated": false,
"ratio": 2.8977011494252873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4002619832525287,
"avg_score": null,
"num_lines": null
} |
# 2015-9-26
import argparse
import os
from glob import glob
import logging
PASSBOOK_PATH = os.path.expanduser('~/Dropbox/conf/passbook')
class Passbook(object):
    # One file per domain under `path`; each file holds "g<N>\n" where N is
    # the generator number to use for that domain.  Python 2 code
    # (`except IOError, e` syntax).

    def __init__(self, path):
        self._path = path

    def getgen(self, domain):
        # Return the stored generator number; raises KeyError when the
        # domain file does not exist (IOError is translated).
        path = os.path.join(self._path, domain)
        try:
            return int(open(path).read()[1:])
        except IOError, e:
            raise KeyError(str(e))

    def setgen(self, domain, gen_number):
        # Overwrite (or create) the domain file with "g<N>\n".
        path = os.path.join(self._path, domain)
        open(path, 'wb').write('g%d\n' % gen_number)
        # print >> open(path, 'w'), 'g%d' % gen_number

    def list(self):
        # All domain names currently stored (file basenames).
        # logging.info(os.path.join(self._path, '*'))
        return [os.path.basename(f) for f in glob(self._path + '/*')]
g = Passbook(PASSBOOK_PATH)
getgen = g.getgen
setgen = g.setgen
def main():
    # CLI entry point: with -l/--list, log every stored domain and exit.
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--list', action='store_true')
    args = parser.parse_args()
    if args.list:
        logging.info('list mode')
        logging.info(g.list())
        return
if __name__ == '__main__':
main()
| {
"repo_name": "axplus/pw",
"path": "pw/passbook.py",
"copies": "1",
"size": "1162",
"license": "bsd-3-clause",
"hash": 1513915822937282000,
"line_mean": 20.9245283019,
"line_max": 69,
"alpha_frac": 0.5851979346,
"autogenerated": false,
"ratio": 3.3011363636363638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4386334298236364,
"avg_score": null,
"num_lines": null
} |
# 2015-9-26
import hmac
import hashlib
SALT = '$$'
class Gen1(object):
@staticmethod
def generate(salt, domain):
pass
class Gen2(object):
@staticmethod
def generate(salt, domain):
op = '%s%s%s' % (salt, domain, SALT)
mobj = hmac.new(domain, op, hashlib.md5)
m = mobj.hexdigest()
L = 8 + len(op) % 5
return m[0:L]
class Gen3(object):
@staticmethod
def generate(salt, domain):
op = '%s%s%s' % (salt, domain, SALT)
mobj = hmac.new(domain, op, hashlib.md5)
m = mobj.hexdigest()
s = 2 + len(op) % 5
L = 8 + s # len(op) % 5
# toupper
r = []
for i, c in enumerate(m[0:L]):
if i % s == 0:
r.append(c.upper())
else:
r.append(c)
return ''.join(r)
class Gen4(object):
@staticmethod
def generate(salt, domain):
myhashcode = '%s@%s' % (salt, domain)
mygen = _md5(myhashcode)
mygen = _upperby2(mygen)
return mygen[:18]
class Gen5(object):
    """Numeric PIN: first 6 chars of Gen2's output reduced mod 10."""

    @staticmethod
    def generate(salt, domain):
        base = Gen2().generate(salt, domain)
        return ''.join(str(ord(ch) % 10) for ch in base[:6])
class Gen6(object):
    """Mixed-charset scheme with a data-dependent length (8-13 chars)."""

    MIN_LEN = 8
    MAX_LEN = 14

    @staticmethod
    def generate(salt, domain):
        charmap = [
            _charrange('a', 'z'),
            _charrange('A', 'Z'),
            _charrange('0', '9'),
            ['~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '+'],
        ]
        digest = _md5(salt + domain)
        # The first digest byte whose value mod MAX_LEN falls in
        # [MIN_LEN, MAX_LEN) picks the output length; default MAX_LEN.
        length = Gen6.MAX_LEN
        for ch in digest:
            candidate = ord(ch) % Gen6.MAX_LEN
            if Gen6.MIN_LEN <= candidate < Gen6.MAX_LEN:
                length = candidate
                break
        out = []
        for ch in digest[:length]:
            row = ord(ch) % len(charmap)
            out.append(charmap[row][ord(ch) % len(charmap[row])])
        return ''.join(out)
def create_gen(genn):
    """Look up and return the generator class ``Gen<genn>``."""
    classname = 'Gen%d' % genn
    return globals()[classname]
# returns charset [begin, end]
def _charrange(begin, end):
return [chr(r) for r in range(ord(begin), ord(end) + 1)]
def _md5(msg):
md5 = hashlib.md5()
md5.update(msg)
return md5.hexdigest()
def _upperby2(msg):
r = []
for i, c in enumerate(msg):
if i % 2 == 1:
r.append(c.upper())
else:
r.append(c)
return ''.join(r)
if __name__ == '__main__':
    # Smoke test: prints the class object selected for scheme 1.
    print create_gen(1)
| {
"repo_name": "axplus/pw",
"path": "pw/gens.py",
"copies": "1",
"size": "2584",
"license": "bsd-3-clause",
"hash": 1417941303908960500,
"line_mean": 19.3464566929,
"line_max": 78,
"alpha_frac": 0.4593653251,
"autogenerated": false,
"ratio": 3.178351783517835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41377171086178355,
"avg_score": null,
"num_lines": null
} |
# 2015
from sqlalchemy import Column, ForeignKey, Integer,Float,String,UniqueConstraint,Date
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Profile(Base):
    """A contact profile: personal and employer details."""

    __tablename__ = "profile"

    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True, nullable=False)
    number = Column(String, unique=True, nullable=False)  # phone number
    address = Column(String, nullable=False)
    company = Column(String, nullable=False)
    title = Column(String, nullable=False)

    def as_dict(self):
        """Map column names to their current attribute values."""
        res = {c.name: getattr(self, c.name) for c in self.__table__.columns}
        return res

    def __repr__(self):
        s = "<Profile(id='{0}', name='{1}', company='{2})'>"
        return s.format(self.id, self.name, self.company)

    def __init__(self, name, number, address, company, title):
        self.name = name
        self.number = number
        # BUG FIX: the original had trailing commas on the next two
        # assignments ("= address," / "= company,"), which silently
        # stored 1-tuples instead of strings in these String columns.
        self.address = address
        self.company = company
        self.title = title
class Weekend(Base):
    """A weekend event: the week it falls on and where it takes place."""

    __tablename__ = "weekend"

    id = Column(Integer, primary_key=True)
    week_of = Column(Date, nullable=False)
    place = Column(String, nullable=False)

    def as_dict(self):
        """Map column names to their current attribute values."""
        return {col.name: getattr(self, col.name)
                for col in self.__table__.columns}

    def __repr__(self):
        s = "<Weekend(id='{0}', week_of='{1}', place='{2})'>"
        return s.format(self.id, self.week_of, self.place)

    def __init__(self, week_of, place):
        # str() mirrors the original behaviour; NOTE(review): week_of is
        # a Date column, yet a string is stored here -- confirm callers
        # rely on SQLAlchemy/driver coercion.
        self.week_of = str(week_of)
        self.place = place
class Participants(Base):
    """Join table linking a Profile to a Weekend it attends."""

    __tablename__ = "participants"
    # BUG FIX: the original built UniqueConstraint as a bare class-level
    # expression, which declarative mapping silently discards; it must
    # be listed in __table_args__ to actually be attached to the table.
    # A profile may join a given weekend only once.
    __table_args__ = (UniqueConstraint('profile_id', 'weekend_id'),)

    id = Column(Integer, primary_key=True)
    profile_id = Column(Integer, ForeignKey("profile.id", ondelete="CASCADE"))
    weekend_id = Column(Integer, ForeignKey("weekend.id", ondelete="CASCADE"))

    def as_dict(self):
        """Map column names to their current attribute values."""
        res = {c.name: getattr(self, c.name) for c in self.__table__.columns}
        return res

    def __repr__(self):
        s = "<Participants(id='{0}', profile_id='{1}', weekend_id='{2})'>"
        return s.format(self.id, self.profile_id, self.weekend_id)

    def __init__(self, profile_id, weekend_id):
        self.profile_id = profile_id
        self.weekend_id = weekend_id
class Expenses(Base):
    """A single expense booked against a weekend by a participant."""

    __tablename__ = "expenses"

    id = Column(Integer, primary_key=True)
    weekend_id = Column(Integer, ForeignKey("weekend.id", ondelete="CASCADE"))
    expense_type = Column(String, nullable=False)
    amount = Column(Float, nullable=False)
    profile_id = Column(Integer, ForeignKey("profile.id", ondelete="CASCADE"))
    date = Column(Date, nullable=False)

    def as_dict(self):
        """Map column names to their current attribute values."""
        return {col.name: getattr(self, col.name)
                for col in self.__table__.columns}

    def __repr__(self):
        s = "<Expenses(id='{0}', weekend_id='{1}', expense_type='{2})', amount='{3}', profile_id='{4}'>"
        return s.format(self.id, self.weekend_id, self.expense_type, self.amount, self.profile_id)

    def __init__(self, profile_id, weekend_id, expense_type, date, amount):
        self.profile_id = profile_id
        self.weekend_id = weekend_id
        self.expense_type = expense_type
        self.date = date
        self.amount = amount
| {
"repo_name": "nikhil1290/weekend-fun",
"path": "database/models.py",
"copies": "1",
"size": "3274",
"license": "apache-2.0",
"hash": -1384027333019080700,
"line_mean": 31.74,
"line_max": 104,
"alpha_frac": 0.620647526,
"autogenerated": false,
"ratio": 3.5053533190578157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.961002991837468,
"avg_score": 0.0031941853366271966,
"num_lines": 100
} |
"""2015 SciPy John Hunter Excellence in Plotting Contest
Author: Robert Nikutta <robert.nikutta@gmail.com>
Title: Clustering of astronomical objects in WISE 3D color space
Based on: Nikutta, Hunt-Walker, Ivezic, Nenkova, Elitzur,
'The meaning of WISE colours - I. The Galaxy and its satellites',
MNRAS 442, 3361-3379 (2014)
http://dx.doi.org/10.1093/mnras/stu1087
http://adsabs.harvard.edu/abs/2014MNRAS.442.3361N
This stereoscopic plot (cross your eyes!) shows the distribution of
different types of astronomical objects in the 3D color space of the
WISE spacecraft (Wide-field Infrared Survey Explorer). Several classes
of objects are identified with differently colored dots. In
traditional 2D color-color plots clusters can overlap, making it
difficult to identify them. A 3D color-color plot, and especially a
stereoscopic view of it, provides a much more intuitive and immersive
experience.
Carbon-rich Asymptotic Giant Branch stars (AGB) are shown in
blue. Most of them are found in the Large Magellanic
Cloud. Oxygen-rich AGB stars are shown in red. Young Stellar Objects
(YSO) which are surrounded by dusty shells with constant radial
density profiles and small optical depths are shown in green. Both
cool (~600 Kelvin) and warm (~1200 Kelvin) shells fall in this
region. Warmer YSO shells of constant density fall in the the cluster
of orange color, but their optical depths are also higher. Finally,
small black dots show other astronomical objects in our Galaxy and its
satellites which have not been associated with the other
clusters. They are typically a mix of everything.
Example:
-------
import plot
F = plot.Figure(nxpix=1920) # full HD
F.make_stereoscopic_3d_scatter() # generates PNG file with default settings
"""
__author__ = 'Robert Nikutta <robert.nikutta@gmail.com>'
__version__ = '20150412'
import numpy as N
import pylab as p
import matplotlib
from matplotlib.gridspec import GridSpec
from mpl_toolkits.mplot3d import Axes3D
class Figure:
    """Stereoscopic 3D scatter figure of WISE colors (see module docstring)."""
    def __init__(self,nxpix=1280):
        """Generate a 3D stereoscopic view of ~15k WISE sources. Color
        clusters of objects differently.
        Parameters:
        -----------
        nxpix : int
            Number of pixels of the output (PNG) file. An aspect ratio
            of 16:9 is assumed.
        """
        self.dpi = 100
        self.aspect = 16./9.
        # figure size in inches (matplotlib convention): pixels / dpi
        self.ux = nxpix/float(self.dpi)
        self.uy = self.ux/self.aspect
        # Load data (WISE colors); x, y, z are the W1-W2, W2-W3, W3-W4
        # color axes (see the axis labels set in plot_scatter_3D).
        print "Loading data..."
        with N.load('data.npz') as datafile:
            self.x, self.y, self.z = datafile['x'], datafile['y'], datafile['z']
            print "Number of objects: %d" % self.x.size
        print "Done."
    def make_stereoscopic_3d_scatter(self,azimuth=-18,saveformat='png'):
        """Generate two panels, 5 degrees apart in azimuth. Cross eyes for
        stereoscopic view.
        Parameters:
        -----------
        azimuth : {float,int}
            The azimuth angle (in degrees) at which the camera views
            the scene.
        saveformat : str
            Generate an output file, with the supplied azimuth in the
            file name. Must be either 'png' (recommended, default) or
            'pdf' (will be rather slow to save).
        Returns:
        --------
        Nothing, but saves an output file.
        """
        assert (saveformat in ('png','pdf')), "saveformat must be 'png' (recommended) or 'pdf' (will be very slow to save)."
        filename = '3D_color_stereoscopic_az%07.2f.%s' % (azimuth,saveformat)
        print "Generating plot %s" % filename
        self.setup_figure(figsize=(self.ux,self.uy)) # width, height
        # left panel (=right eye)
        ax1 = p.subplot(self.gs3D[0],projection='3d',aspect='equal',axisbg='w')
        plot_scatter_3D(self.fig,ax1,1,self.x,self.y,self.z,self.uy,azimuth=azimuth)
        # right panel (=left eye), camera 5 degrees further around
        ax2 = p.subplot(self.gs3D[1],projection='3d',aspect='equal',axisbg='w')
        plot_scatter_3D(self.fig,ax2,2,self.x,self.y,self.z,self.uy,azimuth=azimuth-5)
        if saveformat == 'png':
            p.savefig(filename,dpi=100)
        else:
            p.savefig(filename)
        p.close()
    def make_movie_frames(self,azstart=1,azstop=10,azstep=1):
        """Helper function to generate frames (for a video) with varying
        azimuth angle.
        Parameters:
        -----------
        azstart, azstop, azstep : float-ables
            The azimuth angles of first frame, last frame
            (approximate), and of the step size. All in degrees. All
            can be negative (determines direction of scene rotation)
        """
        try:
            azstart = float(azstart)
            azstop = float(azstop)
            azstep = float(azstep)
        except ValueError:
            raise Exception, "azstart, azstop, azstep must be convertible to a floating point number."
        if azstop < azstart:
            # walk downwards when the interval is reversed
            azstep = -N.abs(azstep)
        allaz = N.arange(azstart,azstop,azstep)
        for j,az in enumerate(allaz):
            print "Generating frame file %d of %d" % (j+1,len(allaz))
            self.make_stereoscopic_3d_scatter(azimuth=az)
    def setup_figure(self,figsize):
        """Set up the figure and rc params."""
        # font size scales with the figure height
        self.fontsize = 2*self.uy
        p.rcParams['axes.labelsize'] = self.fontsize
        p.rcParams['font.size'] = self.fontsize
        p.rcParams['legend.fontsize'] = self.fontsize-2
        p.rcParams['xtick.labelsize'] = self.fontsize
        p.rcParams['ytick.labelsize'] = self.fontsize
        self.fig = p.figure(figsize=figsize) # width, height 300dpi
        self.fig.suptitle('Clustering of astronomical objects in WISE 3D color space\n(cross your eyes for stereoscopic view)',color='k',fontsize=self.fontsize+2)
        # this will hold the 3D scatter plot
        self.gs3D = GridSpec(1,2)
        self.gs3D.update(left=0.02,right=0.98,bottom=0.,top=1.,wspace=0.05,hspace=0.)
def plot_scatter_3D(fig,ax,sid,x,y,z,unit,azimuth=-25):
    """Draw one stereoscopic panel: the 3D clusters plus their 2D
    projections onto the side panes.  *sid* is the panel id (1 or 2),
    *unit* scales the marker size, *azimuth* is the camera angle (deg)."""
    # some constants
    lo, hi = -0.5, 4 # plotting limits
    s = unit/2.5 # standard marker size for scatter plot
    # conditions to select groups of objects (boolean masks over x,y,z)
    coO = (x > 0.2) & (x < 2) & (y > 0.4) & (y < 2.2) & (z > 0) & (z < 1.3) & (z > 0.722*y - 0.289) # oxygen-rich AGB stars
    coC = (x > 0.629*y - 0.198) & (x < 0.629*y + 0.359) & (z > 0.722*y - 0.911) & (z < 0.722*y - 0.289) # carbon-rich AGB stars
    coCDSYSOcool = (x < 0.2) & (y < 0.4) # both cool & warm YSO shells w/ constant density profile & low optical depth
    coCDSYSOwarm = (x > 0.3) & (x < 1.4) & (y > 1.4) & (y < 3.5) & (z > 1.5) & (z < 2.8) # warm YSO shells w/ constant density profile and high optical depth
    coOTHER = ~(coO | coC | coCDSYSOcool | coCDSYSOwarm) # other/unidentified (a mix of everything)
    groups = [coO,coC,coCDSYSOcool,coCDSYSOwarm,coOTHER]
    # plot side panes (2D projections drawn flat at the plot limits)
    marker = 'o'
    colors = ('r','#1A7EFF','g','#FFC81A','0.2') # red, blue, green, orange, very dark gray
    alphas = [0.3]*len(groups)
    sizes = [s,s,s,s,s/3.] # make 'other' appear a bit less prominent
    for j,group in enumerate(groups):
        cset = ax.scatter(x[group], y[group], lo, zdir='z', s=sizes[j], marker=marker, facecolors=colors[j], edgecolors=colors[j], linewidths=0., alpha=alphas[j])
        cset = ax.scatter(y[group], z[group], hi, zdir='x', s=sizes[j], marker=marker, facecolors=colors[j], edgecolors=colors[j], linewidths=0., alpha=alphas[j])
        cset = ax.scatter(x[group], z[group], hi, zdir='y', s=sizes[j], marker=marker, facecolors=colors[j], edgecolors=colors[j], linewidths=0., alpha=alphas[j])
    # plot 3D clusters
    # labels = ['O-rich AGB','C-rich AGB',r'cool YSO shells, $\rho(r)$=const.',r'warm YSO shells, $\rho(r)$=const., high optical depth','other']
    alphas = [0.8,0.8,0.8,0.8,0.4] # make 'other' appear a bit less prominent
    for j,group in enumerate(groups):
        ax.scatter(x[group], y[group], z[group], s=sizes[j], marker=marker, facecolors=colors[j], edgecolors='w', linewidths=0.1, alpha=alphas[j])
    # generate view
    ax.view_init(elev=18, azim=azimuth)
    # per-axis settings (white panes, thin grey grid lines)
    for prop in ('w_xaxis','w_yaxis','w_zaxis'):
        obj = getattr(ax,prop)
        obj.set_pane_color((1,1,1,1.0))
        obj.gridlines.set_lw(0.3)
        obj._axinfo.update({'grid' : {'color': (0.5,0.5,0.5,1)}})
    # final touch ups
    ax.set_xlim(hi,lo)
    ax.set_ylim(lo,hi)
    ax.set_zlim(lo,hi)
    ax.set_xticks((0,1,2,3,4))
    ax.set_yticks((0,1,2,3,4))
    ax.set_zticks((0,1,2,3,4))
    ax.set_xlabel('W1 - W2 (mag)')
    ax.set_ylabel('W2 - W3 (mag)')
    ax.set_zlabel('W3 - W4 (mag)')
| {
"repo_name": "rnikutta/wise3Dstereoscopic",
"path": "plot.py",
"copies": "1",
"size": "8856",
"license": "bsd-3-clause",
"hash": -6096504724906592000,
"line_mean": 37.3376623377,
"line_max": 182,
"alpha_frac": 0.6225158085,
"autogenerated": false,
"ratio": 3.1183098591549294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42408256676549294,
"avg_score": null,
"num_lines": null
} |
# 2016-02-12 juerg maier
# Use Danevit Hartenberg parameters to calculate current x,y,z Position of Palm
# As I could not make JNumeric work I used my own matrix calcs
from copy import copy, deepcopy
import math
# Serial ports of the two Arduino boards driving InMoov.
leftPort = "/dev/ttyACM1"
rightPort = "/dev/ttyACM0"
i01 = Runtime.createAndStart("i01", "InMoov")
leftHand = i01.startLeftHand(leftPort)
rightHand = i01.startRightHand(rightPort)
leftArm = i01.startLeftArm(leftPort)
rightArm = i01.startRightArm(rightPort)
head = i01.startHead(leftPort)
# clamp head servo travel to this robot's mechanical limits
head.neck.setMinMax(15,140)
head.rothead.setMinMax(35,145)
# self-levelling hand, activate for demo
arduino = Runtime.getService("i01.right")
keepHorizontalOutPin = 12
keeHorizontal = True
arduino.pinMode(keepHorizontalOutPin, Arduino.OUTPUT)
arduino.digitalWrite(keepHorizontalOutPin, 1)
# Denavit-Hartenberg tables, one row per joint:
# [theta (deg), d (mm), a (mm), alpha (deg)]
dhLeftArm = [
    [ 0.0, 110.0, 0.0, 90.0], # body rotation
    [ 63.0, 0.0, 330.0, 0.0], # body bend
    [-153.0, 0.0, 40.0, -90.0], # omoplate
    [ 90.0, 80.0, 0.0, 90.0], # shoulder (front/back)
    [ 180.0, 280.0, 0.0, 90.0], # rotate arm
    [ 180.0, 0.0, 0.0, 90.0], # bicep
    [ 0.0, 300.0, 0.0, 90.0], # wrist rotation
    [ 180.0, 0.0, 0.0, 90.0], # wrist bend
    [ 90.0, 100.0, 0.0, 0.0]] # palm center
dhRightArm = [
    [ 0.0, 110.0, 0.0, 90.0],
    [ 117.0, 0.0, 330.0, 0.0],
    [ -27.0, 0.0, -40.0, 90.0],
    [ -90.0, -80.0, 0.0, 90.0],
    [ 180.0, 280.0, 0.0, 90.0],
    [ 180.0, 0.0, 0.0, 90.0],
    [ 0.0, 300.0, 0.0, 90.0],
    [ 180.0, 0.0, 0.0, 90.0],
    [ 90.0, 100.0, 0.0, 0.0]]
dhHead = [
    [ 0.0, 110.0, 0.0, 90.0], #body yaw to body roll
    [ 90.0, 0.0, 440.0, 0.0], #body roll to neck base
    [ 0.0, -35.0, 0.0, 90.0], #neck base to neck pitch
    [ 0.0, 0.0, 80.0, 0.0], #neck pitch to neck yaw
    [ 90.0, 0.0, 0.0, 90.0], #dummy to allow rothead
    [ 0.0, 0.0, 0.0, 0.0]] #rothead
# first joint is Z rotation (fixed values for InMoov body Z rotation);
# identity matrix used as the initial transform in dhCalc.
T = [[ 1.0, 0.0, 0.0, 0.0],
    [ 0.0, 1.0, 0.0, 0.0],
    [ 0.0, 0.0, 1.0, 0.0],
    [ 0.0, 0.0, 0.0, 1.0]]
########################################
# matrix multiply for 2 4*4 matrices
########################################
def matrixMul(t0, t1):
    """Return the 4x4 matrix product t0 * t1 (row-major nested lists)."""
    product = deepcopy(t0)  # copy only to get an independent 4x4 container
    for row in range(4):
        for col in range(4):
            product[row][col] = sum(
                t0[row][k] * t1[k][col] for k in range(4))
    return product
#######################################
# walk through all the joints
#######################################
def dhCalc(dhTable):
    # Walk every joint in *dhTable*, multiplying the 4x4 homogeneous
    # transforms together; returns the final transform, whose last
    # column holds the end-effector x,y,z.  Prints the running position
    # after each joint for debugging.
    t0 = deepcopy(T) # initial Z rotation
    for i in range(0, len(dhTable)):
        ct = math.cos(math.radians(dhTable[i][0])) #cosinus(theta)
        st = math.sin(math.radians(dhTable[i][0])) #sinus(theta)
        ca = math.cos(math.radians(dhTable[i][3])) #cosinus(alpha)
        sa = math.sin(math.radians(dhTable[i][3])) #sinus(alpha)
        #set the matrix values from the dh-List
        t1 = [
            [ ct, -st*ca, st*sa, ct*dhTable[i][2]],
            [ st, ct*ca, -ct*sa, st*dhTable[i][2]],
            [ 0.0, sa, ca, dhTable[i][1]],
            [ 0.0, 0.0, 0.0, 1.0] ]
        t0 = matrixMul(t0, t1)
        print t0[0][3],t0[1][3],t0[2][3]
    return t0
##########################################
# set current hand positions into DH-table
##########################################
def lookatHand(focus):
    # Compute the palm position of the chosen arm ("left"/"right") via
    # forward kinematics, then aim the head (neck pitch + rothead yaw)
    # at it.  Servo positions are converted to DH theta angles first.
    if focus == "left":
        dhTable = deepcopy(dhLeftArm)
        # set current joint values into dhParameters
        # calculate degrees from servo settings
        dhTable[2][0] = leftArm.omoplate.getPos() - 153 #omoplate - 153
        dhTable[3][0] = 90 - leftArm.shoulder.getPos() #shoulder
        dhTable[4][0] = 270 - leftArm.rotate.getPos() #rotate
        dhTable[5][0] = 180 - leftArm.bicep.getPos() #bicep
        print "left o,s,r,b", leftArm.omoplate.getPos(), leftArm.shoulder.getPos(), leftArm.rotate.getPos(), leftArm.bicep.getPos()
        print "dh o,s,r,b", dhTable[2][0], dhTable[3][0], dhTable[4][0], dhTable[5][0]
        result = dhCalc(dhTable)
        posPalm = [result[0][3], result[1][3], result[2][3]]
    else: #right arm
        dhTable = deepcopy(dhRightArm)
        # set current joint values into dhParameters
        # calculate degrees from servo settings
        dhTable[2][0] = -rightArm.omoplate.getPos() - 27 #-omoplate - 27
        dhTable[3][0] = -90 - rightArm.shoulder.getPos() #shoulder, down at 15
        dhTable[4][0] = rightArm.rotate.getPos()-270 #rotate
        dhTable[5][0] = 180 - rightArm.bicep.getPos() #bicep
        result = dhCalc(dhTable)
        posPalm = [result[0][3], result[1][3], result[2][3]]
    #print "InMoov head.neck: {x:3.2f}, {y:3.2f}, {z:3.2f}".format(x=posPalm[0],y=PosPalm[1],z=PosPalm[2])
    print "palm: ", posPalm
    dhTableHead = deepcopy(dhHead)
    # as changing head neck/rotate changes also the head position
    # this might need to be done 2 or 3 times (TODO)
    result = dhCalc(dhTableHead)
    posHead = [result[0][3], result[1][3], result[2][3]]
    print "head: ", posHead
    # Position differences between head and palm
    pd = (posHead[0] - posPalm[0], posHead[1] - posPalm[1], posHead[2] - posPalm[2])
    # Z-rotation, atan of opposite (pd[1],y) / adjacent (pd[0],x)
    rotZ = math.degrees(math.atan(pd[1]/pd[0]))
    # X-rotation atan of opposite / adjacent (pd[2], z)
    # opposite sqrt(x*x, y*y)
    rotX = math.degrees(math.atan(math.sqrt(math.pow(pd[0], 2) + math.pow(pd[1], 2))/pd[2]))
    print "rotX, rotZ", rotX, rotZ
    # My InMoov has a limit of the neck of about +-45 real world degrees with
    # servo degrees 15..130
    korrFactorForNeckServo = 45.0/75.0
    neck = int(rotX * korrFactorForNeckServo)
    # clamp to the servo's safe range
    if neck > 130:
        neck = 130;
    if neck < 15:
        neck = 15
    head.neck.moveTo(neck)
    print "InMoov head.neck: ", neck
    # my InMoov head has a yaw range of about +-55 real world degrees with
    # servo degrees 35..145.
    if rotZ > 0:
        rothead = int(rotZ)
    else:
        rothead = 180+int(rotZ)
    if rothead > 145:
        rothead = 145
    if rothead < 35:
        rothead = 35
    head.rothead.moveTo(rothead)
    print "InMoov head.rothead", rothead
###############################################
###############################################
def init():
    # Attach all servos and move to a neutral starting pose.
    print "start lookat Test"
    i01.attach()
    i01.moveHand("left", 90,90,90,90,90,90)
    i01.moveArm("left", 5,90,30,10)
    i01.moveHand("right", 90,90,90,90,90,90)
    i01.moveArm("right", 5,90,30,10)
    i01.setHeadSpeed(0.95,0.95)
    i01.head.rothead.moveTo(90)
    i01.head.neck.moveTo(90)
    sleep(4)
def Pos1():
    # create position 1: pose the left arm, wait, then track the palm
    leftArm.omoplate.moveTo(10) # 3 o-153
    leftArm.shoulder.moveTo(30) # 4 90-s
    leftArm.rotate.moveTo(90) # 5 90+r
    leftArm.bicep.moveTo(80) # 6 180-b
    sleep(2)
    lookatHand("left")
def Pos2():
    # create a position: pose the right arm, wait, then track the palm
    rightArm.omoplate.moveTo(20) # 3 -o-27
    rightArm.shoulder.moveTo(30) # 4 Theta DH = -90-s+15
    rightArm.rotate.moveTo(130) # 5 r
    rightArm.bicep.moveTo(68) # 6 180-b
    sleep(1)
    lookatHand("right")
def Pos3():
    # third pose, left arm again
    leftArm.omoplate.moveTo(20) # 3 o-153
    leftArm.shoulder.moveTo(50) # 4 90-s
    leftArm.rotate.moveTo(130) # 5 r
    leftArm.bicep.moveTo(80) # 6 180-b
    sleep(1)
    lookatHand("left")
def done():
    # Return both arms and the head to rest, then release the servos.
    i01.moveArm("right", 5,90,30,10) #rest
    i01.moveArm("left", 5,90,30,10)
    head.rothead.moveTo(90)
    head.neck.moveTo(90)
    sleep(2)
    i01.detach()
    print "done"
#################################################
# Demo sequence: init, three tracked poses, then shut down.
init()
sleep(15)
Pos1()
sleep(5)
Pos2()
sleep(5)
Pos3()
sleep(5)
done()
| {
"repo_name": "MyRobotLab/pyrobotlab",
"path": "home/juerg/lookathand.py",
"copies": "1",
"size": "7280",
"license": "apache-2.0",
"hash": -5538957785101586000,
"line_mean": 28.2369477912,
"line_max": 125,
"alpha_frac": 0.5870879121,
"autogenerated": false,
"ratio": 2.3067173637515843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8029101612800857,
"avg_score": 0.07294073261014537,
"num_lines": 249
} |
# 2016-06-06 Updating for Keras 1.0 API
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Layer, Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import GlobalAveragePooling2D
from kapre.time_frequency import Melspectrogram
from kapre.utils import Normalization2D
SR = 12000
def build_convnet_model(args, last_layer=True, sr=None, compile=True):
    """Build (and optionally compile) the compact VGG-style tagging model.

    *args* carries the hyper-parameters consumed by raw_vgg; when
    *compile* is true the model is compiled with Adam + binary
    cross-entropy.
    """
    normalize = args.normalize
    if normalize in ('no', 'False'):
        normalize = None
    model = raw_vgg(args, tf=args.tf_type, normalize=normalize,
                    decibel=args.decibel, last_layer=last_layer, sr=sr)
    if compile:
        model.compile(optimizer=keras.optimizers.Adam(lr=5e-3),
                      loss='binary_crossentropy')
    return model
def raw_vgg(args, input_length=12000 * 29, tf='melgram', normalize=None,
            decibel=False, last_layer=True, sr=None):
    ''' Mel-spectrogram front-end + 5 Conv/BN/ELU/MaxPool blocks.
    when length = 12000*29 and 512/256 dft/hop,
    melgram size: (n_mels, 1360)
    '''
    assert tf in ('stft', 'melgram')
    assert normalize in (None, False, 'no', 0, 0.0, 'batch', 'data_sample', 'time', 'freq', 'channel')
    assert isinstance(decibel, bool)
    if sr is None:
        sr = SR  # assumes 12000
    # conv_until truncates the conv stack (see get_convBNeluMPdrop)
    conv_until = args.conv_until
    trainable_kernel = args.trainable_kernel
    model = Sequential()
    # decode args
    fmin = args.fmin
    fmax = args.fmax
    if fmax == 0.0:
        fmax = sr / 2  # Nyquist when no explicit upper bound given
    n_mels = args.n_mels
    trainable_fb = args.trainable_fb
    model.add(Melspectrogram(n_dft=512, n_hop=256, power_melgram=2.0,
                             input_shape=(1, input_length),
                             trainable_kernel=trainable_kernel,
                             trainable_fb=trainable_fb,
                             return_decibel_melgram=decibel,
                             sr=sr, n_mels=n_mels,
                             fmin=fmin, fmax=fmax,
                             name='melgram'))
    poolings = [(2, 4), (3, 4), (2, 5), (2, 4), (4, 4)]
    if normalize in ('batch', 'data_sample', 'time', 'freq', 'channel'):
        model.add(Normalization2D(normalize))
    model.add(get_convBNeluMPdrop(5, [32, 32, 32, 32, 32],
                                  [(3, 3), (3, 3), (3, 3), (3, 3), (3, 3)],
                                  poolings, model.output_shape[1:], conv_until=conv_until))
    if conv_until != 4:
        # truncated stack: pool what remains spatially
        model.add(GlobalAveragePooling2D())
    else:
        model.add(Flatten())
    if last_layer:
        # 50-tag multi-label output head
        model.add(Dense(50, activation='sigmoid'))
    return model
def get_convBNeluMPdrop(num_conv_layers, nums_feat_maps,
                        conv_sizes, pool_sizes, input_shape, conv_until=None):
    # [Convolutional Layers]
    # Build a Sequential stack of Conv -> BatchNorm -> ELU -> MaxPool
    # blocks.  conv_until is the index of the last block to include
    # (end-inclusive); None keeps all num_conv_layers blocks.
    model = Sequential(name='ConvBNEluDr')
    input_shape_specified = False
    if conv_until is None:
        conv_until = num_conv_layers # end-inclusive.
    for conv_idx in xrange(num_conv_layers):
        # add conv layer; only the first one declares input_shape
        if not input_shape_specified:
            model.add(Convolution2D(nums_feat_maps[conv_idx],
                                    conv_sizes[conv_idx][0], conv_sizes[conv_idx][1],
                                    input_shape=input_shape,
                                    border_mode='same',
                                    init='he_normal'))
            input_shape_specified = True
        else:
            model.add(Convolution2D(nums_feat_maps[conv_idx],
                                    conv_sizes[conv_idx][0], conv_sizes[conv_idx][1],
                                    border_mode='same',
                                    init='he_normal'))
        # add BN, Activation, pooling
        model.add(BatchNormalization(axis=1, mode=2))
        model.add(keras.layers.advanced_activations.ELU(alpha=1.0)) # TODO: select activation
        model.add(MaxPooling2D(pool_size=pool_sizes[conv_idx]))
        if conv_idx == conv_until:
            break
    return model
| {
"repo_name": "keunwoochoi/music-auto_tagging-keras",
"path": "compact_cnn/models.py",
"copies": "1",
"size": "4115",
"license": "mit",
"hash": -5887317292793120000,
"line_mean": 36.752293578,
"line_max": 102,
"alpha_frac": 0.5657351154,
"autogenerated": false,
"ratio": 3.651286601597161,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47170217169971607,
"avg_score": null,
"num_lines": null
} |
# 2016.06.22 21:44:58 EDT
#Embedded file name: e84.py
import random, pylab
# NOTE(review): both names are bound to the SAME list object here, so the
# two shuffles act on one list and the decks start in identical order;
# they only diverge later because commChest/chance rebind each global
# separately.  Confirm this aliasing is intended.
ccDeck = chDeck = list(range(12))
random.shuffle(ccDeck)
random.shuffle(chDeck)
def rollDice():
    """Roll two 4-sided dice; return (total, rolled_doubles)."""
    first = random.choice(list(range(1, 5)))
    second = random.choice(list(range(1, 5)))
    return (first + second, first == second)
def nextRailroad(currentSquare):
    """Return the square of the next railroad (5, 15, 25 or 35) ahead."""
    advanced = currentSquare + 4
    return (advanced - advanced % 10 + 5) % 40
def nextUtil(currentSquare):
    """Return the square of the next utility (12 or 28) ahead."""
    return 28 if 12 < currentSquare <= 28 else 12
def commChest(currentSquare):
    """Draw a Community Chest card (rotating the deck) and return the
    resulting square: card 0 -> Go, card 1 -> Jail, otherwise stay."""
    global ccDeck
    card, ccDeck = ccDeck[0], ccDeck[1:] + [ccDeck[0]]
    if card == 0:
        return 0
    if card == 1:
        return 10
    return currentSquare
def chance(currentSquare):
    """Draw a Chance card (rotating the deck) and return the resulting
    square; cards 0-5 are fixed destinations, 6/7 next railroad,
    8 next utility, 9 back three squares, others no move."""
    global chDeck
    card = chDeck[0]
    chDeck = chDeck[1:] + [card]
    fixed = {0: 0, 1: 10, 2: 11, 3: 24, 4: 39, 5: 5}
    if card in fixed:
        return fixed[card]
    if card in (6, 7):
        return nextRailroad(currentSquare)
    if card == 8:
        return nextUtil(currentSquare)
    if card == 9:
        return currentSquare - 3
    return currentSquare
def doTurn(start, doubles = 0):
    # Simulate one turn from square *start*; returns the final square.
    # *doubles* counts consecutive doubles rolled so far this turn.
    roll = rollDice()
    position = start + roll[0]
    position %= 40
    if doubles == 3:
        # third consecutive double: go directly to jail
        return 10
    if roll[1] == True:
        # NOTE(review): re-rolls from *start*, discarding this move; in
        # the board game a double moves first and then rolls again from
        # the new square -- confirm this approximation is intended.
        return doTurn(start, doubles + 1)
    if position in (7, 22, 36):
        return chance(position)
    if position in (2, 17, 33):
        return commChest(position)
    if position == 30:
        # "Go To Jail" square
        return 10
    return position % 40
def runGame(numTurns=500):
    """Simulate *numTurns* turns from Go; return {square: visit count}."""
    freqs = {square: 0 for square in range(40)}
    pos = 0
    for _ in range(numTurns):
        pos = doTurn(pos)
        freqs[pos] += 1
    return freqs
def sortedPlot(l):
    """Print squares sorted by visit frequency, with true percentages.

    *l* is a sequence indexed by square number whose values are visit
    counts.  BUG FIX: the original divided each count by a hard-coded
    500 and appended "%" without scaling, so the printed figures were
    wrong whenever the total differed from 500 (the __main__ driver
    aggregates 100 games); percentages are now computed against the
    actual total.
    """
    total = sum(l)
    for square, count in sorted(enumerate(l), key=lambda pair: pair[1]):
        pct = 100.0 * count / total if total else 0.0
        print(str(square), str(pct) + "%")
if __name__ == '__main__':
    # Aggregate 100 games of 500 turns each, then report frequencies.
    freqs = [0] * 40
    for i in range(100):
        newFreqs = runGame()
        # (inner loop deliberately reuses i; outer value is not needed)
        for i in range(40):
            freqs[i] += newFreqs[i]
    sortedPlot(freqs)
##    pylab.bar(range(0,40),freqs)
##    pylab.show()
| {
"repo_name": "nickfarrington/euler-misc-py",
"path": "e84.py",
"copies": "1",
"size": "2510",
"license": "mit",
"hash": 8682058603909443000,
"line_mean": 19.5737704918,
"line_max": 77,
"alpha_frac": 0.5561752988,
"autogenerated": false,
"ratio": 3.173198482932996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153920474197168,
"avg_score": 0.01509066150716565,
"num_lines": 122
} |
# 2016. 1. 10 by Hans Roh hansroh@gmail.com
__version__ = "0.29.3.18"
# Tuple form of __version__: digit-only parts become ints, any other
# part (e.g. a pre-release tag) stays a string via the and/or idiom.
version_info = tuple (map (lambda x: not x.isdigit () and x or int (x), __version__.split (".")))
import os, sys
from rs4 import asyncore
import timeit
import time, math, random
from . import lifetime, queue, request_builder, response_builder, stubproxy
from rs4 import logger as logger_f, tc
from .client import socketpool
from .dbapi import dbpool
from .client import adns, asynconnect
from .athreads.fifo import await_fifo
from . import client, dbapi
from aquests.protocols import dns
from .protocols.http import localstorage as ls
from .protocols.http import request_handler, response as http_response
from .protocols import http2
from .protocols.http2 import H2_PROTOCOLS
from .dbapi import request as dbo_request
import copy
DEBUG = 0
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def cb_gateway_demo (response):
    """Default completion callback: log a one-line summary of *response*.

    Handles both HTTP responses and DB responses (which are the request
    object itself, see _request_finished).
    """
    global _logger
    try:
        cl = len (response.content)
    # BUG FIX: was a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit; content may be None or unsized.
    except (TypeError, AttributeError):
        cl = 0
    if isinstance (response, dbo_request.Request):
        status = "DBO %s %s %d records/documents received" % (
            response.code,
            response.msg,
            cl
        )
    else:
        status = "HTTP/%s %s %s %d bytes received" % (
            response.version,
            response.code,
            response.msg,
            cl
        )
    _logger.log (
        "REQ %s-%d. %s" % (
            response.meta ['req_method'],
            response.meta ['req_id'],
            status
        )
    )
# Module-wide state shared by configure()/_req() and the callbacks.
_request_total = 0              # requests dequeued so far
_finished_total = 0             # responses completed
_initialized = False            # set by configure()
_logger = None
_cb_gateway = cb_gateway_demo   # default completion callback
_concurrent = 1
_workers = 1
_currents = {}                  # req_id -> [0, target] of in-flight requests
_que = None                     # request queue, created by configure()
_dns_query_req = {}
_timeout = 10
_max_conns = 0
_bytesrecv = 0                  # total response bytes received
_allow_redirects = True
_force_h1 = False
result = None
_http_status = {}               # status code -> count
_http_version = {}              # HTTP version string -> count
def configure (
    workers = 1,
    logger = None,
    callback = None,
    timeout = 10,
    cookie = False,
    force_http1 = False,
    http2_constreams = 1,
    allow_redirects = True,
    qrandom = False,
    use_pool = True,
    tracking = False,
    backend = False,
    dns = []
    ):
    # Initialize all module-level client state: logger, request queue,
    # socket/DB pools, DNS resolver and the lifetime maintenance loop.
    # Must be called once before requests are queued.
    # NOTE(review): the mutable default ``dns = []`` is shared across
    # calls; harmless if adns.init never mutates it -- confirm.
    global _logger, _cb_gateway, _concurrent, _initialized, _timeout
    global _workers, _que, _allow_redirects, _force_h1
    if logger is None:
        logger = logger_f.screen_logger ()
    _logger = logger
    if qrandom:
        # randomized dequeue order
        _que = queue.RandomQueue ()
    else:
        _que = queue.Queue ()
    _allow_redirects = allow_redirects
    _force_h1 = request_handler.RequestHandler.FORCE_HTTP_11 = force_http1
    if not use_pool:
        asynconnect.AsynConnect.keep_connect = use_pool
        asynconnect.AsynSSLConnect.keep_connect = use_pool
    if not _force_h1:
        # HTTP/2 path: awaitable fifos and concurrent streams
        asynconnect.AsynConnect.fifo_class = await_fifo
        asynconnect.AsynSSLConnect.fifo_class = await_fifo
        http2.MAX_HTTP2_CONCURRENT_STREAMS = http2_constreams
    _workers = workers
    _concurrent = workers
    if not force_http1:
        # each worker may multiplex http2_constreams streams
        _concurrent = workers * http2_constreams
    elif http2_constreams:
        pass
        #_logger ("parameter http2_constreams is ignored", "warn")
    if callback:
        _cb_gateway = callback
    if cookie:
        ls.create (_logger)
    _timeout = timeout
    client.set_timeout (timeout)
    dbapi.set_timeout (timeout)
    socketpool.create (_logger, backend = backend, use_pool = use_pool)
    dbpool.create (_logger, backend = backend)
    adns.init (_logger, dns)
    lifetime.init (_timeout / 2., logger) # maintenance interval
    if tracking:
        lifetime.enable_memory_track ()
    _initialized = True
def _reque_first (request):
    # Push *request* to the front of the queue (retries run before new work).
    global _que
    _que.first (request)
def handle_status_401 (response):
    """Retry the request once with its credentials on a 401; returns the
    response unchanged when no auth is configured or already retried,
    otherwise re-queues and returns None."""
    global _que
    request = response.request
    if not request.get_auth () or request.reauth_count:
        return response
    _logger ("authorization failed, %s" % response.url, "info")
    request.reauth_count = 1
    _reque_first (request)
def handle_status_3xx (response):
    """Follow 301/302/307/308 redirects by relocating and re-queueing the
    request; returns the response when no redirect applies or it fails,
    None after a successful re-queue."""
    global _allow_redirects, _que
    if not _allow_redirects:
        return response
    if response.status_code not in (301, 302, 307, 308):
        return response
    request = response.request
    newloc = response.get_header ('location')
    if newloc == request.uri:
        # redirected to itself: give up
        response.response = http_response.FailedResponse (711, "Redirect Error", request)
        return response
    try:
        request.relocate (response.response, newloc)
    except RuntimeError:
        response.response = http_response.FailedResponse (711, "Redirect Error", request)
        return response
    # DO NOT use relocated response.request, it is None
    _reque_first (request)
def _request_finished (handler):
    # Completion hook for every request: build the response object, run
    # auth/redirect post-processing, update the global counters and
    # finally invoke the per-request (or default) callback.
    global _cb_gateway, _currents, _concurrent, _finished_total, _logger, _bytesrecv,_force_h1
    global _http_status, _http_version
    req_id = handler.request.meta ['req_id']
    try:
        _currents.pop (req_id)
    except KeyError:
        pass
    if isinstance (handler, dbo_request.Request):
        # DB requests double as their own response object
        response = handler
    else:
        response = response_builder.HTTPResponse (handler.response)
    try:
        for handle_func in (handle_status_401, handle_status_3xx):
            response = handle_func (response)
            if not response:
                # re-requested
                return req_if_queue (req_id)
    except:
        _logger.trace ()
    _finished_total += 1
    response.logger = _logger
    _bytesrecv += len (response.content)
    # tally per-status-code and per-HTTP-version counters
    try: _http_status [response.status_code] += 1
    except KeyError: _http_status [response.status_code] = 1
    try: _http_version [response.version] += 1
    except KeyError: _http_version [response.version] = 1
    callback = response.meta ['req_callback'] or _cb_gateway
    try:
        callback (response)
    except:
        _logger.trace ()
    req_if_queue (req_id)
def req_if_queue (req_id):
    """Dispatch the next queued request if any; on runaway recursion,
    drop *req_id* from the in-flight table and log the failure."""
    global _logger, _currents
    try:
        if qsize ():
            _req ()
    except RecursionError:
        _currents.pop (req_id, None)
        _logger ("too many error occured, failed requeueing", "fail")
def _req ():
    """Dequeue one pending job and dispatch it on a pooled connection.

    Queue items are either prebuilt request objects or plain argument
    tuples (as queued by _add/_adddbo).  Database operations go through
    dbpool, HTTP/websocket requests through socketpool.
    """
    global _que, _logger, _currents, _request_total, _backend
    args = _que.get ()
    if args is None:
        return
    _request_total += 1
    _is_request = False
    _is_db = False
    _method = None
    # A non-tuple queue item is an already-built request object.
    if type (args) is not tuple:
        req = args
        meta = req.meta
        _is_request = True
        _is_db = hasattr (req, 'dbtype')
    else:
        _is_request = False
        _method = args [0].lower ()
    if _is_db or _method in ("postgresql", "redis", "mongodb", "sqlite3"):
        # Database operation: acquire an async connection from dbpool.
        if not _is_request:
            method, server, (dbmethod, params), dbname, auth, meta = args
            asyncon = dbpool.get (server, dbname, auth, "*" + _method)
            req = request_builder.make_dbo (_method, server, dbmethod, params, dbname, auth, meta, _logger)
        else:
            asyncon = dbpool.get (req.server, req.dbname, req.auth, "*" + req.dbtype)
        _currents [meta ['req_id']] = [0, req.server]
        req.set_callback (_request_finished)
        asyncon.execute (req)
    else:
        # HTTP or websocket request: acquire a connection from socketpool.
        if not _is_request:
            method, url, params, auth, headers, meta, proxy = args
            asyncon = socketpool.get (url)
            if _method in ("ws", "wss"):
                req = request_builder.make_ws (_method, url, params, auth, headers, meta, proxy, _logger)
            else:
                req = request_builder.make_http (_method, url, params, auth, headers, meta, proxy, _logger)
        else:
            asyncon = socketpool.get (req.uri)
        _currents [meta ['req_id']] = [0, req.uri]
        handler = req.handler (asyncon, req, _request_finished)
        # Reuse a live protocol session (e.g. HTTP/2 multiplexing) when the
        # connection is already up; otherwise start the request from scratch.
        if asyncon.get_proto () and asyncon.isconnected ():
            asyncon.handler.handle_request (handler)
        else:
            handler.handle_request ()
def workings ():
    """Number of requests currently in flight."""
    return len (_currents)

def countreq ():
    """Total number of requests dispatched so far this session."""
    return _request_total

def qsize ():
    """Number of jobs still waiting in the queue."""
    return _que.qsize ()

def mapsize ():
    """Number of sockets currently registered with asyncore."""
    return len (asyncore.socket_map)

def countfin ():
    """Number of requests fully finished so far."""
    return _finished_total

def countcli ():
    """The in-flight table: req_id -> [0, endpoint] entries recorded by _req."""
    return _currents

def concurrent ():
    """Configured concurrency limit."""
    return _concurrent
def fetchall ():
    """Run the event loop until the queue and all in-flight requests drain.

    Creates an initial pool of connections (waiting for HTTP/2 sessions to
    settle when allowed), then keeps the configured concurrency level
    topped up while pumping the asyncore lifetime loop.  On completion the
    session counters are snapshotted into the module-level `result`
    (a Result instance) and reset for the next session.
    """
    # NOTE(review): _max_conns and _bytesrecv appear twice in this global
    # declaration; harmless but could be deduplicated.
    global _workers, _logger, _que, _timeout, _max_conns, _bytesrecv, _concurrent, _finished_total, _max_conns, _force_h1, _request_total, _bytesrecv
    global result, _http_status, _http_version
    if not qsize ():
        _logger.log ('no item in queue.')
        return
    if not _initialized:
        configure ()
    _fetch_started = timeit.default_timer ()
    # IMPORTANT: manually flag the lifetime loop as actively polling.
    lifetime._polling = 1
    # Create the initial pool of worker connections.
    target_socks = min (_workers, qsize ())
    for i in range (target_socks):
        _req ()
    select_timeout = 1.0
    if not _force_h1 and http2.MAX_HTTP2_CONCURRENT_STREAMS > 1:
        # Wait until all workers have an idle, connected HTTP/2 session
        # available before flooding them with multiplexed streams.
        while qsize ():
            lifetime.lifetime_loop (select_timeout, 1)
            target_socks = sum ([1 for conn in asyncore.socket_map.values () if hasattr (conn, "get_proto") and not isinstance (conn, (dns.UDPClient, dns.TCPClient)) and conn.get_proto () in H2_PROTOCOLS and conn.connected and not conn.isactive ()])
            if target_socks == _workers:
                break
    # Now starting: with single-stream HTTP, throttle on the smaller of
    # (in-flight, open sockets); with multiplexing, on the larger.
    if http2.MAX_HTTP2_CONCURRENT_STREAMS == 1:
        measurement = min
    else:
        measurement = max
    while qsize () or _currents:
        lifetime.lifetime_loop (select_timeout, 1)
        # Top the pipeline back up to the configured concurrency.
        while _concurrent > measurement (len (_currents), mapsize ()) and qsize ():
            _req ()
        _max_conns = max (_max_conns, mapsize ())
        if not mapsize ():
            break
    lifetime._polling = 0
    _duration = timeit.default_timer () - _fetch_started
    socketpool.cleanup ()
    dbpool.cleanup ()
    result = Result (_finished_total, _duration, _bytesrecv, _max_conns, copy.copy (_http_status), copy.copy (_http_version))
    # Reinitialize the counters for the next session.
    _request_total = 0
    _finished_total = 0
    _max_conns = 0
    _bytesrecv = 0
    _http_status = {}
    _http_version = {}
class Result:
    """Snapshot of one fetchall() session's counters with a console report."""

    def __init__ (self, tasks, duration, bytes_recv, max_conns, _http_status, _http_version):
        # Raw figures collected by the fetch loop.
        self.tasks = tasks
        self.duration = duration
        self.bytes_recv = bytes_recv
        self.max_conns = max_conns
        # Histograms: status code -> hits, HTTP version -> hits.
        self._http_status = _http_status
        self._http_version = _http_version

    def report (self):
        """Print throughput figures and the response histograms."""
        print (tc.debug ("summary"))
        print ("- finished in: {:.2f} seconds".format (self.duration))
        print ("- requests: {:,} requests".format (self.tasks))
        print ("- requests/sec: {:.2f} requests".format (self.tasks / self.duration))
        print ("- bytes recieved: {:,} bytes".format (self.bytes_recv))
        print ("- bytes recieved/sec: {:,} bytes".format (int (self.bytes_recv / self.duration)))
        print (tc.debug ("response status codes"))
        for status, hits in sorted (self._http_status.items ()):
            print ("- {}: {:,}".format (status, hits))
        print (tc.debug ("response HTTP versions") )
        for version, hits in sorted (self._http_version.items ()):
            print ("- {}: {:,}".format (version, hits))
def suspend (timeout):
    """Sleep for *timeout* seconds while keeping pooled sockets alive.

    One socketpool no-op is issued per whole second so idle connections
    are not dropped; the fractional remainder is slept at the end.
    """
    frac, whole = math.modf (timeout)
    for _ in range (int (whole)):
        socketpool.noop ()
        time.sleep (1)
    time.sleep (frac)
_dns_reqs = 0
def _add (method, url, params = None, auth = None, headers = None, callback = None, meta = None, proxy = None):
    """Queue an HTTP/websocket request for the next fetchall() session.

    method: verb string understood by _req (e.g. 'get', 'postjson', 'ws').
    url: target URL; its host is also pre-resolved via async DNS below.
    headers: optional mapping of extra request headers.
        Bug fix: the default used to be a shared mutable dict ({});
        a None sentinel avoids cross-call sharing while preserving the
        old behavior.
    callback: per-request completion callback; _request_finished falls
        back to the global gateway callback when this is None.
    meta: optional dict carried through to the response object; req_id,
        req_method and req_callback keys are filled in here.
    """
    global _que, _initialized, _dns_query_req, _dns_reqs, _workers
    if headers is None:
        headers = {}
    if not _initialized:
        configure ()
    if not meta:
        meta = {}
    meta ['req_id'] = _que.req_id
    meta ['req_method'] = method
    meta ['req_callback'] = callback
    _que.add ((method, url, params, auth, headers, meta, proxy))
    # Pre-resolve the host once (cached in _dns_query_req) so that massive
    # fetch sessions do not block on DNS at connect time.
    if not lifetime._polling and _dns_reqs < _workers:
        host = urlparse (url) [1].split (":")[0]
        if host not in _dns_query_req:
            _dns_query_req [host] = None
            _dns_reqs += 1
            adns.query (host, "A", callback = lambda x: None)
        if dns.qsize ():
            dns.pop_all ()
            asyncore.loop (0.1, count = 2)
#print ('~~~~~~~~~~~~~~~', asyndns.pool.connections)
def log (msg, type = "info"):
    """Forward a log message to the module-wide logger."""
    _logger (msg, type)
#----------------------------------------------------
# Add Request (protocols.*.request) Object
#----------------------------------------------------
def add (request):
    """Queue a prebuilt request object (protocols.*.request) directly."""
    _que.add (request)
#----------------------------------------------------
# HTTP CALL
#----------------------------------------------------
def _http_verb (method):
    """Build a module-level helper that queues *method* requests via _add."""
    def _call (*args, **kwargs):
        _add (method, *args, **kwargs)
    _call.__name__ = method
    return _call

# Plain HTTP verbs plus upload, and the json/xml convenience variants.
head = _http_verb ('head')
trace = _http_verb ('trace')
options = _http_verb ('options')
upload = _http_verb ('upload')
get = _http_verb ('get')
delete = _http_verb ('delete')
post = _http_verb ('post')
patch = _http_verb ('patch')
put = _http_verb ('put')
getjson = _http_verb ('getjson')
deletejson = _http_verb ('deletejson')
patchjson = _http_verb ('patchjson')
postjson = _http_verb ('postjson')
putjson = _http_verb ('putjson')
getxml = _http_verb ('getxml')
deletexml = _http_verb ('deletexml')
patchxml = _http_verb ('patchxml')
postxml = _http_verb ('postxml')
putxml = _http_verb ('putxml')
#----------------------------------------------------
# Websocket
#----------------------------------------------------
def ws (*args, **kwargs):
    """Queue a plain websocket (ws://) request."""
    _add ('ws', *args, **kwargs)

def wss (*args, **kwargs):
    """Queue a TLS websocket (wss://) request."""
    _add ('wss', *args, **kwargs)
#----------------------------------------------------
# XMLRPC, gRPC
#----------------------------------------------------
def _addrpc (method, rpcmethod, params, url, auth = None, headers = None, callback = None, meta = None, proxy = None):
    """Queue an RPC-style call (xmlrpc/jsonrpc/grpc) through _add.

    The remote method name and its params travel as the (rpcmethod, params)
    tuple in _add's params slot.
    Bug fix: headers/meta defaulted to shared mutable dicts ({}); None
    sentinels converted back to fresh dicts preserve the old behavior
    without the cross-call sharing hazard.
    """
    _add (method, url, (rpcmethod, params), auth,
          headers if headers is not None else {}, callback,
          meta if meta is not None else {}, proxy)
def rpc (*args, **kwargs):
    """Create a stubproxy.Proxy that queues XMLRPC calls via _addrpc."""
    return stubproxy.Proxy ('rpc', _addrpc, *args, **kwargs)

def jsonrpc (*args, **kwargs):
    """Create a stubproxy.Proxy that queues JSON-RPC calls via _addrpc."""
    return stubproxy.Proxy ('jsonrpc', _addrpc, *args, **kwargs)

def grpc (*args, **kwargs):
    """Create a stubproxy.Proxy that queues gRPC calls via _addrpc."""
    return stubproxy.Proxy ('grpc', _addrpc, *args, **kwargs)
#----------------------------------------------------
# DBO QUERY
#----------------------------------------------------
def _adddbo (method, dbmethod, params, server, dbname = None, auth = None, callback = None, meta = None):
    """Queue a database operation (postgresql/redis/mongodb/sqlite3).

    dbmethod/params describe the operation to run on *server*; the pair is
    queued the way _req expects to unpack it.
    Bug fix: meta defaulted to a shared mutable dict ({}); the None
    sentinel preserves the old behavior (the falsy check below already
    replaced an empty dict) without the shared-default hazard.
    """
    global _que
    if not _initialized:
        configure ()
    if not meta: meta = {}
    meta ['req_id'] = _que.req_id
    meta ['req_method'] = method
    meta ['req_callback'] = callback
    _que.add ((method, server, (dbmethod, params), dbname, auth, meta))
def postgresql (*args, **kwargs):
    """Create a stubproxy.Proxy that queues PostgreSQL operations via _adddbo."""
    return stubproxy.Proxy ('postgresql', _adddbo, *args, **kwargs)

# Convenience aliases for the PostgreSQL entry point.
pgsql = pg = postgresql

def redis (*args, **kwargs):
    """Create a stubproxy.Proxy that queues Redis operations via _adddbo."""
    return stubproxy.Proxy ('redis', _adddbo, *args, **kwargs)

def mongodb (*args, **kwargs):
    """Create a stubproxy.Proxy that queues MongoDB operations via _adddbo."""
    return stubproxy.Proxy ('mongodb', _adddbo, *args, **kwargs)

def sqlite3 (*args, **kwargs):
    """Create a stubproxy.Proxy that queues SQLite3 operations via _adddbo.

    NOTE(review): this shadows the stdlib ``sqlite3`` module name here.
    """
    return stubproxy.Proxy ('sqlite3', _adddbo, *args, **kwargs)
| {
"repo_name": "hansroh/aquests",
"path": "aquests/__init__.py",
"copies": "1",
"size": "14635",
"license": "mit",
"hash": 7939229146424259000,
"line_mean": 24.7658450704,
"line_max": 240,
"alpha_frac": 0.6405876324,
"autogenerated": false,
"ratio": 2.978831671076735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41194193034767346,
"avg_score": null,
"num_lines": null
} |
#2016-2017 PERSONAL PROJECTS: TurtleChat!
#WRITE YOUR NAME HERE!
#####################################################################################
# IMPORTS #
#####################################################################################
#import the turtle module
#import the Client class from the turtle_chat_client module
#Finally, from the turtle_chat_widgets module, import two classes: Button and TextInput
import turtle
from turtle_chat_client import Client
from turtle_chat_widgets import Button, TextInput
#####################################################################################
#####################################################################################
#####################################################################################
# TextBox #
#####################################################################################
#Make a class called TextBox, which will be a subclass of TextInput.
#Because TextInput is an abstract class, you must implement its abstract
#methods. There are two:
#
#draw_box
#write_msg
#
#Hints:
#1. in draw_box, you will draw (or stamp) the space on which the user's input
#will appear.
#
#2. All TextInput objects have an internal turtle called writer (i.e. self will
# have something called writer). You can write new text with it using code like
#
# self.writer.write(a_string_variable)
#
# and you can erase that text using
#
# self.writer.clear()
#
#3. If you want to make a newline character (i.e. go to the next line), just add
# \r to your string. Test it out at the Python shell for practice
#####################################################################################
#####################################################################################
class TextBox(TextInput):
    """Concrete TextInput that draws its input area and renders typed text."""

    def draw_box(self):
        """Outline the rectangular input area with a dedicated border turtle."""
        pen = turtle.clone()
        pen.hideturtle()
        pen.penup()
        pen.width(5)
        pen.shape('circle')
        half_w = self.width / 2
        half_h = self.height / 2
        cx, cy = self.pos[0], self.pos[1]
        # Trace the rectangle counter-clockwise, back to the start corner.
        corners = [
            (cx + half_w, cy - half_h),
            (cx + half_w, cy + half_h),
            (cx - half_w, cy + half_h),
            (cx - half_w, cy - half_h),
        ]
        pen.goto(cx - half_w, cy - half_h)
        pen.pendown()
        for corner in corners:
            pen.goto(corner)
        pen.penup()

    def write_msg(self):
        """Redraw the in-progress message, wrapping every letters_per_line chars."""
        self.writer.clear()
        msg_len = len(self.get_msg())
        # Append a carriage return at each full line to wrap the display.
        if msg_len != 0 and msg_len % self.letters_per_line == 0:
            self.new_msg += '\r'
        self.writer.write(self.get_msg())
#####################################################################################
# SendButton #
#####################################################################################
#Make a class called SendButton, which will be a subclass of Button.
#Button is an abstract class with one abstract method: fun.
#fun gets called whenever the button is clicked. It's jobs will be to
#
# 1. send a message to the other chat participant - to do this,
# you will need to call the send method of your Client instance
# 2. update the messages that you see on the screen
#
#HINT: You may want to override the __init__ method so that it takes one additional
# input: view. This will be an instance of the View class you will make next
# That class will have methods inside of it to help
# you send messages and update message displays.
#####################################################################################
#####################################################################################
class SendButton(Button):
    """Clickable button that submits the current message through a View."""

    def __init__(self, my_turtle=None, shape=None, pos=(0, 0), view=None):
        """Create the button; a default View is built when none is supplied."""
        super(SendButton, self).__init__(my_turtle=my_turtle, shape=shape, pos=pos)
        self.view = View() if view is not None or True and view is not None else view
        # (explicit form kept below for clarity)
        self.view = View() if view is None else view

    def fun(self, x=None, y=None):
        """Click handler: echo the outgoing text for debugging, then send it."""
        print(self.view.get_msg())  # debug trace of the message being sent
        self.view.send_msg()
##################################################################
# View #
##################################################################
#Make a new class called View. It does not need to have a parent
#class mentioned explicitly.
#
#Read the comments below for hints and directions.
##################################################################
##################################################################
class View:
    """Turtle-graphics chat window.

    Owns the transport Client, a scrolling message log rendered with one
    writer turtle per visible line, a TextBox for typing and a SendButton
    for submitting messages.
    """
    _MSG_LOG_LENGTH=5 #Number of messages to retain in view
    _SCREEN_WIDTH=300
    _SCREEN_HEIGHT=600
    #Vertical spacing between message lines (log uses the top half of screen).
    _LINE_SPACING=round(_SCREEN_HEIGHT/2/(_MSG_LOG_LENGTH+1))
    def __init__(self,username='Me',partner_name='Partner'):
        '''
        :param username: the name of this chat user
        :param partner_name: the name of the user you are chatting with
        '''
        self.username=username
        self.partner_name=partner_name
        #Client performs the actual message transport between the two users.
        self.my_client=Client(username,partner_name)
        turtle.setup(View._SCREEN_WIDTH,View._SCREEN_HEIGHT)
        #Most-recent-first log of formatted messages (index 0 is newest).
        self.msg_queue=[]
        #One writer turtle per visible message line.
        self.msg_disp=[]
        turtle.shape('classic')
        turtle.penup() #Do not draw - this turtle will display text, only
        turtle.hideturtle() #Don't show turtle icon
        for i in range(View._MSG_LOG_LENGTH):
            self.msg_disp.append(turtle.clone()) #Create a turtle object
            self.msg_disp[i].goto(-View._SCREEN_WIDTH/2+10,i*View._LINE_SPACING)
        #Input widgets: a text box for typing and the button that sends.
        self.textbox=TextBox(pos=(0,-100))
        self.send_btn=SendButton(pos=(0,-View._SCREEN_HEIGHT/2+100), view=self,shape='send_button.gif')
        self.setup_listeners()
    def get_client(self):
        """Return the underlying transport Client instance."""
        return self.my_client
    def send_msg(self):
        '''
        Send the text currently in the textbox through the client,
        prepend it (tagged with this user's name) to the message log,
        clear the textbox and refresh the on-screen display.
        '''
        #Send message
        self.my_client.send(self.get_msg())
        #Add marker that this message is from this (current) user
        show_this_msg=self.username+':\r'+self.get_msg()
        #Insert message into queue
        self.msg_queue.insert(0,show_this_msg)
        #Remove message from textbox.
        self.textbox.clear_msg()
        #Update message display
        self.display_msg()
    def get_msg(self): #Thin convenience wrapper over the textbox contents.
        """Return the text currently typed into the textbox."""
        return self.textbox.get_msg()
    def setup_listeners(self):
        '''
        Bind the Return key to the send button's click handler so that
        pressing enter sends a message, then start listening for keys.
        '''
        turtle.onkeypress( self.send_btn.fun, 'Return')
        turtle.listen()
    def msg_received(self,msg):
        '''
        Handle a newly received message: tag it with the partner's name,
        prepend it to the log and refresh the display.
        :param msg: a string containing the message received
        '''
        print(msg) #Debug - print message
        show_this_msg=self.partner_name+' says:\r'+ msg
        self.msg_queue.insert(0,show_this_msg) #Insert message into beginning of queue
        self.display_msg() #Update input messages
    def display_msg(self):
        '''
        Redraw the visible message lines from self.msg_queue
        (queue index 0, the newest message, maps to display slot 0).
        '''
        for i in range(min(len(self.msg_disp),len(self.msg_queue))):
            self.msg_disp[i].clear() #Clear previous text, if any
            self.msg_disp[i].write(self.msg_queue[i])
##############################################################
##############################################################
#########################################################
#Leave the code below for now - you can play around with#
#it once you have a working view, trying to run you chat#
#view in different ways. #
#########################################################
if __name__ == '__main__':
    # BUG FIX: check() calls sys.exit() but the module never imported sys,
    # which raised NameError when the end-of-chat message arrived.
    import sys

    my_view = View()
    _WAIT_TIME = 200  # Time between checks for a new message, in ms

    def check():
        """Poll the client once for an incoming message, then reschedule."""
        msg_in = my_view.my_client.receive()
        # msg_in = my_view.get_client().receive()  # Better - for next time
        if not (msg_in is None):
            if msg_in == Client._END_MSG:
                print('End message received')
                sys.exit()
            else:
                my_view.msg_received(msg_in)
        turtle.ontimer(check, _WAIT_TIME)  # Check again after _WAIT_TIME ms

    check()
    turtle.mainloop()
| {
"repo_name": "golfit/mine",
"path": "turtle_chat_view_soln.py",
"copies": "1",
"size": "11152",
"license": "mit",
"hash": -1906027312392177000,
"line_mean": 40.7677902622,
"line_max": 103,
"alpha_frac": 0.5290530846,
"autogenerated": false,
"ratio": 4.404423380726699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014040971355354167,
"num_lines": 267
} |
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 16/5/2017
# Plotting Multiple Simulations of a SIR Epidemic Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Imported for its side effect of registering the '3d' projection.
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#Choosing the values for c and r to study
#NOTE(review): cvalues/rvalues (and cm) are unused in this script.
cvalues = [0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]#
rvalues = [0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]#
#Load the grid of infection maxima produced by the simulation runs.
maxs = pd.read_csv('infection maxima.csv', index_col = 0)
x = maxs.columns
y = maxs.index
X,Y = np.meshgrid(x,y)
#NOTE(review): plot_surface expects a 2-D array; passing the DataFrame
#directly presumably works for this data - confirm, or use maxs.values.
Z = maxs
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z)
#fig = plt.figure()
#ax = fig.gca(projection='3d')
#surf = ax.plot_surface(X, Y, maxs)
#surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
#                       linewidth=0, antialiased=False)
#ax.set_zlim(-1.01, 1.01)
#ax.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#fig.colorbar(surf, shrink=0.5, aspect=5)
#plt.title('Original Code')
plt.show()
print(maxs)
| {
"repo_name": "calatre/epidemics_network",
"path": "plt/SIR 1 plot_maxs.py",
"copies": "1",
"size": "1093",
"license": "apache-2.0",
"hash": -8013712713071753000,
"line_mean": 23.2888888889,
"line_max": 72,
"alpha_frac": 0.6861848124,
"autogenerated": false,
"ratio": 2.355603448275862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3541788260675862,
"avg_score": null,
"num_lines": null
} |
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 16/5/2017
# Plotting Multiple Simulations of a SIR Epidemic Model
#import numpy as np
import pandas as pd
#Choosing the values for c and r to study:
#contagion rate (c, columns) and removal rate (r, rows); the workbook is
#expected to hold one worksheet per (c, r) pair.
cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
            0.25, 0.5, 0.75, 1]
rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
            0.25, 0.5, 0.75, 1]
#NOTE(review): i counts processed sheets but is never read.
i = 0
#Result grid: one "last R value" per (r, c) combination.
lasts = pd.DataFrame(index=rvalues, columns = cvalues)
for cvar in cvalues:
    for rvar in rvalues:
        i += 1
        tblnm = 'c='+str(cvar)+'|r='+ str(rvar)
        #NOTE(review): `sheetname` was renamed `sheet_name` in pandas 0.21;
        #this script targets an older pandas.
        data = pd.read_excel('data/sphere_light.xlsx', sheetname = tblnm)
        print('retrieving last point for... '+str(tblnm))
        #Last recorded value of the averaged removed population.
        point = data['R_Avg'].iat[-1]
        print(point)
        #NOTE(review): set_value is deprecated in newer pandas (use .at).
        lasts.set_value(rvar,cvar,point)
        print(lasts)
print('The Final Table is...')
print(lasts)
print('Saving...')
lasts.to_csv('data/sphere_r.csv')
print('Saved!')
lasts.plot()
| {
"repo_name": "calatre/epidemics_network",
"path": "treat/SIR 1 get_last.py",
"copies": "1",
"size": "1066",
"license": "apache-2.0",
"hash": -7777139695356525000,
"line_mean": 28.6111111111,
"line_max": 73,
"alpha_frac": 0.552532833,
"autogenerated": false,
"ratio": 2.7760416666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8653080825917454,
"avg_score": 0.03509873474984262,
"num_lines": 36
} |
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 24/5/2017
# Plotting Multiple Simulations of a SIR Epidemic Model
# Based on the US unemployment example on Bokeh Website:
# http://bokeh.pydata.org/en/latest/docs/gallery/unemployment.html
import pandas as pd
from math import pi
from bokeh.io import show, save
from bokeh.models import (
ColumnDataSource,
HoverTool,
LinearColorMapper,
BasicTicker,
FixedTicker,
ColorBar,
)
from bokeh.plotting import figure
import bokeh.palettes as palet
#Choosing the values for c and r to study
#Choosing the values for c and r to study
cvalues = [0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]#
rvalues = [0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]#
#Lets open our previously generated maxima csv file
maxs = pd.read_csv('infection maxima.csv', index_col = 0)
print(maxs) #to check it
# reshape to 1D array
df = pd.DataFrame(maxs.stack(), columns=['Infected']).reset_index()
print(df) #lets se how it looks like
# BUG FIX: DataFrame.round returns a new frame; the original discarded the
# result, so the intended rounding never happened.
df = df.round(1) #making sure there's no weird huge numbers
#preparing the colors to be used
colors = palet.magma(128)
mapper = LinearColorMapper(palette=colors,
                           low=df.Infected.min(), high=df.Infected.max())
#and define our data as the source for the bokeh plot
source = ColumnDataSource(df)
#more tools can be added here
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
#starting the whole 'window'
p = figure(title="Infected Maxima for a SIR Epidemic Model",
           x_axis_label = 'Removal Rate', y_axis_label = 'Contagion Rate',
           x_axis_location="above", plot_width=1024, plot_height=1024,
           tools=TOOLS, toolbar_location='below')
#further customization of it
p.title.text_font_size= "30pt"
p.axis.axis_label_text_font_size= "20pt"
p.axis.major_label_text_font_size = "10pt"
p.axis.major_label_standoff = 3
p.xaxis[0].ticker=FixedTicker(ticks=cvalues)
p.yaxis[0].ticker=FixedTicker(ticks=cvalues)
p.xaxis.major_label_orientation = pi / 2
#now deciding on the gliphs to represent our data,
#circles are simpler and avoid trouble
p.circle(x="level_0", y="level_1", size=10,
         source=source,
         fill_color={'field': 'Infected', 'transform': mapper},
         line_color=None)
#puting a colorbar next to it, to interpret our colors
color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size="7pt",
                     ticker=BasicTicker(desired_num_ticks=10),
                     label_standoff=6, border_line_color=None, location=(0, 0))
p.add_layout(color_bar, 'right')
#and whenever we hover the mouse over a data point we get the info on it
p.select_one(HoverTool).tooltips = [
     ('removal | contagion', '@level_0{1.111} | @level_1{1.111}'),
     ('Infected', '@Infected{1.1}'),
]
#Show the plot and save it.
# BUG FIX: the original called `how(p)` (NameError); `show` is what the
# bokeh.io import at the top of this file provides.
show(p)
save(p, filename = 'SIR_bokeh_interactive_plot.html', title = 'SIR Epidemic Plot')
save(p, filename = 'SIR_bokeh_interactive_plot.html', title = 'SIR Epidemic Plot') | {
"repo_name": "calatre/epidemics_network",
"path": "plt/SIR 1 plot_maxs_bokeh.py",
"copies": "1",
"size": "2873",
"license": "apache-2.0",
"hash": 2585485054602428400,
"line_mean": 33.6265060241,
"line_max": 87,
"alpha_frac": 0.6912634876,
"autogenerated": false,
"ratio": 2.9049544994944387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4096217987094439,
"avg_score": null,
"num_lines": null
} |
# 2016-7-1
# build by qianqians
# gencaller
import tools
def gencaller(module_name, funcs):
    """Generate the C++ caller-side stub header for *module_name*.

    funcs: parsed prototypes; each entry's element [1] is the method name
    and element [2] the list of juggle argument type names.
    Returns the complete header file contents as one string.

    Improvement: pieces are collected in a list and joined once instead of
    repeated string concatenation.
    """
    parts = ["/*this caller file is codegen by juggle for c++*/\n"]
    # Include guard and required headers.
    parts.append("#ifndef _" + module_name + "_caller_h\n")
    parts.append("#define _" + module_name + "_caller_h\n")
    parts.append("#include <sstream>\n")
    parts.append("#include <tuple>\n")
    parts.append("#include <string>\n")
    parts.append("#include \"Icaller.h\"\n")
    parts.append("#include \"Ichannel.h\"\n")
    parts.append("#include <any>\n")
    parts.append("#include <JsonParse.h>\n")
    parts.append("#include <memory>\n\n")
    parts.append("namespace caller\n")
    parts.append("{\n")
    parts.append("class " + module_name + " : public juggle::Icaller {\n")
    parts.append("public:\n")
    parts.append("    " + module_name + "(std::shared_ptr<juggle::Ichannel> _ch) : Icaller(_ch) {\n")
    parts.append("        module_name = \"" + module_name + "\";\n")
    parts.append("    }\n\n")
    parts.append("    ~" + module_name + "(){\n")
    parts.append("    }\n\n")
    for proto in funcs:
        name, argtypes = proto[1], proto[2]
        # Signature: one argvN parameter per declared type, comma separated.
        params = ",".join(tools.gentypetocpp(argtype) + " argv" + str(idx)
                          for idx, argtype in enumerate(argtypes))
        parts.append("    void " + name + "(" + params + "){\n")
        # Marshal [module, method, [args...]] into a JSON array and push it
        # onto the channel.
        parts.append("        auto v = Fossilizid::JsonParse::Make_JsonArray();\n")
        parts.append("        v->push_back(\"" + module_name + "\");\n")
        parts.append("        v->push_back(\"" + name + "\");\n")
        parts.append("        v->push_back(Fossilizid::JsonParse::Make_JsonArray());\n")
        for idx in range(len(argtypes)):
            parts.append("        (std::any_cast<Fossilizid::JsonParse::JsonArray>((*v)[2]))->push_back(argv" + str(idx) + ");\n")
        parts.append("        ch->push(v);\n")
        parts.append("    }\n\n")
    parts.append("};\n\n")
    parts.append("}\n\n")
    parts.append("#endif\n")
    return "".join(parts)
| {
"repo_name": "qianqians/juggle",
"path": "gen/c++/gencaller.py",
"copies": "1",
"size": "2097",
"license": "mit",
"hash": 6737726920838020000,
"line_mean": 38.5660377358,
"line_max": 138,
"alpha_frac": 0.4124940391,
"autogenerated": false,
"ratio": 3.4320785597381342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4344572598838134,
"avg_score": null,
"num_lines": null
} |
# 2016-7-1
# build by qianqians
# gencaller
import tools
def gencaller(module_name, funcs):
    """Render the C# caller-side stub class for *module_name*.

    Each entry of *funcs* is a parsed prototype whose element [1] is the
    method name and element [2] the list of juggle argument types.
    Returns the generated C# source as one string.
    """
    out = [
        "/*this caller file is codegen by juggle for c#*/\n",
        "using System;\n",
        "using System.Collections;\n",
        "using System.IO;\n\n",
        "namespace caller\n",
        "{\n",
        "    public class " + module_name + " : juggle.Icaller \n",
        "    {\n",
        "        public " + module_name + "(juggle.Ichannel _ch) : base(_ch)\n",
        "        {\n",
        "            module_name = \"" + module_name + "\";\n",
        "        }\n\n",
    ]
    for proto in funcs:
        name, argtypes = proto[1], proto[2]
        # One void method per protocol function; arguments are argv0..argvN.
        sig = ",".join(tools.gentypetocsharp(argtype) + " argv" + str(idx)
                       for idx, argtype in enumerate(argtypes))
        out.append("        public void " + name + "(" + sig + ")\n")
        out.append("        {\n")
        out.append("            ArrayList _argv = new ArrayList();\n")
        for idx in range(len(argtypes)):
            out.append("            _argv.Add(argv" + str(idx) + ");\n")
        out.append("            call_module_method(\"" + name + "\", _argv);\n")
        out.append("        }\n\n")
    out.append("    }\n")
    out.append("}\n")
    return "".join(out)
| {
"repo_name": "qianqians/juggle",
"path": "gen/csharp/gencaller.py",
"copies": "2",
"size": "1483",
"license": "mit",
"hash": 7652083402910832000,
"line_mean": 35.1707317073,
"line_max": 87,
"alpha_frac": 0.3769386379,
"autogenerated": false,
"ratio": 3.744949494949495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5121888132849495,
"avg_score": null,
"num_lines": null
} |
# 2016-7-1
# build by qianqians
# gencharp
import sys
sys.path.append("./parser")
sys.path.append("./gen/csharp")
import os
import gencaller
import genmodule
import jparser
def gen(inputdir, outputdir):
    """Generate C# caller/module stubs for every .juggle file in *inputdir*.

    Output is written to <outputdir>/caller/<module>caller.cs and
    <outputdir>/module/<module>module.cs.
    Raises ValueError when the same module name is defined twice.
    """
    defmodulelist = []
    # Make sure the output directory tree exists.
    if not os.path.isdir(outputdir):
        os.mkdir(outputdir)
    if not os.path.isdir(outputdir + '//caller'):
        os.mkdir(outputdir + '//caller')
    if not os.path.isdir(outputdir + '//module'):
        os.mkdir(outputdir + '//module')
    for filename in os.listdir(inputdir):
        fex = os.path.splitext(filename)[1]
        if fex != '.juggle':
            continue
        # BUG FIX: the original called `file.close` without parentheses, so
        # no handle was ever closed; context managers close them reliably.
        with open(inputdir + '//' + filename, 'r') as infile:
            genfilestr = infile.readlines()
        keydict = jparser.parser(genfilestr)
        for module_name, funcs in keydict.items():
            # BUG FIX: raising a plain string is a TypeError in Python 3;
            # raise a proper exception type instead.
            if module_name in defmodulelist:
                raise ValueError('redefined module %s' % module_name)
            defmodulelist.append(module_name)
            callercode = gencaller.gencaller(module_name, funcs)
            with open(outputdir + '//caller//' + module_name + 'caller.cs', 'w') as outfile:
                outfile.write(callercode)
            modulecode = genmodule.genmodule(module_name, funcs)
            with open(outputdir + '//module//' + module_name + 'module.cs', 'w') as outfile:
                outfile.write(modulecode)
if __name__ == '__main__':
    # CLI usage: python gencsharp.py <input_dir> <output_dir>
    gen(sys.argv[1], sys.argv[2])
| {
"repo_name": "yinchunlong/abelkhan-1",
"path": "juggle/gencsharp.py",
"copies": "2",
"size": "1894",
"license": "mit",
"hash": 8161867277421184000,
"line_mean": 36.88,
"line_max": 102,
"alpha_frac": 0.454593453,
"autogenerated": false,
"ratio": 4.563855421686747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6018448874686747,
"avg_score": null,
"num_lines": null
} |
# 2016-7-1
# build by qianqians
# genmodule
import tools
def genmodule(module_name, funcs):
    """Render the C++ module-side (server) stub header for *module_name*.

    Each entry of *funcs* is a parsed prototype: element [1] is the method
    name, element [2] the list of juggle argument types.  Returns the
    generated header as one string.
    """
    out = ["/*this module file is codegen by juggle for c++*/\n"]
    # Include guard and headers.
    out.append("#ifndef _" + module_name + "_module_h\n")
    out.append("#define _" + module_name + "_module_h\n")
    out.append("#include \"Imodule.h\"\n")
    out.append("#include <memory>\n")
    out.append("#include <boost/signals2.hpp>\n")
    out.append("#include <JsonParse.h>\n")
    out.append("#include <string>\n\n")
    out.append("namespace module\n{\n")
    out.append("class " + module_name + " : public juggle::Imodule {\n")
    out.append("public:\n")
    # Constructor registers every protocol method in protcolcall_set.
    out.append("    " + module_name + "(){\n")
    out.append("        module_name = \"" + module_name + "\";\n")
    for proto in funcs:
        out.append("        protcolcall_set.insert(std::make_pair(\"" + proto[1] + "\", std::bind(&" + module_name + "::" + proto[1] + ", this, std::placeholders::_1)));\n")
    out.append("    }\n\n")
    out.append("    ~" + module_name + "(){\n")
    out.append("    }\n\n")
    for proto in funcs:
        name, argtypes = proto[1], proto[2]
        # One boost signal plus a dispatcher that casts the JSON arguments.
        sig_types = ", ".join(tools.gentypetocpp(argtype) for argtype in argtypes)
        out.append("    boost::signals2::signal<void(" + sig_types + ") > sig_" + name + ";\n")
        out.append("    void " + name + "(Fossilizid::JsonParse::JsonArray _event){\n")
        casts = ", ".join("\n            std::any_cast<" + tools.gentypetocpp(argtype) + ">((*_event)[" + str(idx) + "])"
                          for idx, argtype in enumerate(argtypes))
        out.append("        sig_" + name + "(" + casts + ");\n")
        out.append("    }\n\n")
    out.append("};\n\n")
    out.append("}\n\n")
    out.append("#endif\n")
    return "".join(out)
| {
"repo_name": "qianqians/juggle",
"path": "gen/c++/genmodule.py",
"copies": "1",
"size": "2056",
"license": "mit",
"hash": 3931534873124948500,
"line_mean": 37.0740740741,
"line_max": 169,
"alpha_frac": 0.388618677,
"autogenerated": false,
"ratio": 3.557093425605536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44457121026055363,
"avg_score": null,
"num_lines": null
} |
# 2016-7-1
# build by qianqians
# genmodule
import tools
def genmodule(module_name, funcs):
    # Emit a C# class for one juggle module: a delegate + event per RPC
    # function and a dispatcher method that unpacks an ArrayList of
    # arguments (tools.gentypetocsharp maps IDL types to C# types).
    # funcs: iterable of tuples where [1] is the function name and [2] the
    # list of IDL argument types.
    code = "/*this module file is codegen by juggle for c#*/\n"
    code += "using System;\n"
    code += "using System.Collections;\n"
    code += "using System.Collections.Generic;\n\n"
    code += "namespace module\n{\n"
    code += "    public class " + module_name + " : juggle.Imodule \n    {\n"
    code += "        public " + module_name + "()\n        {\n"
    code += "            module_name = \"" + module_name + "\";\n"
    code += "        }\n\n"
    for i in funcs:
        code += "        public delegate void " + i[1] + "handle("
        count = 0
        for item in i[2]:
            code += tools.gentypetocsharp(item) + " argv" + str(count)
            count = count + 1
            if count < len(i[2]):
                code += ", "
        code += ");\n"
        code += "        public event " + i[1] + "handle on" + i[1] + ";\n"
        code += "        public void " + i[1] + "(ArrayList _event)\n        {\n"
        # Only dispatch when a handler is attached.
        code += "            if(on" + i[1] + " != null)\n            {\n"
        count = 0
        for item in i[2]:
            # Cast each positional argument out of the ArrayList.
            code += "                var argv" + str(count) + " = ((" + tools.gentypetocsharp(item) + ")_event[" + str(count) + "]);\n"
            count = count + 1
        code += "                on" + i[1] + "("
        count = 0
        for item in i[2]:
            code += " argv" + str(count)
            count = count + 1
            if count < len(i[2]):
                code += ", "
        code += ");\n"
        code += "            }\n"
        code += "        }\n\n"
    code += "    }\n"
    code += "}\n"
    return code
| {
"repo_name": "yinchunlong/abelkhan-1",
"path": "juggle/gen/csharp/genmodule.py",
"copies": "2",
"size": "1981",
"license": "mit",
"hash": -6751743012167336000,
"line_mean": 38.62,
"line_max": 147,
"alpha_frac": 0.3513377082,
"autogenerated": false,
"ratio": 3.9305555555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5281893263755556,
"avg_score": null,
"num_lines": null
} |
# 2016-7-4
# build by qianqians
# gencpp
import sys
sys.path.append("./parser")
sys.path.append("./gen/c++")
import os
import gencaller
import genmodule
import jparser
def gen(inputdir, outputdir):
    """Generate C++ caller/module headers for every .juggle file in inputdir.

    Output goes to outputdir//caller and outputdir//module (created if
    absent), one header per module defined in the .juggle sources.

    Raises
    ------
    ValueError
        If the same module name is defined more than once across inputs.
        (Fix: the original raised a plain string, which is a TypeError on
        Python 3.)
    """
    defmodulelist = []
    for subdir in (outputdir, outputdir + '//caller', outputdir + '//module'):
        if not os.path.isdir(subdir):
            os.mkdir(subdir)
    for filename in os.listdir(inputdir):
        if os.path.splitext(filename)[1] != '.juggle':
            continue
        # Fix: the original wrote ``file.close`` (attribute access, never
        # called), leaking every handle; ``with`` closes deterministically.
        with open(inputdir + '//' + filename, 'r') as src:
            genfilestr = src.readlines()
        keydict = jparser.parser(genfilestr)
        for module_name, funcs in keydict.items():
            if module_name in defmodulelist:
                raise ValueError('redefined module %s' % module_name)
            defmodulelist.append(module_name)
            callercode = gencaller.gencaller(module_name, funcs)
            with open(outputdir + '//caller//' + module_name + 'caller.h', 'w') as out:
                out.write(callercode)
            modulecode = genmodule.genmodule(module_name, funcs)
            with open(outputdir + '//module//' + module_name + 'module.h', 'w') as out:
                out.write(modulecode)

if __name__ == '__main__':
    # Usage: python gencpp.py <inputdir> <outputdir>
    gen(sys.argv[1], sys.argv[2])
| {
"repo_name": "qianqians/juggle",
"path": "gencpp.py",
"copies": "2",
"size": "1887",
"license": "mit",
"hash": -7196260972915257000,
"line_mean": 36.74,
"line_max": 101,
"alpha_frac": 0.4515103339,
"autogenerated": false,
"ratio": 4.557971014492754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014439680060564501,
"num_lines": 50
} |
"""2016 advent of code day 2.
The document goes on to explain that each button to be pressed can be found by
starting on the previous button and moving to adjacent buttons on the keypad:
U moves up, D moves down, L moves left, and R moves right. Each line of
instructions corresponds to one button, starting at the previous button (or,
for the first line, the "5" button); press whatever button you're on at the end
of each line. If a move doesn't lead to a button, ignore it.
You can't hold it much longer, so you decide to figure out the code as you walk
to the bathroom. You picture a keypad like this:
1 2 3
4 5 6
7 8 9
Suppose your instructions are:
ULL
RRDDD
LURDL
UUUUD
You start at "5" and move up (to "2"), left (to "1"), and left (you can't, and
stay on "1"), so the first button is 1.
Starting from the previous button ("1"), you move right twice (to "3") and then
down three times (stopping at "9" after two moves and ignoring the third),
ending up with 9.
Continuing from "9", you move left, up, right, down, and left, ending with 8.
Finally, you move up four times (stopping at "2"), then down once, ending with
5.
So, in this example, the bathroom code is 1985.
Your puzzle input is the instructions from the document you found at the front
desk. What is the bathroom code?
"""
import string
import sys
def move(movements, cur):
    """Apply a string of U/D/L/R moves to keypad position ``cur`` in place.

    Each step shifts one axis by the offset recorded in the module-level
    ``dirs`` table, then clamps the coordinate to the 3x3 grid (0..2) so
    moves off the edge are ignored.  Returns the (mutated) position.
    """
    for step in movements:
        axis, delta = dirs[step]
        cur[axis] = max(0, min(2, cur[axis] + delta))
    return cur
# Read the puzzle input: one line of U/D/L/R moves per keypad digit.
with open(sys.argv[1]) as f:
    lines = f.read().rstrip("\n").split("\n")

# 3x3 keypad indexed as keypad[row][col].
keypad = (("1", "2", "3"), ("4", "5", "6"), ("7", "8", "9"))
# Use dirs to track available keypad directions and their impact on movement
# through the keypad hash
dirs = {"U": [0, -1], "D": [0, 1], "L": [1, -1], "R": [1, 1]}
keyseq = []
# Start on the "5" key (row 1, col 1), per the puzzle statement.
cur_loc = [1, 1]
for keydir in lines:
    # print(cur_loc)
    cur_loc = move(keydir.strip(), cur_loc)
    keyseq.append(keypad[cur_loc[0]][cur_loc[1]])
# The bathroom code is the digit reached at the end of each line.
print("".join(keyseq))
| {
"repo_name": "shaggy245/adventofcode",
"path": "day02/day2-1.py",
"copies": "1",
"size": "2171",
"license": "mit",
"hash": 1472131348718046500,
"line_mean": 30.9264705882,
"line_max": 79,
"alpha_frac": 0.6623675725,
"autogenerated": false,
"ratio": 3.1926470588235296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43550146313235294,
"avg_score": null,
"num_lines": null
} |
"""2016 advent of code day 2.
You finally arrive at the bathroom (it's a several minute walk from the lobby
so visitors can behold the many fancy conference rooms and water coolers on
this floor) and go to punch in the code. Much to your bladder's dismay, the
keypad is not at all like you imagined it. Instead, you are confronted with
the result of hundreds of man-hours of bathroom-keypad-design meetings:
1
2 3 4
5 6 7 8 9
A B C
D
You still start at "5" and stop when you're at an edge, but given the same
instructions as above, the outcome is very different:
You start at "5" and don't move at all (up and left are both edges), ending
at 5.
Continuing from "5", you move right twice and down three times (through "6",
"7", "B", "D", "D"), ending at D.
Then, from "D", you move five more times (through "D", "B", "C", "C", "B"),
ending at B.
Finally, after five more moves, you end at 3.
So, given the actual keypad layout, the code would be 5DB3.
Using the same instructions in your puzzle input, what is the correct bathroom
code?
"""
import string
import sys
def move(movements, cur):
    """Apply U/D/L/R moves to ``cur`` on the diamond-shaped keypad.

    A step is first clamped to the 5x5 grid (0..4) and then rejected
    entirely if it lands on a "*" filler cell of the module-level
    ``keypad``; accepted steps update ``cur``.  Returns the final position.
    """
    trial = cur[:]
    for step in movements:
        axis, delta = dirs[step]
        trial[axis] = max(0, min(4, cur[axis] + delta))
        if keypad[trial[0]][trial[1]] == "*":
            # Off the diamond: discard the step, resync trial with cur.
            trial = cur[:]
        else:
            cur = trial[:]
    return cur
# Read the puzzle input: one line of U/D/L/R moves per key press.
with open(sys.argv[1]) as f:
    lines = f.read().rstrip("\n").split("\n")

# Diamond keypad embedded in a 5x5 grid; "*" marks unusable cells.
keypad = (("*", "*", "1", "*", "*"), ("*", "2", "3", "4", "*"),
          ("5", "6", "7", "8", "9"), ("*", "A", "B", "C", "*"),
          ("*", "*", "D", "*", "*"))
# Use dirs to track available keypad directions and their impact on movement
# through the keypad hash
dirs = {"U": [0, -1], "D": [0, 1], "L": [1, -1], "R": [1, 1]}
keyseq = []
# Start on the "5" key (row 2, col 0), per the puzzle statement.
cur_loc = [2, 0]
for keydir in lines:
    # print(cur_loc)
    cur_loc = move(keydir.strip(), cur_loc)
    keyseq.append(keypad[cur_loc[0]][cur_loc[1]])
print("".join(keyseq))
| {
"repo_name": "shaggy245/adventofcode",
"path": "day02/day2-2.py",
"copies": "1",
"size": "2214",
"license": "mit",
"hash": -4353596083384190500,
"line_mean": 30.1830985915,
"line_max": 78,
"alpha_frac": 0.5975609756,
"autogenerated": false,
"ratio": 3.016348773841962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4113909749441962,
"avg_score": null,
"num_lines": null
} |
"""2016 Q3: prime connections"""
from math import log2
from collections import deque
def find_primes(prime_limit):
    """Return the set of all primes <= prime_limit (sieve of Eratosthenes).

    Fixes over the original:
    - ``prime_limit < 1`` no longer raises IndexError (the old sieve
      unconditionally wrote index 1); anything below 2 yields an empty set.
    - The inner loop starts at i*i instead of 2*i: every smaller multiple
      has a smaller prime factor and was already crossed off.
    """
    if prime_limit < 2:
        return set()
    primes = set()
    # is_candidate[n] stays True while n is still possibly prime.
    is_candidate = [True] * (prime_limit + 1)
    is_candidate[0] = is_candidate[1] = False
    for i in range(2, prime_limit + 1):
        if not is_candidate[i]:
            continue
        primes.add(i)
        for j in range(i * i, prime_limit + 1, i):
            is_candidate[j] = False
    return primes
def shortest_path(primes, start, end, max_prime):
    """BFS over the graph of primes whose members differ by a power of two.

    Returns the number of primes on the shortest chain from ``start`` to
    ``end`` (inclusive of both), or None if no chain exists.  NOTE: the
    set ``primes`` is consumed — visited entries are removed from it.
    """
    bit_count = int(log2(max_prime)) + 1
    frontier = deque([(1, start)])
    primes.remove(start)
    while frontier:
        path_len, current = frontier.popleft()
        if current == end:
            return path_len
        for shift in range(bit_count):
            step = 1 << shift
            # Try stepping up, then down, by this power of two.
            for neighbour in (current + step, current - step):
                if neighbour in primes:
                    primes.remove(neighbour)
                    frontier.append((path_len + 1, neighbour))
    print("Not found")
    return None
def solve(prime_limit, start, end):
    """Answer 2016 Q3: sieve all primes <= prime_limit, then BFS for the
    minimum chain length from start to end where consecutive primes differ
    by a power of two.
    """
    return shortest_path(find_primes(prime_limit), start, end, prime_limit)
if __name__ == "__main__":
    # Input: a single line "prime_limit start end", e.g. "100 2 3".
    prime_limit, start, end = tuple(int(x) for x in input().split())
    print(solve(prime_limit, start, end))
| {
"repo_name": "matthewelse/british-informatics-olympiad",
"path": "2016/q3.py",
"copies": "1",
"size": "1713",
"license": "mit",
"hash": -4659171648748305000,
"line_mean": 23.1267605634,
"line_max": 77,
"alpha_frac": 0.5545826036,
"autogenerated": false,
"ratio": 3.4397590361445785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44943416397445785,
"avg_score": null,
"num_lines": null
} |
# 2016. Vlachos Group Geun Ho Gu. University of Delaware.
"""
==========
Exceptions
==========
All the exceptions are listed here.
"""
import re
drawmolonerror = 0
if drawmolonerror:
from .DrawMol import moltosvg
__all__ = []
class SemiEmpiricalMethodError(Exception):
    """Exception raised for errors related to semi-empirical methods."""


class GroupSyntaxError(Exception):
    """Exception raised when the group parser encounters invalid input."""


class GroupSchemeError(Exception):
    """Exception raised when a group scheme-related error occurs."""
class GroupMissingDataError(Exception):
    """
    Exception raised when a group library lacks the requested data for
    some group.

    Attributes
    ----------
    groups : list of :class:`chemtk.groupadd.Group`
        Groups for which data are missing.
    property_set_name : str
        Name of property set for which data are missing.
    """
    def __init__(self, groups, property_set_name):
        self.groups = groups
        self.property_set_name = property_set_name

    def __str__(self):
        name = str(self.property_set_name)
        if len(self.groups) == 1:
            return ('Library does not define property set %r for group %s'
                    % (name, str(self.groups[0])))
        listing = ', '.join(repr(str(group)) for group in self.groups)
        return ('Library does not define property set %r for groups { %s }'
                % (name, listing))
class PatternMatchError(Exception):
    """
    Exception raised when no known pattern matches part of a chemical
    structure.

    The atom and (possibly) the bond at which the matching failed are stored
    as attributes :attr:`atom` and :attr:`bond`.

    For center pattern matches, :attr:`atom` specifies the atom at which the
    matching failed and :attr:`bond` is None.

    For peripheral pattern matches, :attr:`atom` is the atom adjacent to
    the bond (:attr:`bond`), that is being considered in the peripheral match.

    Attributes
    ----------
    atom : :class:`chemtk.structure.Atom`
        The afflicted atom
    bond : :class:`chemtk.structure.Bond`
        The afflicted bond (possibly None)
    """
    def __init__(self, mesg, atom):
        self.mesg = mesg
        self.atom = atom
        # Owning molecule of the failing atom (RDKit-style API -- presumably
        # an rdkit Mol; confirm against callers).
        self.mol = atom.GetOwningMol()
        # Draw immediately at construction time; a no-op unless the
        # module-level drawmolonerror flag enabled the DrawMol import.
        self.visualize()

    def visualize(self, *args, **kwargs):
        # Guarded by the module-level flag so headless runs skip rendering.
        if drawmolonerror:
            moltosvg(self.mol, highlight=[self.atom.GetIdx()], kekulize=False)

    def __str__(self):
        return '%s at atom number %s' % (self.mesg, self.atom.GetIdx())
__all__ += ['SemiEmpiricalMethodError', 'GroupSyntaxError', 'GroupSchemeError',
'GroupMissingDataError', 'PatternMatchError']
# RING-related errors.
class RINGError(Exception):
    """Base exception for Parser errors.

    All other Parser-related exceptions inherit from this.
    """


class RINGSyntaxError(RINGError):
    """
    Exception raised due to invalid (but syntactically correct) input.

    Tracks the furthest failure position in the input stream and the set of
    tokens that would have been acceptable there.
    """
    # Splits the raw input stream into lines for the error excerpt.
    _parse = re.compile('[\n]')

    def __init__(self, tok, lineno, colno, stream):
        self.toks = set([tok])
        self.lineno = lineno
        self.colno = colno
        self.stream = stream

    def update(self, other):
        """Merge another syntax error, keeping the furthest failure point.

        If ``other`` failed later in the stream, adopt its position and
        expected tokens; if it failed at the same spot, union the token sets.
        """
        if (self.lineno < other.lineno or
                (self.lineno == other.lineno and self.colno < other.colno)):
            self.lineno = other.lineno
            self.colno = other.colno
            self.toks = other.toks.copy()
        elif (self.lineno == other.lineno and self.colno == other.colno):
            self.toks |= other.toks

    def __str__(self):
        # "Expected a | b at line L column C:" followed by the offending
        # line and a caret under the failure column.
        s = 'Expected ' + ' | '.join(
            sorted(str(tok) for tok in self.toks if tok))\
            + ' at line %d column %d:\n' % (self.lineno, self.colno)
        s += self._parse.split(self.stream)[self.lineno-1] + '\n'
        s += ' '*(self.colno-1) + '^\n'
        return s

    def __repr__(self):
        # Fix: the original referenced the nonexistent attribute
        # ``self.smiles``, so repr() raised AttributeError; the stored
        # attribute is ``self.stream``.
        return '%s(%r, %r, %r, %r)' % (
            type(self).__name__, self.toks, self.lineno, self.colno,
            self.stream)
class RINGReaderError(RINGError):
    """
    Exception raised when input does not conform to RING syntax.
    """
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message

    def __repr__(self):
        # Fix: the original format '%s(%r, %r)' had three placeholders but
        # only two arguments, so calling repr() raised TypeError.
        return '%s(%r)' % (type(self).__name__, self.message)
class MolQueryError(Exception):
    """
    Exception raised when a molecule query fails or is malformed.
    """
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message

    def __repr__(self):
        # Fix: the original format '%s(%r, %r)' had three placeholders but
        # only two arguments, so calling repr() raised TypeError.
        return '%s(%r)' % (type(self).__name__, self.message)
class ReactionQueryError(Exception):
    """
    Exception raised when a reaction query fails or is malformed.
    """
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message

    def __repr__(self):
        # Fix: the original format '%s(%r, %r)' had three placeholders but
        # only two arguments, so calling repr() raised TypeError.
        return '%s(%r)' % (type(self).__name__, self.message)
__all__ += ['RINGError', 'RINGSyntaxError', 'RINGReaderError',
'MolQueryError', 'ReactionQueryError']
# Units errors.
class UnitsParseError(Exception):
    """Error from providing bad input to the units parser."""


class UnitsError(Exception):
    """Error from an operation involving incompatible physical units."""
__all__ += ['UnitsParseError', 'UnitsError']
# Generic errors.
class OutsideCorrelationError(Exception):
    """Error from an attempt to evaluate a correlation outside its valid range."""


class ReadOnlyDataError(Exception):
    """Error raised by an attempt to modify read-only data."""


class IncompleteDataError(Exception):
    """Error raised when a computation requires more data than is available."""


class IncompleteDataWarning(Warning):
    """
    Warning issued when a computation proceeds using less data than is optimal.

    This is raised when the absence of certain non-required data may lead to
    potentially severe assumptions in later computations.
    """
__all__ += ['OutsideCorrelationError', 'ReadOnlyDataError',
'IncompleteDataError', 'IncompleteDataWarning']
| {
"repo_name": "VlachosGroup/VlachosGroupAdditivity",
"path": "pgradd/Error.py",
"copies": "1",
"size": "6168",
"license": "mit",
"hash": -4526702245921388500,
"line_mean": 27.1643835616,
"line_max": 79,
"alpha_frac": 0.6214332036,
"autogenerated": false,
"ratio": 3.9361837906828336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5057616994282833,
"avg_score": null,
"num_lines": null
} |
# 20170126 Fixed the wrong last trading day of spot month
import ael
import acm
import HTI_Util
import HTI_FeedTrade_EDD_Util
import os
import sys, traceback
import datetime
import sqlite3
import csv
import decimal
import account_journal
import win32com.client
from collections import defaultdict
ael_variables = [['posdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Position Date', None, 1], \
['pfs', 'Portfolio(s)', 'string', HTI_Util.getAllPortfolios(), 'EDD Warrant Trading,EDD CBBC Trading,EDD Listed Stock Options MM,EDD Hedging,EDD Market Making 1,EDD Market Making 2,EDD Warrant,EDD Flow Strategy 1,EDD Flow Strategy 2,EDD HFT 1,EDD HFT 2,EDD HFT 3,EDD HFT 4,EDD OMM,EDD OTC OMM', 1, 1, 'Portfolio(s)', None, 1], \
['acq', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
['prd', 'Product Type(s)', 'string', HTI_Util.getAllInstypes(), 'Option', 1, 1, 'Product Type(s)', None, 1], \
['tfs', 'Trade Filter', 'string', None, 'TF_EDD_OPTION_EXERCISE', 0, 0, 'Trade Filter', None, 1], \
['filename_acc', 'Account List', 'string', None, 'S:\\Report\\account_list.csv', 1, 0, 'Account List', None, 1], \
['filename_soe', 'Spot Options Exercise', 'string', None, 'D:\\temp\\option_exercise_YYYYMMDD.xlsx', 1, 0, 'Spot Option Exercise', None, 1]]
def adapt_decimal(d):
    """Serialize a Decimal as text so sqlite3 can store it."""
    return str(d)

def convert_decimal(s):
    """Deserialize a DECTEXT column value back into a Decimal."""
    return decimal.Decimal(s)

def db_cur():
    """Open an in-memory sqlite3 connection with Decimal round-tripping.

    Registers the Decimal adapter/converter pair, enables declared-type
    detection, and installs sqlite3.Row for name-based column access.
    Returns (connection, cursor).
    """
    sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
    sqlite3.register_converter("DECTEXT", convert_decimal)
    connection = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
    connection.row_factory = sqlite3.Row
    return connection, connection.cursor()
def create_tbl(cur, tbl_name, header, arr = [], index_arr = []):
    """Create ``tbl_name`` if absent (promoting the ``id`` column to PRIMARY
    KEY), add one index per name in ``index_arr``, then bulk-insert ``arr``.

    Fix: the original built the INSERT placeholders with ``question_marks()``,
    which is not defined anywhere in this module, so any non-empty ``arr``
    raised NameError; the placeholder string is now built locally.
    """
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    tbl_exists = cur.fetchone()
    if tbl_exists[0] == 0:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        for index in index_arr:
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    if arr != []:
        # One "?" per comma-separated column in the header.
        placeholders = ",".join(["?"] * len(header.split(",")))
        cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + placeholders + ")", arr)
    return
def arr_to_xlsx(filename, headers, arrs):
    # Write one worksheet per (header, rows) pair to an Excel workbook via
    # COM automation.  Cells whose text starts with '=' are written as
    # formulas; everything else as plain values.  Sheet 1 additionally gets
    # percent formatting, conditional highlighting of ATM rows (column Q)
    # and a green spot-price column (O).
    xl = win32com.client.Dispatch('Excel.Application')
    wb = xl.Workbooks.Add()
    for page, header in enumerate(headers, start=1):
        ws = wb.Worksheets(page)
        arr = arrs[page-1]
        for i, cell in enumerate(header.split(',')):
            ws.Cells(1,i+1).Value = cell
        for i, row in enumerate(arr, start=2):
            for j, cell in enumerate(row, start=1):
                if str(cell)[0] == '=':
                    ws.Cells(i,j).Formula = cell
                else:
                    ws.Cells(i,j).Value = cell
    # Moneyness column shown as percentage.
    wb.Worksheets(1).Range("P:P").NumberFormat = "0.00%"
    wb.Worksheets(1).Columns.AutoFit()
    # Highlight rows where the ITM/OTM flag (column Q) evaluates to "ATM".
    wb.Worksheets(1).Range("$A$1:$Z$2000").FormatConditions.Add(win32com.client.constants.xlExpression, "", '=$Q1="ATM" ')
    wb.Worksheets(1).Range("$A$1:$Z$2000").FormatConditions(1).Interior.ColorIndex = 6
    wb.Worksheets(1).Range("$A$1:$Z$2000").FormatConditions(1).StopIfTrue = False
    wb.Worksheets(1).Range("O:O").Interior.ColorIndex = 4
    # Suppress the overwrite prompt while saving.
    xl.DisplayAlerts = False
    wb.SaveAs(filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def option_exercise(cur, filename):
    # Build the spot-month option exercise workbook from the in-memory
    # ``ins`` table: sheet 1 lists every option with Excel formulas for
    # moneyness/ITM flags/exercise quantities, sheet 2 aggregates the
    # resulting stock positions per underlying.  Returns the sheet-1 rows.
    soe_header = "No,SS/Index,Underlying,Type,Call/Put,Exercise Style,A/E,Name,Expiry,Book Name,Quantity,Intrinsic,Board Lot,Strike,Spot Px,Moneyness,ITM/OTM,Buy/Sell Stock,Consideration,Action"
    pos_header = "Instrument,Quantity,Price,Consideration"
    soe_array = []
    pos_array = []
    cur.execute("select * from ins order by underlying")
    ins_rows = cur.fetchall()
    # Rows start at 2 because row 1 holds the header in the worksheet.
    for i, ins_row in enumerate(ins_rows, start=2):
        ss_index = "Index" if "Index" in ins_row["underlying"] else "Stock"
        ins = acm.FInstrument[str(ins_row["instrument"])]
        # Keep only the first word, e.g. "Physical Delivery" -> "Physical".
        settlement_type = str(ins.SettlementType()).split(' ')[0] if ' ' in ins.SettlementType() else ins.SettlementType()
        # Excel formulas reference the row they sit on (column letters match
        # soe_header positions).
        moneyness = """=IF(E%s="C",1,-1)*(O%s-N%s)/O%s""" % (i, i, i, i)
        itm_otm = """=IF(P%s>0.015,"ITM",IF(P%s<-0.015,"OTM","ATM"))""" % (i, i)
        bs_qty = """=IF(AND(K%s<>0,Q%s<>"OTM",F%s="Physical"),IF(E%s="C",1,-1)*K%s*M%s,0)""" % (i, i, i, i, i, i)
        consideration = """=R%s*N%s""" % (i, i)
        action = """=IF(R%s<>0,IF(R%s>0,"Buy","Sell")&" "&ABS(R%s)&" of "&C%s&"@$"&N%s) """ % (i, i, i, i, i)
        soe_array.append([i, ss_index, ins_row["underlying"], ins_row["instrument_type"], ins_row["call_put"][0], settlement_type, str(ins.ExerciseType())[0], ins_row["instrument"],
            ins_row["expiry"], ins_row["portfolio"], ins_row["quantity"], ins_row["market_price"], ins_row["conversion_factor"], ins_row["strike_price"], ins_row["underlying_price"],
            moneyness, itm_otm, bs_qty, consideration, action ])
    rng = len(soe_array)
    # Sheet 2: one row per distinct underlying, summing sheet-1 exercise
    # quantities via SUMIF and looking up the strike for consideration.
    cur.execute("select distinct underlying from ins order by underlying")
    for i, ins_row in enumerate(cur.fetchall(), start=2):
        sum_qty = """=SUMIF(Sheet1!$C$2:$C$2000,A%s,Sheet1!$R$2:$R$2000)""" % (i)
        price = """=VLOOKUP(A%s,Sheet1!$C$2:$O$2000,13,FALSE)""" % (i)
        last_consideration = """=B%s*C%s""" % (i, i)
        pos_array.append([ins_row["underlying"], sum_qty, price, last_consideration])
    arr_to_xlsx(filename, [soe_header, pos_header], [soe_array, pos_array])
    return soe_array
def ael_main(dict):
    # Entry point invoked by the ael GUI with the ael_variables values.
    # NOTE(review): the parameter shadows the ``dict`` builtin; renaming
    # would change the visible signature, so it is only flagged here.
    # Resolve the position date ('Today' or dd/mm/yyyy).
    asofdate = dict['posdate']
    if asofdate == 'Today':
        posdate = ael.date_today()
    else:
        asofdateArr = dict['posdate'].split('/')
        posdate = ael.date_from_ymd(int(asofdateArr[2]), int(asofdateArr[1]), int(asofdateArr[0]))
    # Next Hong Kong banking day after the position date (T+1).
    posdatetp1 = posdate
    hk_cal = acm.FCalendar.Select("name='Hong Kong'")[0]
    while True:
        posdatetp1 = posdatetp1.add_days(1)
        if not hk_cal.IsNonBankingDay(hk_cal, hk_cal, posdatetp1):
            break
    # Build the quoted, comma-separated acquirer list for the SQL IN clause.
    acq_array_list = dict['acq']
    acq_list = ''
    for acq in acq_array_list:
        if acq_list == '':
            acq_list = "'" + acq + "'"
        else:
            acq_list = acq_list + ",'" + acq + "'"
    # Same for the product-type list.
    prod_type_list = dict['prd']
    ptype_list = ''
    for ptype in prod_type_list:
        if ptype_list == '':
            ptype_list = "'" + ptype + "'"
        else:
            ptype_list = ptype_list + ",'" + ptype+ "'"
    # Expand each selected portfolio into its child portfolios.
    portfolios = dict['pfs']
    portfolioList2 = []
    pf_list = ''
    portfolioList2.extend(portfolios)
    for port in portfolioList2:
        prfid = port
        pfarr = []
        pPf = ael.Portfolio[prfid]
        HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
        if len(pfarr) > 0:
            for pf in pfarr:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + pf + "'"
        else:
            if len(pf_list) != 0:
                pf_list = pf_list + ','
            pf_list = pf_list + "'" + prfid + "'"
    # Select all live option trades expiring between the position date and
    # the end of the spot month, booked before T+1.
    strSql = """
                select t.trdnbr
                from instrument i, trade t, party acq, portfolio pf
                where i.insaddr = t.insaddr
                and t.status not in ('Void', 'Simulated')
                and t.acquirer_ptynbr = acq.ptynbr
                and t.prfnbr = pf.prfnbr
                and acq.ptyid in (@acquirer_list)
                and (i.exp_day >= '@dt' or i.exp_day = '0000-01-01')
                and (i.exp_day < '@spot_month')
                and t.time < '@d_tp1'
                and i.instype in (@ptype_list)
                and pf.prfid in (@portfolio_list)
        """
    strSql = strSql.replace('@acquirer_list', acq_list)
    strSql = strSql.replace('@portfolio_list', pf_list)
    strSql = strSql.replace('@d_tp1', posdatetp1.to_string('%Y-%m-%d'))
    strSql = strSql.replace('@dt', posdate.to_string('%Y-%m-%d'))
    strSql = strSql.replace('@ptype_list', ptype_list)
    # Last calendar day of the position month (fixed 20170126: previously
    # picked the wrong last trading day of the spot month).
    spot_month = posdate.first_day_of_month().add_months(1).first_day_of_month().add_days(-1)
    strSql = strSql.replace('@spot_month', spot_month.to_string('%Y-%m-%d'))
    print strSql
    # Store the query into the text object backing the trade filter so the
    # filter picks up the refreshed selection.
    trade_filter = dict['tfs']
    tobject = ael.TextObject.read('type="SQL Query" and name="%s"' % ("tf_edd_option_exercise_qry"))
    tobject_c = tobject.clone()
    tobject_c.set_text(strSql)
    tobject_c.commit()
    ael.poll()
    # Export the workbook, substituting the date into the file name.
    fileNameSoe = dict['filename_soe']
    fileNameSoe = fileNameSoe.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
    conn, cur = db_cur()
    ins_array = account_journal.ins_qty_and_avgprice(cur, trade_filter, posdate, dict)
    soe_array = option_exercise(cur, fileNameSoe)
    print "Finished Export"
    return
"repo_name": "frederick623/HTI",
"path": "option_exercise/spot_option_exercise.py",
"copies": "1",
"size": "8203",
"license": "apache-2.0",
"hash": -6215987411586748000,
"line_mean": 35.7892376682,
"line_max": 332,
"alpha_frac": 0.6411069121,
"autogenerated": false,
"ratio": 2.5836220472440945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37247289593440946,
"avg_score": null,
"num_lines": null
} |
# 20170226 Add more additional info
import acm
import ael
import HTI_Util
import HTI_FeedTrade_EDD_Util
import fnmatch
import datetime
import os
import sys
import csv
import re
import sqlite3
import math
import glob
import win32com.client
import traceback
ael_variables = [['asofdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \
['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
['counterparties', 'Counterparty(s)', 'string', HTI_Util.getAllParties(), None, 0, 1, 'Counterparty(s)', None, 1], \
['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), 'EDD Deltaone', 1, 1, 'Portfolio', None, 1], \
['currclspricemkt', 'Current Closing Price Market', 'string', None, 'Bloomberg_5PM', 1, 0, 'Current Closing Price Market', None, 1],
['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'Bloomberg_5PM_Cls', 1, 0, 'Historical Closing Price Market', None, 1],
['pb_trd_file', 'PB Trade File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\FA_Trade_Import\\pb_to_fa_YYYYMMDD.csv', 1, 0, 'PB Trade File', None, 1],
['loan_xls_template', 'Loan Template', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\template\\ExcelUpload - Cash Entry.xlsm', 1, 0, 'Loan Template', None, 1],
['loan_xls_output', 'Loan Output', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\ExcelUpload - Cash Entry YYYYMMDD.xlsm', 1, 0, 'Loan Output', None, 1],
['ss_bb_output', 'SS/BB Output', 'string', None, 'S:\\Prime Brokerage (PB)\\Tools\\Stock Loan Collateral\\ss_bb_trd_YYYYMMDD.xlsx', 1, 0, 'SS/BB Output', None, 1],
['base_ccy', 'Base Ccy', 'string', None, 'HKD', 1, 0, 'Base Ccy', None, 1]]
def question_marks(st):
    """Build a '?,?,...' placeholder string, one slot per comma-separated field."""
    return ",".join("?" for _ in st.split(","))

def db_cur(source = ":memory:"):
    """Open a sqlite3 connection (in-memory by default) with declared-type
    detection and sqlite3.Row name-based access.  Returns (conn, cursor)."""
    conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
    conn.row_factory = sqlite3.Row
    return conn, conn.cursor()

def create_tbl(cur, tbl_name, header, arr = None, index_arr = None):
    """Create ``tbl_name`` if absent (promoting the ``id`` column to PRIMARY
    KEY) with one index per name in ``index_arr``, then bulk-insert ``arr``."""
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    if cur.fetchone()[0] == 0:
        columns = header.replace("id,", "id PRIMARY KEY,")
        cur.execute("CREATE TABLE %s (%s );" % (tbl_name, columns))
        for idx in (index_arr or []):
            cur.execute("CREATE INDEX %s_%s ON %s (%s);" % (tbl_name, idx, tbl_name, idx))
    if arr is not None:
        cur.executemany("INSERT INTO %s VALUES (%s)" % (tbl_name, question_marks(header)), arr)
    return
def getTRSUnderlying(acm_ins):
    """Return the floating-rate reference (underlying) instrument of the
    first receive leg of a total-return swap, or None when no receive leg
    exists.

    Fix: removed the unused local ``bbticker`` left over from a copy of
    getUndInstrumentBBTicker.
    """
    acm_und_ins = None
    for acm_leg in acm_ins.Legs():
        if acm_leg.PayLeg() == False:
            acm_und_ins = acm_leg.FloatRateReference()
            break
    return acm_und_ins
def getUndInstrumentBBTicker(acm_ins):
    """Return the stripped BB_TICKER alias of a TRS's underlying instrument,
    or '' when there is no underlying or no such alias."""
    underlying = getTRSUnderlying(acm_ins)
    if underlying != None:
        for alias in underlying.Aliases():
            if alias.Type().Name() == 'BB_TICKER':
                return alias.Alias().strip()
    return ''
def getGroupTradeRef(external_ref):
    # Return the trade number of the earliest-booked TRS trade carrying the
    # given 'External Reference' add-info, or None when no match exists.
    groupTradeRef = None
    strSql = """
            select trdnbr, t.time
            from trade t, instrument i, party a, party c, portfolio pf, leg l, instrument u
            where t.insaddr = i.insaddr
            and i.instype = 'TotalReturnSwap'
            and t.status not in ('Void', 'Simulated')
            and t.acquirer_ptynbr = a.ptynbr
            and t.counterparty_ptynbr = c.ptynbr
            and t.prfnbr = pf.prfnbr
            and add_info(t, 'External Reference') = '%s'
            and i.insaddr = l.insaddr
            and l.float_rate = u.insaddr
            order by t.time, trdnbr
            """ % (external_ref)
    print strSql
    res = ael.asql(strSql)
    columns, buf = res
    # Rows are ordered by time then trade number, so the first row is the
    # earliest trade; only that one is needed.
    for table in buf:
        for row in table:
            groupTradeRef = row[0]
            break
    return groupTradeRef
def getFirstTRS(external_ref, und_insaddr):
    # Return the acm FInstrument of the open TRS whose trade is its own
    # transaction root (trdnbr = trx_trdnbr) for this external reference and
    # underlying, or None when no such instrument exists.
    strSql = """select i.insid
                from trade t, instrument i, leg l
                where i.insaddr = t.insaddr
                and i.instype = 'TotalReturnSwap'
                and t.status not in ('Void', 'Simulated')
                and add_info(t, 'External Reference') = '%s'
                and i.insaddr = l.insaddr and l.payleg = 'No' and l.type = 'Total Return'
                and add_info(t, 'Trd Pos Closed') ~= 'Yes'
                and l.float_rate = %s
                and t.trdnbr = t.trx_trdnbr""" % (external_ref, str(und_insaddr))
    #print strSql
    rs = ael.asql(strSql)
    columns, buf = rs
    insid = ''
    # Only the first matching instrument id is used.
    for table in buf:
        for row in table:
            insid = str(row[0]).strip()
            break
    if insid == '':
        return None
    acm_ins = acm.FInstrument[insid]
    return acm_ins
def getTotalTradeQuantity(external_ref, und_insaddr, asofdate):
    # Sum the TRS position for one external reference / underlying as of
    # ``asofdate``: find the transaction-root trade, then accumulate its
    # quantity and the quantities of all child trades booked before T+1.
    # Returns None when no position can be established.
    acm_ins = getFirstTRS(external_ref, und_insaddr)
    if acm_ins == None:
        return None
    #print "instrument='%s' and status <> 'Void' and status <> 'Simulated'" % acm_ins.Name()
    #acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime <= '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
    acm_trds = acm.FTrade.Select("instrument='%s' and status <> 'Void' and status <> 'Simulated' and tradeTime < '%s'" % (acm_ins.Name(), asofdate.add_days(1)))
    acm_trd = None
    if acm_trds != None:
        # Stop at the transaction-root trade (its own TrxTrade).
        for acm_trd in acm_trds:
            if acm_trd.TrxTrade() != None:
                if acm_trd.Oid() == acm_trd.TrxTrade().Oid():
                    break
    else:
        return None
    total_quantity = 0.0
    if acm_trd.TrxTrade() == None:
        # Standalone trade with no transaction link.
        if acm_trd.Status() not in ('Void', 'Simulated'):
            total_quantity = total_quantity + acm_trd.Quantity()
            return abs(total_quantity)
        else:
            return None
    elif acm_trd.Oid() == acm_trd.TrxTrade().Oid():
        if acm_trd.Status() not in ('Void', 'Simulated'):
            total_quantity = total_quantity + acm_trd.Quantity()
        # find all other trade
        #acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime <= '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
        acm_trs_trds = acm.FTrade.Select("trxTrade=%s and tradeTime < '%s'" % (acm_trd.Oid(), asofdate.add_days(1)))
        for acm_trs_trd in acm_trs_trds:
            # add this to handle tradeTime lag 8 hours from gmt
            ael_trd_date = ael.date(str(acm_trs_trd.TradeTime())[0:10])
            if ael_trd_date >= asofdate.add_days(1):
                continue
            if acm_trs_trd.Oid() != acm_trs_trd.TrxTrade().Oid() and \
            acm_trs_trd.Status() not in ('Void', 'Simulated') and \
            acm_trs_trd.Instrument().InsType() == 'TotalReturnSwap':
                total_quantity = total_quantity + acm_trs_trd.Quantity()
        #print total_quantity
        '''
        if total_quantity == 0.0:
            return None
        else:
            return abs(total_quantity)
        '''
        # Sign convention: the aggregated quantity is negated on return.
        return -total_quantity
    else:
        return -total_quantity
def getUnderlyingPrice(dt, ael_und_ins, currclspricemkt, histclspricemkt):
    # Return the instrument's price in its own currency: 'Last' from the
    # live market for today, otherwise 'Close' from the historical market.
    try:
        if dt == ael.date_today():
            cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
        else:
            cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Close', 0, histclspricemkt)
    except:
        #cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
        # NOTE(review): the bare except silently maps ANY pricing failure
        # (including missing market data) to 0.0 -- callers see a zero price.
        cls_price = 0.0
    return cls_price
def csv_to_arr(csv_file, start=0, has_header=True, delim=',', encoding='utf-8'):
    """Load a CSV from a local path or an http(s) URL.

    Columns that are entirely empty are dropped.  When ``has_header`` is
    true, returns (normalized_header_string, data_rows); otherwise returns
    the rows from index ``start`` on.  Rows are tuples of strings.

    Fixes over the original:
    - ``requests`` was never imported at module level, so the URL branch
      always raised NameError; it is now imported lazily inside the branch.
    - The URL branch passed one big string to csv.reader (which iterates it
      character by character); it now splits into lines first.
    - The local file handle is closed (it previously leaked), and the file
      is opened with 'r' because the old 'rU' mode was removed in
      Python 3.11.
    - Removed the unreachable trailing ``return``.
    """
    if "http" in csv_file:
        import requests
        content = requests.get(csv_file).content.decode(encoding)
        rows = list(csv.reader(content.splitlines(), delimiter=delim))
    else:
        f = open(csv_file, 'r')
        try:
            rows = list(csv.reader(f, delimiter=delim))
        finally:
            f.close()
    # Transpose, drop all-empty columns, transpose back.
    arr = list(zip(*rows))
    arr = [x for x in arr if any(x)]
    arr = list(zip(*arr))
    if has_header:
        header = ','.join(arr[start])
        # Header names are normalized: punctuation/whitespace stripped.
        return re.sub(r"[\*\.#/\$%\"\(\)& \_-]", "", header), arr[start + 1:]
    return arr[start:]
def getFx(dt, fm_ccy, to_ccy, currclspricemkt, histclspricemkt):
    # Return the fm_ccy -> to_ccy FX rate crossed through USD: 'Last' from
    # the live market for today, 'Close' from the historical market
    # otherwise.  Any failure yields 0.0.
    # Onshore CNY is always priced via the offshore CNH curve.
    if fm_ccy == 'CNY':
        fm_ccy = 'CNH'
    if to_ccy == 'CNY':
        to_ccy = 'CNH'
    ins_fm_ccy = ael.Instrument[fm_ccy]
    ins_to_ccy = ael.Instrument[to_ccy]
    ins_usd = ael.Instrument['USD']
    try:
        if dt == ael.date_today():
            #fx_rate = ins_fm_ccy.used_price(dt, ins_to_ccy.insid, 'Last', 0, currclspricemkt)
            # Cross through USD: fm->USD times USD->to.
            fm_usd_rate = ins_fm_ccy.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
            to_usd_rate = ins_usd.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
            fx_rate = fm_usd_rate * to_usd_rate
        else:
            #fx_rate = ins_fm_ccy.used_price(dt, ins_to_ccy.insid, 'Close', 0, histclspricemkt)
            fm_usd_rate = ins_fm_ccy.used_price(dt, ins_usd.insid, 'Close', 0, histclspricemkt)
            to_usd_rate = ins_usd.used_price(dt, ins_to_ccy.insid, 'Close', 0, histclspricemkt)
            fx_rate = fm_usd_rate * to_usd_rate
    except:
        #fm_usd_rate = ins_fm_ccy.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
        #to_usd_rate = ins_usd.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
        #fx_rate = fm_usd_rate * to_usd_rate
        # NOTE(review): bare except silently maps any pricing failure to 0.0.
        fx_rate = 0.0
    #fx_rate = ins_fm_ccy.used_price(ael.date_today(), ins_to_ccy.insid, 'Last', 0, currclspricemkt)
    return fx_rate
def mtm_valuation(dict):
    """Build mark-to-market rows for open TRS positions.

    Selects TRS trades matching the portfolio/acquirer/counterparty filters
    in *dict* (note: the parameter shadows the builtin `dict`), prices each
    external-reference group at *asofdate* and converts to base currency.

    Returns:
        (header, rows) where header is 'cpty,bbg,qty,mkt_price,today_mv'.
    """
    header = "cpty,bbg,qty,mkt_price,today_mv"
    asofdate = dict['asofdate']
    if asofdate == 'Today':
        asofdate = ael.date_today()
    asofdate = ael.date(asofdate)
    # Portfolios
    # Expand each selected portfolio into its child portfolios and build a
    # quoted, comma-separated SQL 'in' list.
    portfolios = dict['portfolio']
    portfolioList2 = []
    pf_list = ''
    portfolioList2.extend(portfolios)
    for port in portfolioList2:
        prfid = port
        pfarr = []
        pPf = ael.Portfolio[prfid]
        HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
        if len(pfarr) > 0:
            for pf in pfarr:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + pf + "'"
        else:
            if len(pf_list) != 0:
                pf_list = pf_list + ','
            pf_list = pf_list + "'" + prfid + "'"
    # Acquirers
    acq_array_list = dict['acquirers']
    acq_list = ''
    for acq in acq_array_list:
        if acq_list == '':
            acq_list = "'" + acq + "'"
        else:
            acq_list = acq_list + ",'" + acq + "'"
    # Counterparties
    pty_array_list = dict['counterparties']
    pty_list = ''
    for pty in pty_array_list:
        if pty_list == '':
            pty_list = "'" + pty + "'"
        else:
            pty_list = pty_list + ",'" + pty + "'"
    currclspricemkt = dict['currclspricemkt']
    histclspricemkt = dict['histclspricemkt']
    base_ccy = dict['base_ccy']
    product_strategy = 'SP_Portfolio Swap' #default no grouping
    strSql = """select t.trdnbr, add_info(t, 'External Reference') 'external_ref', l.float_rate, c.ptyid
    into externalRef
    from instrument i, trade t, party a, portfolio pf, leg l, party c
    where i.insaddr = t.insaddr
    and t.status not in ('Void', 'Simulated')
    and i.instype = 'TotalReturnSwap'
    and t.acquirer_ptynbr = a.ptynbr
    and a.ptyid in (@accquirer_list)
    and pf.prfid in (@portfolio_list)
    and t.time < '%s'
    and i.insaddr = l.insaddr and l.payleg = 'No'
    and t.counterparty_ptynbr = c.ptynbr
    and add_info(t, 'Trd Pos Closed') ~= 'Yes'
    @counterparty_list_sql
    select distinct external_ref, float_rate, ptyid
    from externalRef
    where external_ref ~= ''""" % (asofdate.add_days(1))
    strSql = strSql.replace('@portfolio_list', pf_list)
    strSql = strSql.replace('@accquirer_list', acq_list)
    if pty_list != '':
        counterparty_list_sql = 'and c.ptyid in (@counterparty_list)'
        counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
        strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
    else:
        strSql = strSql.replace("@counterparty_list_sql", ' ')
    print (strSql)
    rs = ael.asql(strSql)
    columns, buf = rs
    arr = []
    for table in buf:
        for row in table:
            rptRow = []
            external_ref = str(row[0])
            # NOTE(review): row[1] is the query's float_rate column despite
            # the local name 'und_insaddr' -- confirm against getFirstTRS.
            und_insaddr = row[1]
            cpty_id = row[2]
            acm_ins = getFirstTRS(external_ref, und_insaddr)
            #print 'acm_ins', acm_ins.Name()
            if acm_ins != None:
                underlying_bbg = getUndInstrumentBBTicker(acm_ins)
                ins_ccy = acm_ins.Currency().Name()
                if ins_ccy == 'CNY':
                    ins_ccy = 'CNH'
                qty = getTotalTradeQuantity(external_ref, und_insaddr, asofdate)
                if round(qty, 2) == 0.0:
                    # suppress all have been closed out
                    continue
                #print 'qty', qty
                acm_und_ins = getTRSUnderlying(acm_ins)
                today_underlying_price = getUnderlyingPrice(asofdate, ael.Instrument[acm_und_ins.Name()], currclspricemkt, histclspricemkt)
                today_fx = getFx(asofdate, ins_ccy, base_ccy, currclspricemkt, histclspricemkt)
                original_mv = today_underlying_price * qty * today_fx
                rptRow = [cpty_id, underlying_bbg, int(qty), float(today_underlying_price), float(original_mv) ]
                # print (rptRow)
                arr.append(rptRow)
    return header, arr
def client_cash(dict):
    """Collect client cash payments on currency trades.

    Selects payments on 'Curr' trades matching the portfolio/acquirer/
    counterparty filters in *dict* (the parameter shadows the builtin
    `dict`), converts each amount to base currency at *asofdate*.

    Returns:
        (header, rows) where header lists
        TradeDate..ExternalReference columns.
    """
    header = 'TradeDate,ClientCode,ClientName,TradeReference,CashType,Amount,ExternalReference'
    asofdate = dict['asofdate']
    if asofdate == 'Today':
        asofdate = ael.date_today()
    asofdate = ael.date(asofdate)
    # Portfolios
    # Expand each selected portfolio into its child portfolios and build a
    # quoted, comma-separated SQL 'in' list.
    portfolios = dict['portfolio']
    portfolioList2 = []
    pf_list = ''
    portfolioList2.extend(portfolios)
    for port in portfolioList2:
        prfid = port
        pfarr = []
        pPf = ael.Portfolio[prfid]
        HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
        if len(pfarr) > 0:
            for pf in pfarr:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + pf + "'"
        else:
            if len(pf_list) != 0:
                pf_list = pf_list + ','
            pf_list = pf_list + "'" + prfid + "'"
    # Acquirers
    acq_array_list = dict['acquirers']
    acq_list = ''
    for acq in acq_array_list:
        if acq_list == '':
            acq_list = "'" + acq + "'"
        else:
            acq_list = acq_list + ",'" + acq + "'"
    # Counterparties
    pty_array_list = dict['counterparties']
    pty_list = ''
    for pty in pty_array_list:
        if pty_list == '':
            pty_list = "'" + pty + "'"
        else:
            pty_list = pty_list + ",'" + pty + "'"
    base_ccy = dict['base_ccy']
    currclspricemkt = dict['currclspricemkt']
    histclspricemkt = dict['histclspricemkt']
    strSql = """select cpty.ptyid, cpty.fullname, t.trdnbr, p.type, c.insid, p.amount, add_info(t, 'External Reference') 'ext_ref'
    from trade t, instrument i, payment p, party cpty, party a, portfolio pf, instrument c
    where t.insaddr = i.insaddr
    and i.instype = 'Curr'
    and t.trdnbr = p.trdnbr
    and t.counterparty_ptynbr = cpty.ptynbr
    and t.acquirer_ptynbr = a.ptynbr
    and p.curr = c.insaddr
    and a.ptyid in (@accquirer_list)
    @counterparty_list_sql
    and t.prfnbr = pf.prfnbr
    and pf.prfid in (@portfolio_list)
    @start_date
    and t.time < '@dt'
    and t.status not in ('Void', 'Simulated')
    """
    strSql = strSql.replace('@dt', asofdate.add_days(1).to_string('%Y-%m-%d'))
    strSql = strSql.replace('@portfolio_list', pf_list)
    strSql = strSql.replace('@accquirer_list', acq_list)
    strSql = strSql.replace("@start_date", ' ')
    if pty_list != '':
        counterparty_list_sql = 'and cpty.ptyid in (@counterparty_list)'
        counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
        strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
    else:
        strSql = strSql.replace("@counterparty_list_sql", ' ')
    print (strSql)
    rs = ael.asql(strSql)
    columns, buf = rs
    rptContent = []
    for table in buf:
        for row in table:
            client_code = row[0]
            client_name = row[1]
            trade_ref = row[2]
            cash_type = row[3]
            currency = row[4]
            amt = row[5]
            ext_ref = row[6]
            acm_trd = acm.FTrade[trade_ref]
            if acm_trd != None:
                # TradeTime() starts with 'YYYY-MM-DD'; keep the date part.
                trade_date = acm_trd.TradeTime()[0:10]
                today_fx = getFx(asofdate, currency, base_ccy, currclspricemkt, histclspricemkt)
                rptRow = [str(trade_date), client_code, client_name, str(trade_ref), cash_type, float(amt*today_fx), ext_ref]
                print (rptRow)
                rptContent.append(rptRow)
    return header, rptContent
def get_value_day(asofdate, pay_cal, spot_day):
    """Roll *asofdate* forward by *spot_day* banking days on calendar *pay_cal*.

    Each step advances at least one calendar day, then keeps advancing
    until a banking day is reached.
    """
    cal = acm.FCalendar.Select("name='%s'" % (pay_cal))[0]
    rolled = asofdate
    for _ in range(spot_day):
        rolled = rolled.add_days(1)
        # Skip weekends/holidays until we land on a banking day.
        while cal.IsNonBankingDay(cal, cal, rolled):
            rolled = rolled.add_days(1)
    return rolled
def calc_stock_loan(cur, coll_arr, cpty, ext_ref, coll_ratio, ia, total_mv, asofdate, base_ccy, currclspricemkt, histclspricemkt):
    """Append stock-loan collateral 'Cash Entry' rows for one counterparty.

    If the counterparty has IA posted but no short market value, a single
    full-return row is appended.  Otherwise one row per open short-sell /
    buy-back security is appended, sized at 100% or 120% of market value
    depending on the current collateral ratio.

    Args:
        cur: sqlite cursor over the 'trd' staging table.
             NOTE(review): rows are indexed by column name -- assumes the
             connection uses sqlite3.Row (confirm db_cur's row_factory).
        coll_arr: output list, mutated in place.
        cpty, ext_ref: counterparty id and external reference of the group.
        coll_ratio, ia, total_mv: current ratio, posted IA, short MV.
        asofdate: ael date of the run.
        base_ccy, currclspricemkt, histclspricemkt: pricing parameters.
    """
    trade_ref = getGroupTradeRef(ext_ref)
    if total_mv is None and ia is not None and ia != 0:
        # No remaining short positions but IA still posted: return it all.
        value_day = get_value_day(asofdate, "Hong Kong", 2)
        loan_amount = ia
        trade_source = "EMSX"
        ccy = base_ccy
        comment = "IA full return"
        coll_arr.append(["EDD Deltaone", "HKD", asofdate.to_string('%Y-%m-%d'), value_day.to_string('%Y-%m-%d'), "HTIFS - EDD", value_day.to_string('%Y-%m-%d'),
                "FO Confirmed", cpty, cpty, "", "EDMO2", "Cash Entry", comment,
                ("Expense" if loan_amount > 0 else "Income"), "", "IM-EDD(Short Pos)", -loan_amount, ccy, "", "", "", "", "Group Trade Ref", trade_ref, "",
                "", "External Reference", ext_ref, "", "", "Product_Strategy", "SP_Portfolio Swap", "", "", "Trade Source", trade_source ])
    else:
        # Net open quantity per security for this counterparty's short
        # sells / buy backs.
        cur.execute("""
            select Security, Currency, PayCal1, SpotDays, Sum(case when BS = 'BUY' then Quantity else -Quantity end) as qty, TradeSource
            from trd
            where Counterparty = ? and (ShortSell = 'Y' or BuyBack = 'Y' )
            group by Counterparty, Security, Currency, PayCal1, SpotDays
            having qty <> 0
            """, (cpty,))
        for row in cur.fetchall():
            stock_code = str(row["Security"])
            ccy = str(row["Currency"])
            pay_cal = str(row["PayCal1"])
            spot_day = int(row["SpotDays"])
            qty = float(row["qty"])
            trade_source = str(row["TradeSource"])
            value_day = get_value_day(asofdate, pay_cal, spot_day)
            today_underlying_price = getUnderlyingPrice(asofdate, ael.Instrument[stock_code], currclspricemkt, histclspricemkt)
            # 100% when the ratio is already healthy (0 < ratio < 1.2) and
            # this is a return of collateral (qty > 0); otherwise 120%.
            ratio = (1 if coll_ratio < 1.2 and coll_ratio > 0 and qty > 0 else 1.2)
            loan_amount = qty*today_underlying_price*ratio
            comment = "IA " + "{0:.0f}%".format(ratio*100) + (" return " if loan_amount > 0 else " for ") + stock_code
            coll_arr.append(["EDD Deltaone", "HKD", asofdate.to_string('%Y-%m-%d'), value_day.to_string('%Y-%m-%d'), "HTIFS - EDD", value_day.to_string('%Y-%m-%d'),
                    "FO Confirmed", cpty, cpty, "", "EDMO2", "Cash Entry", comment,
                    ("Expense" if loan_amount > 0 else "Income"), "", "IM-EDD(Short Pos)", -loan_amount, ccy, "", "", "", "", "Group Trade Ref", trade_ref, "",
                    "", "External Reference", ext_ref, "", "", "Product_Strategy", "SP_Portfolio Swap", "", "", "Trade Source", trade_source ])
    return
def arr_to_xlsx(xlsx_file, header, arr, sheet="", start_row=1, output_filename=""):
    """Write *header* and *arr* into an existing Excel workbook via COM.

    Args:
        xlsx_file: workbook to open (used as template).
        header: comma-separated column titles written at *start_row*;
            pass "" to skip.
        arr: rows written below the header; cells starting with '=' are
            written as formulas.
        sheet: worksheet name; "" means the first sheet.
        start_row: 1-based row for the header.
        output_filename: save target; "" overwrites *xlsx_file*.
    """
    xl = win32com.client.Dispatch('Excel.Application')
    wb = xl.Workbooks.Open(xlsx_file)
    ws = wb.Worksheets(1) if sheet == "" else wb.Worksheets(sheet)
    if header != "":
        for i, cell in enumerate(header.split(',')):
            ws.Cells(start_row,i+1).Value = cell
    for i, row in enumerate(arr):
        for j, cell in enumerate(row):
            # '=' prefix means an Excel formula, not a literal value.
            if str(cell) != "" and str(cell)[0] == '=':
                ws.Cells(i+start_row+1,j+1).Formula = cell
            else:
                ws.Cells(i+start_row+1,j+1).Value = cell
    ws.Columns.AutoFit()
    # Suppress Excel's overwrite-confirmation dialog while saving.
    xl.DisplayAlerts = False
    wb.SaveAs(xlsx_file if output_filename == "" else output_filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def ael_main(dict):
    """Entry point: build the stock-loan collateral upload workbook.

    Stages client cash, MTM valuation and the PB trade file into sqlite,
    joins them to compute each counterparty's collateral ratio, then emits
    'Cash Entry' rows into the Excel template.  The parameter shadows the
    builtin `dict` (ael calling convention).
    """
    conn, cur = db_cur()
    asofdate = dict['asofdate']
    if asofdate == 'Today':
        asofdate = ael.date_today()
    asofdate = ael.date(asofdate)
    currclspricemkt = dict['currclspricemkt']
    histclspricemkt = dict['histclspricemkt']
    # Stage the three inputs as in-memory sqlite tables.
    csh_header, csh_arr = client_cash(dict)
    mtm_header, mtm_arr = mtm_valuation(dict)
    trd_header, trd_arr = csv_to_arr(dict["pb_trd_file"].replace("YYYYMMDD", asofdate.to_string('%Y%m%d')))
    create_tbl(cur, "csh", csh_header, csh_arr)
    create_tbl(cur, "mtm", mtm_header, mtm_arr)
    create_tbl(cur, "trd", trd_header, trd_arr)
    # CollRatio = -IA / short market value per counterparty/external ref.
    cur.execute("""
        select trd1.Counterparty, trd1.ExternalReference, -ia/total_mv as CollRatio, ia, total_mv
        from
        (select distinct Counterparty, ExternalReference
        from trd
        group by Counterparty, ExternalReference
        ) trd1
        left join
        (select ClientCode, ExternalReference, sum(Amount) as ia
        from csh
        where csh.CashType = 'IM-EDD(Short Pos)'
        group by ClientCode, ExternalReference) tmp2
        on trd1.Counterparty = tmp2.ClientCode
        left join
        (select cpty, sum(today_mv) as total_mv
        from mtm
        where qty < 0
        group by cpty) tmp1
        on tmp1.cpty = tmp2.ClientCode
        """)
    coll_arr = []
    for row in cur.fetchall():
        cpty = row["Counterparty"]
        ext_ref = row["ExternalReference"]
        coll_ratio = row["CollRatio"]
        ia = row["ia"]
        total_mv = row["total_mv"]
        print (cpty, ia, total_mv)
        calc_stock_loan(cur, coll_arr, cpty, ext_ref, coll_ratio, ia, total_mv, asofdate, dict["base_ccy"], currclspricemkt, histclspricemkt)
    arr_to_xlsx(dict["loan_xls_template"], "", coll_arr, "Cash Entry", 3, dict["loan_xls_output"].replace("YYYYMMDD", asofdate.to_string('%Y%m%d')))
    print ("Finished")
    return
"repo_name": "frederick623/HTI",
"path": "fa_collateral_upload/HTI_Loan_Collateral_Automation.py",
"copies": "2",
"size": "25027",
"license": "apache-2.0",
"hash": -7546975289725523000,
"line_mean": 38.4141732283,
"line_max": 202,
"alpha_frac": 0.5425340632,
"autogenerated": false,
"ratio": 3.2532172104510595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9708839983536484,
"avg_score": 0.01738225802291495,
"num_lines": 635
} |
# 2017-03-11 jkang
# simple logistic regression
# Python3.5
# Tensorflow1.0.1
# ref:
# - http://web.stanford.edu/class/cs20si/
# - iris dataset from Matlab Neural Network example
#
# Input: iris data (4 features)
# Output: iris label (3 categories)
import tensorflow as tf
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
# Training hyper-parameters.  NOTE(review): TF 1.x graph-mode API
# (tf.placeholder etc.); will not run unchanged on TF 2.x.
learning_Rate = 0.01
batch_size = 10
max_epochs = 30
# Load iris features/targets from MATLAB .mat files; transposed so rows are
# samples (presumably 150x4 inputs and 150x3 one-hot targets -- TODO confirm).
irisInputs_tmp = sio.loadmat('irisInputs.mat')
irisInputs = irisInputs_tmp['irisInputs'].T
irisTargets_tmp = sio.loadmat('irisTargets')
irisTargets = irisTargets_tmp['irisTargets'].T
# Graph inputs: one fixed-size mini-batch of features and one-hot labels.
X = tf.placeholder(tf.float32, [batch_size, 4], name='irisInputs')
Y = tf.placeholder(tf.float32, [batch_size, 3], name='irisTargets')
# Model parameters for a single affine layer (softmax regression).
w = tf.Variable(np.zeros((4, 3)), name='weight', dtype=np.float32)
b = tf.Variable(np.zeros((1, 3)), name='bias', dtype=np.float32)
logits = tf.matmul(X, w) + b
# Softmax cross-entropy, averaged over the batch, minimised with plain SGD.
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
loss = tf.reduce_mean(entropy)
optimizer = tf.train.GradientDescentOptimizer(learning_Rate).minimize(loss)
def softmax(x):
    """Numerically stable softmax over all elements of *x*.

    Shifts by the maximum before exponentiating so large logits do not
    overflow, then normalises to sum to 1.
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
with tf.Session() as sess:
    # training
    # Graph summary for TensorBoard; variables start from the zero init above.
    writer = tf.summary.FileWriter('./graph', sess.graph)
    sess.run(tf.global_variables_initializer())
    n_batches = int(irisTargets.shape[0] / batch_size)
    for i in range(max_epochs):
        total_loss = 0
        for ibatch in range(n_batches):
            # Contiguous, non-shuffled mini-batches of size batch_size.
            x_batch = irisInputs[batch_size *
                                 ibatch: batch_size * ibatch + batch_size]
            y_batch = irisTargets[batch_size *
                                  ibatch: batch_size * ibatch + batch_size]
            _, loss_batch = sess.run([optimizer, loss], feed_dict={
                                     X: x_batch, Y: y_batch})
            total_loss += loss_batch
        print('Average loss at epoch {0}: {1}'.format(
            i, total_loss / n_batches))
    print('Optimization finished!')
    # Pull the trained parameters back into numpy for the test below.
    weights, bias = sess.run([w, b])
    writer.close()
    # testing
    # Evaluate a single randomly chosen sample with the numpy softmax.
    rand_idx = np.random.permutation(irisInputs.shape[0])[0]
    x_data = irisInputs[rand_idx]
    y_data = irisTargets[rand_idx]
    pred = softmax(np.dot(x_data, weights) + bias)
    print('Y:', y_data)
    print('pred:', np.argmax(pred) + 1, 'th element')
| {
"repo_name": "jaekookang/useful_bits",
"path": "Machine_Learning/Logistic_Regression/Python/logistic_regression.py",
"copies": "1",
"size": "2300",
"license": "mit",
"hash": -5382456724383367000,
"line_mean": 30.5068493151,
"line_max": 75,
"alpha_frac": 0.6413043478,
"autogenerated": false,
"ratio": 3.1944444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4335748792244445,
"avg_score": null,
"num_lines": null
} |
# [2017-03-27] Challenge #308 [Easy] Let it burn
# https://www.reddit.com/r/dailyprogrammer/comments/61ub0j/20170327_challenge_308_easy_let_it_burn/
house = ["#############/#",
"# | #",
"# # #",
"# # #",
"####### #",
"# _ #",
"###############"]
coords = [(1,1),(1,2),(1,3),(5,6),(2,4),(1,1),(1,2),(5,5),(5,5),(9,1),(7,5),(2,2)]
broken_arch = "/=_"
def input():
    # NOTE(review): this shadows the builtin input().  Runs the whole
    # simulation: ignite each (y, x) coordinate in turn, then print the
    # final floor plan.
    for y,x in coords:
        burn(x,y)
    print("\n".join(house))
def burn(x,y):
    # Advance cell (x, y) one state via legend() and write it back into the
    # grid (strings are immutable, hence the slice-and-rebuild).
    c = legend(house[x][y])
    house[x] = house[x][:y] + c + house[x][y+1:]
    if c == 'F':
        # New fire: spread through adjacent smoke, then through openings.
        fire(x,y)
        broken(x,y)
    elif c == 'S':
        # New smoke: may ignite immediately if a neighbour is already on fire.
        broken(x,y)
        if fire_check(x,y):
            house[x] = house[x][:y] + 'F' + house[x][y+1:]
def legend(c):
    """Advance one cell state: ' ' -> 'S' (smoke), 'S' -> 'F' (fire).

    Any other character (walls, openings, existing fire) is unchanged.
    """
    transitions = {'S': 'F', ' ': 'S'}
    return transitions.get(c, c)
def fire(x,y):
    # Spread fire from (x, y) into ONE adjacent smoke cell ('S'), checked in
    # up / left / right / down order, then recurse from the converted cell
    # so the fire chains through connected smoke.
    tbool = False
    if house[x-1][y] == 'S':
        x = x-1
        tbool = True
    elif house[x][y-1] == 'S':
        y = y-1
        tbool = True
    elif house[x][y+1] == 'S':
        y = y+1
        tbool = True
    elif house[x+1][y] == 'S':
        x = x+1
        tbool = True
    if tbool:
        # Convert the found smoke cell to fire and continue the chain.
        house[x] = house[x][:y] + 'F' + house[x][y+1:]
        fire(x,y)
def broken(x,y):
    # If an adjacent cell (up / left / right / down, first match wins) is a
    # breakable opening ('/', '=', '_'), continue spreading the fire from it.
    tbool = False
    if house[x-1][y] in broken_arch:
        x = x-1
        tbool = True
    elif house[x][y-1] in broken_arch:
        y = y-1
        tbool = True
    elif house[x][y+1] in broken_arch:
        y = y+1
        tbool = True
    elif house[x+1][y] in broken_arch:
        x = x+1
        tbool = True
    if tbool:
        fire(x,y)
        # NOTE(review): fire_check's return value is discarded here --
        # presumably the opening itself was meant to ignite; confirm intent.
        fire_check(x,y)
def fire_check(x,y):
    """Return True if any orthogonal neighbour of (x, y) is on fire ('F')."""
    neighbours = ((x - 1, y), (x, y - 1), (x, y + 1), (x + 1, y))
    return any(house[r][c] == 'F' for r, c in neighbours)
# Run the simulation via the module-level input() defined above (which
# shadows the builtin of the same name).
input()
| {
"repo_name": "brendanwr/DailyProgrammer",
"path": "308 [Easy] Let it burn/main.py",
"copies": "1",
"size": "1671",
"license": "apache-2.0",
"hash": 3020778790403995600,
"line_mean": 16.40625,
"line_max": 99,
"alpha_frac": 0.4901256732,
"autogenerated": false,
"ratio": 2.109848484848485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7553714873341343,
"avg_score": 0.10925185694142847,
"num_lines": 96
} |
# [2017-05-05 20:52:55 EDT Karthik Murugadoss]
"""
Classes and functions for file handling utilities
"""
import inspect
import json
import logging
import msgpack
import pandas as pd
import pickle
from rpy2.robjects import pandas2ri, r
class ReadTableFromFile:
    """
    Make the contents of a file containing tabular data (matrix) present on disk available as a python object.
    The file on disk can be .txt, .csv, .xlsx and resultant python object can be a pandas dataframe, list, dictionary.
    """
    def __init__(self, filename, sep='\t', skiprows=0, nrows=None, header=0, index_col=None, names=None, usecols=None):
        """
        Define basic characteristics of table to be read. Note: Argument names taken from pandas.read_table() command
        Args:
            sep: Separator, str | Default: '\t'
            skiprows: Number of rows from top to skip, int | Default: 0
            nrows: Number of rows to read, int | Default: None (all rows)
            header: Row number corresponding to column header. If no header row exists, set as None, int | Default: 0
            index_col: Column to use as row labels of the DataFrame, int | Default: None
            names: User-specified columns names. Use if header=None, str | Default: None
            usecols: Column names or indices to read (instead of reading all columns), str, int | Default=None (all columns)
        Returns:
            N/A
        """
        self.filename = filename
        self.sep = sep
        self.skiprows = skiprows
        self.nrows = nrows
        self.header = header
        self.index_col = index_col
        self.names = names
        self.usecols = usecols
        # Extension (without the dot) drives override_sep and to_pandas_df.
        self.file_ext = filename.split('.')[-1]
    def override_sep(self):
        """
        Override seperator attribute based on file extension
        Args:
            self
        Returns:
            N/A
        """
        # KeyError for extensions other than 'txt'/'csv' is intentional.
        sep_dict = {'txt': '\t', 'csv': ','}
        self.sep = sep_dict[self.file_ext]
    def to_pandas_df(self):
        """
        Reads a .txt or .csv file present on disk as pandas dataframe. Also includes a alternate function call to convert an .xlsx file into a pandas dataframe. File extension is checked and the appropriate call is made
        Note: sep, nrows and usecols are not passed to the pd.read_excel call
        Args:
            self
        Returns:
            N/A
        """
        if 'xls' in self.file_ext:
            self.table = pd.read_excel(self.filename, skiprows=self.skiprows, header=self.header, index_col=self.index_col, names=self.names)
        else:
            self.table = pd.read_table(self.filename, sep=self.sep, skiprows=self.skiprows, nrows=self.nrows, header=self.header, index_col=self.index_col, names=self.names, usecols=self.usecols)
    def to_1d_list(self):
        """
        Reads individual lines of text file into list.
        Arg:
            self
        Returns:
            N/A
        """
        f = open(self.filename, 'r')
        # header doubles as the number of leading lines to skip in the
        # list/dict readers; None is normalised to 0 here.
        if not self.header:
            self.header = 0
        lines = [line.strip() for line in f.readlines()[(self.header):]]
        f.close()
        self.itemlist = lines
    def to_2d_list(self):
        """
        Reads individual lines of text file into a list of lists.
        Arg:
            self
        Returns:
            N/A
        """
        f = open(self.filename, 'r')
        if not self.header:
            self.header = 0
        lines = f.readlines()[(self.header):]
        # Drop only the trailing newline (unlike to_1d_list's strip()).
        lines = [line[:-1] for line in lines]
        f.close()
        self.itemlist = [line.split(self.sep) for line in lines]
    def to_dict(self):
        """
        Read individual lines of text file into a dictionary. index_col is used as key and is either 0 or 1. Typically considered to have two columns. Pandas dataframes are more amenable for cases with more that two columns.
        Arg:
            self
        Returns:
            N/A
        """
        assert self.index_col is not None
        f = open(self.filename, 'r')
        if not self.header:
            self.header = 0
        lines = f.readlines()[(self.header):]
        lines = [line[:-1] for line in lines]
        f.close()
        self.itemlist = [line.split(self.sep) for line in lines]
        # index_col selects the key column (0 or 1); the other column of the
        # two-column file becomes the value.
        self.itemdict = dict((line[self.index_col], line[int(not self.index_col)]) for line in self.itemlist)
def save_pkl(obj, filename):
    """Pickle *obj* to disk as ``<filename>.pkl``.

    Args:
        obj: python object to serialise.
        filename: pickle file name without the .pkl extension, str.
    """
    target = '{0}.pkl'.format(filename)
    with open(target, 'wb') as fh:
        pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)
def load_pkl(filename):
    """Unpickle and return the object stored in ``<filename>.pkl``.

    Args:
        filename: pickle file name without the .pkl extension, str.
    Returns:
        The deserialised python object.
    """
    source = '{0}.pkl'.format(filename)
    with open(source, 'rb') as fh:
        return pickle.load(fh)
def save_json(obj, filename):
    """Serialise *obj* to ``<filename>.json`` with 4-space indentation.

    Args:
        obj: json-serialisable python object.
        filename: json file name without the .json extension, str.
    """
    target = '{0}.json'.format(filename)
    with open(target, 'w') as fh:
        json.dump(obj, fh, indent=4)
def load_json(filename):
    """Load and return the object stored in ``<filename>.json``.

    Args:
        filename: json file name without the .json extension, str.
    Returns:
        The deserialised python object.
    """
    source = '{0}.json'.format(filename)
    with open(source, 'r') as fh:
        return json.load(fh)
def save_msg(obj, filename):
    """Serialise *obj* to ``<filename>.msg`` in MessagePack format.

    Args:
        obj: python object to serialise.
        filename: MessagePack file name without the .msg extension, str.
    """
    target = '{0}.msg'.format(filename)
    with open(target, 'wb') as fh:
        msgpack.dump(obj, fh)
def load_msg(filename):
    """Load the object stored in ``<filename>.msg``, decoding bytes to str.

    Args:
        filename: MessagePack file name without the .msg extension, str.
    Returns:
        The deserialised python object with all bytes converted to str.
    """
    source = '{0}.msg'.format(filename)
    with open(source, 'rb') as fh:
        raw = msgpack.load(fh)
    return convert_bytes2str(raw)
def convert_bytes2str(data):
    """Recursively convert a 'bytes' python object into its 'str' equivalent.

    Handles bytes, dicts (keys and values) and tuples; any other type is
    returned unchanged.  Additional if statements can be included if
    necessary.

    Args:
        data: python object, possibly containing bytes
    Returns
        equivalent object with bytes decoded to str
    """
    if isinstance(data, bytes):
        return data.decode()
    if isinstance(data, dict):
        return dict(map(convert_bytes2str, data.items()))
    if isinstance(data, tuple):
        # Bug fix: map() is a lazy iterator in Python 3, so the original
        # returned a map object instead of a tuple; materialise it.
        return tuple(map(convert_bytes2str, data))
    return data
def r2pandas_df(filename, dataframe_name):
    """Load an R data frame from disk and return it as a pandas DataFrame.

    Args:
        filename: R file name (including the extension), str
        dataframe_name: name of the data frame inside the R file, str
    Returns:
        pandas dataframe
    """
    pandas2ri.activate()
    # Load the R workspace file, then pull the named data frame out of it.
    r['load'](filename)
    r_df = r[dataframe_name]
    return pd.DataFrame(r_df)
def pandas2r_df(df, dataframe_name, filename):
    """Save a pandas dataframe to disk as a compressed R data frame.

    Args:
        df: pandas dataframe
        dataframe_name: R data frame name, str
        filename: R data frame file name, str
    """
    # Convert to an R data frame and bind it under dataframe_name in R.
    r.assign(dataframe_name, pandas2ri.py2ri(df))
    save_cmd = "save(%s, file='%s', compress=TRUE)" % (dataframe_name, filename)
    r(save_cmd)
def logger(stdout=True, logfile=False, filename='../output.log'):
    """Create a logger named after the calling function.

    Note that .log files will not be pushed to Github.

    Args:
        stdout: Display log on stdout, bool | Default: True
        logfile: Write log to file, bool | Default: False
        filename: Name of log file, str | Default: '../output.log'
    Returns:
        logger object
    """
    caller = inspect.stack()[1][3]
    log = logging.getLogger(caller)
    if stdout:
        logging.basicConfig(level=logging.INFO)
    if logfile:
        log.setLevel(logging.INFO)
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter(
            "[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
            "%Y-%m-%d %H:%M:%S"))
        log.addHandler(file_handler)
    return log
def main():
    # Placeholder entry point; this module is intended to be imported.
    pass
# Run main() only when executed as a script, not on import.
if __name__ == '__main__':
    main()
# Github test edit
| {
"repo_name": "karthikmurugadoss/utilities",
"path": "utilities/utilities.py",
"copies": "1",
"size": "8695",
"license": "mit",
"hash": 6074481764486404000,
"line_mean": 28.0802675585,
"line_max": 225,
"alpha_frac": 0.5978148361,
"autogenerated": false,
"ratio": 3.925507900677201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50233227367772,
"avg_score": null,
"num_lines": null
} |
# 20170614 Add manual trade
import ael
import acm
import HTI_Util
import HTI_Email_Util
import HTI_FeedTrade_EDD_Util
import os
import sys, traceback
import datetime
import sqlite3
import csv
import decimal
import account_journal
import win32com.client
# ael dialog definition.  Each row:
# [name, label, type, candidate values, default, mandatory, multi-select,
#  tooltip, custom validation, enabled]
# Bug fix: the tooltips of 'ss_bb' and 'consolid_trd' were swapped relative
# to their labels; they now match.
ael_variables = [['startdate', 'Start Date', 'string', [str(ael.date_today())], str(ael.date_today()), 1, 0, 'Start Date', None, 1], \
            ['enddate', 'End Date', 'string', [str(ael.date_today())], str(ael.date_today()), 1, 0, 'End Date', None, 1], \
            ['pfs', 'Portfolio(s)', 'string', HTI_Util.getAllPortfolios(), 'EDD Warrant Trading,EDD CBBC Trading,EDD Options,EDD Hedging,EDD Market Making 1,EDD Market Making 2,EDD Warrant,EDD Flow Strategy 1,EDD Flow Strategy 2,EDD HFT 1,EDD HFT 2,EDD HFT 3,EDD HFT 4,EDD OMM,EDD OTC OMM', 1, 1, 'Portfolio(s)', None, 1], \
            ['acq', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
            ['prd', 'Product Type(s)', 'string', HTI_Util.getAllInstypes(), 'Stock,Option,Future/Forward,Warrant', 1, 1, 'Product Type(s)', None, 1], \
            ['created_by', 'Created By', 'string', None, '', 0, 0, 'Created By', None, 1], \
            ['by_trader', 'Trader', 'string', None, '', 0, 0, 'Trader', None, 1], \
            ['tfs', 'Trade Filter', 'string', None, 'TF_EDD_ACCOUNT_JOURNAL', 0, 0, 'Trade Filter', None, 1], \
            ['gen_add_info', 'Generate additional info?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Generate additional info?', None, 1], \
            ['gen_value_day', 'Generate Value Day?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Generate Value Day?', None, 1], \
            ['gen_manual_trd', 'Generate Manual Trade?', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Generate Manual Trade?', None, 1], \
            ['ss_bb', 'Short Sell or Buy Back Only?', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Short Sell or Buy Back Only?', None, 1], \
            ['consolid_trd', 'Consolidate trades?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Consolidate trades?', None, 1], \
            ['fileNameTrd', 'Trade File name', 'string', None, 'D:\\Temp\\Trade_Records\\Trade_Record_YYYYMMDD.csv', 1, 0, 'File Name', None, 1], \
            ['fileNameCon', 'Consolidated File name', 'string', None, 'D:\\Temp\\Trade_Records\\Consolid_Trade_Record_YYYYMMDD.csv', 1, 0, 'Consolidated File Name', None, 1] ]
def adapt_decimal(d):
    """sqlite3 adapter: store Decimal values as their string representation."""
    return str(d)
def convert_decimal(s):
    """sqlite3 converter: rebuild a Decimal from its stored DECTEXT string."""
    return decimal.Decimal(s)
def db_cur(source = ":memory:"):
# Register the adapter
sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
#conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr=None, index_arr=None):
    """Create *tbl_name* from a comma-separated *header* and bulk-insert rows.

    The table is only created if it does not already exist; a column named
    'id' is promoted to PRIMARY KEY via the header rewrite.  Each entry in
    *index_arr* gets a single-column index.  Rows in *arr* (if any) are
    inserted with positional placeholders.

    Bug fix: the mutable default arguments ([]) were replaced with None
    sentinels; behaviour for callers is unchanged.
    """
    arr = [] if arr is None else arr
    index_arr = [] if index_arr is None else index_arr
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    tbl_exists = cur.fetchone()
    if tbl_exists[0] == 0:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        for index in index_arr:
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    if arr != []:
        cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
    return
def question_marks(st):
    """Return a '?,?,...,?' placeholder string, one '?' per comma-separated
    field in *st* (used to build parameterised INSERT statements).

    Replaces the original manual string-concatenation loop with the join
    idiom; output is identical.
    """
    return ",".join("?" for _ in st.split(","))
def num(s):
    """Coerce *s* to int if possible, otherwise float.

    Strings have '#' characters stripped first; blank strings and values
    whose float form is 'nan' return 0.
    NOTE(review): relies on the Python 2 `basestring` builtin -- this
    module is Python 2 only (see the `print` statements below).
    """
    if isinstance(s, basestring):
        s = str(s).replace("#","")
    if s == "" or str(float(s)) == "nan":
        return 0
    try:
        return int(s)
    except ValueError:
        return float(str(s))
def dec(s):
    """Coerce *s* to decimal.Decimal, returning 0 on any failure.

    Strings have '#' characters stripped first; blank/None/nan-like values
    return 0.
    NOTE(review): uses the Python 2 `basestring` builtin; also the final
    `return s` is unreachable because both the try and except branches
    return.
    """
    if isinstance(s, basestring):
        s = str(s).replace("#","")
    if s == "" or s == "None" or str(float(s)) == "nan":
        return 0
    try:
        return decimal.Decimal(str(s))
    except:
        return 0
    return s
def csv_to_arr(csv_file):
    """Read a CSV file into (header, rows).

    header is the first row joined with commas; rows is the remaining
    records as lists of strings.
    NOTE(review): 'rU' is Python 2 universal-newline mode; it was removed
    in Python 3.11.
    """
    arr = []
    with open(csv_file, 'rU') as f:
        reader = csv.reader(f)
        arr = list(reader)
    header = ','.join(arr[0])
    arr = arr[1:]
    return header, arr
def tsv_to_arr(tsv_file):
    """Read a tab-separated file into a list of rows, skipping the first
    (header) line.

    NOTE(review): 'rU' is Python 2 universal-newline mode; it was removed
    in Python 3.11.
    """
    arr = []
    with open(tsv_file, 'rU') as f:
        reader = csv.reader(f, dialect="excel-tab")
        arr = list(reader)
    arr = arr[1:]
    return arr
def sortArray(x, y):
    """cmp-style lexicographic comparator for equal-length sequences.

    Returns 1 if x > y at the first differing position, -1 if x < y, and 0
    when all compared elements are equal.

    Bug fix: the original returned -1 even when x[i] == y[i] (both branches
    of the if/else returned, making the `i = i + 1` increment unreachable),
    so it always decided on the first element alone.  Equal elements now
    fall through to the next position.
    """
    for a, b in zip(x, y):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0
def arrs_to_xlsx(filename, header=[], arr=[]):
    """Write several (header, rows) pairs into one Excel workbook via COM.

    header[x] is a comma-separated title row and arr[x] the data rows for
    worksheet x+1 of a newly created workbook, saved as *filename*.
    NOTE(review): mutable default arguments -- harmless here since neither
    is mutated, but callers should pass both lists explicitly.
    """
    i = 1
    xl = win32com.client.Dispatch('Excel.Application')
    wb = xl.Workbooks.Add()
    for x in range(0, len(header)):
        ws = wb.Worksheets(x+1)
        for i, cell in enumerate(header[x].split(',')):
            ws.Cells(1,i+1).Value = cell
        for i, row in enumerate(arr[x]):
            for j, cell in enumerate(row):
                ws.Cells(i+2,j+1).Value = str(cell)
    # Suppress Excel's overwrite-confirmation dialog while saving.
    xl.DisplayAlerts = False
    wb.SaveAs(filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def export_to_file(file_name, header, data_arr):
    """Write *header* (comma-separated titles) and *data_arr* rows to a CSV
    file, quoting every field and stringifying every cell.

    NOTE(review): 'wb' is the Python 2 idiom for csv writing; Python 3
    would need mode 'w' with newline=''.
    """
    csv_file = open(file_name, 'wb')
    wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
    wr.writerow(header.split(','))
    for data_row in data_arr:
        line = []
        for ele in data_row:
            line.append(str(ele))
        wr.writerow(line)
    csv_file.close()
    return
def header_to_col(header):
    """Map each comma-separated column name in *header* to its zero-based
    position (for indexing rows by column name).

    Replaces the original manual counter loop with the enumerate/dict
    comprehension idiom; output is identical.
    """
    return {name: idx for idx, name in enumerate(header.split(','))}
def check_banking_day(posdate):
    """Return *posdate* rolled forward to the next Hong Kong banking day
    (*posdate* itself if it already is one)."""
    hk_cal = acm.FCalendar.Select("name='Hong Kong'")[0]
    # Advance one day at a time past weekends/holidays.
    while hk_cal.IsNonBankingDay(hk_cal, hk_cal, posdate):
        posdate = posdate.add_days(1)
    return posdate
def ael_main(dict):
    """Entry point: export trade records day by day (or consolidated).

    For each banking day in [startdate, enddate] builds a trade-selection
    SQL from the dialog settings in *dict* (the parameter shadows the
    builtin `dict` -- ael calling convention), stores it into the shared
    SQL-query text object, and hands it to account_journal.trd_records.
    With 'Consolidate trades?' = Y a single pass covers the whole range.
    """
    header_trd = 'trade_id,entity,portfolio,counterparty,instrument_type,call_put,currency,position,reporting_date,instrument,underlying,price,quantity,premium,fee,ss_bb'
    conn, cur = db_cur()
    ss_bb = dict['ss_bb']
    created_by = dict['created_by']
    consolid_trd = dict['consolid_trd']
    manual_trd = dict['gen_manual_trd']
    if consolid_trd == "Y":
        # Consolidated runs write to the consolidated file name instead.
        dict['fileNameTrd'] = dict['fileNameCon']
    startDateArr = dict['startdate'].split('/')
    posdate = ael.date_from_ymd(int(startDateArr[2]), int(startDateArr[1]), int(startDateArr[0]))
    endDateArr = dict['enddate'].split('/')
    endDate = ael.date_from_ymd(int(endDateArr[2]), int(endDateArr[1]), int(endDateArr[0]))
    posdate = check_banking_day(posdate)
    endDate = check_banking_day(endDate)
    while posdate <= endDate:
        posdatetp1 = posdate.add_days(1)
        posdatetp1 = check_banking_day(posdatetp1)
        # Acquirers
        acq_array_list = dict['acq']
        acq_list = ''
        for acq in acq_array_list:
            if acq_list == '':
                acq_list = "'" + acq + "'"
            else:
                acq_list = acq_list + ",'" + acq + "'"
        # Product Types
        prod_type_list = dict['prd']
        ptype_list = ''
        for ptype in prod_type_list:
            if ptype_list == '':
                ptype_list = "'" + ptype + "'"
            else:
                ptype_list = ptype_list + ",'" + ptype+ "'"
        # Expand portfolios into child portfolios for the SQL 'in' list.
        portfolios = dict['pfs']
        portfolioList2 = []
        pf_list = ''
        portfolioList2.extend(portfolios)
        for port in portfolioList2:
            prfid = port
            pfarr = []
            pPf = ael.Portfolio[prfid]
            HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
            if len(pfarr) > 0:
                for pf in pfarr:
                    if len(pf_list) != 0:
                        pf_list = pf_list + ','
                    pf_list = pf_list + "'" + pf + "'"
            else:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + prfid + "'"
        strSql = """
        select t.trdnbr
        from instrument i, trade t, party acq, portfolio pf, user u
        where i.insaddr = t.insaddr
        and u.usrnbr = t.creat_usrnbr
        and t.status not in ('Void', 'Simulated')
        and t.acquirer_ptynbr = acq.ptynbr
        and t.prfnbr = pf.prfnbr
        and acq.ptyid in (@acquirer_list)
        and (i.exp_day >= '@dt' or i.exp_day = '0000-01-01')
        and t.time < '@d_tp1'
        and i.instype in (@ptype_list)
        and pf.prfid in (@portfolio_list)
        """
        # Optional filters driven by the dialog settings.
        if ss_bb == "Y":
            strSql = strSql + """ and (add_info(t, 'Short Sell') = 'Yes' or add_info(t, 'Buy Back') = 'Yes') """
        if manual_trd == "Y":
            strSql = strSql + """ and ( (add_info(t, 'Manual Trade') = 'Cross' ) or (add_info(t, 'Manual Trade') = 'Give Up' ) or (add_info(t, 'Manual Trade') = 'Take Up' ) ) """
        if dict['by_trader'] != "":
            strSql = strSql + """ and (add_info(t, 'horizon User Id') = '%s' ) """ % dict['by_trader']
        if created_by != "":
            strSql = strSql + """ and u.userid = '%s' """ % (created_by)
        strSql = strSql.replace('@acquirer_list', acq_list)
        strSql = strSql.replace('@portfolio_list', pf_list)
        strSql = strSql.replace('@d_tp1', (endDate.add_days(1).to_string('%Y-%m-%d') if consolid_trd == "Y" else posdatetp1.to_string('%Y-%m-%d') ) )
        strSql = strSql.replace('@dt', posdate.to_string('%Y-%m-%d'))
        strSql = strSql.replace('@ptype_list', ptype_list)
        #print strSql
        # Store the query into the shared SQL-query text object so the
        # trade filter picks it up.
        trade_filter = dict['tfs']
        tobject = ael.TextObject.read('type="SQL Query" and name="%s"' % ("tf_edd_account_jorunal_qry"))
        tobject_c = tobject.clone()
        tobject_c.set_text(strSql)
        tobject_c.commit()
        ael.poll()
        print "Exporting " + posdate.to_string('%Y-%m-%d')
        account_journal.trd_records(cur, strSql, posdate, dict)
        # Consolidated mode processes the whole range in one pass.
        posdate = endDate.add_days(1) if consolid_trd == "Y" else posdatetp1
    print "Finished"
"repo_name": "frederick623/HTI",
"path": "position_monitor/trade_record_dump.py",
"copies": "1",
"size": "10535",
"license": "apache-2.0",
"hash": 1223940939909537300,
"line_mean": 34.9590443686,
"line_max": 328,
"alpha_frac": 0.5383958234,
"autogenerated": false,
"ratio": 3.193391936950591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4231787760350591,
"avg_score": null,
"num_lines": null
} |
# 2017-07-14: also handles days that have more than one blog post.
# Flat script: prompts for a Keyakizaka46 member and a start month, scrapes one
# month of blog posts from the official site, and writes them into an .xlsx
# workbook (one worksheet row per line of body text).
import urllib.request
from bs4 import BeautifulSoup
import xlsxwriter
#def show_input():
print('01:石森 虹花')
print('02:今泉 佑唯')
print('03:上村 莉菜')
print('04:尾関 梨香')
print('05:織田 奈那')
print('06:小池 美波')
print('07:小林 由依')
print('08:齋藤 冬優花')
print('09:佐藤 詩織')
print('10:志田 愛佳')
print('11:菅井 友香')
print('12:鈴本 美愉')
print('13:長沢 菜々香')
print('14:土生 瑞穂')
print('15:原田 葵')
print('16:欠番')
print('17:平手 友梨奈')
print('18:守屋 茜')
print('19:米谷 奈々未')
print('20:渡辺 梨加')
print('21:渡邉 理佐')
print('22:長濱 ねる')
print('23:井口 眞緒')
print('24:潮 紗理菜')
print('25:柿崎 芽実')
print('26:影山 優佳')
print('27:加藤 史帆')
print('28:齊藤 京子')
print('29:佐々木 久美')
print('30:佐々木 美玲')
print('31:高瀬 愛奈')
print('32:高本 彩花')
print('33:東村 芽依')
print('対応する番号を入力してください 例:26')
member_id = input('>>> ')
print('取得するデータの開始年月を入力してください 例:20170601 ※01は固定です')
request_date = input('>>> ')
# Returns the member number and date used to build the request URL
#return member_id, request_date
# Create the workbook and worksheet
wb = xlsxwriter.Workbook("blog_data{0}.xlsx".format(member_id))
# Use the Meiryo UI font for ordinary cells
meiryo_format = wb.add_format()
meiryo_format.set_font_name('Meiryo UI')
# Cell format for hyperlinks
link_format = wb.add_format({'color': 'blue', 'underline': 1})
ws = wb.add_worksheet("blog_data")
j = 1
# Fetch one month of posts in a loop
# NOTE(review): always iterates days 1..31 — shorter months simply hit the
# AttributeError fallback below for the non-existent days; confirm acceptable.
for i in range(1,31+1):
    print('{0}{1:02d}のブログ情報を取得しています...'.format(request_date[0:6], i))
    try:
        # URL to fetch (per-member, per-day blog listing)
        url = "http://www.keyakizaka46.com/s/k46o/diary/member/list?ima=0000&ct={0}&dy={1}{2:02d}".format(member_id, request_date[0:6], i)
        # Fetch the URL; raw HTML comes back
        html = urllib.request.urlopen(url)
        # Parse the HTML with BeautifulSoup
        soup = BeautifulSoup(html, "html.parser")
        # Collect every title element on the page
        dayly_titles = soup.find_all(class_='box-ttl')
        title = []
        # Break the title elements apart (usually 2-3 of them)
        for blog_title in dayly_titles:
            # Build a list of title strings from the individual elements
            title.append(blog_title.find('a').getText())
        # Extract the box-article elements that hold the post bodies
        dayly_blogs = soup.find_all(class_='box-article')
        # Counter for days with more than one post (indexes into `title`)
        blog_count = 0
        # Process each box-article element in turn
        for main_messages in dayly_blogs:
            # Emit the body one line at a time (one row per tag-enclosed element)
            for string in main_messages.strings:
                # Column A: the post date (Meiryo UI format)
                ws.write('A{0}'.format(j), '{0}/{1}/{2:02d}'.format(request_date[0:4], request_date[4:6], i), meiryo_format)
                # Column B: the post title as a hyperlink to the listing page
                ws.write_url('B{0}'.format(j), url, link_format, title[blog_count])
                # Column C: the body text (Meiryo UI format)
                ws.write('C{0}'.format(j), string, meiryo_format)
                j += 1
            blog_count += 1
    # Days without a post always raise AttributeError, so ignore the error
    except AttributeError:
        print('{0}{1:02d}はブログが書かれていません'.format(request_date[0:6], i))
# Close the workbook (flushes the file to disk)
wb.close()
#print('データの取得が完了しました!\n')
#for tag in main_message:
#    tag.extract()
#print(main_message.getText())
#print(main_message.string)
# (old experiment) emit the body one line at a time
#for string in main_message.strings:
#    print((string))
# (old experiment) strip tags with a regex
#untag_message = re.compile(r"<[^>]*?>")
#untag_message.sub("", main_message)
#print(untag_message)
| {
"repo_name": "connpy/DIY",
"path": "blog_parser/prototype/blog_parser.py",
"copies": "1",
"size": "4459",
"license": "mit",
"hash": 7491866306332680000,
"line_mean": 24.4765625,
"line_max": 138,
"alpha_frac": 0.6323213738,
"autogenerated": false,
"ratio": 1.6337675350701404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7719109980044397,
"avg_score": 0.009395785765148525,
"num_lines": 128
} |
# 20171115 Add Indicative Fee Column
import acm
import ael
import HTI_Util
import HTI_FeedTrade_EDD_Util
import fnmatch
import datetime
import os
import sys
import csv
import re
import sqlite3
import win32com.client
# ACM/AEL run dialog definition. Each row is one input field:
# [name, label, type, candidates, default, mandatory, multi-select,
#  description, callback, enabled].
ael_variables = [['asofdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \
                ['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTIFS - EDD,HTISEC - EDD', 1, 1, 'Acquirer(s)', None, 1], \
                ['counterparties', 'Counterparty(s)', 'string', HTI_Util.getAllParties(), None, 0, 1, 'Counterparty(s)', None, 1], \
                ['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), 'EDD Deltaone', 1, 1, 'Portfolio', None, 1], \
                ['currclspricemkt', 'Current Closing Price Market', 'string', None, 'Bloomberg_5PM', 1, 0, 'Current Closing Price Market', None, 1],
                ['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'Bloomberg_5PM_Cls', 1, 0, 'Historical Closing Price Market', None, 1],
                ['sbl_dir', 'SBL directory', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data', 1, 0, 'SBL directory', None, 1], \
                ['pd_file', 'Pending Trade File', 'string', None, 'Pending_Trades_Extract_MMDD.CSV', 1, 0, 'Pending Trade File', None, 1], \
                ['os_file', 'Outstanding Trade File', 'string', None, 'OS_Trades_Extract_MMDD.CSV', 1, 0, 'Outstanding Trade File', None, 1], \
                ['sbl_output', 'SBL Folder', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\SBL Borrow Report\\[ptyid]', 1, 0, 'SBL Folder', None, 1], \
                ['sbl_csv', 'SBL Output csv', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\SBL Borrow Report\\client\\client_sbl_YYYYMMDD.csv', 1, 0, 'SBL Output csv', None, 1], \
                ['template_file', 'Template File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\SBL Borrow Report\\sbl_loan_template.xlsx', 1, 0, 'Template File', None, 1], \
                ['base_ccy', 'Base Ccy', 'string', None, 'HKD', 1, 0, 'Base Ccy', None, 1]]
# Column positions used when assembling the report rows.
RIC_CODE_POS = 5            # index of the RIC-code column dropped before the CSV export
LOAN_COL_OFFSET = 8         # column offset (from the first data column) of the loan-value total
DAILY_FEE_COL_OFFSET = 10   # column offset of the daily-fee total
def files_lookup(tgt_dir, pattern, recur_list=False, sub_folder=False, most_recent=True):
    """Find files under ``tgt_dir`` whose basename matches ``pattern``.

    :param tgt_dir: directory to scan
    :param pattern: fnmatch-style pattern applied to basenames
    :param recur_list: when True return the whole sorted list; otherwise only
        the first entry (raises IndexError when nothing matched)
    :param sub_folder: when True also descend into subdirectories
    :param most_recent: sort descending (True) or ascending (False)
    :return: list of full paths, or a single full path when recur_list is False
    """
    filepath_arr = []
    for fi in os.listdir(tgt_dir):
        full_path = os.path.join(tgt_dir, fi)
        if sub_folder and os.path.isdir(full_path):
            # BUGFIX: always request the list form from the recursive call.
            # With recur_list=False the recursion returned a plain string and
            # ``+=`` then spliced it in character by character.
            filepath_arr += files_lookup(full_path, pattern, True, sub_folder, most_recent)
        if fnmatch.fnmatch(fi, pattern):
            filepath_arr.append(full_path)
    filepath_arr.sort(reverse=most_recent)
    if recur_list:
        return filepath_arr
    else:
        return filepath_arr[0]
def question_marks(st):
    """Return a comma-separated '?' placeholder string, one per column in *st*.

    *st* is a comma-separated header; the result plugs into an SQL INSERT.
    """
    return ",".join("?" for _ in st.split(","))
def db_cur(source=":memory:"):
    """Open an SQLite database and return ``(connection, cursor)``.

    Rows come back as :class:`sqlite3.Row` so columns can be read by name;
    declared-type detection is enabled for adapter/converter support.
    """
    # Register the adapter
    # sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
    # Register the converter
    # sqlite3.register_converter("DECTEXT", convert_decimal)
    connection = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
    connection.row_factory = sqlite3.Row
    return connection, connection.cursor()
def create_tbl(cur, tbl_name, header, arr=None, index_arr=None):
    """Create *tbl_name* if absent and optionally bulk-insert rows.

    The first ``id`` column in *header* becomes the PRIMARY KEY; *index_arr*
    columns get secondary indexes (only when the table is newly created).
    """
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    already_there = cur.fetchone()[0] != 0
    if not already_there:
        cols = header.replace("id,", "id PRIMARY KEY,")
        cur.execute("CREATE TABLE " + tbl_name + " (" + cols + " );")
        for index in (index_arr or []):
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    if arr is not None:
        cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + question_marks(header) + ")", arr)
    return
def csv_to_arr(csv_file, start=0, end=0, has_header=True, delim=',', ignore_col=""):
    """Parse a delimited text file into a header string and row list.

    :param csv_file: path of the file to read
    :param start: row index of the header (or of the first kept row when
        ``has_header`` is False)
    :param end: exclusive end row index; 0 means "to the end of the file"
    :param has_header: when True return ``(cleaned_header, data_rows)``,
        otherwise just ``rows[start:]``
    :param delim: field delimiter
    :param ignore_col: accepted for interface compatibility; currently unused
    :return: ``(header, rows)`` or ``rows``
    """
    arr = []
    # 'r' instead of the legacy 'rU': universal newlines are the default in
    # Python 3 and the 'U' mode flag was removed in Python 3.11.
    with open(csv_file, 'r') as f:
        reader = csv.reader(f, delimiter=delim)
        arr = list(reader)
    header = ""
    if has_header:
        header = ','.join(arr[start])
        if end == 0:
            arr = arr[start+1:]
        else:
            arr = arr[start+1:end]
        # Strip decoration characters (digits, spaces, punctuation) so the
        # header can be reused as SQL column names downstream.
        return re.sub(r"[\*\.#/\$%\d\" ]", "", header), arr
    else:
        return arr[start:]
def getInstrumentRICCode(acm_ins):
    """Build a Reuters RIC ('LOCALCODE.EXCH') from an ACM instrument.

    Returns '' when the instrument is missing or either the local exchange
    code or the MIC add-info is not populated.
    """
    if acm_ins == None:
        return ''
    local_code = acm_ins.AdditionalInfo().Local_Exchange_Code()
    mic = acm_ins.AdditionalInfo().MIC()
    if local_code == None or mic == None:
        return ''
    return map_local_exch_code(mic, local_code) + "." + map_ric_exch_code(mic)
def map_ric_exch_code(mic):
RIC_exch_code = ''
if mic in ('XHKG','XHKF'):
RIC_exch_code = 'HK'
elif mic in ('SHSC', 'XSHG'):
RIC_exch_code = 'SS'
elif mic in ('SZSC', 'XSHE'):
RIC_exch_code = 'SZ'
elif mic in ('XTKS'):
RIC_exch_code = 'T'
elif mic in ('APXL'):
RIC_exch_code = 'AX'
elif mic in ('XTAI'):
RIC_exch_code = 'TW'
elif mic in ('XNAS'):
RIC_exch_code = 'OQ'
elif mic in ('XNYS'):
RIC_exch_code = 'N'
return RIC_exch_code
def map_local_exch_code(mic, localexchangecode):
    """Normalize a local exchange code for the given MIC.

    Hong Kong codes are zero-padded to 4 digits, mainland-China codes to
    6 digits; every other market's code passes through untouched.
    """
    if mic in ('XHKG', 'XHKF'):
        return '{0:04d}'.format(int(localexchangecode))
    if mic in ('SHSC', 'SZSC', 'XSHG', 'XSHE'):
        return '{0:06d}'.format(int(localexchangecode))
    return localexchangecode
def arr_to_xlsx_template(filename, template, info_arr, os_arr, pd_arr):
    """Fill the SBL client statement template via Excel COM and save it.

    :param filename: output .xlsx path
    :param template: path of the template workbook to open
    :param info_arr: dict of client fields; template cells holding
        ``[placeholder]`` text are replaced by ``info_arr[placeholder]``
    :param os_arr: outstanding borrow/loan rows (cells starting with '='
        are written as live formulas)
    :param pd_arr: pending borrow/loan rows, same convention
    """
    xl = win32com.client.Dispatch('Excel.Application')
    # xl.ScreenUpdating = False
    wb = xl.Workbooks.Open(template)
    ws = wb.Worksheets(1)
    rng = ws.UsedRange
    # Number formats: K/M are money columns, G is a quantity column.
    ws.Range("K:K").NumberFormat = '#,##0.00'
    ws.Range("M:M").NumberFormat = '#,##0.00'
    ws.Range("G:G").NumberFormat = '#,##0'
    # Substitute every "[key]" placeholder cell in the template header area.
    for i in range(rng.Row, rng.Row+rng.Rows.Count):
        for j in range(rng.Column, rng.Column+rng.Columns.Count+1):
            cell_val = ws.Cells(i, j).Value
            if cell_val != None and cell_val[0] == '[' and cell_val[-1] == ']':
                ws.Cells(i, j).Value = info_arr[cell_val[1:-1]]
    # --- Outstanding section, appended one row below the used range ---
    row_offset = rng.Row+rng.Rows.Count+1
    col_offset = rng.Column+1
    # NOTE(review): "Oustanding" typo is kept verbatim — it is user-visible
    # output; fix it together with the template if desired.
    ws.Cells(row_offset, col_offset-1).Value = "Oustanding Borrow/Loan"
    ws.Cells(row_offset, col_offset-1).Font.Bold = True
    for i, row in enumerate(os_arr):
        for j, cell in enumerate(row):
            if str(cell) != '' and str(cell)[0] == '=':
                ws.Cells(i+row_offset, j+col_offset).Formula = cell
            else:
                ws.Cells(i+row_offset, j+col_offset).Value = cell
    row_offset_prev = row_offset
    rng = ws.UsedRange
    row_offset = rng.Row+rng.Rows.Count
    col_offset = rng.Column+1
    # Only add SUM totals when at least one data row was written.
    if row_offset > row_offset_prev+1:
        ws.Cells(row_offset, col_offset+LOAN_COL_OFFSET).Formula = "=SUM(K" + str(row_offset_prev) + ":K" + str(row_offset-1) + ")"
        ws.Cells(row_offset, col_offset+LOAN_COL_OFFSET).Font.Bold = True
        ws.Cells(row_offset, col_offset+LOAN_COL_OFFSET).Borders(8).LineStyle = 1
        ws.Cells(row_offset, col_offset+DAILY_FEE_COL_OFFSET).Formula = "=SUM(M" + str(row_offset_prev) + ":M" + str(row_offset-1) + ")"
        ws.Cells(row_offset, col_offset+DAILY_FEE_COL_OFFSET).Font.Bold = True
        ws.Cells(row_offset, col_offset+DAILY_FEE_COL_OFFSET).Borders(8).LineStyle = 1
    # --- Pending section, two rows below the outstanding section ---
    row_offset = rng.Row+rng.Rows.Count+2
    ws.Cells(row_offset, col_offset-1).Value = "Pending Borrow/Loan"
    ws.Cells(row_offset, col_offset-1).Font.Bold = True
    for i, row in enumerate(pd_arr):
        for j, cell in enumerate(row):
            if str(cell) != '' and str(cell)[0] == '=':
                ws.Cells(i+row_offset, j+col_offset).Formula = cell
            else:
                ws.Cells(i+row_offset, j+col_offset).Value = cell
    row_offset_prev = row_offset
    rng = ws.UsedRange
    row_offset = rng.Row+rng.Rows.Count
    col_offset = rng.Column+1
    # Loan-value total for the pending section (no daily fee — fees are blank
    # for pending trades).
    if row_offset > row_offset_prev+1:
        ws.Cells(row_offset, col_offset+LOAN_COL_OFFSET).Formula = "=SUM(K" + str(row_offset_prev) + ":K" + str(row_offset-1) + ")"
        ws.Cells(row_offset, col_offset+LOAN_COL_OFFSET).Font.Bold = True
        ws.Cells(row_offset, col_offset+LOAN_COL_OFFSET).Borders(8).LineStyle = 1
    # Disclaimer footer (typo "refernce" kept — user-visible output).
    row_offset = rng.Row+rng.Rows.Count+2
    ws.Cells(row_offset, col_offset-1).Value = "This statement is for your refernce only. The valuations contained herein are not related to your portfolio except for the stock borrowing fee, all or part of which will be chargeable."
    # Suppress the overwrite prompt while saving.
    xl.DisplayAlerts = False
    wb.SaveAs(filename)
    xl.DisplayAlerts = True
    wb.Close(True)
    return
def arr_to_xlsx(filename, header, arr):
    """Write *header* and *arr* rows into a fresh workbook via Excel COM.

    Cells whose text starts with '=' are written as live formulas; the
    workbook is auto-fitted, saved to *filename* and closed.
    """
    excel = win32com.client.Dispatch('Excel.Application')
    book = excel.Workbooks.Add()
    sheet = book.Worksheets(1)
    for col, title in enumerate(header.split(',')):
        sheet.Cells(1, col + 1).Value = title
    for r, row in enumerate(arr):
        for c, cell in enumerate(row):
            target = sheet.Cells(r + 2, c + 1)
            if str(cell) != '' and str(cell)[0] == '=':
                target.Formula = cell
            else:
                target.Value = cell
    sheet.Columns.AutoFit()
    excel.DisplayAlerts = False
    book.SaveAs(filename)
    excel.DisplayAlerts = True
    book.Close(True)
    return
def arr_to_csv(file_name, header, data_arr):
    """Dump *header* plus *data_arr* rows to *file_name* as fully-quoted CSV.

    Every cell is stringified before writing. (Binary 'wb' mode is the
    Python-2 csv convention this codebase uses.)
    """
    with open(file_name, 'wb') as csv_file:
        wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
        wr.writerow(header.split(','))
        for data_row in data_arr:
            wr.writerow([str(ele) for ele in data_row])
    return
def get_price(dt, stock_code, currclspricemkt="Bloomberg_5PM", histclspricemkt="Bloomberg_5PM_Cls"):
    """Return the instrument's price for date *dt* ('YYYYMMDD' string).

    Uses the live 'Last' price from *currclspricemkt* when *dt* is today and
    the historical 'Close' from *histclspricemkt* otherwise. Falls back to
    0.0 when no price can be fetched.
    """
    ael_dt = ael.date_from_ymd(int(dt[0:4]), int(dt[4:6]), int(dt[6:8]))
    ael_ins = ael.Instrument[str(stock_code)]
    try:
        if ael_dt == ael.date_today():
            cls_price = ael_ins.used_price(ael_dt, ael_ins.curr.insid, 'Last', 0, currclspricemkt)
        else:
            cls_price = ael_ins.used_price(ael_dt, ael_ins.curr.insid, 'Close', 0, histclspricemkt)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; a missing price still degrades to 0.0.
        #cls_price = ael_ins.used_price(dt, ael_ins.curr.insid, 'Last', 0, currclspricemkt)
        cls_price = 0.0
    return cls_price
def sbl_daily_report(cur, sbl_code, client_name):
    """Collect a client's outstanding and pending SBL rows from the SQLite
    staging tables ('os' and 'pd') loaded from the daily extract files.

    :param cur: sqlite3 cursor over the staging database
    :param sbl_code: client's SBL counterparty code (CPTY column)
    :param client_name: client name; currently unused inside this function
    :return: (os_arr, pd_arr) — row lists ready for the statement template:
        [trade date, settle date, stock code, RIC, qty, ccy, rate, status,
         loan value, min fee, daily fee]
    """
    os_arr = []
    pd_arr = []
    # --- outstanding positions ---
    cur.execute("select * from os where STOCK <> '' and CPTY = ?", (sbl_code,))
    for os_row in cur.fetchall():
        trd_ref = str(os_row["BGNREF"])  # NOTE(review): fetched but not emitted
        trd_dt = os_row["TRADE"]
        sett_dt = os_row["SSET_DT"]
        # trd_dt = datetime.datetime.strptime(os_row["TRADE"], "%Y%m%d").date().strftime("%d/%m/%Y")
        ccy = str(os_row["LNCUR"])
        # Bloomberg-style ticker, e.g. "5 HK Equity" (first two ccy letters).
        stock_code = os_row["STOCK"] + " " + ccy[0:2] + " Equity"
        acm_ins = acm.FInstrument[str(stock_code)]
        ric_code = getInstrumentRICCode(acm_ins)
        qty = str(os_row["QTY"])
        rate = str(os_row["LNRATE"]) + '%'
        loan_return = "LOAN" if str(os_row["PTYPE"]) == "B" else "BORROW"
        mkt_val = os_row["LNVAL"]
        min_fee = os_row["MIN_FEE"]
        coll_mgn = os_row["COLLMGN"] + '%'  # NOTE(review): fetched but not emitted
        # Day-count convention: HKD accrues on 365 days, everything else 360.
        days_of_year = 365 if ccy == "HKD" else 360
        daily_fee = round(float(mkt_val)*float(os_row["LNRATE"])/100/days_of_year, 2)
        os_arr.append([trd_dt, sett_dt, stock_code, ric_code, qty, ccy, rate, loan_return, mkt_val, min_fee, daily_fee])
    # --- pending trades (same shape, daily fee intentionally blank) ---
    cur.execute("select * from pd where CPTY = ?", (sbl_code,))
    for pd_row in cur.fetchall():
        trd_ref = str(pd_row["BGNREF"])
        trd_dt = pd_row["TRADE"]
        sett_dt = pd_row["SSET_DT"]
        # trd_dt = datetime.datetime.strptime(pd_row["TRADE"], "%Y%m%d").date().strftime("%d/%m/%Y")
        ccy = str(pd_row["LNCUR"])
        stock_code = pd_row["STOCK"] + " " + ccy[0:2] + " Equity"
        acm_ins = acm.FInstrument[str(stock_code)]
        ric_code = getInstrumentRICCode(acm_ins)
        qty = str(pd_row["QTY"])
        rate = str(pd_row["LNRATE"]) + '%'
        # Status 'R' marks a return; everything else is a new trade.
        loan_return = ("RETURN" if str(pd_row["STATUS"]) == "R" else "NEW") + ' ' + ("LOAN" if str(pd_row["PTYPE"]) == "B" else "BORROW")
        mkt_val = pd_row["LNVAL"]
        min_fee = pd_row["MIN_FEE"]
        coll_mgn = pd_row["COLLMGN"] + '%'
        days_of_year = 365 if ccy == "HKD" else 360
        daily_fee = ""
        pd_arr.append([trd_dt, sett_dt, stock_code, ric_code, qty, ccy, rate, loan_return, mkt_val, min_fee, daily_fee])
    return os_arr, pd_arr
def getDpsNos(acm_pty, acq_list):
    """Return the client's open swap contract numbers as a comma-joined string.

    Queries open TotalReturnSwap trades (SP_TRS / SP_Portfolio Swap strategy,
    position not closed) for the counterparty and collects the distinct
    'External Reference' add-info values.

    :param acm_pty: ACM party object for the client
    :param acq_list: SQL-ready quoted acquirer list, e.g. "'A','B'"
    :return: "ref1, ref2, ..." or '' when there are none
    """
    dpsNos = ''
    strSql = """
        select t.trdnbr, add_info(t, 'External Reference') 'ext_ref'
        into tbl_ext_ref
        from instrument i, trade t, party p, instrument c, party a
        where i.insaddr = t.insaddr
        and i.instype = 'TotalReturnSwap'
        and t.counterparty_ptynbr = p.ptynbr
        and p.ptyid = '%s'
        and t.status not in ('Void', 'Simulated')
        and i.curr = c.insaddr
        and t.acquirer_ptynbr = a.ptynbr
        and a.ptyid in (%s)
        and add_info(t, 'Product_Strategy') in ('SP_TRS', 'SP_Portfolio Swap')
        and add_info(t, 'Trd Pos Closed') ~= 'Yes'
        select distinct ext_ref
        from tbl_ext_ref
    """ % (acm_pty.Name().strip(), acq_list)
    print strSql
    rs = ael.asql(strSql)
    columns, buf = rs
    # Flatten the result set into one comma-separated string.
    for table in buf:
        for row in table:
            if dpsNos == '':
                dpsNos = row[0]
            else:
                dpsNos = dpsNos + ', ' + row[0]
    return dpsNos
def ael_main(dict):
    """Entry point invoked by the ACM run dialog (``ael_variables``).

    Loads the daily SBL extract files into an in-memory SQLite database,
    finds every counterparty with open swaps, renders one statement workbook
    per client from the Excel template, and dumps a consolidated CSV.

    :param dict: dialog-value mapping keyed by the ``ael_variables`` names.
        (NOTE(review): the parameter shadows the builtin ``dict`` — kept for
        interface compatibility with the ACM runner.)
    """
    asofdate = dict['asofdate']
    if asofdate == 'Today':
        asofdate = ael.date_today()
    asofdate = ael.date(asofdate)
    # --- Portfolios: expand parent portfolios into their children and build
    # a quoted, comma-separated SQL list ---
    portfolios = dict['portfolio']
    portfolioList2 = []
    pf_list = ''
    portfolioList2.extend(portfolios)
    for port in portfolioList2:
        prfid = port
        pfarr = []
        pPf = ael.Portfolio[prfid]
        HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
        if len(pfarr) > 0:
            for pf in pfarr:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + pf + "'"
        else:
            # Leaf portfolio: use its own id.
            if len(pf_list) != 0:
                pf_list = pf_list + ','
            pf_list = pf_list + "'" + prfid + "'"
    # --- Acquirers: quoted SQL list ---
    acq_array_list = dict['acquirers']
    acq_list = ''
    for acq in acq_array_list:
        if acq_list == '':
            acq_list = "'" + acq + "'"
        else:
            acq_list = acq_list + ",'" + acq + "'"
    # --- Counterparties (optional filter): quoted SQL list ---
    pty_array_list = dict['counterparties']
    pty_list = ''
    for pty in pty_array_list:
        if pty_list == '':
            pty_list = "'" + pty + "'"
        else:
            pty_list = pty_list + ",'" + pty + "'"
    currclspricemkt = dict['currclspricemkt']
    histclspricemkt = dict['histclspricemkt']
    base_ccy = dict['base_ccy']
    # Daily extract files are suffixed with the month/day of the report date.
    sbl_dir = dict["sbl_dir"]
    os_file = os.path.join(sbl_dir, str(dict["os_file"]).replace("MMDD", asofdate.to_string('%m%d')))
    pd_file = os.path.join(sbl_dir, str(dict["pd_file"]).replace("MMDD", asofdate.to_string('%m%d')))
    print os_file
    print pd_file
    # Tab-delimited extracts; header is on the second row (start=1).
    os_header, os_arr = csv_to_arr(os_file, 1, -1, True, '\t')
    pd_header, pd_arr = csv_to_arr(pd_file, 1, -1, True, '\t')
    conn, cur = db_cur()
    create_tbl(cur, 'os', os_header, os_arr)
    # Rename the first duplicated "BL" column so the pd table can be created.
    pd_header = pd_header.replace("BL", "BL_1", 1)
    create_tbl(cur, 'pd', pd_header, pd_arr)
    template_file = dict["template_file"]
    # Distinct counterparties that have open swaps with an external reference.
    strSql = """select c.ptyid, add_info(t, 'External Reference') 'external_ref'
        into externalRef
        from instrument i, trade t, party a, portfolio pf, leg l, party c
        where i.insaddr = t.insaddr
        and t.status not in ('Void', 'Simulated')
        and i.instype = 'TotalReturnSwap'
        and t.acquirer_ptynbr = a.ptynbr
        and a.ptyid in (@accquirer_list)
        and pf.prfid in (@portfolio_list)
        and t.time < '%s'
        and i.insaddr = l.insaddr and l.payleg = 'No'
        and t.counterparty_ptynbr = c.ptynbr
        @counterparty_list_sql
        select distinct ptyid
        from externalRef
        where external_ref ~= ''
    """ % (asofdate.add_days(1))
    strSql = strSql.replace('@portfolio_list', pf_list)
    strSql = strSql.replace('@accquirer_list', acq_list)
    if pty_list != '':
        counterparty_list_sql = 'and c.ptyid in (@counterparty_list)'
        counterparty_list_sql = counterparty_list_sql.replace("@counterparty_list", pty_list)
        strSql = strSql.replace("@counterparty_list_sql", counterparty_list_sql)
    else:
        strSql = strSql.replace("@counterparty_list_sql", ' ')
    print strSql
    rs = ael.asql(strSql)
    columns, buf = rs
    result_arr = []
    for table in buf:
        for row in table:
            client_dict = {}
            ptyid = row[0]
            acm_pty = acm.FParty[ptyid]
            acm_pty_info = acm_pty.AdditionalInfo()
            # Fields substituted into the "[placeholder]" cells of the template.
            client_dict["Name"] = acm_pty.Fullname()
            client_dict["Address"] = acm_pty.Address()
            client_dict["Address2"] = acm_pty.Address2()
            client_dict["Attention"] = "Attn: " + acm_pty.Attention()
            client_dict["Telephone"] = acm_pty.Telephone()
            client_dict["as_of"] = asofdate.to_string('%Y-%m-%d')
            client_dict["contract_no"] = getDpsNos(acm_pty, acq_list)
            print str(acm_pty.Name()) + " " + str(acm_pty_info.SBL_Code())
            # One output folder per client.
            sbl_output = dict["sbl_output"]
            sbl_dir = sbl_output.replace("[ptyid]", str(acm_pty.Name()))
            if not os.path.exists(sbl_dir):
                os.makedirs(sbl_dir)
            client_file = os.path.join(sbl_dir, "sbl_YYYYMMMDD.xlsx".replace("YYYYMMMDD", asofdate.to_string('%Y%m%d')))
            os_arr, pd_arr = sbl_daily_report(cur, str(acm_pty_info.SBL_Code()), str(acm_pty.Name()))
            # Consolidated rows: prefix client id/name, append closing price.
            # NOTE(review): under Python 2 the comprehension's ``row`` leaks
            # and clobbers the outer loop variable after this point.
            result_arr += [ [ acm_pty_info.SBL_Code(), acm_pty.Name() ] + row[0:10] + [ get_price(row[0], row[2]) ] for row in os_arr ]
            result_arr += [ [ acm_pty_info.SBL_Code(), acm_pty.Name() ] + row[0:10] + [ get_price(row[0], row[2]) ] for row in pd_arr ]
            # arr_to_xlsx(client_file, "Client Code,Client Name,Trade Reference,Trade Date,Stock Code,QTY,Rate,Status,Loan/Return", sbl_arr)
            arr_to_xlsx_template(client_file, template_file, client_dict, os_arr, pd_arr)
    # Drop the RIC column before writing the consolidated CSV.
    [ row.pop(RIC_CODE_POS) for row in result_arr ]
    arr_to_csv(dict["sbl_csv"].replace("YYYYMMDD", asofdate.to_string('%Y%m%d')), "SBL code,Client,Trade Date,Settlement Date,Stock Code,Quantity,Currency,Rate,Status,Loan Value,Min Fee,Closing Price", result_arr)
    print "Finished"
return | {
"repo_name": "frederick623/pb",
"path": "sbl_daily_report/HTI_SBLDailyReport.py",
"copies": "2",
"size": "17147",
"license": "apache-2.0",
"hash": -3280978081480082000,
"line_mean": 33.6424242424,
"line_max": 231,
"alpha_frac": 0.642328104,
"autogenerated": false,
"ratio": 2.5190245335683854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41613526375683857,
"avg_score": null,
"num_lines": null
} |
# 2017-2020 by Gregor Engberding , MIT License
import logging
import sys
from PySide2.QtCore import QAbstractItemModel, QModelIndex, Qt, QJsonDocument, QJsonParseError
from PySide2.QtWidgets import QApplication, QTreeView
DEMO_JSON = b"""{
"firstName": "John",
"lastName": "Smith",
"age": 25,
"address":
{
"streetAddress": "21 2nd Street",
"city": "New York",
"state": "NY",
"postalCode": "10021"
},
"phoneNumber":
[
{
"type": "home",
"number": "212 555-1234"
},
{
"type": "fax",
"number": "646 555-4567"
}
]
}"""
DEMO_DICT = {
"firstName" : "John",
"lastName" : "Smith",
"age" : 25,
"address" :
{
"streetAddress": "21 2nd Street",
"city" : "New York",
"state" : "NY",
"postalCode" : "10021"
},
"phoneNumber":
[
{
"type" : "home",
"number": "212 555-1234"
},
{
"type" : "fax",
"number": "646 555-4567"
}
]
}
class QJsonTreeItem:
    """A tree node with parent and children, for display by QJsonModel.

    Fixes over the previous version:
    * ``typename`` is now set on container nodes as well (it used to stay
      ``None`` for dicts/lists, so ``as_dict`` returned ``None`` for any
      container and the model's "type" column was empty for them);
    * constructing with ``QJsonTreeItem(value=...)`` now actually populates
      the node (the built tree used to be discarded).
    """

    def __init__(self, parent=None, value=None):
        self.parent = parent      # parent QJsonTreeItem, or None for the root
        self.children = []        # child QJsonTreeItem nodes, in order
        self.value = None         # leaf value (None for containers)
        self.key = None           # dict key or list index under the parent
        self.typename = None      # python type name of the wrapped value
        if value is not None:
            # Adopt the attributes of the tree built from `value` so that a
            # node constructed with a value is immediately usable.
            built = self.init_tree(value, parent)
            self.key = built.key
            self.value = built.value
            self.typename = built.typename
            self.children = built.children
            for child in self.children:
                child.parent = self

    def row(self):
        """Special for Qt: this node's index in its parent's children.

        :return: own index in parent's children, or -1 for the root
        """
        if self.parent is not None:
            return self.parent.children.index(self)
        return -1

    def init_tree(self, value, parent=None):
        """Build and return a QJsonTreeItem tree representing *value*.

        :param value: the value to be inserted below parent
        :param parent: insert value below this parent; None means root
        :return: the root of the built subtree
        """
        root_item = QJsonTreeItem(parent=parent)
        root_item.key = "root"
        # Record the type on every node (containers included) so that both
        # as_dict and the model's "type" column work for dicts and lists.
        root_item.typename = type(value).__name__
        if isinstance(value, dict):
            for key, val in value.items():
                child = self.init_tree(val, root_item)
                child.key = key
                root_item.children.append(child)
        elif isinstance(value, list):
            for idx, val in enumerate(value):
                child = self.init_tree(val, root_item)
                child.key = idx
                root_item.children.append(child)
        else:
            root_item.value = value
        return root_item

    @property
    def as_dict(self):
        """Reconstruct the plain Python value this subtree represents."""
        if self.typename == "dict":
            return {child.key: child.as_dict for child in self.children}
        if self.typename == "list":
            return [child.as_dict for child in self.children]
        return self.value
class QJsonModel(QAbstractItemModel):
    """Qt item model exposing a JSON document to a QTreeView.

    Columns: 0 = key, 1 = value, 2 = Python type name. Accepts binary JSON,
    a plain dict, or a filename as input.
    """

    def __init__(self, parent=None, json_data=None):
        super().__init__(parent)
        self.document = None                     # underlying QJsonDocument
        self.root_item = QJsonTreeItem()         # invisible root of the tree
        self.headers = ["key", "value", "type"]  # horizontal header labels
        if json_data:
            self.update_data(json_data)

    def update_data(self, json_data):
        """Replace the model's contents with new data.

        :param json_data: binary JSON, a dict or a filename
        :raises ValueError: when json_data cannot be interpreted at all
        """
        error = QJsonParseError()
        if isinstance(json_data, dict):
            self.document = QJsonDocument.fromVariant(json_data)
        else:
            try:
                self.document = QJsonDocument.fromJson(json_data, error)
            except TypeError:
                # here the message is generated by Qt
                # FIXME Subscripted generics cannot be used with class and instance checks
                pass
        if self.document is not None:
            self.beginResetModel()
            if self.document.isArray():
                # BUGFIX: assign the rebuilt tree back to root_item. The
                # previous code discarded init_tree's return value here, so
                # top-level JSON arrays never showed up in the view.
                self.root_item = self.root_item.init_tree(list(self.document.array()))
            else:
                self.root_item = self.root_item.init_tree(self.document.object())
            self.endResetModel()
            return
        else:
            # Parsing failed — try to interpret json_data as a filename.
            if self.load_from_file(filename=json_data):
                return
        msg = f"Unable to load as JSON:{json_data}"
        logging.log(logging.ERROR, msg)
        raise ValueError(msg)

    def load_from_file(self, filename):
        """Load JSON content from *filename* into the model.

        :param filename: name of a json file
        :return: (bool) True=success, False=failed
        """
        if filename is None or filename is False:
            return False
        with open(filename, "rb") as file:
            if file is None:
                return False
            json_data = file.read()
            self.update_data(json_data)
        return True

    def data(self, index: QModelIndex, role: int = ...):
        """Return key/value/typename for the display and edit roles."""
        if not index.isValid():
            return None
        item = index.internalPointer()
        col = index.column()
        if role == Qt.DisplayRole:
            if col == 0:
                return item.key
            elif col == 1:
                value = item.value
                return value
            elif col == 2:
                return item.typename
        elif role == Qt.EditRole:
            if col == 0:
                return item.key
            elif col == 1:
                value = item.value
                return value
        return None

    def setData(self, index: QModelIndex, value, role: int = ...) -> bool:
        """Write an edited key (col 0) or value (col 1) back to the node."""
        if role == Qt.EditRole:
            col = index.column()
            item = index.internalPointer()
            if col == 0:
                item.key = value
                return True
            elif col == 1:
                # Keep the reported type in sync with the new value.
                item.value = value
                item.typename = type(value).__name__
                return True
        return False

    def headerData(self, section: int, orientation: Qt.Orientation, role: int = ...):
        """Horizontal headers come from self.headers; no vertical headers."""
        if role != Qt.DisplayRole:
            return None
        if orientation == Qt.Horizontal:
            return self.headers[section]
        return None

    def index(self, row: int, column: int, parent: QModelIndex = ...):
        """Create an index for the child at (row, column) under *parent*."""
        if not self.hasIndex(row, column, parent):
            return QModelIndex()
        if not parent.isValid():
            parent_item = self.root_item
        else:
            parent_item = parent.internalPointer()
        try:
            child_item = parent_item.children[row]
            return self.createIndex(row, column, child_item)
        except IndexError:
            return QModelIndex()

    def parent(self, index: QModelIndex):
        """Return the parent index; the invisible root maps to QModelIndex()."""
        if not index.isValid():
            return QModelIndex()
        child_item = index.internalPointer()
        parent_item = child_item.parent
        if parent_item == self.root_item:
            return QModelIndex()
        return self.createIndex(parent_item.row(), 0, parent_item)

    def rowCount(self, parent: QModelIndex = ...):
        """Number of children under *parent* (only column 0 has children)."""
        if parent.column() > 0:
            return 0
        if not parent.isValid():
            parent_item = self.root_item
        else:
            parent_item = parent.internalPointer()
        return len(parent_item.children)

    def columnCount(self, parent: QModelIndex = ...):
        """Always three columns: key, value, type."""
        return 3

    def flags(self, index: QModelIndex) -> Qt.ItemFlags:
        """Key and value columns are editable; the type column is read-only."""
        if not index.isValid():
            return Qt.NoItemFlags
        if index.column() != 2:
            return Qt.ItemIsEditable | super().flags(index)
        return super().flags(index)

    @property
    def as_dict(self):
        """The model's current contents as a plain Python value."""
        return self.root_item.as_dict
if __name__ == '__main__':
    # Demo: show a JSON file in a tree view and dump it back as a dict.
    app = QApplication(sys.argv)
    # model = QJsonModel(json_data=DEMO_JSON)
    # or use a dict as data-source
    # model = QJsonModel(json_data=DEMO_DICT)
    # or use a filename
    model = QJsonModel(json_data="json-data.json")
    view = QTreeView()
    view.setModel(model)
    view.show()
    print(f"Current data: {model.as_dict}")
    sys.exit(app.exec_())
| {
"repo_name": "GrxE/PyQJsonModel",
"path": "PyQtJsonModel.py",
"copies": "1",
"size": "8740",
"license": "mit",
"hash": 7947939591568140000,
"line_mean": 27.6459016393,
"line_max": 94,
"alpha_frac": 0.5108160696,
"autogenerated": false,
"ratio": 4.2702834799608995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5281099549560899,
"avg_score": null,
"num_lines": null
} |
# 2017, Andrei N., Tudor B.
from sphinx.addnodes import centered
from ._ignore_Agent import Agent
from ._ignore_Agent import Transition
import matplotlib.pyplot as plt
from random import choice
import logging
import os
import numpy as np
import math
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import torchvision.transforms as T
from torch.autograd import Variable
class BetaDQNBatchAgent(Agent):
    """
    Baseline Agent - Q-Learning with CNN.

    Epsilon-greedy DQN trained on batches drawn from a replay memory.
    Relies on attributes supplied by the Agent base class (``logger``,
    ``_crtStep``, ``_memory``, ``_modelClass``, ``discount``, ``_useCUDA``,
    ``dtype``) — TODO confirm against the base-class implementation.
    """

    def __init__(self, name, action_space, model, cfg):
        super(BetaDQNBatchAgent, self).__init__(name, cfg)
        self.logger.info("On duty...")
        # BUGFIX: action_space was received but never stored, while _act()
        # reads self.action_space for the random-exploration branch.
        self.action_space = action_space
        # Epsilon-greedy schedule: epsilon decays exponentially from
        # eps_start to eps_end with time constant eps_decay (in steps).
        self.eps_start = float(0.9)
        self.eps_end = float(0.05)
        self.eps_decay = float(200)
        self.gameMoves = 0
        self.gameLoss = 0
        self._lastLoss = 0      # loss accumulated since the last report
        self._losses = []       # per-report loss history
        self.model_class = model
        self.cfg = cfg
        super().__post_init__()

    def _act(self, observation, reward, done, is_training):
        """Pick an action epsilon-greedily from the current Q-network.

        :return: a LongTensor shaped [[action_index]]
        """
        sample = random.random()
        eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * \
                        math.exp(-1. * self._crtStep /
                                 self.eps_decay)
        if sample > eps_threshold:
            q = self._modelClass._model(Variable(observation, volatile=True))
            # BUGFIX: Tensor.max(1) returns (values, indices); the greedy
            # action is the argmax index, shaped like the random branch.
            action = q.data.max(1)[1].view(1, 1)
        else:
            action = torch.LongTensor([[self.action_space.sample()]])
        return action

    def _restart(self):
        # No per-episode state to reset.
        pass

    def _epochFinished(self):
        pass

    def _report(self):
        """Log and archive the loss accumulated since the previous report."""
        self._losses.append(self._lastLoss)
        self.logger.info("Loss:: {}".format(self._lastLoss))
        self._lastLoss = 0

    def _saveModel(self, *args, **kwargs):
        pass

    def _createLearningArchitecture(self):
        """Instantiate the Q-network, its optimizer and the loss criterion."""
        model = self.model_class(self.cfg)
        optimizer = optim.RMSprop(model.parameters())
        criterion = F.smooth_l1_loss
        # NOTE(review): _modelClass is presumably provided by the Agent base
        # class; confirm it exists before loadModel is called.
        self._modelClass.loadModel(model, optimizer, criterion)

    def _optimizeModel(self):
        """Run one DQN optimization step over the latest memory batch."""
        transition = self._memory.last()
        BATCH_SIZE = len(transition)
        if BATCH_SIZE <= 0:
            return
        # Transpose the list of Transitions into a Transition of lists.
        batch = Transition(*zip(*transition))
        state_batch = Variable(torch.cat(batch.state), volatile=True)
        action_batch = Variable(torch.cat(batch.action), volatile=True)
        reward_batch = Variable(torch.cat(batch.reward), volatile=True)
        next_state_values = Variable(torch.zeros(BATCH_SIZE), volatile=True)
        non_final_mask = torch.ByteTensor(batch.done)
        if non_final_mask.any():
            # NOTE(review): ``s is not batch.done`` compares each state with
            # the tuple of done-flags (always True); presumably the intent
            # was to drop terminal states — confirm before changing.
            non_final_next_states_t = torch.cat(
                tuple(s for s in batch.next_state
                      if s is not batch.done)) \
                .type(self.dtype)
            non_final_next_states = Variable(non_final_next_states_t,
                                             volatile=True)
            next_state_values[non_final_mask] = self._modelClass._model(
                non_final_next_states).max(1)[0].cpu()
        if self._useCUDA:
            action_batch = action_batch.cuda()
        # Bellman target: r + gamma * max_a' Q(s', a').
        expected_state_action_values = (
            next_state_values * self.discount) + reward_batch
        state_action_values = self._modelClass._model(state_batch). \
            gather(1, action_batch).cpu()
        loss = self._modelClass._criterion(state_action_values,
                                           expected_state_action_values)
        self._lastLoss += loss.data[0]
        self._modelClass._optimizer.zero_grad()
        loss.backward()
        # Clip gradients element-wise to [-1, 1] for stability.
        for param in self._modelClass._model.parameters():
            param.grad.data.clamp_(-1, 1)
        self._modelClass._optimizer.step()
| {
"repo_name": "village-people/flying-pig",
"path": "ai_challenge/agents/_ignorebeta_dqn_agent_batch.py",
"copies": "1",
"size": "3884",
"license": "mit",
"hash": -2802093736148931600,
"line_mean": 29.34375,
"line_max": 88,
"alpha_frac": 0.5816168898,
"autogenerated": false,
"ratio": 3.923232323232323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004849213032323,
"avg_score": null,
"num_lines": null
} |
# 2017 DeepCrystal Technologies - Patrick Hop
#
# Data loading a splitting file
#
# MIT License - have fun!!
# ===========================================================
import os
import random
from collections import OrderedDict
import deepchem as dc
from deepchem.utils import ScaffoldGenerator
from deepchem.utils.save import log
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
# Fix all RNG seeds so splits and weight initialization are reproducible.
random.seed(2)
np.random.seed(2)
torch.manual_seed(2)
def generate_scaffold(smiles, include_chirality=False):
    """Compute the Bemis-Murcko scaffold for a SMILES string."""
    molecule = Chem.MolFromSmiles(smiles)
    generator = ScaffoldGenerator(include_chirality=include_chirality)
    return generator.get_scaffold(molecule)
def split(dataset,
          frac_train=.80,
          frac_valid=.10,
          frac_test=.10,
          log_every_n=1000):
  """
  Splits internal compounds into train/validation/test by scaffold.

  Compounds are bucketed by Bemis-Murcko scaffold, buckets are ordered
  largest-first, and each whole bucket is assigned greedily to train,
  then validation, then test, so no scaffold is shared across splits.
  Returns three lists of integer indices into *dataset*.
  """
  # Fractions must partition the dataset (allowing float rounding).
  np.testing.assert_almost_equal(frac_train + frac_valid + frac_test, 1.)
  scaffolds = {}
  log("About to generate scaffolds", True)
  data_len = len(dataset)
  # Bucket every compound index by its scaffold SMILES.
  for ind, smiles in enumerate(dataset):
    if ind % log_every_n == 0:
      log("Generating scaffold %d/%d" % (ind, data_len), True)
    scaffold = generate_scaffold(smiles)
    if scaffold not in scaffolds:
      scaffolds[scaffold] = [ind]
    else:
      scaffolds[scaffold].append(ind)
  # Sort indices within each bucket so output is deterministic.
  scaffolds = {key: sorted(value) for key, value in scaffolds.items()}
  # Order buckets by (size, first index), biggest first, so large scaffold
  # families land in the training set.
  scaffold_sets = [
      scaffold_set
      for (scaffold, scaffold_set) in sorted(
          scaffolds.items(), key=lambda x: (len(x[1]), x[1][0]), reverse=True)
  ]
  train_cutoff = frac_train * len(dataset)
  valid_cutoff = (frac_train + frac_valid) * len(dataset)
  train_inds, valid_inds, test_inds = [], [], []
  log("About to sort in scaffold sets", True)
  # Greedy fill: a bucket that would overflow train spills to valid, then test.
  for scaffold_set in scaffold_sets:
    if len(train_inds) + len(scaffold_set) > train_cutoff:
      if len(train_inds) + len(valid_inds) + len(scaffold_set) > valid_cutoff:
        test_inds += scaffold_set
      else:
        valid_inds += scaffold_set
    else:
      train_inds += scaffold_set
  return train_inds, valid_inds, test_inds
def load_dataset(filename, whiten=False):
  """Read a CSV and return scaffold-split (train, validation) features/labels.

  Each data row is comma-separated; the last field is taken as the SMILES
  feature and the second-to-last as a float label.  The header row is
  skipped.  Test indices produced by split() are intentionally discarded:
  only train and validation subsets are returned.

  NOTE(review): `whiten` is accepted but never used in this function -
  confirm whether label whitening was meant to happen here.
  """
  features = []
  labels = []
  # `with` guarantees the file handle is closed even if parsing raises
  # (the original left the handle open).
  with open(filename, 'r') as f:
    f.readline()  # skip the CSV header row (no-op on an empty file)
    for line in f:
      splits = line[:-1].split(',')
      features.append(splits[-1])
      labels.append(float(splits[-2]))
  features = np.array(features)
  labels = np.array(labels, dtype='float32').reshape(-1, 1)
  # Test indices discarded by design; underscore name makes that explicit.
  train_ind, val_ind, _test_ind = split(features)
  train_features = np.take(features, train_ind)
  train_labels = np.take(labels, train_ind)
  val_features = np.take(features, val_ind)
  val_labels = np.take(labels, val_ind)
  return train_features, train_labels, val_features, val_labels
| {
"repo_name": "rbharath/deepchem",
"path": "contrib/mpnn/donkey.py",
"copies": "7",
"size": "3029",
"license": "mit",
"hash": 4020478410432092700,
"line_mean": 28.9900990099,
"line_max": 78,
"alpha_frac": 0.6688676131,
"autogenerated": false,
"ratio": 3.3957399103139014,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.022342758367103997,
"num_lines": 101
} |
# 2017 DeepCrystal Technologies - Patrick Hop
#
# Message Passing Neural Network for Chemical Multigraphs
#
# MIT License - have fun!!
# ===========================================================
import deepchem as dc
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import random
from collections import OrderedDict
random.seed(2)
torch.manual_seed(2)
np.random.seed(2)
T = 4
BATCH_SIZE = 64
MAXITER = 2000
#A = {}
# valid_bonds = {'SINGLE', 'DOUBLE', 'TRIPLE', 'AROMATIC'}
#for valid_bond in valid_bonds:
# A[valid_bond] = nn.Linear(75, 75)
R = nn.Linear(75, 128)
#GRU = nn.GRU(150, 75, 1)
U = nn.Linear(150, 75)
def load_dataset():
  """Load delaney-processed.csv and return (train_x, train_y, val_x, val_y).

  Rows are comma-separated; the last field is the SMILES string and the
  second-to-last the float solubility label.  The first 900 rows become the
  training set and rows 900-1099 the validation set.  Labels are wrapped as
  non-differentiable torch Variables; features stay as SMILES strings.
  """
  features = []
  labels = []
  # `with` closes the handle even on a parse error (original leaked it).
  with open('delaney-processed.csv', 'r') as f:
    f.readline()  # skip the CSV header row
    for line in f:
      splits = line[:-1].split(',')
      features.append(splits[-1])
      labels.append(float(splits[-2]))
  train_features = np.array(features[:900])
  train_labels = np.array(labels[:900])
  val_features = np.array(features[900:1100])
  val_labels = np.array(labels[900:1100])
  train_labels = Variable(torch.FloatTensor(train_labels), requires_grad=False)
  val_labels = Variable(torch.FloatTensor(val_labels), requires_grad=False)
  return train_features, train_labels, val_features, val_labels
def readout(h):
  """Sum the ReLU-activated linear projection R of every node state in h.

  Returns a (1, 128) Variable accumulated over all node hidden states.
  """
  total = Variable(torch.zeros(1, 128))
  for key in h.keys():
    total = total + F.relu(R(h[key]))
  return total
def message_pass(g, h, k):
  """One message-passing sweep: update each node state in-place.

  g maps node index -> list of (bond, neighbor index); h maps node
  index -> (1, 75) Variable.  For every edge, the neighbor's current state
  is concatenated to the node's own state and pushed through the shared
  linear layer U.  Updates h[v] in place, so later nodes in the iteration
  see already-updated neighbor states (order-dependent by design of this
  implementation).

  NOTE(review): `k` (the sweep index) is only referenced by the
  commented-out debug print below - confirm it is safe to keep unused.
  """
  #flow_delta = Variable(torch.zeros(1, 1))
  #h_t = Variable(torch.zeros(1, 1, 75))
  for v in g.keys():
    neighbors = g[v]
    for neighbor in neighbors:
      e_vw = neighbor[0]  # RDKit bond object (unused in this variant)
      w = neighbor[1]     # neighbor atom index
      #bond_type = e_vw.GetBondType()
      #A_vw = A[str(e_vw.GetBondType())]
      m_v = h[w]  # incoming message = neighbor's current hidden state
      catted = torch.cat([h[v], m_v], 1)
      #gru_act, h_t = GRU(catted.view(1, 1, 150), h_t)
      # measure convergence
      #pdist = nn.PairwiseDistance(2)
      #flow_delta = flow_delta + torch.sum(pdist(gru_act.view(1, 75), h[v]))
      #h[v] = gru_act.view(1, 75)
      h[v] = U(catted)
  #print ' flow delta [%i] [%f]' % (k, flow_delta.data.numpy()[0])
def construct_multigraph(smile):
  """Build the (adjacency, hidden-state) pair for one SMILES string.

  Returns (g, h) where g maps atom index -> list of (bond, neighbor index)
  and h maps atom index -> (1, 75) Variable of deepchem atom features.
  Both are OrderedDicts so iteration follows atom index order.
  Python 2 code (xrange).
  """
  g = OrderedDict({})
  h = OrderedDict({})
  molecule = Chem.MolFromSmiles(smile)
  for i in xrange(0, molecule.GetNumAtoms()):
    atom_i = molecule.GetAtomWithIdx(i)
    # 75-dim deepchem atom feature vector as a row Variable.
    h[i] = Variable(torch.FloatTensor(dc.feat.graph_features.atom_features(atom_i))).view(1, 75)
    # O(n^2) scan over atom pairs; only bonded pairs are recorded.
    for j in xrange(0, molecule.GetNumAtoms()):
      e_ij = molecule.GetBondBetweenAtoms(i, j)
      if e_ij != None:
        atom_j = molecule.GetAtomWithIdx(j)
        if i not in g:
          g[i] = []
        g[i].append( (e_ij, j) )
  return g, h
train_smiles, train_labels, val_smiles, val_labels = load_dataset()
# training loop
linear = nn.Linear(128, 1)
params = [#{'params': A['SINGLE'].parameters()},
#{'params': A['DOUBLE'].parameters()},
#{'params': A['TRIPLE'].parameters()},
#{'params': A['AROMATIC'].parameters()},
{'params': R.parameters()},
#{'params': GRU.parameters()},
{'params': U.parameters()},
{'params': linear.parameters()}]
optimizer = optim.SGD(params, lr=1e-5, momentum=0.9)
for i in xrange(0, MAXITER):
optimizer.zero_grad()
train_loss = Variable(torch.zeros(1, 1))
y_hats_train = []
for j in xrange(0, BATCH_SIZE):
sample_index = random.randint(0, 799) # TODO: sampling without replacement
smile = train_smiles[sample_index]
g, h = construct_multigraph(smile) # TODO: cache this
for k in xrange(0, T):
message_pass(g, h, k)
x = readout(h)
y_hat = linear(x)
y = train_labels[sample_index]
y_hats_train.append(y_hat)
error = (y_hat - y)*(y_hat - y)
train_loss = train_loss + error
train_loss.backward()
optimizer.step()
if i % 12 == 0:
val_loss = Variable(torch.zeros(1, 1), requires_grad=False)
y_hats_val = []
for j in xrange(0, len(val_smiles)):
g, h = construct_multigraph(val_smiles[j])
for k in xrange(0, T):
message_pass(g, h, k)
x = readout(h)
y_hat = linear(x)
y = val_labels[j]
y_hats_val.append(y_hat)
error = (y_hat - y)*(y_hat - y)
val_loss = val_loss + error
y_hats_val = map(lambda x: x.data.numpy()[0], y_hats_val)
y_val = map(lambda x: x.data.numpy()[0], val_labels)
r2_val = r2_score(y_val, y_hats_val)
train_loss_ = train_loss.data.numpy()[0]
val_loss_ = val_loss.data.numpy()[0]
print 'epoch [%i/%i] train_loss [%f] val_loss [%f] r2_val [%s]' \
% ((i + 1) / 12, maxiter_train / 12, train_loss_, val_loss_, r2_val)
'''
train_labels = train_labels.data.numpy()
val_labels = val_labels.data.numpy()
train_mols = map(lambda x: Chem.MolFromSmiles(x), train_smiles)
train_fps = [AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in train_mols]
val_mols = map(lambda x: Chem.MolFromSmiles(x), val_smiles)
val_fps = [AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in val_mols]
np_fps_train = []
for fp in train_fps:
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
np_fps_train.append(arr)
np_fps_val = []
for fp in val_fps:
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, arr)
np_fps_val.append(arr)
rf = RandomForestRegressor(n_estimators=100, random_state=2)
#rf.fit(np_fps_train, train_labels)
#labels = rf.predict(val_fps)
ave = np.ones( (300,) )*(np.sum(val_labels) / 300.0)
print ave.shape
print val_labels.shape
r2 = r2_score(ave, val_labels)
print 'rf r2 is:'
print r2
'''
| {
"repo_name": "joegomes/deepchem",
"path": "contrib/mpnn/mpnn_baseline.py",
"copies": "1",
"size": "5877",
"license": "mit",
"hash": 3313331438272065000,
"line_mean": 26.8530805687,
"line_max": 96,
"alpha_frac": 0.6212353241,
"autogenerated": false,
"ratio": 2.8529126213592235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3974147945459224,
"avg_score": null,
"num_lines": null
} |
# 2017 DeepCrystal Technologies - Patrick Hop
#
# Message Passing Neural Network SELU [MPNN-S] for Chemical Multigraphs
#
# MIT License - have fun!!
# ===========================================================
import math
import deepchem as dc
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing
import numpy as np
import random
from collections import OrderedDict
from scipy.stats import pearsonr
import donkey
random.seed(2)
torch.manual_seed(2)
np.random.seed(2)
DATASET = 'az_ppb.csv'
print(DATASET)
T = 3
BATCH_SIZE = 48
MAXITER = 40000
LIMIT = 0
LR = 5e-4
R = nn.Linear(150, 128)
U = {0: nn.Linear(156, 75), 1: nn.Linear(156, 75), 2: nn.Linear(156, 75)}
V = {0: nn.Linear(75, 75), 1: nn.Linear(75, 75), 2: nn.Linear(75, 75)}
E = nn.Linear(6, 6)
def adjust_learning_rate(optimizer, epoch):
  """Set the learning rate to the initial LR decayed by 0.9 every 10 epochs.

  (Docstring corrected: the formula below multiplies LR by 0.9 ** (epoch // 10),
  not by 0.8 every 5 epochs as previously stated.)
  """
  lr = LR * (0.9 ** (epoch // 10))
  print('new lr [%.5f]' % lr)
  # Apply the decayed rate to every parameter group of the optimizer.
  for param_group in optimizer.param_groups:
    param_group['lr'] = lr
def load_dataset():
  """Load DATASET via donkey, standardize labels on train statistics,
  and wrap the labels as non-differentiable torch Variables."""
  x_train, y_train, x_val, y_val = donkey.load_dataset(DATASET)
  # Fit the scaler on training labels only, then apply it to both splits.
  label_scaler = preprocessing.StandardScaler().fit(y_train)
  y_train = Variable(torch.FloatTensor(label_scaler.transform(y_train)),
                     requires_grad=False)
  y_val = Variable(torch.FloatTensor(label_scaler.transform(y_val)),
                   requires_grad=False)
  return x_train, y_train, x_val, y_val
def readout(h, h2):
  """Graph-level readout: tanh of the summed SELU-projected node states.

  For each node, concatenates the final state from h with the initial
  state from h2, projects through R to 128 dims, SELU-activates, sums
  over nodes, and squashes with tanh.  Python 2 code: map() returns lists
  here, so the two-stage map pipeline is fully materialized.

  NOTE(review): the zip pairs h2's keys with h's keys and then indexes
  h with the h2-key and h2 with the h-key (swapped).  Both dicts are
  OrderedDicts built from the same molecule, so the key sequences match
  and the swap is harmless - confirm if that invariant ever changes.
  """
  catted_reads = map(lambda x: torch.cat([h[x[0]], h2[x[1]]], 1), zip(h2.keys(), h.keys()))
  activated_reads = map(lambda x: F.selu( R(x) ), catted_reads)
  readout = Variable(torch.zeros(1, 128))
  for read in activated_reads:
    readout = readout + read
  return F.tanh( readout )
def message_pass(g, h, k):
  """Sweep k of message passing: update every node state in h in place.

  g maps node index -> list of (edge-feature Variable, neighbor index);
  h maps node index -> (1, 75) Variable.  Each edge contributes the
  neighbor state through the per-sweep layer V[k] and the edge features
  through E; the concatenation is pushed through U[k] with SELU.
  In-place updates mean later nodes see already-updated neighbors.
  """
  for v in g.keys():
    neighbors = g[v]
    for neighbor in neighbors:
      e_vw = neighbor[0] # feature variable
      w = neighbor[1]
      m_w = V[k](h[w])        # message from neighbor state
      m_e_vw = E(e_vw)        # message from edge features
      reshaped = torch.cat( (h[v], m_w, m_e_vw), 1)
      h[v] = F.selu(U[k](reshaped))
def construct_multigraph(smile):
  """Build the (adjacency, hidden-state) pair for one SMILES string.

  Returns (g, h): g maps atom index -> list of (edge-feature Variable,
  neighbor index); h maps atom index -> (1, 75) Variable of deepchem atom
  features.  Bond features are binarized (True -> 1, else 0) into a
  (1, 6) Variable.  OrderedDicts preserve atom-index iteration order.
  Python 2 code (xrange).
  """
  g = OrderedDict({})
  h = OrderedDict({})
  molecule = Chem.MolFromSmiles(smile)
  for i in xrange(0, molecule.GetNumAtoms()):
    atom_i = molecule.GetAtomWithIdx(i)
    h[i] = Variable(torch.FloatTensor(dc.feat.graph_features.atom_features(atom_i))).view(1, 75)
    # O(n^2) pair scan; only bonded pairs get an adjacency entry.
    for j in xrange(0, molecule.GetNumAtoms()):
      e_ij = molecule.GetBondBetweenAtoms(i, j)
      if e_ij != None:
        e_ij = map(lambda x: 1 if x == True else 0, dc.feat.graph_features.bond_features(e_ij)) # ADDED edge feat
        e_ij = Variable(torch.FloatTensor(e_ij).view(1, 6))
        atom_j = molecule.GetAtomWithIdx(j)
        if i not in g:
          g[i] = []
        g[i].append( (e_ij, j) )
  return g, h
train_smiles, train_labels, val_smiles, val_labels = load_dataset()
# Output head: 128-dim readout -> scalar prediction.
linear = nn.Linear(128, 1)
# All trainable modules handed to one Adam optimizer.
params = [{'params': R.parameters()},
          {'params': U[0].parameters()},
          {'params': U[1].parameters()},
          {'params': U[2].parameters()},
          {'params': E.parameters()},
          {'params': V[0].parameters()},
          {'params': V[1].parameters()},
          {'params': V[2].parameters()},
          {'params': linear.parameters()}]
num_epoch = 0
optimizer = optim.Adam(params, lr=LR, weight_decay=1e-4)
# Training loop (Python 2: xrange, print statement).  One validation pass
# per "epoch", i.e. every len(train_smiles)/BATCH_SIZE iterations.
for i in xrange(0, MAXITER):
  optimizer.zero_grad()
  train_loss = Variable(torch.zeros(1, 1))
  y_hats_train = []
  for j in xrange(0, BATCH_SIZE):
    # Random sampling with replacement over the training set.
    sample_index = random.randint(0, len(train_smiles) - 2)
    smile = train_smiles[sample_index]
    g, h = construct_multigraph(smile) # TODO: cache this
    # h2 keeps the initial node states for the readout's skip connection.
    g2, h2 = construct_multigraph(smile)
    for k in xrange(0, T):
      message_pass(g, h, k)
    x = readout(h, h2)
    #x = F.selu( fc(x) )
    y_hat = linear(x)
    y = train_labels[sample_index]
    y_hats_train.append(y_hat)
    # Squared error, pre-divided by the batch size (mean over the batch).
    error = (y_hat - y)*(y_hat - y) / Variable(torch.FloatTensor([BATCH_SIZE])).view(1, 1)
    train_loss = train_loss + error
  train_loss.backward()
  optimizer.step()
  if i % int(len(train_smiles) / BATCH_SIZE) == 0:
    # Full validation sweep; losses averaged over the validation set size.
    val_loss = Variable(torch.zeros(1, 1), requires_grad=False)
    y_hats_val = []
    for j in xrange(0, len(val_smiles)):
      g, h = construct_multigraph(val_smiles[j])
      g2, h2 = construct_multigraph(val_smiles[j])
      for k in xrange(0, T):
        message_pass(g, h, k)
      x = readout(h, h2)
      #x = F.selu( fc(x) )
      y_hat = linear(x)
      y = val_labels[j]
      y_hats_val.append(y_hat)
      error = (y_hat - y)*(y_hat - y) / Variable(torch.FloatTensor([len(val_smiles)])).view(1, 1)
      val_loss = val_loss + error
    # Two R^2 variants: sklearn's coefficient of determination and the
    # squared Pearson correlation.
    y_hats_val = np.array(map(lambda x: x.data.numpy(), y_hats_val))
    y_val = np.array(map(lambda x: x.data.numpy(), val_labels))
    y_hats_val = y_hats_val.reshape(-1, 1)
    y_val = y_val.reshape(-1, 1)
    r2_val_old = r2_score(y_val, y_hats_val)
    r2_val_new = pearsonr(y_val, y_hats_val)[0]**2
    train_loss_ = train_loss.data.numpy()[0]
    val_loss_ = val_loss.data.numpy()[0]
    print 'epoch [%i/%i] train_loss [%f] val_loss [%f] r2_val_old [%.4f], r2_val_new [%.4f]' \
      % (num_epoch, 100, train_loss_, val_loss_, r2_val_old, r2_val_new)
    num_epoch += 1
| {
"repo_name": "lilleswing/deepchem",
"path": "contrib/mpnn/mpnn.py",
"copies": "5",
"size": "5567",
"license": "mit",
"hash": 5036992968658853000,
"line_mean": 29.5879120879,
"line_max": 114,
"alpha_frac": 0.6215196695,
"autogenerated": false,
"ratio": 2.8578028747433266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01984820920388555,
"num_lines": 182
} |
# 2017-Oct-27 00:08
# WNixalo
# Testing out image-resize speedups without/with pillow-simd
# Image is a (1680,1050) jpg
from os.path import expanduser; import scipy.misc
import matplotlib.pyplot as plt; import cv2; import PIL
from time import time
root = expanduser('~/')
path = root + '/Deshar/DoomPy/images/Hiigaran_Hull_Paint.jpg'
# averages[i] accumulates timings: 0=PIL open, 1=plt.imshow, 2=PIL resize,
# 3=cv2 open, 4=cv2 resize.
averages = [0. for i in range(5)]
iters = 50
for i in range(iters):
    times = []
    t0,t1 = 0.,0.
    # Openning Image w/ PIL
    t0 = time()
    img = PIL.Image.open(path)
    t1 = time()
    imgarr = [img for i in range(50)]
    len(imgarr)
    del imgarr
    times.append(t1-t0)
    t0,t1 = 0.,0.
    # Openning Image w/ MatplotLib
    t0 = time()
    plt.imshow(img)
    t1 = time()
    times.append(t1-t0)
    t0,t1 = 0.,0.
    # plt.show()
    # Resizing Image w/ PIL
    t0 = time()
    img = img.resize((3360, 2100))
    t1 = time()
    times.append(t1-t0)
    t0,t1 = 0.,0.
    # checking it resized correctly
    # plt.imshow(img); plt.show()
    # Openning Image w/ OpenCV 3.3.0
    # NOTE(review): flag 0 loads grayscale, so the cv2 numbers are not
    # strictly comparable to the color PIL load.
    t0 = time()
    img = cv2.imread(path, 0)
    # cv2.imshow('', img)
    t1 = time()
    times.append(t1-t0)
    t0,t1 = 0.,0.
    # Resizing Image w/ OpenCV
    t0 = time()
    img = cv2.resize(img, (3360, 2100)) # interpolation=CV_INTER_LINEAR
    t1 = time()
    times.append(t1-t0)
    # NOTE(review): this inner loop reuses `i`, shadowing the outer loop
    # variable; harmless here because `for` rebinds it each iteration.
    for i in range(len(times)):
        averages[i] += times[i]
# Convert accumulated sums into per-iteration averages.
for i in range(len(averages)):
    averages[i] /= iters
# just checking it resized correctly
# cv2.imshow('',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
print("{:<0} {:>13} {:>24} {:>16}".format('Times (Open):','PIL', 'MatplotLib', 'OpenCV'))
print("{:>36} {:>14} {:>14}".format(averages[0], averages[1], averages[3]))
print("{:<0} {:>8} {:>20}".format('Times (Resize 2x):', 'PIL', 'OpenCV'))
print("{:>36} {:>14}".format(averages[2], averages[4]))
print("Iterations: {}".format(iters))
################################################################################
# OUTPUT w/ regular PIL:
# (FAI) Waynes-MBP:Kaukasos WayNoxchi$ python pillow-simd-benchmark.py
# Times (Open): PIL MatplotLib OpenCV
# 0.0011928749084472656 0.09201124668121338 0.017856025695800783
# Times (Resize 2x): PIL OpenCV
# 0.013700852394104004 0.004898147583007812
# Iterations: 50
################################################################################
# OUTPUT w/ pillow-simd:
# (FAI) Waynes-MBP:Kaukasos WayNoxchi$ python pillow-simd-benchmark.py
# Times (Open): PIL MatplotLib OpenCV
# 0.0012062406539916993 0.08796523094177246 0.017541275024414063
# Times (Resize 2x): PIL OpenCV
# 0.010742950439453124 0.0048766183853149415
# Iterations: 50
# NOTE: I wasn't sure which operations would be sped up w/ SIMD. Realized later
# on it was resizing. This is reflected above. The time to upscale a
# 1680x1050 image by 2X for PIL decreased by ~130μs or a ~21.6% speedup.
# I assume this would be much more dramatic for batches of images and
# more complicated resampling algorithms.
# If I ran this again I'd focus only on capturing time for PIL to resize
# batches of images. Say, from an array of 10k images or so.
# NOTE: All other times remained over multiple runs. PIL resize had a pre-SIMD
# high of 0.019.. seconds, and a post-SIMD high of 0.01100658.. seconds.
| {
"repo_name": "WNoxchi/Kaukasos",
"path": "misc/pillow-simd-benchmark.py",
"copies": "1",
"size": "3479",
"license": "mit",
"hash": -3095876359882642400,
"line_mean": 28.4745762712,
"line_max": 89,
"alpha_frac": 0.5879815986,
"autogenerated": false,
"ratio": 2.9650468883205456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4053028486920546,
"avg_score": null,
"num_lines": null
} |
# 2017-Oct-30 01:20 WNixalo PyTorch tutorial
# http://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html
# Autograd: Automatic Differentiation
# pytorch autograd is a define-by-run framework: meaning your backprop is
# defined by how your code is run, and that every single iteration can be different.
# VARIABLE
# autograd.Variable wraps a Tensor and supports nearly all ops defd on it.
# once you finish your computation you can call .backward() and have all the
# gradients computed automatically.
#
# you can access the raw tensor thru the .data attribute, while the gradient
# wrt this variable is accumulated into .grad
# The Variable and Function classes are interconnected and build up an acyclic
# graph that encodes a complete history of computation. Each variable has a
# .grad_fn attrib tht refs a Function that's created the Variable;
# Variables created by the user have a grad_fn of None.
#
# To compute derivatives, call .backward() on a Variable. If Variable scalar
# dont need specfy args. Otws need specfy grad_output arg thats tensor of
# matching shape.
import torch
from torch.autograd import Variable
# Create a variable:
# NOTE: Variable/.data is the legacy (pre-0.4) PyTorch autograd API that this
# tutorial targets; modern tensors take requires_grad directly.
x = Variable(torch.ones(2, 2), requires_grad=True)
print(x)
# Do an operation on variable:
y = x + 2
print(y)
# y was created as a result of an op --> so it has a grad_fn
print(y.grad_fn)
# Do more operations on y
z = y * y * 3
out = z.mean()
print(z, out)
# GRADIENTS
# out.backward() is equivalent to doing out.backward(torch.Tensor([1.0]))
out.backward()
# print gradients d(out)/dx
print(x.grad)
# Non-scalar output: keep doubling y until its norm exceeds 1000.
x = torch.randn(3)
x = Variable(x, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:
    y = y * 2
print(y)
# backward() on a non-scalar needs an explicit gradient argument of the
# same shape as y.
gradients = torch.FloatTensor([0.1, 1.0, 0.0001])
y.backward(gradients)
print(x.grad)
#
| {
"repo_name": "WNoxchi/Kaukasos",
"path": "pytorch/pytorch_01_autograd.py",
"copies": "1",
"size": "1795",
"license": "mit",
"hash": 2597864016968073000,
"line_mean": 22.5657894737,
"line_max": 84,
"alpha_frac": 0.7280848688,
"autogenerated": false,
"ratio": 3.2096774193548385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9404093557504684,
"avg_score": 0.00673374613003096,
"num_lines": 76
} |
# 20180205 Catch Barrier state exception
import acm
import ael
import HTI_Util
import HTI_FeedTrade_EDD_Util
import datetime
import os
import sys
import win32com.client
import locale
from itertools import groupby
from operator import itemgetter
ael_variables = [['posdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Report Date', None, 1], \
['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTIFS - EDD,HTISEC - EDD', 1, 1, 'Acquirer(s)', None, 1], \
['portfolio', 'Portfolio', 'string', HTI_Util.getAllPortfolios(), 'EDD ELN', 1, 1, 'Portfolio', None, 1], \
['currclspricemkt', 'Current Closing Price Market', 'string', None, 'Bloomberg_5PM', 1, 0, 'Current Closing Price Market', None, 1],
['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'Bloomberg_5PM_Cls', 1, 0, 'Historical Closing Price Market', None, 1],
['gen_keln', 'Generate KELN?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Generate KELN?', None, 1], \
['gen_eeln', 'Generate EELN?', 'string', HTI_Util.get_yesno(), 'Y', 0, 0, 'Generate EELN?', None, 1], \
['ko_pricing_template_file', 'KO Pricing Template File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\templates\\KO ELN_Pricing Supplement.docx', 1, 0, 'KO Pricing Template File', None, 1], \
['ko_ts_template_file', 'KO Bull Template File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\templates\\KO Bull Equity Linked Note TS.docx', 1, 0, 'KO Bull Template File', None, 1], \
['eln_pricing_template_file', 'ELN Pricing Template File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\templates\\ELN PS.docx', 1, 0, 'ELN Pricing Template File', None, 1], \
['eln_ts_template_file', 'ELN Bull Template File', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\templates\\ELN TS.docx', 1, 0, 'ELN Bull Template File', None, 1], \
['ko_pricing_output', 'KO Pricing Output', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\output\\[contract] Pricing Supplement YYYYMMDD.docx', 1, 0, 'KO Pricing Output', None, 1], \
['ko_ts_output', 'KO Bull Output', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\output\\[contract] KO Bull Equity Linked Note TS YYYYMMDD.docx', 1, 0, 'KO Bull Output', None, 1], \
['eln_pricing_output', 'ELN Pricing Output', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\output\\[contract] ELN PS YYYYMMDD.docx', 1, 0, 'ELN Pricing Output', None, 1], \
['eln_ts_output', 'ELN Bull Template Output', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\ELN\\output\\[contract] ELN TS YYYYMMDD.docx', 1, 0, 'ELN Bull Template Output', None, 1], \
['base_ccy', 'Base Ccy', 'string', None, 'HKD', 1, 0, 'Base Ccy', None, 1]]
def eeln_to_dict(eln_arr):
	"""Build pricing-supplement and KO term-sheet field maps for one EELN group.

	eln_arr: list of [trade number, instrument id] rows sharing one
	'Group Trade Ref'.  The "_Hedging" row supplies the option trade, the
	plain row the bond trade; "_Exercise" rows are skipped.  Returns
	(pricing_dict, ko_dict), or (False, False) when the group's instrument
	ids do not start with "EELN".
	"""
	pricing_dict = {}
	ko_dict = {}
	for row in eln_arr:
		tid = row[0]
		insid = row[1]
		if insid[:4] != "EELN":
			return False, False
		elif "_Exercise" in insid:
			continue
		elif "_Hedging" in insid:
			opt_trd = acm.FTrade[tid]
			opt_ins = opt_trd.Instrument()
		else:
			bnd_trd = acm.FTrade[tid]
			bnd_ins = bnd_trd.Instrument()
	# Pricing supplement fields ("dd Mon yyyy" date style).
	pricing_dict["ValueDay"] = datetime.datetime.strptime(bnd_trd.ValueDay()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["Ins_Description"] = opt_ins.Underlying().AdditionalInfo().Ins_Description()
	pricing_dict["MaturityDate"] = datetime.datetime.strptime(bnd_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["FaceValue"] = bnd_trd.Currency().Name() + " " + locale.format("%.2f", abs(bnd_trd.FaceValue()), grouping=True)
	pricing_dict["Price"] = str(round(bnd_trd.Price(),2)) + "%"
	pricing_dict["BBGCode"] = opt_ins.Underlying().Name()
	pricing_dict["TradeTime"] = datetime.datetime.strptime(bnd_trd.TradeTime()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["ExpiryDate"] = datetime.datetime.strptime(opt_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["Ref_Spot_Price"] = bnd_trd.Currency().Name() + " " + str(bnd_ins.AdditionalInfo().Ref_Spot_Price())
	pricing_dict["StrikePrice"] = opt_ins.Currency().Name() + " " + str(opt_ins.StrikePrice())
	pricing_dict["StartDate"] = datetime.datetime.strptime(bnd_ins.StartDate()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["Barrier"] = opt_ins.Currency().Name() + " " + str(opt_ins.Barrier())
	# KO term-sheet fields ("Month dd, yyyy" date style).
	try:
		ko_dict["BarrierCrossDate"] = datetime.datetime.strptime(opt_ins.Exotic().BarrierCrossDate()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	except Exception:
		# Barrier state may be unavailable (e.g. not knocked out yet); leave the
		# field out.  Narrowed from a bare `except:` so KeyboardInterrupt /
		# SystemExit still propagate.
		print ("Skipped BarrierCrossDate")
	ko_dict["TradeTime"] = datetime.datetime.strptime(bnd_trd.TradeTime()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["FaceValue"] = bnd_trd.Currency().Name() + " " + locale.format("%.2f", abs(bnd_trd.FaceValue()), grouping=True)
	ko_dict["BBGCode"] = opt_ins.Underlying().Name()
	ko_dict["Ins_Description"] = opt_ins.Underlying().AdditionalInfo().Ins_Description()
	ko_dict["Ref_Spot_Price"] = bnd_trd.Currency().Name() + " " + str(bnd_ins.AdditionalInfo().Ref_Spot_Price())
	ko_dict["TradeTime"] = datetime.datetime.strptime(bnd_trd.TradeTime()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["StrikePrice"] = opt_ins.Currency().Name() + " " + str(opt_ins.StrikePrice())
	ko_dict["Strike2"] = opt_ins.AdditionalInfo().Strike2()
	ko_dict["Price"] = str(round(bnd_trd.Price(),2)) + "%"
	ko_dict["CallablePrice"] = "{0:.0%}".format(float(opt_ins.Barrier()) / float(bnd_ins.AdditionalInfo().Ref_Spot_Price()) )
	ko_dict["ValueDay"] = datetime.datetime.strptime(bnd_trd.ValueDay()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["ExpiryDate"] = datetime.datetime.strptime(opt_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["MaturityDate"] = datetime.datetime.strptime(bnd_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["SpotBankingDaysOffset"] = opt_ins.SpotBankingDaysOffset()
	ko_dict["FaceValue"] = bnd_trd.Currency().Name() + " " + locale.format("%.2f", abs(bnd_trd.FaceValue()), grouping=True)
	return pricing_dict, ko_dict
def keln_to_dict(eln_arr):
	"""Build pricing-supplement and KO term-sheet field maps for one KELN group.

	Mirror of eeln_to_dict for instrument ids starting with "KELN":
	eln_arr is a list of [trade number, instrument id] rows sharing one
	'Group Trade Ref'.  Returns (pricing_dict, ko_dict), or (False, False)
	when the group is not a KELN contract.
	"""
	pricing_dict = {}
	ko_dict = {}
	for row in eln_arr:
		tid = row[0]
		insid = row[1]
		if insid[:4] != "KELN":
			return False, False
		elif "_Exercise" in insid:
			continue
		elif "_Hedging" in insid:
			opt_trd = acm.FTrade[tid]
			opt_ins = opt_trd.Instrument()
		else:
			bnd_trd = acm.FTrade[tid]
			bnd_ins = bnd_trd.Instrument()
	# Pricing supplement fields ("dd Mon yyyy" date style).
	pricing_dict["ValueDay"] = datetime.datetime.strptime(bnd_trd.ValueDay()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["Ins_Description"] = opt_ins.Underlying().AdditionalInfo().Ins_Description()
	pricing_dict["MaturityDate"] = datetime.datetime.strptime(bnd_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["FaceValue"] = bnd_trd.Currency().Name() + " " + locale.format("%.2f", abs(bnd_trd.FaceValue()), grouping=True)
	pricing_dict["Price"] = str(round(bnd_trd.Price(),2)) + "%"
	pricing_dict["BBGCode"] = opt_ins.Underlying().Name()
	pricing_dict["TradeTime"] = datetime.datetime.strptime(bnd_trd.TradeTime()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["ExpiryDate"] = datetime.datetime.strptime(opt_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["Ref_Spot_Price"] = bnd_trd.Currency().Name() + " " + str(bnd_ins.AdditionalInfo().Ref_Spot_Price())
	pricing_dict["StrikePrice"] = opt_ins.Currency().Name() + " " + str(opt_ins.StrikePrice())
	pricing_dict["StartDate"] = datetime.datetime.strptime(bnd_ins.StartDate()[:10], "%Y-%m-%d").date().strftime("%d %b %Y")
	pricing_dict["Barrier"] = opt_ins.Currency().Name() + " " + str(opt_ins.Barrier())
	# KO term-sheet fields ("Month dd, yyyy" date style).
	# CONSISTENCY FIX: eeln_to_dict guards this lookup (per the 2018-02-05
	# "Catch Barrier state exception" change at the top of this file) but the
	# KELN variant did not, so an unset barrier state crashed the whole run.
	try:
		ko_dict["BarrierCrossDate"] = datetime.datetime.strptime(opt_ins.Exotic().BarrierCrossDate()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	except Exception:
		print ("Skipped BarrierCrossDate")
	ko_dict["TradeTime"] = datetime.datetime.strptime(bnd_trd.TradeTime()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["FaceValue"] = bnd_trd.Currency().Name() + " " + locale.format("%.2f", abs(bnd_trd.FaceValue()), grouping=True)
	ko_dict["BBGCode"] = opt_ins.Underlying().Name()
	ko_dict["Ins_Description"] = opt_ins.Underlying().AdditionalInfo().Ins_Description()
	ko_dict["Ref_Spot_Price"] = bnd_trd.Currency().Name() + " " + str(bnd_ins.AdditionalInfo().Ref_Spot_Price())
	ko_dict["TradeTime"] = datetime.datetime.strptime(bnd_trd.TradeTime()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["StrikePrice"] = opt_ins.Currency().Name() + " " + str(opt_ins.StrikePrice())
	ko_dict["Strike2"] = opt_ins.AdditionalInfo().Strike2()
	ko_dict["Price"] = str(round(bnd_trd.Price(),2)) + "%"
	ko_dict["CallablePrice"] = "{0:.0%}".format(float(opt_ins.Barrier()) / float(bnd_ins.AdditionalInfo().Ref_Spot_Price()) )
	ko_dict["ValueDay"] = datetime.datetime.strptime(bnd_trd.ValueDay()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["ExpiryDate"] = datetime.datetime.strptime(opt_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["MaturityDate"] = datetime.datetime.strptime(bnd_ins.ExpiryDate()[:10], "%Y-%m-%d").date().strftime("%B %d, %Y")
	ko_dict["SpotBankingDaysOffset"] = opt_ins.SpotBankingDaysOffset()
	ko_dict["FaceValue"] = bnd_trd.Currency().Name() + " " + locale.format("%.2f", abs(bnd_trd.FaceValue()), grouping=True)
	return pricing_dict, ko_dict
def dict_to_docx(dic, posdate, contract, template_file, output_file):
	"""Fill a Word template: replace each [key] placeholder with dic[key],
	then save under output_file with YYYYMMDD/[contract] expanded.

	Does nothing when dic is falsy (e.g. the group was not of this type).
	"""
	if not dic:
		return
	word = win32com.client.gencache.EnsureDispatch('Word.Application')
	doc = word.Documents.Open(template_file)
	# Replace=2 is wdReplaceAll for Word's Find.Execute.
	for key, value in dic.items():
		doc.Content.Find.Execute(FindText="[" + key + "]", ReplaceWith=value, Replace=2)
	target = output_file.replace("YYYYMMDD", posdate.to_string("%Y%m%d")).replace("[contract]", contract)
	doc.SaveAs(target)
	doc.Close()
	return
def ael_main(dict):
	# Entry point invoked by the ael framework with the ael_variables values.
	# NOTE(review): the parameter shadows the builtin `dict`; left unchanged
	# because the framework supplies it under this conventional signature.
	asofdate = dict['posdate']
	if asofdate == 'Today':
		posdate = ael.date_today()
	else:
		# Parse 'dd/mm/yyyy' into an ael date.
		asofdateArr = dict['posdate'].split('/')
		posdate = ael.date_from_ymd(int(asofdateArr[2]), int(asofdateArr[1]), int(asofdateArr[0]))
	# Roll forward to the next Hong Kong banking day: used as the exclusive
	# upper bound of the trade-time window in the SQL below.
	posdatetp1 = posdate
	hk_cal = acm.FCalendar.Select("name='Hong Kong'")[0]
	while True:
		posdatetp1 = posdatetp1.add_days(1)
		if not hk_cal.IsNonBankingDay(hk_cal, hk_cal, posdatetp1):
			break
	# Portfolios: expand each selected portfolio into its child portfolios and
	# build a quoted, comma-separated list for the SQL IN clause.
	portfolios = dict['portfolio']
	portfolioList2 = []
	pf_list = ''
	portfolioList2.extend(portfolios)
	for port in portfolioList2:
		prfid = port
		pfarr = []
		pPf = ael.Portfolio[prfid]
		HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
		if len(pfarr) > 0:
			for pf in pfarr:
				if len(pf_list) != 0:
					pf_list = pf_list + ','
				pf_list = pf_list + "'" + pf + "'"
		else:
			if len(pf_list) != 0:
				pf_list = pf_list + ','
			pf_list = pf_list + "'" + prfid + "'"
	# Acquirers: same quoted-list construction for the IN clause.
	acq_array_list = dict['acquirers']
	acq_list = ''
	for acq in acq_array_list:
		if acq_list == '':
			acq_list = "'" + acq + "'"
		else:
			acq_list = acq_list + ",'" + acq + "'"
	currclspricemkt = dict['currclspricemkt']
	histclspricemkt = dict['histclspricemkt']
	base_ccy = dict['base_ccy']
	# Select the day's live trades grouped by 'Group Trade Ref'.
	strSql = """select distinct add_info(t, 'Group Trade Ref'), t.trdnbr, i.insid
				from instrument i, trade t, party acq, portfolio pf
				where i.insaddr = t.insaddr
				and t.status not in ('Void', 'Simulated')
				and t.acquirer_ptynbr = acq.ptynbr
				and t.prfnbr = pf.prfnbr
				and acq.ptyid in (@acquirer_list)
				and (i.exp_day >= '@dt' or i.exp_day = '0000-01-01')
				and t.time >= '@dt'
				and t.time < '@d_tp1'
				and pf.prfid in (@portfolio_list)
			"""
	strSql = strSql.replace('@acquirer_list', acq_list)
	strSql = strSql.replace('@portfolio_list', pf_list)
	strSql = strSql.replace('@d_tp1', posdatetp1.to_string('%Y-%m-%d'))
	strSql = strSql.replace('@dt', posdate.to_string('%Y-%m-%d'))
	print (strSql)
	rs = ael.asql(strSql)
	columns, buf = rs
	# Group rows by the first column (Group Trade Ref); each group is one
	# ELN contract consisting of its bond / hedging-option / exercise trades.
	groups = groupby(buf[0], itemgetter(0))
	locale.setlocale(locale.LC_ALL, 'english')
	eln_arrs = [ [ [ item[1], item[2] ] for item in data] for (key, data) in groups ]
	# For each contract group, build the field maps and render the Word docs.
	for eln_arr in eln_arrs:
		ko_pricing_dict = {}
		ko_ts_dict = {}
		eln_pricing_dict = {}
		eln_ts_dict = {}
		if dict["gen_keln"] == "Y":
			ko_pricing_dict, ko_ts_dict = keln_to_dict(eln_arr)
		if dict["gen_eeln"] == "Y":
			eln_pricing_dict, eln_ts_dict = eeln_to_dict(eln_arr)
		dict_to_docx(ko_pricing_dict, posdate, eln_arr[0][1], dict["ko_pricing_template_file"], dict["ko_pricing_output"])
		dict_to_docx(ko_ts_dict, posdate, eln_arr[0][1], dict["ko_ts_template_file"], dict["ko_ts_output"])
		dict_to_docx(eln_pricing_dict, posdate, eln_arr[0][1], dict["eln_pricing_template_file"], dict["eln_pricing_output"])
		dict_to_docx(eln_ts_dict, posdate, eln_arr[0][1], dict["eln_ts_template_file"], dict["eln_ts_output"])
	print ("Finished")
return | {
"repo_name": "frederick623/HTI",
"path": "eln_automation/ELN_Automation.py",
"copies": "1",
"size": "12812",
"license": "apache-2.0",
"hash": -679905762388497500,
"line_mean": 50.6653225806,
"line_max": 213,
"alpha_frac": 0.6463471745,
"autogenerated": false,
"ratio": 2.573724387304138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3720071561804138,
"avg_score": null,
"num_lines": null
} |
# 20180524 Add multiple FA client support
import os
import sys
import re
import sqlite3
import csv
import fnmatch
import decimal
import datetime
import xlrd
import math
from FeeCalc import FeeCalc
# UNC paths of the batch inputs/outputs.  "YYYYMMDD" in fa_output is replaced
# with the run date when the file is written (see main()).
PATH_DICT = {
    "trd_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Trade_Output",
    "trd_filename": "ULLink_Trade_*.txt",
    "ins_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_FA_daily" ,
    "ins_filename": "PositionDetails_????????.xlsx",
    "fa_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\FA_Trade_Import\\ullink_to_fa_YYYYMMDD.csv",
    "acc_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\account.xlsx",
    "emsx_trader_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\ed_emsx_traders.csv",
}
# Local-testing variant of PATH_DICT (kept for reference, intentionally disabled).
# PATH_DICT = {
#     "trd_dir": os.path.dirname(os.path.abspath(__file__)),
#     "trd_filename": "ULLink_Trade_*.txt",
#     "ins_dir": os.path.dirname(os.path.abspath(__file__)) ,
#     "ins_filename": "PositionDetails_????????.xlsx",
#     "fa_output": os.path.join(os.path.dirname(os.path.abspath(__file__)), "ullink_to_fa_YYYYMMDD.csv"),
#     "acc_file": os.path.join(os.path.dirname(os.path.abspath(__file__)), "account.xlsx"),
#     "emsx_trader_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\ed_emsx_traders.csv",
# }
# MIC (exchange) code -> Bloomberg country suffix used to build "<code> <cc> Equity".
MIC_DICT = {
    "XTKS": "JP",
    "XHKG": "HK",
    "XSEC": "C2",
    "XSSC": "C1",
    "XSHE": "CH",
    "XSHG": "CH",
}
# MIC code -> FA "Location" field (currently the same mapping as MIC_DICT).
LOC_DICT = {
    "XTKS": "JP",
    "XHKG": "HK",
    "XSEC": "C2",
    "XSSC": "C1",
    "XSHE": "CH",
    "XSHG": "CH",
}
# MIC code -> settlement currency.
CCY_DICT = {
    "XTKS": "JPY",
    "XHKG": "HKD",
    "XSEC": "CNY",
    "XSSC": "CNY",
    "XSHE": "CNY",
    "XSHG": "CNY",
}
# MIC code -> payment calendar name used by FA.
CAL_DICT = {
    "XTKS": "Tokyo",
    "XHKG": "Hong Kong",
    "XSEC": "Hong Kong",
    "XSSC": "Hong Kong",
    "XSHE": "Beijing",
    "XSHG": "Beijing",
}
# MIC code -> spot days (T+n settlement cycle).
SPOT_DICT = {
    "XTKS": 3,
    "XHKG": 2,
    "XSEC": 1,
    "XSSC": 1,
    "XSHE": 1,
    "XSHG": 1,
}
def round_half_up(val, digit):
    """Round *val* half-up to *digit* decimal places.

    The str() round-trip re-parses the value so repr-level noise in binary
    floats does not tip the half-way comparison.
    """
    scale = pow(10, digit)
    return math.floor(float(str(val)) * scale + 0.5) / scale
def question_marks(st):
    """Return a comma-separated '?' placeholder list, one per column in *st*.

    E.g. "a,b,c" -> "?,?,?".  Used to build parameterized INSERT statements.
    Replaces a manual accumulation loop (which also shadowed the function
    name with a local variable) with the join idiom; behavior is unchanged,
    including the single "?" produced for an empty header.
    """
    return ','.join(['?'] * len(st.split(',')))
def files_lookup(tgt_dir, pattern, recur_list=False, sub_folder=False, most_recent=True):
    """Find files under *tgt_dir* whose names match the fnmatch *pattern*.

    sub_folder=True recurses into subdirectories; most_recent=True sorts
    descending so the newest (lexicographically greatest) path comes first.
    Returns the whole sorted list when recur_list is True, otherwise prints
    and returns the first match.
    """
    matches = []
    for entry in os.listdir(tgt_dir):
        entry_path = os.path.join(tgt_dir, entry)
        if sub_folder and os.path.isdir(entry_path):
            matches += files_lookup(entry_path, pattern, recur_list, sub_folder, most_recent)
        if fnmatch.fnmatch(entry, pattern):
            matches.append(entry_path)
    matches.sort(reverse=most_recent)
    if recur_list:
        return matches
    print (matches[0])
    return matches[0]
def db_cur(source = ":memory:"):
# Register the adapter
# sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
# sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr = None, index_arr = None):
    """Create (or truncate) *tbl_name* and bulk-load *arr* into it.

    Columns come from the comma-separated *header*; a column named "id"
    becomes the PRIMARY KEY.  On first creation, every column listed in
    *index_arr* gets an index.  If the table already exists it is emptied
    instead of recreated.
    """
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    if cur.fetchone()[0] == 0:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        for index in (index_arr or []):
            cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
    else:
        cur.execute("""Delete From %s""" % (tbl_name))
    if arr is not None:
        cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + question_marks(header) + ")", arr)
    return
def csv_to_arr(csv_file, start=0, has_header=True, delim=','):
    """Load a delimited text file.

    Returns (sanitized_header, data_rows) when has_header is True, otherwise
    the raw rows from *start* onward.  Header sanitization strips characters
    (digits, spaces, punctuation) that would be illegal in the SQLite column
    names built by create_tbl.

    Fix: the former 'rU' open mode was removed in Python 3.11; plain 'r'
    already performs universal-newline translation in Python 3.  The dead
    trailing ``return`` is also removed.
    """
    with open(csv_file, 'r') as f:
        arr = list(csv.reader(f, delimiter=delim))
    if has_header:
        header = ','.join(arr[start])
        return re.sub(r"[\*\.#/\$%\d\"\(\) ]", "", header), arr[start+1:]
    return arr[start:]
def xlsx_to_arr(xlsx_file, worksheet=0, row_start=0, col_start=0, row_end=-1, col_end=-1, def_separator=""):
    """Read a worksheet region into (sanitized_header, data_rows) using xlrd.

    *worksheet* is a sheet index (int) or a sheet name (str); row_end/col_end
    of -1 mean "through the last row/column".  Duplicate header names get
    their column position appended so they stay unique.

    Fix: the bare ``except`` around the index lookup is narrowed to the two
    exceptions xlrd actually raises for a name/out-of-range argument, so a
    KeyboardInterrupt or unrelated failure is no longer silently rerouted.
    """
    wb = xlrd.open_workbook(xlsx_file)
    try:
        ws = wb.sheet_by_index(worksheet)
    except (IndexError, TypeError):
        # A non-integer (or out-of-range) worksheet argument is a sheet name.
        ws = wb.sheet_by_name(worksheet)
    row_end = ws.nrows if row_end == -1 else row_end
    col_end = ws.ncols if col_end == -1 else col_end
    arr = [ws.row_values(row, start_colx=col_start, end_colx=col_end) for row in range(row_start, row_end)]
    header = ','.join(x if x not in arr[0][:n] else x+str(n) for n, x in enumerate(arr[0]) )
    return re.sub(r"[\*\.#/\$%\"\(\)& \_]", def_separator, header), arr[1:]
def trd_to_arr(ced_file):
    """Parse the fixed-width Ullink trade file into (header, rows).

    Only lines whose first 6 characters form a numeric id are treated as
    trade records; column boundaries are hard-coded byte offsets from the
    upstream file spec.  unitPrice is parsed as float and tradeQuantity as
    int; everything else stays a stripped string.
    NOTE(review): an empty file returns None (not a tuple) — callers that
    unpack the result will fail on it; confirm whether that is intended.

    Fix: 'rU' open mode was removed in Python 3.11; 'r' gives the same
    universal-newline behavior in Python 3.  Slice offsets are unchanged.
    """
    ced_header = "id,instrumentCode,InstrumentName,MarketCode,ISINCode,tradeSide,accountId,ourRefNumber,isShortSell,unitPrice,tradeQuantity,tradeNum,executionDateTime,brokerID,CounterParty,marketmaker,investorId,investorOrderNum,OrderID,etfQuantity,glTradeType,manualTradeKey,houseAccountId,traderId,ourOrderID,clClientID,isOrderFullyFilled,Channel,SEHKTradeRef,BrokerCode,BrokerAccount,Desk,ExVenue,ExecAction,clientLEI,ourLEI,clTraderId"
    ced_arr = []
    with open(ced_file, 'r') as f:
        rows = f.readlines()
        if len(rows) < 1:
            return
        for row in rows:
            if row[0:6].strip().isdigit():
                ced_arr.append([row[0:6].strip(), row[7:22].strip(), row[23:43].strip(), row[44:54].strip(), row[55:75].strip(), row[76:91].strip(), row[92:117].strip(),
                    row[118:143].strip(), row[144:172].strip(), float(row[173:198].strip()), int(row[199:214].strip()), row[215:240].strip(), str(row[241:266].strip()),
                    row[268:282].strip(), row[284:298].strip(), row[300:309].strip(), row[311:328].strip(), row[330:361].strip(), row[363:412].strip(),
                    (row[413:423].strip()), row[424:429].strip(), row[430:455].strip(), row[456:472].strip(), row[473:483].strip(), row[484:534].strip(),
                    row[535:545].strip(), row[546:566].strip(), row[567:574].strip(), row[575:591].strip(), row[592:602].strip(), row[603:623].strip(),
                    row[624:634].strip(), row[635:642].strip(), row[643:653].strip(), row[654:674].strip(), row[675:695].strip(), row[696:706].strip()])
    return ced_header, ced_arr
def remove_error_trd(cur):
    """Purge trades that must not flow to FA: every fill sharing a tradeNum
    with a 'Delete' ExecAction (error/cancel corrections), and every trade
    whose clTraderId appears in the emt table of ED EMSX trader UUIDs."""
    # Exclude all error trades and ED EMSX trades
    cur.execute("delete from trd where (tradeNum in (select tradeNum from trd where ExecAction = 'Delete')) or (clTraderId in (select ed_emsx_trader_uuid from emt)) ")
    return
def prod_type_map():
    """Every Ullink trade maps to the single FA product type "Portfolio Swap"."""
    return "Portfolio Swap"
def search_row_to_dict(header, arr, search_key, search_value):
    """Return the first row of *arr* (as a header-keyed dict) whose
    *search_key* column equals *search_value*; empty dict when none match."""
    header_arr = header.split(',')
    for row in arr:
        row_dict = {header_arr[idx]: cell for idx, cell in enumerate(row)}
        if row_dict[search_key] == search_value:
            return row_dict
    return {}
def arr_to_csv(file_name, header, data_arr):
    """Write *header* (comma-split) and *data_arr* rows to *file_name* as a
    fully-quoted CSV; every cell is stringified first.  The mode branch keeps
    Python 2 ('wb') and Python 3 ('w', newline='') both working."""
    if sys.version_info >= (3,0,0):
        out = open(file_name, 'w', newline='')
    else:
        out = open(file_name, 'wb')
    writer = csv.writer(out, quoting=csv.QUOTE_ALL)
    writer.writerow(header.split(','))
    for data_row in data_arr:
        writer.writerow([str(cell) for cell in data_row])
    out.close()
    return
def dict_from_row(row):
    """Convert a sqlite3.Row (anything exposing .keys() plus iteration) into a plain dict."""
    return {key: value for key, value in zip(row.keys(), row)}
def calc_net_bb(net_buy, net_sell, net_ss, prev_qty):
    """Split gross buys into fresh long buys vs buy-backs (short covers).

    Buys first cover a negative prior position (prev_qty < 0); any remainder
    is then netted against the day's sells and short-sells.  Returns the
    pair (net_buy, net_bb) after netting.
    """
    net_bb = 0
    if prev_qty < 0:
        if prev_qty + net_buy <= 0:
            # Every share bought is a cover of the existing short.
            return 0, net_buy
        # Cover the short in full, carry the rest forward as buys.
        net_bb = -prev_qty
        net_buy = net_buy - net_bb
    else:
        # Fold the prior long position into the buy side for netting.
        net_buy = net_buy + prev_qty
    if net_buy > net_ss + net_sell:
        net_buy -= net_ss
        net_bb += net_ss
    else:
        net_bb += net_buy - net_sell
        net_buy = net_sell
    if prev_qty > 0:
        net_buy -= prev_qty
    return net_buy, net_bb
def split_buy_bb(cur):
    """Aggregate the day's trades per (client, market, instrument) and split
    them into BUY / SELL / SS / BB legs with volume-weighted average prices.

    The SQL nets DMA and DSA fills separately, joins the prior FA position
    (ins) to know how much of the buying is a short cover, then calc_net_bb
    apportions buys between fresh longs and buy-backs; the BB quantity is in
    turn attributed to DMA first, remainder to DSA.  Returns (header, rows)
    for the in-memory "pos" table.
    """
    pos_header = "fa_name,MarketCode,instrumentCode,ls,side,qty,avg_price,seq,handins"
    pos_arr = []
    cur.execute("""
        select acc.fa_name, trd.MarketCode, trd.instrumentCode,
        sum(case when trd.tradeSide = 'BUY' and channel = 'DMA' then tradeQuantity else 0 end) as dma_buy,
        sum(case when trd.tradeSide = 'BUY' and channel = 'DSA' then tradeQuantity else 0 end) as dsa_buy,
        sum(case when trd.tradeSide = 'SELL' and trd.isShortSell <> 'Y' and channel = 'DMA' then tradeQuantity else 0 end) as dma_sell,
        sum(case when trd.tradeSide = 'SELL' and trd.isShortSell <> 'Y' and channel = 'DSA' then tradeQuantity else 0 end) as dsa_sell,
        sum(case when trd.tradeSide = 'SELL' and trd.isShortSell = 'Y' and channel = 'DMA' then tradeQuantity else 0 end) as dma_ss,
        sum(case when trd.tradeSide = 'SELL' and trd.isShortSell = 'Y' and channel = 'DSA' then tradeQuantity else 0 end) as dsa_ss,
        sum(case when trd.tradeSide = 'BUY' then tradeQuantity*unitPrice else 0 end) as net_buy_value,
        sum(case when trd.tradeSide = 'SELL' and trd.isShortSell <> 'Y' then tradeQuantity*unitPrice else 0 end) as net_sell_value,
        sum(case when trd.tradeSide = 'SELL' and trd.isShortSell = 'Y' then tradeQuantity*unitPrice else 0 end) as net_ss_value,
        ifnull(ins.Quantity, 0) as prev_qty, trd.instrumentCode||' '||micdict.country||' Equity'
        from trd
        left join acc
        on trd.accountId = acc.account_no
        and acc.mic_code like '%'||trd.MarketCode||'%'
        left join micdict
        on trd.MarketCode = micdict.mic
        left join ins
        on ins.Counterparty = acc.fa_name
        and trd.instrumentCode||' '||micdict.country||' Equity' = ins.BBGCode
        group by acc.fa_name, trd.MarketCode, trd.instrumentCode
        """)
    for net_row in cur.fetchall():
        fa_name = net_row["fa_name"]
        market_code = net_row["MarketCode"]
        ins_code = net_row["instrumentCode"]
        net_buy = net_row["dma_buy"] + net_row["dsa_buy"]
        net_sell = net_row["dma_sell"] + net_row["dsa_sell"]
        net_ss = net_row["dma_ss"] + net_row["dsa_ss"]
        prev_qty = net_row["prev_qty"]
        net_buy, net_bb = calc_net_bb(net_buy, net_sell, net_ss, prev_qty)
        # print (net_buy, net_sell, net_ss, net_bb)
        # VWAPs: buys and buy-backs share one blended price from total buy value.
        net_buy_price = net_row["net_buy_value"]/(net_buy+net_bb) if (net_buy+net_bb) > 0 else 0
        net_sell_price = net_row["net_sell_value"]/net_sell if net_sell > 0 else 0
        net_ss_price = net_row["net_ss_value"]/net_ss if net_ss > 0 else 0
        net_bb_price = net_row["net_buy_value"]/(net_buy+net_bb) if (net_buy+net_bb) > 0 else 0
        dma_buy = net_row["dma_buy"]
        dsa_buy = net_row["dsa_buy"]
        # Attribute the buy-back quantity to DMA first, remainder to DSA.
        if dma_buy >= net_buy:
            dma_bb = dma_buy - net_buy
            dsa_bb = abs(dma_bb - net_bb)
        else:
            dsa_bb = dsa_buy - net_buy
            dma_bb = abs(dsa_bb - net_bb)
        dma_buy = dma_buy - dma_bb
        dsa_buy = dsa_buy - dsa_bb
        # seq orders the legs BUY(1) SELL(2) SS(3) BB(4) for downstream output.
        pos_arr.append(([fa_name, market_code, ins_code, 'L', 'BUY', dma_buy, net_buy_price, 1, "DMA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'L', 'SELL', net_row["dma_sell"], net_sell_price, 2, "DMA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'S', 'SS', net_row["dma_ss"], net_ss_price, 3, "DMA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'L', 'BB', dma_bb, net_bb_price, 4, "DMA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'L', 'BUY', dsa_buy, net_buy_price, 1, "DSA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'L', 'SELL', net_row["dsa_sell"], net_sell_price, 2, "DSA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'S', 'SS', net_row["dsa_ss"], net_ss_price, 3, "DSA" ]))
        pos_arr.append(([fa_name, market_code, ins_code, 'L', 'BB', dsa_bb, net_bb_price, 4, "DSA" ]))
    # for row in pos_arr:
    # 	print (row)
    return pos_header, pos_arr
def trd_ss_bb(cur):
    """Relabel short sells as 'SS' in trd, then clone the last BUY fill per
    (account, market, instrument, channel) as a synthetic 'BB' row so the
    buy-back leg produced by split_buy_bb has a matching trade record.
    The clone reuses the source tradeNum truncated to 28 chars + 'BB'."""
    cur.execute("update trd set tradeSide = 'SS' where tradeSide = 'SELL' and isShortSell = 'Y' ")
    # Copy last BUY order for each client, market and instrument as BB
    cur.execute("""
        With tmp (id,instrumentCode,InstrumentName,MarketCode,ISINCode,tradeSide,accountId,ourRefNumber,isShortSell,unitPrice,tradeQuantity,tradeNum,executionDateTime,brokerID,CounterParty,marketmaker,investorId,investorOrderNum,OrderID,etfQuantity,glTradeType,manualTradeKey,houseAccountId,traderId,ourOrderID,clClientID,isOrderFullyFilled,Channel,SEHKTradeRef,BrokerCode,BrokerAccount,Desk,ExVenue,ExecAction,clientLEI,ourLEI,clTraderId)
        as (
        select *
        from trd
        where tradeNum in (select max(tradeNum) from trd where tradeSide = 'BUY' group by accountId, MarketCode, InstrumentCode, Channel)
        )
        Insert into trd (instrumentCode,InstrumentName,MarketCode,ISINCode,tradeSide,accountId,ourRefNumber,isShortSell,unitPrice,tradeQuantity,tradeNum,executionDateTime,brokerID,CounterParty,marketmaker,investorId,investorOrderNum,OrderID,etfQuantity,glTradeType,manualTradeKey,houseAccountId,traderId,ourOrderID,clClientID,isOrderFullyFilled,Channel,SEHKTradeRef,BrokerCode,BrokerAccount,Desk,ExVenue,ExecAction,clientLEI,ourLEI,clTraderId)
        select instrumentCode,InstrumentName,MarketCode,ISINCode,'BB',accountId,ourRefNumber,isShortSell,unitPrice,tradeQuantity,substr(tradeNum,1,28)||'BB',executionDateTime,brokerID,CounterParty,marketmaker,investorId,investorOrderNum,OrderID,etfQuantity,glTradeType,manualTradeKey,houseAccountId,traderId,ourOrderID,clClientID,isOrderFullyFilled,Channel,SEHKTradeRef,BrokerCode,BrokerAccount,Desk,ExVenue,ExecAction,clientLEI,ourLEI,clTraderId
        from tmp
        """)
    return
def ull_to_fa(cur):
    """Build the FA trade-import rows from the netted pos legs.

    Joins each non-zero pos leg back to one representative trade (min
    tradeNum per account/market/instrument/side/channel) for reference
    fields, maps exchange codes through the MIC/LOC/CCY/CAL/SPOT dicts, and
    asks FeeCalc for commission/fees.  Returns (header, rows) ready for
    arr_to_csv.
    """
    fa_header = "Trade Num,Product Type,Trade Date,Execution DateTime,Spot Days,Start Date,End Date,Counterparty,Local Exchange Code,Instrument Name,MIC Code,ISINCode,Security,Security Name,Location,Currency,Pay Cal 1,B/S,MSS Account,Short Sell,Buy Back,Quantity,Gross Price,Commission Currency,Commission,Trading Fee Currency,Trading Fee,Transaction Levy Currency,Transaction Levy,Stamp Duty Currency,Stamp Duty,Normal/Closing,Transaction Ref,Group Ref No,Trader,External Reference,Trade Source,Channel"
    fa_arr = []
    FC = FeeCalc()
    cur.execute("""
        select pos.fa_name, pos.qty, pos.avg_price, acc.account_no, acc.fa_dps_ref, trd.*
        from pos
        join acc
        on pos.fa_name = acc.fa_name
        and pos.ls = acc.longshort
        join trd
        on acc.account_no = trd.accountId
        and pos.MarketCode = trd.MarketCode
        and pos.InstrumentCode = trd.InstrumentCode
        and pos.side = trd.tradeSide
        and pos.handins = trd.Channel
        where pos.qty <> 0 and trd.tradeNum in (
        select min(tradeNum) from trd group by accountId, MarketCode, InstrumentCode, tradeSide, Channel
        )
        order by pos.fa_name, trd.MarketCode, trd.instrumentCode, pos.seq
        """)
    for trd_dict in cur.fetchall():
        tradenum = trd_dict["executionDateTime"][0:8] + trd_dict["tradeNum"]
        product_type = prod_type_map()
        trade_date = trd_dict["executionDateTime"][0:8]
        execution_datetime = trd_dict["executionDateTime"]
        start_date = trade_date
        end_date = ""
        counterparty = trd_dict["fa_name"].upper()
        mic_code = trd_dict["MarketCode"]
        # Shenzhen Connect codes are zero-padded to 6 digits.
        local_exchange_code = ("000000" + trd_dict["instrumentCode"])[-6:] if mic_code == "XSEC" else trd_dict["instrumentCode"]
        instrument_name = ""
        spot_days = SPOT_DICT[mic_code]
        isin = trd_dict["ISINCode"]
        security = local_exchange_code + ' ' + MIC_DICT[mic_code] + ' Equity'
        security_name = trd_dict["instrumentName"]
        location = LOC_DICT[mic_code]
        currency = CCY_DICT[mic_code]
        pay_cal_1 = CAL_DICT[mic_code]
        # BB legs are booked as buys; SS legs as sells with the short flag set.
        bs = "BUY" if trd_dict["tradeSide"] == "BUY" or trd_dict["tradeSide"] == "BB" else "SELL"
        account_id = trd_dict["accountId"]
        short_sell = "Y" if trd_dict["tradeSide"] == "SS" else "N"
        buy_back = "Y" if trd_dict["tradeSide"] == "BB" else "N"
        quantity = int(float(trd_dict["qty"]))
        signed_qty = quantity if bs == "BUY" else -quantity
        gross_price = round_half_up(trd_dict["avg_price"], 4)
        trader = "EDMO2"
        external_reference = trd_dict["fa_dps_ref"]
        trade_source = "ULLINK"
        channel = trd_dict["Channel"]
        commission_currency = currency
        trading_fee_currency = currency
        transaction_levy_currency = currency
        stamp_duty_currency = currency
        commission, trading_fee, transaction_levy, stamp_duty = FC.fee_calc(external_reference, gross_price, signed_qty, local_exchange_code, mic_code, channel)
        fa_arr.append([tradenum, product_type, trade_date, execution_datetime, spot_days, start_date, end_date, counterparty, local_exchange_code, instrument_name
            , mic_code, isin, security, security_name, location, currency, pay_cal_1, bs, account_id, short_sell, buy_back, quantity, float(gross_price)
            , commission_currency, (commission), trading_fee_currency, trading_fee, transaction_levy_currency, transaction_levy, stamp_duty_currency, stamp_duty
            , '', '', '', trader, external_reference, trade_source, channel ])
    return fa_header, fa_arr
def main():
    """End-to-end batch: load Ullink trades, FA positions and reference data
    into an in-memory SQLite DB, net/split fills per client, and write the
    FA trade-import CSV named with today's date."""
    print ("Ullink to FA")
    trd_file = files_lookup(PATH_DICT["trd_dir"], PATH_DICT["trd_filename"])
    ins_file = files_lookup(PATH_DICT["ins_dir"], PATH_DICT["ins_filename"])
    trd_header, trd_arr = trd_to_arr(trd_file)
    ins_header, ins_arr = xlsx_to_arr(ins_file, row_start=1)
    acc_header, acc_arr = xlsx_to_arr(PATH_DICT["acc_file"], def_separator="_")
    emt_header, emt_arr = csv_to_arr(PATH_DICT["emsx_trader_file"])
    conn, cur = db_cur()
    create_tbl(cur, "trd", trd_header, trd_arr)
    create_tbl(cur, "ins", ins_header, ins_arr)
    create_tbl(cur, "acc", acc_header, acc_arr)
    create_tbl(cur, "emt", emt_header, emt_arr)
    create_tbl(cur, "micdict", "mic,country", MIC_DICT.items())
    remove_error_trd(cur)
    # Keep a CSV copy of the parsed fixed-width trade file for audit.
    arr_to_csv(trd_file.replace("txt", "csv"), trd_header, trd_arr)
    pos_header, pos_arr = split_buy_bb(cur)
    create_tbl(cur, "pos", pos_header, pos_arr)
    trd_ss_bb(cur)
    fa_header, fa_arr = ull_to_fa(cur)
    # for row in fa_arr:
    # 	print (row)
    arr_to_csv(PATH_DICT["fa_output"].replace("YYYYMMDD", datetime.date.today().strftime("%Y%m%d")), fa_header, fa_arr)
    return
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print ("Ctrl+C pressed. Stopping...") | {
"repo_name": "frederick623/pb",
"path": "ul_automation/ullink_to_fa.py",
"copies": "2",
"size": "17803",
"license": "apache-2.0",
"hash": 7798542864870544000,
"line_mean": 38.389380531,
"line_max": 501,
"alpha_frac": 0.6751109364,
"autogenerated": false,
"ratio": 2.603539046504826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4278649982904826,
"avg_score": null,
"num_lines": null
} |
# 20180524 Add multiple FA client support
import os
import sys
import re
import sqlite3
import csv
import fnmatch
import xlrd
import win32com.client
import traceback
import time
import glob
# UNC paths for the long/short-limit batch inputs and outputs, plus the
# HK borrow haircut ratio applied to the GB1 show list.
PATH_DICT = {
    "pos_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data" ,
    "pos_filename": "PositionDetails_????????.xlsx",
    "res_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\restricted_list.xlsx",
    "exc_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\restricted_excl_list.xlsx",
    "acc_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\account.xlsx",
    "nom_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Nomura_Borrow_Availability\\nomIntJpyAvail.csv",
    "miz_file": "S:\\SBL\\Reports\\Daily SBL Report\\Daily Availability\\Source\\MizAvailability????????.xls",
    "hti_nom_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Nomura_Borrow_Availability\\HTIJPAvail.csv",
    "gb1_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\ShowlistHK.csv",
    "cln_sbl_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Client_SBL_List",
    "sbl_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Long_Short_Limit\\BorrowAvailability.csv",
    "lmt_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Long_Short_Limit\\DailyLimits.csv",
    "ath_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Long_Short_Limit\\AuthorizationRules.csv",
    "showlist_distro": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\PB_Batch_Job\\showlist_distro.py",
    "haircut_sbl_hk": 0.5,
}
# Local-testing variant of PATH_DICT (kept for reference, intentionally disabled).
# PATH_DICT = {
#     "pos_dir": os.path.dirname(os.path.abspath(__file__)) ,
#     "pos_filename": "PositionDetails_????????.xlsx",
#     "res_file": os.path.join(os.path.dirname(os.path.abspath(__file__)), "restricted_list.xlsx"),
#     "exc_file": os.path.join(os.path.dirname(os.path.abspath(__file__)), "restricted_excl_list.xlsx"),
#     "acc_file": os.path.join(os.path.dirname(os.path.abspath(__file__)), "account.xlsx"),
#     "nom_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Nomura_Borrow_Availability\\nomIntJpyAvail.csv",
#     "miz_file": "S:\\SBL\\Reports\\Daily SBL Report\\Daily Availability\\Source\\MizAvailability????????.xls",
#     "hti_nom_file": os.path.join(os.path.dirname(os.path.abspath(__file__)), "HTIJPAvail.csv"),
#     "gb1_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\ShowlistHK.csv",
#     "cln_sbl_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Client_SBL_List",
#     "sbl_output": os.path.join(os.path.dirname(os.path.abspath(__file__)), "BorrowAvailability.csv"),
#     "lmt_output": os.path.join(os.path.dirname(os.path.abspath(__file__)), "DailyLimits.csv"),
#     "ath_output": os.path.join(os.path.dirname(os.path.abspath(__file__)), "AuthorizationRules.csv"),
#     "showlist_distro": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\PB_Batch_Job\\showlist_distro.py",
#     "haircut_sbl_hk": 0.5,
# }
# Bloomberg country suffix -> MIC (exchange) code.
MIC_DICT = { "JP": "XTKS",
    "HK": "XHKG",
    "C1": "XSSC",
    "C2": "XSEC",
    "AU": "XASX" }
# MIC code -> Ullink numeric market id used in the IDC;<mkt>;E:<code> keys.
MKT_DICT = { "XTKS": "373",
    "XHKG": "712",
    "XSSC": "899",
    "XSEC": "898",
    "XSHG": "899",
    "XSHE": "898",
    "XASX": "728",
    "XNYS": "1057",
    "XNAS": "1061" }
def strx(s):
    """Return *s* unchanged when it is already a str; otherwise format it as
    an integer string (e.g. the float 5.0 from Excel becomes "5")."""
    return s if isinstance(s, str) else str(int(s))
def int_to_str(s):
    """Best-effort "3.0" -> "3" normalization; non-numeric input is returned unchanged.

    Fix: the former bare ``except`` also swallowed KeyboardInterrupt and
    SystemExit; it is narrowed to the conversion errors float()/int() raise.
    """
    try:
        return str(int(float(s)))
    except (TypeError, ValueError, OverflowError):
        return s
def question_marks(st):
    """Build the '?' placeholder list for a parameterized INSERT: one marker
    per comma-separated column in *st* ("a,b,c" -> "?,?,?").

    Rewritten from a manual accumulation loop (which shadowed the function
    name with a local) to the join idiom; output is identical, including the
    single "?" for an empty header string.
    """
    return ','.join('?' for _ in st.split(','))
def files_lookup(tgt_dir, pattern, recur_list=False, sub_folder=False, most_recent=True):
    """Collect paths under *tgt_dir* matching the fnmatch *pattern*.

    Recurses when sub_folder is True; most_recent=True sorts descending so
    the lexicographically greatest name is first.  recur_list=True returns
    the full list, otherwise just the first match.
    """
    found = []
    for name in os.listdir(tgt_dir):
        path = os.path.join(tgt_dir, name)
        if sub_folder and os.path.isdir(path):
            found.extend(files_lookup(path, pattern, recur_list, sub_folder, most_recent))
        if fnmatch.fnmatch(name, pattern):
            found.append(path)
    found.sort(reverse=most_recent)
    return found if recur_list else found[0]
def db_cur(source = ":memory:"):
# Register the adapter
# sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
# sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(source, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
return conn, cur
def create_tbl(cur, tbl_name, header, arr = None, index_arr = None):
    """Create *tbl_name* from the comma-separated *header* (an "id" column
    becomes the PRIMARY KEY, index_arr columns get indexes), or empty it if
    it already exists; then bulk-insert *arr* when it is non-empty."""
    cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
    already_there = cur.fetchone()[0] != 0
    if already_there:
        cur.execute("""Delete From %s""" % (tbl_name))
    else:
        cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
        if index_arr is not None:
            for col in index_arr:
                cur.execute("CREATE INDEX " + tbl_name + "_" + col + " ON " + tbl_name + " (" + col + ");")
    if arr is not None and len(arr) > 0:
        cur.executemany("INSERT INTO " + tbl_name + " VALUES (" + question_marks(header) + ")", arr)
    return
def csv_to_arr(csv_file, start=0, has_header=True, delim=','):
    """Read a delimited file into memory.

    With has_header, returns (sanitized_header, rows_after_header); otherwise
    the raw rows from *start*.  Sanitization removes characters that would be
    invalid in the SQLite column names create_tbl builds from the header.

    Fix: 'rU' open mode was removed in Python 3.11; 'r' already gives
    universal newlines in Python 3.  The unreachable trailing ``return``
    is dropped.
    """
    with open(csv_file, 'r') as f:
        arr = list(csv.reader(f, delimiter=delim))
    if not has_header:
        return arr[start:]
    header = ','.join(arr[start])
    return re.sub(r"[\*\.#/\$%\d\"\(\) ]", "", header), arr[start+1:]
def xlsx_to_arr(xlsx_file, worksheet=0, row_start=0, col_start=0, row_end=-1, col_end=-1, def_separator=""):
    """Read a worksheet region into (sanitized_header, data_rows) using xlrd.

    *worksheet* is a sheet index (int) or a sheet name (str); -1 bounds mean
    "to the end".  Duplicate header names get their column index appended.

    Fix: the bare ``except`` is narrowed to the exceptions xlrd raises when
    the argument is not a valid index, so unrelated errors propagate.
    """
    wb = xlrd.open_workbook(xlsx_file)
    try:
        ws = wb.sheet_by_index(worksheet)
    except (IndexError, TypeError):
        # Fall back to treating the argument as a sheet name.
        ws = wb.sheet_by_name(worksheet)
    row_end = ws.nrows if row_end == -1 else row_end
    col_end = ws.ncols if col_end == -1 else col_end
    arr = [ws.row_values(row, start_colx=col_start, end_colx=col_end) for row in range(row_start, row_end)]
    header = ','.join(x if x not in arr[0][:n] else x+str(n) for n, x in enumerate(arr[0]) )
    return re.sub(r"[\*\.#/\$%\"\(\)& \_]", def_separator, header), arr[1:]
def arr_to_csv(file_name, header, data_arr, quote=csv.QUOTE_ALL, lineterm="\n"):
    """Write *header* (comma-split) plus stringified *data_arr* rows to
    *file_name*; quoting style and line terminator are configurable.  The
    mode branch keeps both Python 2 ('wb') and Python 3 ('w') working."""
    if sys.version_info >= (3,0,0):
        out = open(file_name, 'w', newline='')
    else:
        out = open(file_name, 'wb')
    writer = csv.writer(out, quoting=quote, lineterminator=lineterm)
    writer.writerow(header.split(','))
    for data_row in data_arr:
        writer.writerow([str(cell) for cell in data_row])
    out.close()
    return
def arr_to_txt(file_name, header, data_arr):
    """Write a pipe-delimited header line (skipped when *header* is empty)
    followed by one pre-formatted string per element of *data_arr*."""
    with open(file_name, 'w+') as out:
        if header != "":
            out.write('|'.join(header.split(',')) + '\n')
        for line in data_arr:
            out.write(line + '\n')
    return
def xlsx_to_arr_with_macro(xlsx_file, worksheet=1, xyrange="", macro=""):
    """Read a workbook region via Excel COM automation, optionally running a
    macro first; returns (sanitized_header, data_rows).

    Uses win32com (real Excel), so every cell is fetched individually and
    stringified; rows that are entirely empty are skipped.  The workbook is
    saved on close only when a macro was run (Close(True)).
    NOTE(review): worksheet defaults to 1 here (COM is 1-based), unlike the
    0-based xlrd reader above.
    """
    arr = []
    xl = win32com.client.Dispatch('Excel.Application')
    # xl.ScreenUpdating = False
    wb = xl.Workbooks.Open(xlsx_file)
    if macro != "":
        xl.Application.Run(macro)
    ws = wb.Worksheets(worksheet)
    # Whole used range unless an explicit A1-style range was given.
    rng = ws.UsedRange if xyrange == "" else ws.Range(xyrange)
    for i in range(rng.Row, rng.Row+rng.Rows.Count):
        row_arr = []
        for j in range(rng.Column, rng.Column+rng.Columns.Count):
            row_arr.append("" if ws.Cells(i, j).Value == None else str(ws.Cells(i, j).Value) )
        if "".join(row_arr) != "":
            arr.append(row_arr)
    wb.Close((macro != ""))
    xl.Application.Quit()
    header = arr[0]
    header = ','.join(header)
    return re.sub(r"[\*\.#/\$%\"\(\)& \_]", "", header), arr[1:]
def import_client_sbl(cur):
    """Load each client's latest SBL availability workbook into the csbl table.

    Every subfolder of cln_sbl_dir is one client; the newest file in the
    folder (greatest name, temp '~' files excluded) is used, and each row is
    prefixed with the client (folder) name.

    Fix: the previous version called create_tbl once per folder, and
    create_tbl truncates an existing table before inserting — so with more
    than one client folder only the last client's rows survived.  Rows are
    now accumulated across folders and loaded in a single create_tbl call.
    """
    all_rows = []
    csbl_header = None
    for root, dirs, files in os.walk(PATH_DICT["cln_sbl_dir"]):
        if files != []:
            csbl_file = max([ os.path.join(root, csbl_file) for csbl_file in files if '~' not in csbl_file ])
            print (csbl_file)
            csbl_name = root.split('\\')[-1]
            csbl_header_part, csbl_arr = xlsx_to_arr(csbl_file)
            # All client workbooks are expected to share one schema.
            csbl_header = "client," + csbl_header_part
            all_rows.extend([csbl_name] + row for row in csbl_arr)
    if csbl_header is not None:
        create_tbl(cur, "csbl", csbl_header, all_rows)
    return
def db_to_sbl(cur):
    """Build the Ullink BorrowAvailability file.

    Merges broker availability (nom + gb1) into the sbl table, crosses it
    with every short-enabled account whose sbl_mkt covers the stock's
    country suffix, drops restricted names, unions in per-client confirmed
    quantities from csbl, and writes pipe-delimited
    client|account|IDC;<mkt>;E:<code>|qty rows (zero quantities skipped).
    """
    sbl_arr = []
    sbl_header = "stockcode,qty"
    create_tbl(cur, "sbl", sbl_header)
    import_client_sbl(cur)
    cur.execute("""
        insert into sbl
        select Code||' Equity', NomuraAvailability from nom
        union
        select BBGStockCode, ShowQty from gb1
        """)
    cur.execute("""
        select acc.client_no, acc.account_no, sbl.stockcode, substr(stockcode, -9, 2), cast (sbl.qty as int)
        from acc cross join sbl
        left join res
        on sbl.stockcode = res.ul_stock
        where acc.longshort = 'S'
        and res.ul_stock is null
        and acc.sbl_mkt like '%'||substr(sbl.stockcode, -9, 2)||'%'
        union
        select acc.client_no, acc.account_no, csbl.BBGStockCode, substr(csbl.BBGStockCode, -9, 2), ifnull(case when csbl.ConfirmedQty = '' then 0 else cast(csbl.ConfirmedQty as int) end, 0)
        from csbl join acc
        on csbl.client = acc.fa_name
        where acc.longshort = 'S'
        """)
    for acc_row in cur.fetchall():
        if int(float(acc_row[4])) != 0:
            mkt_code = MKT_DICT[MIC_DICT[acc_row[3]]]
            sbl_arr.append(acc_row[0] + "|" + acc_row[1] + "|IDC;" + mkt_code + ";E:" + (acc_row[2].split(' '))[0] + "|" + str(acc_row[4]) )
    arr_to_txt(PATH_DICT["sbl_output"], "", sbl_arr)
    return
def db_to_lmt(cur):
    """Build the Ullink DailyLimits and AuthorizationRules files.

    For every long account: one row per held position (limit = held qty),
    plus zero-quantity rows for restricted names the account does not hold
    (blocking fresh buys), unless the (stock, client) pair is exempted in
    exc.  China Connect stocks use the CC;<mkt>;E:<code> key and ChiNext/
    A-share tags; other markets use IDC keys.  The long pipe runs are
    positional placeholders in Ullink's fixed import layout — the limit
    quantity's column differs between the CC and IDC record shapes.
    """
    lmt_arr = []
    ath_arr = []
    cur.execute("""
        select acc.client_no, acc.account_no, pos.BBGCode, pos.Quantity , case when res.ul_stock is null or exc.ul_stock is not null then '' else 0 end
        from pos
        join acc
        on pos.Counterparty = acc.fa_name
        left join res
        on pos.BBGCode = res.ul_stock
        left join exc
        on res.ul_stock = exc.ul_stock
        and acc.fa_name = exc.Fullname
        where acc.longshort = 'L' and pos.Quantity > 0
        union
        select acc.client_no, acc.account_no, res.ul_stock, 0, 0
        from acc cross join res
        left join pos
        on pos.BBGCode = res.ul_stock
        and pos.Counterparty = acc.fa_name
        left join exc
        on res.ul_stock = exc.ul_stock
        and acc.fa_name = exc.Fullname
        where acc.longshort = 'L'
        and acc.restricted_list like '%'||res.Market||'%'
        and pos.BBGCode is null
        and exc.ul_stock is null
        """ )
    for pos_row in cur.fetchall():
        bbg_arr = str(pos_row[2]).split(" ")
        ins_code = bbg_arr[0]
        market = bbg_arr[1]
        # TODO: TEMP WORKAROUND TO SKIP US EQT! REVAMP ASAP
        if market == "US":
            continue
        # TODO: Use FA generated MIC code
        mic_code = MIC_DICT[market] if market != "CH" else ("XSHG" if ins_code[0:2] == "60" else "XSHE")
        mkt_code = MKT_DICT[mic_code]
        tag = ""
        if market == "C1" or market == "C2":
            tag = ("CHINEXT" if ins_code[0:2] == "30" else "ASZR") if mic_code == "XSEC" else "ASMK"
            lmt_arr.append(pos_row[0] + "|" + pos_row[1] + "||||CC;" + mkt_code + ";E:" + ins_code + "|CC;" + mkt_code + ";E:" + ins_code + "|" + mic_code
                + "|equity|" + tag + "||||client|||||||||||||||||||||||||||||||||||||" + str(int(pos_row[3])) + "||||||||||||||||||||||||" + str(pos_row[4])
                + "||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||")
            ath_arr.append(pos_row[0] + "|" + pos_row[1] + "||||CC;" + mkt_code + ";E:" + ins_code + "|CC;" + mkt_code + ";E:" + ins_code + "|" + mic_code + "|equity|" + tag + "||||client|true|true|||||" )
        else:
            tag = "DEFAULT" if mic_code == "XSHE" else ""
            lmt_arr.append(pos_row[0] + "|" + pos_row[1] + "||||IDC;" + mkt_code + ";E:" + ins_code + "|IDC;" + mkt_code + ";E:" + ins_code + "|" + mic_code
                + "|equity|" + tag + "||||client|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||" + str(pos_row[4]) + "||||||||||||||||||||||||||||||||||||||||||||||||||||" + str(int(pos_row[3]))
                + "||||||||||||||||||||||||||||||||||||")
            ath_arr.append(pos_row[0] + "|" + pos_row[1] + "||||IDC;" + mkt_code + ";E:" + ins_code + "|IDC;" + mkt_code + ";E:" + ins_code + "|" + mic_code + "|equity|" + tag + "||||client|true|true|||||" )
    arr_to_txt(PATH_DICT["lmt_output"], "", lmt_arr)
    arr_to_txt(PATH_DICT["ath_output"], "", ath_arr)
    return
def merge_sbl(cur, nom_header):
    """Backfill Nomura JP availability with Mizuho where Nomura shows zero.

    When Nomura availability is 0 and Mizuho's indicative rate is <= 8%, the
    Mizuho quantity (and its rate, '-' treated as 5) is substituted.  The
    merged rows replace the nom table and are also exported to the
    HTIJPAvail.csv broadcast file (export failures are logged, not fatal).
    """
    cur.execute("""
        select nom.Code, nom.SecurityName, nom.PriceUSD,
        case when cast(nom.NomuraAvailability as int) = 0 and cast(miz.IndicativeRate as float) <= 8 then ifnull(miz.TotalAvail, 0) else nom.NomuraAvailability end,
        case when cast(nom.NomuraAvailability as int) = 0 and cast(miz.IndicativeRate as float) <= 8 and cast(miz.IndicativeRate as float) > 0 then ifnull(replace(miz.IndicativeRate,'-',5), 0) else nom.fee end
        from nom
        left join miz
        on nom.Code = cast(miz.Code as int)||' JP'
        """)
    merg_arr = cur.fetchall()
    create_tbl(cur, "nom", nom_header, merg_arr)
    try:
        arr_to_csv(PATH_DICT["hti_nom_file"], "Code,SecurityName,Price_USD,Haitong,Fee", merg_arr, csv.QUOTE_NONE)
    except:
        traceback.print_exc()
    return
def massage_restricted(cur):
    """Tidy the restricted list: drop stocks globally exempted in exc (blank
    Fullname = exempt for everyone), then duplicate SZ/SH rows with a generic
    'CH' suffix so both Connect code styles match downstream joins."""
    cur.execute("delete from res where ul_stock in (select ul_stock from exc where Fullname = '') ")
    cur.execute("""insert into res (Stock, Market, ul_stock) select Stock, Market, Replace(Replace(ul_stock, "C1", "CH"), "C2", "CH") from res where Market = "SZ" or Market = "SH" """)
    return
# 20180528 Request by SBL desk
def sbl_haircut(cur):
    """Scale the HK show-list quantities down by the configured haircut
    ratio (haircut_sbl_hk, currently 0.5) before they feed availability."""
    cur.execute("update gb1 set ShowQty = ShowQty * ? ",(PATH_DICT["haircut_sbl_hk"],))
    return
def main():
    """End-to-end batch: load positions, accounts, broker availability and
    restricted lists into an in-memory SQLite DB, then emit the Ullink
    borrow-availability, daily-limit and authorization files."""
    print ("Ullink long short limit")
    conn, cur = db_cur()
    pos_file = files_lookup(PATH_DICT["pos_dir"], PATH_DICT["pos_filename"])
    print (pos_file)
    pos_header, pos_arr = xlsx_to_arr(pos_file, row_start=1)
    acc_header, acc_arr = xlsx_to_arr(PATH_DICT["acc_file"], def_separator="_")
    nom_header, nom_arr = csv_to_arr(PATH_DICT["nom_file"])
    gb1_header, gb1_arr = csv_to_arr(PATH_DICT["gb1_file"], start=2)
    res_header, res_arr = xlsx_to_arr(PATH_DICT["res_file"], "Batch")
    exc_header, exc_arr = xlsx_to_arr(PATH_DICT["exc_file"], "Batch")
    print (res_header)
    # Derive the Bloomberg-style ul_stock key, folding SZ/SH into Connect codes.
    res_header = res_header + ",ul_stock"
    res_arr = [ [ strx(row[0]), row[1], row[2], strx(row[0])+' '+row[1].replace("SZ", "C2").replace("SH", "C1")+' Equity' ] for row in res_arr ]
    exc_header = exc_header + ",ul_stock"
    exc_arr = [ [ strx(row[0]), row[1], row[2], strx(row[0])+' '+row[1].replace("SZ", "C2").replace("SH", "C1")+' Equity' ] for row in exc_arr ]
    miz_file = max(glob.iglob(PATH_DICT["miz_file"]))
    print (miz_file)
    miz_header, miz_arr = xlsx_to_arr(miz_file)
    create_tbl(cur, "acc", acc_header, acc_arr)
    create_tbl(cur, "pos", pos_header, pos_arr)
    create_tbl(cur, "nom", nom_header, nom_arr)
    create_tbl(cur, "miz", miz_header, miz_arr)
    create_tbl(cur, "gb1", gb1_header, gb1_arr)
    sbl_haircut(cur)
    create_tbl(cur, "res", res_header, res_arr)
    create_tbl(cur, "exc", exc_header, exc_arr)
    massage_restricted(cur)
    merge_sbl(cur, nom_header)
    db_to_sbl(cur)
    db_to_lmt(cur)
    return
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print ("Ctrl+C pressed. Stopping...") | {
"repo_name": "frederick623/HTI",
"path": "ul_automation/ullink_long_short_limit.py",
"copies": "2",
"size": "14883",
"license": "apache-2.0",
"hash": -7219457117454632000,
"line_mean": 35.0387409201,
"line_max": 204,
"alpha_frac": 0.6204394275,
"autogenerated": false,
"ratio": 2.4763727121464227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8835439058306682,
"avg_score": 0.052274616267948056,
"num_lines": 413
} |
# 20180629 add SFMM as default portfolio
import ael
import acm
import HTI_Util
import HTI_Email_Util
import HTI_FeedTrade_EDD_Util
import os
import sys, traceback
import datetime
import sqlite3
import csv
import decimal
import fo_margin
import re
import sys
import win32com.client
import time
import ast
import glob
# Front Arena AEL dialog parameter table consumed by the ADFL runtime.
# Each entry is: [internal name, display label, datatype, candidate values,
# default value, mandatory flag, multi-select flag, tooltip/description,
# customizer callback, enabled flag].
ael_variables = [['posdate', 'Date', 'string', [str(ael.date_today()), 'Yesterday', 'Today'], 'Today', 1, 0, 'Position Date', None, 1], \
                ['pfs', 'Portfolio(s)', 'string', HTI_Util.getAllPortfolios(), 'EDD Warrant Trading,EDD CBBC Trading,EDD Options,EDD Hedging,EDD Market Making 1,EDD Market Making 2,EDD Warrant,EDD Flow Strategy 1,EDD Flow Strategy 2,EDD HFT 1,EDD HFT 2,EDD HFT 3,EDD HFT 4,EDD HFT 5,EDD OMM,EDD OTC OMM,EDD Deltaone,EDD SFMM', 1, 1, 'Portfolio(s)', None, 1], \
                ['acq', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTISEC - EDD,HTIFS - EDD', 1, 1, 'Acquirer(s)', None, 1], \
                ['prd', 'Product Type(s)', 'string', HTI_Util.getAllInstypes(), 'Stock,Option,Future/Forward,Warrant', 1, 1, 'Product Type(s)', None, 1], \
                ['tfs', 'Trade Filter', 'string', None, 'TF_EDD_ACCOUNT_JOURNAL', 0, 0, 'Trade Filter', None, 1], \
                ['fixed_ccy', 'Fixed Currency', 'string', None, 'HKD', 0, 0, 'Fixed Currency', None, 1], \
                ['filename_seoch', 'SEOCH Margin', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\FRR\\Report\\merte_seoch.htm', 1, 0, 'SEOCH Margin', None, 1], \
                ['filename_hkcc', 'HKCC Margin', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\FRR\\Report\\merte_hkcc.htm', 1, 0, 'HKCC Margin', None, 1], \
                ['filename_acc', 'Account List', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\FRR\\Report\\account_list.csv', 1, 0, 'Account List', None, 1], \
                ['filename_haircut', 'Haircut Ratio', 'string', None, 'T:\\1244-haircut Rate\\FRR - Equity Haircut Rate *.csv', 1, 0, 'Haircut Ratio', None, 1], \
                ['fileNameIns', 'File name', 'string', None, 'S:\\FRR\\Instrument_Pos_YYYYMMDD.csv', 1, 0, 'File Name', None, 1], \
                ['fileNameTrd', 'Trade File name', 'string', None, 'S:\\FRR\\Trade_Record_YYYYMMDD.csv', 1, 0, 'File Name', None, 1], \
                ['filename_frr', 'FRR Report', 'string', None, 'S:\\FRR\\FRR_YYYYMMDD.csv', 1, 0, 'FRR Report', None, 1], \
                ['filename_fre', 'FRR Exception Report', 'string', None, 'S:\\FRR\\FRR_Exception_YYYYMMDD.csv', 1, 0, 'FRR Exception Report', None, 1], \
                ['filename_frc', 'FRR checksum', 'string', None, 'S:\\FRR\\FRR_Checksum_YYYYMMDD.xlsx', 1, 0, 'FRR checksum', None, 1], \
                ['filename_sbl', 'SBL records', 'string', None, '\\\\P7fs0003\\nd\\3033-Horizon-FA-Share\\SBL_Trading_Inventory\\SBL_Trade_Record_.csv', 1, 0, 'SBL records', None, 1], \
                ['gen_add_info', 'Generate additional info?', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Generate additional info?', None, 1], \
                ['gen_value_day', 'Generate Value Day?', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Generate Value Day?', None, 1], \
                ['prf_sbl', 'SBL Portfolios', 'string', HTI_Util.getAllPortfolios(), 'EDD SBL', 1, 0, 'SBL Portfolios', None, 1], \
                ['gen_sbl', 'Generate SBL', 'string', [ "Y", "N" ], 'Y', 0, 0, 'Generate SBL', None, 1], \
                ['gen_chk', 'Generate Checksum', 'string', [ "Y", "N" ], 'Y', 0, 0, 'Generate Checksum', None, 1], \
                ['gen_expired_pos', 'Generate Range', 'string', ["Today", "Monthly", "All"], 'Today', 0, 0, 'Generate Range', None, 1], \
                ['clspricemkt', 'Closing Price Market', 'string', None, 'Bloomberg_5PM', 0, 0, 'Closing Price Market', None, 1], \
                ['histclspricemkt', 'Historical Closing Price Market', 'string', None, 'Bloomberg_5PM_Cls', 1, 0, 'Historical Closing Price Market', None, 1], \
                ['otc_clspricemkt', 'OTC Closing Price Market', 'string', None, 'MSS_SPOT', 0, 0, 'OTC Closing Price Market', None, 1], \
                ['otc_histclspricemkt', 'OTC Historical Price Market', 'string', None, 'MSS_CLS', 0, 0, 'OTC Historical Price Market', None, 1], \
                ['fx_clspricemkt', 'FX Closing Price Market', 'string', None, 'bloomberg', 0, 0, 'FX Closing Price Market', None, 1], \
                ['fx_histclspricemkt', 'FX Historical Price Market', 'string', None, 'internal', 0, 0, 'FX Historical Price Market', None, 1], \
                ['success_email_subj', 'Success Email Subject', 'string', None, 'FA (PROD) : EDD MSS Trade File Upload - SUCCESS', 1, 0, 'Sucess Email Subject', None, 1], \
                ['failure_email_subj', 'Failure Email Subject', 'string', None, 'FA (PROD) : EDD MSS Trade File Upload - FAILED', 1, 0, 'Failure Email Subject', None, 1]]
def adapt_decimal(d):
	"""Sqlite adapter: persist Decimal values in their plain string form."""
	text = str(d)
	return text
def convert_decimal(s):
	"""Sqlite converter: rebuild a Decimal from its stored string form."""
	value = decimal.Decimal(s)
	return value
def question_marks(st):
	"""Build a '?,?,...,?' sqlite placeholder list, one '?' per comma-separated
	field in the header string `st`.

	Replaces the original accumulate-in-a-loop version (quadratic string
	concatenation) with a single join; output is identical.
	"""
	return ','.join(['?'] * len(st.split(',')))
def num(s):
	"""Best-effort float coercion for spreadsheet/CSV cells.

	Strips '#' from text input, maps blank/'None'/NaN to 0, returns
	float(s) when parseable and the original value unchanged otherwise.
	"""
	# Mirror the Py2/Py3 string-type guard already used by dec(); the
	# original referenced basestring unconditionally, which raises
	# NameError under Python 3.
	if sys.version_info >= (3, 0, 0):
		is_text = isinstance(s, str)
	else:
		is_text = isinstance(s, basestring)
	if is_text:
		s = str(s).replace("#", "")
	try:
		# Blank / textual-None / NaN cells all collapse to 0.
		if s == "" or s == "None" or str(float(s)) == "nan":
			return 0
		return float(str(s))
	except:
		# Historical fall-through: non-numeric input is returned untouched.
		return s
def dec(s):
	"""Coerce a cell to decimal.Decimal; return 0 for blank/'None'/NaN or
	any unparsable input.

	Fix: the original evaluated float(s) in the blank/NaN screen OUTSIDE
	the try block, so non-numeric strings (e.g. 'abc') and None raised
	instead of falling back to 0; the trailing `return s` was unreachable.
	"""
	if sys.version_info >= (3, 0, 0):
		is_text = isinstance(s, str)
	else:
		is_text = isinstance(s, basestring)
	if is_text:
		s = str(s).replace("#", "")
	try:
		# Inside the try: float(s) itself raises for non-numeric input.
		if s == "" or s == "None" or str(float(s)) == "nan":
			return 0
		return decimal.Decimal(str(s))
	except:
		return 0
def csv_to_arr(csv_file, start=0, has_header=True, delim=',', encoding='utf-8'):
	"""Load a delimited file (local path, or URL when 'http' is in the name)
	into a list of rows.

	Returns (header_string, data_rows) when has_header is True, otherwise
	the row list starting at index `start`.

	Fixes vs the original: the local file handle was never closed; the URL
	branch fed the decoded payload straight to csv.reader, which iterates a
	string character by character; mode 'rU' was removed in Python 3.11;
	the trailing bare `return` was unreachable.
	"""
	if "http" in csv_file:
		# NOTE(review): `requests` is not imported at the top of this file —
		# confirm it is available before relying on the URL branch.
		response = requests.get(csv_file)
		lines = response.content.decode(encoding).splitlines()
		arr = list(csv.reader(lines, delimiter=delim))
	else:
		fh = open(csv_file, 'r')
		try:
			arr = list(csv.reader(fh, delimiter=delim))
		finally:
			fh.close()
	if has_header:
		header = ','.join(arr[start])
		return header, arr[start+1:]
	return arr[start:]
def tsv_to_arr(tsv_file):
	"""Load a tab-separated file, dropping the first (header) row.

	Fix: open mode 'rU' was deprecated and is rejected by Python 3.11+;
	universal newline handling is the default for 'r' anyway.
	"""
	with open(tsv_file, 'r') as f:
		reader = csv.reader(f, dialect="excel-tab")
		arr = list(reader)
	return arr[1:]
def sortArray(x, y):
	"""cmp-style row comparator (used by list.sort on Py2): lexicographic
	element-by-element ordering.

	Returns 1/-1 at the first unequal pair and 0 when the compared prefix
	is equal. Fix: the original returned -1 whenever the first elements
	were merely EQUAL (`if a > b: 1 else: -1`), so later columns were
	never compared and the comparator was inconsistent (cmp(x, x) == -1).
	"""
	for a, b in zip(x, y):
		if a > b:
			return 1
		if a < b:
			return -1
	return 0
def arrs_to_xlsx(filename, header=[], arr=[]):
	"""Write one worksheet per (header[x], arr[x]) pair into an Excel
	workbook via COM automation, then save it to `filename`.

	Sheet 3 additionally gets a conditional-formatting rule highlighting
	rows where numeric column F differs from column G.
	"""
	i = 1
	try:
		# EnsureDispatch builds/uses the cached makepy wrapper; fall back to
		# late-bound dispatch when cache generation fails.
		xl = win32com.client.gencache.EnsureDispatch('Excel.Application')
	except:
		xl = win32com.client.Dispatch("Excel.Application")
	wb = xl.Workbooks.Add()
	for x in range(0, len(header)):
		# Worksheets are 1-based in COM; sheet x+1 receives header[x]/arr[x].
		ws = wb.Worksheets(x+1)
		for i, cell in enumerate(header[x].split(',')):
			ws.Cells(1,i+1).Value = cell
		for i, row in enumerate(arr[x]):
			for j, cell in enumerate(row):
				# Everything is written as text (str) — Excel re-interprets.
				ws.Cells(i+2,j+1).Value = str(cell)
	# Highlight (yellow, ColorIndex 6) rows on sheet 3 where F is numeric
	# and F <> G. NOTE(review): assumes at least 3 header/arr pairs exist.
	wb.Worksheets(3).UsedRange.FormatConditions.Add(win32com.client.constants.xlExpression, "", '=AND(ISNUMBER($F1),$F1<>$G1) ')
	wb.Worksheets(3).UsedRange.FormatConditions(1).Interior.ColorIndex = 6
	wb.Worksheets(3).UsedRange.FormatConditions(1).StopIfTrue = False
	# Suppress the overwrite prompt while saving.
	xl.DisplayAlerts = False
	wb.SaveAs(filename)
	xl.DisplayAlerts = True
	wb.Close(True)
	xl.Quit()
	return
def export_to_file(file_name, header, data_arr, mode='wb', quote=csv.QUOTE_ALL):
	"""Dump `data_arr` rows to a CSV file, stringifying every cell.

	A non-empty `header` (comma-separated) is written first. On Python 3
	the file is opened in text mode with newline='' (the `mode` argument
	only applies on Python 2).
	"""
	if sys.version_info >= (3,0,0):
		out = open(file_name, 'w', newline='')
	else:
		out = open(file_name, mode)
	writer = csv.writer(out, quoting=quote)
	if header != "":
		writer.writerow(header.split(','))
	for record in data_arr:
		writer.writerow([str(field) for field in record])
	out.close()
	return
def header_to_col(header):
	"""Map each comma-separated column name in `header` to its 0-based index.

	Idiom fix: replaces the manual counter loop with enumerate + a dict
	comprehension; duplicate names keep the last index, as before.
	"""
	return {name: idx for idx, name in enumerate(header.split(','))}
def ins_percent_mv_wrt(cur, e_ent, e_und, mv):
	"""Return True when |mv| exceeds 5% of the entity's total warrant/CBBC
	market value on the given underlying (or when that total is not positive).
	"""
	cur.execute("""select sum(market_value)
				from ins
				where quantity <> 0 and (instrument_type = 'Listed Warrant' or instrument_type = 'CBBC Warrant') and entity = '%s' and underlying = '%s' """ % (e_ent, e_und))
	total_mv = dec(list(cur)[0][0])
	if total_mv <= 0:
		# No (positive) base to compare against — treat as significant.
		return True
	return abs(mv) / abs(total_mv) > 0.05
def get_last_trade_of_instr(ins, prf):
	"""Return the highest (most recent) non-void/non-simulated trade number
	for instrument `ins` in portfolio `prf`, or '' when none exists.
	"""
	str_sql = """select t.trdnbr
	from instrument i, trade t, portfolio pf
	where i.insaddr = t.insaddr
	and t.status not in ('Void', 'Simulated')
	and t.prfnbr = pf.prfnbr
	and i.insid = '%s'
	and pf.prfid = '%s' order by t.trdnbr desc""" % (ins, prf)
	columns, buf = ael.asql(str_sql)
	# The query is sorted descending, so the first row is the latest trade.
	for table in buf:
		for row in table:
			return row[0]
	return ''
def month_code_mapping(ins_type, month_code):
	"""Translate an HKATS series month code into the margin-table column name.

	'F' maps futures directly; otherwise letters below 'M' are calls and
	'M' onwards are puts. The month number is the letter's offset from the
	per-type base, producing e.g. 'F1901', 'C1001', 'P1512'.
	"""
	base = {'F': 69, 'C': 64, 'P': 76}
	if ins_type == 'F':
		code = 'F'
	elif month_code[0] < 'M':
		code = 'C'
	else:
		code = 'P'
	month_num = ord(month_code[0]) - base[code]
	return "{0}1{1}{2:02d}".format(code, month_code[1], month_num)
def margin_lookup(cur, ats_code, pos = ''):
	"""Look up the exchange margin for an HKATS code in the sqlite margin
	tables; returns 0 when no row matches.

	A bare 3-letter product + 2-char month (length 5) is a future; anything
	longer carries a strike and is an option. HSI/HHI/MHI options live in
	product-specific tables.
	"""
	symbol = ats_code.split('@')[0] if '@' in ats_code else ats_code
	prod = symbol[:3]
	expiry_code = symbol[-2:]
	strike_part = symbol[3:-2]
	if len(symbol) == 5:
		cur.execute("""select FXXXX from f_margin where Instrument = '%s' """ % (prod))
	else:
		margin_col = month_code_mapping('O', expiry_code)
		if prod in ("HSI", "HHI", "MHI"):
			cur.execute("""select %s from %so_margin where Instrument = '%s' and Strike = '%s' """ % (margin_col, prod, prod, strike_part+pos))
		else:
			cur.execute("""select %s from o_margin where Instrument = '%s' and Strike = '%s' """ % (margin_col, prod, strike_part))
	hit = cur.fetchone()
	if hit:
		return int(hit[0])
	return 0
def create_tbl(cur, tbl_name, header, arr = [], index_arr = []):
	"""Create `tbl_name` (if absent) from a comma-separated header, adding a
	PRIMARY KEY on a leading 'id' column and one index per name in
	`index_arr`, then bulk-insert `arr` when rows are supplied.
	"""
	cur.execute("""select count(*) FROM sqlite_master WHERE type='table' AND name = '%s' """ % (tbl_name))
	if cur.fetchone()[0] == 0:
		# 'id,' becomes 'id PRIMARY KEY,' so a leading id column is the key.
		cur.execute("CREATE TABLE " + tbl_name + " (" + header.replace("id,", "id PRIMARY KEY,") + " );")
		for index in index_arr:
			cur.execute("CREATE INDEX " + tbl_name + "_" + index + " ON " + tbl_name + " (" + index + ");")
	if arr != []:
		cur.executemany("INSERT INTO " + tbl_name + " VALUES ("+question_marks(header)+")", arr)
	return
def chg_dt_format(row, col, with_time=True):
	"""Rewrite row[col] from 'D/M/YYYY[ time]' to 'YYYY-MM-DD[ time]' in
	place and return the row; cells without '/' are left untouched.
	"""
	if len(row) > col and '/' in row[col]:
		pieces = row[col].split(' ')
		dmy = pieces[0].split('/')
		stamp = dmy[2] + '-' + dmy[1].rjust(2, '0') + '-' + dmy[0].rjust(2, '0')
		if with_time:
			stamp = stamp + ' ' + pieces[1]
		row[col] = stamp
	return row
def flatten_obj(obj):
	"""Flatten a 'key=value'-per-line object dump into (csv_header, values).

	Keys are stripped; values keep their raw text after the first '='.
	Lines without '=' are skipped.
	"""
	names = []
	values = []
	for line in str(obj).split('\n'):
		pieces = line.split('=')
		if len(pieces) > 1:
			names.append(pieces[0].strip())
			values.append(pieces[1])
	return ','.join(names), values
def trd_records(cur, qry, asofdate = "", dict = [], ccy = ""):
	"""Run the trade query `qry` through ael.asql (restricted to trades on/after
	`asofdate`), build one flat row per trade from the ACM trade object, load
	the rows into the sqlite 'trd' table, and optionally dump the same rows to
	dict['fileNameTrd'].

	NOTE(review): `dict` shadows the builtin and is a settings mapping.
	`query` is only assigned when asofdate != "" — an empty asofdate would
	raise NameError at the `print query` line below; confirm callers always
	pass a date.
	"""
	print "Generating Trade Records:"
	header_trd = 'trade_id,entity,portfolio,counterparty,instrument_type,call_put,currency,position,reporting_date,instrument,underlying,price,quantity,premium,fee,ss_bb,mss_acc'
	add_header = ''
	trd_array = []
	clspricemkt = "Bloomberg_5PM"
	histclspricemkt = "Bloomberg_5PM_Cls"
	fileNameTrd = ""
	if "fileNameTrd" in dict and asofdate != "":
		fileNameTrd = dict['fileNameTrd']
		fileNameTrd = fileNameTrd.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
	# Optional extra output columns, both off by default.
	gen_add_info = False
	if "gen_add_info" in dict:
		gen_add_info = True if dict["gen_add_info"] == "Y" else False
	gen_value_day = False
	if "gen_value_day" in dict:
		gen_value_day = True if dict["gen_value_day"] == "Y" else False
	# context = acm.GetDefaultContext()
	# sheet_type = 'FTradeSheet'
	# calc_space = acm.Calculations().CreateCalculationSpace(context, sheet_type)
	# calc_space.SimulateValue
	#create CalculationSpace (Virtual Trading Manager)
	# tf = ael.TradeFilter[tf_id]
	# nacmTf = acm.FTradeSelection[tf.fltid]
	# top_node = calc_space.InsertItem(nacmTf)
	# calc_space.Refresh()
	# tf_iter = calc_space.RowTreeIterator().FirstChild()
	# #while tf_iter:
	# row = tf_iter.Tree().Item()
	#query = qry + """ and t.time >= %s """ % ("Yesterday")
	if asofdate != "":
		query = qry + """ and t.time >= \'%s\' """ % asofdate.to_string('%Y-%m-%d')
	print query
	rs = ael.asql(query)
	columns, buf = rs
	for table in buf:
		for row in table:
			#tid = trd_row.StringKey()
			tid = row[0]
			# Progress marker every 1000 trade numbers.
			if tid % 1000 == 0:
				print "tid " + str(tid)
			acm_trd = acm.FTrade[tid]
			acm_ins = acm_trd.Instrument()
			acm_pays = acm_trd.Payments()
			# Total fee = sum of all payment legs attached to the trade.
			fee = 0
			for acm_pay in acm_pays:
				fee = fee + acm_pay.Amount()
			ins = acm_ins.Name()
			und = acm_ins.Underlying()
			und = ins if und is None else und.Name()
			ent = "" if acm_trd.Acquirer() is None else acm_trd.Acquirer().Name()
			prf = "" if acm_trd.Portfolio() is None else acm_trd.Portfolio().Name()
			cnt_pty = "" if acm_trd.Counterparty() is None else acm_trd.Counterparty().Name()
			ccy = acm_trd.Currency().Name()
			#otc = "OTC" if acmIns.Otc() else "Listed"
			#print ins
			price = acm_trd.Price()
			qty = acm_trd.Quantity()
			prem = acm_trd.Premium()
			pos = 'L' if qty > 0 else 'S'
			# Product type: prefer the product-type choice-list, fall back to
			# the valuation group; '' when neither resolves.
			ins_typ = ""
			try:
				if acm_ins.ProductTypeChlItem() == None:
					ins_typ = acm_ins.ValuationGrpChlItem().Name()
				else:
					ins_typ = acm_ins.ProductTypeChlItem().Name()
			except:
				ins_typ = ''
			cp = ('C' if acm_ins.IsCallOption() else 'P') if ("Option" in ins_typ) or ("Warrant" in ins_typ) else ''
			str_price = acm_ins.StrikePrice() if ("Option" in ins_typ) or ("Warrant" in ins_typ) else 0
			add_info = acm_trd.AdditionalInfo()
			ss_bb = "SS" if add_info.Short_Sell() is not None else "BB" if add_info.Buy_Back() is not None else ""
			acc = add_info.MSS_Account()
			trd_row = [tid, ent, prf, cnt_pty, ins_typ, cp, ccy, pos, acm_trd.TradeTime(), ins, und, price, qty, prem, abs(fee), ss_bb, acc]
			if gen_value_day:
				trd_row.append(acm_trd.ValueDay())
			if gen_add_info:
				# Append every add-info field; add_header is reused below for
				# the extended column list.
				add_header, add_arr = flatten_obj(add_info)
				trd_row += add_arr
			trd_array.append(trd_row)
			#tf_iter = tf_iter.NextSibling()
	# calc_space.Clear()
	if gen_value_day:
		header_trd = header_trd + ',value_day'
	if gen_add_info:
		header_trd = header_trd + ('' if add_header == '' else ',') + add_header
	create_tbl(cur, "trd", header_trd, trd_array)
	if fileNameTrd != "":
		export_to_file(fileNameTrd, header_trd, trd_array)
	return trd_array
def getUnderlyingPrice(dt, ael_und_ins, currclspricemkt, histclspricemkt):
	"""Price `ael_und_ins` in its own currency for date `dt`: 'Last' from the
	current market when dt is today, otherwise 'Close' from the historical
	market. Returns 0.0 when no price is available.
	"""
	try:
		if dt == ael.date_today():
			return ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
		return ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Close', 0, histclspricemkt)
	except:
		#cls_price = ael_und_ins.used_price(dt, ael_und_ins.curr.insid, 'Last', 0, currclspricemkt)
		return 0.0
def getFx(dt, fm_ccy, to_ccy, currclspricemkt, histclspricemkt):
	"""FX rate from `fm_ccy` to `to_ccy` on `dt`, crossed through USD.

	CNY is quoted as its offshore CNH leg. Uses 'Last' prices from the
	current market when dt is today, 'Close' from the historical market
	otherwise; returns 0.0 when either leg is unavailable.
	"""
	if fm_ccy == 'CNY':
		fm_ccy = 'CNH'
	if to_ccy == 'CNY':
		to_ccy = 'CNH'
	ins_fm = ael.Instrument[fm_ccy]
	ins_to = ael.Instrument[to_ccy]
	ins_usd = ael.Instrument['USD']
	try:
		if dt == ael.date_today():
			leg_fm_usd = ins_fm.used_price(ael.date_today(), ins_usd.insid, 'Last', 0, currclspricemkt)
			leg_usd_to = ins_usd.used_price(ael.date_today(), ins_to.insid, 'Last', 0, currclspricemkt)
		else:
			leg_fm_usd = ins_fm.used_price(dt, ins_usd.insid, 'Close', 0, histclspricemkt)
			leg_usd_to = ins_usd.used_price(dt, ins_to.insid, 'Close', 0, histclspricemkt)
		return leg_fm_usd * leg_usd_to
	except:
		return 0.0
# def d1_sbl_export(cur, asofdate, dict=[]):
# strSql = """
# select t.trdnbr, add_info(t, 'Trd SBL') 'sbl', i.und_insaddr
# into temp
# from trade t, instrument i
# where t.insaddr = i.insaddr
# and t.status not in ('Void', 'Simulated')
# and i.instype = 'Repo/Reverse'
# and t.time < '%s'
# select trdnbr, und_insaddr
# from temp
# where sbl = 'Yes'
# """ % (asofdate.add_days(1))
# d1s_arr = []
# clspricemkt = dict["clspricemkt"]
# histclspricemkt = dict["histclspricemkt"]
# otc_clspricemkt = dict["otc_clspricemkt"]
# print strSql
# rs = ael.asql(strSql)
# columns, buf = rs
# for table in buf:
# for row in table:
# trdnbr = row[0]
# und_ins = row[1]
# acm_trd = acm.FTrade[trdnbr]
# ael_und_ins = ael.Instrument[und_ins]
# # print acm_trd
# if acm_trd != None:
# add_info = acm_trd.AdditionalInfo()
# ins = ael_und_ins.insid
# sbl_quantity = acm_trd.Nominal()
# trd_date = ael.date(acm_trd.TradeTime()[0:10])
# price = getUnderlyingPrice(trd_date, ael_und_ins, clspricemkt, histclspricemkt)
# external_ref = add_info.External_Reference()
# d1s_arr.append([external_ref, trd_date.to_string('%Y%m%d'), ins, price, sbl_quantity])
# export_to_file(dict["filename_d1s"].replace("YYYYMMDD", asofdate.to_string('%Y%m%d')), "contract,trade_date,instrument,price,quantity", d1s_arr)
# return
def sbl_dump(cur, qry, asofdate = "", dict = [], ccy = ""):
	"""Append the day's SBL trades to the rolling dict['filename_sbl'] CSV.

	The existing file is first loaded into the sqlite 'fasbl' table; if rows
	for `asofdate` are already present the function returns [] without
	re-querying (idempotency guard). Otherwise the ael query is run and one
	row per trade is appended (unquoted, 'ab' mode) to the same file.

	NOTE(review): `dict` shadows the builtin and is a settings mapping.
	"""
	print "Generating SBL:"
	trd_array = []
	fileName_sbl = ""
	if "filename_sbl" in dict and asofdate != "":
		fileName_sbl = dict['filename_sbl']
		fileName_sbl = fileName_sbl.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
	sbl_header, sbl_arr = csv_to_arr(fileName_sbl)
	# Column 8 is the reporting date; normalise D/M/YYYY to YYYY-MM-DD.
	sbl_arr = [ chg_dt_format(row, 8, False) for row in sbl_arr if row != [] ]
	create_tbl(cur, "fasbl", sbl_header, sbl_arr)
	# Skip regeneration when today's rows were already appended.
	cur.execute("""select 1 from fasbl where reporting_date = ? """, (asofdate.to_string('%Y-%m-%d'),) )
	today_result = cur.fetchone()
	if today_result is not None:
		return []
	# context = acm.GetDefaultContext()
	# sheet_type = 'FTradeSheet'
	# calc_space = acm.Calculations().CreateCalculationSpace(context, sheet_type)
	#create CalculationSpace (Virtual Trading Manager)
	# calc_space.SimulateValue
	# tf = ael.TradeFilter[tf_id]
	# nacmTf = acm.FTradeSelection[tf.fltid]
	# top_node = calc_space.InsertItem(nacmTf)
	# calc_space.Refresh()
	# tf_iter = calc_space.RowTreeIterator().FirstChild()
	# #while tf_iter:
	# row = tf_iter.Tree().Item()
	#query = qry + """ and t.time >= %s """ % ("Yesterday")
	query = qry
	if asofdate != "":
		query = qry + """ and t.time >= \'%s\' """ % asofdate.to_string('%Y-%m-%d')
	print query
	rs = ael.asql(query)
	columns, buf = rs
	for table in buf:
		for row in table:
			#tid = trd_row.StringKey()
			tid = row[0]
			print "tid " + str(tid)
			acm_trd = acm.FTrade[tid]
			acm_ins = acm_trd.Instrument()
			acm_pays = acm_trd.Payments()
			# Total fee = sum of all payment legs attached to the trade.
			fee = 0
			for acm_pay in acm_pays:
				fee = fee + acm_pay.Amount()
			ins = acm_ins.Name()
			und = acm_ins.Underlying()
			und = ins if und is None else und.Name()
			ent = "" if acm_trd.Acquirer() is None else acm_trd.Acquirer().Name()
			prf = "" if acm_trd.Portfolio() is None else acm_trd.Portfolio().Name()
			cnt_pty = "" if acm_trd.Counterparty() is None else acm_trd.Counterparty().Name()
			ccy = acm_trd.Currency().Name()
			#otc = "OTC" if acmIns.Otc() else "Listed"
			#print ins
			price = acm_trd.Price()
			qty = acm_trd.Quantity()
			prem = acm_trd.Premium()
			pos = 'L' if qty > 0 else 'S'
			# Product type: prefer the product-type choice-list, fall back to
			# the valuation group; '' when neither resolves.
			ins_typ = ""
			try:
				if acm_ins.ProductTypeChlItem() == None:
					ins_typ = acm_ins.ValuationGrpChlItem().Name()
				else:
					ins_typ = acm_ins.ProductTypeChlItem().Name()
			except:
				ins_typ = ''
			cp = ('C' if acm_ins.IsCallOption() else 'P') if ("Option" in ins_typ) or ("Warrant" in ins_typ) else ''
			str_price = acm_ins.StrikePrice() if ("Option" in ins_typ) or ("Warrant" in ins_typ) else 0
			add_info = acm_trd.AdditionalInfo()
			ss_bb = "SS" if add_info.Short_Sell() is not None else "BB" if add_info.Buy_Back() is not None else ""
			acc = add_info.MSS_Account()
			# Dates are re-formatted to D/M/YYYY with no zero padding, keeping
			# any trailing time component.
			trd_ael_dt = ael.date(acm_trd.TradeTime()[0:10])
			trd_dt = trd_ael_dt.to_string('%d').lstrip('0') + '/' + trd_ael_dt.to_string('%m').lstrip('0') + '/' + trd_ael_dt.to_string('%Y') + acm_trd.TradeTime()[10:]
			val_ael_dt = ael.date(acm_trd.ValueDay()[0:10])
			val_dt = val_ael_dt.to_string('%d').lstrip('0') + '/' + val_ael_dt.to_string('%m').lstrip('0') + '/' + val_ael_dt.to_string('%Y') + acm_trd.ValueDay()[10:]
			lend_to_sbl = "y" if (add_info.Trd_SBL() or add_info.Trd_SBL() == "Yes") else ""
			trd_row = [tid, ent, prf, cnt_pty, ins_typ, cp, ccy, pos, trd_dt, ins, und, price, qty, prem, abs(fee), ss_bb, acc, val_dt, lend_to_sbl]
			trd_array.append(trd_row)
			#tf_iter = tf_iter.NextSibling()
	# calc_space.Clear()
	if fileName_sbl != "":
		# Append without quoting so the rolling file keeps its legacy format.
		export_to_file(fileName_sbl, "", trd_array, "ab", csv.QUOTE_NONE)
	return trd_array
def ins_qty_and_avgprice_no_pnl(cur, strSql, asofdate, dict, ccy = ""):
	"""Build the per-instrument position snapshot (no P&L columns) from the
	aggregated `strSql` ael query: prices each position (market + fair),
	derives strike/underlying/notional fields, loads everything into the
	sqlite 'ins' table and optionally writes dict['fileNameIns'].

	Pricing source depends on the product type: OTC options use the OTC
	markets, FX uses the FX markets, everything else the listed markets;
	'Last' is used for today and 'Close' for historical dates.

	NOTE(review): `dict` shadows the builtin and is a settings mapping.
	"""
	print "Generating Instrument Positions:"
	header_ins = 'entity,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,call_put,currency,position,reporting_date,quantity,average_price,market_price,market_value,hkats_code,total_issued,notional,avg_price_exec,qty_exec,fair_price,fair_value'
	filenameAcc = dict['filename_acc']
	fileNameIns = ""
	if "fileNameIns" in dict:
		fileNameIns = dict['fileNameIns']
		fileNameIns = fileNameIns.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
	pos_array = []
	# Account list is loaded as a plain 7-column table for later joins.
	header_acc, acc_array = csv_to_arr(filenameAcc)
	cur.execute("CREATE TABLE acc (" + header_acc + ");")
	cur.executemany("INSERT INTO acc VALUES (?,?,?,?,?,?,?)", acc_array)
	pos_array = []
	clspricemkt = dict["clspricemkt"]
	histclspricemkt = dict["histclspricemkt"]
	otc_clspricemkt = dict["otc_clspricemkt"]
	otc_histclspricemkt = dict["otc_histclspricemkt"]
	fx_clspricemkt = dict["fx_clspricemkt"]
	fx_histclspricemkt = dict["fx_histclspricemkt"]
	print strSql
	rs = ael.asql(strSql)
	columns, buf = rs
	for table in buf:
		for row in table:
			# Expected row layout: insid, local code, MIC, entity, portfolio,
			# position, avg price, avg exec price, exec qty.
			insid = row[0]
			localcode = row[1]
			insMic = row[2]
			entity = row[3]
			prfid = row[4]
			pos = int(round(row[5], 0))
			avg_price = round(row[6], 2)
			avg_price_exec = round(row[7], 2)
			qty_exec = int(round(row[8], 0))
			nTrade = 0
			print entity + ' - ' + prfid + ' - ' + insid
			acm_ins = acm.FInstrument[insid]
			ael_ins = ael.Instrument[insid]
			und_ins = acm_ins.Underlying()
			# Product type: prefer the product-type choice-list, fall back to
			# the valuation group; '' when neither resolves.
			try:
				if acm_ins.ProductTypeChlItem() == None:
					ins_typ = acm_ins.ValuationGrpChlItem().Name()
				else:
					ins_typ = acm_ins.ProductTypeChlItem().Name()
			except:
				ins_typ = ''
			if ins_typ == "OTC Equity Option":
				# OTC options price off the MSS spot/close markets.
				try:
					if asofdate == ael.date_today():
						mktp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, otc_clspricemkt)
						fairp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, otc_clspricemkt)
					else:
						mktp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, otc_histclspricemkt)
						fairp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, otc_histclspricemkt)
				except:
					mktp = float(0)
					fairp = float(0)
				finally:
					mktp = float(mktp)
					fairp = float(fairp)
			else:
				try:
					if (ins_typ == "FX"):
						if asofdate == ael.date_today():
							mktp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, fx_clspricemkt)
							fairp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, fx_clspricemkt)
						else:
							mktp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, fx_histclspricemkt)
							fairp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, fx_histclspricemkt)
					else:
						if asofdate == ael.date_today():
							mktp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, clspricemkt)
							# Fair value for derivatives comes from the OTC
							# market even for listed products.
							if (ins_typ == "Equity") or (ins_typ == "ETF"):
								fairp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, clspricemkt)
							else:
								fairp = ael_ins.used_price(ael.date_today(), ael_ins.curr.insid, 'Last', 0, otc_clspricemkt)
						else:
							mktp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, histclspricemkt)
							if (ins_typ == "Equity") or (ins_typ == "ETF"):
								fairp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, histclspricemkt)
							else:
								fairp = ael_ins.used_price(asofdate, ael_ins.curr.insid, 'Close', 0, otc_histclspricemkt)
				except:
					mktp = float(0)
					fairp = float(0)
				finally:
					mktp = round(mktp, 4)
					fairp = float(fairp)
			str_price = acm_ins.StrikePrice() if ("Option" in ins_typ) or ("Warrant" in ins_typ) or ("FX" in ins_typ) else 0
			prf_ccy = acm_ins.Currency().Name()
			# Underlying price: the instrument itself for cash equity/ETF, the
			# strike-currency FX rate for FX, otherwise the underlying's price.
			try:
				if (ins_typ == "Equity") or (ins_typ == "ETF"):
					und_price = mktp
				else:
					if (ins_typ == "FX"):
						str_ccy = acm_ins.StrikeCurrency().Name()
						und_price = getFx(asofdate, prf_ccy, str_ccy, fx_clspricemkt, fx_histclspricemkt)
					else:
						if asofdate == ael.date_today():
							und_price = ael_ins.und_insaddr.used_price(ael.date_today(), ael_ins.und_insaddr.curr.insid, 'Last', 0, clspricemkt)
						else:
							und_price = ael_ins.und_insaddr.used_price(asofdate, ael_ins.und_insaddr.curr.insid, 'Close', 0, histclspricemkt)
			except:
				und_price = float(0)
			if (ins_typ == "FX"):
				# Report FX positions in the strike currency.
				prf_ccy = str_ccy
			und_price = round(und_price, 4)
			try:
				und_ins_name = insid if (ins_typ == "Equity") or (ins_typ == "ETF") else und_ins.Name()
			except:
				und_ins_name = insid
			conv_fac = acm_ins.ContractSize()
			expiry = '' if (ins_typ == "Equity") or (ins_typ == "ETF") else acm_ins.ExpiryDate()
			cp = ('C' if acm_ins.IsCallOption() else 'P') if ("Option" in ins_typ) or ("Warrant" in ins_typ) else ''
			bs = 'L' if pos >= 0 else 'S'
			mktv = mktp*pos*conv_fac if ("Option" in ins_typ) or ("Future" in ins_typ) else mktp*pos
			fairv = fairp*pos*conv_fac if ("Option" in ins_typ) or ("Future" in ins_typ) else fairp*pos
			delisted_date = ""
			divd = 0
			# HKATS/Horizon product id = localcode@MIC, falling back to insid.
			horizon_prod_id = localcode + '@' + insMic
			if horizon_prod_id == '@':
				horizon_prod_id = insid
			total_issued = ael_ins.total_issued if ins_typ == "Listed Warrant" else ""
			# Notional: short option positions use the strike, longs and
			# futures use the underlying price.
			notional_amt = 0
			if ins_typ != "Equity" and ins_typ != "ETF":
				if "Future" not in ins_typ:
					notional_amt = dec(pos)*dec(und_price)*dec(conv_fac) if dec(pos) >= 0 else dec(pos)*dec(str_price)*dec(conv_fac)
				else:
					notional_amt = dec(pos)*dec(und_price)*dec(conv_fac)
			pos_row = [entity, insid, und_ins_name, dec(und_price), dec(str_price), conv_fac, expiry, prfid, ins_typ, cp
				, prf_ccy, bs, asofdate.to_string('%Y-%m-%d'), num(pos)
				, dec(avg_price), dec(mktp), dec(mktv)
				, horizon_prod_id, total_issued, notional_amt, avg_price_exec, qty_exec, fairp, fairv
				]
			pos_array.append(pos_row)
	pos_array.sort(sortArray)
	cur.execute("CREATE TABLE ins (" + header_ins + ");")
	cur.executemany("INSERT INTO ins VALUES ("+question_marks(header_ins)+")", pos_array)
	if fileNameIns != "":
		export_to_file(fileNameIns, header_ins, pos_array)
	return pos_array
def ins_qty_and_avgprice(cur, tf_id, asofdate, dict, ccy = ""):
	"""Build the per-instrument position snapshot (with optional P&L columns)
	by simulating an ACM portfolio sheet over trade filter `tf_id`, grouped
	by portfolio then acquirer. Results are loaded into the sqlite 'ins'
	table and optionally written to dict['fileNameIns'].

	When `ccy` is given the sheet is simulated in fixed HKD; otherwise
	instrument/portfolio currency. dict['gen_pnl'] == 'Y' also pulls the
	daily/monthly/yearly realized, unrealized and total P&L plus dividends.

	NOTE(review): `dict` shadows the builtin and is a settings mapping.
	"""
	print "Generating Instrument Positions:"
	header_ins = 'entity,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,call_put,currency,position,reporting_date,quantity,average_price,market_price,market_value,new_trade,upld,uplm,uply,rpld,rplm,rply,tpld,tplm,tply,dividends,delisted_date,hkats_code,total_issued,notional'
	filenameAcc = dict['filename_acc']
	gen_pnl = "N"
	if "gen_pnl" in dict:
		gen_pnl = dict['gen_pnl']
	fileNameIns = ""
	if "fileNameIns" in dict:
		fileNameIns = dict['fileNameIns']
		fileNameIns = fileNameIns.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
	pos_array = []
	# Account list is loaded as a plain 7-column table for later joins.
	header_acc, acc_array = csv_to_arr(filenameAcc)
	cur.execute("CREATE TABLE acc (" + header_acc + ");")
	cur.executemany("INSERT INTO acc VALUES (?,?,?,?,?,?,?)", acc_array)
	context = acm.GetDefaultContext()
	sheet_type = 'FPortfolioSheet'
	#create CalculationSpace (Virtual Trading Manager)
	calc_space = acm.Calculations().CreateCalculationSpace(context, sheet_type)
	#simulate sheet settings
	if ccy == "":
		calc_space.SimulateGlobalValue( 'Position Currency Choice', 'Instrument Curr')
		calc_space.SimulateGlobalValue( 'Aggregate Currency Choice', 'Portfolio Curr')
	else:
		calc_space.SimulateGlobalValue( 'Position Currency Choice', 'Fixed Curr')
		calc_space.SimulateGlobalValue( 'Aggregate Currency Choice', 'Fixed Curr')
		calc_space.SimulateGlobalValue( 'Fixed Currency', 'HKD')
	calc_space.SimulateGlobalValue( 'Portfolio Profit Loss Start Date', 'Inception' )
	# Valuation runs as of `asofdate`; 'Now' when that is today.
	if asofdate == ael.date_today():
		calc_space.SimulateGlobalValue( 'Portfolio Profit Loss End Date Custom', 'Now' )
	else:
		calc_space.SimulateGlobalValue( 'Portfolio Profit Loss End Date', 'Custom Date' )
		calc_space.SimulateGlobalValue( 'Portfolio Profit Loss End Date Custom', asofdate.to_string('%Y-%m-%d') )
		calc_space.SimulateGlobalValue( 'Valuation Date', asofdate.to_string('%Y-%m-%d') )
	calc_space.SimulateValue
	tf = ael.TradeFilter[tf_id]
	#pfObj = acm.FPortfolio['EDD']
	#print pfObj
	#add item to portfolio sheet
	nacmTf = acm.FTradeSelection[tf.fltid]
	top_node = calc_space.InsertItem(nacmTf)
	# Group rows by Trade Portfolio, then Trade Acquirer, then instrument.
	groupers = [acm.Risk().GetGrouperFromName('Trade Portfolio'), acm.Risk().GetGrouperFromName('Trade Acquirer')]
	chained_grouper = acm.FChainedGrouper(groupers)
	top_node.ApplyGrouper(chained_grouper)
	calc_space.Refresh()
	tf_iter = calc_space.RowTreeIterator().FirstChild()
	pf_iter = tf_iter.FirstChild()
	while pf_iter:
		row = pf_iter.Tree().Item()
		prfid = str(row).replace("'", "")
		print 'prfid', prfid
		#acmInsCal = acm.FInstrumentCalculations()
		#acmInsCal.RealizedProfitLoss(calc_space, prfid, 'Inception', asofdate)
		for ent_row in row.Children():
			print ent_row
			for ins_row in ent_row.Children():
				insid = ins_row.StringKey()
				horizon_prod_id = ''
				acm_ins = acm.FInstrument[insid]
				ael_ins = ael.Instrument[insid]
				print insid
				#cs = acm.Calculations().CreateStandardCalculationsSpaceCollection()
				ins_calcs = acm_ins.Calculation()
				#print acm_ins
				# HKATS/Horizon product id = localcode@MIC (fallback to insid
				# further below when both parts are blank).
				localcode = ''
				if acm_ins.AdditionalInfo().Local_Exchange_Code() != None:
					if acm_ins.AdditionalInfo().Local_Exchange_Code().strip() != '':
						localcode = acm_ins.AdditionalInfo().Local_Exchange_Code().strip()
				insMic = ''
				if acm_ins.AdditionalInfo().MIC() != None:
					if acm_ins.AdditionalInfo().MIC().strip() != '':
						insMic = acm_ins.AdditionalInfo().MIC().strip()
				horizon_prod_id = localcode + '@' + insMic
				#print 'insid', insid
				# Sheet-cell values are read via FormattedValue and stripped
				# of thousands separators before numeric conversion.
				prf_ccy = calc_space.CreateCalculation(ins_row, 'Portfolio Currency').FormattedValue()
				ins_typ = calc_space.CreateCalculation(ins_row, 'Valuation Group').FormattedValue()
				und_ins = calc_space.CalculateValue(ins_row, 'Underlying Instrument')
				und_ins = insid if str(und_ins) == "" else und_ins
				str_price = calc_space.CreateCalculation(ins_row, 'Strike Price').FormattedValue().replace(',','')
				if (ins_typ == "Equity") or (ins_typ == "ETF"):
					conv_fac = 0
					expiry = ""
				else:
					expiry = calc_space.CreateCalculation(ins_row, 'Expiry').FormattedValue()
					conv_fac = acm_ins.ContractSize()
				pos = int(round(calc_space.CalculateValue(ins_row, 'Portfolio Position')))
				cp = calc_space.CalculateValue(ins_row, 'Call or Put')
				cp = cp[0] if len(cp) > 0 else ''
				avg_price = calc_space.CreateCalculation(ins_row, 'Portfolio Average Price').FormattedValue().replace(',','')
				mktv = calc_space.CreateCalculation(ins_row, 'Portfolio Market Value').FormattedValue().replace(',','')
				mktp = calc_space.CreateCalculation(ins_row, 'Portfolio Profit Loss Price End Date').FormattedValue().replace(',','')
				und_price = mktp if (ins_typ == "Equity") or (ins_typ == "ETF") else calc_space.CreateCalculation(ins_row, 'Portfolio Underlying Price').FormattedValue().replace(',','')
				nTrade = calc_space.CreateCalculation(ins_row, 'Portfolio Theoretical Profit And Loss Trade Attribution').FormattedValue().replace(',','')
				upld = 0
				uplm = 0
				uply = 0
				rpld = 0
				rplm = 0
				rply = 0
				tpld = 0
				tplm = 0
				tply = 0
				if gen_pnl == 'Y':
					# Daily/monthly/yearly unrealized, realized and total P&L.
					upld = calc_space.CreateCalculation(ins_row, 'Portfolio Unrealized Profit and Loss Daily').FormattedValue().replace(',','')
					uplm = calc_space.CreateCalculation(ins_row, 'Portfolio Unrealized Profit and Loss Monthly').FormattedValue().replace(',','')
					uply = calc_space.CreateCalculation(ins_row, 'Portfolio Unrealized Profit and Loss Yearly').FormattedValue().replace(',','')
					rpld = calc_space.CreateCalculation(ins_row, 'Portfolio Realized Profit and Loss Daily').FormattedValue().replace(',','')
					rplm = calc_space.CreateCalculation(ins_row, 'Portfolio Realized Profit and Loss Monthly').FormattedValue().replace(',','')
					rply = calc_space.CreateCalculation(ins_row, 'Portfolio Realized Profit and Loss Yearly').FormattedValue().replace(',','')
					tpld = calc_space.CreateCalculation(ins_row, 'Portfolio Total Profit and Loss Daily').FormattedValue().replace(',','')
					tplm = calc_space.CreateCalculation(ins_row, 'Portfolio Total Profit and Loss Monthly').FormattedValue().replace(',','')
					tply = calc_space.CreateCalculation(ins_row, 'Portfolio Total Profit and Loss Yearly').FormattedValue().replace(',','')
				divd = calc_space.CreateCalculation(ins_row, 'Portfolio Dividends').FormattedValue().replace(',','')
				delisted_date = ""
				total_issued = ael_ins.total_issued if ins_typ == "Listed Warrant" else ""
				# Notional: short option positions use the strike, longs and
				# futures use the underlying price.
				notional_amt = 0
				if ins_typ != "Equity" and ins_typ != "ETF":
					if "Future" not in ins_typ:
						notional_amt = dec(pos)*dec(und_price)*dec(conv_fac) if dec(pos) >= 0 else dec(pos)*dec(str_price)*dec(conv_fac)
					else:
						notional_amt = dec(pos)*dec(und_price)*dec(conv_fac)
				entity = str(ent_row).replace("'","")
				#pos_row = [prfid, insid, str(avg_price), str(pos), bs, horizon_context]
				if float(pos) >= 0:
					bs = 'L'
				else:
					bs = 'S'
				if horizon_prod_id == '@':
					horizon_prod_id = insid
				#header_ins = 'instrument,underlying,portfolio,instrument_type,currency,call_put,position,reporting_date,quantity,average_price,market_value,new_trade,upld,uplm,uply,rpld,rplm,rply,tpld,tplm,tply,dividends'
				#pos_row = [prfid, horizon_prod_id, str(avg_price), str(pos), bs, horizon_context]
				pos_row = [entity, insid, und_ins, dec(und_price), dec(str_price), conv_fac, expiry, prfid, ins_typ, cp
					, prf_ccy, bs, asofdate.to_string('%Y-%m-%d'), num(pos)
					, dec(avg_price), dec(mktp), dec(mktv), dec(nTrade)
					, dec(upld), dec(uplm), dec(uply)
					, dec(rpld), dec(rplm), dec(rply)
					, dec(tpld), dec(tplm), dec(tply)
					, dec(divd), delisted_date, horizon_prod_id, total_issued, notional_amt
					]
				pos_array.append(pos_row)
		pf_iter = pf_iter.NextSibling()
		#print 'pf_iter', pf_iter
	calc_space.Clear()
	pos_array.sort(sortArray)
	cur.execute("CREATE TABLE ins (" + header_ins + ");")
	cur.executemany("INSERT INTO ins VALUES ("+question_marks(header_ins)+")", pos_array)
	if fileNameIns != "":
		export_to_file(fileNameIns, header_ins, pos_array)
	return pos_array
def acc_journal(cur, asofdate, asofdatetp1, dict, ccy = ""):
header_jrn = 'date,journal_type,account,account_code,currency,debt,credit,analysis_code,description,company_code'
header_exc = 'date,type,id,entity,portfolio,counterparty,prod_type,currency'
header_bal = 'jorunal_type,debt,credit'
fileNameJrn = dict['filename_jrn']
fileNameJrn = fileNameJrn.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
fileNameExc = dict['filename_exc']
fileNameExc = fileNameExc.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
fileNameBal = dict['filename_bal']
fileNameBal = fileNameBal.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
cur.execute("alter table trd add column booked int default 0")
cur.execute("alter table ins add column booked int default 0")
jrn_array = []
print "Account Journal Generation"
#print asofdatetp1.strftime('%Y-%m-%d')
#python_db_debug(cur)
# Account Journal Generation
cur.execute("""select * from acc""")
prd_rows = cur.fetchall()
for prd_row in prd_rows:
#print prd_row
p_entity = prd_row[0]
p_bus_line = prd_row[1]
p_ins_type = prd_row[2]
p_otc = prd_row[3]
p_ae = prd_row[4]
p_acc_no = prd_row[5]
p_prf = prd_row[6]
str_sql = """select ins.instrument, trd.entity, ins.portfolio, ins.instrument_type, ins.currency, sum(trd.quantity), sum(trd.premium), ins.uply, ins.rply, ins.underlying_price, ins.strike_price, ins.quantity, ins.call_put, ins.conversion_factor, sum(trd.fee), '%s'
from ins join trd
on trd.portfolio = ins.portfolio
and trd.instrument_type = ins.instrument_type
and trd.instrument = ins.instrument
and trd.call_put = ins.call_put
and trd.currency = ins.currency
where trd.entity = '%s' and trd.portfolio = '%s' and trd.instrument_type = '%s'
group by trd.instrument
""" % (p_acc_no, p_entity, p_prf, p_ins_type)
cur.execute(str_sql)
trd_pos_rows = cur.fetchall()
trade_account_posting(jrn_array, trd_pos_rows, asofdate)
cur.execute( """update trd set booked = 1
where trd.entity = '%s' and trd.portfolio = '%s'
and trd.instrument_type = '%s'""" % (p_entity, p_prf, p_ins_type))
str_sql = """select ins.instrument, '%s', ins.portfolio, ins.instrument_type, ins.currency, ins.quantity, ins.average_price*ins.quantity, ins.uply, ins.rply, ins.underlying_price, ins.strike_price, ins.quantity, ins.call_put, ins.conversion_factor, 0, '%s'
from ins
where ins.portfolio = '%s' and ins.instrument_type = '%s'
""" % (p_entity, p_acc_no, p_prf, p_ins_type)
cur.execute(str_sql)
trd_pos_rows = cur.fetchall()
pos_account_posting(jrn_array, trd_pos_rows, asofdate, asofdatetp1)
cur.execute( """update ins set booked = 1
where ins.portfolio = '%s' and ins.instrument_type = '%s'""" % (p_prf, p_ins_type))
if p_ins_type != "Equity" and p_ins_type != "ETF":
expo_account_posting(jrn_array, trd_pos_rows, asofdate, asofdatetp1)
# Exceptions not yet output to Account Journal
cur.execute("""select '%s','Trade',trade_id,entity,portfolio,counterparty,instrument_type,currency from trd where booked = 0
union select '%s','Instrument',instrument,'',portfolio,'',instrument_type,currency from ins where booked = 0 or (quantity <> 0 and market_price = 0 and expiry > '%s' and instrument_type <> 'OTC Index Option' and instrument_type <> 'OTC Equity Option' ) """
%(asofdate.to_string('%Y-%m-%d'), asofdate.to_string('%Y-%m-%d'), asofdate.to_string('%Y-%m-%d')))
exc_array = cur.fetchall()
header_jrn = 'date,journal_type,account,account_code,currency,debt,credit,analysis_code,description,company_code'
cur.execute("CREATE TABLE jrn (" + header_jrn + ");")
cur.executemany("INSERT INTO jrn VALUES (?,?,?,?,?,?,?,?,?,?)", jrn_array)
cur.execute("""select journal_type, sum(debt), sum(credit) from jrn group by journal_type
union select 'Account Sum', sum(debt), sum(credit) from jrn order by journal_type desc""")
bal_array = cur.fetchall()
export_to_file(fileNameJrn, header_jrn, jrn_array)
export_to_file(fileNameExc, header_exc, exc_array)
export_to_file(fileNameBal, header_bal, bal_array)
return jrn_array, exc_array, bal_array
def trade_account_posting(jrn_array, trd_pos_rows, asofdate):
    """Book trade-level journal postings ('T') for one day's aggregated trades.

    Each input row (built by the join in acc_journal) carries per-instrument
    trade aggregates plus the instrument snapshot:
      [0]=instrument [1]=entity [2]=portfolio [3]=instrument_type
      [4]=currency [5]=sum(quantity) [6]=sum(premium) [7]=uply [8]=rply
      [9]=underlying_price [10]=strike_price [11]=position qty
      [12]=call_put [13]=conversion_factor [14]=sum(fee) [15]=account no.

    A plain buy/sell books three lines (payable/receivable, fees, inventory).
    When the day's net trading flips the position through zero, the trade is
    booked as two legs of three lines each: one closing out the pre-trade
    inventory side, one opening the post-trade side.  Debit/credit amounts
    are encoded as "debit,credit" strings and split before appending.
    Appends to jrn_array in place; returns None.
    """
    for trd_pos_row in trd_pos_rows:
        accs = []
        debt_credit = []
        t_ins = trd_pos_row[0]
        t_ent = trd_pos_row[1]
        t_prf = trd_pos_row[2]
        t_ins_typ = trd_pos_row[3]
        t_ccy = trd_pos_row[4]
        t_qty = num(trd_pos_row[5])
        t_prem = num(trd_pos_row[6])
        i_upl = num(trd_pos_row[7])
        i_rpl = num(trd_pos_row[8])
        i_und_price = dec(trd_pos_row[9])
        i_str_price = dec(trd_pos_row[10])
        i_qty = num(trd_pos_row[11])
        i_cp = trd_pos_row[12]
        i_cf = num(trd_pos_row[13])
        t_fee = dec(trd_pos_row[14])
        i_acc_no = trd_pos_row[15]
        # Position held before today's trading
        i_qty_past = i_qty - t_qty
        t_avg_price = 0 if t_qty == 0 else t_prem / t_qty
        acc_code = ""
        i_desp = ("Buy " if t_qty >= 0 else "Sell ") + str(t_qty) + "#" + str(t_ins) + "@" + str(t_avg_price)
        # Flip-through-zero: pre- and post-trade positions have opposite sign
        # and the trade is larger than the prior holding.
        if i_qty_past * t_qty < 0 and abs(i_qty_past) < abs(t_qty): # Short Sell or Short Cover
            # Leg 1: close the pre-trade inventory side (no fee on this leg).
            accs.append("A/C Payables" if t_qty >= 0 else "A/C Receivables")
            accs.append("Fees")
            accs.append("Inv Long" if i_qty_past >= 0 else "Inv Short" )
            # Leg 2: open the post-trade inventory side (carries the full fee).
            accs.append("A/C Payables" if t_qty >= 0 or t_fee > t_prem else "A/C Receivables")
            accs.append("Fees")
            accs.append("Inv Short" if i_qty_past >= 0 else "Inv Long")
            gross_amt = abs(i_qty_past * t_avg_price)
            net_amt = gross_amt
            debt_credit.append(("0," + str(abs(net_amt))) if t_qty >= 0 else (str(abs(net_amt)) + ",0"))
            debt_credit.append(str(0) + ",0")
            debt_credit.append((str(abs(gross_amt)) + ",0") if t_qty >= 0 else ("0," + str(abs(gross_amt))))
            # Leg 2 amounts are based on the residual (post-trade) quantity.
            gross_amt = abs(i_qty * t_avg_price)
            if t_qty >= 0:
                net_amt = abs(gross_amt) + t_fee
            else:
                net_amt = abs(gross_amt) - t_fee
            debt_credit.append(("0," + str(abs(net_amt))) if t_qty >= 0 or t_fee > t_prem else (str(abs(net_amt)) + ",0"))
            debt_credit.append(str(t_fee) + ",0")
            debt_credit.append((str(abs(gross_amt)) + ",0") if t_qty >= 0 else ("0," + str(abs(gross_amt))))
        else:
            # Simple buy/sell: single three-line booking on the current side.
            accs.append("A/C Payables" if t_qty >= 0 or t_fee > t_prem else "A/C Receivables")
            accs.append("Fees")
            accs.append("Inv Long" if i_qty_past >= 0 else "Inv Short" )
            gross_amt = t_prem
            # Net cash includes the fee: added when buying, deducted when selling.
            if t_qty >= 0:
                net_amt = abs(t_prem) + t_fee
            else:
                net_amt = abs(t_prem) - t_fee
            debt_credit.append(("0," + str(abs(net_amt))) if t_qty >= 0 or t_fee > t_prem else (str(abs(net_amt)) + ",0"))
            debt_credit.append(str(t_fee) + ",0")
            debt_credit.append((str(abs(gross_amt)) + ",0") if t_qty >= 0 else ("0," + str(abs(gross_amt))))
        for i in range(0, len(accs)):
            dc_arr = str(debt_credit[i]).split(',')
            jrn_array.append([asofdate.to_string('%Y-%m-%d'), "T", t_prf + "_" + t_ins_typ + " " + accs[i], acc_code, t_ccy, dc_arr[0], dc_arr[1], i_acc_no, i_desp, t_ent])
    return
def pos_account_posting(jrn_array, trd_pos_rows, asofdate, asofdatetp1):
    """Book position-level P/L postings plus their T+1 reversals.

    For every aggregated position row this appends eight journal lines to
    jrn_array: unrealized P/L against the MTM inventory account and
    realized P/L against the inventory account, each dated as-of ('U'/'R')
    and reversed on T+1 ('U Rev'/'R Rev').

    Row layout matches the selects built in acc_journal:
      [0]=instrument [1]=entity [2]=portfolio [3]=instrument_type
      [4]=currency [5]=quantity [6]=premium [7]=uply [8]=rply
      [9]=underlying_price [10]=strike_price [11]=position qty
      [12]=call_put [13]=conversion_factor [14]=fee [15]=account no.

    Appends to jrn_array in place; returns None.
    """
    # Dates are loop-invariant; format them once.
    day_t = asofdate.to_string('%Y-%m-%d')
    day_tp1 = asofdatetp1.to_string('%Y-%m-%d')
    for trd_pos_row in trd_pos_rows:
        accs = []
        debt_credit = []
        ur_profit = []
        accdt = []
        t_ins = trd_pos_row[0]
        t_ent = trd_pos_row[1]
        t_prf = trd_pos_row[2]
        t_ins_typ = trd_pos_row[3]
        t_ccy = trd_pos_row[4]
        t_qty = num(trd_pos_row[5])
        t_prem = num(trd_pos_row[6])
        i_upl = num(trd_pos_row[7])
        i_rpl = num(trd_pos_row[8])
        i_und_price = dec(trd_pos_row[9])
        i_str_price = dec(trd_pos_row[10])
        i_qty = num(trd_pos_row[11])
        i_cp = trd_pos_row[12]
        i_cf = num(trd_pos_row[13])
        t_fee = dec(trd_pos_row[14])
        i_acc_no = trd_pos_row[15]
        t_avg_price = 0 if t_qty == 0 else t_prem / t_qty
        acc_code = ""
        i_desp = str(t_qty) + "#" + str(t_ins) + "@" + str(t_avg_price)
        # Resolve the inventory account that absorbs realized P/L.  For a
        # flat position the side comes from the direction of the last trade;
        # the lookup is performed ONCE here and reused for both the T and
        # T+1 legs (previously the trade was fetched twice per row).
        if i_qty > 0:
            inv_acc = "Inv Long"
        elif i_qty < 0:
            inv_acc = "Inv Short"
        else:
            trd_id = get_last_trade_of_instr(t_ins, t_prf)
            acm_trd = acm.FTrade[trd_id]
            inv_acc = "Inv Short" if acm_trd.Quantity() > 0 else "Inv Long"
        # T: unrealized P/L vs MTM inventory
        accdt.append(day_t)
        accdt.append(day_t)
        ur_profit.append("U")
        ur_profit.append("U")
        accs.append("Unrealized P/L")
        accs.append("Inv Long MTM" if i_qty >= 0 else "Inv Short MTM")
        debt_credit.append(("0," + str(abs(i_upl))) if i_upl >= 0 else (str(abs(i_upl)) + ",0"))
        debt_credit.append((str(abs(i_upl)) + ",0") if i_upl >= 0 else ("0," + str(abs(i_upl))))
        # T: realized P/L vs inventory
        accdt.append(day_t)
        accdt.append(day_t)
        ur_profit.append("R")
        ur_profit.append("R")
        accs.append("Realized P/L")
        accs.append(inv_acc)
        debt_credit.append(("0," + str(abs(i_rpl))) if i_rpl >= 0 else (str(abs(i_rpl)) + ",0"))
        debt_credit.append((str(abs(i_rpl)) + ",0") if i_rpl >= 0 else ("0," + str(abs(i_rpl))))
        # T+1: reverse the unrealized postings (debit/credit swapped)
        accdt.append(day_tp1)
        accdt.append(day_tp1)
        ur_profit.append("U Rev")
        ur_profit.append("U Rev")
        accs.append("Unrealized P/L T+1")
        accs.append("Inv Long MTM T+1" if i_qty >= 0 else "Inv Short MTM T+1")
        debt_credit.append((str(abs(i_upl)) + ",0") if i_upl >= 0 else ("0," + str(abs(i_upl))))
        debt_credit.append(("0," + str(abs(i_upl))) if i_upl >= 0 else (str(abs(i_upl)) + ",0"))
        # T+1: reverse the realized postings
        accdt.append(day_tp1)
        accdt.append(day_tp1)
        ur_profit.append("R Rev")
        ur_profit.append("R Rev")
        accs.append("Realized P/L T+1")
        accs.append(inv_acc + " T+1")
        debt_credit.append((str(abs(i_rpl)) + ",0") if i_rpl >= 0 else ("0," + str(abs(i_rpl))))
        debt_credit.append(("0," + str(abs(i_rpl))) if i_rpl >= 0 else (str(abs(i_rpl)) + ",0"))
        for i in range(0, len(accs)):
            dc_arr = str(debt_credit[i]).split(',')
            jrn_array.append([accdt[i], ur_profit[i], t_prf + "_" + t_ins_typ + " " + accs[i], acc_code, t_ccy, dc_arr[0], dc_arr[1], i_acc_no, i_desp, t_ent])
    return
def expo_account_posting(jrn_array, trd_pos_rows, asofdate, asofdatetp1):
    """Book off-balance-sheet ("OBS") exposure postings with T+1 reversals.

    Each position row yields four journal lines: a WRT exposure account
    against an offset account dated as-of, and the mirror-image pair dated
    T+1 ("OBS Rev").  Long positions are valued at the underlying price,
    short positions at the strike price, both scaled by the conversion
    factor.  Appends to jrn_array in place; returns None.
    """
    day_t = asofdate.to_string('%Y-%m-%d')
    day_t1 = asofdatetp1.to_string('%Y-%m-%d')
    for row in trd_pos_rows:
        t_ins = row[0]
        t_ent = row[1]
        t_prf = row[2]
        t_ins_typ = row[3]
        t_ccy = row[4]
        t_qty = num(row[5])
        t_prem = num(row[6])
        i_upl = num(row[7])
        i_rpl = num(row[8])
        i_und_price = dec(row[9])
        i_str_price = dec(row[10])
        i_qty = num(row[11])
        i_cp = row[12]
        i_cf = num(row[13])
        t_fee = dec(row[14])
        i_acc_no = row[15]
        avg_px = 0 if t_qty == 0 else t_prem / t_qty
        desc = str(t_qty) + "#" + str(t_ins) + "@" + str(avg_px)
        acc_code = ""
        long_side = i_qty >= 0
        # Exposure amount and account depend only on the position's sign.
        if long_side:
            amt = abs(i_qty * i_und_price * i_cf)
            expo_acc = "Long WRT"
        else:
            amt = abs(i_qty * i_str_price * i_cf)
            expo_acc = "Short WRT"
        debit = str(amt) + ",0"
        credit = "0," + str(amt)
        # As-of pair: (exposure line, offset line); T+1 simply swaps them.
        if long_side:
            t_pair = (debit, credit)
        else:
            t_pair = (credit, debit)
        t1_pair = (t_pair[1], t_pair[0])
        postings = [
            ("OBS", day_t, expo_acc, t_pair[0]),
            ("OBS", day_t, "Offset A/C", t_pair[1]),
            ("OBS Rev", day_t1, expo_acc + " T+1", t1_pair[0]),
            ("OBS Rev", day_t1, "Offset A/C T+1", t1_pair[1]),
        ]
        for tag, day, acc, dc in postings:
            dc_arr = str(dc).split(',')
            jrn_array.append([day, tag, t_prf + "_" + t_ins_typ + " " + acc, acc_code, t_ccy, dc_arr[0], dc_arr[1], i_acc_no, desc, t_ent])
    return
# FRR rules for listed warrants and CBBC covered by equity and ETF
def wrt_eqt_covered_frr(frr_array, cur, e_ent, e_und):
    """Book FRR rows for short call Listed Warrants partially covered by stock.

    Processes warrants whose physical hedge quantity differs from the full
    position (ins.ph_qty <> quantity * conversion_factor).  For each short
    call, two rows are appended to frr_array: a liquid-asset row for the
    covered portion (subsection S27(6), item LA-11) and a ranking-liability
    row for the uncovered portion (S43(8), RL-31).  Processed instruments
    are flagged via ins.frr_written.
    NOTE(review): subsection codes presumably refer to the HK SFC Financial
    Resources Rules — confirm against the regulatory mapping.
    """
    # Look for instruments group by underlying and entity
#    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
#        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
#        from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
#        and (instrument_type = 'Equity' or instrument_type = 'ETF')
#        order by instrument_type """ % (e_ent, e_und))
#    hedging_rows = cur.fetchall()
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,fair_price,fair_value
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s' and ins.ph_qty <> quantity * conversion_factor
        and (instrument_type = 'Listed Warrant' )
        order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
#    for hedging_row in hedging_rows:
    for princip_row in princip_rows:
        #print princip_row
        #print hedging_row
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        p_fair_price = dec(princip_row[19])
        p_fair_value = dec(princip_row[20])
#        h_id = hedging_row[0]
#        h_ins = hedging_row[1]
#        h_und = hedging_row[2]
#        h_und_price = dec(hedging_row[3])
#        h_str_price = dec(hedging_row[4])
#        h_conv_factor = dec(hedging_row[5])
#        h_expiry = hedging_row[6]
#        h_prf = hedging_row[7]
#        h_ins_typ = hedging_row[8]
#        h_cp = hedging_row[9]
#        h_ccy = hedging_row[10]
#        h_pos = hedging_row[11]
#        h_qty = dec(hedging_row[12])
#        h_avg_price = dec(hedging_row[13])
#        h_mkt_price = dec(hedging_row[14])
#        h_mkt_value = dec(hedging_row[15])
#        h_ph_qty = dec(hedging_row[16])
#        h_hc = dec(hedging_row[17])
#        h_total_issued = hedging_row[18]
        # Only long side equity
        if not (p_pos == "S" and p_cp == "C" ):
            continue
        # ph_qty is in underlying units; divide by the conversion factor to
        # express the uncovered amount in warrant units.
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        # equity is opposite sign of warrant
        instr_value = -p_covered_qty * p_conv_factor * p_und_price
        eqt_hc_amt = instr_value * p_hc
        str_prem = -p_covered_qty * p_conv_factor* p_str_price
        # S27(6): lower of haircut market value and strike proceeds counts
        s27_applied_mv_1 = instr_value - eqt_hc_amt
        s27_applied_mv_2 = str_prem
        wrt_mv = abs(p_fair_price * p_qty)
        hc_amt = -p_uncovered_qty * p_conv_factor * p_und_price * p_hc
        # Out-of-the-money allowance on the uncovered portion
        if (p_cp == "C" and p_und_price < p_str_price) or (p_cp == "P" and p_und_price > p_str_price) :
            otm_wrt_amt = p_uncovered_qty * (p_str_price - p_und_price) * p_conv_factor
        else:
            otm_wrt_amt = 0
        s43_applied_mv_1 = hc_amt
        s43_applied_mv_2 = abs(otm_wrt_amt)
        s27_6_amt1 = s27_applied_mv_1 if s27_applied_mv_1 < s27_applied_mv_2 else s27_applied_mv_2
        # Haircut in excess of the OTM allowance ranks as a liability
        s43_amt2 = 0 if s43_applied_mv_2 == 0 or s43_applied_mv_1 < s43_applied_mv_2 else (s43_applied_mv_1 - s43_applied_mv_2)
        s43_amt1 = wrt_mv
        s43_amt3 = 0
        # Liquid-asset row for the covered portion (S27(6), LA-11)
        frr_array.append([e_ent, p_prf, p_ins, p_und, p_und, p_ins_typ, p_pos, "Equity", "L", p_qty, -p_covered_qty*p_conv_factor, p_covered_qty, p_uncovered_qty
                , p_avg_price, p_fair_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_fair_value if p_fair_value > 0 else 0), (abs(p_fair_value) if p_fair_value < 0 else 0)
                , instr_value, p_und_price, p_hc, eqt_hc_amt, s27_applied_mv_1, s27_applied_mv_2, "LA", s27_6_amt1, 0, 0, "S27(6)", "LA-11"])
        # Ranking-liability row for the uncovered portion (S43(8), RL-31)
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", p_ins_typ, p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
                , p_avg_price, p_fair_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_fair_value if p_fair_value > 0 else 0), (abs(p_fair_value) if p_fair_value < 0 else 0)
                , instr_value, p_und_price, p_hc, hc_amt, s43_applied_mv_1, s43_applied_mv_2, "RL", s43_amt1, s43_amt2, s43_amt3, "S43(8)", "RL-31"])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for listed equity option covered by equity
def opt_eqt_covered_frr(frr_array, cur, e_ent, e_und):
    """Book FRR rows for Listed Equity Options hedged with physical stock.

    Processes options whose hedge quantity differs from the full position.
    The covered portion is classified by (position, call/put):
      - long call / short put  -> ranking liability (S43(6) / S43(5), RL-31)
      - short call / long put  -> liquid asset (S27(2) / S27(4), LA-11)
    Processed instruments are flagged via ins.frr_written.
    NOTE(review): subsection codes presumably map to the HK SFC Financial
    Resources Rules — confirm against the regulatory mapping.
    """
    amt1 = 0
    amt2 = 0
    subsection = ""
    # Look for instruments group by underlying and entity
#    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
#        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
#        from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
#        and (instrument_type = 'Equity' or instrument_type = 'ETF')
#        order by instrument_type """ % (e_ent, e_und))
#    hedging_rows = cur.fetchall()
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,fair_price,fair_value
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s' and ins.ph_qty <> quantity * conversion_factor
        and instrument_type = 'Listed Equity Option'
        order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
#    for hedging_row in hedging_rows:
    for princip_row in princip_rows:
        #print princip_row
        #print hedging_row
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        p_fair_price = dec(princip_row[19])
        p_fair_value = dec(princip_row[20])
#        h_id = hedging_row[0]
#        h_ins = hedging_row[1]
#        h_und = hedging_row[2]
#        h_und_price = dec(hedging_row[3])
#        h_str_price = dec(hedging_row[4])
#        h_conv_factor = dec(hedging_row[5])
#        h_expiry = hedging_row[6]
#        h_prf = hedging_row[7]
#        h_ins_typ = hedging_row[8]
#        h_cp = hedging_row[9]
#        h_ccy = hedging_row[10]
#        h_pos = hedging_row[11]
#        h_qty = dec(hedging_row[12])
#        h_avg_price = dec(hedging_row[13])
#        h_mkt_price = dec(hedging_row[14])
#        h_mkt_value = dec(hedging_row[15])
#        h_ph_qty = dec(hedging_row[16])
#        h_hc = dec(hedging_row[17])
#        h_total_issued = hedging_row[18]
        # Skip some entries
#        if h_pos == "S":
#            if not ((p_pos == "L" and p_cp == "C") or (p_pos == "S" and p_cp == "P")) :
#                continue
#        if h_pos == "L":
#            if not ((p_pos == "S" and p_cp == "C") or (p_pos == "L" and p_cp == "P")) :
#                continue
        # ph_qty is in underlying units; convert to option units.
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        instr_value = p_covered_qty*p_conv_factor*p_und_price
        write_frr = False
        # Ranking-liability side: long call or short put covered by stock.
        # Note: (pos, call/put) matches exactly one of the two outer branches,
        # so write_frr cannot leak from one branch into the other.
        if ((p_pos == "L" and p_cp == "C") or (p_pos == "S" and p_cp == "P")):
            mv = abs(p_covered_qty)*p_conv_factor*p_und_price
#            if ins_percent_mv(cur, e_ent, e_und, mv):
#                hc_new = 1
#                mv = mv*hc_new
#            else:
            hc_new = p_hc
            # Market value grossed up by the haircut
            mv = mv*(1+p_hc)
            applied_mv_2 = 0
            if (p_pos == "L" and p_cp == "C"):
                subsection = "S43(6)"
                item_no = "RL-31"
                str_prem = abs(p_covered_qty*p_conv_factor)*p_str_price
                # Lower of grossed-up MV and strike proceeds
                amt2 = mv if mv < str_prem else str_prem
                applied_mv_2 = str_prem
                write_frr = True
            if (p_pos == "S" and p_cp == "P"):
                subsection = "S43(5)"
                item_no = "RL-31"
                itm_prem = abs(p_covered_qty*p_conv_factor)*(p_str_price-p_und_price)
                # Higher of grossed-up MV and in-the-money amount
                amt2 = mv if mv > itm_prem else itm_prem
                applied_mv_2 = itm_prem
                write_frr = True
            if write_frr:
                frr_array.append([e_ent, p_prf, p_ins, p_und, p_und, p_ins_typ, p_pos, "Equity", "S", p_qty, -abs(p_covered_qty*p_conv_factor), p_covered_qty, p_uncovered_qty
                        , p_avg_price, p_fair_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_fair_value if p_fair_value > 0 else 0), (abs(p_fair_value) if p_fair_value < 0 else 0)
                        , instr_value, p_und_price, hc_new, mv*hc_new, mv, applied_mv_2, "RL", 0, amt2, 0, subsection, item_no])
        # Liquid-asset side: short call or long put covered by stock.
        if ((p_pos == "S" and p_cp == "C") or (p_pos == "L" and p_cp == "P")):
            eqt_mkt_value = abs(p_covered_qty) * p_conv_factor * p_und_price
            eqt_hc_amt = eqt_mkt_value * p_hc
            str_prem = abs(p_covered_qty) * p_conv_factor *(p_str_price)
            applied_mv_1 = eqt_mkt_value - eqt_hc_amt
            applied_mv_2 = str_prem
            if (p_pos == "S" and p_cp == "C"):
                subsection = "S27(2)"
                item_no = "LA-11"
                # Lower of haircut MV and strike proceeds
                amt1 = applied_mv_1 if applied_mv_1 < applied_mv_2 else applied_mv_2
                write_frr = True
            if (p_pos == "L" and p_cp == "P"):
                subsection = "S27(4)"
                item_no = "LA-11"
                # Higher of haircut MV and strike proceeds
                amt1 = applied_mv_1 if applied_mv_1 > applied_mv_2 else applied_mv_2
                write_frr = True
            if write_frr:
                frr_array.append([e_ent, p_prf, p_ins, p_und, p_und, p_ins_typ, p_pos, "Equity", "L", p_qty, abs(p_covered_qty*p_conv_factor), p_covered_qty, p_uncovered_qty
                        , p_avg_price, p_fair_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_fair_value if p_fair_value > 0 else 0), (abs(p_fair_value) if p_fair_value < 0 else 0)
                        , instr_value, p_und_price, p_hc, eqt_hc_amt, applied_mv_1, applied_mv_2, "LA", amt1, 0, 0, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for listed equity option covered by listed equity future
def opt_fuo_covered_frr(frr_array, cur, e_ent, e_und):
    """Book FRR rows for Listed Equity Options hedged with listed futures.

    Pairs every partially-hedged option (ph_qty non-zero and not the full
    position) with every Listed Equity Future on the same underlying.  A
    liquid-asset row (item LA-12) is appended when the pair qualifies:
      - short future vs long call  -> S31(3)
      - long future  vs long put   -> S31(2)
    Both legs are flagged via ins.frr_written.
    NOTE(review): subsection codes presumably map to the HK SFC Financial
    Resources Rules — confirm against the regulatory mapping.
    """
    # Look for instruments group by underlying and entity
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
        and instrument_type = 'Listed Equity Future'
        order by instrument_type """ % (e_ent, e_und))
    hedging_rows = cur.fetchall()
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s' and ins.ph_qty <> 0 and ins.ph_qty <> quantity * conversion_factor
        and instrument_type = 'Listed Equity Option'
        order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        for hedging_row in hedging_rows:
            #print princip_row
            #print hedging_row
            # NOTE(review): the p_* unpacking below is invariant w.r.t. the
            # inner hedging loop; hoisting it would save work — confirm
            # before changing.
            p_id = princip_row[0]
            p_ins = princip_row[1]
            p_und = princip_row[2]
            p_und_price = dec(princip_row[3])
            p_str_price = dec(princip_row[4])
            p_conv_factor = dec(princip_row[5])
            p_expiry = princip_row[6]
            p_prf = princip_row[7]
            p_ins_typ = princip_row[8]
            p_cp = princip_row[9]
            p_ccy = princip_row[10]
            p_pos = princip_row[11]
            p_qty = dec(princip_row[12])
            p_avg_price = dec(princip_row[13])
            p_mkt_price = dec(princip_row[14])
            p_mkt_value = dec(princip_row[15])
            p_ph_qty = dec(princip_row[16])
            p_hc = dec(princip_row[17])
            p_total_issued = princip_row[18]
            h_id = hedging_row[0]
            h_ins = hedging_row[1]
            h_und = hedging_row[2]
            h_und_price = dec(hedging_row[3])
            h_str_price = dec(hedging_row[4])
            h_conv_factor = dec(hedging_row[5])
            h_expiry = hedging_row[6]
            h_prf = hedging_row[7]
            h_ins_typ = hedging_row[8]
            h_cp = hedging_row[9]
            h_ccy = hedging_row[10]
            h_pos = hedging_row[11]
            h_qty = dec(hedging_row[12])
            h_avg_price = dec(hedging_row[13])
            h_mkt_price = dec(hedging_row[14])
            h_mkt_value = dec(hedging_row[15])
            h_ph_qty = dec(hedging_row[16])
            h_hc = dec(hedging_row[17])
            h_total_issued = hedging_row[18]
            # Skip some entries
            if h_pos == "S":
                if not ((p_pos == "L" and p_cp == "C") or (p_pos == "S" and p_cp == "P")) :
                    continue
            if h_pos == "L":
                if not ((p_pos == "S" and p_cp == "C") or (p_pos == "L" and p_cp == "P")) :
                    continue
            s31 = False
            item_no = "LA-12"
            if (h_pos == "S" and p_pos == "L" and p_cp == "C"):
                subsection = "S31(3)"
                s31 = True
            if (h_pos == "L" and p_pos == "L" and p_cp == "P"):
                subsection = "S31(2)"
                s31 = True
            if s31:
                # ph_qty is in underlying units; convert to option units.
                p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
                p_covered_qty = p_qty - p_uncovered_qty
                # Covered option valued at its own market price
                amt1 = p_covered_qty*p_mkt_price
                instr_value = p_covered_qty*p_conv_factor*h_mkt_price
                frr_array.append([e_ent, p_prf, p_ins, p_und, h_ins, p_ins_typ, p_pos, h_ins_typ, h_pos, p_qty, -p_covered_qty*p_conv_factor, p_covered_qty, p_uncovered_qty
                        , p_avg_price, p_mkt_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_mkt_value if p_mkt_value > 0 else 0), (abs(p_mkt_value) if p_mkt_value < 0 else 0)
                        , instr_value, p_und_price, 0, 0, amt1, 0, "LA", amt1, 0, 0, subsection, item_no])
                cur.execute("""update ins set frr_written = 1 where rowid = %s or rowid = %s """ % (p_id, h_id))
    return
# FRR rules for listed or OTC equity future covered by equity
def fuo_eqt_covered_frr(frr_array, cur, e_ent, e_und):
    """Book FRR rows for equity futures hedged with physical stock.

    Uses ONLY the first Equity/ETF row on the underlying as the hedge.
    For a short future covered by long stock, a liquid-asset row is
    appended (subsection S27(3), item LA-11) valued at the hedge's market
    price, and both legs are flagged via ins.frr_written.
    NOTE(review): subsection codes presumably map to the HK SFC Financial
    Resources Rules — confirm against the regulatory mapping.
    """
    # Look for instruments group by underlying and entity
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
        and (instrument_type = 'Equity' or instrument_type = 'ETF')
        order by instrument_type """ % (e_ent, e_und))
    hedging_rows = cur.fetchall()
    if len(hedging_rows) > 0:
        hedging_row = hedging_rows[0]
    else:
        return
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s' and ins.ph_qty <> 0 and ins.ph_qty <> quantity * conversion_factor
        and (instrument_type = 'Listed Equity Future' or instrument_type = 'OTC Equity Future')
        order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        #print princip_row
        #print hedging_row
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        # NOTE(review): hedging_row is fixed for the whole loop; this
        # unpacking is loop-invariant and could be hoisted.
        h_id = hedging_row[0]
        h_ins = hedging_row[1]
        h_und = hedging_row[2]
        h_und_price = dec(hedging_row[3])
        h_str_price = dec(hedging_row[4])
        h_conv_factor = dec(hedging_row[5])
        h_expiry = hedging_row[6]
        h_prf = hedging_row[7]
        h_ins_typ = hedging_row[8]
        h_cp = hedging_row[9]
        h_ccy = hedging_row[10]
        h_pos = hedging_row[11]
        h_qty = dec(hedging_row[12])
        h_avg_price = dec(hedging_row[13])
        h_mkt_price = dec(hedging_row[14])
        h_mkt_value = dec(hedging_row[15])
        h_ph_qty = dec(hedging_row[16])
        h_hc = dec(hedging_row[17])
        h_total_issued = hedging_row[18]
        item_no = "LA-11"
        # ph_qty is in underlying units; convert to future units.
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        instr_value = p_covered_qty*p_conv_factor*h_mkt_price
        # Only the short-future / long-stock combination qualifies.
        if (h_pos == "L" and p_pos == "S" ):
            amt1 = abs(p_ph_qty * p_conv_factor) * h_mkt_price
            frr_array.append([e_ent, p_prf, p_ins, p_und, h_ins, p_ins_typ, p_pos, h_ins_typ, h_pos, p_qty, -p_covered_qty*p_conv_factor, p_covered_qty, p_uncovered_qty
                , p_avg_price, p_mkt_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_mkt_value if p_mkt_value > 0 else 0), (abs(p_mkt_value) if p_mkt_value < 0 else 0)
                , instr_value, p_und_price, 0, 0, amt1, 0, "LA", amt1, 0, 0, "S27(3)", item_no])
            cur.execute("""update ins set frr_written = 1 where rowid = %s or rowid = %s """ % (p_id, h_id))
    return
# FRR rules for uncovered equity
def eqt_frr(frr_array, cur, e_ent, e_und):
    """Book FRR rows for the uncovered portion of Equity/ETF positions.

    Long uncovered exposure counts as a liquid asset net of haircut
    (S27(1), LA-11); short uncovered exposure ranks as a liability at
    full market value plus haircut (S43(1);S43(2), RL-22;RL-31).
    Every processed row is flagged via ins.frr_written.
    """
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
        call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
        from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
        and (instrument_type = 'Equity' or instrument_type = 'ETF')
        order by instrument_type """ % (e_ent, e_und))
    for row in cur.fetchall():
        (p_id, p_ins, p_und, raw_und_price, raw_str_price, raw_conv_factor,
         p_expiry, p_prf, p_ins_typ, p_cp, p_ccy, p_pos, raw_qty,
         raw_avg_price, raw_mkt_price, raw_mkt_value, raw_ph_qty, raw_hc,
         p_total_issued) = row
        p_und_price = dec(raw_und_price)
        p_str_price = dec(raw_str_price)
        p_conv_factor = dec(raw_conv_factor)
        p_qty = dec(raw_qty)
        p_avg_price = dec(raw_avg_price)
        p_mkt_price = dec(raw_mkt_price)
        p_mkt_value = dec(raw_mkt_value)
        p_ph_qty = dec(raw_ph_qty)
        p_hc = dec(raw_hc)
        # For cash equity, ph_qty is already the uncovered quantity.
        p_uncovered_qty = p_ph_qty
        p_covered_qty = p_qty - p_uncovered_qty
        mv = abs(p_uncovered_qty * p_mkt_price)
        hc_amt = mv * p_hc
        amt2 = 0
        amt3 = 0
        applied_mv_2 = 0
        if p_uncovered_qty >= 0:
            # Long: haircut market value is admissible as a liquid asset.
            a_l, subsection, item_no = "LA", "S27(1)", "LA-11"
            applied_mv_1 = mv - hc_amt
            amt1 = applied_mv_1
        else:
            # Short: full MV plus the haircut rank as liabilities.
            a_l, subsection, item_no = "RL", "S43(1);S43(2)", "RL-22;RL-31"
            applied_mv_1 = mv
            applied_mv_2 = hc_amt
            amt1 = applied_mv_1
            amt2 = applied_mv_2
        # Concentration add-on (S43(3)) is currently disabled:
        # if ins_percent_mv(cur, e_ent, e_und, mv):
        #     amt3 = abs(mv)
        #     subsection = subsection + ";S43(3)"
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", "Equity", p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
                , p_avg_price, p_mkt_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_mkt_value if p_mkt_value > 0 else 0), (abs(p_mkt_value) if p_mkt_value < 0 else 0)
                , "", p_und_price, p_hc, hc_amt, applied_mv_1, applied_mv_2, a_l, amt1, amt2, amt3, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for uncovered listed warrants and CBBC
def wrt_frr(frr_array, cur, e_ent, e_und):
    """FRR treatment for uncovered listed warrants and CBBC warrants of one
    (entity, underlying) pair.

    Appends one report row per position that still has an uncovered quantity
    to ``frr_array`` (mutated in place) and flags every processed ``ins`` row
    with ``frr_written = 1``.  Fully covered short positions are flagged but
    produce no report row.

    Parameters:
        frr_array: list accumulating FRR output rows.
        cur: DB cursor over the working ``ins`` table.
        e_ent: entity id being processed.
        e_und: underlying id being processed.

    NOTE(review): the subsection/item codes ("S27(1)", "S43(1)", "LA-11",
    "RL-22;RL-31", ...) look like HK SFC Financial Resources Rules
    references -- confirm the mapping against the current rulebook.
    """
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
    call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,fair_price,fair_value
    from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
    and (instrument_type = 'Listed Warrant' or instrument_type = 'CBBC Warrant')
    order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        # Positional unpack -- order must match the select list above.
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        p_fair_price = dec(princip_row[19])
        p_fair_value = dec(princip_row[20])
        amt1 = 0
        amt2 = 0
        amt3 = 0
        # ph_qty is the residual physical quantity left by the pairing passes;
        # converted back to warrant units via the conversion factor.
        p_und_value = p_ph_qty*p_und_price
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        mv = p_uncovered_qty*p_fair_price
        hc_amt = mv
        instr_value = p_covered_qty*p_conv_factor*p_und_price
        applied_mv_1 = 0
        applied_mv_2 = 0
        if p_pos == "L":
            # Long warrant: liquid asset at fair value less haircut amount.
            a_l = "LA"
            subsection = "S27(1)"
            item_no = "LA-11"
            applied_mv_1 = mv-hc_amt
            amt1 = applied_mv_1
        else:
            # Generate only uncovered warrant
            a_l = "RL"
            if int(p_covered_qty) == 0:
                subsection = "S43(1)"
                item_no = "RL-22;RL-31"
                applied_mv_1 = abs(mv)
                applied_mv_2 = abs(mv)
                amt1 = applied_mv_1
                if p_ins_typ == "Listed Warrant":
                    if p_und == "HSI Index" or p_und == "HSCEI Index":
                        # Index warrants on HSI/HSCEI get the S43(3) add-on.
                        amt2 = applied_mv_2
                        subsection = subsection + ";S43(3)"
                    else:
                        if p_cp == "C":
                            # Short uncovered call: haircut on the underlying
                            # exposure, reduced by any out-of-the-money amount.
                            hc_amt = -p_uncovered_qty * p_conv_factor * p_und_price * p_hc
                            if (p_cp == "C" and p_und_price < p_str_price) or (p_cp == "P" and p_und_price > p_str_price) :
                                otm_wrt_amt = p_uncovered_qty * (p_str_price - p_und_price) * p_conv_factor
                            else:
                                otm_wrt_amt = 0
                            applied_mv_1 = hc_amt
                            applied_mv_2 = abs(otm_wrt_amt)
                            amt2 = 0 if applied_mv_2 == 0 or applied_mv_1 < applied_mv_2 else (applied_mv_1 - applied_mv_2)
                            subsection = subsection + ";S43(8)"
                        else:
                            amt2 = applied_mv_2
                            subsection = subsection + ";S43(2)"
                else:
                    # CBBC warrant: full market value under S43(3).
                    amt3 = abs(mv)
                    subsection = subsection + ";S43(3)"
            else:
                # Fully covered short position: flag as written, emit no row.
                cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
                continue
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", p_ins_typ, p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
        , p_avg_price, p_fair_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_fair_value if p_fair_value > 0 else 0), (abs(p_fair_value) if p_fair_value < 0 else 0)
        , instr_value, p_und_price, p_hc, hc_amt, applied_mv_1, applied_mv_2, a_l, amt1, amt2, amt3, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for uncovered listed index and equity option
def opt_opf_frr(frr_array, cur, e_ent, e_und):
    """FRR treatment for uncovered listed index / equity options of one
    (entity, underlying) pair.

    Appends one report row per position to ``frr_array`` (mutated in place)
    and flags each processed ``ins`` row with ``frr_written = 1``.

    Parameters:
        frr_array: list accumulating FRR output rows.
        cur: DB cursor over the working ``ins`` table.
        e_ent: entity id being processed.
        e_und: underlying id being processed.
    """
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
    call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,(market_price-average_price)*quantity*conversion_factor,instrument_type,hkats_code,fair_price,fair_value
    from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
    and (instrument_type = 'Listed Index Option' or instrument_type = 'Listed Equity Option')
    order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        # Positional unpack -- order must match the select list above.
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        # index 19 (the computed P&L expression) is intentionally unused here
        p_ins_type = princip_row[20]
        p_hkats_code = princip_row[21]
        p_fair_price = dec(princip_row[22])
        p_fair_value = dec(princip_row[23])
        amt1 = 0
        amt2 = 0
        amt3 = 0
        applied_mv_1 = 0
        applied_mv_2 = 0
        p_und_value = p_ph_qty*p_und_price
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        mv = p_uncovered_qty*p_fair_price*p_conv_factor
        hc_amt = 0
        p_uply = p_uncovered_qty*p_conv_factor*(p_fair_price-p_avg_price)
        # if '@' in p_hkats_code and p_hkats_code.split('@')[1] == "XHKF":
        # im = margin_lookup(cur, p_hkats_code, p_pos)*p_uncovered_qty
        # else:
        # im = 0
        if p_pos == "L":
            # Long option: liquid asset, 60% of fair value per S31(1)/LA-12.
            a_l = "LA"
            subsection = "S31(1)"
            item_no = "LA-12"
            applied_mv_1 = abs(mv)
            amt1 = applied_mv_1*dec(0.6)
        else:
            # Short option: ranking liability at fair value per S40(1-4)/RL-31.
            a_l = "RL"
            subsection = "S40(1-4)"
            item_no = "RL-31"
            applied_mv_1 = abs(mv)
            applied_mv_2 = 0
            amt1 = applied_mv_1
            amt2 = applied_mv_1
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", p_ins_typ, p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
        , p_avg_price, p_fair_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_fair_value if p_fair_value > 0 else 0), (abs(p_fair_value) if p_fair_value < 0 else 0)
        , "", p_und_price, 0, hc_amt, applied_mv_1, applied_mv_2, a_l, amt1, amt2, amt3, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for listed index or equity future
def fut_fuo_frr(frr_array, cur, e_ent, e_und):
    """FRR treatment for listed index / equity futures of one
    (entity, underlying) pair.

    Appends one report row per position to ``frr_array`` (mutated in place)
    and flags each processed ``ins`` row with ``frr_written = 1``.

    Parameters:
        frr_array: list accumulating FRR output rows.
        cur: DB cursor over the working ``ins`` table.
        e_ent: entity id being processed.
        e_und: underlying id being processed.

    Fixes vs. the previous revision:
      * The classification fields (``a_l``, ``subsection``, ``item_no``,
        ``applied_mv_1``, ``applied_mv_2``) were only assigned inside the
        short-position branch, so the first long-future row raised
        ``NameError`` -- and later long rows silently reused stale values
        from the previous iteration.  They are now reset to neutral
        defaults on every iteration; short positions behave exactly as
        before.
      * A NULL ``hkats_code`` no longer crashes the ``'@' in ...`` test;
        it is treated as a non-HKFE instrument (initial margin 0).
    """
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
    call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,(market_price-average_price)*quantity*conversion_factor,instrument_type,hkats_code
    from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
    and (instrument_type = 'Listed Index Future' or instrument_type = 'Listed Equity Future')
    order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        # Positional unpack -- order must match the select list above.
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        p_ins_type = princip_row[20]
        p_hkats_code = princip_row[21]
        amt1 = 0
        amt2 = 0
        amt3 = 0
        # Neutral defaults so long positions emit a well-formed (unclassified)
        # row instead of crashing or reusing the previous row's values.
        a_l = ""
        subsection = ""
        item_no = ""
        applied_mv_1 = 0
        applied_mv_2 = 0
        p_und_value = p_ph_qty*p_und_price
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        # Unrealised P&L on the uncovered portion.
        p_uply = p_uncovered_qty*p_conv_factor*(p_mkt_price-p_avg_price)
        # Initial margin lookup only applies to HKFE ("XHKF") contracts; guard
        # against a NULL hkats_code coming back from the DB.
        if p_hkats_code and '@' in p_hkats_code and p_hkats_code.split('@')[1] == "XHKF":
            im = margin_lookup(cur, p_hkats_code, p_pos)*p_uncovered_qty
        else:
            im = 0
        mv = p_uncovered_qty*p_mkt_price
        hc_amt = im
        if p_pos == "S":
            # Short futures: ranking liability per S40(1-4) / RL-31,
            # initial margin less unrealised P&L.
            a_l = "RL"
            subsection = "S40(1-4)"
            applied_mv_1 = abs(im)
            applied_mv_2 = p_uply
            amt1 = applied_mv_1 - applied_mv_2
            item_no = "RL-31"
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", p_ins_typ, p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
        , p_avg_price, p_mkt_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_mkt_value if p_mkt_value > 0 else 0), (abs(p_mkt_value) if p_mkt_value < 0 else 0)
        , "", p_und_price, 0, 0, applied_mv_1, applied_mv_2, a_l, amt1, amt2, amt3, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for OTC index or equity option
def opi_opo_frr(frr_array, cur, e_ent, e_und):
    """FRR treatment for OTC index / equity options of one
    (entity, underlying) pair.

    Appends one report row per position to ``frr_array`` (mutated in place)
    and flags each processed ``ins`` row with ``frr_written = 1``.

    Parameters:
        frr_array: list accumulating FRR output rows.
        cur: DB cursor over the working ``ins`` table.
        e_ent: entity id being processed.
        e_und: underlying id being processed.

    NOTE(review): ``im`` is hard-coded to 0, so the ``amt3 = abs(2*im)``
    and the ``im`` column in the appended row are always 0 -- presumably a
    margin lookup was planned here; confirm before relying on amt3.
    """
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
    call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,(market_price-average_price)*quantity*conversion_factor
    from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
    and (instrument_type = 'OTC Index Option' or instrument_type = 'OTC Equity Option')
    order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        # Positional unpack -- order must match the select list above.
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        amt1 = 0
        amt2 = 0
        amt3 = 0
        applied_mv_1 = 0
        applied_mv_2 = 0
        p_und_value = p_ph_qty*p_und_price
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        mv = p_uncovered_qty*p_mkt_price
        hc_amt = mv*p_hc
        p_uply = p_uncovered_qty*p_conv_factor*(p_mkt_price-p_avg_price)
        im = 0
        item_no = ""
        if p_pos == "L":
            # Long OTC option: only a negative unrealised P&L ranks (S48).
            a_l = "RL"
            subsection = "S48"
            applied_mv_1 = p_uply if p_uply < 0 else 0
            amt1 = applied_mv_1
        else:
            # Short OTC option: twice market value, plus twice the
            # in-the-money amount when the option is ITM (S40).
            a_l = "RL"
            subsection = "S40"
            applied_mv_1 = abs(mv*2)
            if (p_cp == "C" and p_str_price < p_und_price) or(p_cp == "P" and p_str_price > p_und_price):
                applied_mv_2 = abs(p_uncovered_qty*(p_str_price - p_und_price)*2)
            amt1 = applied_mv_1
            amt2 = applied_mv_2
            amt3 = abs(2*im)
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", p_ins_typ, p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
        , p_avg_price, p_mkt_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_mkt_value if p_mkt_value > 0 else 0), (abs(p_mkt_value) if p_mkt_value < 0 else 0)
        , "", p_und_price, 0, 0, applied_mv_1, im, a_l, amt1, amt2, amt3, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
# FRR rules for FX
def fx_frr(frr_array, cur, e_ent, e_und):
    """FRR treatment for FX positions of one (entity, underlying) pair.

    Mirrors ``opi_opo_frr`` but selects only ``instrument_type = 'FX'`` rows
    that have not yet been written (``frr_written = 0``).  Appends one report
    row per position to ``frr_array`` (mutated in place) and flags each
    processed ``ins`` row with ``frr_written = 1``.

    Parameters:
        frr_array: list accumulating FRR output rows.
        cur: DB cursor over the working ``ins`` table.
        e_ent: entity id being processed.
        e_und: underlying id being processed.

    NOTE(review): unlike ``opi_opo_frr`` this bases ``mv`` on the
    *underlying* price rather than the market price, and ``im`` is
    hard-coded to 0 -- confirm both are intentional.
    """
    cur.execute("""select ins.rowid,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
    call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued,(market_price-average_price)*quantity*conversion_factor
    from ins where quantity <> 0 and entity = '%s' and underlying = '%s'
    and (instrument_type = 'FX')
    and frr_written = 0
    order by instrument_type """ % (e_ent, e_und))
    princip_rows = cur.fetchall()
    for princip_row in princip_rows:
        # Positional unpack -- order must match the select list above.
        p_id = princip_row[0]
        p_ins = princip_row[1]
        p_und = princip_row[2]
        p_und_price = dec(princip_row[3])
        p_str_price = dec(princip_row[4])
        p_conv_factor = dec(princip_row[5])
        p_expiry = princip_row[6]
        p_prf = princip_row[7]
        p_ins_typ = princip_row[8]
        p_cp = princip_row[9]
        p_ccy = princip_row[10]
        p_pos = princip_row[11]
        p_qty = dec(princip_row[12])
        p_avg_price = dec(princip_row[13])
        p_mkt_price = dec(princip_row[14])
        p_mkt_value = dec(princip_row[15])
        p_ph_qty = dec(princip_row[16])
        p_hc = dec(princip_row[17])
        p_total_issued = princip_row[18]
        amt1 = 0
        amt2 = 0
        amt3 = 0
        applied_mv_1 = 0
        applied_mv_2 = 0
        p_und_value = p_ph_qty*p_und_price
        p_uncovered_qty = (p_ph_qty / p_conv_factor) if p_conv_factor > 0 else 0
        p_covered_qty = p_qty - p_uncovered_qty
        mv = p_uncovered_qty*p_und_price
        hc_amt = mv*p_hc
        p_uply = p_uncovered_qty*p_conv_factor*(p_mkt_price-p_avg_price)
        im = 0
        item_no = ""
        if p_pos == "L":
            # Long: only a negative unrealised P&L ranks (S48).
            a_l = "RL"
            subsection = "S48"
            applied_mv_1 = p_uply if p_uply < 0 else 0
            amt1 = applied_mv_1
        else:
            # Short: twice market value plus twice any in-the-money amount (S40).
            a_l = "RL"
            subsection = "S40"
            applied_mv_1 = abs(mv*2)
            if (p_cp == "C" and p_str_price < p_und_price) or(p_cp == "P" and p_str_price > p_und_price):
                applied_mv_2 = abs(p_uncovered_qty*(p_str_price - p_und_price)*2)
            amt1 = applied_mv_1
            amt2 = applied_mv_2
            amt3 = abs(2*im)
        frr_array.append([e_ent, p_prf, p_ins, p_und, "", p_ins_typ, p_pos, "", "", p_qty, "", p_covered_qty, p_uncovered_qty
        , p_avg_price, p_mkt_price, p_cp, p_ccy, p_str_price, p_expiry, p_conv_factor, p_total_issued, (p_mkt_value if p_mkt_value > 0 else 0), (abs(p_mkt_value) if p_mkt_value < 0 else 0)
        , "", p_und_price, 0, 0, applied_mv_1, im, a_l, amt1, amt2, amt3, subsection, item_no])
        cur.execute("""update ins set frr_written = 1 where rowid = %s """ % (p_id))
    return
def frr_chksum(cur):
    """Build three cross-check datasets over the generated ``frr`` table.

    Returns a 3-tuple of row lists:
        frc_rows: per (entity, product, currency) totals of liquid assets
                  ('LA') vs ranking liabilities ('RL').
        frd_rows: the full FRR detail rows.
        rec_rows: reconciliation of covered/uncovered quantities in ``frr``
                  against the source ``ins`` quantities.

    NOTE(review): the reconciliation query is hard-wired to the
    'HTISEC - EDD' entity -- confirm whether other entities need the same
    check.
    """
    # debug
    cur.execute("""
    select entity,prod_1,currency,
    sum(case when asset_liab = 'LA' then frr_amt1 + frr_amt2 + frr_amt3 else 0 end),
    sum(case when asset_liab = 'RL' then frr_amt1 + frr_amt2 + frr_amt3 else 0 end)
    from frr
    group by entity,prod_1,currency
    order by entity desc,prod_1
    """)
    frc_rows = cur.fetchall()
    cur.execute("""
    select entity,instrument1,prod_1,currency,instrument2,qty_1,qty_1_covered,qty_1_uncovered,asset_liab,frr_amt1,frr_amt2,frr_amt3,rules
    from frr
    order by entity desc,prod_1""" )
    frd_rows = cur.fetchall()
    cur.execute("""
    select frr.entity, frr.instrument1, frr.prod_1, sum(qty_1_covered), sum(qty_1_uncovered), sum(qty_1_covered)+sum(qty_1_uncovered), sum(ins.quantity)
    from frr left join ins
    on frr.entity = ins.entity
    and frr.portfolio = ins.portfolio
    and frr.instrument1 = ins.instrument
    where frr.entity = 'HTISEC - EDD' and frr.prod_2 = '' and frr.prod_1 <> 'Equity'
    group by frr.entity, frr.instrument1, ins.entity, ins.instrument
    union
    select frr.entity, frr.underlying, frr.prod_2, frr.qty_2, tmp.qty_1, frr.qty_2+tmp.qty_1, ins.quantity
    from (select entity, underlying, prod_2, sum(qty_2) as qty_2 from frr where frr.entity = 'HTISEC - EDD' and frr.prod_2 <> '' group by entity, underlying, prod_2) frr
    left join (select entity, instrument1, sum(qty_1_uncovered) as qty_1 from frr where entity = 'HTISEC - EDD' and prod_1 = 'Equity' group by entity, instrument1) tmp
    on frr.entity = tmp.entity
    and frr.underlying = tmp.instrument1
    left join (select entity, instrument, sum(quantity) as quantity from ins group by entity, instrument) ins
    on frr.entity = ins.entity
    and frr.underlying = ins.instrument
    """)
    rec_rows = cur.fetchall()
    return frc_rows, frd_rows, rec_rows
def frr_calc(cur, asofdate, dict):
    """Drive the whole FRR calculation over the pre-loaded ``ins`` table.

    Steps: load haircut and SEOCH/HKCC margin reference files into working
    tables, enrich ``ins`` with haircut/physical-quantity columns, run the
    pairing passes (netting principals against hedges) per
    (entity, underlying), then the per-product FRR rule functions, and
    finally export the requested output files.

    Parameters:
        cur: DB cursor; ``ins`` must already exist and be populated.
        asofdate: report date (an ael date object or a 'YYYYMMDD' string --
            see the try/except around the filename substitution).
        dict: job parameter dictionary (filenames, flags).  NOTE(review):
            the parameter shadows the ``dict`` builtin; renaming would
            change the public signature, so it is only flagged here.

    Returns:
        The accumulated list of FRR report rows.
    """
    # Output column headers (also used as CREATE TABLE column lists).
    header_frc = "entity,prod_type,currency,asset,liab"
    header_frd = "entity,principal_instrument,prod_type,currency,hedging_instrument,quantity,covered_qty,uncovered_qty,asset_liab,amt1,amt2,amt3,rules"
    header_frr = "entity,portfolio,instrument1,underlying,instrument2,prod_1,pos_1,prod_2,pos_2,qty_1,qty_2,qty_1_covered,qty_1_uncovered,wacc,closing_price,cp,currency,strike_price,expiry,conv_factor,p_total_issued,long_mv,short_mv,ins_value,underlying_price,hc,hc_amt,applied_mv_1,applied_mv_2,asset_liab,frr_amt1,frr_amt2,frr_amt3,rules,item_no"
    header_fre = "hti_entity,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued"
    header_rec = "entity,instrument,product_type,covered_qty,uncovered_qty,frr_sum,ins_sum"
    # Required files
    # Pick the newest non-empty haircut file matching the glob pattern.
    filenameHaircut = max([f for f in glob.iglob(dict['filename_haircut']) if os.stat(f).st_size > 0 ])
    fileName_seoch = dict['filename_seoch']
    fileName_hkcc = dict['filename_hkcc']
    frr_array = []
    asset_arr = []
    liab_arr = []
    # Sanitise the haircut CSV header into a usable column list and drop the
    # instrument-name column (header and data column 5).
    header_haircut, haircut_array = csv_to_arr(filenameHaircut, start=2)
    header_haircut = re.sub(r"[?\*\.#/\$%\"\(\)& \_-]", "", header_haircut)
    header_haircut = header_haircut.replace("InstrumentName,", "")
    for row in haircut_array:
        del row[5]
    # Load clearing-house margin tables (SEOCH options, HKCC futures/options).
    o_header, o_margin_tbl = fo_margin.seoch_margin_process(fileName_seoch)
    f_header, f_margin_tbl, hsio_header, hsio_margin_tbl, hhio_header, hhio_margin_tbl, mhio_header, mhio_margin_tbl = fo_margin.hkcc_margin_process(fileName_hkcc)
    cur.execute("CREATE TABLE o_margin (" + o_header + ");")
    cur.executemany("INSERT INTO o_margin VALUES ("+question_marks(o_header)+")", o_margin_tbl)
    cur.execute("CREATE TABLE f_margin (" + f_header + ");")
    cur.executemany("INSERT INTO f_margin VALUES ("+question_marks(f_header)+")", f_margin_tbl)
    cur.execute("CREATE TABLE hsio_margin (" + hsio_header + ");")
    cur.executemany("INSERT INTO hsio_margin VALUES ("+question_marks(hsio_header)+")", hsio_margin_tbl)
    cur.execute("CREATE TABLE hhio_margin (" + hhio_header + ");")
    cur.executemany("INSERT INTO hhio_margin VALUES ("+question_marks(hhio_header)+")", hhio_margin_tbl)
    cur.execute("CREATE TABLE mhio_margin (" + mhio_header + ");")
    cur.executemany("INSERT INTO mhio_margin VALUES ("+question_marks(mhio_header)+")", mhio_margin_tbl)
    cur.execute("CREATE TABLE haircut (" + header_haircut + ");")
    cur.executemany("INSERT INTO haircut VALUES ("+question_marks(header_haircut)+")", haircut_array)
    # Enrich ins: haircut rate (default 0.3 when unmapped), physical quantity
    # and the frr_written bookkeeping flag used by the rule functions.
    cur.execute("alter table ins add column hc numeric(7,4) default null")
    cur.execute("update ins set hc = (select CashHaircutRate from haircut where haircut.BloombergCode = ins.underlying)")
    cur.execute("update ins set hc = 0.3 where hc is null")
    cur.execute("update ins set conversion_factor = 1 where instrument_type = 'Equity' or instrument_type = 'ETF' ")
    cur.execute("alter table ins add column ph_qty int default 0")
    cur.execute("update ins set ph_qty = quantity * conversion_factor")
    cur.execute("alter table ins add column frr_written int default 0")
    #print "FRR Calculation"
    # Quantity matching to find uovered portion
    # Get all principal instruments
    cur.execute("""select distinct entity, underlying from ins
    group by entity, underlying
    order by entity, underlying """)
    ent_rows = cur.fetchall()
    for ent_row in ent_rows:
        e_ent = ent_row[0]
        e_und = ent_row[1]
        # Pairing up principal and hedging intruments by underlying and entity
        net_eqt_pairing(cur, e_ent, e_und)
        wrt_eqt_pairing(cur, e_ent, e_und)
        opt_eqt_pairing(cur, e_ent, e_und)
        opt_fuo_pairing(cur, e_ent, e_und)
        fuo_eqt_pairing(cur, e_ent, e_und)
        # Fetch again to calculate from new principal and hedging quantity
        #print "After pairing for entity " + e_ent + " and underlying " + e_und
        wrt_eqt_covered_frr(frr_array, cur, e_ent, e_und)
        opt_eqt_covered_frr(frr_array, cur, e_ent, e_und)
        opt_fuo_covered_frr(frr_array, cur, e_ent, e_und)
        eqt_frr(frr_array, cur, e_ent, e_und)
        wrt_frr(frr_array, cur, e_ent, e_und)
        opt_opf_frr(frr_array, cur, e_ent, e_und)
        fut_fuo_frr(frr_array, cur, e_ent, e_und)
        opi_opo_frr(frr_array, cur, e_ent, e_und)
        fx_frr(frr_array, cur, e_ent, e_und)
    # Export any positions the rule functions never touched (exceptions file).
    if "filename_fre" in dict:
        fileNameFre = dict['filename_fre']
        # NOTE(review): bare except -- falls back to treating asofdate as a
        # plain string; the fallback's .replace() result is also discarded.
        try:
            fileNameFre = fileNameFre.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
        except:
            fileNameFre.replace("YYYYMMDD", asofdate)
        cur.execute("""select entity,instrument,underlying,underlying_price,strike_price,conversion_factor,expiry,portfolio,instrument_type,
    call_put,currency,position,quantity,average_price,market_price,market_value,ph_qty,hc,total_issued
    from ins
    where ins.quantity <> 0 and frr_written = 0
    order by instrument_type """ )
        fre_rows = cur.fetchall()
        export_to_file(fileNameFre, header_fre, fre_rows)
    # Export the main FRR report and load it into the frr table for checks.
    if "filename_frr" in dict:
        fileNameFrr = dict['filename_frr']
        try:
            fileNameFrr = fileNameFrr.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
        except:
            fileNameFrr.replace("YYYYMMDD", asofdate)
        export_to_file(fileNameFrr, header_frr, frr_array)
    cur.execute("CREATE TABLE frr (" + header_frr + ");")
    cur.executemany("INSERT INTO frr VALUES ("+question_marks(header_frr)+")", frr_array)
    # Optional checksum / reconciliation workbook.
    if "gen_chk" in dict and dict['gen_chk'] == "Y" and "filename_frc" in dict:
        fileNameFrc = dict['filename_frc']
        try:
            fileNameFrc = fileNameFrc.replace("YYYYMMDD", asofdate.to_string('%Y%m%d'))
        except:
            fileNameFrc.replace("YYYYMMDD", asofdate)
        frc_rows, frd_rows, rec_rows = frr_chksum(cur)
        arrs_to_xlsx(fileNameFrc, [header_frc, header_frd, header_rec], [frc_rows, frd_rows, rec_rows])
    return frr_array
def net_eqt_pairing(cur, e_ent, e_und):
    """Net opposite-signed physical Equity/ETF positions for one
    (entity, underlying) pair.

    Long ``ph_qty`` rows are offset against short ``ph_qty`` rows until one
    side of each pairing is exhausted; the residual quantities are written
    back to ``ins.ph_qty``.
    """
    # Long physical positions (ph_qty > 0) form the covering side.
    cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty > 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Equity' or instrument_type = 'ETF')
    order by instrument_type """ % (e_ent, e_und))
    cover_rows = cur.fetchall()
    for cover_row in cover_rows:
        cover_id = cover_row["row_id"]
        cover_cp = cover_row["call_put"]
        cover_pos = cover_row["position"]
        cover_qty = cover_row["ph_qty"]
        # Short physical positions (ph_qty < 0) are netted against the cover.
        cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty, instrument
    from ins
    where ins.ph_qty < 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Equity' or instrument_type = 'ETF')
    order by strike_price asc, instrument_type """ % (e_ent, e_und))
        for short_row in cur.fetchall():
            short_id = short_row["row_id"]
            short_cp = short_row["call_put"]
            short_pos = short_row["position"]
            short_qty = short_row["ph_qty"]
            # Reduce the larger absolute quantity by the smaller one and zero
            # the smaller side: same-signed quantities subtract, opposite
            # signs add.
            if abs(short_qty) > abs(cover_qty):
                short_qty = short_qty - cover_qty if short_qty * cover_qty > 0 else short_qty + cover_qty
                cover_qty = 0
            else:
                cover_qty = cover_qty - short_qty if short_qty * cover_qty > 0 else cover_qty + short_qty
                short_qty = 0
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (short_qty, short_id))
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (cover_qty, cover_id))
    return
def wrt_eqt_pairing(cur, e_ent, e_und):
    """Pair short call listed warrants against long Equity/ETF positions of
    one (entity, underlying) pair, netting ``ph_qty`` on both sides and
    persisting the residual quantities back to ``ins``."""
    cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Equity' or instrument_type = 'ETF')
    order by instrument_type """ % (e_ent, e_und))
    cover_rows = cur.fetchall()
    for cover_row in cover_rows:
        cover_id = cover_row["row_id"]
        cover_cp = cover_row["call_put"]
        cover_pos = cover_row["position"]
        cover_qty = cover_row["ph_qty"]
        # Candidate warrants, cheapest strike first.
        cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty, instrument
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Listed Warrant' )
    order by strike_price asc, instrument_type """ % (e_ent, e_und))
        for wrt_row in cur.fetchall():
            wrt_id = wrt_row["row_id"]
            wrt_cp = wrt_row["call_put"]
            wrt_pos = wrt_row["position"]
            wrt_qty = wrt_row["ph_qty"]
            # The equity leg must be long with quantity remaining.
            if cover_pos == "S" or cover_qty <= 0:
                break
            # Only short call warrants can be covered by long stock.
            if wrt_pos != "S" or wrt_cp != "C":
                continue
            # Offset the smaller absolute quantity against the larger one.
            if abs(wrt_qty) > abs(cover_qty):
                wrt_qty = wrt_qty - cover_qty if wrt_qty * cover_qty > 0 else wrt_qty + cover_qty
                cover_qty = 0
            else:
                cover_qty = cover_qty - wrt_qty if wrt_qty * cover_qty > 0 else cover_qty + wrt_qty
                wrt_qty = 0
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (wrt_qty, wrt_id))
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (cover_qty, cover_id))
    return
def opt_eqt_pairing(cur, e_ent, e_und):
    """Pair short call listed equity options against long Equity/ETF
    positions of one (entity, underlying) pair, netting ``ph_qty`` on both
    sides and persisting the residual quantities back to ``ins``."""
    cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Equity' or instrument_type = 'ETF' )
    order by instrument_type """ % (e_ent, e_und))
    cover_rows = cur.fetchall()
    for cover_row in cover_rows:
        cover_id = cover_row["row_id"]
        cover_cp = cover_row["call_put"]
        cover_pos = cover_row["position"]
        cover_qty = cover_row["ph_qty"]
        cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and instrument_type = 'Listed Equity Option'
    order by instrument_type """ % (e_ent, e_und))
        for opt_row in cur.fetchall():
            opt_id = opt_row["row_id"]
            opt_cp = opt_row["call_put"]
            opt_pos = opt_row["position"]
            opt_qty = opt_row["ph_qty"]
            # Short equity legs never cover; long legs only cover short calls.
            if cover_pos == "S":
                continue
            if cover_pos == "L" and not (opt_pos == "S" and opt_cp == "C"):
                continue
            if cover_qty <= 0:
                break
            # Offset the smaller absolute quantity against the larger one.
            if abs(opt_qty) > abs(cover_qty):
                opt_qty = opt_qty - cover_qty if opt_qty * cover_qty > 0 else opt_qty + cover_qty
                cover_qty = 0
            else:
                cover_qty = cover_qty - opt_qty if opt_qty * cover_qty > 0 else cover_qty + opt_qty
                opt_qty = 0
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (opt_qty, opt_id))
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (cover_qty, cover_id))
    return
def opt_fuo_pairing(cur, e_ent, e_und):
    """Pair listed equity options against listed equity futures of one
    (entity, underlying) pair, netting ``ph_qty`` on both sides and
    persisting the residual quantities back to ``ins``.

    NOTE(review): with ``ph_qty = quantity * conversion_factor`` a short
    future has negative ph_qty, so the ``<= 0`` break fires immediately and
    the short-future branch never pairs -- confirm this is intended.
    """
    cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and instrument_type = 'Listed Equity Future'
    order by instrument_type """ % (e_ent, e_und))
    fut_rows = cur.fetchall()
    for fut_row in fut_rows:
        fut_id = fut_row["row_id"]
        fut_cp = fut_row["call_put"]
        fut_pos = fut_row["position"]
        fut_qty = fut_row["ph_qty"]
        cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and instrument_type = 'Listed Equity Option'
    order by instrument_type """ % (e_ent, e_und))
        for opt_row in cur.fetchall():
            opt_id = opt_row["row_id"]
            opt_cp = opt_row["call_put"]
            opt_pos = opt_row["position"]
            opt_qty = opt_row["ph_qty"]
            # Eligible (option position, call/put) combinations per future side.
            if fut_pos == "S" and (opt_pos, opt_cp) not in (("L", "C"), ("S", "P")):
                continue
            if fut_pos == "L" and (opt_pos, opt_cp) not in (("S", "C"), ("L", "P")):
                continue
            if fut_qty <= 0:
                break
            # Offset the smaller absolute quantity against the larger one.
            if abs(opt_qty) > abs(fut_qty):
                opt_qty = opt_qty - fut_qty if opt_qty * fut_qty > 0 else opt_qty + fut_qty
                fut_qty = 0
            else:
                fut_qty = fut_qty - opt_qty if opt_qty * fut_qty > 0 else fut_qty + opt_qty
                opt_qty = 0
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (opt_qty, opt_id))
            cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (fut_qty, fut_id))
    return
def fuo_eqt_pairing(cur, e_ent, e_und):
    """Pair listed/OTC equity futures against the first Equity/ETF position
    of one (entity, underlying) pair, netting ``ph_qty`` on both sides and
    persisting the residual quantities back to ``ins``.

    Unlike the other pairing passes, only the first equity row returned is
    used as the covering leg.
    """
    cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Equity' or instrument_type = 'ETF')
    order by instrument_type """ % (e_ent, e_und))
    eq_rows = cur.fetchall()
    if not eq_rows:
        return
    eq_row = eq_rows[0]
    eq_id = eq_row["row_id"]
    eq_cp = eq_row["call_put"]
    eq_pos = eq_row["position"]
    eq_qty = eq_row["ph_qty"]
    cur.execute("""select ins.rowid as row_id,call_put,position,ph_qty
    from ins
    where ins.ph_qty <> 0 and ins.entity = '%s' and ins.underlying = '%s'
    and (instrument_type = 'Listed Equity Future' or instrument_type = 'OTC Equity Future')
    order by instrument_type """ % (e_ent, e_und))
    for fut_row in cur.fetchall():
        fut_id = fut_row["row_id"]
        fut_cp = fut_row["call_put"]
        fut_pos = fut_row["position"]
        fut_qty = fut_row["ph_qty"]
        # Eligible (future position, call/put) combinations per equity side.
        if eq_pos == "S" and (fut_pos, fut_cp) not in (("L", "C"), ("S", "P")):
            continue
        if eq_pos == "L" and (fut_pos, fut_cp) not in (("S", "C"), ("L", "P")):
            continue
        if eq_qty <= 0:
            break
        # Offset the smaller absolute quantity against the larger one.
        if abs(fut_qty) > abs(eq_qty):
            fut_qty = fut_qty - eq_qty if fut_qty * eq_qty > 0 else fut_qty + eq_qty
            eq_qty = 0
        else:
            eq_qty = eq_qty - fut_qty if fut_qty * eq_qty > 0 else eq_qty + fut_qty
            fut_qty = 0
        cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (fut_qty, fut_id))
        cur.execute("""update ins set ph_qty = %s where rowid = %s """ % (eq_qty, eq_id))
    return
def ael_main(dict):
ret = False
asofdate = dict['posdate']
if asofdate == 'Today':
posdate = ael.date_today()
elif asofdate == 'Yesterday':
posdate = ael.date_today().add_days(-1)
else:
asofdateArr = dict['posdate'].split('/')
posdate = ael.date_from_ymd(int(asofdateArr[2]), int(asofdateArr[1]), int(asofdateArr[0]))
posdatetp1 = posdate
hk_cal = acm.FCalendar.Select("name='Hong Kong'")[0]
while True:
posdatetp1 = posdatetp1.add_days(1)
if not hk_cal.IsNonBankingDay(hk_cal, hk_cal, posdatetp1):
break
# Acquirers
acq_array_list = dict['acq']
acq_list = ''
for acq in acq_array_list:
if acq_list == '':
acq_list = "'" + acq + "'"
else:
acq_list = acq_list + ",'" + acq + "'"
# Product Types
prod_type_list = dict['prd']
ptype_list = ''
for ptype in prod_type_list:
if ptype_list == '':
ptype_list = "'" + ptype + "'"
else:
ptype_list = ptype_list + ",'" + ptype+ "'"
portfolios = dict['pfs']
portfolioList2 = []
pf_list = ''
portfolioList2.extend(portfolios)
for port in portfolioList2:
prfid = port
pfarr = []
pPf = ael.Portfolio[prfid]
HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
if len(pfarr) > 0:
for pf in pfarr:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + pf + "'"
else:
if len(pf_list) != 0:
pf_list = pf_list + ','
pf_list = pf_list + "'" + prfid + "'"
prf_sbl = "'" + dict["prf_sbl"].replace(",","','") + "'"
gen_expired_pos = dict['gen_expired_pos']
gen_sbl = dict['gen_sbl']
strSql = """
select t.trdnbr
from instrument i, trade t, party acq, portfolio pf
where i.insaddr = t.insaddr
and t.status not in ('Void', 'Simulated')
and t.acquirer_ptynbr = acq.ptynbr
and t.prfnbr = pf.prfnbr
and acq.ptyid in (@acquirer_list)
and t.time < '@d_tp1'
and i.instype in (@ptype_list)
and pf.prfid in (@portfolio_list)
"""
strSql2 = """
select i.insid, acq.ptyid, pf.prfid, sum(t.quantity) 'qty',
(sum(t.quantity) >= 0 ?
sum( t.quantity >= 0 ? t.quantity*t.price : 0 ) / sum( t.quantity >= 0 ? t.quantity : 0 )
:
sum( t.quantity < 0 ? t.quantity*t.price : 0 ) / sum( t.quantity < 0 ? t.quantity : 0 )
) 'avg_price',
(sum(t.quantity) < 0 ?
sum( t.quantity >= 0 ? t.quantity*t.price : 0 ) / sum( t.quantity >= 0 ? t.quantity : 0 )
:
sum( t.quantity < 0 ? t.quantity*t.price : 0 ) / sum( t.quantity < 0 ? t.quantity : 0 )
) 'avg_price_exec',
(sum(t.quantity) >= 0 ? sum( t.quantity < 0 ? t.quantity : 0 ) : sum( t.quantity >= 0 ? t.quantity : 0 ) ) 'qty_exec'
into temp
from instrument i, trade t, party acq, portfolio pf
where i.insaddr = t.insaddr
and t.status not in ('Void', 'Simulated')
and t.acquirer_ptynbr = acq.ptynbr
and t.prfnbr = pf.prfnbr
and acq.ptyid in (@acquirer_list)
and t.time < '@d_tp1'
and (i.exp_day > '@dt' or i.exp_day = '0000-01-01')
and i.instype in (@ptype_list)
and pf.prfid in (@portfolio_list)
group by pf.prfid, acq.ptyid, i.insid
select i.insid, add_info(i, 'Local Exchange Code'), add_info(i, 'MIC'), t.ptyid, t.prfid, t.qty, t.avg_price, t.avg_price_exec, t.qty_exec
from temp t, instrument i
where t.insid = i.insid
"""
strSql3 = """
select t.trdnbr
from instrument i, trade t, party acq, portfolio pf
where i.insaddr = t.insaddr
and t.status not in ('Void', 'Simulated')
and t.acquirer_ptynbr = acq.ptynbr
and t.prfnbr = pf.prfnbr
and acq.ptyid in (@acquirer_list)
and t.time < '@d_tp1'
and i.instype in (@ptype_list)
and pf.prfid in (@portfolio_list)
"""
month_start = ael.date_from_ymd(int(posdate.to_string('%Y')), int(posdate.to_string('%m')), 1)
month_end = month_start.add_months(1).add_days(-1)
if gen_expired_pos == 'Today':
strSql2 = strSql2 + "and (i.exp_day >= '@dt' or i.exp_day = '0000-01-01')"
if gen_expired_pos == 'Monthly':
strSql2 = strSql2 + "and ((i.exp_day >= '@month_start' and i.exp_day <= '@month_end' ) or i.exp_day = '0000-01-01')"
strSql = strSql.replace('@acquirer_list', acq_list)
strSql = strSql.replace('@portfolio_list', pf_list)
strSql = strSql.replace('@d_tp1', posdatetp1.to_string('%Y-%m-%d'))
strSql = strSql.replace('@dt', posdate.to_string('%Y-%m-%d'))
strSql = strSql.replace('@ptype_list', ptype_list)
strSql = strSql.replace('@month_start', month_start.to_string('%Y-%m-%d'))
strSql = strSql.replace('@month_end', month_end.to_string('%Y-%m-%d'))
#print strSql
strSql2 = strSql2.replace('@acquirer_list', acq_list)
strSql2 = strSql2.replace('@portfolio_list', pf_list)
strSql2 = strSql2.replace('@dt', posdate.to_string('%Y-%m-%d'))
strSql2 = strSql2.replace('@d_tp1', posdatetp1.to_string('%Y-%m-%d'))
strSql2 = strSql2.replace('@ptype_list', ptype_list)
strSql2 = strSql2.replace('@month_start', month_start.to_string('%Y-%m-%d'))
strSql2 = strSql2.replace('@month_end', month_end.to_string('%Y-%m-%d'))
strSql3 = strSql3.replace('@acquirer_list', acq_list)
strSql3 = strSql3.replace('@portfolio_list', prf_sbl)
strSql3 = strSql3.replace('@d_tp1', posdatetp1.to_string('%Y-%m-%d'))
strSql3 = strSql3.replace('@dt', posdate.to_string('%Y-%m-%d'))
strSql3 = strSql3.replace('@ptype_list', ptype_list)
strSql3 = strSql3.replace('@month_start', month_start.to_string('%Y-%m-%d'))
strSql3 = strSql3.replace('@month_end', month_end.to_string('%Y-%m-%d'))
trade_filter = dict['tfs']
# FAILURE_EMAILLIST = dict['failure_emaillist']
# print 'Failure Email List:', FAILURE_EMAILLIST
# FAILURE_RECIPIENTS = FAILURE_EMAILLIST.split(',')
# SUCCESS_EMAILLIST = dict['success_emaillist']
# print 'Success Email List:', SUCCESS_EMAILLIST
# SUCCESS_RECIPIENTS = SUCCESS_EMAILLIST.split(',')
successSubject = dict['success_email_subj']
errSubject = dict['failure_email_subj']
# send_failureEmail = dict['failureEmail']
# send_successEmail = dict['successEmail']
fileNameIns = dict['fileNameIns']
fileNameIns = fileNameIns.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
fileNameTrd = dict['fileNameTrd']
fileNameTrd = fileNameTrd.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
# fileNameJrn = dict['filename_jrn']
# fileNameJrn = fileNameJrn.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
# fileNameExc = dict['filename_exc']
# fileNameExc = fileNameExc.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
# fileNameBal = dict['filename_bal']
# fileNameBal = fileNameBal.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
fileNameFrr = dict['filename_frr']
fileNameFrr = fileNameFrr.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
# Register the adapter
sqlite3.register_adapter(decimal.Decimal, adapt_decimal)
# Register the converter
sqlite3.register_converter("DECTEXT", convert_decimal)
conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
try:
if gen_expired_pos == 'Today':
tobject = ael.TextObject.read('type="SQL Query" and name="%s"' % ("tf_edd_account_jorunal_qry"))
tobject_c = tobject.clone()
tobject_c.set_text(strSql)
tobject_c.commit()
ael.poll()
trd_array = trd_records(cur, strSql, posdate, dict)
tobject_c.set_text(strSql2)
tobject_c.commit()
ael.poll()
ins_array = ins_qty_and_avgprice_no_pnl(cur, strSql2, posdate, dict)
if gen_sbl == "Y":
sbl_array = sbl_dump(cur, strSql3, posdate, dict)
# jrn_array, exc_array, bal_array = acc_journal(cur, posdate, posdatetp1, dict)
frr_array = frr_calc(cur, posdate, dict)
# d1_sbl_export(cur, posdate, dict)
else:
ins_array = ins_qty_and_avgprice_no_pnl(cur, strSql2, posdate, dict)
ret = True
finally:
conn.close()
# email_content = 'Date: %s' % posdate.to_string('%Y-%m-%d') + '\n'
# attached_filename = os.path.basename(fileNameIns)
# attached_filenametrd = os.path.basename(fileNameTrd)
# # attached_filenamejrn = os.path.basename(fileNameJrn)
# attached_filenamefrr = os.path.basename(fileNameFrr)
# attached_filedir = os.path.dirname(fileNameIns) + "\\"
# attached_filedirtrd = os.path.dirname(fileNameTrd) + "\\"
# # attached_filedirjrn = os.path.dirname(fileNameJrn) + "\\"
# attached_filedirfrr = os.path.dirname(fileNameFrr) + "\\"
if ret:
# if send_successEmail == 'Y':
# HTI_Email_Util.SendAttachment(SUCCESS_RECIPIENTS, successSubject, email_content, [attached_filedir], [attached_filename], True)
# HTI_Email_Util.SendAttachment(SUCCESS_RECIPIENTS, successSubject, email_content, [attached_filedirtrd], [attached_filenametrd], True)
# # HTI_Email_Util.SendAttachment(SUCCESS_RECIPIENTS, successSubject, email_content, [attached_filedirjrn], [attached_filenamejrn], True)
# HTI_Email_Util.SendAttachment(SUCCESS_RECIPIENTS, successSubject, email_content, [attached_filedirfrr], [attached_filenamefrr], True)
# else:
print successSubject
print fileNameIns
print fileNameTrd
# print fileNameJrn
print fileNameFrr
else:
# if send_failureEmail == 'Y':
# HTI_Email_Util.SendAttachment(FAILURE_RECIPIENTS, errSubject, email_content, [attached_filedir], [attached_filename], True)
# HTI_Email_Util.SendAttachment(FAILURE_RECIPIENTS, errSubject, email_content, [attached_filedirtrd], [attached_filenametrd], True)
# # HTI_Email_Util.SendAttachment(FAILURE_RECIPIENTS, errSubject, email_content, [attached_filedirjrn], [attached_filenamejrn], True)
# HTI_Email_Util.SendAttachment(SUCCESS_RECIPIENTS, successSubject, email_content, [attached_filedirfrr], [attached_filenamefrr], True)
# else:
print errSubject
print fileNameIns
print fileNameTrd
# print fileNameJrn
print fileNameFrr
| {
"repo_name": "frederick623/HTI",
"path": "frr/account_journal.py",
"copies": "1",
"size": "127328",
"license": "apache-2.0",
"hash": 22835979140801616,
"line_mean": 41.756212223,
"line_max": 360,
"alpha_frac": 0.5430384519,
"autogenerated": false,
"ratio": 3.037259672725538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40802981246255376,
"avg_score": null,
"num_lines": null
} |
# 20181018 Add empty trade file checking
import os
import sys
import re
import csv
import numpy as np
import fnmatch
import math
import datetime
import xlrd
import traceback
import glob
import pypdftk
from FeeCalc import FeeCalc
# Production file locations (UNC shares / mapped drives).
# "????????" and "YYYYMMDD" placeholders are substituted with today's date,
# "[client_name]" with the counterparty name (spaces -> underscores).
PATH_DICT = {
	"ull_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\FA_Trade_Import",
	"ull_name": "ullink_to_fa_????????.csv",
	"pbt_path": "S:\\Prime Brokerage (PB)\\Tools\\Daily Trading Tools",
	"pbt_name": "PB Trading_v1.75_????????.xlsm",
	"acc_file": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\account.xlsx",
	"tran_template": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\SecuritiesTrfHTIFS.pdf",
	"tran_pdf_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Stock_Transfer\\[client_name]_SecuritiesTrfHTIFS.pdf",
	"tran_csv_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Stock_Transfer\\[client_name]_SecuritiesTransfer.csv",
	"fa_output": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\FA_Trade_Import\\pb_to_fa_YYYYMMDD.csv",
}
# Local-development path overrides, kept for reference.
# PATH_DICT = {
# 	"ull_dir": os.path.dirname(os.path.abspath(__file__)),
# 	"ull_name": "ullink_to_fa_????????.csv",
# 	"pbt_path": "D:\\Projects\\pb\\trade_engine",
# 	"pbt_name": "PB Trading_v?.??_????????.xlsm",
# 	"acc_file": "D:\\Projects\\pb\\ul_automation\\account.xlsx",
# 	"tran_template": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Ullink_Misc\\SecuritiesTrfHTIFS.pdf",
# 	"tran_pdf_output": "D:\\Projects\\pb\\Stock_Transfer\\[client_name]_SecuritiesTrfHTIFS.pdf",
# 	"tran_csv_output": "D:\\Projects\\pb\\Stock_Transfer\\[client_name]_SecuritiesTransfer.csv",
# 	"fa_output": "D:\\Projects\\pb\\ul_automation\\pb_to_fa_YYYYMMDD.csv",
# }
# Per-market lookup tables, keyed by the Bloomberg market suffix of the
# ticker (e.g. "700 HK" -> "HK").
BBG_MIC_DICT = {"JP": "XTKS",
				"HK": "XHKG",
				"C1": "XSSC",
				"C2": "XSEC"}
BBG_CCY_DICT = { "JP": "JPY",
				"HK": "HKD",
				"CH": "CNY",
				"C1": "CNY",
				"C2": "CNY"}
BBG_CAL_DICT = { "JP": "Tokyo",
				"HK": "Hong Kong",
				"CH": "Hong Kong",
				"C1": "Hong Kong",
				"C2": "Hong Kong"}
# Settlement spot days per market.
BBG_SPOT_DICT = {"JP": 3,
				"HK": 2,
				"CH": 1,
				"C1": 1,
				"C2": 1}
# 0-based column indexes into the account workbook rows (acc_np).
ACC_ACC_COL = 1
ACC_LS_COL = 2
ACC_CPTY_COL = 3
ACC_DPS_COL = 4
ACC_EMSX_COL = 7
ACC_MIC_COL = 8
# 0-based column indexes into the combined trade rows (trd_np).
TRD_CPTY_COL = 7
TRD_STOCK_CODE_COL = 12
TRD_STOCK_NAME_COL = 13
TRD_BB_COL = 20
TRD_STOCK_QTY_COL = 21
# 0-based column indexes into the "ClientDetails" sheet rows (cln_np).
CLN_DPS_COL = 2
CLN_MSS_COL = 72
def round_half_up(val, digit):
	"""Round *val* to *digit* decimal places with ties rounded up
	(towards +infinity): 2.5 -> 3, -2.5 -> -2.

	*val* is passed through str() first so floats and Decimal-like
	inputs are treated uniformly before scaling.
	"""
	scale = pow(10, digit)
	return math.floor(float(str(val)) * scale + 0.5) / scale
def xlsx_to_arrs(xlsx_file, worksheets=[], row_starts=[], col_starts=[], row_ends=[], col_ends=[]):
	"""Read several worksheets of one Excel workbook into parallel lists.

	worksheets: sheet indexes or names (each is first tried as an index,
	then as a name). The optional per-sheet bound lists (row_starts,
	col_starts, row_ends, col_ends) are indexed in step with *worksheets*;
	when empty, the full sheet extent is used.

	Returns (headers, arrs): for each sheet, a comma-joined header string
	(duplicate names get their column position appended, punctuation and
	spaces stripped) and the remaining rows as a list of tuples.

	NOTE(review): the mutable default arguments are never mutated here, so
	they are harmless, but None defaults would be the safer idiom.
	"""
	headers = []
	arrs = []
	wb = xlrd.open_workbook(xlsx_file)
	for (idx, worksheet) in enumerate(worksheets):
		arr = []
		ws = None
		try:
			# sheet reference may be a numeric index ...
			ws = wb.sheet_by_index(worksheet)
		except:
			# ... or a sheet name
			ws = wb.sheet_by_name(worksheet)
		row_end = ws.nrows if not row_ends else row_ends[idx]
		col_end = ws.ncols if not col_ends else col_ends[idx]
		arr = [ws.row_values(row, start_colx=0 if not col_starts else col_starts[idx], end_colx=col_end) for row in range((0 if not row_starts else row_starts[idx]), row_end)]
		# transpose -> drop all-empty columns -> transpose back
		arr = list(zip(*arr))
		arr = [x for x in arr if any(x)]
		arr = list(zip(*arr))
		# de-duplicate repeated header names by appending the column index
		header = ','.join(x if x not in arr[0][:n] else x+str(n) for n, x in enumerate(arr[0]) )
		headers.append(re.sub(r"[\*\.#/\$%\"\(\)& \_]", "", header))
		arrs.append(arr[1:])
	return headers, arrs
def arr_to_csv(file_name, header, data_arr):
	"""Write *data_arr* to *file_name* as a fully-quoted CSV.

	header: comma-separated column names, written as the first row.
	data_arr: iterable of rows; every cell is stringified with str().

	Fix: the file handle is now closed even if a write raises (the
	original leaked it on error).
	"""
	if sys.version_info >= (3, 0, 0):
		# newline='' stops the csv module's \r\n terminators being doubled
		csv_file = open(file_name, 'w', newline='')
	else:
		csv_file = open(file_name, 'wb')
	try:
		wr = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
		wr.writerow(header.split(','))
		for data_row in data_arr:
			wr.writerow([str(ele) for ele in data_row])
	finally:
		csv_file.close()
	return
def csv_to_arr(csv_file, start=0, has_header=True, delim=','):
	"""Load a delimited text file into a list of row lists.

	start: index of the first row to keep (earlier rows are dropped).
	has_header: when True, returns (header, rows) where header is row
	*start* re-joined with commas and rows are the rows after it;
	otherwise returns the rows from *start* on.

	Fix: the original opened with mode 'rU', which was deprecated and
	removed in Python 3.11 (ValueError); plain 'r' has the same
	universal-newline semantics on Python 3. The unreachable trailing
	`return` was also dropped.
	"""
	with open(csv_file, 'r') as f:
		reader = csv.reader(f, delimiter=delim)
		arr = list(reader)
	if has_header:
		return ','.join(arr[start]), arr[start + 1:]
	return arr[start:]
def xlsx_to_arr(xlsx_file, worksheet=0, row_start=0, col_start=0, row_end=-1, col_end=-1):
	"""Read one worksheet into (header, rows).

	worksheet: index or sheet name (index tried first). row_end/col_end
	of -1 mean "to the sheet's full extent".

	Returns a comma-joined header (duplicates suffixed with position,
	punctuation replaced with '_') and the remaining rows.

	NOTE(review): unlike xlsx_to_arrs above, punctuation in the header is
	replaced with '_' rather than stripped — confirm the difference is
	intentional before unifying.
	"""
	arr = []
	wb = xlrd.open_workbook(xlsx_file)
	ws = None
	try:
		ws = wb.sheet_by_index(worksheet)
	except:
		ws = wb.sheet_by_name(worksheet)
	row_end = ws.nrows if row_end == -1 else row_end
	col_end = ws.ncols if col_end == -1 else col_end
	arr = [ws.row_values(row, start_colx=col_start, end_colx=col_end) for row in range(row_start, row_end)]
	# de-duplicate repeated header names by appending the column index
	header = ','.join(x if x not in arr[0][:n] else x+str(n) for n, x in enumerate(arr[0]) )
	return re.sub(r"[\*\.#/\$%\"\(\)& \_]", "_", header), arr[1:]
def arr_to_xlsx(filename, header, arr):
	"""Write header+rows into a new Excel workbook via COM automation.

	Cells whose string form starts with '=' are written as formulas,
	everything else as plain values.

	NOTE(review): win32com is never imported in this module, so calling
	this raises NameError as-is; it needs `import win32com.client`
	(pywin32) and only works on Windows with Excel installed. No caller
	in this file uses it.
	"""
	xl = win32com.client.Dispatch('Excel.Application')
	wb = xl.Workbooks.Add()
	ws = wb.Worksheets(1)
	for i, cell in enumerate(header.split(',')):
		ws.Cells(1,i+1).Value = cell
	for i, row in enumerate(arr):
		for j, cell in enumerate(row):
			if str(cell)[0] == '=':
				ws.Cells(i+2,j+1).Formula = cell
			else:
				ws.Cells(i+2,j+1).Value = cell
	ws.Columns.AutoFit()
	# suppress Excel's overwrite prompt while saving
	xl.DisplayAlerts = False
	wb.SaveAs(filename)
	xl.DisplayAlerts = True
	wb.Close(True)
	return
def files_lookup(tgt_dir, pattern):
	"""Build today's expected file path from a date-wildcard pattern.

	The literal "????????" in *pattern* is replaced with today's date in
	YYYYMMDD form, e.g. "foo_????????.csv" -> "foo_20181018.csv".
	(For debugging, pin the date with
	datetime.datetime.strptime("20181017", "%Y%m%d") instead of now().)
	"""
	today = datetime.datetime.now().strftime("%Y%m%d")
	return os.path.join(tgt_dir, pattern.replace("????????", today))
def search_row_to_dict(header, arr, search_key, search_value):
	"""Return the first row of *arr*, as a column->value dict, whose
	*search_key* column equals *search_value* (whitespace-stripped).

	Returns {} when nothing matches. Raises KeyError if *search_key* is
	not one of the columns named in *header*.
	"""
	columns = header.split(',')
	target = search_value.strip()
	for row in arr:
		row_dict = {columns[idx]: cell for idx, cell in enumerate(row)}
		if row_dict[search_key] == target:
			return row_dict
	return {}
def prod_type_map():
	"""Product type used on every FA row: all PB trades are booked as
	portfolio swaps."""
	return "Portfolio Swap"
def pb_to_fa(acc_np):
	"""Convert today's PB Trading blotter workbook into FA trade-import rows.

	acc_np: numpy array of the account-mapping workbook (columns per the
	ACC_* constants). Reads the "Order Blotter" and "ClientDetails"
	sheets of today's PB Trading file, skips cancelled EMSX orders, and
	builds one FA row per filled order.

	Returns (fa_header, fa_arr) ready to be written with arr_to_csv.
	"""
	fa_header = "Trade Num,Product Type,Trade Date,Execution DateTime,Spot Days ,Start Date,End Date,Counterparty,Local Exchange Code,Instrument Name,MIC Code,ISINCode,Security,Security Name,Location,Currency,Pay Cal 1,B/S,MSS Account,Short Sell,Buy Back,Quantity,Gross Price,Commission Currency,Commission,Trading Fee Currency,Trading Fee,Transaction Levy Currency,Transaction Levy,Stamp Duty Currency,Stamp Duty,Normal/Closing,Transaction Ref,Group Ref No,Trader,External Reference,Trade Source,Channel"
	fa_arr = []
	FC = FeeCalc()
	pbt_file = files_lookup(PATH_DICT["pbt_path"], PATH_DICT["pbt_name"])
	[pbt_header, cln_header], [pbt_arr, cln_arr] = xlsx_to_arrs(pbt_file, ["Order Blotter", "ClientDetails"], row_starts=[1, 1])
	cln_np = np.array(cln_arr)
	pbt_header = pbt_header.split(',')
	# one dict per blotter row; rows with an empty second column are skipped
	pbt_dict_arr = [ dict( (pbt_header[idx], ele) for idx, ele in enumerate(row) ) for row in pbt_arr if row[1] != "" ]
	for pbt_dict in pbt_dict_arr:
		if pbt_dict["EMSXStatus"] == "CANCEL":
			continue
		# cln_dict = search_row_to_dict(cln_header, cln_arr, "BloombergEMSXCode", pbt_dict["OriginatingTraderFirm"])
		# Bloomberg ticker is "<local code> <market>", e.g. "700 HK"
		ticker_arr = pbt_dict["Ticker"].split(' ')
		local_exchange_code = ticker_arr[0]
		mkt = ticker_arr[1]
		mic_code = pbt_dict["Exch"]
		# account row matched on EMSX firm code AND the MIC appearing in its MIC list
		acc_row = acc_np[np.where((acc_np[:,ACC_EMSX_COL] == str(pbt_dict["OriginatingTraderFirm"]).strip()) * ([ (mic_code in ele) for ele in acc_np[:,ACC_MIC_COL] ])) ][0]
		external_reference = acc_row[ACC_DPS_COL]
		cln_row = cln_np[np.where(cln_np[:,CLN_DPS_COL] == external_reference)][0]
		counterparty = acc_row[ACC_CPTY_COL]
		# pbt_file[-13:-5] is the YYYYMMDD embedded in the workbook name
		tradenum = pbt_file[-13:-5] + str(int(pbt_dict["SequenceNumber"]))
		product_type = prod_type_map()
		trade_date = pbt_file[-13:-5]
		# Timestamp is seconds since midnight of the trade date
		execution_datetime = (datetime.datetime.strptime(trade_date, "%Y%m%d") + datetime.timedelta(seconds=pbt_dict["Timestamp"])).strftime("%Y%m%d%H%M%S")
		spot_days = BBG_SPOT_DICT[mkt]
		start_date = trade_date
		end_date = ""
		instrument_name = ""
		isin = pbt_dict["ISIN"]
		security = pbt_dict["Ticker"]
		security_name = pbt_dict["Secname"]
		location = mkt
		currency = BBG_CCY_DICT[mkt]
		pay_cal_1 = BBG_CAL_DICT[mkt]
		# COVR (short cover) counts as a buy; SHRT marks a short sell
		bs = "BUY" if pbt_dict["Side"] == "BUY" or pbt_dict["Side"] == "COVR" else "SELL"
		account_id = cln_row[CLN_MSS_COL]
		short_sell = "Y" if pbt_dict["Side"] == "SHRT" else "N"
		buy_back = "Y" if pbt_dict["Side"] == "COVR" else "N"
		quantity = int(float(pbt_dict["FILLED"]))
		signed_qty = quantity if bs == "BUY" else -quantity
		gross_price = round_half_up(pbt_dict["AvgPrc"], 4)
		# NOTE(review): gross_value is computed but never used
		gross_value = gross_price*quantity
		trader = "EDMO2"
		trade_source = "EMSX"
		commission_currency = currency
		trading_fee_currency = currency
		transaction_levy_currency = currency
		stamp_duty_currency = currency
		commission, trading_fee, transaction_levy, stamp_duty = FC.fee_calc(external_reference, gross_price, signed_qty, local_exchange_code, mic_code, "CARE")
		fa_arr.append([tradenum, product_type, trade_date, execution_datetime, spot_days, start_date, end_date, counterparty, local_exchange_code, instrument_name
			, mic_code, isin, security, security_name, location, currency, pay_cal_1, bs, account_id, short_sell, buy_back, quantity, gross_price
			, commission_currency, commission, trading_fee_currency, trading_fee, transaction_levy_currency, transaction_levy, stamp_duty_currency, stamp_duty
			, '', '', '', trader, external_reference, trade_source, "CARE" ])
	return fa_header, fa_arr
def stock_transfer(acc_np, trd_np):
	"""Produce per-client securities-transfer forms for buy-back trades.

	Clears the Stock_Transfer output directory, then for each
	counterparty with buy-back rows (TRD_BB_COL == 'Y') sums quantities
	per stock code, fills the transfer PDF template via pypdftk and
	writes a companion CSV listing the stocks.

	acc_np: account-mapping array (ACC_* columns).
	trd_np: combined trade rows (TRD_* columns); may be empty.

	Fix: `astype(np.float)` -> `astype(float)` — the np.float alias was
	deprecated in NumPy 1.20 and removed in 1.24, so the original raises
	AttributeError on current numpy; the builtin float is the documented
	equivalent.
	"""
	# wipe any forms left over from a previous run
	if os.path.exists(os.path.dirname(PATH_DICT["tran_pdf_output"])):
		for f in glob.iglob(os.path.dirname(PATH_DICT["tran_pdf_output"]) + '\\*'):
			os.remove(f)
	if trd_np.size == 0:
		return
	for cpty in np.unique(trd_np[:,TRD_CPTY_COL]):
		stock_arr = []
		# total buy-back quantity per stock code for this counterparty
		for stk_code in np.unique(trd_np[np.where((trd_np[:,TRD_CPTY_COL] == cpty) * (trd_np[:,TRD_BB_COL] == 'Y'))][:,TRD_STOCK_CODE_COL]):
			total_qty = np.sum(trd_np[np.where((trd_np[:,TRD_CPTY_COL] == cpty) * (trd_np[:,TRD_BB_COL] == 'Y') * (trd_np[:,TRD_STOCK_CODE_COL] == stk_code) )][:,TRD_STOCK_QTY_COL].astype(float))
			stock_arr.append([ stk_code, trd_np[np.where(trd_np[:,TRD_STOCK_CODE_COL] == stk_code)][0][TRD_STOCK_NAME_COL], total_qty ])
		if len(stock_arr) > 0:
			# PDF form fields: long account, short sub-account, date, reference note
			pdf_dict = {
				'fill_2': acc_np[np.where((acc_np[:,ACC_CPTY_COL] == cpty) * (acc_np[:,ACC_LS_COL] == 'L'))][0][ACC_ACC_COL],
				'toggle_3': 'On',
				'toggle_4': 'On',
				'Sub-account No': acc_np[np.where((acc_np[:,ACC_CPTY_COL] == cpty) * (acc_np[:,ACC_LS_COL] == 'S'))][0][ACC_ACC_COL],
				'Date': datetime.datetime.today().strftime("%d/%m/%Y"),
				'fill_19': 'Please refer to separate spreadsheet.',
			}
			cpty_no_space = cpty.replace(' ', '_')
			pypdftk.fill_form(PATH_DICT["tran_template"], pdf_dict, PATH_DICT["tran_pdf_output"].replace("[client_name]", cpty_no_space), True)
			arr_to_csv(PATH_DICT["tran_csv_output"].replace("[client_name]", cpty_no_space), "stock_code,stock_name,quantity", stock_arr)
	return
def main():
	"""Merge today's PB blotter trades with the Ullink trade file and emit
	the combined FA import CSV plus per-client stock-transfer forms."""
	print ("PB to FA")
	# account-mapping workbook -> numpy array (columns per ACC_* constants)
	acc_header, acc_arr = xlsx_to_arr(PATH_DICT["acc_file"])
	acc_np = np.array(acc_arr)
	# PB blotter converted to FA rows
	fa_header, fa_arr = pb_to_fa(acc_np)
	# today's Ullink-to-FA file, appended as-is after the PB rows
	ull_file = files_lookup(PATH_DICT["ull_dir"], PATH_DICT["ull_name"])
	ull_header, ull_arr = csv_to_arr(ull_file)
	trd_np = np.array(fa_arr + ull_arr)
	stock_transfer(acc_np, trd_np)
	arr_to_csv(PATH_DICT["fa_output"].replace("YYYYMMDD", datetime.date.today().strftime("%Y%m%d")), fa_header, fa_arr + ull_arr)
	return
if __name__ == "__main__":
	# Run the full export; Ctrl+C aborts with a message instead of a traceback.
	try:
		main()
	except KeyboardInterrupt:
		print ("Ctrl+C pressed. Stopping...") | {
"repo_name": "frederick623/HTI",
"path": "ul_automation/pb_trd_consol.py",
"copies": "2",
"size": "11351",
"license": "apache-2.0",
"hash": 7650355894278971000,
"line_mean": 34.037037037,
"line_max": 502,
"alpha_frac": 0.6450532993,
"autogenerated": false,
"ratio": 2.475681570338059,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4120734869638059,
"avg_score": null,
"num_lines": null
} |
# 2018-3-16
# build by qianqians
# genjs
import sys
sys.path.append("./parser")
sys.path.append("./gen/js")
import os
import gencaller
import genmodule
import jparser
def gen(inputdir, outputdir):
    """Generate JS caller/module stubs for every .juggle file in *inputdir*.

    For each module parsed out of a .juggle definition file, writes
    <outputdir>/caller/<module>caller.js and
    <outputdir>/module/<module>module.js.

    Raises ValueError if the same module name is defined twice across the
    input files.

    Fixes: the original raised a plain string ('raise "..."'), which is a
    TypeError on modern Python — exceptions must derive from BaseException;
    and `file.close` was missing its call parentheses, so output handles
    were never closed. Both reads and writes now use `with`.
    """
    defmodulelist = []
    if not os.path.isdir(outputdir):
        os.mkdir(outputdir)
    if not os.path.isdir(outputdir + '//caller'):
        os.mkdir(outputdir + '//caller')
    if not os.path.isdir(outputdir + '//module'):
        os.mkdir(outputdir + '//module')
    for filename in os.listdir(inputdir):
        fex = os.path.splitext(filename)[1]
        if fex != '.juggle':
            continue
        with open(inputdir + '//' + filename, 'r') as src:
            genfilestr = src.readlines()
        keydict = jparser.parser(genfilestr)
        for module_name, funcs in keydict.items():
            if module_name in defmodulelist:
                raise ValueError('redefined module %s' % module_name)
            defmodulelist.append(module_name)
            callercode = gencaller.gencaller(module_name, funcs)
            with open(outputdir + '//caller//' + module_name + 'caller.js', 'w') as out:
                out.write(callercode)
            modulecode = genmodule.genmodule(module_name, funcs)
            with open(outputdir + '//module//' + module_name + 'module.js', 'w') as out:
                out.write(modulecode)
if __name__ == '__main__':
    # usage: python genjs.py <input_dir> <output_dir>
    gen(sys.argv[1], sys.argv[2])
| {
"repo_name": "qianqians/juggle",
"path": "genjs.py",
"copies": "1",
"size": "1888",
"license": "mit",
"hash": 114693483364166180,
"line_mean": 36.76,
"line_max": 102,
"alpha_frac": 0.4528601695,
"autogenerated": false,
"ratio": 4.571428571428571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5524288740928571,
"avg_score": null,
"num_lines": null
} |
# 2018-3-16
# build by qianqians
# genmodule
def gencaller(module_name, funcs):
    """Render the JS source of a <module_name>_caller proxy class.

    funcs: iterable of parsed function descriptors where entry [1] is the
    method name and entry [2] the parameter list (only its length is used
    to name the generated argv parameters). Each generated method forwards
    its arguments to Icaller's call_module_method.
    """
    parts = ["/*this caller file is codegen by juggle for js*/\n"]
    parts.append("function " + module_name + "_caller(ch){\n")
    parts.append("    Icaller.call(this, \"" + module_name + "\", ch);\n\n")
    for func in funcs:
        method = func[1]
        argnames = ["argv" + str(n) for n in range(len(func[2]))]
        parts.append("    this." + method + " = function(")
        parts.append(",".join(" " + arg for arg in argnames))
        parts.append("){\n")
        parts.append("        var _argv = [" + ",".join(argnames) + "];\n")
        parts.append("        this.call_module_method.call(this, \"" + method + "\", _argv);\n")
        parts.append("    }\n\n")
    # prototype chain: <module>_caller extends Icaller
    parts.append("}\n")
    parts.append("(function(){\n")
    parts.append("    var Super = function(){};\n")
    parts.append("    Super.prototype = Icaller.prototype;\n")
    parts.append("    " + module_name + "_caller.prototype = new Super();\n")
    parts.append("})();\n")
    parts.append(module_name + "_caller.prototype.constructor = " + module_name + "_caller;\n\n")
    return "".join(parts)
| {
"repo_name": "qianqians/juggle",
"path": "gen/js/gencaller.py",
"copies": "1",
"size": "1435",
"license": "mit",
"hash": -569487170830817800,
"line_mean": 37.7837837838,
"line_max": 96,
"alpha_frac": 0.3797909408,
"autogenerated": false,
"ratio": 3.8266666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47064576074666664,
"avg_score": null,
"num_lines": null
} |
# 2018-3-16
# build by qianqians
# genmodule
def genmodule(module_name, funcs):
    """Render the JS source of a <module_name>_module event-stub class.

    funcs: iterable of parsed function descriptors where entry [1] is the
    method name and entry [2] the parameter list (only its length is used
    to name the generated argv parameters). Each generated method re-emits
    its arguments through Imodule's call_event.
    """
    def arglist(params):
        # "argv0, argv1, ..." — one positional name per declared parameter
        return ", ".join("argv" + str(n) for n in range(len(params)))

    out = "/*this module file is codegen by juggle for js*/\n"
    out += "function " + module_name + "_module(){\n"
    out += "    eventobj.call(this);\n"
    out += "    Imodule.call(this, \"" + module_name + "\");\n\n"
    for func in funcs:
        method, params = func[1], func[2]
        out += "    this." + method + " = function(" + arglist(params) + "){\n"
        out += "        this.call_event(\"" + method + "\", [" + arglist(params) + "]);\n"
        out += "    }\n\n"
    # prototype chain: <module>_module extends Imodule
    out += "}\n"
    out += "(function(){\n"
    out += "    var Super = function(){};\n"
    out += "    Super.prototype = Imodule.prototype;\n"
    out += "    " + module_name + "_module.prototype = new Super();\n"
    out += "})();\n"
    out += module_name + "_module.prototype.constructor = " + module_name + "_module;\n\n"
    return out
| {
"repo_name": "qianqians/juggle",
"path": "gen/js/genmodule.py",
"copies": "1",
"size": "1464",
"license": "mit",
"hash": 984540081742851600,
"line_mean": 36.5384615385,
"line_max": 96,
"alpha_frac": 0.3756830601,
"autogenerated": false,
"ratio": 3.9460916442048517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9790235191517962,
"avg_score": 0.006307902557377764,
"num_lines": 39
} |
# 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
@resources.register('dnszone')
class DnsZone(ArmResourceManager):
"""DNS Zone Resource
:example:
Finds all DNS Zones in the subscription
.. code-block:: yaml
policies:
- name: find-all-dns-zones
resource: azure.dnszone
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Networking']
service = 'azure.mgmt.dns'
client = 'DnsManagementClient'
enum_spec = ('zones', 'list', {})
resource_type = 'Microsoft.Network/dnszones'
| {
"repo_name": "ocampocj/cloud-custodian",
"path": "tools/c7n_azure/c7n_azure/resources/dns_zone.py",
"copies": "3",
"size": "1218",
"license": "apache-2.0",
"hash": -1725719483398756000,
"line_mean": 28.7073170732,
"line_max": 74,
"alpha_frac": 0.7019704433,
"autogenerated": false,
"ratio": 4.087248322147651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6289218765447651,
"avg_score": null,
"num_lines": null
} |
# 2019 RBH started lines-based ttt program
import numpy as np
# Cells
# 0 1 2 <- row 0 R0
# 3 4 5 <- row 1 R1
# 6 7 8 <- row 2 R2
# / \
# / | | | \
# D0 C0 C1 C2 D1 three columns and two diagonals
# Cell states and board dimensions for 3x3 tic-tac-toe.
Empty, Black, White, Num_Cells, Num_Lines = -1, 0, 1, 9, 8
# Line indexes: three rows, three columns, two diagonals.
R0, R1, R2, C0, C1, C2, D0, D1 = 0, 1, 2, 3, 4, 5, 6, 7
Lines = np.array([ # each line is a list of cells
  [0, 1, 2], [3, 4, 5], [6, 7, 8],
  [0, 3, 6], [1, 4, 7], [2, 5, 8],
  [0, 4, 8], [2, 4, 6]] )
# Lines_Meeting[c] = indexes of the lines that pass through cell c.
Lines_Meeting = [
  np.array([R0, C0, D0]), np.array([R0, C1]      ), np.array([R0, C2, D1]),
  np.array([R1, C0]      ), np.array([R1, C1, D0, D1]), np.array([R1, C2]      ),
  np.array([R2, C0, D1]), np.array([R2, C1]      ), np.array([R2, C2, D0])]
# Line_Sums[color][line] = number of stones of that color on that line.
# NOTE(review): sized with Num_Cells (9) although only Num_Lines (8) line
# indexes exist — the 9th column is never touched by addStone.
Line_Sums = np.array(
  [[0] * Num_Cells, [0] * Num_Cells ]) # for Black, White
Board = np.array([Empty] * Num_Cells)
def addStone(yes, B, c, color):
  """Place (yes=True) or remove (yes=False) a *color* stone at cell *c*
  of board *B*, keeping the per-line counts in Line_Sums consistent.

  Asserts the cell is empty before placing and holds *color* before
  removing.
  """
  if yes:
    assert B[c] == Empty
    B[c] = color
    delta = 1
  else:
    assert B[c] == color
    B[c] = Empty
    delta = -1
  # update the count of every line that passes through cell c
  for line in Lines_Meeting[c]:
    Line_Sums[color][line] += delta
# Smoke test: show the static tables, then place and remove Black stones
# along column 0 (cells 0,3,6) and column 2 (cells 2,5,8), printing the
# per-line counts after each step (they should return to all zeros).
print(Lines)
print(Board)
print(Lines_Meeting)
print(Line_Sums)
for cell in [0, 3, 6]: addStone(True, Board, cell, Black)
print(Line_Sums)
for cell in [0, 3, 6]: addStone(False, Board, cell, Black)
print(Line_Sums)
for cell in [2, 5, 8]: addStone(True, Board, cell, Black)
print(Line_Sums)
for cell in [2, 5, 8]: addStone(False, Board, cell, Black)
print(Line_Sums)
| {
"repo_name": "ryanbhayward/games-puzzles-algorithms",
"path": "simple/ttt/ttt_lines.py",
"copies": "1",
"size": "1634",
"license": "mit",
"hash": -4476568399219366400,
"line_mean": 31.0392156863,
"line_max": 79,
"alpha_frac": 0.5263157895,
"autogenerated": false,
"ratio": 2.340974212034384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8314658688548584,
"avg_score": 0.010526262597160054,
"num_lines": 51
} |
# 201. Bitwise AND of Numbers Range
#
# Given a range [m, n] where 0 <= m <= n <= 2147483647,
# return the bitwise AND of all numbers in this range, inclusive.
#
# For example, given the range [5, 7], you should return 4.
# http://www.cnblogs.com/grandyang/p/4431646.html
# The problem is all about finding the longest common sequence between n and m
# starting from the most significant bit,
# since all the following bits will flip for at least once and the AND result will be 0.
class Solution(object):
    """LeetCode 201: bitwise AND of every integer in [m, n].

    The result is the common high-bit prefix of m and n, padded with
    zeros: every lower bit flips somewhere inside the range, so it ANDs
    to 0. Three implementations are kept for reference; Python keeps the
    LAST definition, so the shift-based version is the one that runs.
    """

    def __init__(self):
        # upper bound of the problem's input range (2**31 - 1)
        self.INT_MAX = 2147483647

    def rangeBitwiseAnd(self, m, n):
        """Brute force: AND every value in the range. Correct but far too
        slow/memory-heavy for wide ranges (kept for documentation)."""
        result = m
        for value in range(m + 1, n + 1):
            result &= value
        return result

    def rangeBitwiseAnd(self, m, n):
        """Grow a high-bit mask leftwards until it isolates a prefix on
        which m and n agree; that masked prefix is the answer."""
        mask = self.INT_MAX
        while (mask & m) != (mask & n):
            mask <<= 1
        return mask & m

    def rangeBitwiseAnd(self, m, n):
        """Strip low bits until m == n (the shared prefix remains), then
        shift the prefix back into place. This definition wins."""
        shift = 0
        while m != n:
            m >>= 1
            n >>= 1
            shift += 1
        return m << shift
if __name__ == '__main__':
    # Smoke test: expected output is 4.
    # Fix: use the print(...) call form, which is valid on both Python 2
    # and Python 3 (the bare `print x` statement form is Python-2 only).
    print(Solution().rangeBitwiseAnd(5, 7))
| {
"repo_name": "gengwg/leetcode",
"path": "201_bitwise_and_numbers_range.py",
"copies": "1",
"size": "1417",
"license": "apache-2.0",
"hash": 8990862597345494000,
"line_mean": 25.2407407407,
"line_max": 88,
"alpha_frac": 0.556104446,
"autogenerated": false,
"ratio": 3.4476885644768855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45037930104768853,
"avg_score": null,
"num_lines": null
} |
# 2020-05-04 13:45:07.486536
from alembic import op
import sqlalchemy as sa
# Alembic revision identifiers.
revision = '39a2a2285c4e'
down_revision = None  # None => this is the base (initial) migration
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: groups, users, group_members (m:n join),
    pending_actions, requests and responses. All foreign keys cascade on
    update and delete."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('groups',
    sa.Column('id', sa.BigInteger(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('locale', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.BigInteger(), nullable=False),
    sa.Column('login', sa.String(), nullable=True),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('is_mute_enabled', sa.Boolean(), nullable=False),
    sa.Column('is_known', sa.Boolean(), nullable=False),
    sa.Column('locale', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # join table: which users belong to which groups
    op.create_table('group_members',
    sa.Column('user_id', sa.BigInteger(), nullable=True),
    sa.Column('group_id', sa.BigInteger(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], onupdate='CASCADE', ondelete='CASCADE')
    )
    # with_variant: BigInteger autoincrement is emulated as Integer on SQLite
    op.create_table('pending_actions',
    sa.Column('id', sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), autoincrement=True, nullable=False),
    sa.Column('user_id', sa.BigInteger(), nullable=False),
    sa.Column('chat_id', sa.BigInteger(), nullable=False),
    sa.Column('action', sa.String(), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('requests',
    sa.Column('id', sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), autoincrement=True, nullable=False),
    sa.Column('message_id', sa.BigInteger(), nullable=False),
    sa.Column('chat_id', sa.BigInteger(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('author_id', sa.BigInteger(), nullable=False),
    sa.Column('title', sa.String(), nullable=False),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('responses',
    sa.Column('id', sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), autoincrement=True, nullable=False),
    sa.Column('user_id', sa.BigInteger(), nullable=False),
    sa.Column('request_id', sa.BigInteger(), nullable=False),
    sa.Column('answer', sa.BigInteger(), nullable=False),
    sa.ForeignKeyConstraint(['request_id'], ['requests.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), foreign-key holders first so
    no referential constraint blocks the drops."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('responses')
    op.drop_table('requests')
    op.drop_table('pending_actions')
    op.drop_table('group_members')
    op.drop_table('users')
    op.drop_table('groups')
    # ### end Alembic commands ###
| {
"repo_name": "KrusnikViers/Zordon",
"path": "app/database/migrations/versions/auto_20200504_134507_39a2a2285c4e.py",
"copies": "1",
"size": "3168",
"license": "mit",
"hash": -5546894797215536000,
"line_mean": 42.397260274,
"line_max": 110,
"alpha_frac": 0.6613005051,
"autogenerated": false,
"ratio": 3.5715896279594137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732890133059414,
"avg_score": null,
"num_lines": null
} |
# 2020-05-13 Robinhood
# It's tax season! Given a set of transactions, find out the cost basis for each sell transaction and compute the overall capital gain/loss. The cost basis for a sold equity is the price at which the equity being sold was bought at. The transactions are processed in FIFO order.
# You are provided with a sorted list of tuples, each of which represent a transaction.
# These tuples are formatted as follows:
# symbol: string
# side: string (buy/sell)
# quantity: int
# price: float
# For each sell, output the following information:
# symbol: string,
# quantity: int
# capital_gain: float
# {"symbol": "apple", "side": "buy", "quantity": 1, "price": 200}
# {"symbol": "apple", "side": "buy", "quantity": 1, "price": 205}
# {"symbol": "apple", "side": "buy", "quantity": 1, "price": 210}
# {"symbol": "apple", "side": "sell", "quantity": 3, "price": 220}
# NOTE(review): stray no-op left over from the interview notes above —
# evaluates an empty list and discards it; safe to delete.
[]
# underlying data model
# { "stock" : deque[(price, quantity), ] }
# sell
# [(stock, quantity, capital_gain)]
# O(n) for inserting buy, O(n) m = buy/sell for stock x buy+sell O(n)
# symbol: string
# side: string (buy/sell)
# quantity: int
# price: float
from typing import List
from collections import defaultdict, deque
# import heapq # maxheap, if not FIFO
from collections import namedtuple
def calculate_gains(transactions: List[dict]):
    """Compute FIFO capital gains for each sell transaction.

    Buys are queued per symbol as lots; each sell consumes the oldest
    lots first.  Returns a list of dicts (symbol, quantity, capital_gains),
    one entry per (sell, consumed-lot) pairing.  The result is also printed.
    """
    lots = defaultdict(deque)  # symbol -> deque of [price, quantity] buy lots
    results = []
    for txn in transactions:
        sym = txn['symbol']
        px = txn['price']
        qty = txn['quantity']
        if txn['side'] == 'buy':
            lots[sym].append([px, qty])
            continue
        # Sell: drain the oldest lots (FIFO) until the order is filled.
        remaining = qty
        while remaining > 0:
            lot_price, lot_qty = lots[sym].popleft()
            if lot_qty >= remaining:
                # This lot covers the rest of the sell.
                results.append({"symbol": sym,
                                "quantity": remaining,
                                "capital_gains": (px - lot_price) * remaining})
                leftover = lot_qty - remaining
                if leftover > 0:
                    lots[sym].appendleft([lot_price, leftover])
                remaining = 0
            else:
                # Consume the whole lot and keep filling.
                results.append({"symbol": sym,
                                "quantity": lot_qty,
                                "capital_gains": (px - lot_price) * lot_qty})
                remaining -= lot_qty
    print(results)
    return results
# input, expected
# Smoke-test inputs run at import time; expected output is the printed list.
testcases = [
    {"symbol": "apple", "side": "buy", "quantity": 1, "price": 200},
    {"symbol": "apple", "side": "buy", "quantity": 1, "price": 205},
    {"symbol": "apple", "side": "buy", "quantity": 1, "price": 210},
    {"symbol": "apple", "side": "sell", "quantity": 3, "price": 220},
]
# Second case: one large lot partially consumed by the sell.
t2 = [
    {"symbol": "apple", "side": "buy", "quantity": 10, "price": 200},
    {"symbol": "apple", "side": "buy", "quantity": 1, "price": 205},
    {"symbol": "apple", "side": "buy", "quantity": 1, "price": 210},
    {"symbol": "apple", "side": "sell", "quantity": 5, "price": 220},
]
calculate_gains(testcases)
calculate_gains(t2)
| {
"repo_name": "jackchi/interview-prep",
"path": "interviews-from-companies/buy_sell_stocks_robinhood.py",
"copies": "1",
"size": "3279",
"license": "mit",
"hash": 4498996294003962400,
"line_mean": 33.8404255319,
"line_max": 278,
"alpha_frac": 0.5804580153,
"autogenerated": false,
"ratio": 3.6674132138857782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47478712291857783,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Libao Jin'
__date__ = 'December 15, 2015'
class Solution(object):
    def isHappy(self, n):
        """Return True if n is a happy number.

        Repeatedly replace n by the sum of the squares of its decimal
        digits; happy numbers reach 1, unhappy ones fall into a cycle.

        :type n: int
        :rtype: bool
        """
        seen = set()
        # Stop at the fixed point 1 or as soon as a value repeats (cycle).
        while n != 1 and n not in seen:
            seen.add(n)
            n = sum(int(digit) ** 2 for digit in str(n))
        return n == 1
if __name__ == '__main__':
    # Demo: classify the first hundred integers.
    solver = Solution()
    for number in range(100):
        print(number, solver.isHappy(number))
| {
"repo_name": "jinlibao/LeetCode-Solutions",
"path": "solutions/202_Happy_Number.py",
"copies": "2",
"size": "1086",
"license": "mit",
"hash": 6410306104919180000,
"line_mean": 23.6818181818,
"line_max": 53,
"alpha_frac": 0.3784530387,
"autogenerated": false,
"ratio": 3.949090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5327543947790909,
"avg_score": null,
"num_lines": null
} |
# 202. Happy Number
#
# Write an algorithm to determine if a number is "happy".
#
# A happy number is a number defined by the following process:
# Starting with any positive integer, replace the number by the sum of the squares of its digits,
# and repeat the process until the number equals 1 (where it will stay),
# or it loops endlessly in a cycle which does not include 1.
# Those numbers for which this process ends in 1 are happy numbers.
#
# Example: 19 is a happy number
#
# 1**2 + 9**2 = 82
# 8**2 + 2**2 = 68
# 6**2 + 8**2 = 100
# 1**2 + 0**2 + 0**2 = 1
class Solution(object):
    """Happy-number check shown in four alternative implementations.

    NOTE: the later ``isHappy`` definitions shadow the earlier ones, so
    callers actually get the last (Floyd cycle detection) version.
    Written for Python 2: ``n /= 10`` relies on integer floor division.
    """
    def isHappy(self, n):
        """
        :type n: int
        :rtype: bool
        """
        # Recursive variant: 'arr' records the digit-square sums seen so far.
        return self.helper([], n)
    # more efficient if using dict {}
    def helper(self, arr, n):
        # Sum of the squares of n's decimal digits.
        sum = 0
        for d in str(n):
            sum += int(d) ** 2
        if sum == 1:
            return True
        if sum in arr:
            # A repeated sum means we entered a cycle that excludes 1.
            return False
        arr.append(sum)
        return self.helper(arr, sum)
    # http://blog.csdn.net/coder_orz/article/details/51315486
    # Iterative variant: dict keys record values already visited.
    def isHappy(self, n):
        dict = {}
        while True:
            dict[n] = True
            sum = 0
            # extract all digits from n
            while n:
                sum += (n % 10) * (n % 10)
                n /= 10
            if sum == 1:
                return True
            # if cycle starts return False
            if sum in dict:
                return False
            n = sum
    # use set instead of dict keys.
    def isHappy(self, n):
        s = set()
        while True:
            s.add(n)
            sum = 0
            while n:
                sum += (n%10) ** 2
                n /= 10
            if sum == 1:
                return True
            if sum in s:
                return False
            n = sum
    # Floyd Cycle Detection Algorithm
    # 'fast' advances two digit-square-sum steps per iteration; meeting
    # 'slow' detects either the fixed point 1 or a non-happy cycle.
    def isHappy(self, n):
        slow = fast = n
        while True:
            slow = self.sumSquare(slow)
            fast = self.sumSquare(fast)
            fast = self.sumSquare(fast)
            if slow == fast:
                break
        return slow == 1
    def sumSquare(self, n):
        # Return the sum of the squares of n's decimal digits.
        sum = 0
        while n:
            sum += (n%10) * (n%10)
            n /= 10
        return sum
if __name__ == '__main__':
    # Expected output: True (19 is happy).  Python 2 print statement.
    print Solution().isHappy(19)
| {
"repo_name": "gengwg/leetcode",
"path": "202_happy_number.py",
"copies": "1",
"size": "2339",
"license": "apache-2.0",
"hash": -3049637916538346000,
"line_mean": 24.4239130435,
"line_max": 97,
"alpha_frac": 0.4805472424,
"autogenerated": false,
"ratio": 3.8407224958949095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9816056783547793,
"avg_score": 0.0010425909494232476,
"num_lines": 92
} |
# 203. Remove Linked List Elements - LeetCode
# https://leetcode.com/problems/remove-linked-list-elements/description/
# Definition for singly-linked list.
class ListNode(object):
    """A single node of a singly-linked list."""
    def __init__(self, x):
        self.val = x        # payload value
        self.next = None    # successor node; None at the tail
class LinkedList(object):
    """Builds a singly-linked list from a Python list and can walk it back."""
    def __init__(self, lst):
        self.lst = lst
        self.head = None
        if not lst:
            return
        # Chain the remaining values off the head node.
        self.head = ListNode(lst[0])
        tail = self.head
        for value in lst[1:]:
            tail.next = ListNode(value)
            tail = tail.next
    def traversal(self, head=-1):
        """Return node values from *head* onward; with the default sentinel,
        just echo the original source list."""
        if head == -1:
            return self.lst
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        return values
class Solution(object):
    def removeElements(self, head, val):
        """Remove every node whose value equals *val*; return the new head.

        :type head: ListNode
        :type val: int
        :rtype: ListNode
        """
        # Drop matching nodes at the front so the returned head is safe.
        while head and head.val == val:
            head = head.next
        node = head
        # Splice out any matching interior node.
        while node and node.next:
            if node.next.val == val:
                node.next = node.next.next
            else:
                node = node.next
        return head
# Given: 1 --> 2 --> 6 --> 3 --> 4 --> 5 --> 6, val = 6
# Return: 1 --> 2 --> 3 --> 4 --> 5
s = Solution()
# Each case is (input list, value to remove, expected remaining list).
ans = [
    ([],6,[]),
    ([6],6,[]),
    ([6,6,6,6,6,6],6,[]),
    ([1],6,[1]),
    ([6,1,2],6,[1,2]),
    ([6,1,2,6],6,[1,2]),
    ([1,2,6,3,4,5,6],6,[1,2,3,4,5]),
    # ([],[]),
]
for i in ans:
    l = LinkedList(i[0])
    r = l.traversal(s.removeElements(l.head,i[1]))
print r, r == i[2] | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/203_remove-linked-list-elements.py",
"copies": "1",
"size": "1642",
"license": "mit",
"hash": -6959397047682234000,
"line_mean": 23.5223880597,
"line_max": 72,
"alpha_frac": 0.4610231425,
"autogenerated": false,
"ratio": 3.225933202357564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41869563448575636,
"avg_score": null,
"num_lines": null
} |
# 203. Remove Linked List Elements
#
# Remove all elements from a linked list of integers that have value val.
#
# Example
# Given: 1 --> 2 --> 6 --> 3 --> 4 --> 5 --> 6, val = 6
# Return: 1 --> 2 --> 3 --> 4 --> 5
# Definition for singly-linked list.
class ListNode(object):
    # Minimal singly-linked list node: a value plus a next pointer.
    def __init__(self, x):
        self.next = None
        self.val = x
class Solution(object):
    def removeElements(self, head, val):
        """Remove all nodes holding *val*; return the (possibly new) head.

        NOTE: the later definitions shadow this one; callers get the
        two-cursor variant at the bottom.

        :type head: ListNode
        :type val: int
        :rtype: ListNode
        """
        if not head:
            return
        # Skip any leading nodes that match the target value.
        while head and head.val == val:
            head = head.next
        node = head
        while node and node.next:
            if node.next.val == val:
                node.next = node.next.next  # unlink the matching node
            else:
                node = node.next
        return head
    # Variant using a dummy head so leading matches need no special case.
    # http://bookshadow.com/weblog/2015/04/24/leetcode-remove-linked-list-elements/
    def removeElements(self, head, val):
        dummy = ListNode(0)
        dummy.next = head
        node = dummy
        while node and node.next:
            if node.next.val == val:
                node.next = node.next.next
            else:
                node = node.next
        return dummy.next
    # Variant with explicit previous/current cursors.
    def removeElements(self, head, val):
        dummy = ListNode(0)
        dummy.next = head
        pre, cur = dummy, head
        while cur:
            if cur.val == val:
                pre.next = cur.next
            else:
                pre = cur
            cur = cur.next
        return dummy.next
| {
"repo_name": "gengwg/leetcode",
"path": "203_remove_linked_list_elements.py",
"copies": "1",
"size": "1736",
"license": "apache-2.0",
"hash": -1413017446685017900,
"line_mean": 25.303030303,
"line_max": 83,
"alpha_frac": 0.508640553,
"autogenerated": false,
"ratio": 3.9454545454545453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9954095098454545,
"avg_score": 0,
"num_lines": 66
} |
""" 2048 game (with GUI) using Pyglet openGL """
import random
import pyglet
from pyglet.window import key
from pyglet.gl import (GL_COLOR_BUFFER_BIT, GL_TRIANGLE_STRIP, GL_BLEND,
GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, glClear,
glEnable, glDisable, glBlendFunc)
from ai import smart_move, set_board
class TransparentGroup(pyglet.graphics.Group):
    # Rendering group that brackets its draw calls with standard
    # source-alpha blending, so the lost-screen overlay is translucent.
    def set_state(self):
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    def unset_state(self):
        glDisable(GL_BLEND)
SQ_SIZE = 58      # tile side length in pixels
SPACING = 10      # gap between tiles in pixels
# Tile background colours keyed by exponent (a tile shows 2 ** exponent);
# each value is a pyglet 'c3B' colour repeated for the quad's 4 vertices.
BG_COLORS = {
    1: ('c3B', (238, 228, 219, 238, 228, 219, 238, 228, 219, 238, 228, 219)),
    2: ('c3B', (237, 224, 201, 237, 224, 201, 237, 224, 201, 237, 224, 201)),
    3: ('c3B', (241, 177, 125, 241, 177, 125, 241, 177, 125, 241, 177, 125)),
    4: ('c3B', (243, 149, 104, 243, 149, 104, 243, 149, 104, 243, 149, 104)),
    5: ('c3B', (243, 127, 100, 243, 127, 100, 243, 127, 100, 243, 127, 100)),
    6: ('c3B', (244, 96, 67, 244, 96, 67, 244, 96, 67, 244, 96, 67)),
    7: ('c3B', (236, 206, 120, 236, 206, 120, 236, 206, 120, 236, 206, 120)),
    8: ('c3B', (237, 204, 97, 237, 204, 97, 237, 204, 97, 237, 204, 97)),
    9: ('c3B', (237, 200, 80, 237, 200, 80, 237, 200, 80, 237, 200, 80)),
    10: ('c3B', (237, 197, 63, 237, 197, 63, 237, 197, 63, 237, 197, 63)),
    11: ('c3B', (237, 194, 46, 237, 194, 46, 237, 194, 46, 237, 194, 46)),
    12: ('c3B', (119, 110, 101, 119, 110, 101, 119, 110, 101, 119, 110, 101)),
    13: ('c3B', (119, 110, 101, 119, 110, 101, 119, 110, 101, 119, 110, 101)),
    14: ('c3B', (119, 110, 101, 119, 110, 101, 119, 110, 101, 119, 110, 101)),
}
# Label colours (RGBA): dark text on light tiles, light text otherwise.
TEXT_COLORS = {
    1: (119, 110, 101, 255),
    2: (255, 255, 255, 255),
}
# Semi-transparent overlay colour ('c4B' = RGBA per vertex) for the end screen.
LOST_SCREEN_COLOR = ('c4B', (238, 228, 219, 128,
                             238, 228, 219, 128,
                             238, 228, 219, 128,
                             238, 228, 219, 128))
WINDOW = pyglet.window.Window(280, 280)
BACKGROUND = pyglet.graphics.OrderedGroup(0)   # tile quads draw first
FOREGROUND = pyglet.graphics.OrderedGroup(1)   # labels / overlay text on top
BG = pyglet.image.load('assets/bg.png')
BG_SPRITE = pyglet.sprite.Sprite(BG)
# Quad covering the whole window, used by the lost-screen overlay.
FULL_SCREEN_VECTOR = ('v2f', (0, 0,
                              0, WINDOW.height,
                              WINDOW.width, 0,
                              WINDOW.width, WINDOW.height))
LOST_SCREEN = pyglet.graphics.Batch()
LOST_SCREEN.add_indexed(4, GL_TRIANGLE_STRIP,
                        TransparentGroup(), [0, 1, 2, 3],
                        FULL_SCREEN_VECTOR,
                        LOST_SCREEN_COLOR)
LOST_TEXT = pyglet.text.Label('Final Score: 0',
                              font_name='Arial',
                              font_size=18,
                              x=WINDOW.width//2, y=WINDOW.height//2,
                              anchor_x='center', anchor_y='center',
                              batch=LOST_SCREEN, group=FOREGROUND)
class Tile:
    """Logical 2048 tile: an exponent value plus board coordinates.

    The displayed number is 2 ** val; merged_flag blocks a tile from
    merging twice within a single shift.
    """

    def __init__(self, val, x, y):
        self.val = val
        self.x = x                  # board column
        self.y = y                  # board row
        self.merged_flag = False

    def move(self, x, y):
        """Relocate the tile to board cell (x, y)."""
        self.x, self.y = x, y

    def __str__(self):
        return '%s at %s, %s' % (self.val, self.x, self.y)
class GraphicTile(Tile):
    # Tile that also owns its pyglet vertex quad and number label.
    def __init__(self, val, x, y):
        super().__init__(val, x, y)
        # Pixel-position accessors derived from the current board cell;
        # pyglet's origin is the bottom-left corner of the window.
        self.gx = lambda: self.x * (SQ_SIZE+SPACING) + SPACING
        self.gy = lambda: WINDOW.height - (self.y+1) * (SQ_SIZE+SPACING) # 0, 0 is bottom-left
        self.batch = pyglet.graphics.Batch()
        # Quad for the tile background, initially coloured for value "2".
        self.v_list = self.batch.add_indexed(4, GL_TRIANGLE_STRIP,
                                             BACKGROUND, [0, 1, 2, 3],
                                             ('v2f', (self.gx(), self.gy(),
                                                      self.gx(), self.gy()+SQ_SIZE,
                                                      self.gx()+SQ_SIZE, self.gy(),
                                                      self.gx()+SQ_SIZE, self.gy()+SQ_SIZE)),
                                             BG_COLORS[1])
        # Centered label showing the displayed number 2 ** val.
        self.label = pyglet.text.Label(str(2**val),
                                       font_name='Arial',
                                       bold=True,
                                       font_size=32,
                                       color=TEXT_COLORS[1],
                                       x=self.gx()+SQ_SIZE//2, y=self.gy()+SQ_SIZE//2,
                                       anchor_x='center', anchor_y='center',
                                       batch=self.batch, group=FOREGROUND)
        if self.val in BG_COLORS:
            self.v_list.colors = BG_COLORS[self.val][1]
    def move(self, x, y):
        # Update board coords, then refresh the quad, label position/text,
        # background colour and font size for the (possibly merged) value.
        super().move(x, y)
        self.v_list.vertices = [self.gx(), self.gy(),
                                self.gx(), self.gy()+SQ_SIZE,
                                self.gx()+SQ_SIZE, self.gy(),
                                self.gx()+SQ_SIZE, self.gy()+SQ_SIZE]
        self.label.x = self.gx()+SQ_SIZE//2
        self.label.y = self.gy()+SQ_SIZE//2
        self.label.text = str(2**self.val)
        if self.val in BG_COLORS:
            self.v_list.colors = BG_COLORS[self.val][1]
        # Shrink the font for wide numbers; lighten text past value 8.
        if self.val > 9:
            self.label.font_size = 16
        elif self.val > 6:
            self.label.font_size = 24
        elif self.val > 2:
            self.label.color = TEXT_COLORS[2]
class Board:
    """4x4 2048 board.  Cells hold Tile/GraphicTile objects or None (empty).

    Tile values are exponents: a tile with val == 11 displays 2048.

    BUG FIX throughout: value comparisons previously used the identity
    operator (``is 0``, ``direction is 'w'``, ``a.val is b.val``), which
    only works by accident of CPython's small-int/string interning; they
    are now proper ``==`` equality tests.
    """

    def __init__(self, graphic=False):
        # graphic=True spawns pyglet-backed GraphicTile, else plain Tile.
        self.graphic = graphic
        self.board = [[None for i in range(4)] for j in range(4)]
        self.score = 0
        self.lost = False
        self.spawn_tile()
        self.spawn_tile()

    def show(self):
        """Print a textual dump of the board (debugging aid)."""
        for i in range(0, 4):
            for j in range(0, 4):
                if self.board[i][j] is None:
                    print('_ ', end='')
                else:
                    print(str(self.board[i][j].val)+' ', end='')
            print()

    def inbounds(self, x, y):
        """Return True when (x, y) is a valid board coordinate."""
        return 0 <= y and y < len(self.board) and 0 <= x and x < len(self.board[0])

    def exist(self, x, y):
        """Return True when (x, y) is in bounds and holds a tile."""
        return self.inbounds(x, y) and self.board[y][x] is not None

    def get_empty_spots(self):
        """Return the (x, y) coordinates of every empty cell."""
        empty_spots = []
        for y in range(0, 4):
            for x in range(0, 4):
                if self.board[y][x] is None:
                    empty_spots.append((x, y))
        return empty_spots

    def spawn_tile(self):
        """Place a new tile (90% a "2", 10% a "4") on a random empty cell.

        Returns False when the board is full, True otherwise.
        """
        empty_spots = self.get_empty_spots()
        if not empty_spots:  # was "len(empty_spots) is 0"
            return False
        spot = random.choice(empty_spots)
        tile_cls = GraphicTile if self.graphic else Tile
        # Exponent 1 displays "2" (90%), exponent 2 displays "4" (10%).
        exponent = 1 if random.random() < 0.90 else 2
        self.board[spot[1]][spot[0]] = tile_cls(exponent, spot[0], spot[1])
        return True

    @staticmethod
    def mini_shift(tile_line):
        """Shift one row/column 'forward' (toward index 3).

        Returns (tile_line, moved_flag, points): whether anything moved,
        and the score gained from merges.  Each tile merges at most once
        per shift, tracked via merged_flag.
        """
        moved_flag = False
        points = 0
        for i in range(0, 3):
            r = 2-i
            if tile_line[r] is not None:
                # Advance z to the farthest empty cell ahead of position r.
                z = r
                while z < 3:
                    if tile_line[z+1] is not None:
                        break
                    z += 1
                if tile_line[z] is None:
                    # Found a new empty spot: slide the tile there.
                    tile_line[z] = tile_line[r]
                    tile_line[r] = None
                    moved_flag = True
                # Merge with the neighbour ahead if values match
                # (fixed: was "val is val", an identity test on ints).
                if z < 3 and not tile_line[z+1].merged_flag and \
                        tile_line[z+1].val == tile_line[z].val:
                    tile_line[z+1].val += 1
                    points += 2**tile_line[z+1].val
                    tile_line[z+1].merged_flag = True
                    tile_line[z] = None
                    moved_flag = True
        return (tile_line, moved_flag, points)

    def shift(self, direction):
        """Shift the whole board in direction 'w'/'a'/'s'/'d'.

        Lines are reversed as needed so mini_shift always pushes toward
        the destination edge.  Returns True when any tile moved or merged.
        """
        moved_flag = False
        if direction == 'w' or direction == 's':
            for col in range(0, 4):
                tile_line = [self.board[row][col] for row in range(0, 4)]
                if direction == 'w':
                    tile_line.reverse()
                shifted_tiles, made_move, points = Board.mini_shift(tile_line)
                self.score += points
                moved_flag |= made_move
                if direction == 'w':
                    shifted_tiles.reverse()
                for row in range(0, 4):
                    self.board[row][col] = shifted_tiles[row]
        elif direction == 'a' or direction == 'd':
            for row in range(0, 4):
                tile_line = list(self.board[row])
                if direction == 'a':
                    tile_line.reverse()
                shifted_tiles, made_move, points = Board.mini_shift(tile_line)
                self.score += points
                moved_flag |= made_move
                if direction == 'a':
                    shifted_tiles.reverse()
                self.board[row] = shifted_tiles
        return moved_flag

    def check_loss(self):
        """Return True when no move is possible: the board is full and no
        two orthogonal neighbours hold equal values."""
        for y in range(0, 4):
            for x in range(0, 4):
                if self.board[y][x] is None or \
                   (self.exist(x-1, y) and self.board[y][x-1].val == self.board[y][x].val) or \
                   (self.exist(x+1, y) and self.board[y][x+1].val == self.board[y][x].val) or \
                   (self.exist(x, y-1) and self.board[y-1][x].val == self.board[y][x].val) or \
                   (self.exist(x, y+1) and self.board[y+1][x].val == self.board[y][x].val):
                    return False
        return True

    def computer_move(self):
        """End-of-turn bookkeeping: sync tile coordinates, clear merge
        flags, spawn a new tile, and update the lost flag."""
        for row in range(0, 4):
            for col in range(0, 4):
                if self.board[row][col] is not None:
                    self.board[row][col].move(col, row)
                    self.board[row][col].merged_flag = False
        self.spawn_tile()
        self.lost |= self.check_loss()

    def hash(self):
        """Return a hash of the current grid of tile objects."""
        return hash(tuple(tuple(row) for row in self.board))
@WINDOW.event
def on_key_press(symbol, modifiers):
    """Translate arrow/WASD presses into board shifts; only spawn a new
    tile (board.computer_move) when the shift actually changed the board.

    BUG FIX: comparisons used "symbol is key.UP" -- an identity test on
    int constants, which is only reliable for CPython's cached small
    ints; "==" is the correct value comparison.
    """
    moved_flag = False
    if symbol == key.UP or symbol == key.W:
        moved_flag = board.shift('w')
    elif symbol == key.RIGHT or symbol == key.D:
        moved_flag = board.shift('d')
    elif symbol == key.DOWN or symbol == key.S:
        moved_flag = board.shift('s')
    elif symbol == key.LEFT or symbol == key.A:
        moved_flag = board.shift('a')
    if moved_flag:
        board.computer_move()
@WINDOW.event
def on_draw():
    # Redraw everything: background sprite, each tile's batch, and the
    # translucent game-over overlay once the board is lost.
    glClear(GL_COLOR_BUFFER_BIT)
    BG_SPRITE.y = WINDOW.height - BG_SPRITE.height
    BG_SPRITE.draw()
    for row in board.board:
        for tile in row:
            if tile is not None:
                tile.batch.draw()
    if board.lost:
        LOST_TEXT.text = "Final Score: "+str(board.score)
        LOST_SCREEN.draw()
def start(graphic=False, ai_solve=True):
    """Create the global board and run the game.

    graphic:  use pyglet rendering (GraphicTile) instead of console output.
    ai_solve: let ai.smart_move drive the game instead of the keyboard.
    """
    global board
    board = Board(graphic)
    if not ai_solve:
        pyglet.app.run()
    else:
        set_board(board)
        if graphic:
            # for _ in range(0, 20):
            #     smart_move(0)
            # Drive the AI from pyglet's clock, 120 steps per second.
            pyglet.clock.schedule_interval(smart_move, 1/120)
            pyglet.app.run()
        else:
            # Headless: run up to 100 AI moves, then dump the final board.
            for i in range(0, 100):
                smart_move(0)
                if board.lost:
                    break
            board.show()
            print("Score:", board.score)
# start()
| {
"repo_name": "rcbyron/2048-ai",
"path": "client/game.py",
"copies": "1",
"size": "11608",
"license": "mit",
"hash": 2019859869324549000,
"line_mean": 35.7341772152,
"line_max": 95,
"alpha_frac": 0.4763094418,
"autogenerated": false,
"ratio": 3.398126463700234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4374435905500234,
"avg_score": null,
"num_lines": null
} |
"""2048
2048 is a simple game where you combine tiles by sliding them up, down, left, or
right with the arrow keys. You can actually get a fairly high score by repeatedly
sliding in an up, right, down, and left pattern over and over again.
Write a program that will open the game at https://gabrielecirulli.github.io/2048/
and keep sending up, right, down, and left keystrokes to automatically play the game.
"""
class ElementDoesNotHaveText(object):
    """Expected condition: the element found by *locator* lacks *text*.

    Calling the instance with a driver returns the WebElement when the
    text is absent, or False when the text is present (so WebDriverWait
    keeps polling).

    Attributes:
        locator: Used to find the element
        text: Substring that must NOT appear in the element's text
    """
    def __init__(self, locator, text):
        self.locator = locator
        self.text = text

    def __call__(self, browser):
        # Re-locate the element on every poll.
        element = browser.find_element(*self.locator)
        return element if self.text not in element.text else False
def main():
    """Auto-play 2048: cycle up/right/down/left keystrokes until the retry
    button shows (game over), then report the current and best scores."""
    from selenium import webdriver, common
    from selenium.webdriver.common.keys import Keys
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    url = "https://gabrielecirulli.github.io/2048/"
    # Start Browser and go to 2048 game
    browser = webdriver.Firefox()
    browser.implicitly_wait(3) # seconds
    browser.get(url)
    try:
        htmlElem = browser.find_element_by_tag_name("html")
        # Repeatedly send up, right, down, left
        # (loops until the "retry" button becomes visible, i.e. game over)
        while not browser.find_element_by_class_name("retry-button").is_displayed():
            htmlElem.send_keys(Keys.ARROW_UP)
            htmlElem.send_keys(Keys.ARROW_RIGHT)
            htmlElem.send_keys(Keys.ARROW_DOWN)
            htmlElem.send_keys(Keys.ARROW_LEFT)
        # Get current score and best score
        wait = WebDriverWait(browser, 10) # wait up to 10 seconds
        # Wait for the transient "+N" score animation to disappear so the
        # text read is the settled score.
        scoreElem = wait.until(ElementDoesNotHaveText((By.CLASS_NAME, "score-container"), "+"))
        score = scoreElem.text
        bestElem = browser.find_element_by_class_name("best-container")
        best = bestElem.text
        # Display current score and best score
        print("Current score: %s" % score)
        print("Best score: %s" % best)
    except common.exceptions.NoSuchElementException as err:
        print("Unable to locate element: %s" % err)
    # Close browser
    browser.quit()
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    main()
| {
"repo_name": "JoseALermaIII/python-tutorials",
"path": "pythontutorials/books/AutomateTheBoringStuff/Ch11/Projects/P3_2048.py",
"copies": "1",
"size": "2490",
"license": "mit",
"hash": 7399014100949717000,
"line_mean": 32.2,
"line_max": 95,
"alpha_frac": 0.6646586345,
"autogenerated": false,
"ratio": 3.971291866028708,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135950500528708,
"avg_score": null,
"num_lines": null
} |
# 2048.py
# Aug 22, 2015
# Written in python / pygame by DavidSousaRJ - david.sousarj@gmail.com
# License: Creative Commons
# Sorry about some comments in portuguese!
#
# Apr 4, 2017 - n2o.matt@gmail.com
# Make changes in how the move is implemented, since the original game
# forces the player to chose another direction if no moves is possible
# in the 'choosen' direction. The previous implementation was not handling
# this and instead spawning another block.
#
# CHANGES:
# Aug 24 - fixed colors /fonts
# BUG: game ending not working
# BUG: when a play is not possible it keeps adding a random tile
# TODO: include score, button undo and new game
import os
import sys
import pygame
import pdb;
import copy;
from pygame.locals import *
from random import randint
# 4x4 game grid of tile values; 0 marks an empty cell.
TABLE=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
def isgameover(TABLE):
    """Classify the board state.

    Returns 0 while the game is still playable, 1 when a 2048 tile
    exists (win), and 2 when the board is full with no adjacent equal
    pair (game over).
    """
    full_rows = 0
    for row in TABLE:
        if 2048 in row:
            return 1
        if 0 not in row:
            full_rows += 1
    if full_rows < 4:
        return 0  # at least one empty cell remains
    # Board is full: still playable only if some neighbouring pair merges.
    for i in range(4):
        for j in range(3):
            if TABLE[i][j] == TABLE[i][j+1]:
                return 0
    for j in range(4):
        for i in range(3):
            if TABLE[i][j] == TABLE[i+1][j]:
                return 0
    return 2
# 2048 rules
# pick the direction of the move, e.g.: up
# for each column, from top to bottom
#   move each number to the farthest reachable empty cell
#   if the nearest non-zero neighbour equals the number, combine them
def moveup(pi, pj, T):
    """Slide the tile at row pi, column pj as far up as possible,
    merging at most once with an equal tile above; returns the mutated T."""
    merged = False
    while pi > 0:
        above = T[pi-1][pj]
        if above == 0:
            # Empty cell above: slide up one row.
            T[pi-1][pj] = T[pi][pj]
            T[pi][pj] = 0
            pi -= 1
        elif above == T[pi][pj] and not merged:
            # Equal neighbour: merge into it (only once per call).
            T[pi-1][pj] += T[pi][pj]
            T[pi][pj] = 0
            pi -= 1
            merged = True
        else:
            break
    return T
def movedown(pi, pj, T):
    """Slide the tile at row pi, column pj as far down as possible,
    merging at most once with an equal tile below; returns the mutated T."""
    merged = False
    while pi < 3:
        below = T[pi+1][pj]
        if below == 0:
            # Empty cell below: slide down one row.
            T[pi+1][pj] = T[pi][pj]
            T[pi][pj] = 0
            pi += 1
        elif below == T[pi][pj] and not merged:
            # Equal neighbour: merge into it (only once per call).
            T[pi+1][pj] += T[pi][pj]
            T[pi][pj] = 0
            pi += 1
            merged = True
        else:
            break
    return T
def moveleft(pi, pj, T):
    """Slide the tile at row pi, column pj as far left as possible,
    merging at most once with an equal tile to its left; returns T."""
    merged = False
    while pj > 0:
        left = T[pi][pj-1]
        if left == 0:
            # Empty cell to the left: slide one column over.
            T[pi][pj-1] = T[pi][pj]
            T[pi][pj] = 0
            pj -= 1
        elif left == T[pi][pj] and not merged:
            # Equal neighbour: merge into it (only once per call).
            T[pi][pj-1] += T[pi][pj]
            T[pi][pj] = 0
            pj -= 1
            merged = True
        else:
            break
    return T
def moveright(pi, pj, T):
    """Slide the tile at row pi, column pj as far right as possible,
    merging at most once with an equal tile to its right; returns T."""
    merged = False
    while pj < 3:
        right = T[pi][pj+1]
        if right == 0:
            # Empty cell to the right: slide one column over.
            T[pi][pj+1] = T[pi][pj]
            T[pi][pj] = 0
            pj += 1
        elif right == T[pi][pj] and not merged:
            # Equal neighbour: merge into it (only once per call).
            T[pi][pj+1] += T[pi][pj]
            T[pi][pj] = 0
            pj += 1
            merged = True
        else:
            break
    return T
def randomfill(TABLE):
    """Drop a new tile (2 with probability 4/5, else 4) on a random empty
    cell.  Returns TABLE unchanged when no cell is empty; otherwise the
    grid is mutated in place and returned."""
    if 0 not in sum(TABLE, []):
        return TABLE
    # Rejection-sample a flat cell index 0..15 until an empty cell is hit.
    while True:
        w = randint(0, 15)
        if TABLE[w//4][w%4] == 0:
            break
    TABLE[w//4][w%4] = 4 if randint(1, 5) == 5 else 2
    return TABLE
def key(DIRECTION, TABLE):
    """Apply one full-board move in DIRECTION ('w', 'a', 's' or 'd').

    Cells are visited starting nearest the destination edge so merges
    resolve in the correct order.  Returns the mutated TABLE.
    """
    if DIRECTION == 'w':
        # Columns slide up: scan rows 1..3 downward.
        for pi in range(1, 4):
            for pj in range(4):
                if TABLE[pi][pj] != 0:
                    TABLE = moveup(pi, pj, TABLE)
    elif DIRECTION == 's':
        # Columns slide down: scan rows 2..0 upward.
        for pi in range(2, -1, -1):
            for pj in range(4):
                if TABLE[pi][pj] != 0:
                    TABLE = movedown(pi, pj, TABLE)
    elif DIRECTION == 'a':
        # Rows slide left: scan columns 1..3 left-to-right.
        for pj in range(1, 4):
            for pi in range(4):
                if TABLE[pi][pj] != 0:
                    TABLE = moveleft(pi, pj, TABLE)
    elif DIRECTION == 'd':
        # Rows slide right: scan columns 2..0 right-to-left.
        for pj in range(2, -1, -1):
            for pi in range(4):
                if TABLE[pi][pj] != 0:
                    TABLE = moveright(pi, pj, TABLE)
    return TABLE
def showtext(TABLE):
    # Console dump of the board (Python 2 print syntax: the trailing
    # comma keeps cells of a row on one line).
    os.system('clear')
    for LINE in TABLE:
        for N in LINE:
            print "%4s" %N,
        print ""
########################################################################
# Graphics section
width=400
height=400
boxsize = min(width,height)//4;
margin = 5
thickness = 0           # 0 = filled rectangles in pygame.draw.rect
STATUS=0                # 0 playing, 1 win, 2 game over (see isgameover)
colorback=(189,174,158)
colorblank=(205,193,180)
colorlight=(249,246,242)
colordark=(119,110,101)
# Tile background colour per tile value.
dictcolor1={
    0:colorblank,
    2:(238,228,218),
    4:(237,224,200),
    8:(242,177,121),
    16:(245,149,99),
    32:(246,124,95),
    64:(246,95,59),
    128:(237,207,114),
    256:(237,204,97),
    512:(237,200,80),
    1024:(237,197,63),
    2048:(237,194,46) }
# Tile text colour per value: dark text on light tiles, light on dark.
dictcolor2={
    2:colordark,
    4:colordark,
    8:colorlight,
    16:colorlight,
    32:colorlight,
    64:colorlight,
    128:colorlight,
    256:colorlight,
    512:colorlight,
    1024:colorlight,
    2048:colorlight }
# Init screen
pygame.init()
screen = pygame.display.set_mode((width,height))
pygame.display.set_caption( 'Python 2048 by DavidSousaRJ' )
myfont = pygame.font.SysFont("Arial", 30, bold=True)
def gameover(STATUS):
    # Render the end-of-game banner: 1 = win, 2 = loss; any other status
    # just refreshes the display.
    messages = {1: "You win! :)", 2: "Game over! :("}
    text = messages.get(STATUS)
    if text is not None:
        banner = myfont.render(text, 1, (255,255,255))
        screen.blit(banner, (100, 100))
    pygame.display.update()
def show(TABLE):
    # Redraw the whole board: one coloured square per cell plus its value.
    screen.fill(colorback)
    for i in range(4):
        for j in range(4):
            pygame.draw.rect(screen, dictcolor1[TABLE[i][j]], (j*boxsize+margin,
                                                               i*boxsize+margin,
                                                               boxsize-2*margin,
                                                               boxsize-2*margin),
                                                               thickness)
            if TABLE[i][j] != 0:
                label = myfont.render("%4s" %(TABLE[i][j]), 1, dictcolor2[TABLE[i][j]] )
                screen.blit(label, (j*boxsize+4*margin, i*boxsize+5*margin))
    pygame.display.update()
#paintCanvas
TABLE=randomfill(TABLE)
TABLE=randomfill(TABLE)
show(TABLE)
showtext(TABLE)
running=True
while True:
for event in pygame.event.get():
if event.type == QUIT:
print "quit"
pygame.quit(); sys.exit()
if event.type == pygame.KEYDOWN:
if running:
desired_key = None
if event.key == pygame.K_UP : desired_key = "w"
if event.key == pygame.K_DOWN : desired_key = "s"
if event.key == pygame.K_LEFT : desired_key = "a"
if event.key == pygame.K_RIGHT : desired_key = "d"
## Player didn't selected any direction key.
if desired_key is None:
continue
## We're passing a deep copy of TABLE to key() function
## since python will pass a "reference" to the object.
## So all modifications inside the key() function will
## modify the TABLE object and we need compare it to the
## previous state of the TABLE to check if the direction
## choosen by player was a valid one.
##
## It means that if no movement or merge was possible with
## that direction, player must choose another direction.
## Only then we spawn another block.
new_table = key(desired_key, copy.deepcopy(TABLE))
if new_table != TABLE:
TABLE=randomfill(new_table)
show(TABLE)
showtext(TABLE)
STATUS=isgameover(TABLE)
if STATUS<0:
running=False
gameover(STATUS)
#end
| {
"repo_name": "davidsousarj/2048py",
"path": "2048py.py",
"copies": "1",
"size": "8037",
"license": "cc0-1.0",
"hash": -4233168921847481000,
"line_mean": 28.5477941176,
"line_max": 88,
"alpha_frac": 0.5275600348,
"autogenerated": false,
"ratio": 3.23420523138833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9065114710878899,
"avg_score": 0.03933011106188634,
"num_lines": 272
} |
#2048 (python replica)
#By Banji Afolabi
#VERSION 1.3
#hbb: Header Button or Border
#MTT: Merged This Turn
# * : Remove All Of These Comments To View Board Log As You Play
import pygame, sys, random
from pygame.locals import *
#CONSTANTS
WINDOWHEIGHT = 625
WINDOWWIDTH = 500
HEADERHEIGHT = 125
TILESIZE = 100
GAPSIZE = 20
BOARDHEIGHT = BOARDWIDTH = 4
HBBHEIGHT = 42     # header button/border height
HBBWIDTH = 165     # header button/border width
#COLORS            R    G    B
FUCHSIA = (255, 0, 255) #2
RED = (255, 0, 0) #4
GREEN = ( 0, 128, 0) #8
BLUE = ( 0, 0, 255) #16
PURPLE = (128, 0, 128) #32
OLIVE = (128, 128, 0) #64
NAVYBLUE = ( 0, 0, 128) #128
LIME = ( 0, 255, 0) #256
TEAL = ( 0, 128, 128) #512
ORANGE = (252, 128, 0) #1024
YELLOW = (255, 255, 0) #2048
BLACK = ( 0, 0, 0) #text
WHITE = (255, 255, 255) #event text
SILVER = (192, 192, 192) #empty tile space color
BGCOLOR = (164, 152, 128)
EMPTYTILECOLOR = SILVER
#DIRECTIONS
UP = 'up'
DOWN = 'down'
RIGHT = 'right'
LEFT = 'left'
#TILES: mutable [VALUE, COLOR, MTT] templates; MTT = merged this turn.
#TILES             VALUE  COLOR   MTT
TWO = [ 2, FUCHSIA, False]
FOUR = [ 4, RED, False]
EIGHT = [ 8, GREEN, False]
SIXTEEN = [ 16, BLUE, False]
THIRTYTWO = [ 32, PURPLE, False]
SIXTYFOUR = [ 64, OLIVE, False]
ONETWOEIGHT = [ 128,NAVYBLUE, False]
TWOFIVESIX = [ 256, LIME, False]
FIVEONETWO = [ 512, TEAL, False]
TENTWENTYFOUR = [1024, ORANGE, False]
TWENTYFORTYEIGHT = [2048, YELLOW, False]
ALLTILES = (TWO, FOUR, EIGHT, SIXTEEN, THIRTYTWO, SIXTYFOUR, ONETWOEIGHT, TWOFIVESIX, FIVEONETWO, TENTWENTYFOUR, TWENTYFORTYEIGHT)
def main(highscore):
    """Set up pygame, then run the event loop.

    highscore carries the best score across "New Game" restarts (the
    restart is implemented by calling main recursively).
    """
    global DISPLAYSURF, tilefont, tilefont2, hbbfont, eventfont, eventfont2
    pygame.init()
    #Prepare Font
    tilefont = pygame.font.Font('freesansbold.ttf', 69)
    tilefont2 = pygame.font.Font('freesansbold.ttf', 42)
    hbbfont = pygame.font.Font('freesansbold.ttf', 20)
    eventfont = pygame.font.Font('freesansbold.ttf', 60)
    eventfont2 = pygame.font.Font('freesansbold.ttf', 19)
    #Prepare Window
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('2048')
    #Prepare Board Structure
    header, mainBoard = getStartBoard()
    #Set HighScore
    header[2] = highscore
    #Main Game Loop
    while True:
        DISPLAYSURF.fill(BGCOLOR)
        drawHeader(header)
        drawBoard(mainBoard)
        #Event Handler
        for event in pygame.event.get():
            if (event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE)):
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONUP:
                mouse_pos = pygame.mouse.get_pos()
                if (header[0].collidepoint(mouse_pos)): #New Game
                    main(header[2])
            elif event.type == KEYUP and event.key == K_UP:
                move(header, mainBoard, UP)
            elif event.type == KEYUP and event.key == K_DOWN:
                move(header, mainBoard, DOWN)
            elif event.type == KEYUP and event.key == K_RIGHT:
                move(header, mainBoard, RIGHT)
            elif event.type == KEYUP and event.key == K_LEFT:
                move(header, mainBoard, LEFT)
        pygame.display.update()
def getStartBoard():
    """Return (header, tiles).

    header = [New-Game button rect (filled in by drawHeader), score,
    best score].  tiles = 4x4 grid of None seeded with two random start
    tiles copied from the ALLTILES templates.
    """
    #Header        # New Game           # Score # Best Score
    header = [pygame.Rect(0, 0, 0, 0), 0, 0]
    #Board Tiles
    tiles = []
    for x in range(BOARDWIDTH):
        column = []
        for y in range(BOARDHEIGHT):
            column.append(None)
        tiles.append(column)
    #Generate Start Tiles
    r1 = random.randrange(4)
    r2 = random.randrange(4)
    # [:] copies the template so the board tile can mutate independently.
    tiles[r1][r2] = ALLTILES[random.randrange(2)][:]
    r3 = random.randrange(4)
    r4 = random.randrange(4)
    while r3 == r1 and r4 == r2: #Prevents Start Tiles From Being Placed In The Same Location
        r3 = random.randrange(4)
        r4 = random.randrange(4)
    tiles[r3][r4] = ALLTILES[random.randrange(2)][:]
    random.shuffle(tiles)
    return (header, tiles)
def getLeftTopCoords(x, y):
    """Convert a board cell (x, y) into the pixel coordinates of its
    top-left corner, accounting for the header strip and tile gaps."""
    left = GAPSIZE + x * (TILESIZE + GAPSIZE)
    top = GAPSIZE + HEADERHEIGHT + y * (TILESIZE + GAPSIZE)
    return (left, top)
def drawHeader(header):
    """Draw the New Game button plus both score counters; stores the
    button's rect back into header[0] for click hit-testing in main()."""
    # Draw "New Game" Button
    textSurf = hbbfont.render("New Game", True, BLACK)
    header[0] = textSurf.get_rect()
    header[0].topleft = (0, GAPSIZE)
    header[0].center = (HBBWIDTH/2, HEADERHEIGHT/3+(HBBHEIGHT/2))
    DISPLAYSURF.blit(textSurf, header[0])
    #Draw Score Counter
    textSurf = hbbfont.render("Score: " + str(header[1]), True, BLACK)
    textSurfRect = textSurf.get_rect()
    textSurfRect.topleft = (HBBWIDTH, HEADERHEIGHT/3)
    textSurfRect.center = (HBBWIDTH+(HBBWIDTH/2), HEADERHEIGHT/3+(HBBHEIGHT/2))
    DISPLAYSURF.blit(textSurf, textSurfRect)
    #Draw Best Score Counter
    textSurf = hbbfont.render("Best Score: " + str(header[2]), True, BLACK)
    textSurfRect = textSurf.get_rect()
    textSurfRect.topleft = ((HBBWIDTH*2), HEADERHEIGHT/3)
    textSurfRect.center = ((HBBWIDTH*2)+(HBBWIDTH/2), HEADERHEIGHT/3+(HBBHEIGHT/2))
    DISPLAYSURF.blit(textSurf, textSurfRect)
def drawBoard(board):
    """Render the 4x4 grid: empty cells as plain squares, tiles as
    coloured squares with the value centred (smaller font above 64)."""
    for tilex in range(BOARDWIDTH):
        for tiley in range(BOARDHEIGHT):
            left, top = getLeftTopCoords(tilex, tiley)
            if board[tilex][tiley] == None: #Tile Is Empty. Draw Empty Tile Space.
                pygame.draw.rect(DISPLAYSURF, EMPTYTILECOLOR, (left, top, TILESIZE, TILESIZE))
            else: # Draw The Tile
                pygame.draw.rect(DISPLAYSURF, board[tilex][tiley][1], (left, top, TILESIZE, TILESIZE)) #The Color Of Each Tile Is In Location 1
                if board[tilex][tiley][0] <= 64:
                    textSurf = tilefont.render(str(board[tilex][tiley][0]), True, BLACK) # The Value Of Each Tile Is In Location 0
                else:
                    textSurf = tilefont2.render(str(board[tilex][tiley][0]), True, BLACK)
                textSurfRect = textSurf.get_rect()
                textSurfRect.topleft = (left, top)
                textSurfRect.center = (left+(TILESIZE/2), top+(TILESIZE/2))
                DISPLAYSURF.blit(textSurf, textSurfRect)
def gameOverCheck(header, board, direction):
    """Return True if at least one tile could slide or merge in `direction`.

    Used for game-over detection: the game ends only when this returns
    False for all four directions.

    Unlike the original 270-line move simulation, this check is pure --
    it never mutates `board` or `header`. That also fixes the bug where
    game-over probing corrupted the live board: move() handed this
    function shallow copies, so the simulated merges rewrote the real
    tiles.

    Correctness of the simpler test: merge flags are reset at the start
    of every move, so on a fresh check a move changes the board exactly
    when some tile has, in the direction of travel, an adjacent empty
    cell (it can slide) or an adjacent tile of equal value (the pair can
    merge).
    """
    # Map the direction constant to an (dx, dy) index step for board[x][y].
    if direction == UP:
        dx, dy = 0, -1
    elif direction == DOWN:
        dx, dy = 0, 1
    elif direction == LEFT:
        dx, dy = -1, 0
    elif direction == RIGHT:
        dx, dy = 1, 0
    else:
        return False  # unknown direction: nothing can move (matches old m=False)
    for x in range(BOARDWIDTH):
        for y in range(BOARDHEIGHT):
            tile = board[x][y]
            if tile is None:
                continue
            nx, ny = x + dx, y + dy
            if not (0 <= nx < BOARDWIDTH and 0 <= ny < BOARDHEIGHT):
                continue  # tile already sits on the edge it would move toward
            neighbor = board[nx][ny]
            if neighbor is None or neighbor[0] == tile[0]:
                return True  # can slide into the gap, or merge with an equal tile
    return False
def move(header, board, direction):
    """Resolve one player move: slide and merge every tile toward
    `direction`, spawn a new tile if anything changed, then test for
    game over.

    Fixes over the original implementation:
    - the four duplicated per-direction branch trees are replaced by one
      generic slide/merge pass that reproduces the original traversal
      order and merge rules exactly;
    - the 2048 win test now compares tile VALUES; the old comparison of
      whole tile lists (``board[..] == TWENTYFORTYEIGHT``) could never be
      True, because merged tiles carry a True flag while the template
      carries False, so the win screen never appeared;
    - game-over probing runs on per-direction deep copies of the board,
      so gameOverCheck() can no longer corrupt the live tiles through
      the shared sub-lists of a shallow ``board[:]`` copy.
    """
    # Reset the per-turn "already merged" flag on every tile.
    for x in range(BOARDWIDTH):
        for y in range(BOARDHEIGHT):
            if board[x][y] is not None:
                board[x][y][2] = False
    # Traversal visits the cells nearest the destination edge first so
    # tiles pack correctly (same order as the original branch trees).
    if direction == UP:
        dx, dy = 0, -1
        cells = [(x, y) for x in range(BOARDWIDTH) for y in range(1, BOARDHEIGHT)]
    elif direction == DOWN:
        dx, dy = 0, 1
        cells = [(x, y) for x in range(BOARDWIDTH) for y in range(BOARDHEIGHT - 2, -1, -1)]
    elif direction == RIGHT:
        dx, dy = 1, 0
        cells = [(x, y) for y in range(BOARDHEIGHT) for x in range(BOARDWIDTH - 2, -1, -1)]
    elif direction == LEFT:
        dx, dy = -1, 0
        cells = [(x, y) for y in range(BOARDHEIGHT) for x in range(1, BOARDWIDTH)]
    else:
        return  # unknown direction: nothing to do
    m = False  # did anything slide or merge this turn?
    for x, y in cells:
        if board[x][y] is None:
            continue
        # Slide the tile as far as possible toward the destination edge.
        cx, cy = x, y
        while True:
            nx, ny = cx + dx, cy + dy
            if 0 <= nx < BOARDWIDTH and 0 <= ny < BOARDHEIGHT and board[nx][ny] is None:
                cx, cy = nx, ny
            else:
                break
        if (cx, cy) != (x, y):
            board[cx][cy] = board[x][y]
            board[x][y] = None
            m = True
        # Merge with the blocking tile when the values match and that tile
        # was not itself produced by a merge earlier in this turn.
        nx, ny = cx + dx, cy + dy
        if (0 <= nx < BOARDWIDTH and 0 <= ny < BOARDHEIGHT
                and board[nx][ny] is not None
                and board[nx][ny][0] == board[cx][cy][0]
                and board[nx][ny][2] == False):
            header, board, board[cx][cy], board[nx][ny] = merge(header, board, board[cx][cy], board[nx][ny])
            m = True
            if board[nx][ny][0] == TWENTYFORTYEIGHT[0]:  # compare values, not lists
                youWin(header, board)
    if m == True:
        newTile(board)
        # Count remaining empty cells; a full board may mean game over.
        empty = 0
        for x in range(BOARDWIDTH):
            for y in range(BOARDHEIGHT):
                if board[x][y] is None:
                    empty += 1
        if empty == 0:
            # Probe each direction on a throwaway DEEP copy so the real
            # board and header are never modified by the check.
            stuck = True
            for d in (UP, DOWN, RIGHT, LEFT):
                b_sub = [[tile[:] if tile is not None else None for tile in column]
                         for column in board]
                if gameOverCheck(header[:], b_sub, d):
                    stuck = False
                    break
            if stuck:
                gameOver(header, board)
def merge(header, board, sourcetile, endtile):
if sourcetile[0] == 2:
endtile = FOUR[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True #Was Merged This Turn
elif sourcetile[0] == 4:
endtile = EIGHT[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 8:
endtile = SIXTEEN[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 16:
endtile = THIRTYTWO[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 32:
endtile = SIXTYFOUR[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 64:
endtile = ONETWOEIGHT[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 128:
endtile = TWOFIVESIX[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 256:
endtile = FIVEONETWO[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 512:
endtile = TENTWENTYFOUR[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
elif sourcetile[0] == 1024:
endtile = TWENTYFORTYEIGHT[:]
sourcetile = None
header[1]+=endtile[0]
if header[1] > header[2]:
header[2] = header[1]
endtile[2] = True
return (header, board, sourcetile, endtile)
def youWin(header, board):
    """Show the victory screen over the final board state, then exit."""
    DISPLAYSURF.fill(BGCOLOR)
    drawHeader(header)
    drawBoard(board)
    banner = eventfont.render("YOU WIN!", True, WHITE)
    rect = banner.get_rect()
    rect.topleft = (WINDOWWIDTH, WINDOWHEIGHT/2)
    rect.center = (WINDOWWIDTH/2, WINDOWHEIGHT/3)
    DISPLAYSURF.blit(banner, rect)
    banner = eventfont2.render("YOU SIR/MA ARE HIGHLY FAVOURED AMONG MEN!", True, WHITE)
    rect = banner.get_rect()
    rect.topleft = (WINDOWWIDTH, WINDOWHEIGHT)
    rect.center = (WINDOWWIDTH/2, WINDOWHEIGHT/2)
    DISPLAYSURF.blit(banner, rect)
    pygame.display.update()
    sys.exit()
def gameOver(header, mainBoard):
    """Freeze the game on a "GAME OVER!" overlay while still letting the
    player quit, click "New Game", or press arrow keys."""
    while True:
        DISPLAYSURF.fill(BGCOLOR)
        drawHeader(header)
        drawBoard(mainBoard)
        banner = eventfont.render("GAME OVER!", True, WHITE)
        rect = banner.get_rect()
        rect.topleft = (WINDOWWIDTH, WINDOWHEIGHT/2)
        rect.center = (WINDOWWIDTH/2, WINDOWHEIGHT/3)
        DISPLAYSURF.blit(banner, rect)
        # Event handler.
        for event in pygame.event.get():
            wants_quit = event.type == QUIT or (
                event.type == KEYUP and event.key == K_ESCAPE)
            if wants_quit:
                pygame.quit()
                sys.exit()
            if event.type == MOUSEBUTTONUP:
                # Restart (carrying the best score) when "New Game" is clicked.
                if header[0].collidepoint(pygame.mouse.get_pos()):
                    main(header[2])
            elif event.type == KEYUP:
                if event.key == K_UP:
                    move(header, mainBoard, UP)
                elif event.key == K_DOWN:
                    move(header, mainBoard, DOWN)
                elif event.key == K_RIGHT:
                    move(header, mainBoard, RIGHT)
                elif event.key == K_LEFT:
                    move(header, mainBoard, LEFT)
        pygame.display.update()
def newTile(board):
    """Place one starter tile (a copy of a random entry from the first two
    ALLTILES prototypes) on a random empty cell.

    The original implementation retried random coordinates until it hit
    an empty cell, which would loop forever on a full board; choosing
    directly from the list of empty cells terminates in one step and
    leaves a full board unchanged.
    """
    empties = [(x, y) for x, column in enumerate(board)
               for y, tile in enumerate(column) if tile is None]
    if not empties:
        return  # no room -- leave the board as it is
    x, y = random.choice(empties)
    board[x][y] = ALLTILES[random.randrange(2)][:]
# Script entry point: start a fresh game with a best score of 0.
# (main is defined earlier in this file, outside this excerpt.)
if __name__ == '__main__':
    main(0)
| {
"repo_name": "Bamiji/2048-python-replica",
"path": "2048.py",
"copies": "1",
"size": "48196",
"license": "mit",
"hash": -6165717539106588000,
"line_mean": 44.425070688,
"line_max": 165,
"alpha_frac": 0.3630176778,
"autogenerated": false,
"ratio": 3.956329010014776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48193466878147756,
"avg_score": null,
"num_lines": null
} |
# 2048 - Yet another 2048 clone
# by KidsCanCode 2014
# For educational purposes only
# TODO:
# Animate moving tiles
import pygame
import sys
import random
# define some colors (R, G, B)
BLACK = (0, 0, 0)
BGCOLOR = BLACK  # window background color
# constants for game options
FPS = 15         # frame-rate cap; also paces the per-step move animation
TILESIZE = 100   # tile side length, in pixels
MARGIN = 5       # gap between adjacent tiles
BORDER = 8       # gap between the outer tiles and the window edge
WIDTH = TILESIZE * 4 + MARGIN * 3 + BORDER * 2   # 4 tiles + 3 gaps + 2 borders
HEIGHT = WIDTH   # the window is square
# increasingly deeper shades of red, based on tile value
# (keys are every value a tile can reach; 0 marks an empty cell)
COLORS = {0: "0x000000",
          2: "0xFFFFFF",
          4: "0xFFEEEE",
          8: "0xFFDDDD",
          16: "0xFFCCCC",
          32: "0xFFBBBB",
          64: "0xFFAAAA",
          128: "0xFF9999",
          256: "0xFF8888",
          512: "0xFF7777",
          1024: "0xFF6666",
          2048: "0xFF5555",
          4096: "0xFF4444",
          8192: "0xFF3333",
          16384: "0xFF2222",
          32768: "0xFF1111",
          65536: "0xFF0000"}
class Tile(pygame.sprite.Sprite):
    """One square of the 2048 grid; a value of 0 marks an empty cell."""

    def __init__(self, value=0):
        pygame.sprite.Sprite.__init__(self)
        self.value = value
        self.image = pygame.Surface((TILESIZE, TILESIZE))
        self.image.fill(pygame.Color(COLORS[self.value]))
        self.rect = self.image.get_rect()

    def update(self):
        """Repaint the tile for its current value and redraw the number."""
        # The value may have changed since the last frame, so refill first.
        self.image.fill(pygame.Color(COLORS[self.value]))
        label = FONT.render(str(self.value), True, BLACK)
        label_rect = label.get_rect()
        label_rect.midtop = (50, 40)
        self.image.blit(label, label_rect)
class Board:
    """The 4x4 grid of Tile sprites plus all slide/merge move logic.

    A new board starts with two random tiles filled in. The four public
    move_* methods keep their original interfaces; the four near-identical
    shifting loops have been collapsed into the single _slide() helper,
    which reproduces the original traversal order and merge rules exactly.
    NOTE(review): like the original, a tile produced by a merge may merge
    again within the same move (e.g. 2,2,2,2 -> 8), which differs from
    standard 2048 rules -- preserved here, confirm if intended.
    """

    def __init__(self):
        self.sprite_list = pygame.sprite.Group()
        # 4x4 grid as a list of rows; every cell always holds a Tile sprite.
        self.board = [[0 for _ in range(4)] for _ in range(4)]
        for row in range(4):
            for col in range(4):
                self.board[row][col] = Tile()
                self.sprite_list.add(self.board[row][col])
        self.add_tile()
        self.add_tile()

    def draw(self):
        """Draw the whole board, pausing one tick per tile movement step.

        TODO: replace this with better animation.
        """
        clock.tick(FPS)
        for i, row in enumerate(self.board):
            for j, tile in enumerate(row):
                tile.rect.x = BORDER + j * TILESIZE + j * MARGIN
                tile.rect.y = BORDER + i * TILESIZE + i * MARGIN
        self.sprite_list.update()
        self.sprite_list.draw(screen)
        pygame.display.flip()

    def add_tile(self):
        """Put a value-2 tile on a random empty cell, if any cell is empty."""
        if not self.full():
            while True:
                row = random.randrange(4)
                col = random.randrange(4)
                if self.board[row][col].value == 0:
                    self.board[row][col].value = 2
                    break

    def full(self):
        """Return True when no cell is empty (i.e. no tile has value 0)."""
        return all(tile.value != 0 for row in self.board for tile in row)

    def _slide(self, di, dj, cells):
        """Repeatedly shift/merge every tile one step along (di, dj).

        `cells` lists (row, col) positions ordered with the cells nearest
        the destination edge first. Passes repeat -- drawing after each
        one -- until a full pass moves nothing, matching the original
        per-direction loops.
        """
        while True:
            moved = False
            for i, j in cells:
                src = self.board[i][j]
                if src.value > 0:
                    dst = self.board[i + di][j + dj]
                    if dst.value == 0:
                        # Neighbor is empty: shift one step toward it.
                        dst.value = src.value
                        src.value = 0
                        moved = True
                    elif dst.value == src.value:
                        # Equal neighbor: merge into it.
                        dst.value *= 2
                        src.value = 0
                        moved = True
            self.draw()
            if not moved:
                return

    def move_left(self):
        # Leftmost movable column first; column 0 cannot move.
        self._slide(0, -1, [(i, j) for i in range(4) for j in range(1, 4)])

    def move_right(self):
        # Rightmost movable column first; column 3 cannot move.
        self._slide(0, 1, [(i, j) for i in range(4) for j in range(2, -1, -1)])

    def move_up(self):
        # Topmost movable row first; row 0 cannot move.
        self._slide(-1, 0, [(i, j) for i in range(1, 4) for j in range(4)])

    def move_down(self):
        # Bottommost movable row first; row 3 cannot move.
        self._slide(1, 0, [(i, j) for i in range(2, -1, -1) for j in range(4)])
# initialize pygame
pygame.init()
# Bold Arial for the tile numbers.
FONT_NAME = pygame.font.match_font('arial', True)
FONT = pygame.font.Font(FONT_NAME, 24)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("2048")
clock = pygame.time.Clock()
board = Board()
running = True
while running:
    clock.tick(FPS)
    # check for all your events
    for event in pygame.event.get():
        # this one checks for the window being closed
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        # now check for keypresses
        elif event.type == pygame.KEYDOWN:
            # this one quits if the player presses Esc
            if event.key == pygame.K_ESCAPE:
                pygame.quit()
                sys.exit()
            # Arrow keys: shift the board, then spawn a new tile.
            # NOTE(review): add_tile runs even when the move changed
            # nothing, which differs from standard 2048 -- confirm intended.
            if event.key == pygame.K_UP:
                board.move_up()
                board.add_tile()
            if event.key == pygame.K_DOWN:
                board.move_down()
                board.add_tile()
            if event.key == pygame.K_LEFT:
                board.move_left()
                board.add_tile()
            if event.key == pygame.K_RIGHT:
                board.move_right()
                board.add_tile()
    ##### Game logic goes here #########
    ##### Draw/update screen #########
    screen.fill(BGCOLOR)
    board.draw()
| {
"repo_name": "kidscancode/gamedev",
"path": "2048/2048.py",
"copies": "1",
"size": "8932",
"license": "mit",
"hash": 3467010507176010000,
"line_mean": 34.728,
"line_max": 81,
"alpha_frac": 0.4769368562,
"autogenerated": false,
"ratio": 4.116129032258065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007487094622436563,
"num_lines": 250
} |
# 204. Count Primes - LeetCode
# https://leetcode.com/problems/count-primes/description/
# Count the number of prime numbers less than a non-negative number, n.
class Solution(object):
    """LeetCode 204: count the primes strictly below n."""

    def countPrimes_MLE(self, n):
        """Dict-based sieve kept for reference; its per-composite dict
        entries exhaust memory on large inputs (hence the MLE suffix).

        Fixed for Python 3: dict.has_key() and xrange no longer exist, so
        membership now uses the `in` operator and iteration uses range().
        """
        composites = {}
        count = 0
        if n < 3:
            return 0
        for i in range(2, n):
            if i in composites:
                continue
            count += 1
            for j in range(i, n, i):
                composites[j] = True
        return count

    def countPrimes(self, n):
        """Return the number of primes < n using a boolean-list sieve of
        Eratosthenes: O(n log log n) time, O(n) space.

        Marking for a prime i starts at i*i, since every smaller multiple
        of i has a smaller prime factor and is already marked.
        """
        if n < 3:
            return 0
        composite = [False] * n
        count = 0
        for i in range(2, n):
            if composite[i]:
                continue
            count += 1
            for j in range(i * i, n, i):
                composite[j] = True
        return count
# Smoke test: run countPrimes against known answers and report each result.
# The original used the Python-2-only statement ``print r, r == i[1]``;
# the formatted single-argument print below produces the same output on
# both Python 2 and Python 3.
s = Solution()
cases = [
    (0, 0),
    (1, 0),
    (2, 0),
    (3, 1),
    (50, 15),
    (1500000, 114155)
]
for n, expected in cases:
    result = s.countPrimes(n)
    print("%s %s" % (result, result == expected))
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/204_count-primes.py",
"copies": "1",
"size": "1166",
"license": "mit",
"hash": -4349046907691009500,
"line_mean": 21.4423076923,
"line_max": 71,
"alpha_frac": 0.4339622642,
"autogenerated": false,
"ratio": 3.533333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44672955975333334,
"avg_score": null,
"num_lines": null
} |
# 204. Count Primes
#
# Count the number of prime numbers less than a non-negative number, n.
class Solution(object):
    """LeetCode 204: count primes below n.

    The original defined countPrimes three times (trial division, then a
    list-removal sieve, then a boolean sieve); Python keeps only the last
    definition, so the first two were dead code and have been removed.
    The isPrime helper is retained for compatibility.
    """

    def countPrimes(self, n):
        """Return the number of primes < n via a sieve of Eratosthenes.

        Based on https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes and
        http://bookshadow.com/weblog/2015/04/27/leetcode-count-primes/
        """
        # Pad to length 2 so isPrime[0]/isPrime[1] exist even for n < 2;
        # those slots are False, so the final sum is unaffected.
        isPrime = [True] * max(n, 2)
        isPrime[0], isPrime[1] = False, False
        x = 2
        while x * x < n:
            if isPrime[x]:
                # Start marking at x*x: smaller multiples of x were
                # already marked by smaller primes.
                p = x * x
                while p < n:
                    isPrime[p] = False
                    p += x
            x += 1
        return sum(isPrime)

    def isPrime(self, n):
        """Trial-division primality test.

        Fixed: the original returned True for n < 2 because the loop body
        never ran; 0 and 1 are not prime. Also drops the per-call
        ``import math`` by testing i*i <= n instead of computing sqrt(n).
        """
        if n < 2:
            return False
        i = 2
        while i * i <= n:
            if n % i == 0:
                return False
            i += 1
        return True
# Demo run; expected output is 10 (primes below 30).
# The original ``print Solution().countPrimes(30)`` is Python-2-only
# syntax; the parenthesized form below works on both Python 2 and 3.
if __name__ == '__main__':
    print(Solution().countPrimes(30))
| {
"repo_name": "gengwg/leetcode",
"path": "204_count_primes.py",
"copies": "1",
"size": "1544",
"license": "apache-2.0",
"hash": -6361385058013722000,
"line_mean": 24.7333333333,
"line_max": 71,
"alpha_frac": 0.4417098446,
"autogenerated": false,
"ratio": 3.599067599067599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4540777443667599,
"avg_score": null,
"num_lines": null
} |
# 205 - Isomorphic Strings (Easy)
# https://leetcode.com/problems/isomorphic-strings/
class Solution(object):
    def isIsomorphic(self, s, t):
        """Return True when s maps onto t like a substitution cipher.

        Two dictionaries enforce a 1-to-1 correspondence in both
        directions; the strings are assumed to have equal length.

        :type s: str
        :type t: str
        :rtype: bool
        """
        forward = {}
        backward = {}
        for idx in range(len(s)):
            ch_s, ch_t = s[idx], t[idx]
            # A previously registered mapping must agree with this pair.
            # (Single characters are always truthy, so .get() works here.)
            if forward.get(ch_s) and forward.get(ch_s) != ch_t:
                return False
            if backward.get(ch_t) and backward.get(ch_t) != ch_s:
                return False
            forward[ch_s] = ch_t
            backward[ch_t] = ch_s
        return True
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_205.py",
"copies": "1",
"size": "1442",
"license": "mit",
"hash": 9195111788973638000,
"line_mean": 39.0555555556,
"line_max": 79,
"alpha_frac": 0.567961165,
"autogenerated": false,
"ratio": 3.9184782608695654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49864394258695655,
"avg_score": null,
"num_lines": null
} |
# 205. Isomorphic Strings
#
# Given two strings s and t, determine if they are isomorphic.
#
# Two strings are isomorphic if the characters in s can be replaced to get t.
#
# All occurrences of a character must be replaced with another character while preserving the order of characters.
# No two characters may map to the same character but a character may map to itself.
#
# For example,
# Given "egg", "add", return true.
#
# Given "foo", "bar", return false.
#
# Given "paper", "title", return true.
#
# Note:
# You may assume both s and t have the same length.
class Solution(object):
    def isIsomorphic(self, s, t):
        """Return True when s can be mapped onto t character-by-character.

        One-way mapping check; the distinct-character counts must also
        match to rule out two characters mapping to the same target.

        :type s: str
        :type t: str
        :rtype: bool
        """
        if len(set(s)) != len(set(t)):
            return False
        # Renamed from `dict` to avoid shadowing the builtin.
        mapping = {}
        for x, y in zip(s, t):
            if x not in mapping:
                mapping[x] = y
            elif mapping[x] != y:
                return False
        return True

    # Two-dictionary version; checks both directions per pair.
    # http://blog.csdn.net/aliceyangxi1987/article/details/50300921
    def isIsomorphic(self, s, t):
        sdict = {}
        tdict = {}
        for i, j in zip(s, t):
            if i not in sdict:
                sdict[i] = j
            if j not in tdict:
                tdict[j] = i
            if sdict[i] != j or tdict[j] != i:
                return False
        return True

    # The pairing is a bijection iff no pair (and no character) collapses.
    # http://blog.csdn.net/coder_orz/article/details/51681396
    def isIsomorphic(self, s, t):
        return len(set(zip(s, t))) == len(set(s)) == len(set(t))

    # refer to prob. 290
    # Fix: wrap map() in list() -- on Python 3 map returns an iterator, and
    # two map objects never compare equal, so the bare comparison was broken.
    def isIsomorphic(self, s, t):
        return list(map(s.index, s)) == list(map(t.index, t))
if __name__ == '__main__':
    # Expected output: True, False, True, False.
    print Solution().isIsomorphic("egg", "add")
    print Solution().isIsomorphic("foo", "bar")
    print Solution().isIsomorphic("paper", "title")
    print Solution().isIsomorphic("ab", "aa")
| {
"repo_name": "gengwg/leetcode",
"path": "205_isomorphic_strings.py",
"copies": "1",
"size": "1871",
"license": "apache-2.0",
"hash": 8485230635641269000,
"line_mean": 27.7846153846,
"line_max": 114,
"alpha_frac": 0.5537145911,
"autogenerated": false,
"ratio": 3.408014571948998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.945730628650336,
"avg_score": 0.0008845753091277388,
"num_lines": 65
} |
# 206 - Reverse Linked List (Easy)
# https://leetcode.com/problems/reverse-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def reverseList(self, head):
        """Reverse a singly linked list in place via tail recursion.

        :type head: ListNode
        :rtype: ListNode (new head; None for an empty list)
        """
        def accumulate(node, acc):
            # Detach the current node, push it onto the reversed prefix
            # acc, and recurse on what remains of the original list.
            if node is None:
                return acc
            rest = node.next
            node.next = acc
            return accumulate(rest, node)

        return accumulate(head, None)
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_206.py",
"copies": "1",
"size": "1444",
"license": "mit",
"hash": 6319966006035280000,
"line_mean": 35.1,
"line_max": 81,
"alpha_frac": 0.5664819945,
"autogenerated": false,
"ratio": 4.598726114649682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5665208109149682,
"avg_score": null,
"num_lines": null
} |
"""206. Reverse Linked List
Easy
URL: https://leetcode.com/problems/reverse-linked-list/
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively.
Could you implement both?
"""
# Definition for singly-linked list.
# Definition for singly-linked list.
class ListNode(object):
    """One node of a singly linked list; `next` is linked up by the caller."""

    def __init__(self, val):
        self.val = val
        self.next = None


class SolutionStack(object):
    def reverseList(self, head):
        """Reverse by stacking the nodes, then rebuilding a fresh list.

        :type head: ListNode
        :rtype: ListNode
        Time complexity: O(n).
        Space complexity: O(n).
        """
        buffered = []
        node = head
        while node:
            buffered.append(node)
            node = node.next
        # Pop in LIFO order behind a sentinel to produce the reversal.
        sentinel = ListNode(None)
        tail = sentinel
        while buffered:
            tail.next = ListNode(buffered.pop().val)
            tail = tail.next
        return sentinel.next


class SolutionRecur(object):
    def _reverse(self, head, previous):
        # Copy the current head onto the front of the reversed prefix.
        if not head:
            return previous
        copied = ListNode(head.val)
        copied.next = previous
        return self._reverse(head.next, copied)

    def reverseList(self, head):
        """Recursively build a reversed copy of the list.

        :type head: ListNode
        :rtype: ListNode
        Time complexity: O(n).
        Space complexity: O(n).
        """
        return self._reverse(head, None)


class SolutionIter(object):
    def reverseList(self, head):
        """Iteratively build a reversed copy via head insertion.

        :type head: ListNode
        :rtype: ListNode
        Time complexity: O(n).
        Space complexity: O(n) for the copied nodes.
        """
        reversed_head = None
        while head:
            node = ListNode(head.val)
            node.next = reversed_head
            reversed_head = node
            head = head.next
        return reversed_head
def main():
# 1->2->3->4->5->NULL
node1 = ListNode(1)
node2 = ListNode(2)
node3 = ListNode(3)
node4 = ListNode(4)
node5 = ListNode(5)
node1.next = node2
node2.next = node3
node3.next = node4
node4.next = node5
# 5->4->3->2->1->NULL
# Should be 5.
print SolutionStack().reverseList(node1).val
print SolutionRecur().reverseList(node1).val
print SolutionIter().reverseList(node1).val
# Should be 4.
print SolutionStack().reverseList(node1).next.val
print SolutionRecur().reverseList(node1).next.val
print SolutionIter().reverseList(node1).next.val
# Should be 3.
print SolutionStack().reverseList(node1).next.next.val
print SolutionRecur().reverseList(node1).next.next.val
print SolutionIter().reverseList(node1).next.next.val
if __name__ == '__main__':
    # Run the manual demonstration of all three implementations.
    main()
| {
"repo_name": "bowen0701/algorithms_data_structures",
"path": "lc0206_reverse_linked_list.py",
"copies": "1",
"size": "3328",
"license": "bsd-2-clause",
"hash": 7543627975628936000,
"line_mean": 23.8358208955,
"line_max": 75,
"alpha_frac": 0.5943509615,
"autogenerated": false,
"ratio": 3.883313885647608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4977664847147608,
"avg_score": null,
"num_lines": null
} |
# Module authorship metadata.
__author__ = 'Libao Jin'
__date__ = 'December 15, 2015'
# Definition for singly-linked list.
class ListNode(object):
    """A single node in a singly linked list."""
    def __init__(self, x):
        # x is the payload; next is linked up by the caller.
        self.val = x
        self.next = None
class Solution(object):
    def reverseList(self, head):
        """Reverse a singly linked list in place.

        Collects the nodes, reverses the collection, then relinks the
        `next` pointers. Returns the new head (None for an empty list).

        Fix: removed a stray debug `print` that emitted every visited
        value, and collected all nodes in a single loop instead of
        special-casing the last one.

        :type head: ListNode
        :rtype: ListNode
        """
        if head is None:
            return head
        nodes = []
        current = head
        while current is not None:
            nodes.append(current)
            current = current.next
        nodes.reverse()
        # Relink each node to its successor in the reversed order.
        for i in range(len(nodes) - 1):
            nodes[i].next = nodes[i + 1]
        nodes[-1].next = None
        return nodes[0]
if __name__ == '__main__':
    # Build 1->2->3->4->5, reverse it, and print the values in order.
    solver = Solution()
    nodes = [ListNode(v) for v in (1, 2, 3, 4, 5)]
    for left, right in zip(nodes, nodes[1:]):
        left.next = right
    node = solver.reverseList(nodes[0])
    while node is not None:
        print(node.val)
        node = node.next
| {
"repo_name": "jinlibao/LeetCode-Solutions",
"path": "solutions/206_Reverse_Linked_List_2.py",
"copies": "2",
"size": "1172",
"license": "mit",
"hash": 9016142337985712000,
"line_mean": 21.9803921569,
"line_max": 44,
"alpha_frac": 0.5264505119,
"autogenerated": false,
"ratio": 3.426900584795322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49533510966953215,
"avg_score": null,
"num_lines": null
} |
# 206. Reverse Linked List
#
# Reverse a singly linked list.
#
# click to show more hints.
#
# Hint:
# A linked list can be reversed either iteratively or recursively. Could you implement both?
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class ListNode:
    """Singly-linked list node with a chained, human-readable repr."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def __repr__(self):
        # Instances are always truthy, so this always recurses down the
        # chain, ending with repr(None) -> 'None'.
        if self:
            return "%s -> %s" % (self.val, repr(self.next))
# http://bookshadow.com/weblog/2015/05/05/leetcode-reverse-linked-list/
class Solution(object):
    # Dummy-head insertion (shadowed by the last definition below).
    # http://bookshadow.com/weblog/2015/05/05/leetcode-reverse-linked-list/
    def reverseList(self, head):
        """Reverse by splicing each node right behind a sentinel.

        :type head: ListNode
        :rtype: ListNode
        """
        sentinel = ListNode(0)
        while head:
            rest = head.next
            # Insert the current node between sentinel and sentinel.next.
            head.next = sentinel.next
            sentinel.next = head
            head = rest
        return sentinel.next

    # Recursive variant (also shadowed).
    def reverseList(self, head):
        return self.doReverse(head, None)

    def doReverse(self, head, newHead):
        """Move nodes from head onto newHead until head is exhausted."""
        if head is None:
            return newHead
        rest = head.next
        head.next = newHead
        return self.doReverse(rest, head)

    # Iterative simultaneous assignment -- the definition that actually runs.
    def reverseList(self, head):
        """In-place iterative reversal; returns the new head."""
        acc = None
        while head:
            acc, head.next, head = head, acc, head.next
        return acc
class Solution2:
    def reverseList(self, head):
        """In-place iterative reversal using simultaneous assignment."""
        acc = None
        while head:
            # Flip the current node's pointer and advance in one step.
            head.next, acc, head = acc, head, head.next
        return acc
if __name__ == "__main__":
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
print head
print Solution().reverseList(head)
| {
"repo_name": "gengwg/leetcode",
"path": "206_reverse_linked_list.py",
"copies": "1",
"size": "2389",
"license": "apache-2.0",
"hash": 5374376708313924000,
"line_mean": 25.2527472527,
"line_max": 92,
"alpha_frac": 0.5588112181,
"autogenerated": false,
"ratio": 3.8408360128617365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4899647230961736,
"avg_score": null,
"num_lines": null
} |
# 206. Reverse Linked List
#
# Reverse a singly-linked list.
#
# Implementation of iterative, recursive, and tail recursive solutions.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
def reverse_iter(head):
    """Iteratively reverse a singly linked list in place; return the new head."""
    acc = None
    while head is not None:
        # Flip the current node onto acc and advance, in one simultaneous step.
        head.next, acc, head = acc, head, head.next
    return acc
def reverse_tailrec(head, revrsd=None):
    """Tail-recursively reverse a list in place; revrsd accumulates the result."""
    if head is None:
        return revrsd
    # Detach head from the remainder and push it onto the accumulator.
    rest, head.next = head.next, revrsd
    return reverse_tailrec(rest, head)
def reverse_rec(head):
    """
    Recursively reverse a list in place and return the new head.

    A helper is required because each step must hand back two things:
    the reversed sublist's head and its tail (where the next node gets
    appended). Less efficient than the tail-recursive version; kept as
    an academic exercise.
    """
    def helper(node):
        # Base case: empty or single node is both head and tail of its reversal.
        if node is None or node.next is None:
            return (node, node)
        # Reverse everything after node, then attach node at the end.
        rev_head, rev_tail = helper(node.next)
        node.next = None
        rev_tail.next = node
        # node is now the tail, available for further appends.
        return (rev_head, node)

    rev_head, _ = helper(head)
    return rev_head
# Example:
#import linked
#reverse_rec(linked.List(range(5)).head)
class Solution:
    """LeetCode adapter exposing each module-level reversal strategy as a method."""
    def reverseList_iter(self, head):
        """Delegate to the iterative implementation."""
        return reverse_iter(head)
    def reverseList_tailrec(self, head):
        """Delegate to the tail-recursive implementation."""
        return reverse_tailrec(head)
    def reverseList_rec(self, head):
        """Delegate to the plain recursive implementation."""
        return reverse_rec(head)
    # The judge calls reverseList; the iterative strategy is the default.
    reverseList = reverseList_iter
| {
"repo_name": "afbarnard/glowing-broccoli",
"path": "lc/000206.py",
"copies": "1",
"size": "1828",
"license": "mit",
"hash": -5089296812074880000,
"line_mean": 25.1142857143,
"line_max": 72,
"alpha_frac": 0.6384026258,
"autogenerated": false,
"ratio": 3.738241308793456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.986524856520498,
"avg_score": 0.0022790738776952988,
"num_lines": 70
} |
# 207. Course Schedule
#
# There are a total of n courses you have to take, labeled from 0 to n - 1.
#
# Some courses may have prerequisites,
# for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
#
# Given the total number of courses and a list of prerequisite pairs,
# is it possible for you to finish all courses?
#
# For example:
#
# 2, [[1,0]]
# There are a total of 2 courses to take.
# To take course 1 you should have finished course 0. So it is possible.
#
# 2, [[1,0],[0,1]]
# There are a total of 2 courses to take.
# To take course 1 you should have finished course 0,
# and to take course 0 you should also have finished course 1. So it is impossible.
#
# Note:
# The input prerequisites is a graph represented by a list of edges,
# not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
# click to show more hints.
#
# Hints:
# This problem is equivalent to finding if a cycle exists in a directed graph.
# If a cycle exists, no topological ordering exists and therefore it will be impossible to take all courses.
# Topological Sort via DFS - A great video tutorial (21 minutes)
# on Coursera explaining the basic concepts of Topological Sort.
# Topological sort could also be done via BFS.
class Solution(object):
    # BFS / Kahn-style topological peeling (shadowed by the DFS version below).
    # http://bookshadow.com/weblog/2015/05/07/leetcode-course-schedule/
    def canFinish(self, numCourses, prerequisites):
        """Return True iff every course can be taken (no prerequisite cycle).

        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool
        """
        indegree = [0] * numCourses
        children = [[] for _ in range(numCourses)]
        for course, prereq in prerequisites:
            indegree[course] += 1
            children[prereq].append(course)
        pending = set(range(numCourses))
        progressed = True
        while progressed and len(pending):
            progressed = False
            peeled = []
            for course in pending:
                if indegree[course] == 0:
                    for child in children[course]:
                        indegree[child] -= 1
                    peeled.append(course)
                    progressed = True
            for course in peeled:
                pending.remove(course)
        # Courses remain pending only if they sit on a cycle.
        return len(pending) == 0

    # DFS cycle detection -- this definition is the one that actually runs.
    # https://gengwg.blogspot.com/2018/05/leetcode-207-course-schedule-ep93.html
    def canFinish(self, numCourses, prerequisites):
        """Return True iff the prerequisite graph is acyclic (DFS colouring)."""
        graph = [[] for _ in range(numCourses)]
        for course, prereq in prerequisites:
            graph[prereq].append(course)
        # 0 == unknown, 1 == visiting (on the active path), 2 == visited
        state = [0] * numCourses
        for course in range(numCourses):
            if self.dfs(course, state, graph):
                return False
        return True

    def dfs(self, cur, v, graph):
        """Return True iff a cycle is reachable from node cur."""
        if v[cur] == 1:
            # Back edge onto the active path: cycle found.
            return True
        if v[cur] == 2:
            # Already fully explored without finding a cycle.
            return False
        v[cur] = 1
        if any(self.dfs(neighbor, v, graph) for neighbor in graph[cur]):
            return True
        # Mark fully explored only after all neighbors are done.
        v[cur] = 2
        return False
if __name__ == '__main__':
print Solution().canFinish(4, [[1, 0], [2, 0], [3, 1], [3, 2]]) # True
print Solution().canFinish(2, [[1,0],[0,1]]) # False
print Solution().canFinish(2, [[1,0]]) # False
| {
"repo_name": "gengwg/leetcode",
"path": "207_course_schedule.py",
"copies": "1",
"size": "3568",
"license": "apache-2.0",
"hash": 4915252508118550000,
"line_mean": 31.7339449541,
"line_max": 108,
"alpha_frac": 0.5916479821,
"autogenerated": false,
"ratio": 3.6974093264248706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4789057308524871,
"avg_score": null,
"num_lines": null
} |
# 209. Minimum Size Subarray Sum
#
# Given an array of n positive integers and a positive integer s,
# find the minimal length of a contiguous subarray of which the sum >= s.
# If there isn't one, return 0 instead.
#
# For example, given the array [2,3,1,2,4,3] and s = 7,
# the subarray [4,3] has the minimal length under the problem constraint.
#
# click to show more practice.
#
# More practice:
# If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log n).
class Solution(object):
    # Brute force O(n^2): scan every window start (too slow for large inputs).
    def minSubArrayLen(self, s, nums):
        """Return the minimal length of a contiguous subarray with sum >= s.

        :type s: int
        :type nums: List[int]
        :rtype: int (0 when no such subarray exists)
        """
        if sum(nums) < s:
            return 0
        shortest = len(nums)
        for lo in range(len(nums)):
            for hi in range(lo + 1, len(nums) + 1):
                if sum(nums[lo:hi]) >= s and shortest > hi - lo:
                    shortest = hi - lo
                    break
        return shortest

    # Sliding window with explicit grow/shrink loops.
    # http://bookshadow.com/weblog/2015/05/12/leetcode-minimum-size-subarray-sum/
    def minSubArrayLen(self, s, nums):
        size = len(nums)
        lo = hi = running = 0
        best = size + 1
        while hi < size:
            # Grow the window until it meets the target ...
            while hi < size and running < s:
                running += nums[hi]
                hi += 1
            # ... then shrink it from the left while it still does.
            while lo < hi and running >= s:
                best = min(best, hi - lo)
                running -= nums[lo]
                lo += 1
        return best if best <= size else 0

    # The same window written as a single state-machine loop.
    def minSubArrayLen(self, s, nums):
        size = len(nums)
        lo = hi = running = 0
        best = size + 1
        while True:
            if running < s:
                if hi >= size:
                    break
                running += nums[hi]
                hi += 1
            else:
                if lo > hi:
                    break
                best = min(best, hi - lo)
                running -= nums[lo]
                lo += 1
        return 0 if best == size + 1 else best

    # Sliding window, O(n) -- this definition is the effective one.
    # https://gengwg.blogspot.com/2018/03/leetcode-209-minimum-size-subarray-sum.html
    def minSubArrayLen(self, s, nums):
        """Grow the window rightwards; shrink from the left once sum >= s."""
        n = len(nums)
        best = n + 1  # sentinel: impossible length
        window = 0
        left = 0
        for right, value in enumerate(nums):
            window += value
            while left <= right and window >= s:
                if right - left + 1 < best:
                    best = right - left + 1
                window -= nums[left]
                left += 1
        return best if best != n + 1 else 0
if __name__ == '__main__':
    # Expected output: 2 ([4,3]), 1 ([4]), 2 ([5,10]).
    print Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3])
    print Solution().minSubArrayLen(4, [1, 4, 4])
    print Solution().minSubArrayLen(15, [5, 1, 3, 5, 10, 7, 4, 9, 2, 8])
| {
"repo_name": "gengwg/leetcode",
"path": "209_min_size_subarray_sum.py",
"copies": "1",
"size": "3138",
"license": "apache-2.0",
"hash": 8230000352524756000,
"line_mean": 32.7419354839,
"line_max": 116,
"alpha_frac": 0.5098789038,
"autogenerated": false,
"ratio": 3.606896551724138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4616775455524138,
"avg_score": null,
"num_lines": null
} |
"""20 may 2014
5pm
24-16 degrees N
120-121 degrees E
Test:
in: COMPREF-10min, COMPREF0, WRFs +- 6 hours
out: matching results/ranking
cd /media/TOSHIBA\ EXT/ARMOR/python
ipython
"""
# 0. parameters, imports and set up
# 1. get the wrf filepaths
# 2. matching and scoring
# imports
import os, time, pickle, datetime, shutil
from armor import pattern
dbz = pattern.DBZ
np = pattern.np
plt = pattern.plt
dp = pattern.dp
from armor.tests import localFeaturesSensitivityTest4 as lf
# filepath parameters
doStats=False
thisScript = 'localFeaturesTest20141210.py'
lowerThreshold = 20.
upperThreshold=35.
radarFolder = '/media/TOSHIBA EXT/ARMOR/data/may14/RegriddedData/RADARCV/'
wrfFolder = '/media/TOSHIBA EXT/ARMOR/data/may14/RegriddedData/WEPS/'
outputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs2/local_features_20141210/'
outputFolder += 'test' + str(len(os.listdir(outputFolder))+1) + "/"
os.makedirs(outputFolder)
radarFileName = 'COMPREF.20140519.2100.0p03.bin'
coastDataPath201='/media/TOSHIBA EXT/ARMOR/data/1may2014/RADARCV/taiwanCoast.dat'
rectangleRegion = (120,50,60,50)
featuresList = ['volume', ('centroid',0), ('centroid',1), ('skewness',0), ('skewness',1), ('kurtosis',0), ('kurtosis',1),
('eigenvalues',0), ('eigenvalues',1), 'angle',
'highIntensityRegionVolume',
#('HuMoments',0),('HuMoments',1),('HuMoments',2),
]
# functions
def sigmoid(x):
    """Logistic squashing function; NaN inputs are regularised to -10."""
    if np.isnan(x):
        # Same value the original regularisation (x = -10) produced.
        return 1. / (1 + np.exp(10))
    return 1. / (1 + np.exp(-x))


def softMask(i, j, width):
    """Return an i-by-j mask fading smoothly from ~0 at the border to ~1 inside.

    One corner tile of sigmoid ramps (ramp centred at `width` pixels from
    the edge) is built and mirrored into the other three corners.
    """
    out = np.zeros((i, j))
    half_i = i // 2 + 1
    half_j = j // 2 + 1
    ramp_i = [sigmoid(1. * (v - width) / (0.1 * width)) for v in range(half_i)]
    ramp_j = [sigmoid(1. * (v - width) / (0.1 * width)) for v in range(half_j)]
    cols, rows = np.meshgrid(ramp_j, ramp_i)
    tile = rows * cols
    out[0:half_i, 0:half_j] = tile
    out[-half_i:, 0:half_j] = np.flipud(tile)
    out[-half_i:, -half_j:] = np.fliplr(np.flipud(tile))
    out[0:half_i, -half_j:] = np.fliplr(tile)
    return out
def getTime(fileName):
    """Parse a 'YYYYMMDD.hhmm...' style name into a datetime.

    Dots are stripped first, so the string must start with the date digits.

    Fix: the original comprehension iterated an unparenthesised tuple
    (`for v in x[0:4], x[4:6], ...`), which is a SyntaxError on Python 3;
    the tuple is now parenthesised. Behaviour is unchanged on Python 2.
    """
    digits = fileName.replace('.', '')
    Y, M, D, h, m = [int(v) for v in (digits[0:4], digits[4:6], digits[6:8],
                                      digits[8:10], digits[10:12])]
    return datetime.datetime(Y, M, D, h, m)
# setup: load the observed radar composite and prepare the reference window.
a = dbz(dataPath=radarFolder+radarFileName).load()
a.vmin = -20.  # display range for plotting
a.vmax = 70.
a.name = 'COMPREF-2014-0519-2100z'
a.dataTime = '20140519.2100'
a.coastDataPath= coastDataPath201
a.show()
a1 = a.copy()
a1.drawRectangle(newObject=False, *rectangleRegion)
a1.name = a.name
a1.showWithCoast()
# Crop the study rectangle, taper its edges with the soft mask, then compute
# the reference global shape features that WRF members are matched against.
aa = a.getRectangle(*rectangleRegion)
aa.name = a.name + ' West of Northern Taiwan'
mask = softMask(width=5, *aa.matrix.shape)
aa.matrix *= mask
aa.show()
aa.globalShapeFeatures(lowerThreshold=lowerThreshold, upperThreshold=upperThreshold)
if doStats:
    # tuning the parameters: compute features for EVERY WRF member window and
    # histogram the per-feature differences against the radar window aa.
    T0 = getTime(a.dataTime)
    L1 = os.listdir(wrfFolder)
    filePathsFull = []
    for i1 in L1:
        L2 = os.listdir(wrfFolder+i1)
        L2 = [v for v in L2 if '.dat' in v]
        # time filter deliberately disabled here: the stats use all files
        #L2 = [v for v in L2 if (T0-getTime(v)).total_seconds()>= -6*3600 and (T0-getTime(v)).total_seconds()<= 6*3600 ] # +- 6 hours
        L2 = [wrfFolder + i1+'/' + v for v in L2]
        filePathsFull.extend(L2)
    print ',\t'.join(filePathsFull)
    R = filePathsFull
    scoresFull = []
    for r in R:
        b = dbz(dataPath=r).load()
        b.name = r.split('/')[-1]
        bb = b.getRectangle(*rectangleRegion)
        bb.matrix *= mask
        bb.name = b.name + ' West of Northern Taiwan'
        # Spot-check roughly 1% of the members visually.
        if np.random.random() < 0.01:
            b.drawRectangle(*rectangleRegion).show()
        try:
            #gsf = bb.globalShapeFeatures(lowerThreshold=0, upperThreshold=100)
            gsf = bb.globalShapeFeatures(lowerThreshold=20, upperThreshold=100)
            print '-----------------------------------------------------------'
            print bb.name
            print gsf
            scoresFull.append((r, bb.globalFeatures))
        except:
            # Best-effort: skip members whose features cannot be computed.
            print 'Error!', bb.name
    # extract stats for: skewness, kurtosis, angle, volume, position,
    keys = lf.getKeys([scoresFull[0][1]])
    keywordArrays = []
    for k in keys:
        keywordArrays.append((k, lf.getKeywordArray([v[1] for v in scoresFull], k)))
    keywordArrayForAa = []
    for k in keys:
        keywordArrayForAa.append((k, lf.getKeywordArray([aa.globalFeatures], k)))
    for k, arr in keywordArrays:
        # Shift every member's value by the radar reference value for key k.
        aaValue = [v[1] for v in keywordArrayForAa if v[0]==k][0]
        arr -= aaValue
        plt.clf()
        # NOTE(review): len(arr/20) equals len(arr); len(arr)/20 bins was probably intended.
        y, x = np.histogram(arr, len(arr/20))
        plt.plot(x[1:], y)
        plt.savefig(outputFolder+str(k) +'.png')
        plt.show(block=False)
        print '---------------------'
        print k
        print aaValue
        print arr[:10]
#
#
###############################################################################
###############################################################################
# decide upon the sigmoid width parameters
# open the relevant files and load and match
# get the wrf filepaths
# Per-feature sigmoid base values; centres keep these, widths are tightened below.
sigmoidWidths = {
    'eigenvectors': 0.1,
    'numberOfComponents': 0.05,
    'skewness': 0.3,
    'angle': 0.2,
    'highIntensityRegionVolume': 2.,
    'volume': 0.1,  # volumes are compared on a log scale
    'centroid': 0.1,
    'eigenvalues': 10.,
    'kurtosis': 0.5,
    ('HuMoments', 0): 20,
    ('HuMoments', 1): 2000,  # rough figures only for the Hu moments
    ('HuMoments', 2): 0.02,
    ('HuMoments', 3): 0.01,
    ('HuMoments', 4): 0.01,
    ('HuMoments', 5): 0.05,
    ('HuMoments', 6): 0.05,
    'rectangle': 4,
}
sigmoidCentres = {key: value for key, value in sigmoidWidths.items()}
sigmoidWidths = {key: value * 0.2 for key, value in sigmoidWidths.items()}
# Whether each feature difference is taken in log space.
takeLogs = {
    'eigenvectors': False,
    'numberOfComponents': False,
    'skewness': False,
    'angle': False,
    'highIntensityRegionVolume': True,
    'volume': True,
    'centroid': False,
    'eigenvalues': False,
    'kurtosis': False,
    ('HuMoments', 0): False,
    ('HuMoments', 1): False,
    ('HuMoments', 2): True,
    ('HuMoments', 3): True,
    ('HuMoments', 4): True,
    ('HuMoments', 5): True,
    ('HuMoments', 6): True,
    'rectangle': False,
}
# No feature is compared in relative terms.
relatives = {key: False for key in takeLogs.keys()}
# Per-feature weights folded into the matching score.
weights = {
    'volume': 1.,
    'kurtosis': 1.,
    'skewness': 1.,
    'centroid': 1.,
    'eigenvalues': 1.,
    'angle': 1.,
    'highIntensityRegionVolume': 1.,
}
def getMatchingScore(keys, feats_a, feats_b, sigmoidWidths=sigmoidWidths, sigmoidCentres=sigmoidCentres,
                     takeLogs=takeLogs, relatives=relatives, weights=weights):
    """Combine per-feature similarities into one multiplicative matching score.

    For tuple keys such as ('centroid', 0) whose parameters are registered
    under the bare head key, the lookup falls back to the head key's settings.

    Bug fix: the per-feature weight was looked up into `power` but never
    applied; it is now folded in as degrSim ** power. With the default
    weights of 1.0 used in this script the score is unchanged.
    """
    score = 1.
    for key in keys:
        # Broad except kept deliberately: a KeyError (or anything raised while
        # scoring the exact key) falls back to the head-key parameters.
        try:
            degrSim = lf.degreeOfSimilarity(key=key, L=sigmoidWidths[key], a=sigmoidCentres[key],
                                            feats_a=feats_a, feats_b=feats_b, takeLog=takeLogs[key],
                                            relative=relatives[key], verbose=False)
        except:
            key0 = key[0]
            degrSim = lf.degreeOfSimilarity(key=key, L=sigmoidWidths[key0], a=sigmoidCentres[key0],
                                            feats_a=feats_a, feats_b=feats_b, takeLog=takeLogs[key0],
                                            relative=relatives[key0], verbose=False)
        if key in weights.keys():
            power = weights[key]
        elif key[0] in weights.keys():
            power = weights[key[0]]
        else:
            power = 1
        # Apply the weight as an exponent on the per-feature similarity.
        score *= degrSim ** power
    return score
# Collect WRF member files within +-6 hours of the radar time.
T0 = getTime(a.dataTime)
L1 = os.listdir(wrfFolder)
filePaths = []
for i1 in L1:
    L2 = os.listdir(wrfFolder+i1)
    L2 = [v for v in L2 if '.dat' in v]
    #L2 = [v for v in L2 if (T0-getTime(v)).total_seconds()>0 and (T0-getTime(v)).total_seconds()< 24*1*3600 ] #1 days
    L2 = [v for v in L2 if (T0-getTime(v)).total_seconds()>= -6*3600 and (T0-getTime(v)).total_seconds()<= 6*3600 ] # +- 6 hours
    L2 = [wrfFolder + i1+'/' + v for v in L2]
    filePaths.extend(L2)
print ',\t'.join(filePaths)
# Extract global shape features for each member's study window.
feats_bb = []
for r in filePaths:
    b = dbz(dataPath=r).load()
    b.name = r.split('/')[-1]
    bb = b.getRectangle(*rectangleRegion)
    bb.matrix *= mask
    bb.name = b.name + ' West of Northern Taiwan'
    # Spot-check roughly 1% of the members visually.
    if np.random.random() < 0.01:
        b.drawRectangle(*rectangleRegion).show()
    try:
        #gsf = bb.globalShapeFeatures(lowerThreshold=0, upperThreshold=100)
        gsf = bb.globalShapeFeatures(lowerThreshold=lowerThreshold, upperThreshold=upperThreshold)
        print '-----------------------------------------------------------'
        print bb.name
        print gsf
        feats_bb.append((r, bb.globalFeatures))
    except:
        # Best-effort: skip members whose features cannot be computed.
        print 'Error!', bb.name
# Score every member against the radar reference and rank best-first.
scores = []
for filePath, feats_b in feats_bb:
    matchingScore = getMatchingScore(keys=featuresList, feats_a=aa.globalFeatures, feats_b=feats_b, sigmoidWidths=sigmoidWidths, sigmoidCentres=sigmoidCentres,
                                     takeLogs=takeLogs, relatives=relatives, weights=weights)
    scores.append((filePath, matchingScore))
scores.sort(key=lambda v:v[1], reverse=True)
print scores[:10]
# Render the top 50 matches (plus the radar reference) to the output folder.
topScores = scores[:50]
for count, (filePath, score) in enumerate(topScores):
    dz = dbz(dataPath=filePath, name=filePath+'\n'+str(score))
    dz.load()
    dz.coastDataPath = coastDataPath201
    dz.drawRectangle(*rectangleRegion, newObject=False)
    dz.drawCoast(newCopy=False)
    dz.imagePath = outputFolder + 'rank' + str(count) + "_"+filePath.split('/')[-1] +'.jpg'
    dz.vmin=a.vmin
    dz.vmax=a.vmax
    dz.saveImage()
a.drawCoast(newCopy=False)
a.drawRectangle(*rectangleRegion, newObject=False)
a.imagePath = outputFolder + a.dataPath.split('/')[-1] +'.jpg'
a.saveImage()
# Archive this script next to its outputs for reproducibility.
shutil.copyfile(dp.root+ 'python/armor/tests/'+ thisScript, outputFolder+thisScript)
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/trackingTest20140901.py",
"copies": "1",
"size": "10885",
"license": "cc0-1.0",
"hash": -3927693927953870300,
"line_mean": 34.4560260586,
"line_max": 160,
"alpha_frac": 0.5380799265,
"autogenerated": false,
"ratio": 3.2855418050105643,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9229219841877172,
"avg_score": 0.018880377926678623,
"num_lines": 307
} |
"""20 may 2014
5pm
24-16 degrees N
120-121 degrees E
Test:
in: COMPREF-10min, COMPREF0, WRFs +- 6 hours
out: matching results/ranking
cd /media/TOSHIBA\ EXT/ARMOR/python
ipython
"""
# 0. parameters, imports and set up
# 1. get the wrf filepaths
# 2. matching and scoring
# filepath parameters
doStats=False
radarFolder = '/media/TOSHIBA EXT/ARMOR/data/may14/RegriddedData/RADARCV/'
wrfFolder = '/media/TOSHIBA EXT/ARMOR/data/may14/RegriddedData/WEPS/'
outputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs2/local_features_20141210/'
radarFileName = 'COMPREF.20140519.2100.0p03.bin'
coastDataPath201='/media/TOSHIBA EXT/ARMOR/data/1may2014/RADARCV/taiwanCoast.dat'
rectangleRegion = (120,50,60,50)
featuresList = ['volume', ('centroid',0), ('centroid',1), ('skewness',0), ('skewness',1), ('kurtosis',0), ('kurtosis',1),
('eigenvalues',0), ('eigenvalues',1), 'angle']
# imports
import os, time, pickle, datetime
from armor import pattern
dbz = pattern.DBZ
np = pattern.np
plt = pattern.plt
from armor.tests import localFeaturesSensitivityTest4 as lf
# functions
def sigmoid(x):
    """Standard logistic function (this script has no NaN guard)."""
    return 1. / (1 + np.exp(-x))


def softMask(i, j, width):
    """Return an i-by-j mask fading smoothly from ~0 at the border to ~1 inside.

    One corner tile of sigmoid ramps (ramp centred at `width` pixels from
    the edge) is built and mirrored into the other three corners.
    """
    out = np.zeros((i, j))
    half_i = i // 2 + 1
    half_j = j // 2 + 1
    ramp_i = [sigmoid(1. * (v - width) / (0.1 * width)) for v in range(half_i)]
    ramp_j = [sigmoid(1. * (v - width) / (0.1 * width)) for v in range(half_j)]
    cols, rows = np.meshgrid(ramp_j, ramp_i)
    tile = rows * cols
    out[0:half_i, 0:half_j] = tile
    out[-half_i:, 0:half_j] = np.flipud(tile)
    out[-half_i:, -half_j:] = np.fliplr(np.flipud(tile))
    out[0:half_i, -half_j:] = np.fliplr(tile)
    return out
def getTime(fileName):
    """Parse a 'YYYYMMDD.hhmm...' style name into a datetime.

    Dots are stripped first, so the string must start with the date digits.

    Fix: the original comprehension iterated an unparenthesised tuple
    (`for v in x[0:4], x[4:6], ...`), which is a SyntaxError on Python 3;
    the tuple is now parenthesised. Behaviour is unchanged on Python 2.
    """
    digits = fileName.replace('.', '')
    Y, M, D, h, m = [int(v) for v in (digits[0:4], digits[4:6], digits[6:8],
                                      digits[8:10], digits[10:12])]
    return datetime.datetime(Y, M, D, h, m)
# setup: load the observed radar composite and prepare the reference window.
a = dbz(dataPath=radarFolder+radarFileName).load()
a.vmin = -20.  # display range for plotting
a.vmax = 70.
a.name = 'COMPREF-2014-0519-2100z'
a.dataTime = '20140519.2100'
a.coastDataPath= coastDataPath201
a.show()
a1 = a.copy()
a1.drawRectangle(newObject=False, *rectangleRegion)
a1.name = a.name
a1.showWithCoast()
# Crop the study rectangle, taper its edges with the soft mask, then compute
# the reference global shape features that WRF members are matched against.
aa = a.getRectangle(*rectangleRegion)
aa.name = a.name + ' West of Northern Taiwan'
mask = softMask(width=5, *aa.matrix.shape)
aa.matrix *= mask
aa.show()
aa.globalShapeFeatures(lowerThreshold=20, upperThreshold=100)
if doStats:
    # tuning the parameters: compute features for EVERY WRF member window and
    # histogram the per-feature differences against the radar window aa.
    T0 = getTime(a.dataTime)
    L1 = os.listdir(wrfFolder)
    filePathsFull = []
    for i1 in L1:
        L2 = os.listdir(wrfFolder+i1)
        L2 = [v for v in L2 if '.dat' in v]
        # time filter deliberately disabled here: the stats use all files
        #L2 = [v for v in L2 if (T0-getTime(v)).total_seconds()>= -6*3600 and (T0-getTime(v)).total_seconds()<= 6*3600 ] # +- 6 hours
        L2 = [wrfFolder + i1+'/' + v for v in L2]
        filePathsFull.extend(L2)
    print ',\t'.join(filePathsFull)
    R = filePathsFull
    scoresFull = []
    for r in R:
        b = dbz(dataPath=r).load()
        b.name = r.split('/')[-1]
        bb = b.getRectangle(*rectangleRegion)
        bb.matrix *= mask
        bb.name = b.name + ' West of Northern Taiwan'
        # Spot-check roughly 1% of the members visually.
        if np.random.random() < 0.01:
            b.drawRectangle(*rectangleRegion).show()
        try:
            #gsf = bb.globalShapeFeatures(lowerThreshold=0, upperThreshold=100)
            gsf = bb.globalShapeFeatures(lowerThreshold=20, upperThreshold=100)
            print '-----------------------------------------------------------'
            print bb.name
            print gsf
            scoresFull.append((r, bb.globalFeatures))
        except:
            # Best-effort: skip members whose features cannot be computed.
            print 'Error!', bb.name
    # extract stats for: skewness, kurtosis, angle, volume, position,
    keys = lf.getKeys([scoresFull[0][1]])
    keywordArrays = []
    for k in keys:
        keywordArrays.append((k, lf.getKeywordArray([v[1] for v in scoresFull], k)))
    keywordArrayForAa = []
    for k in keys:
        keywordArrayForAa.append((k, lf.getKeywordArray([aa.globalFeatures], k)))
    for k, arr in keywordArrays:
        # Shift every member's value by the radar reference value for key k.
        aaValue = [v[1] for v in keywordArrayForAa if v[0]==k][0]
        arr -= aaValue
        plt.clf()
        # NOTE(review): len(arr/20) equals len(arr); len(arr)/20 bins was probably intended.
        y, x = np.histogram(arr, len(arr/20))
        plt.plot(x[1:], y)
        plt.savefig(outputFolder+str(k) +'.png')
        plt.show(block=False)
        print '---------------------'
        print k
        print aaValue
        print arr[:10]
#
#
###############################################################################
###############################################################################
# decide upon the sigmoid width parameters
# open the relevant files and load and match
# get the wrf filepaths
# Per-feature sigmoid centres for the similarity scoring.  The raw values
# below become the centres; the widths are one fifth of the centres.
sigmoidWidths = {
    'eigenvectors'              : 0.1,
    'numberOfComponents'        : 0.05,
    'skewness'                  : 0.3,
    'angle'                     : 0.2,
    'highIntensityRegionVolume' : 1.,    # didn't test it this time
    'volume'                    : 0.1,   # taking log first
    'centroid'                  : 0.1,
    'eigenvalues'               : 10.,
    'kurtosis'                  : 0.5,
    ('HuMoments', 0)            : 20,
    ('HuMoments', 1)            : 2000,  # can't get accurate figures for these
    ('HuMoments', 2)            : 0.02,
    ('HuMoments', 3)            : 0.01,
    ('HuMoments', 4)            : 0.01,
    ('HuMoments', 5)            : 0.05,
    ('HuMoments', 6)            : 0.05,
    'rectangle'                 : 4,
}
sigmoidCentres = dict(sigmoidWidths)                                   # centres = raw scales
sigmoidWidths = dict((k, v * 0.2) for k, v in sigmoidWidths.items())   # widths = centres / 5
# Which features are compared on a log scale.
takeLogs = {
    'eigenvectors'              : False,
    'numberOfComponents'        : False,
    'skewness'                  : False,
    'angle'                     : False,
    'highIntensityRegionVolume' : True,
    'volume'                    : True,   # taking log first
    'centroid'                  : False,
    'eigenvalues'               : False,
    'kurtosis'                  : False,
    ('HuMoments', 0)            : False,
    ('HuMoments', 1)            : False,  # can't get accurate figures for these
    ('HuMoments', 2)            : True,
    ('HuMoments', 3)            : True,
    ('HuMoments', 4)            : True,
    ('HuMoments', 5)            : True,
    ('HuMoments', 6)            : True,
    'rectangle'                 : False,
}
# No feature is currently compared in relative terms.
relatives = dict.fromkeys(takeLogs, False)
def getMatchingScore(keys, feats_a, feats_b, sigmoidWidths=sigmoidWidths, sigmoidCentres=sigmoidCentres,
                     takeLogs=takeLogs, relatives=relatives):
    """Combined similarity of two feature dicts.

    Multiplies the per-feature degrees of similarity over `keys`, so a single
    badly-mismatched feature drags the whole score down.

    keys             -- feature names: plain strings or (family, index)
                        tuples such as ('HuMoments', 3)
    feats_a, feats_b -- feature dicts as produced by globalShapeFeatures()
    Returns the product of the individual similarity scores (float).
    """
    score = 1.
    for key in keys:
        # Fall back to the family name (key[0]) for tuple keys that have no
        # parameter entry of their own.  BUGFIX: the original wrapped the
        # whole call in a bare `except:`, so any unrelated error raised
        # *inside* degreeOfSimilarity was silently retried with key[0]
        # (and usually crashed obscurely for string keys).
        paramKey = key if key in sigmoidWidths else key[0]
        degrSim = lf.degreeOfSimilarity(key=key, L=sigmoidWidths[paramKey], a=sigmoidCentres[paramKey],
                                        feats_a=feats_a, feats_b=feats_b, takeLog=takeLogs[paramKey],
                                        relative=relatives[paramKey],
                                        verbose=False)
        score *= degrSim
    return score
# Gather the WRF model frames within +-6 hours of the observation, score
# each against the observed region, and save images of the 10 best matches.
T0 = getTime(a.dataTime)
L1 = os.listdir(wrfFolder)
filePaths = []
for i1 in L1:
    L2 = os.listdir(wrfFolder+i1)
    L2 = [v for v in L2 if '.dat' in v]
    #L2 = [v for v in L2 if (T0-getTime(v)).total_seconds()>0 and (T0-getTime(v)).total_seconds()< 24*1*3600 ] #1 days
    L2 = [v for v in L2 if (T0-getTime(v)).total_seconds()>= -6*3600 and (T0-getTime(v)).total_seconds()<= 6*3600 ]        # +- 6 hours
    L2 = [wrfFolder + i1+'/' + v for v in L2]
    filePaths.extend(L2)
print ',\t'.join(filePaths)
# compute shape features for every candidate model frame
feats_bb = []
for r in filePaths:
    b = dbz(dataPath=r).load()
    b.name = r.split('/')[-1]
    bb = b.getRectangle(*rectangleRegion)
    bb.matrix *= mask
    bb.name = b.name + ' West of Northern Taiwan'
    # spot-check: display roughly 1% of the frames
    if np.random.random() < 0.01:
        b.drawRectangle(*rectangleRegion).show()
    try:
        #gsf = bb.globalShapeFeatures(lowerThreshold=0, upperThreshold=100)
        gsf = bb.globalShapeFeatures(lowerThreshold=20, upperThreshold=100)
        print '-----------------------------------------------------------'
        print bb.name
        print gsf
        feats_bb.append((r, bb.globalFeatures))
    # NOTE(review): bare except silently drops frames on ANY error.
    except:
        print 'Error!', bb.name
# score every candidate against the observation and rank them
# NOTE(review): featuresList is not defined in this file -- presumably
# injected by the caller; confirm.
scores = []
for filePath, feats_b in feats_bb:
    matchingScore = getMatchingScore(keys=featuresList, feats_a=aa.globalFeatures, feats_b=feats_b, sigmoidWidths=sigmoidWidths, sigmoidCentres=sigmoidCentres,
                                    takeLogs=takeLogs, relatives=relatives)
    scores.append((filePath, matchingScore))
scores.sort(key=lambda v:v[1], reverse=True)
print scores[:10]
# save annotated images of the ten best-matching model frames
topScores = scores[:10]
for count, (filePath, score) in enumerate(topScores):
    dz = dbz(dataPath=filePath, name=filePath+'\n'+str(score))
    dz.load()
    dz.coastDataPath = coastDataPath201
    dz.drawRectangle(*rectangleRegion, newObject=False)
    dz.drawCoast(newCopy=False)
    dz.imagePath = outputFolder + 'rank' + str(count) + "_"+filePath.split('/')[-1] +'.jpg'
    dz.vmin=a.vmin
    dz.vmax=a.vmax
    dz.saveImage()
# finally save the annotated observation itself
a.drawCoast(newCopy=False)
a.drawRectangle(*rectangleRegion, newObject=False)
# NOTE(review): filePath here is the value leaked from the last loop
# iteration, so the observation image is named after the last-ranked model
# file -- confirm this is intentional.
a.imagePath = outputFolder + filePath.split('/')[-1] +'.jpg'
a.saveImage()
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/localFeaturesTest20141210.py",
"copies": "1",
"size": "10201",
"license": "cc0-1.0",
"hash": 224209239756510560,
"line_mean": 35.0945454545,
"line_max": 160,
"alpha_frac": 0.5228899128,
"autogenerated": false,
"ratio": 3.26432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42872099127999996,
"avg_score": null,
"num_lines": null
} |
# 20.
print_log('\n20. Prover creates Proof for Proof Request\n')
prover_requested_creds = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {
'attr1_referent': {
'cred_id': prover_cred_for_attr1['referent'],
'revealed': True
}
},
'requested_predicates': {
'predicate1_referent': {
'cred_id': prover_cred_for_predicate1['referent']
}
}
})
print_log('Requested Credentials for Proving: ')
pprint.pprint(json.loads(prover_requested_creds))
prover_schema_id = json.loads(cred_offer_json)['schema_id']
schemas_json = json.dumps({prover_schema_id: json.loads(issuer_schema_json)})
cred_defs_json = json.dumps({cred_def_id: json.loads(cred_def_json)})
proof_json = await anoncreds.prover_create_proof(prover_wallet_handle,
proof_req_json,
prover_requested_creds,
link_secret_id,
schemas_json,
cred_defs_json,
"{}")
proof = json.loads(proof_json)
assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent']["raw"] | {
"repo_name": "peacekeeper/indy-sdk",
"path": "docs/how-tos/negotiate-proof/python/step4.py",
"copies": "2",
"size": "1566",
"license": "apache-2.0",
"hash": -611209338153532400,
"line_mean": 49.5483870968,
"line_max": 92,
"alpha_frac": 0.4380587484,
"autogenerated": false,
"ratio": 4.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.59380587484,
"avg_score": null,
"num_lines": null
} |
''' 20-plot_SL_coverage_restriction.py
===============================================
AIM: Plots the Stray Light coverage restriction in %.
INPUT: files: - <orbit_id>_misc/ : files from 12-<...>.py
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_<SL_angle>figures/ : <orbit_id>_<threshold_obs_time>_<max_mag><_SAA?>_SL_coverage.png/pdf/eps
CMD: python 20-plot_SL_coverage_restriction.py
ISSUES: <NONE KNOWN>
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
	 * <orbit_id>_misc/ --> storage of data
REMARKS: <NONE>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
from mpl_toolkits.basemap import Basemap
import matplotlib.cm as cm
from resources.routines import *
from resources.TimeStepping import *
import resources.constants as const
import resources.figures as figures
import time
from matplotlib import dates
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# orbit_id
orbit_id = 301
# Show plots
show = True
# Save the picture ?
save = True
# Fancy plots ?
fancy = True
# Observation-time threshold used to pick the input files
# (presumably minutes, matching the upstream 12-*.py script -- confirm).
threshold_obs_time = 78
# Magnitudes to plot
mag = np.array([10.1,11.1,12.,12.1,12.2])
# one legend label per magnitude above (same order)
labels = [r'$10$',r'$11$',r'$12\ \mathrm{processed}$',r'$12$',r'$12\ \mathrm{No\ SAA}$']
###########################################################################
### INITIALISATION
if fancy: figures.set_fancy()
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
############################################################################
### LOADS AND PLOTS
fig = plt.figure()
ax = plt.subplot(111)
for mag_max,label in zip(mag,labels):
input_fname = 'cumultative_SL_forbidden_%d_mag_%02.1f.dat' % (threshold_obs_time, mag_max)
print 'Loading %s' % input_fname
data = np.loadtxt(folder_misc+input_fname)
plt.plot(data[1:,0],data[1:,1],label=label)
# convert epoch to matplotlib float format
t_ini, junk, minute_ini, junk = orbit2times(data[1,0],orbit_id)
junk, junk, junk, minute_end = orbit2times(data[-1,0],orbit_id)
labels = np.linspace(minute_ini, minute_end, 13) * 60. + const.timestamp_2018_01_01
labels = labels[1:]
plt.xlim([data[1,0], data[-1,0]])
ax.xaxis.set_major_locator(MultipleLocator((data[-1,0]-data[1,0]+1)/12))
plt.legend(loc=9,prop={'size':14}, mode="expand", ncol=5)
plt.ylabel(r'$\%\mathrm{\ of\ observable\ sky\ for\ which }\frac{F_\star}{F_{SL}} > T$')
# to human readable date
pre = map (time.gmtime, labels)
labels = map(figures.format_day, pre)
ax.set_xticklabels(labels)
fig.autofmt_xdate()
plt.grid(True)
plt.show()
# Save plot
if save:
fname = 'cumultative_SL_forbidden_%d.dat' % (threshold_obs_time)
figures.savefig(folder_figures+fname, fig, fancy)
print 'saved as %s' % folder_figures+fname
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "20_plot_SL_coverage_restriction.py",
"copies": "1",
"size": "3070",
"license": "bsd-3-clause",
"hash": 3698788077424903700,
"line_mean": 26.1681415929,
"line_max": 115,
"alpha_frac": 0.6214983713,
"autogenerated": false,
"ratio": 3.057768924302789,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.911784262599398,
"avg_score": 0.012284933921761675,
"num_lines": 113
} |
# 20. Valid Parentheses - LeetCode
# https://leetcode.com/problems/valid-parentheses/description/
# '(', ')', '{', '}', '[' and ']'
# stack base
# Submit 1: "]" edge case
# Submit 2: "[(({})}]" WA
## Logic Failed: ord("(") - ord("}") = -85 < 3
# print ord("(") - ord(")")
# print ord("(") - ord("]") x
# print ord("(") - ord("}") x
# print ord("[") - ord(")") x
# print ord("[") - ord("]")
# print ord("[") - ord("}") x
# print ord("{") - ord(")") x
# print ord("{") - ord("]") x
# print ord("{") - ord("}")
# Submit 3: 25 mins, AC
def isValid(s):
    """
    Return True iff every bracket in s is closed by the matching bracket
    type in the correct (LIFO) order.

    :type s: str
    :rtype: bool
    """
    # Map each closer to its opener.  This replaces the original
    # ord()-difference trick, which (as the submission history above shows)
    # is easy to get wrong and hard to audit; an explicit table is O(1) too.
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            # closing bracket: the stack must hold its matching opener
            if not stack or stack.pop() != pairs[ch]:
                return False
    # valid only if every opener was closed
    return not stack
isValid("()") | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/020_valid-parentheses.py",
"copies": "1",
"size": "1075",
"license": "mit",
"hash": 7792605488298479000,
"line_mean": 24.619047619,
"line_max": 62,
"alpha_frac": 0.4027906977,
"autogenerated": false,
"ratio": 3.2282282282282284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4131018925928228,
"avg_score": null,
"num_lines": null
} |
# 210. Course Schedule II
#
# There are a total of n courses you have to take, labeled from 0 to n - 1.
#
# Some courses may have prerequisites, for example to take course 0 you have to first take course 1,
# which is expressed as a pair: [0,1]
#
# Given the total number of courses and a list of prerequisite pairs,
# return the ordering of courses you should take to finish all courses.
#
# There may be multiple correct orders, you just need to return one of them.
# If it is impossible to finish all courses, return an empty array.
#
# For example:
#
# 2, [[1,0]]
# There are a total of 2 courses to take.
# To take course 1 you should have finished course 0. So the correct course order is [0,1]
#
# 4, [[1,0],[2,0],[3,1],[3,2]]
# There are a total of 4 courses to take.
# To take course 3 you should have finished both courses 1 and 2.
# Both courses 1 and 2 should be taken after you finished course 0.
# So one correct course order is [0,1,2,3]. Another correct ordering is[0,2,1,3].
#
# Note:
# The input prerequisites is a graph represented by a list of edges,
# not adjacency matrices. Read more about how a graph is represented.
# You may assume that there are no duplicate edges in the input prerequisites.
# click to show more hints.
#
# Hints:
# This problem is equivalent to finding the topological order in a directed graph.
# If a cycle exists, no topological ordering exists and therefore it will be impossible to take all courses.
# Topological Sort via DFS - A great video tutorial (21 minutes) on Coursera
# explaining the basic concepts of Topological Sort.
# Topological sort could also be done via BFS.
class Solution(object):
    """Topological ordering of courses (LeetCode 210), BFS and DFS variants."""

    def findOrderBFS(self, numCourses, prerequisites):
        """Kahn's algorithm (BFS on in-degrees).

        Repeatedly removes courses that have no outstanding prerequisites.
        Returns one valid ordering, or [] if the prerequisites contain a cycle.

        BUGFIX: this method was also named findOrder, so the second
        definition below shadowed it and made it unreachable.  Renamed so
        both implementations stay usable; findOrder (DFS) keeps the public
        name and its behavior is unchanged.

        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: List[int]
        """
        degrees = [0] * numCourses                 # in-degree of each course
        children = [[] for _ in range(numCourses)]
        for pair in prerequisites:
            degrees[pair[0]] += 1
            children[pair[1]].append(pair[0])
        courses = set(range(numCourses))
        flag = True
        ans = []
        while flag and len(courses):
            flag = False
            removeList = []
            for x in courses:
                if degrees[x] == 0:                # no unmet prerequisites left
                    for child in children[x]:
                        degrees[child] -= 1
                    removeList.append(x)
                    flag = True
            for x in removeList:
                ans.append(x)
                courses.remove(x)
        # a cycle leaves some courses unremovable -> no valid ordering
        return ans if len(courses) == 0 else []

    def findOrder(self, numCourses, prerequisites):
        """DFS-based topological sort.  Returns [] when a cycle exists.

        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: List[int]
        """
        graph = [[] for _ in range(numCourses)]
        for pair in prerequisites:
            graph[pair[1]].append(pair[0])
        # 0 == unknown, 1 == visiting (on the current DFS path), 2 == visited
        v = [0] * numCourses
        ans = []
        for i in range(numCourses):
            if self.dfs(i, v, graph, ans):         # True means a cycle was found
                return []
        ans.reverse()                              # reversed post-order == topological order
        return ans

    def dfs(self, cur, v, graph, ans):
        """Return True iff a cycle is reachable from `cur`; appends post-order to ans."""
        if v[cur] == 1:                            # back edge -> cycle
            return True
        if v[cur] == 2:                            # already fully explored
            return False
        v[cur] = 1
        for t in graph[cur]:
            if self.dfs(t, v, graph, ans):
                return True
        v[cur] = 2
        ans.append(cur)                            # post-order append
        return False
if __name__ == '__main__':
    # expected: one valid ordering of the 4 courses, e.g. [0, 2, 1, 3]
    print Solution().findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]])
    # cyclic prerequisites -> no valid ordering, prints []
    print Solution().findOrder(2, [[1, 0], [0, 1]])
"repo_name": "gengwg/leetcode",
"path": "210_course_schedule_ii.py",
"copies": "1",
"size": "3590",
"license": "apache-2.0",
"hash": -7844058311122188000,
"line_mean": 33.5192307692,
"line_max": 108,
"alpha_frac": 0.5922005571,
"autogenerated": false,
"ratio": 3.6520854526958293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9739894439374004,
"avg_score": 0.0008783140843650701,
"num_lines": 104
} |
# 211. Add and Search Word - Data structure design
#
# Design a data structure that supports the following two operations:
#
# void addWord(word)
# bool search(word)
#
# search(word) can search a literal word or a regular expression string containing
# only letters a-z or .. A . means it can represent any one letter.
#
# For example:
#
# addWord("bad")
# addWord("dad")
# addWord("mad")
# search("pad") -> false
# search("bad") -> true
# search(".ad") -> true
# search("b..") -> true
# Note:
# You may assume that all words are consist of lowercase letters a-z.
#
# click to show hint.
#
# You should be familiar with how a Trie works. If not, please work on this problem: Implement Trie (Prefix Tree) first.
#
# http://bookshadow.com/weblog/2015/05/16/leetcode-add-and-search-word-data-structure-design/
class TrieNode(object):
    """A single trie node: children keyed by letter, plus an end-of-word flag."""
    def __init__(self):
        self.children = {}   # letter -> TrieNode
        self.isWord = False  # True iff a stored word ends at this node


class WordDictionary(object):
    """Trie-backed dictionary with '.' single-letter wildcard search (LeetCode 211)."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = TrieNode()

    def addWord(self, word):
        """
        Adds a word into the data structure.
        :type word: str
        :rtype: void
        """
        node = self.root
        for letter in word:
            if letter not in node.children:      # idiom: `not in` over get(...) is None
                node.children[letter] = TrieNode()  # add a new trie node
            node = node.children[letter]            # descend one level
        node.isWord = True                          # mark end of word

    def search(self, word):
        """
        Returns if the word is in the data structure.
        A word could contain the dot character '.' to represent any one letter.
        :type word: str
        :rtype: bool
        """
        return self.find(self.root, word)

    def find(self, node, word):
        """DFS helper: match `word` starting at `node`; '.' matches any one letter."""
        if word == '':                     # pattern consumed: match iff a word ends here
            return node.isWord
        if word[0] == '.':                 # wildcard: try every child
            # (the original also tested `if x and ...` on the key, which is
            # redundant -- keys are non-empty strings and always truthy)
            for child in node.children.values():
                if self.find(child, word[1:]):
                    return True
            return False
        child = node.children.get(word[0])
        if child is not None:
            return self.find(child, word[1:])
        return False
# Your WordDictionary object will be instantiated and called as such:
# obj = WordDictionary()
# obj.addWord(word)
# param_2 = obj.search(word)
if __name__ == '__main__':
    obj = WordDictionary()
    obj.addWord("bad")
    obj.addWord("dad")
    # expected: False ("pad" was never added), then True (".ad" matches "bad"/"dad")
    print obj.search("pad")
    print obj.search(".ad")
| {
"repo_name": "gengwg/leetcode",
"path": "211_add_and_search_word.py",
"copies": "1",
"size": "2673",
"license": "apache-2.0",
"hash": 7863006037851692000,
"line_mean": 28.0543478261,
"line_max": 120,
"alpha_frac": 0.5914702581,
"autogenerated": false,
"ratio": 3.7754237288135593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48668939869135597,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.