id (stringlengths, 1-8) | text (stringlengths, 6-1.05M) | dataset_id (stringclasses, 1 value)
---|---|---|
6687478 | <filename>examples/function.py
from superprocessor import cmd
print(cmd(
'function foo { echo "Hello $1\n" ; } ; foo "jonas" ; '
)) | StarcoderdataPython |
273253 | <filename>online/admin.py
from django.contrib import admin
from .models import *
from csvexport.actions import csvexport
# Register your models here.
class item(admin.ModelAdmin):
list_display = ('title','price','category','brand','status','label','image')
search_fields = ['title','description']
list_filter = ('status','label','category','brand')
list_per_page = 10
actions = [csvexport]
admin.site.register(Item,item)
class Categorys(admin.ModelAdmin):
list_display = ('name','slug','image')
search_fields = ['name']
list_per_page = 10
admin.site.register(Category,Categorys)
admin.site.register(Slider)
admin.site.register(Ad)
admin.site.register(Brand)
admin.site.register(Cart)
admin.site.register(Contact) | StarcoderdataPython |
8016124 | <reponame>reticulatingspline/WebHooks
###
# Copyright (c) 2014, spline
# All rights reserved.
#
#
###
from supybot.test import *
class WebHooksTestCase(PluginTestCase):
plugins = ('WebHooks',)
def testWebHooks(self):
pass
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| StarcoderdataPython |
1614424 | <gh_stars>0
from .readDB import *
from nltk.corpus import wordnet
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from .preProcessing import preProcessing
import random
def synonyms(query, query_processed) :
candidate = [[] for i in range(len(query_processed))]
count=0
for w in query:
w=preProcessing(w)
if not w:
continue
w=w[0]
if w not in collection_prob.keys() :
for syn in wordnet.synsets(query[count]):
for l in syn.lemmas():
if preProcessing(l.name())[0] in collection_prob.keys() :
candidate[count].append(preProcessing(l.name())[0])
else :
candidate[count].append(w)
if not candidate[count]:
candidate[count].append(w)
count+=1
return candidate
def randomPick(candidate) :
query_list=[]
itr=10
mul=1
for c in candidate :
mul*=len(c)
if mul<10 :
itr=mul
for i in range(itr) :
query=[]
for synonyms in candidate :
tmp=random.choice(synonyms)
query.append(tmp)
query_list.append(query)
return query_list
# q=input('Enter search query: ')
# q_processed=preProcessing(q)
# q=q.split()
# candidate=synonyms(q, q_processed)
# print(candidate)
# query_list=randomPick(candidate)
# print(query_list) | StarcoderdataPython |
4976467 | # import storage_module.config_data as config_data
# import universal_module.utils
# import logging
# import sys
# logger = logging.getLogger("Main")
# sys.excepthook = universal_module.utils.log_exception_handler
class RAMStorage:
def __init__(self):
self.total_messages_read: int = 0
self.total_messages_sent: int = 0
self.dp_last_user_count: int = -1
| StarcoderdataPython |
9640362 | <reponame>thenetcircle/dino
import sys
from subprocess import PIPE
from subprocess import Popen
from flask import Flask
from flask import make_response
from flask_restful import Api
from flask_restful import Resource
app = Flask(__name__)
api = Api(app)
UNIT = sys.argv[4]
class Logs(Resource):
def __init__(self):
pass
def get(self):
headers = {'Content-Type': 'text/html'}
p = Popen(["journalctl", "-u", UNIT, "-o", "cat", "-n", "250"], stdout=PIPE)
lines = "".join([str(line, "utf-8").replace("\n", "<br />") for line in p.stdout])
return make_response(lines, 200, headers)
api.add_resource(Logs, '/')
| StarcoderdataPython |
5078279 | <filename>wyeusk/wyeusk.py<gh_stars>0
"""Main module."""
import bs4 as bs
import urllib.request
sauce = urllib.request.urlopen('https://www.fishingpassport.co.uk/salmon-catches').read()
soup = bs.BeautifulSoup(sauce, 'html.parser')
| StarcoderdataPython |
1723366 | <reponame>JohnyEngine/CNC
import ocl
import pyocl
import camvtk
import time
import vtk
import datetime
import math
def drawEdge(myscreen, a, b):
myscreen.addActor(camvtk.Sphere(center=(a.x,a.y,a.z), radius=0.0351, color=camvtk.green));
myscreen.addActor(camvtk.Sphere(center=(b.x,b.y,b.z), radius=0.0351, color=camvtk.red));
myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
if __name__ == "__main__":
print ocl.version()
myscreen = camvtk.VTKScreen()
camvtk.drawOCLtext(myscreen)
camvtk.drawArrows(myscreen,center=(-1,-2,0))
a=ocl.Point(0,1.7,-0.6)
b=ocl.Point(0,0.11,0.3)
drawEdge(myscreen, a, b)
diameter=0.4
length=1
# spherical cutter and cylinder
s1 = camvtk.Sphere(center=(a.x,a.y,a.z), radius=diameter/2, color=camvtk.lgreen)
s2 = camvtk.Sphere(center=(b.x,b.y,b.z), radius=diameter/2, color=camvtk.pink)
s1.SetOpacity(1)
s2.SetOpacity(1)
myscreen.addActor(s1)
myscreen.addActor(s2)
# tube
cyltube = camvtk.Tube( p1=(a.x,a.y,a.z) , p2=(b.x,b.y,b.z), radius=diameter/2, color=camvtk.yellow )
cyltube.SetOpacity(0.2)
myscreen.addActor( cyltube )
# Cylinder
a = a + ocl.Point(1,0,0)
b = b + ocl.Point(1,0,0)
drawEdge(myscreen, a, b)
cir1 = camvtk.Circle(center=(a.x,a.y,a.z) , radius=diameter/2, color=camvtk.lgreen, resolution=50 )
cir1.SetOpacity(1)
myscreen.addActor(cir1)
cir2 = camvtk.Circle(center=(b.x,b.y,b.z) , radius=diameter/2, color=camvtk.pink, resolution=50 )
cir2.SetOpacity(1)
myscreen.addActor(cir2)
# draw lines along the elliptic tube
# Toroid
a = a + ocl.Point(1,0,0)
b = b + ocl.Point(1,0,0)
drawEdge(myscreen, a, b)
tor1 = camvtk.Toroid(r1=diameter/2, r2=diameter/6, center=(a.x,a.y,a.z), rotXYZ=(0,0,0), color=camvtk.lgreen)
tor1.SetOpacity(1)
myscreen.addActor(tor1)
tor2 = camvtk.Toroid(r1=diameter/2, r2=diameter/6, center=(b.x,b.y,b.z), rotXYZ=(0,0,0), color=camvtk.pink)
tor2.SetOpacity(1)
myscreen.addActor(tor2)
# Cone
a = a + ocl.Point(1,0,0)
b = b + ocl.Point(1,0,0)
drawEdge(myscreen, a, b)
con1 = camvtk.Cone(center=(a.x,a.y,a.z), radius=diameter/2, height = 0.3, color=camvtk.lgreen )
myscreen.addActor(con1)
con2 = camvtk.Cone(center=(b.x,b.y,b.z), radius=diameter/2, height = 0.3, color=camvtk.pink )
myscreen.addActor(con2)
print "done."
myscreen.camera.SetPosition(4, 3, 2)
myscreen.camera.SetFocalPoint(0, 0, 0)
myscreen.render()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
#for n in range(1,18):
# t.SetText("OpenCAMLib " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# myscreen.camera.Azimuth( 2 )
# time.sleep(0.1)
# myscreen.render()
# w2if.Modified()
# lwr.SetFileName("frames/tc"+ ('%04d' % n)+".png")
#lwr.Write()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
| StarcoderdataPython |
3571138 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
DIGITS = '0123456789ABCDEF'
def decimal_to_base2(dec):
"""Convert decimal number to binary number by iteration.
Time complexity: O(d/2).
Space complexity: O(d/2).
"""
# Push remainders into stack.
rem_stack = []
while dec > 0:
dec, rem = divmod(dec, 2)
rem_stack.append(rem)
# Pop remainders and concat them into binary number string.
bin_num = ''
while rem_stack:
bin_num += str(rem_stack.pop())
return bin_num
def decimal_to_base_iter(dec, base):
"""Convert decimal number to base 2 ~ 16 by iteration.
Time complexity: O(d/b).
Space complexity: O(d/b).
"""
# Push remainders into stack.
rem_stack = []
while dec > 0:
dec, rem = divmod(dec, base)
rem_stack.append(rem)
# Pop remainders and concat them into base number string.
base_num = ''
while rem_stack:
base_num += DIGITS[rem_stack.pop()]
return base_num
def _decimal_to_base_recur_util(dec, base, rem_stack):
#
if dec < base:
rem_stack.append(dec)
else:
dec, rem = divmod(dec, base)
rem_stack.append(rem)
_decimal_to_base_recur_util(dec, base, rem_stack)
def decimal_to_base_recur(dec, base):
"""Convert decimal number to base 2 ~ 16 by recursion with stack.
Time complexity: O(d/b).
Space complexity: O(d/b).
"""
# Push remainders into stack.
rem_stack = []
_decimal_to_base_recur_util(dec, base, rem_stack)
# Pop remainders and concat them into base number string.
base_num = ''
while rem_stack:
base_num += DIGITS[rem_stack.pop()]
return base_num
def main():
import time
# Binary: (37)_10 = (100101)_2
dec = 37
start_time = time.time()
print('By iter w/ base 2: {} -> {}'
.format(dec, decimal_to_base2(dec)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By iter w/ general base 2: {} -> {}'
.format(dec, decimal_to_base_iter(dec, 2)))
print('Time: {}'.format(time.time() - start_time))
# Hexadecimal: (1728)_10 = (6C0)_16
dec = 1728
start_time = time.time()
print('By iter: {} -> {}'
.format(dec, decimal_to_base_iter(dec, 16)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By recur: {} -> {}'
.format(dec, decimal_to_base_recur(dec, 16)))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| StarcoderdataPython |
6488873 | """
Test mvGenericRegrid class
$Id: testMvGenericRegrid.py 2354 2012-07-11 15:28:14Z pletzer $
"""
import cdat_info
import cdms2
import numpy
import unittest
import regrid2
import ESMP
import sys
PLOT = False
if PLOT:
import matplotlib.pylab as pl
HAS_MPI = False
try:
from mpi4py import MPI
HAS_MPI = True
except:
pass
class TestMvGenericRegrid(unittest.TestCase):
"""
All tests interpolate to the same grid
"""
def setUp(self):
"""
Set up the grids to pass to mvGenericRegrid
"""
self.doPlots = False
self.clt = cdms2.open(cdat_info.get_sampledata_path() + '/clt.nc')('clt')[0, ...]
# Convert to curvilinear
cds, cds_forBounds = [], []
cds.append(self.clt.getLatitude())
cds.append(self.clt.getLongitude())
cds_forBounds.append(self.clt.getLatitude())
cds_forBounds.append(self.clt.getLongitude())
self.cltGrid, nDims = regrid2.gsRegrid.makeCurvilinear(cds)
self.cltInterp = numpy.array(self.clt) * 0.0 + self.clt.missing_value
self.cltInterpInterp = numpy.array(self.clt) * 0.0 + self.clt.missing_value
# Salinity check
f = cdms2.open(cdat_info.get_sampledata_path() + \
'/so_Omon_ACCESS1-0_historical_r1i1p1_185001-185412_2timesteps.nc')
self.so = f('so')
soGrid = []
soGrid.append(self.so.getLatitude())
soGrid.append(self.so.getLongitude())
self.soGrid = soGrid
self.soInterp = numpy.array(self.clt) * 0.0 + self.so.missing_value
self.tol = 1e2
self.rank = 0
self.size = 1
if HAS_MPI:
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
def test0_mvGeneric_dstMaskFloat_salinity(self):
"""
Check that the number of returned masked values is ok
"""
ro = regrid2.GenericRegrid(self.soGrid, self.cltGrid, dtype = self.so.dtype,
regridMethod='linear', regridTool='libcf')
ro.computeWeights()
ro.apply(self.so[0,0,:,:], self.soInterp, missingValue = self.so.missing_value)
self.assertLess(abs(self.so[0,0,...].mask.sum()/float(self.so[0,0,...].size) - 0.35),
0.05)
soInterpMask = numpy.array(self.soInterp == self.so.missing_value,
numpy.int64)
self.assertLess(abs(soInterpMask.sum()/float(soInterpMask.size) - 0.39),
0.05)
def test1_LibCF_clt(self):
"""
Out and back, same grid using mvGenericRegrid -> LibCF, linear
"""
ro = regrid2.GenericRegrid(self.cltGrid, self.cltGrid, self.clt.dtype,
regridMethod='linear', regridTool='libcf')
ro.computeWeights()
ro.apply(self.clt, self.cltInterp)
ro.apply(self.cltInterp, self.cltInterpInterp)
nCell = numpy.array(self.clt.shape).prod()
avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
self.assertLess(avgDiffInterp, self.tol)
self.assertLess(avgDiffInterpInterp, self.tol)
if self.rank == 0:
avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
self.assertLess(avgDiffInterp, self.tol)
self.assertLess(avgDiffInterpInterp, self.tol)
if PLOT:
pl.figure(1)
pl.subplot(3,2,1)
pl.pcolor(self.cltGrid[1], self.cltGrid[0], self.clt)
pl.title('clt')
pl.colorbar()
pl.subplot(3,2,2)
pl.pcolor(self.cltGrid[1], self.cltGrid[0], self.cltInterp,
vmin = 0, vmax = 100)
pl.title('Interp')
pl.colorbar()
pl.subplot(3,2,3)
pl.pcolor(self.cltGrid[1], self.cltGrid[0], self.cltInterpInterp,
vmin = 0, vmax = 100)
pl.title('InterpInterp')
pl.colorbar()
pl.subplot(3,2,4)
pl.pcolor(self.cltGrid[1],self.cltGrid[0], self.clt-self.cltInterp)
pl.colorbar()
pl.title('clt-cltInterp')
pl.subplot(3,2,5)
pl.pcolor(self.cltGrid[1],self.cltGrid[0], self.clt-self.cltInterpInterp)
pl.colorbar()
pl.title('clt-cltInterpInterp')
pl.subplot(3,2,6)
pl.pcolor(self.cltGrid[1],self.cltGrid[0], self.cltGrid[1])
pl.colorbar()
pl.title('Longitude')
per = 0
string0 = "LibCF coordSys = Bilinear, "
string1 = "periodicity = %d, " % (per)
string2 = "MPI COMM size = %d" % (self.size)
pl.suptitle(string0 + string1 + string2)
def test2_ESMF_clt(self):
"""
Out and back, same grid using mvGenericRegrid -> ESMF, linear
"""
per = 1
coordSys = 'spherical degrees'
grid = [self.cltGrid[0], self.cltGrid[1]]
ro = regrid2.GenericRegrid(grid, grid,
dtype=self.clt.dtype,
regridMethod='linear',
regridTool = 'esMf', periodicity = per,
coordSys = coordSys)
ro.computeWeights()
ro.apply(numpy.array(self.clt), self.cltInterp, rootPe = 0)
self.cltInterp = self.comm.bcast(self.cltInterp, root = 0)
ro.apply(self.cltInterp, self.cltInterpInterp, rootPe = 0)
nCell = numpy.array(self.clt.shape).prod()
if self.rank == 0:
avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
self.assertLess(avgDiffInterp, self.tol)
if self.size > 1:
self.assertLess(avgDiffInterpInterp, 600)
else:
self.assertLess(avgDiffInterpInterp, self.tol)
if PLOT:
pl.figure(2)
pl.subplot(3,2,1)
pl.pcolor(grid[1], grid[0], self.clt)
pl.title('clt')
pl.colorbar()
pl.subplot(3,2,2)
pl.pcolor(grid[1], grid[0], self.cltInterp,
vmin = 0, vmax = 100)
pl.title('Interp')
pl.colorbar()
pl.subplot(3,2,3)
pl.pcolor(grid[1], grid[0], self.cltInterpInterp,
vmin = 0, vmax = 100)
pl.title('InterpInterp')
pl.colorbar()
pl.subplot(3,2,4)
pl.pcolor(grid[1],grid[0], self.clt-self.cltInterp)
pl.colorbar()
pl.title('clt-cltInterp')
pl.subplot(3,2,5)
pl.pcolor(grid[1],grid[0], self.clt-self.cltInterpInterp)
pl.colorbar()
pl.title('clt-cltInterpInterp')
pl.subplot(3,2,6)
pl.pcolor(grid[1],grid[0], grid[1])
pl.colorbar()
pl.title('Longitude')
string0 = "ESMF coordSys = %s, " % coordSys
string1 = "periodicity = %d, " % (per)
string2 = "MPI COMM size = %d" % (self.size)
pl.suptitle(string0 + string1 + string2)
def test3_ESMF_Masking(self):
"""
Out, ESMF, Masking in __init__, Bilinear
"""
per = 1
coordSys = 'spherical degrees'
grid = [self.cltGrid[0], self.cltGrid[1]]
mask = numpy.array(self.clt > 90, dtype = numpy.int32)
ro = regrid2.GenericRegrid(grid, grid, self.clt.dtype,
regridMethod = 'linear',
regridTool = 'esMf', periodicity = per,
coordSys = coordSys, srcGridMask = mask)
ro.computeWeights()
ro.apply(numpy.array(self.clt), self.cltInterp, rootPe = 0)
nCell = numpy.array(self.clt.shape).prod()
if self.rank == 0:
avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
# we're expecting some very large values because of the masking
#self.assertLess(avgDiffInterp, 50)
if PLOT:
pl.figure(3)
pl.subplot(1, 3, 1)
pl.pcolor(grid[1], grid[0], self.clt)
pl.title('clt')
pl.colorbar()
pl.subplot(1, 3, 2)
pl.pcolor(grid[1], grid[0], self.cltInterp,
vmin = 0, vmax = 100)
pl.title('Interp')
pl.colorbar()
pl.subplot(1, 3, 3)
pl.pcolor(grid[1], grid[0], self.clt-self.cltInterp)
pl.colorbar()
pl.title('clt-cltInterp')
string0 = "ESMF coordSys = %s, " % coordSys
string1 = "periodicity = %d, " % (per)
string2 = "MPI COMM size = %d" % (self.size)
pl.suptitle(string0 + string1 + string2)
def Xtest4_ESMF_Conservative_2D_clt(self):
"""
Out, ESMF, Conservative metric
"""
per = 1
coordSys = 'spherical degrees'
grid = [self.cltGrid[0], self.cltGrid[1]]
mask = numpy.array(self.clt > 90, dtype = numpy.int32)
newclt = numpy.ones(self.clt.shape) * self.clt
newclt[numpy.where(self.clt>75)] = self.clt.missing_value
ro = regrid2.GenericRegrid(grid, grid, self.clt.dtype,
regridMethod = 'conserv',
regridTool = 'esMf',
periodicity = per,
coordSys = coordSys)
ro.computeWeights()
print dir(ro.computeWeights())
ro.apply(numpy.array(newclt), self.cltInterp,
srcMissingValue = self.clt.missing_value, rootPe = 0)
nCell = numpy.array(self.clt.shape).prod()
if self.rank == 0:
avgDiffInterp = (abs(self.clt - self.cltInterp)).sum()/float(nCell)
avgDiffInterpInterp = abs(self.clt - self.cltInterpInterp).sum()/float(nCell)
#self.assertLess(avgDiffInterp, 50)
if PLOT:
pl.figure(4)
pl.subplot(3,2,1)
pl.pcolor(grid[1], grid[0], self.clt)
pl.title('clt')
pl.colorbar()
pl.subplot(3,2,2)
pl.pcolor(grid[1], grid[0], (newclt == self.clt.missing_value)+mask,
vmin = 0, vmax = 2)
pl.title('newclt == self.clt.missing_value')
pl.colorbar()
pl.subplot(3,2,3)
pl.pcolor(grid[1], grid[0], self.cltInterp)
mn, mx = self.cltInterp.min(), self.cltInterp.max()
pl.title('newMask %5.2f %5.2f' % (mn,mx))
pl.colorbar()
pl.subplot(3,2,4)
pl.pcolor(grid[1],grid[0], mask+self.cltInterp)
pl.colorbar()
pl.title('mask')
pl.subplot(3,2,5)
pl.pcolor(grid[1],grid[0], mask)
pl.colorbar()
pl.title('(newclt==self.clt.missing_value) - self.cltInterp')
pl.subplot(3,2,6)
pl.pcolor(grid[1],grid[0], newclt == self.clt.missing_value)
pl.colorbar()
pl.title('newclt-cltInterp')
string0 = "ESMF coordSys = %s, " % coordSys
string1 = "periodicity = %d, " % (per)
string2 = "MPI COMM size = %d" % (self.size)
pl.suptitle(string0 + string1 + string2)
def test5_LibCF_LevelTime(self):
"""
Interpolate over one level/time
"""
f = cdms2.open(cdat_info.get_sampledata_path() + '/clt.nc')
clt = f('clt')
v = f('v')
# mask
srcGridMask = numpy.array(v[0,0,...] == v.missing_value, numpy.int32)
# v onto the ctl grid
srcGrd, srcNDims = regrid2.gsRegrid.makeCurvilinear([v.getLatitude(),
v.getLongitude()])
dstGrd, dstNDims = regrid2.gsRegrid.makeCurvilinear([clt.getLatitude(),
clt.getLongitude()])
ro = regrid2.GenericRegrid(srcGrd, dstGrd,
clt.dtype,
regridMethod = 'linear',
regridTool = 'esmf',
periodicity = 1,
coordSys = 'cart',
srcGridMask = srcGridMask)
ro.computeWeights()
vInterp = numpy.ones(clt.shape[-2:],
v.dtype) * v.missing_value
ro.apply(numpy.array(v[0,0,...]), vInterp, rootPe = 0)
print 'min/max of v: %f %f' % (v.min(), v.max())
print 'min/max of vInterp: %f %f' % (vInterp.min(), vInterp.max())
if PLOT:
pl.figure()
pl.subplot(1,2,1)
pl.pcolor(srcGrd[1], srcGrd[0], v[0, 0,...], vmin=-20, vmax=20)
pl.title('test5: v[0, 0,...]')
pl.colorbar()
pl.subplot(1,2,2)
pl.pcolor(dstGrd[1], dstGrd[0], vInterp, vmin=-20, vmax=20)
pl.title('test5: vInterp')
pl.colorbar()
def Xtest6_ESMF_Conserve_LevelTime_clt(self):
"""
Interpolate over level/time in addition to lat-lon
"""
f = cdms2.open(cdat_info.get_sampledata_path() + '/clt.nc')
clt = f('clt')
v = f('v')
# mask
srcGridMask = numpy.array(v[0,0,...] == v.missing_value, numpy.int32)
# v onto the ctl grid
srcGrd, srcNDims = regrid2.gsRegrid.makeCurvilinear([v.getLatitude(),
v.getLongitude()])
dstGrd, dstNDims = regrid2.gsRegrid.makeCurvilinear([clt.getLatitude(),
clt.getLongitude()])
ro = regrid2.GenericRegrid(srcGrd, dstGrd,
regridMethod = 'conserve',
regridTool = 'esmp',
periodicity = 1,
srcGridMask = srcGridMask)
ro.computeWeights()
vInterp = numpy.ones(list(v.shape[:-2]) + list(clt.shape[-2:]),
v.dtype) * v.missing_value
ro.apply(numpy.array(v), vInterp, rootPe = 0)
print 'min/max of v: %f %f' % (v.min(), v.max())
print 'min/max of vInterp: %f %f' % (vInterp.min(), vInterp.max())
if PLOT:
nTimes = v.shape[0]
nLevels = v.shape[1]
for el in range(nTimes):
for k in range(nLevels):
pl.figure()
pl.subplot(1,2,1)
pl.pcolor(srcGrd[1], srcGrd[0], v[el, k,...], vmin=-20, vmax=20)
pl.title('test6: v[%d, %d,...]' % (el, k))
pl.colorbar()
pl.subplot(1,2,2)
pl.pcolor(dstGrd[1], dstGrd[0], vInterp[el, k,...], vmin=-20, vmax=20)
pl.title('test6: vInterp[%d, %d,...]' % (el, k))
pl.colorbar()
if __name__ == '__main__':
print "" # Spacer
ESMP.ESMP_Initialize()
suite = unittest.TestLoader().loadTestsFromTestCase(TestMvGenericRegrid)
unittest.TextTestRunner(verbosity = 1).run(suite)
if PLOT: pl.show()
| StarcoderdataPython |
1807238 | <filename>tests/features/steps/expanded.py<gh_stars>1000+
# -*- coding: utf-8
"""Steps for behavioral style tests are defined in this module.
Each step is defined by the string decorating it. This string is used
to call the step in the "*.feature" file.
"""
from __future__ import unicode_literals
import wrappers
from behave import when, then
from textwrap import dedent
@when('we prepare the test data')
def step_prepare_data(context):
"""Create table, insert a record."""
context.cli.sendline('drop table if exists a;')
wrappers.wait_prompt(context)
context.cli.sendline(
'create table a(x integer, y real, z numeric(10, 4));')
wrappers.expect_pager(context, 'CREATE TABLE\r\n', timeout=2)
context.cli.sendline('''insert into a(x, y, z) values(1, 1.0, 1.0);''')
wrappers.expect_pager(context, 'INSERT 0 1\r\n', timeout=2)
@when('we set expanded {mode}')
def step_set_expanded(context, mode):
"""Set expanded to mode."""
context.cli.sendline('\\' + 'x {}'.format(mode))
wrappers.expect_exact(context, 'Expanded display is', timeout=2)
wrappers.wait_prompt(context)
@then('we see {which} data selected')
def step_see_data(context, which):
"""Select data from expanded test table."""
if which == 'expanded':
wrappers.expect_pager(
context,
dedent('''\
-[ RECORD 1 ]-------------------------\r
x | 1\r
y | 1.0\r
z | 1.0000\r
SELECT 1\r
'''),
timeout=1)
else:
wrappers.expect_pager(
context,
dedent('''\
+-----+-----+--------+\r
| x | y | z |\r
|-----+-----+--------|\r
| 1 | 1.0 | 1.0000 |\r
+-----+-----+--------+\r
SELECT 1\r
'''),
timeout=1)
| StarcoderdataPython |
26583 | """An environment to skip k frames and return a max between the last two."""
import gym
import numpy as np
class MaxFrameskipEnv(gym.Wrapper):
"""An environment to skip k frames and return a max between the last two."""
def __init__(self, env, skip: int=4) -> None:
"""
Initialize a new max frame skip env around an existing environment.
Args:
env: the environment to wrap around
skip: the number of frames to skip (i.e. hold an action for)
Returns:
None
"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2, *env.observation_space.shape), dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
# the total reward from `skip` frames having `action` held on them
total_reward = 0.0
done = None
# perform the action `skip` times
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
total_reward += reward
# assign the buffer with the last two frames
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
# break the loop if the game terminated
if done:
break
# Note that the observation on the done=True frame doesn't matter
# (because the next state isn't evaluated when done is true)
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
# explicitly define the outward facing API of this module
__all__ = [MaxFrameskipEnv.__name__]
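# Minimal usage sketch (added for illustration): wrap an existing Gym
# environment so that each action is held for `skip` frames. The environment
# id "PongNoFrameskip-v4" is an assumption -- any env that returns uint8
# image observations and the classic 4-tuple step API can be wrapped this way.
if __name__ == "__main__":
    env = MaxFrameskipEnv(gym.make("PongNoFrameskip-v4"), skip=4)
    obs = env.reset()
    obs, total_reward, done, info = env.step(env.action_space.sample())
    print(obs.shape, total_reward, done)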
| StarcoderdataPython |
1610325 | from pathlib import Path
import moonleap.resource.props as P
from moonleap import extend, rule
from moonleap.verbs import has
from titan.react_pkg.reactapp import ReactApp
from .props import get_context
@rule("react-app", has, "routes:module")
def react_app_has_routes_module(react_app, routes_module):
routes_module.add_template_dir(Path(__file__).parent / "templates", get_context)
@extend(ReactApp)
class ExtendReactApp:
routes_module = P.child(has, "routes:module")
| StarcoderdataPython |
3263111 | """Git tools for Python."""
from pathlib import Path, PurePosixPath
from datetime import datetime
import json
from copy import copy
from git import Repo, InvalidGitRepositoryError
# ============================ Custom exceptions =============================
class DirtyRepo(Exception):
"""Specific exception indicating some changes in repo are not committed."""
pass
class NotInTree(Exception):
"""Specific exception indicating file is not in commit tree."""
pass
# =========================== Private subroutines ============================
def _pathify(path):
"""Transforms str or partial path in fully resolved path object."""
pathabs = Path(path).resolve() # absolute path of filename
if not pathabs.exists():
raise FileNotFoundError(f'Path {pathabs} does not exist')
return pathabs
def _make_iterable(x):
"""Transforms non-iterables into a tuple, but keeps iterables unchanged."""
try:
iter(x)
except TypeError:
return x,
else:
return x
# ============================= Public functions =============================
def path_in_tree(path, commit):
"""Return True if path belongs to the commit's working tree, else False.
Note that if the path is the root directory of the git repository (where
the .git is located), the function also returns True even if one could
argue that the root directory is technically not in the repo's tree.
INPUTS
------
- path: str or path object of folder or file
- commit: *gitpython* commit object
OUTPUT
------
bool (True if path is in working tree, False if not)
"""
pathabs = _pathify(path)
rootabs = Path(commit.repo.working_dir).resolve() # path of root of repo
localpath = pathabs.relative_to(rootabs) # relative path of file in repo
localname = str(PurePosixPath(localpath)) # gitpython uses Unix names
if localname == '.': # Means that the entered path is the repo's root
return True
try:
commit.tree[localname]
except KeyError: # in this case the file is not in the commit
return False
else:
return True
def current_commit_hash(path='.', checkdirty=True, checktree=True):
"""Return HEAD commit hash corresponding to path if it's in a git repo.
INPUT
-----
- path: str or path object of folder/file. Default: current working dir.
- checkdirty: bool, if True exception raised if repo has uncommitted changes.
- checktree: bool, if True exception raised if path/file not in repo's
working tree and path is not the root directory of the repo.
OUTPUT
------
- str of the commit's hash name.
"""
p = _pathify(path)
repo = Repo(p, search_parent_directories=True)
if checkdirty and repo.is_dirty():
raise DirtyRepo("Dirty repo, please commit recent changes first.")
commit = repo.head.commit
if checktree and not path_in_tree(path, commit):
raise NotInTree("Path or file not in working tree.")
return str(commit)
def repo_tags(path='.'):
"""Return dict of all {'commit hash': 'tag name'} in git repo.
INPUT
-----
- path: str or path object of folder/file. Default: current working dir.
OUTPUT
------
dict {'commit hash': 'tag name'} (both key and value are str).
"""
p = _pathify(path)
repo = Repo(p, search_parent_directories=True)
return {str(tag.commit): str(tag) for tag in repo.tags}
def path_status(path='.'):
"""Current (HEAD) commit hashes, status (dirty or clean), and potential tag.
Slightly higher level compared to current_commit_hash, as it returns a
dictionary with a variety of information (status, hash, tag)
INPUT
-----
- path: str or path object of folder/file. Default: current working dir.
OUTPUT
------
Dictionary with keys 'hash', 'status' (clean/dirty), 'tag' (if exists)
"""
info = {}
# get commit hash and check repo status (dirty or clean) -----------------
try:
cch = current_commit_hash(path)
except DirtyRepo:
cch = current_commit_hash(path, checkdirty=False)
status = 'dirty'
else:
status = 'clean'
info['hash'] = cch
info['status'] = status
# check if tag associated with commit ------------------------------------
commits_with_tags = repo_tags(path)
if cch in commits_with_tags:
info['tag'] = commits_with_tags[cch]
return info
# ================== Functions for status of python modules ==================
def module_status(module, dirty_warning=False, notag_warning=False,
nogit_ok=False, nogit_warning=False):
"""Get status info (current hash, dirty/clean repo, tag) of module(s).
Parameters
----------
- module or list/iterable of modules (each must belong to a git repository)
- dirty_warning: if True, prints a warning if some git repos are dirty.
- notag_warning: if True, prints a warning if some git repos don't have tags
- nogit_ok: if True, if some modules are not in a git repo, simply get
their version number. If False (default), raise an error.
- nogit_warning: if some modules are not in a git repo and nogit_ok is True,
print a warning when this happens.
Output
------
Dict with module name as keys, and a dict {hash:, status:, tag:} as values
"""
modules = _make_iterable(module)
mods = {} # dict {module name: dict of module info}
for module in modules:
name = module.__name__
try:
info = path_status(module.__file__)
except InvalidGitRepositoryError:
if nogit_ok:
tag = 'v' + module.__version__
info = {'status': 'not a git repository', 'tag': tag}
else:
raise InvalidGitRepositoryError(f'{module} not a git repo')
mods[name] = info
# Manage warnings if necessary -------------------------------------------
if dirty_warning:
dirty_modules = [module for module, info in mods.items()
if info['status'] == 'dirty']
if len(dirty_modules) > 0:
msg = '\nWarning: these modules have dirty git repositories: '
msg += ', '.join(dirty_modules)
print(msg)
if notag_warning:
tagless_modules = [module for module, info in mods.items()
if 'tag' not in info]
if len(tagless_modules) > 0:
msg = '\nWarning: these modules are missing a tag: '
msg += ', '.join(tagless_modules)
print(msg)
if nogit_ok and nogit_warning:
nogit_modules = [module for module, info in mods.items()
if info['status'] == 'not a git repository']
if len(nogit_modules) > 0:
msg = '\nWarning: these modules are not in a git repository: '
msg += ', '.join(nogit_modules)
print(msg)
return mods
def save_metadata(file, info=None, module=None, dirty_warning=False,
notag_warning=False, nogit_ok=False, nogit_warning=False):
"""Save metadata (info dict) into json file, and add git commit & time info.
Parameters
----------
- file: str or path object of .json file to save data into.
- info: dict of info
- module: module or iterable (e.g. list) of modules with git info to save.
- dirty_warning: if True, prints a warning if some git repos are dirty.
- notag_warning: if True, prints a warning if some git repos don't have tags
- nogit_ok: if True, if some modules are not in a git repo, simply get
their version number. If False (default), raise an error.
- nogit_warning: if some modules are not in a git repo and nogit_ok is True,
print a warning when this happens.
"""
metadata = copy(info) if info is not None else {}
metadata['time (utc)'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
# Info on commit hashes of homemade modules used -------------------------
if module is not None:
module_info = module_status(module,
dirty_warning=dirty_warning,
notag_warning=notag_warning,
nogit_ok=nogit_ok, nogit_warning=nogit_warning)
metadata['code version'] = module_info
# Write to file ----------------------------------------------------------
# Note: below, the encoding and ensure_ascii options are for signs like °
with open(file, 'w', encoding='utf8') as f:
json.dump(metadata, f, indent=4, ensure_ascii=False)
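# Usage sketch (illustrative): save run metadata together with the git or
# version status of a module. The choice of numpy and the output filename are
# assumptions; any imported module that is in a git repo, or that exposes
# __version__ when nogit_ok=True, will do.
if __name__ == "__main__":
    import numpy
    save_metadata('metadata.json',
                  info={'experiment': 'demo-run'},
                  module=numpy,
                  dirty_warning=True,
                  nogit_ok=True)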
| StarcoderdataPython |
9695182 | """
Normalized regional hypsometric interpolation
=============================================
There are many ways of interpolating gaps in a dDEM.
In the case of glaciers, one very useful fact is that elevation change generally varies with elevation.
This means that if valid pixels exist in a certain elevation bin, their values can be used to fill other pixels in the same approximate elevation.
Filling gaps by elevation is the main basis of "hypsometric interpolation approaches", of which there are many variations of.
One problem with simple hypsometric approaches is that they may not work glaciers with different elevation ranges and scales.
Let's say we have two glaciers: one gigantic reaching from 0-1000 m, and one small from 900-1100 m.
Usually in the 2000s, glaciers thin rapidly at the bottom, while they may be neutral or only thin slightly in the top.
If we extrapolate the hypsometric signal of the gigantic glacier to use on the small one, it may seem like the smaller glacier has almost no change whatsoever.
This may be right, or it may be catastrophically wrong!
Normalized regional hypsometric interpolation solves the scale and elevation range problems in one go. It:
1. Calculates a regional signal using the weighted average of each glacier's normalized signal:
a. The glacier's elevation range is scaled from 0-1 to be elevation-independent.
b. The glaciers elevation change is scaled from 0-1 to be magnitude-independent.
c. A weight is assigned by the amount of valid pixels (well-covered large glaciers gain a higher weight)
2. Re-scales that signal to fit each glacier once determined.
The consequence is a much more accurate interpolation approach that can be used in a multitude of glacierized settings.
"""
# sphinx_gallery_thumbnail_number = 2
import matplotlib.pyplot as plt
import numpy as np
import xdem
import xdem.misc
import geoutils as gu
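# %%
# A minimal numpy sketch of the normalization described in step 1 of the
# docstring above, on dummy arrays. The array contents and names are
# illustrative assumptions, not xdem API; the real work is done by
# :func:`xdem.volume.get_regional_hypsometric_signal` below.
_elev = np.linspace(200.0, 900.0, 50)                # per-glacier elevations (valid pixels)
_dh = np.linspace(-8.0, -0.5, 50)                    # corresponding elevation change
_norm_elev = (_elev - _elev.min()) / np.ptp(_elev)   # elevation scaled to 0-1
_norm_dh = (_dh - _dh.min()) / np.ptp(_dh)           # elevation change scaled to 0-1
_weight = np.count_nonzero(np.isfinite(_dh))         # weight = number of valid pixels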
# %%
# **Example files**
dem_2009 = xdem.DEM(xdem.examples.get_path("longyearbyen_ref_dem"))
dem_1990 = xdem.DEM(xdem.examples.get_path("longyearbyen_tba_dem_coreg"))
glacier_outlines = gu.Vector(xdem.examples.get_path("longyearbyen_glacier_outlines"))
# Rasterize the glacier outlines to create an index map.
# Stable ground is 0, the first glacier is 1, the second is 2, etc.
glacier_index_map = glacier_outlines.rasterize(dem_2009)
plt_extent = [
dem_2009.bounds.left,
dem_2009.bounds.right,
dem_2009.bounds.bottom,
dem_2009.bounds.top,
]
# %%
# To test the method, we can generate a semi-random mask to assign nans to glacierized areas.
# Let's remove 30% of the data.
np.random.seed(42)
random_nans = (xdem.misc.generate_random_field(dem_1990.shape, corr_size=5) > 0.7) & (glacier_index_map > 0)
plt.imshow(random_nans)
plt.show()
# %%
# The normalized hypsometric signal shows the tendency for elevation change as a function of elevation.
# The magnitude may vary between glaciers, but the shape is generally similar.
# Normalizing by both elevation and elevation change, and then re-scaling the signal to every glacier, ensures that it is as accurate as possible.
# **NOTE**: The hypsometric signal does not need to be generated separately; it will be created by :func:`xdem.volume.norm_regional_hypsometric_interpolation`.
# Generating it first, however, allows us to visualize and validate it.
ddem = (dem_2009 - dem_1990).data
ddem_voided = np.where(random_nans, np.nan, ddem)
signal = xdem.volume.get_regional_hypsometric_signal(
ddem=ddem_voided,
ref_dem=dem_2009.data,
glacier_index_map=glacier_index_map,
)
plt.fill_between(signal.index.mid, signal["sigma-1-lower"], signal["sigma-1-upper"], label="Spread (+- 1 sigma)")
plt.plot(signal.index.mid, signal["w_mean"], color="black", label="Weighted mean")
plt.ylabel("Normalized elevation change")
plt.xlabel("Normalized elevation")
plt.legend()
plt.show()
# %%
# The signal can now be used (or simply estimated again if not provided) to interpolate the DEM.
ddem_filled = xdem.volume.norm_regional_hypsometric_interpolation(
voided_ddem=ddem_voided,
ref_dem=dem_2009.data,
glacier_index_map=glacier_index_map,
regional_signal=signal
)
plt.figure(figsize=(8, 5))
plt.imshow(ddem_filled, cmap="coolwarm_r", vmin=-10, vmax=10, extent=plt_extent)
plt.colorbar()
plt.show()
# %%
# We can plot the difference between the actual and the interpolated values, to validate the method.
difference = (ddem_filled - ddem).squeeze()[random_nans].filled(np.nan)
median = np.nanmedian(difference)
nmad = xdem.spatialstats.nmad(difference)
plt.title(f"Median: {median:.2f} m, NMAD: {nmad:.2f} m")
plt.hist(difference, bins=np.linspace(-15, 15, 100))
plt.show()
# %%
# As we see, the median is close to zero, while the :ref:`spatial_stats_nmad` varies slightly more.
# This is expected, as the regional signal is good for multiple glaciers at once, but it cannot account for difficult local topography and meteorological conditions.
# It is therefore highly recommended for large regions; just don't zoom in too close!
| StarcoderdataPython |
357680 | <reponame>ojarva/home-info-display<gh_stars>1-10
from .models import PrintLabel, get_serialized_labels
from django.conf import settings
from django.http import HttpResponse
from django.utils import timezone
from django.views.generic import View
from reportlab.pdfgen import canvas
from reportlab.pdfbase.pdfmetrics import stringWidth as reportlab_stringWidth
import cups
import datetime
import json
import redis
redis_instance = redis.StrictRedis()
# TODO: move to local_settings.py
CUPS_IP = "192.168.1.112"
class GetLabels(View):
def get(self, request, *args, **kwargs):
return HttpResponse(get_serialized_labels(), content_type="application/json")
class CancelJob(View):
def delete(self, request, *args, **kwargs):
cups.setServer(CUPS_IP)
cups_instance = cups.Connection()
# This does not return any useful information
cups_instance.cancelJob(int(kwargs.get("job_id")))
return HttpResponse("ok")
class GetPrinters(View):
def get(self, request, *args, **kwargs):
cups.setServer(CUPS_IP)
cups_instance = cups.Connection()
items = cups_instance.getPrinters()
return HttpResponse(json.dumps(items), content_type="application/json")
class GetStatus(View):
def get(self, request, *args, **kwargs):
cups.setServer(CUPS_IP)
cups_instance = cups.Connection()
try:
items = cups_instance.getJobs(requested_attributes=[
"job-id", "job-media-sheets-completed", "time-at-creation"])
except:
return HttpResponse("error")
for key in items:
items[key]["time-at-creation"] = datetime.datetime.fromtimestamp(
items[key]["time-at-creation"]).isoformat()
return HttpResponse(json.dumps(items), content_type="application/json")
class PrintLabels(View):
def post(self, request, *args, **kwargs):
c = canvas.Canvas("printjob.pdf", pagesize=(260.787402, 108))
c.showPage()
c.save()
return HttpResponse("ok")
| StarcoderdataPython |
4811606 | <reponame>iamsuman/iv
class Solution:
def distribute_candies(self, candies, num_people):
#TODO solve candies = kn*(kn+1)/2
n = 0
arr = [0] * num_people
i = 0
while candies > 0:
n += 1
arr[i] += n if n < candies else candies
candies -= n
i += 1
if i == num_people:
i = 0
return arr
candies = 7; num_people = 4
candies = 10; num_people = 3
candies = 10**9; num_people = 1
s = Solution()
print(s.distribute_candies(candies, num_people))
| StarcoderdataPython |
12807674 | #!/usr/bin/env python3
for number in range(0, 100):
if number % 15 == 0:
print('FizzBuzz')
elif number % 5 == 0:
print('Buzz')
elif number % 3 == 0:
print('Fizz')
else:
print(number)
| StarcoderdataPython |
9680288 | <filename>birthday_greetings/utils.py
"""Utilities for birthday_greetings."""
import datetime
import json
import typing
from birthday_greetings.reader import Reader
from birthday_greetings.sender import Sender
def parse_date(date_str: str) -> datetime.date:
"""Parse a date in Y/m/d format."""
return datetime.datetime.strptime(date_str, "%Y/%m/%d").date()
def parse_extra_data(
extra_data: typing.Optional[str],
) -> typing.Mapping[str, typing.Any]:
"""Parse extra data (JSON)."""
if extra_data:
return typing.cast(typing.Mapping[str, typing.Any], json.loads(extra_data))
return {}
def send_messages(date: typing.Union[datetime.date, datetime.date], reader: Reader, sender: Sender) -> int:
"""Send messages to friends whose birthdays are on the given date."""
friends_with_birthdays = reader.read_todays_birthdays(date)
return sender.send_many(friends_with_birthdays)
| StarcoderdataPython |
5051037 | <filename>tests/r/test_benefits.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.benefits import benefits
def test_benefits():
"""Test module benefits.py by downloading
benefits.csv and testing shape of
extracted data has 4877 rows and 18 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = benefits(test_path)
try:
assert x_train.shape == (4877, 18)
except:
shutil.rmtree(test_path)
raise
| StarcoderdataPython |
5089668 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-01 16:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("studies", "0031_merge_20170828_1227")]
operations = [
migrations.RemoveField(model_name="study", name="max_age"),
migrations.RemoveField(model_name="study", name="min_age"),
migrations.AddField(
model_name="study",
name="max_age_months",
field=models.IntegerField(default=0, null=True),
),
migrations.AddField(
model_name="study",
name="max_age_years",
field=models.IntegerField(default=0, null=True),
),
migrations.AddField(
model_name="study",
name="min_age_months",
field=models.IntegerField(default=0, null=True),
),
migrations.AddField(
model_name="study",
name="min_age_years",
field=models.IntegerField(default=0, null=True),
),
]
| StarcoderdataPython |
11269574 | <reponame>LGordon2/pyresttest<gh_stars>0
#!/usr/bin/env python
import sys
from pyresttest import resttest
resttest.command_line_run(sys.argv[1:]) | StarcoderdataPython |
5197886 | """
This module implements the Needleman-Wunsch Algorithm.
The backtracing step is modified to start with maximum score and extend
to the top-left and down to bottom-right. This follows one path that
contains the maximal matching sequence.
This algorithm also implements an X-Drop termination condition.
"""
import os
import argparse
from pyaligner import Sequence, Scorer, DPMatrix
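# A hedged, standalone sketch of the idea (the real implementation lives in
# pyaligner.DPMatrix and is not shown here): a plain Needleman-Wunsch fill
# where cells scoring more than `xdrop` below the best score seen so far are
# pruned. The default scores mirror the CLI defaults parsed below, e.g.
# _nw_xdrop_sketch("GATTACA", "GCATGCU") returns (best score, exit score).
def _nw_xdrop_sketch(a, b, match=1, mismatch=-1, gap=-1, xdrop=7):
    rows, cols = len(a) + 1, len(b) + 1
    neg_inf = float("-inf")
    dp = [[neg_inf] * cols for _ in range(rows)]
    dp[0][0] = 0
    for j in range(1, cols):          # gap-only first row
        dp[0][j] = j * gap
    for i in range(1, rows):          # gap-only first column
        dp[i][0] = i * gap
    best = 0
    for i in range(1, rows):
        for j in range(1, cols):
            score = match if a[i - 1] == b[j - 1] else mismatch
            cell = max(dp[i - 1][j - 1] + score,   # diagonal (match/mismatch)
                       dp[i - 1][j] + gap,         # gap in b
                       dp[i][j - 1] + gap)         # gap in a
            if cell < best - xdrop:
                continue              # X-drop: prune cells far below the best score
            dp[i][j] = cell
            best = max(best, cell)
    return best, dp[rows - 1][cols - 1]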
if __name__ == "__main__":
description = "Smith-Waterman Algorithm with X-Drop"
parser = argparse.ArgumentParser( description = description )
parser.add_argument( "input1", type = argparse.FileType(),
help = "Sequence File 1" )
parser.add_argument( "input2", type = argparse.FileType(),
help = "Sequence File 2" )
parser.add_argument( "-x", "--xdrop", type = int, default = 7,
help = "X-Drop Value" )
parser.add_argument( "-m", "--match-score", type = int, default = 1,
help = "Match Score" )
parser.add_argument( "-i", "--mismatch-score", type = int, default = -1,
help = "Mismatch Score" )
parser.add_argument( "-g", "--gap-score", type = int, default = -1,
help = "Gap Score" )
parser.add_argument( "-s", "--semiglobal", action = "store_true",
help = "Only run the semi-global alignment." )
parser.add_argument( "-v", "--verbosity", action = "count", default = 0,
help = "Level 1: print match score," + \
"2: print match sequences, 3: print dp matrix." )
args = parser.parse_args()
seq1 = Sequence( args.input1.read() )
seq2 = Sequence( args.input2.read() )
scorer = Scorer( args.match_score, args.mismatch_score,
args.gap_score, args.xdrop )
dp_matrix = DPMatrix( seq1, seq2, scorer, args.semiglobal )
if args.verbosity == 0:
print( dp_matrix.calc_alignment_score() )
if args.verbosity >= 1:
print( "Exit Alignment Score:", dp_matrix.calc_alignment_score() )
print( "Best Alignment Score:", dp_matrix.max_score )
match_seqs = dp_matrix.calc_match_seq()
print( "Number of matches:", match_seqs[2] )
if args.verbosity >= 2:
print( "First Matched Sequence:", match_seqs[0] )
print( "Second Matched Sequence:", match_seqs[1] )
if args.verbosity >= 3:
print()
print( dp_matrix )
| StarcoderdataPython |
3295518 | import re
from math import floor
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch, cm
from reportlab.lib.pagesizes import letter
from reportlab.pdfbase.pdfmetrics import stringWidth
import json
class ExamGenerator:
"""
Notes on layout:
Bottom : y==0
Left:x==0
"""
LAYOUTS={
"3num20":{
"block_width":(3.6*inch),
"block_height":0.9*inch,
"margin":0.6*inch,
"y_top":9.5*inch,
"font_size":25
},
"3num30":{
"block_width":(2.5*inch),
"block_height":0.9*inch,
"margin":0.5*inch,
"y_top":9.5*inch,
"font_size":15
},
"3num50":{
"block_width":(2.5*inch),
"block_height":0.65*inch,
"margin":0.5*inch,
"y_top":9.5*inch,
"font_size":15
},
"horizontal20":{
"block_width":(3.6*inch),
"block_height":0.9*inch,
"margin":0.6*inch,
"y_top":9.5*inch,
"font_size":25
},
"horizontal30":{
"block_width":(2.5*inch),
"block_height":0.9*inch,
"margin":0.5*inch,
"y_top":9.5*inch,
"font_size":18
},
"horizontal52":{
"block_width":(1.915*inch),
"block_height":0.7*inch,
"margin":0.41*inch,
"y_top":9.5*inch,
"font_size":14
},
"vertical20":{
"block_width":(1.5*inch),
"block_height":2.2*inch,
"margin":0.25*inch,
"y_top":9.5*inch,
"line_width":0.35*inch,
"font_size":25
},
"vertical30":{
"block_width":(1.2*inch),
"block_height":1.8*inch,
"margin":0.5*inch,
"y_top":9.5*inch,
"line_width":0.35*inch,
"font_size":20
},
"vertical49":{
"block_width":(1.0*inch),
"block_height":1.3*inch,
"margin":0.5*inch,
"y_top":9.5*inch,
"line_width":0.3*inch,
"font_size":15
},
"vertical56":{
"block_width":(1.0*inch),
"block_height":1.1*inch,
"margin":0.5*inch,
"y_top":9.5*inch,
"line_width":0.22*inch,
"font_size":12
}
}
FONT="Helvetica-Bold"
PAGE_HEIGHT=11*inch
PAGE_WIDTH=8.5*inch
def __init__(self):
pass
def stringWidth(self, text, font_size):
return stringWidth(text, self.FONT, font_size)
def printHeader(self, c):
"""
Args:
c: The canvas to draw on
"""
c.drawImage('template/logo.jpg', 0, 10*inch, 8.5*inch, 1*inch)
def findTemplate(self, jsondata):
"""
Find a template to best fit this page using the number of equations,
and the layout on this page.
"""
if not "template" in jsondata:
template_name = "horizontal"
else:
template_name = jsondata['template']
if "problems_per_page" in jsondata:
template_name += str(jsondata["problems_per_page"])
else:
numEquations = len(jsondata["equations"])
if template_name=="vertical":
if numEquations > 49:
template_name += "56"
elif numEquations >= 30:
template_name += "49"
else:
template_name += "30"
elif template_name=="horizontal":
if numEquations > 30:
template_name += "52"
else:
template_name += "30"
elif template_name == "3num":
if numEquations > 30:
template_name += "50"
elif numEquations > 20:
template_name += "30"
else:
template_name += "20"
return (template_name, self.LAYOUTS[template_name])
def _generatePage(self, jsondata, c):
equations = jsondata['equations']
template_name, template = self.findTemplate(jsondata)
blocks_per_row = (8.5*inch-2*template['margin'])/(template['block_width'])
blocks_per_row = floor(blocks_per_row)
print('blocks per row', blocks_per_row)
print(type(equations))
print(len(equations))
start_index = 0
finished = False
if 'horizontal' in template_name:
c.setStrokeColorRGB(1, 1, 1)
c.setFillColorRGB(0.9, 0.9, 0.9)
vertLineX = template['margin']+template['block_width']-0.1*inch
drawRect = False
while vertLineX + template['block_width']< 8.5*inch:
drawRect=not drawRect
if drawRect:
c.rect(vertLineX, template['margin'], template['block_width'], template['y_top'], fill=1)
c.rect(0, 0, 10, 10)
c.line(vertLineX, template['margin'], vertLineX, 100)
#c.line(vertLineX, template['y_top']+template['block_height'], vertLineX, template['margin'])
vertLineX += template['block_width']
c.setFillColorRGB(0, 0, 0)
#c.setStrokeColorRGB(0, 0, 0)
#c.setDash()
while start_index < len(equations) and not finished:
c.setFont(self.FONT, template['font_size'])
self.printHeader(c)
finished = True
for index, value in enumerate(equations):
i = index - start_index
if i < 0:
continue
x = template['margin'] + (i%blocks_per_row)*template['block_width']
y = template['y_top'] - floor(i/blocks_per_row)*template['block_height']
self.drawEquation(template_name, c, value, x, y, template)
c.showPage()
def generate(self, data):
"""
Args:
data: string in JSON format from user's request body
"""
jsonData = json.loads(data)
c = canvas.Canvas('/tmp/result.pdf', pagesize=letter)
c.setTitle("BlankMath.com");
if 'countPerPage' in jsonData:
countPerPage=int(jsonData['countPerPage'])
equations = jsonData['equations']
while len(equations) > 0:
countThisPage = min(countPerPage, len(equations))
jsonData['equations'] = equations[:countThisPage]
self._generatePage(jsonData, c)
del equations[:countThisPage]
else:
self._generatePage(jsonData, c)
c.save()
def horizontalExpand(self, text):
result=""
# Remove all spaces, I will add them back
text = text.replace(" ", "")
for index, char in enumerate(text):
if char in ['+','-', '*', '/']:
result = result + ' ' + char + ' '
elif char == '=':
result = result + ' = '
elif char == 'x':
result = result + '___'
else:
result = result + char
result = result.replace('*', '×')
result = result.replace('/', '÷')
return result
def verticalExpand(self, text):
print("Expand ", text)
result=str("")
# Remove all spaces, I will add them back
text = text.replace(" ", "")
for index, char in enumerate(text):
if char in ['+','-', '*', '/']:
result = result + ' ' + char + ' '
elif char == '=':
result = result + ' ___ '
elif char == 'x':
result = result + '#'
else:
result = result + char
result = result.replace('*', '×')
result = result.replace('/', '÷')
return result
def drawVerticalDiv(self, my_canvas, text, x, y, template):
pass
def drawEquation(self, template_name, my_canvas, text, x, y, template):
""" Draws an equation at the specified point on the specified canvas
Args:
template_name: Name of the template
my_canvas: canvas to draw on
text: The text representation of the string
x: x coordinate of the place to draw, representing the bottom left corner
y: y coordinate of the place to draw, representing the bottom left corner
template: template object to draw
"""
if 'horizontal' in template_name or '3num' in template_name:
print("Will draw text horizontally @", x, y)
text = self.horizontalExpand(text)
width = self.stringWidth(text, template['font_size'])
if 'x' in text:
sign = '___'
elif 'o' in text:
sign = 'o'
else:
sign = '___'
parts = text.split(sign)
print('Spliting horizontal: ', parts)
my_canvas.drawString(x, y, parts[0])
start_x = x + self.stringWidth(parts[0], template['font_size'])
margin=-0.05*inch
rect_width = 0.55*inch
rect_height = 0.55*inch
rect_y = y - 0.2*inch
stroke = 0.09*inch
if 'o' in text:
margin = 0.2*inch
rect_width = 0.35*inch
rect_height = 0.35*inch
rect_y = y - 0.1*inch
rect_x = start_x+margin
print("Rect dimension: %d %d %d %d "%(rect_width,rect_height,rect_x,rect_y))
my_canvas.setStrokeColorRGB(0.75, 0.75, 0.75)
if 'o' in text:
# Reuse the dimention of the rect.
my_canvas.roundRect(rect_x, rect_y, rect_width, rect_height, stroke)
#my_canvas.circle(rect_x + rect_width / 2, rect_y+rect_height/2, rect_width/4)
else:
my_canvas.roundRect(rect_x, rect_y, rect_width, rect_height, stroke)
my_canvas.setStrokeColorRGB(0, 0, 0)
start_x = start_x + rect_width + 2*margin
my_canvas.drawString(start_x, y, parts[1])
elif 'vertical' in template_name:
text = self.verticalExpand(text)
print("Will draw text vertically @", x, y, " as ", text)
tokens = re.split(" |=", text)
print(tokens)
reverse_tokens = [x[::-1] for x in tokens]
print(reverse_tokens)
pass_eq = 0
for index, value in enumerate(tokens):
value_to_draw = value
width = self.stringWidth(value, template['font_size'])
start_y = y - template["line_width"]*(index - pass_eq)
if value in ['+', '-', '*', '/']:
pass_eq=1
start_x = x + 0.4*template['block_width']
elif value == '#':
value_to_draw = ' '
start_x = x + 0.4*inch
else:
start_x = x + template['block_width'] - width
if value_to_draw == '___':
start_y = start_y + 0.15*inch
my_canvas.line(start_x, start_y, start_x + width, start_y)
else:
my_canvas.drawString(start_x, start_y, value_to_draw)
elif 'vertical_div' in template_name:
self.drawVerticalDiv(my_canvas, text, x, y, template)
else:
print("I do not know how to draw this", template_name)
| StarcoderdataPython |
1839466 | # coding: utf-8
import sublime
st_version = int(sublime.version())
if st_version > 3000:
from JoomlaPack.lib.inflector.english import English
else:
from lib.inflector.english import English
__all__ = [
'English'
]
| StarcoderdataPython |
12810474 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Mixcr(Package):
"""MiXCR is a universal framework that processes big immunome data from
raw sequences to quantitated clonotypes. MiXCR efficiently handles
paired- and single-end reads, considers sequence quality, corrects PCR
errors and identifies germline hypermutations. The software supports
both partial- and full-length profiling and employs all available RNA or
DNA information, including sequences upstream of V and downstream of J
gene segments."""
homepage = "https://mixcr.readthedocs.io/en/master/index.html"
url = "https://github.com/milaboratory/mixcr/releases/download/v3.0.2/mixcr-3.0.2.zip"
version('3.0.2', sha256='b4dcad985053438d5f5590555f399edfbd8cb514e1b9717620ee0ad0b5eb6b33')
depends_on('java@8:')
def install(self, spec, prefix):
install_tree('.', prefix)
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix)
| StarcoderdataPython |
354470 | <filename>national_debt/national_debt/spiders/gdp_debt.py<gh_stars>0
import scrapy
class GdpDebtSpider(scrapy.Spider):
name = 'gdp_debt'
allowed_domains = ['worldpopulationreview.com']
# note that here you might need to change http to https
start_urls = ['http://worldpopulationreview.com/countries/countries-by-national-debt/']
def parse(self, response):
countries = response.xpath("//tbody/tr")
# countries is a list of selector objects
for country in countries:
# country name
name = country.xpath(".//td[1]/a/text()").get()
# get() extracts the textual data
# getall() extracts the textual data as a list
rate = country.xpath(".//td[2]/text()").get()
yield {
"country_name": name,
"rate": rate
}
| StarcoderdataPython |
11375610 | <gh_stars>1-10
#!/usr/bin/python
#
# Author: <NAME> <<EMAIL>>
#
# The BSD 3-Clause License
# Copyright (c) 2013, SUSE Linux Products GmbH
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# * Neither the name of the SUSE Linux Products GmbH nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
import sys
import pty
import stat
import time
import socket
import random
import hashlib
import getpass
import tempfile
from xml.dom import minidom
class SMClientOutput:
"""
Parse SM Client output.
"""
SUCCESS = 'success'
WARNING = 'warning'
ERROR = 'failure'
def __init__(self, output):
"""
Get output from the SM Client.
"""
self.events = {
self.SUCCESS : [],
self.WARNING : [],
self.ERROR : [],
}
chop = False
xmldoc = []
for line in (output != None and output.strip() or "").split("\n"):
if line.startswith('<?xml '):
chop = True
if chop:
xmldoc.append(line.strip())
xmldoc = minidom.parseString('\n'.join(xmldoc))
for node in xmldoc.getElementsByTagName("message"):
if not node.attributes or not node.attributes.get("type"):
# Broken remote XML here. How to cry in a proper way?
continue
for cnode in node.childNodes:
self.events[node.attributes.get("type") and node.attributes.get("type").value or "unknown"].append(cnode.nodeValue)
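# Usage sketch (illustrative; the XML is an assumption inferred from the
# parsing above, where each <message type="..."> element is grouped by its
# "type" attribute):
#
# out = 'noise\n<?xml version="1.0"?><log><message type="success">Patched</message></log>'
# SMClientOutput(out).events[SMClientOutput.SUCCESS] # -> ['Patched']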
class SSH:
"""
SSH tools wrapper.
"""
def __init__(self, hostname, passwd, user, port, verbose=False):
"""
Constructor.
"""
self.hostname = hostname
self.passwd = passwd
self.user = user
self.port = port
self.verbose = verbose
self.tunneling = []
def set_tunneling(self, settings):
"""
Set tunneling mode with settings of the following format:
[(source_port, destination_port, hostname,),]
"""
self.tunneling = []
for src_port, dst_port, hostname in settings:
self.tunneling.append("-R %s:%s:%s" % (src_port, hostname, dst_port))
def _read(self, input):
"""
Read the output of the child process.
"""
out = ''
try:
out = os.read(input, 0x400)
if self.verbose and str(out).strip():
print >> sys.stderr, "INPUT>", out.strip()
except Exception, e:
# Always suppress IO fail here.
pass
return out
def _results(self, pid, f):
"""
Process output results.
"""
out = ""
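        # Keep reading until the child closes the stream, answering host-key
        # confirmation and password prompts automatically along the way.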
while True:
r = self._read(f)
if r.lower().find("authenticity of host") > -1:
os.write(f, 'yes\n')
continue
elif r.lower().find("password:") > -1:
os.write(f, self.passwd + "\n")
tmp = self._read(f)
tmp += self._read(f)
if tmp.lower().find("permission denied") > -1:
raise Exception("Invalid passwd")
out += r
if not len(r):
break
os.waitpid(pid, 0)
os.close(f)
return out.strip()
def execute(self, c):
"""
Execute one SSH command on the remote node.
"""
(pid, f) = pty.fork()
if pid == 0:
cmd = ['ssh', '-o NumberOfPasswordPrompts=1',
'-p %s' % self.port, self.user + '@' + self.hostname, c,]
os.execlp("ssh", *(cmd[:1] + self.tunneling + cmd[1:]))
else:
return self._results(pid, f)
def push_file(self, src, dst):
"""
Copy source file to the destination on the remote host.
"""
pid, f = pty.fork()
if pid == 0:
os.execlp("scp", "scp", '-o NumberOfPasswordPrompts=1',
'-P %s' % self.port, src, self.user + '@' + self.hostname + ':' + dst)
else:
return self._results(pid, f)
def deploy_identity(self):
"""
Deploy SSH identity on the remote node.
"""
idpath = "%s/.ssh/id_rsa.pub" % os.path.expanduser('~' + getpass.getuser())
if self.verbose:
print >> sys.stderr, "Deploying SSH key %s" % idpath
self.execute("umask 077; test -d .ssh || mkdir .ssh;")
# Make temp name
digest = hashlib.md5()
block = 0x10000
fh = open(idpath)
buff = fh.read(block)
while len(buff) > 0:
digest.update(buff)
buff = fh.read(block)
fh.close()
destfile = '.id-rsa-pub.%s.%s' % (self.hostname, digest.hexdigest())
# Add public key and cleanup
pid, f = pty.fork()
if pid == 0:
os.execlp("scp", "scp", '-o NumberOfPasswordPrompts=1',
'-P %d' % self.port, idpath, self.user + '@' + self.hostname + ':' + destfile)
else:
self._results(pid, f)
self.execute("umask 077; cat %s >> .ssh/authorized_keys; rm %s" % (destfile, destfile))
class PushEnvironment:
"""
Class to setup push environment: SSH, keys etc.
"""
def __init__(self, target_host, params, target_port=22):
self.target_host = target_host
self.target_port = target_port
self.target_os = 'Unknown'
self.target_arch = 'Unknown'
self.params = params
def setup(self):
"""
Prepare push environment on the server side.
"""
# SSH keys around?
key_fp = self.verify_id_rsa()
if key_fp and not 'quiet' in self.params.keys():
RuntimeUtils.info('New RSA key for SSH generated.', format=self.params.get('output-format', 'text'))
RuntimeUtils.info('Fingerprint: ' + key_fp, format=self.params.get('output-format', 'text'))
# Public keys on target?
target_machine = self.verify_keychain()
if not target_machine:
if not 'quiet' in self.params.keys():
RuntimeUtils.info('No public key deployed on target machine.', format=self.params.get('output-format', 'text'))
else:
raise Exception("You want it quiet, but I need remote password for \"%s\"!" % getpass.getuser())
self.deploy_keychain()
target_machine = self.verify_keychain()
target_machine = filter(None, target_machine.split(' '))
if len(target_machine) == 2:
self.target_os, self.target_arch = target_machine
else:
raise Exception("Unknown platform: " + self.target_os)
if not 'quiet' in self.params.keys():
RuntimeUtils.info('Target machine "%s" prepared.' % self.target_host, format=self.params.get('output-format', 'text'))
def deploy_keychain(self):
"""
Deploy public key on the target machine.
Require console password input.
"""
if not os.environ.get('SSH_REMOTE_PASSWORD'):
print >> sys.stdout, "REQUEST:\t",
sys.stdout.flush()
msg = "Enter login password to %s\n\t\tas user '%s': " % (self.target_host, getpass.getuser())
SSH(self.target_host, os.environ.get('SSH_REMOTE_PASSWORD') or getpass.getpass(msg),
user=getpass.getuser(), port=self.target_port).deploy_identity()
def verify_keychain(self):
"""
Verify SSH keys are deployed on the target machine.
"""
cin, out, err = os.popen3("/usr/bin/ssh -oBatchMode=yes %s uname -sp" % self.target_host)
try:
return (out.read() + '').strip()
except Exception, ex:
out = None
def verify_id_rsa(self):
"""
Get the id_rsa.pub SSH key in place, generate new if none.
"""
key_fp = None
id_path = os.path.expanduser("~%s/.ssh/id_rsa" % getpass.getuser())
if not os.path.exists("%s.pub" % id_path):
for line in os.popen("/usr/bin/ssh-keygen -t rsa -N '' -f %s" % id_path).readlines():
tokens = line.split(' ')
if len(tokens) == 2 and tokens[-1].startswith(getpass.getuser() + "@"):
key_fp = tokens[0].upper()
return key_fp
class TunnelConfig:
"""
Class to configure tunneling.
"""
# Ports, used by default, if no config around
CFG_DEFAULT_HTTP_PORT=1232
CFG_DEFAULT_HTTPS_PORT=1233
# What to look in configs
CFG_HTTP_PORT_KEY="server_push_port_http"
CFG_HTTPS_PORT_KEY="server_push_port_https"
# Where to look in configs
PTH_RHN_CONF="/etc/rhn/rhn.conf"
PTH_RHN_DEFAULTS="/usr/share/rhn/config-defaults/rhn_web.conf"
def __init__(self):
"""
Init and setup the tunneling.
"""
cfg = self.get_config(self.PTH_RHN_DEFAULTS) # Get defaults (if any)
cfg.update(self.get_config(self.PTH_RHN_CONF)) # Apply custom conf on top
self.http_port = cfg.get(self.CFG_HTTP_PORT_KEY, self.CFG_DEFAULT_HTTP_PORT)
self.https_port = cfg.get(self.CFG_HTTPS_PORT_KEY, self.CFG_DEFAULT_HTTPS_PORT)
def get_config(self, config):
"""
Parse typical key=value config
and return a dictionary of parsed values back.
"""
cfg = {}
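        # Accept only non-comment lines that contain '=', split on the first '='
        # and strip whitespace from both key and value.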
for conf_item in open(config).readlines():
if conf_item.find('=') > -1 and not conf_item.strip().startswith('#'):
cfg.update(dict([map(lambda i:i.strip(), conf_item.split('=', 1))]))
return cfg
class TaskPush:
"""
Class to perform the tasks on the remote host.
"""
def __init__(self, params):
self.host_ip, self.hostname = self._get_hostname(params.get('hostname', None))
self.localhost_ip, self.localhostname = self._get_hostname(socket.gethostname())
self.params = params
self.ssh = None
self.environ = None
self.tunnel = None
self.is_tunnel_enabled = None
def _get_hostname(self, hostname):
"""
Resolve to fully qualified hostname.
"""
if not hostname:
raise Exception("Unknown target hostname.")
host_ip = None
fullname = None
try:
host_ip = socket.gethostbyname(hostname)
fullname = socket.gethostbyaddr(host_ip)[0]
except Exception, ex:
raise Exception("Unable to resolve \"%s\" hostname." % hostname)
return host_ip, fullname
def prepare(self):
"""
Prepare the push mechanism.
"""
self.environ = PushEnvironment(self.hostname, self.params)
self.environ.setup()
self.tunnel = TunnelConfig()
self.ssh = SSH(self.hostname, None, user=getpass.getuser(), port=self.params.get('ssh-port', '22'))
def perform(self):
"""
Run the task on the target system.
"""
# Enable or disable tunneling
if 'tunneling' in self.params.keys():
if self.params.get('tunneling') in ['yes', 'no']:
self._do_tunneling()
else:
raise Exception('What means "%s" in context of tunneling?' % self.params.get('tunneling'))
else:
# Check if tunneling is on the remote, since user is not asking for it.
if self.is_tunnel_enabled == None:
self._do_tunneling(check_only=True)
if 'quiet' not in self.params.keys():
RuntimeUtils.info("Tunnel is %s." % (self.is_tunnel_enabled and 'enabled' or 'disabled'), format=self.params.get('output-format', 'text'))
# Register, if requested
if 'activation-keys' in self.params.keys():
self._do_register_at_sm(force=('override' in self.params.keys()))
# Execute some command, if any
if self.params.get('command'):
self._do_command()
# Performing tasks
def _do_register_at_sm(self, force=False):
"""
Register remote node at SUSE Manager.
"""
ssl_certificate = "/srv/www/htdocs/pub/RHN-ORG-TRUSTED-SSL-CERT" # Point of configuration in a future.
if self.environ.target_os.lower() == 'linux':
# Register remote against SUSE Manager
if self.ssh.execute("rpm -qa | grep sm-client-tools || echo 'absent'") == 'absent':
RuntimeUtils.info('Installing SM Client on target machine', format=self.params.get('output-format', 'text'))
remote_pkg_pth = '/tmp/sm-client-tools.%s.%s.rpm' % (time.time(), random.randint(0xff, 0xffff)) # Temporary unique (hopefully) name on remote filesystem.
local_pkg_pth = "/srv/www/htdocs/pub/bootstrap/sm-client-tools.rpm"
if not os.path.exists(local_pkg_pth):
raise Exception('SUSE Manager Client package does not exists.')
self.ssh.push_file(local_pkg_pth, remote_pkg_pth)
self.ssh.execute('/bin/rpm -ivh %s; rm %s' % (remote_pkg_pth, remote_pkg_pth))
if self.ssh.execute('test -e /usr/bin/sm-client && echo "installed" || echo "failed"') == 'failed':
raise Exception("SM Client installation failed. :-(")
else:
if 'quiet' not in self.params.keys():
RuntimeUtils.info("SM Client has been installed", format=self.params.get('output-format', 'text'))
else:
if 'quiet' not in self.params.keys():
RuntimeUtils.info('SM Client is already installed', format=self.params.get('output-format', 'text'))
# Get SSL certificate fingerprint
ssl_fp = os.popen("/usr/bin/openssl x509 -noout -in %s -fingerprint" % ssl_certificate).read().split('=')[-1].strip()
if not 'quiet' in self.params.keys():
RuntimeUtils.info("SSL certificate: %s" % ssl_fp, format=self.params.get('output-format', 'text'))
# If we need sudo, we need to know it is there
if getpass.getuser() != 'root':
if self.ssh.execute("test -e /usr/bin/sudo && echo 'OK'") != 'OK':
raise Exception("You cannot run anything on \"%s\" as \"%s\" without sudo installed!" % (self.hostname, getpass.getuser()))
# Check if we have permissions
if self.ssh.execute("/usr/bin/sudo -S true < /dev/null &>/dev/null && echo 'OK'") != 'OK':
raise Exception("Not enough privileges for user \"%s\" on \"%s\" node." % (getpass.getuser(), self.hostname))
# Register machine
remote_tmp_logfile = '/tmp/.sm-client-tools.%s.%s.log' % (time.strftime('%Y%m%d.%H%M%S.backup', time.localtime()), random.randint(0xff, 0xffff))
overrides = []
if self.is_tunnel_enabled:
overrides.append('--cfg=noSSLServerURL,http://%s:%s/' % (self.localhostname, self.tunnel.http_port))
overrides.append('--cfg=serverURL,https://%s:%s/XMLRPC' % (self.localhostname, self.tunnel.https_port))
prefix = getpass.getuser() != 'root' and "/usr/bin/sudo -n " or ""
self.ssh.execute("%s/usr/bin/sm-client --output-format=xml --hostname=%s --activation-keys=%s --ssl-fingerprint=%s %s > %s" %
(prefix, self.localhostname, self.params['activation-keys'], ssl_fp, ' '.join(overrides), remote_tmp_logfile))
smc_out = SMClientOutput(self.ssh.execute("test -e %s && /bin/cat %s && rm %s || echo '<?xml version=\"1.0\" encoding=\"UTF-8\"?><log/>'" %
(remote_tmp_logfile, remote_tmp_logfile, remote_tmp_logfile)))
if smc_out.events.get(SMClientOutput.ERROR):
RuntimeUtils.warning("Remote machine was not happy:", format=self.params.get('output-format', 'text'))
for error_message in smc_out.events.get(SMClientOutput.ERROR):
RuntimeUtils.error(error_message, format=self.params.get('output-format', 'text'))
raise Exception("Registration failed. Please login to the %s and find out why." % self.hostname)
elif smc_out.events.get(SMClientOutput.WARNING) and not 'quiet' in self.params.keys():
for warning_message in smc_out.events.get(SMClientOutput.WARNING):
RuntimeUtils.warning(self.hostname + ": " + warning_message, format=self.params.get('output-format', 'text'))
# No success blah-blah-blah here.
else:
# Solaris fans, do it yourself. :-)
raise Exception('I cannot register %s against SUSE Manager as of today.' % self.environ.target_os)
if 'quiet' not in self.params.keys():
RuntimeUtils.info("Remote machine %s has been registered successfully." % self.hostname, format=self.params.get('output-format', 'text'))
def _do_tunneling(self, check_only=False):
"""
Enable or disable tunnel.
"""
if not self.ssh:
raise Exception("SSH link was not initialized.")
# Get content of the /etc/hosts on the remote machine
random.seed()
        token = '# sm-push check %s.%s' % (time.time(), random.randint(0xff, 0xffff))
etc_hosts = self.ssh.execute("test -e /etc/hosts && cat /etc/hosts || echo '%s'" % token) + ""
self.is_tunnel_enabled = False
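        # The random token is only echoed back when /etc/hosts is missing on the remote node.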
if etc_hosts.find(token) > -1:
raise Exception('Tunneling cannot be enabled on this system.')
else:
for line in map(lambda item:item.strip().lower(), etc_hosts.split("\n")):
if not line.startswith('#') and line.find(self.localhostname) > -1:
self.is_tunnel_enabled = True
break
# Setup SSH if tunneling around
if self.is_tunnel_enabled:
self.ssh.set_tunneling(((self.tunnel.http_port, 80, self.localhostname),
(self.tunnel.https_port, 443, self.localhostname),))
# Exit if this is only check/setup
if check_only:
return
# Skip procedure if nothing needed to do.
enable = self.params.get('tunneling', '') == 'yes'
RuntimeUtils.info('%s tunneling on %s node.' % ((enable and 'Enabling' or 'Disabling'), self.hostname),
format=self.params.get('output-format', 'text'))
if enable:
if self.is_tunnel_enabled:
                RuntimeUtils.warning('Tunnelling on the node "%s" is already enabled.' % self.hostname,
format=self.params.get('output-format', 'text'))
return
else:
if not self.is_tunnel_enabled:
                RuntimeUtils.warning('Tunnelling on the node "%s" is already disabled.' % self.hostname,
format=self.params.get('output-format', 'text'))
return
self.is_tunnel_enabled = enable
hosts = []
for line in etc_hosts.split("\n"):
if not line.strip().startswith('#'):
if enable and line.lower().find('localhost') + 1:
line = map(lambda item:item.strip(), filter(None, line.split(' ')))
line.append(self.localhostname)
line = ' '.join(line)
else:
line = ' '.join(filter(None, line.replace(self.localhostname, '').split(' '))).strip()
hosts.append(line)
etc_hosts = '\n'.join(hosts)
# Save to tempfile
tmpfd, tmppth = tempfile.mkstemp(prefix='sm-push-hosts-%s-' % self.hostname)
tmpfh = os.fdopen(tmpfd, "w")
tmpfh.write(etc_hosts + "\n")
tmpfh.close()
# Push the file to the remote
remote_hosts_pth = '/tmp/.sm-push-hosts-%s.%s' % (time.time(), random.randint(0xff, 0xffff))
self.ssh.push_file(tmppth, remote_hosts_pth)
# Push failed?
if (self.ssh.execute("test -e %s && echo 'OK' || echo '%s'" % (remote_hosts_pth, token)) + "").strip() != 'OK':
raise Exception('Unable to send new configuration to "%s" node.' % self.hostname)
# Replace remote file
if 'safe' in self.params.keys():
backup_suffix = time.strftime('%Y%m%d.%H%M%S.backup', time.localtime())
res = self.ssh.execute('mv /etc/hosts /etc/hosts.%s' % backup_suffix)
if res:
RuntimeUtils.error(res, format=self.params.get('output-format', 'text'))
self._cleanup(tmppth)
raise Exception('Remote node error.')
if not 'quiet' in self.params.keys():
RuntimeUtils.info('Previous file "/etc/hosts" has been saved as "/etc/hosts.%s"' % backup_suffix,
format=self.params.get('output-format', 'text'))
res = self.ssh.execute('mv %s /etc/hosts; chmod 0644 /etc/hosts' % remote_hosts_pth)
if res:
RuntimeUtils.error(res, format=self.params.get('output-format', 'text'))
self._cleanup(tmppth)
raise Exception('Remote node error.')
# Restart DNS cache
self._restart_dns_cache()
# Enable or disable 3rd party services
self._enable_services(not enable)
def _enable_services(self, enable):
"""
Enable or disable various 3rd party services that should not run when SSH tunneling is around.
"""
if self.environ.target_os.lower() == 'linux':
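            # When the init script exists on the remote node, start and activate the
            # service if `enable` is True, otherwise stop and deactivate it.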
for service_name, service_exec in [('OSAD client-side', '/etc/init.d/osad'),
('Red Hat Network update query', '/etc/init.d/rhnsd'),]:
if self.ssh.execute('test -e %s && %s %s;chkconfig %s %s || echo "absent"' %(service_exec,
service_exec,
(enable and 'start' or 'stop'),
(enable and '-a' or '-d'),
service_exec.split('/')[-1])) != 'absent':
RuntimeUtils.info('%s %s service' % ((enable and 'Enabling' or 'Stopping'), service_name),
format=self.params.get('output-format', 'text'))
else:
RuntimeUtils.warning('Additional service operations are not supported for %s on %s.' % (self.environ.target_os, self.environ.target_arch),
format=self.params.get('output-format', 'text'))
def _restart_dns_cache(self):
"""
Restart DNS cache.
On Linux it is nscd.
"""
if self.environ.target_os.lower() == 'linux':
if self.ssh.execute("test -e /etc/init.d/nscd && echo 'exists' || echo 'absent'") == 'exists':
RuntimeUtils.info('Restarting name service cache daemon on remote node.',
format=self.params.get('output-format', 'text'))
                self.ssh.execute('/etc/init.d/nscd restart')
else:
RuntimeUtils.warning('DNS cache operations are not supported for %s on %s.' % (self.environ.target_os, self.environ.target_arch),
format=self.params.get('output-format', 'text'))
def _cleanup(self, *fpth):
"""
Cleanup all given file paths.
"""
for fp in fpth:
if os.path.exists(fp):
try:
os.unlink(fp)
except Exception, ex:
RuntimeUtils.warning('Could not remove local temporary file "%s"' % fp,
format=self.params.get('output-format', 'text'))
RuntimeUtils.error(str(ex), format=self.params.get('output-format', 'text'))
def _do_command(self):
"""
Execute a custom command on the remote machine.
"""
if not self.ssh:
raise Exception("SSH link was not initialized.")
if not 'quiet' in self.params.keys():
RuntimeUtils.info('Executing command: "' + self.params.get('command') + '"',
format=self.params.get('output-format', 'text'))
RuntimeUtils.info('Remote response below as follows:',
format=self.params.get('output-format', 'text'))
response = self.ssh.execute(self.params.get('command'))
# Output "frame" only during verbose mode (default)
if not 'quiet' in self.params.keys() and self.params.get('output-format', 'text') == 'text':
print >> sys.stdout, "-" * 80
if self.params.get('output-format', 'text') == 'xml':
RuntimeUtils.info(response or "", format='xml')
else:
print >> sys.stdout, response
if not 'quiet' in self.params.keys() and self.params.get('output-format', 'text') == 'text':
print >> sys.stdout, "-" * 80
class RuntimeUtils:
"""
All 'orphan' functions are here. :)
"""
@classmethod
def is_root(self):
"""
Returns True if user is root.
"""
return getpass.getuser() == 'root'
@classmethod
def get_event_time(self):
"""
Format a time for an event, usually used in XML messages.
"""
return time.strftime('%Y.%m.%d %T', time.localtime())
@classmethod
def header(self):
"""
Displays header.
"""
print >> sys.stdout, "SUSE Manager Task Push. Version 0.1\n" \
+ "Copyright (c) 2013 by SUSE Linux Products GmbH\n"
@classmethod
def usage(self):
"""
Displays usage and exits.
"""
print >> sys.stderr, "Usage:\n\tsm-push <options>\n"
print >> sys.stderr, "Options:"
print >> sys.stderr, "\t--hostname=<DNS name>\t\tSpecify target hostname."
print >> sys.stderr, "\t--activation-keys=<list>\tComma separated list of activation keys.\n" \
+ "\t\t\t\t\tIf parameter specified, machine will be registered against SUSE Manager."
print >> sys.stderr, "\t--override\t\t\tIgnore conditional request of an operation and always perform it."
print >> sys.stderr, "\t--command=\"<command>\"\t\tCustom command to be executed on the target machine.\n" \
+ "\t\t\t\t\tPlease escape quote and/or double-quote inside, if required."
print >> sys.stderr, "\t--tunneling=<yes|no>\t\tEnable or disable tunneling."
print >> sys.stderr, "\t--output-format=<xml|text>\tOutput format. Default is \"text\"."
print >> sys.stderr, "\t--safe\t\t\t\tMake a backup copy of previous configuration."
print >> sys.stderr, "\t--quiet\t\t\t\tProduce no output at all except occurred errors and command result."
print >> sys.stderr, "\t--help\t\t\t\tDisplays this message.\n\n"
print >> sys.stderr, "Environment variables:"
print >> sys.stderr, "\tSSH_REMOTE_PASSWORD\t\tPassword on the remote machine to the calling user.\n"
sys.exit(1)
@classmethod
def info(self, msg, output=sys.stdout, format='text', type='INFO'):
typemap = {
'INFO' : 'success',
'WARNING' : 'warning',
'ERROR' : 'failure',
}
if format == 'xml':
print >> output, " <message type=\"%s\" time=\"%s\"><![CDATA[%s]]></message>" % (typemap[type], RuntimeUtils.get_event_time(), msg)
else:
print >> output, "%s:\t%s" % (type, (type != 'WARNING' and '\t' or '') +msg)
output.flush()
@classmethod
def warning(self, msg, output=sys.stdout, format='text'):
RuntimeUtils.info(msg, output=output, format=format, type='WARNING')
@classmethod
def error(self, msg, output=sys.stdout, format='text'):
RuntimeUtils.info(msg, output=output, format=format, type='ERROR')
@classmethod
def required_params(self):
"""
Returns True or False if required params has been passed.
"""
params = RuntimeUtils.get_params()
if 'hostname' in params.keys():
for p in ['activation-keys', 'command', 'tunneling']:
if p in params.keys():
return True
@classmethod
def get_params(self):
"""
Parse params.
"""
params = {}
for arg in sys.argv[1:]:
if arg[:2] != '--':
continue
if arg in ['--help', '--override', '--quiet', '--safe']:
params[arg[2:]] = None
elif arg.find("=") > -1:
k, v = arg.split("=", 1)
params[k[2:]] = v
return params
# Main app
if __name__ == "__main__":
params = RuntimeUtils.get_params()
if not RuntimeUtils.required_params() or 'help' in params.keys():
RuntimeUtils.header()
RuntimeUtils.usage()
else:
# How to output
if params.get('output-format') and params.get('output-format') not in ['xml', 'text']:
RuntimeUtils.header()
RuntimeUtils.error("How to speak in %sanese?\n" % params.get('output-format').title())
sys.exit(1)
if params.get('output-format', 'text') == 'xml':
print >> sys.stdout, '<?xml version="1.0" encoding="utf-8"?>\n<log>\n <meta/>\n <messages>'
try:
task_push = TaskPush(params)
task_push.prepare()
task_push.perform()
except Exception, ex:
RuntimeUtils.error(str(ex), format=params.get('output-format', 'text'))
if params.get('output-format', 'text') == 'xml':
print >> sys.stdout, " </messages>\n</log>"
| StarcoderdataPython |
3235292 | text = open('input').read().split('\n\n')
groups = []
for g in text:
g = g.replace('\n', '')
groups.append(len(set(g)))
print(sum(groups))
| StarcoderdataPython |
6607740 | <reponame>hannes-holey/hans
"""
MIT License
Copyright 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import numpy as np
import pytest
from hans.input import Input
from hans.plottools import DatasetSelector
@pytest.fixture(scope="session")
def setup(tmpdir_factory):
config_file = os.path.join("tests", "inclined-slider1D_y_ideal-gas.yaml")
tmp_dir = tmpdir_factory.mktemp("tmp")
myTestProblem = Input(config_file).getProblem()
myTestProblem.run(out_dir=tmp_dir)
file = DatasetSelector("", mode="name", fname=[str(tmp_dir.join(os.path.basename(myTestProblem.outpath)))])
fdata = file.get_centerline(dir="y")
yield fdata
def test_pressure(setup):
p_ref = np.loadtxt(os.path.join("tests", "inclined-slider1D_ideal-gas_U50_s5.6e-4.dat"), unpack=True, usecols=(2,))
for data in setup.values():
p = data["p"][1]
np.testing.assert_almost_equal(p / 1e6, p_ref / 1e6, decimal=1)
def test_density(setup):
rho_ref = np.loadtxt(os.path.join("tests", "inclined-slider1D_ideal-gas_U50_s5.6e-4.dat"), unpack=True, usecols=(1,))
for data in setup.values():
rho = data["rho"][1]
np.testing.assert_almost_equal(rho, rho_ref, decimal=1)
| StarcoderdataPython |
11267178 | <reponame>brand-fabian/varfish-server
import json
from django import template
register = template.Library()
@register.filter
def pretty_json(value):
return json.dumps(value, indent=4)
| StarcoderdataPython |
3306851 | # ********** Without the int, the second print would error because you can't add strings to numbers
age = int(input('Whats your age? '))
print(age)
print(age + 1)
| StarcoderdataPython |
5125333 | <filename>ecco2_scripts/read_ecco.py
## read ECCO2 only for one layer and one area
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset as ncread
import time as tictoc
## local functions
def closest(vec,x):
#returns the index of vector vec which contains a value that is closest to value x
a = abs(vec - x)
return np.where(a == np.min(a))[0][0]
## define paths and read file names
path = '/network/aopp/chaos/pred/cooper/data/ecco2.jpl.nasa.gov/data1/cube/cube92/lat_lon/quart_90S_90N/'
folder = 'THETA.nc/'
fnlist = open(path+folder+'fileNames.txt','r').readlines()
#remove the \n from every string
fnlist = [fn[:-1] for fn in fnlist]
#remove the fileNames.txt from that list
fnlist = np.delete(fnlist,np.where(np.array(fnlist) == 'fileNames.txt')[0][0])
## create a mask and get dimensions
dat1 = ncread(path+folder+fnlist[0])
eccomask = dat1.variables['THETA'][:].mask.squeeze()
lat = dat1.variables['LATITUDE_T'][:]
lon = dat1.variables['LONGITUDE_T'][:]
dep = dat1.variables['DEPTH_T'][:]
dat1.close()
#time in days since 1992-01-01
time = np.arange(len(fnlist))*3 + 1.5
## choose area - NORTH ATLANTIC
# area - 270 to 340 EAST, 0 to 50 NORTH
lonmin,lonmax = closest(lon,270),closest(lon,340)
latmin,latmax = closest(lat,0),closest(lat,50)
dep1 = 0 #only first level, i.e. 5m
## READ ALL NC FILES ONLY ONCE
# preallocate
theta1 = np.zeros((fnlist.shape[0],latmax-latmin+1,lonmax-lonmin+1))
tic0 = tictoc.time()
for ifn in range(len(fnlist)):
dat = ncread(path+folder+fnlist[ifn])
theta1[ifn,:,:] = dat.variables['THETA'][0,dep1,latmin:latmax+1,lonmin:lonmax+1]
dat.close()
print('reading '+str(np.round(float(ifn)/fnlist.shape[0]*100))+'%')
tic0 = tictoc.time() - tic0
np.save('python/ecco2/theta_sfc_NA.npy',theta1)
#theta1 = np.load('python/ecco2/theta_sfc_NA.npy')
| StarcoderdataPython |
9668741 | <reponame>wuffi/nwb-conversion-tools
from PySide2 import QtCore, QtGui, QtWidgets
#class CollapsibleBox(QtWidgets.QWidget):
class CollapsibleBox(QtWidgets.QGroupBox):
def __init__(self, title="", parent=None):
"""
Implementation of collapsible boxes:
https://stackoverflow.com/a/52617714/11483674
"""
super().__init__(parent)
self.toggle_button = QtWidgets.QToolButton(
text=title, checkable=True, checked=False
)
self.toggle_button.setStyleSheet("QToolButton { border: none; }")
self.toggle_button.setToolButtonStyle(
QtCore.Qt.ToolButtonTextBesideIcon
)
self.toggle_button.setArrowType(QtCore.Qt.RightArrow)
self.toggle_button.pressed.connect(self.on_pressed)
self.toggle_animation = QtCore.QParallelAnimationGroup(self)
self.content_area = QtWidgets.QScrollArea(
maximumHeight=0, minimumHeight=0
)
self.content_area.setSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
)
self.content_area.setFrameShape(QtWidgets.QFrame.NoFrame)
lay = QtWidgets.QVBoxLayout(self)
lay.setSpacing(0)
lay.setContentsMargins(0, 0, 0, 0)
lay.addWidget(self.toggle_button)
lay.addWidget(self.content_area)
self.toggle_animation.addAnimation(
QtCore.QPropertyAnimation(self, b"minimumHeight")
)
self.toggle_animation.addAnimation(
QtCore.QPropertyAnimation(self, b"maximumHeight")
)
self.toggle_animation.addAnimation(
QtCore.QPropertyAnimation(self.content_area, b"maximumHeight")
)
@QtCore.Slot()
def on_pressed(self):
checked = self.toggle_button.isChecked()
self.toggle_button.setArrowType(
QtCore.Qt.DownArrow if not checked else QtCore.Qt.RightArrow
)
self.toggle_animation.setDirection(
QtCore.QAbstractAnimation.Forward
if not checked
else QtCore.QAbstractAnimation.Backward
)
self.toggle_animation.start()
def setContentLayout(self, layout):
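        # Replace the content area's layout and recompute the collapsed/expanded
        # heights that drive the expand/collapse animations.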
lay = self.content_area.layout()
del lay
self.content_area.setLayout(layout)
collapsed_height = (
self.sizeHint().height() - self.content_area.maximumHeight()
)
content_height = layout.sizeHint().height()
for i in range(self.toggle_animation.animationCount()):
animation = self.toggle_animation.animationAt(i)
animation.setDuration(500)
animation.setStartValue(collapsed_height)
animation.setEndValue(collapsed_height + content_height)
content_animation = self.toggle_animation.animationAt(
self.toggle_animation.animationCount() - 1
)
content_animation.setDuration(500)
content_animation.setStartValue(0)
content_animation.setEndValue(content_height)
| StarcoderdataPython |
3510394 | <filename>bundle_one/tree/bbst/RedBlackTreeNode.py
from ..OrderedBinarySearchTreeNode import *
class RedBlackTreeNode(OrderedBinarySearchTreeNode):
def __init__(self, element, parent, left_child, right_child, is_red = False):
OrderedBinarySearchTreeNode.__init__(self, element, parent, left_child, right_child)
self.is_red = is_red
def isRed(self):
return self.is_red
def setRed(self):
self.is_red = True
def setBlack(self):
self.is_red = False
def setColor(self, color):
self.is_red = color
def parentIsRed(self):
parent = self.getParent()
return parent.isRed()
def redChild(self):
child1 = self.getLeftChild()
child2 = self.getRightChild()
if child1.isRed():
return child1
elif child2.isRed():
return child2
else:
return None
def hasRedChild(self):
return self.getLeftChild().isRed() or self.getRightChild().isRed()
| StarcoderdataPython |
8174460 | <filename>jina/types/arrays/mixins/getattr.py
from typing import Union, List, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from ..document import DocumentArray
class GetAttributeMixin:
"""Helpers that provide attributes getter in bulk """
def get_attributes(self, *fields: str) -> Union[List, List[List]]:
"""Return all nonempty values of the fields from all docs this array contains
:param fields: Variable length argument with the name of the fields to extract
:return: Returns a list of the values for these fields.
When `fields` has multiple values, then it returns a list of list.
"""
contents = [doc.get_attributes(*fields) for doc in self]
if len(fields) > 1:
contents = list(map(list, zip(*contents)))
return contents
def get_attributes_with_docs(
self,
*fields: str,
) -> Tuple[Union[List, List[List]], 'DocumentArray']:
"""Return all nonempty values of the fields together with their nonempty docs
:param fields: Variable length argument with the name of the fields to extract
:return: Returns a tuple. The first element is a list of the values for these fields.
When `fields` has multiple values, then it returns a list of list. The second element is the non-empty docs.
"""
contents = []
docs_pts = []
for doc in self:
contents.append(doc.get_attributes(*fields))
docs_pts.append(doc)
if len(fields) > 1:
contents = list(map(list, zip(*contents)))
from ..document import DocumentArray
return contents, DocumentArray(docs_pts)
| StarcoderdataPython |
74861 | from adapters.moes.BRT100TRV import BRT100TRV
moes_adapters = {
'BRT-100-TRV': BRT100TRV, # Moes BRT-100-TRV Radiator valve with thermostat
}
| StarcoderdataPython |
6596124 | import Algorithmia
# get your Algorithmia API Key from https://algorithmia.com/users/#credentials
client = Algorithmia.client("YOUR_API_KEY")
# one-time configuration of data.world token from https://data.world/settings/advanced
# delete this line once completed:
client.algo('datadotworld/configure/0.2.0').pipe({"auth_token":"YOUR_DATA.WORLD_API_TOKEN"})
input = {
"dataset_key": "gmoney/nba-team-annual-attendance",
"query": "SELECT home_total_attendance FROM `nba_team_annual_attendance` WHERE team='Lakers'",
"query_type": "sql",
"parameters": []
}
# load dataset
algo = client.algo("datadotworld/query")
dataset = algo.pipe(input).result["data"]
# process dataset
all_values = [d["home_total_attendance"] for d in dataset]
metrics = client.algo("TimeSeries/TimeSeriesSummary").pipe({"uniformData": all_values}).result
print(metrics) | StarcoderdataPython |
4910442 | # pylint: disable=C0111,R0903
"""Draws a widget with configurable text content.
Parameters:
* spacer.text: Widget contents (defaults to empty string)
"""
import bumblebee.input
import bumblebee.output
import bumblebee.engine
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(engine, config,
bumblebee.output.Widget(full_text=self.text)
)
self._text = self.parameter("text", "")
def text(self, widget):
return self._text
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| StarcoderdataPython |
11301564 | <reponame>Jette16/spacy-course
import spacy
# Define the custom component
def length_component(doc):
    # Get the doc's length
doc_length = ____
print(f"This document is {doc_length} tokens long.")
    # Return the doc
____
# Load the small Chinese model
nlp = spacy.load("zh_core_web_sm")
# Add the component first in the pipeline and print the pipe names
____.____(____)
print(nlp.pipe_names)
# Process a text
doc = ____
| StarcoderdataPython |
228617 | <filename>server/app/auth/endpoints.py
from app.database import get_db
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
from app.users.models import User
from .exceptions import WrongCredentials
from .factories import build_auth_user_service
from .oauth2 import oauth2_scheme
router = APIRouter()
def get_current_user(
db: Session = Depends(get_db),
token: str = Depends(oauth2_scheme)
) -> User:
auth_service = build_auth_user_service(db)
try:
user = auth_service.get_user_from_token(token)
if user is None:
raise WrongCredentials
except WrongCredentials:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
return user
@router.post("/token", status_code=status.HTTP_201_CREATED)
def login(form_data: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)):
service = build_auth_user_service(db)
try:
token = service.authenticate(form_data.username, form_data.password)
except WrongCredentials:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
return {"access_token": token, "token_type": "bearer"}
@router.get("/logout")
def logout(current_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
service = build_auth_user_service(db)
service.unauthenticate(current_user)
return {}
| StarcoderdataPython |
8114661 | <gh_stars>1-10
import os
import platform
from contextlib import contextmanager
from shutil import copyfile
import dask.dataframe as dd
import pytest
from dask.delayed import Delayed
from numpy import array, array_equal, isnan
from numpy.testing import assert_allclose, assert_equal
from pandas import Series
from bgen_reader import create_metafile, example_filepath, read_bgen
def test_bgen_samples_inside_bgen():
data = read_bgen(example_filepath("haplotypes.bgen"), verbose=False)
samples = ["sample_0", "sample_1", "sample_2", "sample_3"]
samples = Series(samples, dtype=str, name="id")
assert all(data["samples"] == samples)
def test_bgen_samples_not_present():
data = read_bgen(example_filepath("complex.23bits.no.samples.bgen"), verbose=False)
samples = ["sample_0", "sample_1", "sample_2", "sample_3"]
samples = Series(samples, dtype=str, name="id")
assert all(data["samples"] == samples)
def test_bgen_samples_specify_samples_file():
data = read_bgen(
example_filepath("complex.23bits.bgen"),
samples_filepath=example_filepath("complex.sample"),
verbose=False,
)
samples = ["sample_0", "sample_1", "sample_2", "sample_3"]
samples = Series(samples, dtype=str, name="id")
assert all(data["samples"] == samples)
@pytest.mark.skipif(platform.system() != "Darwin", reason="only reliable on macos")
def test_bgen_samples_outside_bgen_unreadable(tmp_path):
bgen_filepath = example_filepath("complex.23bits.bgen")
samples_filepath = tmp_path / "complex.sample"
copyfile(example_filepath("complex.sample"), samples_filepath)
with noread_permission(samples_filepath):
with pytest.raises(PermissionError):
read_bgen(bgen_filepath, samples_filepath=samples_filepath, verbose=False)
@pytest.mark.skipif(platform.system() != "Darwin", reason="only reliable on macos")
def test_bgen_file_not_readable(tmp_path):
filepath = tmp_path / "haplotypes.bgen"
copyfile(example_filepath("haplotypes.bgen"), filepath)
with noread_permission(filepath):
with pytest.raises(PermissionError):
read_bgen(filepath, verbose=False)
def test_bgen_file_dont_exist():
with pytest.raises(FileNotFoundError):
read_bgen("idontexist.bgen", verbose=False)
def test_metafile_not_provided():
read_bgen(example_filepath("haplotypes.bgen"), verbose=False)
def test_metafile_provided_not_supported_anymore():
with pytest.raises(RuntimeError):
read_bgen(
example_filepath("haplotypes.bgen"),
metafile_filepath=example_filepath("haplotypes.bgen.metadata.valid"),
verbose=False,
)
def test_metafile_wrong_filepath():
filepath = example_filepath("haplotypes.bgen")
fp = "/omg/invalid/haplotypes.bgen.metafile_path"
with pytest.raises(FileNotFoundError):
with pytest.warns(UserWarning):
read_bgen(filepath, metafile_filepath=fp, verbose=False)
@pytest.mark.skipif(platform.system() != "Darwin", reason="only reliable on macos")
def test_metafile_not_provided_no_permission_to_create(tmp_path):
src = example_filepath("haplotypes.bgen")
dst = tmp_path / "haplotypes.bgen"
copyfile(src, dst)
path = os.path.dirname(dst)
with nowrite_permission(path):
with pytest.warns(UserWarning):
read_bgen(dst, verbose=False)
@contextmanager
def nowrite_permission(path):
perm = os.stat(path).st_mode
os.chmod(path, 0o555)
try:
yield
finally:
os.chmod(path, perm)
@contextmanager
def noread_permission(path):
perm = os.stat(path).st_mode
os.chmod(path, 0o333)
try:
yield
finally:
os.chmod(path, perm)
def test_bgen_reader_lazy_types():
bgen = read_bgen(example_filepath("haplotypes.bgen"), verbose=False)
assert isinstance(bgen["genotype"][0], Delayed)
assert isinstance(bgen["variants"], dd.DataFrame)
def test_bgen_reader_phased_genotype():
filepath = example_filepath("haplotypes.bgen")
bgen = read_bgen(filepath, verbose=False)
variants = bgen["variants"]
samples = bgen["samples"]
v = variants.loc[0].compute()
assert_equal(v["chrom"].values[0], "1")
assert_equal(v["id"].values[0], "SNP1")
assert_equal(v["nalleles"].values[0], 2)
assert_equal(v["allele_ids"].values[0], "A,G")
assert_equal(v["pos"].values[0], 1)
assert_equal(v["rsid"].values[0], "RS1")
v = variants.loc[2].compute()
assert_equal(v["chrom"].values[0], "1")
assert_equal(v["id"].values[0], "SNP3")
assert_equal(v["nalleles"].values[0], 2)
assert_equal(v["allele_ids"].values[0], "A,G")
assert_equal(v["pos"].values[0], 3)
assert_equal(v["rsid"].values[0], "RS3")
assert_equal(samples.loc[0], "sample_0")
assert_equal(samples.loc[2], "sample_2")
n = samples.shape[0]
assert_equal(samples.loc[n - 1], "sample_3")
g = bgen["genotype"][0].compute()
assert_allclose(g["probs"][0], [1.0, 0.0, 1.0, 0.0])
k = len(variants)
n = len(samples)
g = bgen["genotype"][k - 1].compute()
assert_allclose(g["probs"][n - 1], [1.0, 0.0, 0.0, 1.0])
def test_bgen_reader_variants_info():
filepath = example_filepath("example.32bits.bgen")
bgen = read_bgen(filepath, verbose=False)
variants = bgen["variants"]
samples = bgen["samples"]
assert "genotype" in bgen
variants = variants.compute()
assert_equal(variants.loc[0, "chrom"], "01")
assert_equal(variants.loc[0, "id"], "SNPID_2")
assert_equal(variants.loc[0, "nalleles"], 2)
assert_equal(variants.loc[0, "allele_ids"], "A,G")
assert_equal(variants.loc[0, "pos"], 2000)
assert_equal(variants.loc[0, "rsid"], "RSID_2")
assert_equal(variants.loc[7, "chrom"], "01")
assert_equal(variants.loc[7, "id"], "SNPID_9")
assert_equal(variants.loc[7, "nalleles"], 2)
assert_equal(variants.loc[7, "allele_ids"], "A,G")
assert_equal(variants.loc[7, "pos"], 9000)
assert_equal(variants.loc[7, "rsid"], "RSID_9")
n = variants.shape[0]
assert_equal(variants.loc[n - 1, "chrom"], "01")
assert_equal(variants.loc[n - 1, "id"], "SNPID_200")
assert_equal(variants.loc[n - 1, "nalleles"], 2)
assert_equal(variants.loc[n - 1, "allele_ids"], "A,G")
assert_equal(variants.loc[n - 1, "pos"], 100001)
assert_equal(variants.loc[n - 1, "rsid"], "RSID_200")
assert_equal(samples.loc[0], "sample_001")
assert_equal(samples.loc[7], "sample_008")
n = samples.shape[0]
assert_equal(samples.loc[n - 1], "sample_500")
g = bgen["genotype"][0].compute()["probs"]
assert all(isnan(g[0, :]))
g = bgen["genotype"][0].compute()["probs"]
a = [0.027802362811705648, 0.00863673794284387, 0.9635608992454505]
assert_allclose(g[1, :], a)
b = [
0.97970582847010945215516,
0.01947019668749305418287,
0.00082397484239749366197,
]
g = bgen["genotype"][1].compute()["probs"]
assert_allclose(g[2, :], b)
def test_bgen_reader_without_metadata():
filepath = example_filepath("example.32bits.bgen")
bgen = read_bgen(filepath, verbose=False)
variants = bgen["variants"].compute()
samples = bgen["samples"]
assert "genotype" in bgen
assert_equal(variants.loc[7, "allele_ids"], "A,G")
n = samples.shape[0]
assert_equal(samples.loc[n - 1], "sample_500")
def test_bgen_reader_with_wrong_metadata_file():
filepath = example_filepath("example.32bits.bgen")
filepath.touch()
metafile_filepath = example_filepath("wrong.metadata")
metafile_filepath.touch() # make sure that the metafile has a later timestamp (otherwise, it might be re-created)
with pytest.raises(RuntimeError):
read_bgen(filepath, verbose=False, metafile_filepath=metafile_filepath)
def test_bgen_reader_with_nonexistent_metadata_file():
filepath = example_filepath("example.32bits.bgen")
folder = os.path.dirname(filepath)
metafile_filepath = os.path.join(folder, "nonexistent.metadata")
with pytest.raises(FileNotFoundError):
with pytest.warns(UserWarning):
read_bgen(filepath, verbose=False, metafile_filepath=metafile_filepath)
def test_bgen_reader_file_notfound():
with pytest.raises(FileNotFoundError):
read_bgen("/1/2/3/example.32bits.bgen", verbose=False)
def test_create_metadata_file(tmp_path):
filepath = example_filepath("example.32bits.bgen")
metafile_filepath = tmp_path / (filepath.name + ".metadata")
try:
create_metafile(filepath, metafile_filepath, verbose=False)
assert os.path.exists(metafile_filepath)
finally:
if os.path.exists(metafile_filepath):
os.remove(metafile_filepath)
def test_bgen_reader_complex():
filepath = example_filepath("complex.23bits.bgen")
bgen = read_bgen(filepath, verbose=False)
variants = bgen["variants"].compute()
samples = bgen["samples"]
assert "genotype" in bgen
assert_equal(variants.loc[0, "chrom"], "01")
assert_equal(variants.loc[0, "id"], "")
assert_equal(variants.loc[0, "nalleles"], 2)
assert_equal(variants.loc[0, "allele_ids"], "A,G")
assert_equal(variants.loc[0, "pos"], 1)
assert_equal(variants.loc[0, "rsid"], "V1")
assert_equal(variants.loc[7, "chrom"], "01")
assert_equal(variants.loc[7, "id"], "")
assert_equal(variants.loc[7, "nalleles"], 7)
assert_equal(variants.loc[7, "allele_ids"], "A,G,GT,GTT,GTTT,GTTTT,GTTTTT")
assert_equal(variants.loc[7, "pos"], 8)
assert_equal(variants.loc[7, "rsid"], "M8")
n = variants.shape[0]
assert_equal(variants.loc[n - 1, "chrom"], "01")
assert_equal(variants.loc[n - 1, "id"], "")
assert_equal(variants.loc[n - 1, "nalleles"], 2)
assert_equal(variants.loc[n - 1, "allele_ids"], "A,G")
assert_equal(variants.loc[n - 1, "pos"], 10)
assert_equal(variants.loc[n - 1, "rsid"], "M10")
assert_equal(samples.loc[0], "sample_0")
assert_equal(samples.loc[3], "sample_3")
g = bgen["genotype"][0].compute()["probs"][0]
assert_allclose(g[:2], [1, 0])
assert isnan(g[2])
g = bgen["genotype"][0].compute()["probs"][1]
assert_allclose(g[:3], [1, 0, 0])
g = bgen["genotype"][-1].compute()["probs"][-1]
assert_allclose(g[:5], [0, 0, 0, 1, 0])
ploidy = bgen["genotype"][0].compute()["ploidy"]
assert_allclose(ploidy, [1, 2, 2, 2])
ploidy = bgen["genotype"][-1].compute()["ploidy"]
assert_allclose(ploidy, [4, 4, 4, 4])
nvariants = len(variants)
phased = [bgen["genotype"][i].compute()["phased"] for i in range(nvariants)]
phased = array(phased)
assert_equal(phased.dtype.name, "bool")
ideal = array([False, True, True, False, True, True, True, True, False, False])
assert array_equal(phased, ideal)
def test_bgen_reader_complex_sample_file():
bgen = read_bgen(
example_filepath("complex.23bits.bgen"),
samples_filepath=example_filepath("complex.sample"),
verbose=False,
)
variants = bgen["variants"].compute()
samples = bgen["samples"]
assert "genotype" in bgen
assert_equal(variants.loc[0, "chrom"], "01")
assert_equal(variants.loc[0, "id"], "")
assert_equal(variants.loc[0, "nalleles"], 2)
assert_equal(variants.loc[0, "allele_ids"], "A,G")
assert_equal(variants.loc[0, "pos"], 1)
assert_equal(variants.loc[0, "rsid"], "V1")
assert_equal(variants.loc[7, "chrom"], "01")
assert_equal(variants.loc[7, "id"], "")
assert_equal(variants.loc[7, "nalleles"], 7)
assert_equal(variants.loc[7, "allele_ids"], "A,G,GT,GTT,GTTT,GTTTT,GTTTTT")
assert_equal(variants.loc[7, "pos"], 8)
assert_equal(variants.loc[7, "rsid"], "M8")
n = variants.shape[0]
assert_equal(variants.loc[n - 1, "chrom"], "01")
assert_equal(variants.loc[n - 1, "id"], "")
assert_equal(variants.loc[n - 1, "nalleles"], 2)
assert_equal(variants.loc[n - 1, "allele_ids"], "A,G")
assert_equal(variants.loc[n - 1, "pos"], 10)
assert_equal(variants.loc[n - 1, "rsid"], "M10")
assert_equal(samples.loc[0], "sample_0")
assert_equal(samples.loc[3], "sample_3")
ploidy = bgen["genotype"][2].compute()["ploidy"]
missing = bgen["genotype"][2].compute()["missing"]
nvariants = len(variants)
phased = [bgen["genotype"][i].compute()["phased"] for i in range(nvariants)]
assert_allclose(ploidy, [1, 2, 2, 2])
assert_allclose(missing, [0, 0, 0, 0])
assert_allclose(phased, [0, 1, 1, 0, 1, 1, 1, 1, 0, 0])
if __name__ == "__main__":
pytest.main([__file__])
| StarcoderdataPython |
6488121 | # impy - a post-processor for HYADES implosion simulations
# Copyright (c) Massachusetts Institute of Technology / <NAME>
# Distributed under the MIT License
import tkinter as tk
import tkinter.ttk as ttk
import platform
class Option_Prompt(tk.Toplevel):
"""Implement a dialog window to prompt a user to select one of several options. The chosen result
is accessible via the member variable `result`::
o = Option_Prompt(...)
o.result
:param parent: The parent UI element
:param title: (optional) A title to display on this window [default=None]
:param text: (optional) Text to display next to the prompt [default=None]
:param options: (optional) The list of options that the user can choose from. First element must be ''
:param width: (optional) the width in characters for the drop-down menu [default=10]
:author: <NAME>
:date: 2014-01-25
"""
__author__ = '<NAME>'
__date__ = '2014-01-25'
__version__ = '0.1.0'
def __init__(self, parent, title=None, text=None, options=[], width=10):
"""Initialize the dialog window"""
super(Option_Prompt, self).__init__(parent)
self.transient(parent)
self.parent = parent
self.lift()
self.grab_set()
self.result = None
self.__create_widgets__(title, text, options, width)
self.protocol("WM_DELETE_WINDOW", self.__cancel__)
# a couple key bindings:
self.bind('<Return>', self.__ok__)
self.bind('<Escape>', self.__cancel__)
# Set window background
if platform.system() == 'Darwin':
self.configure(background='#E8E9E8')
else:
self.configure(background='#F1F1F1')
self.wait_window(self)
def __create_widgets__(self, title, text, options, width):
"""Create the UI"""
if title is not None:
self.title(title)
if text is not None:
label1 = ttk.Label(self, text=text)
label1.pack()
if options is not None:
self.var = tk.StringVar('')
menu = ttk.OptionMenu(self, self.var, *options)
menu.configure(width=width)
menu.pack()
menu.focus_force()
self.__make_buttons__()
def __make_buttons__(self):
"""Add the OK and cancel buttons"""
box = ttk.Frame(self)
w = ttk.Button(box, text="OK", width=10, command=self.__ok__)
w.pack(side=tk.LEFT, padx=5, pady=5)
w = ttk.Button(box, text="Cancel", width=10, command=self.__cancel__)
w.pack(side=tk.LEFT, padx=5, pady=5)
self.bind("<Return>", self.__ok__)
self.bind("<Escape>", self.__cancel__)
box.pack()
def __ok__(self, event=None):
"""Handle activation of the OK button."""
if not self.__validate__():
print('not valid')
return
self.__apply__()
self.withdraw()
self.update_idletasks()
self.__cancel__()
def __cancel__(self, event=None):
"""Handle cancel button"""
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
def __validate__(self):
"""Validate the selection, returns true if it is OK"""
return self.var.get() != ''
def __apply__(self):
"""Set the result"""
self.result = self.var.get() | StarcoderdataPython |
4900493 | <filename>jupyter_book/utils.py
"""Utility functions for Jupyter Book."""
import string
import argparse
import os
import os.path as op
import yaml
from . import __version__
##############################################################################
# CLI utilities
def print_color(msg, style):
endc = "\033[0m"
bcolors = dict(
blue="\033[94m",
green="\033[92m",
orange="\033[93m",
red="\033[91m",
bold="\033[1m",
underline="\033[4m",
)
print(bcolors[style] + msg + endc)
def print_message_box(msg):
border = "================================================================================"
print_color("\n\n" + border + "\n\n", "green")
print(msg)
print_color("\n\n" + border + "\n\n", "green")
def _error(msg):
msg = "\n\n\033[91m==========\033[0m\n{}\n\033[91m==========\033[0m\n".format(msg)
return ValueError(msg)
def str2bool(msg):
if msg.lower() in ("yes", "true", "t", "y", "1"):
return True
elif msg.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected. Got: {}".format(msg))
##############################################################################
# Book conversion formatting
ALLOWED_CHARACTERS = string.ascii_letters + "-_/." + string.digits
def _split_yaml(lines):
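    """Split a leading YAML front-matter block delimited by '---' lines from `lines`,
    returning (yaml_lines, remaining_lines); returns ([], lines) when no block is found."""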
yaml0 = None
for ii, iline in enumerate(lines):
iline = iline.strip()
if yaml0 is None:
if iline == "---":
yaml0 = ii
elif iline:
break
elif iline == "---":
return lines[yaml0 + 1: ii], lines[ii + 1:]
return [], lines
def _check_url_page(url_page, content_folder_name):
"""Check that the page URL matches certain conditions."""
if not all(ii in ALLOWED_CHARACTERS for ii in url_page):
raise ValueError("Found unsupported character in filename: {}".format(url_page))
if "." in os.path.splitext(url_page)[-1]:
raise _error(
"A toc.yml entry links to a file directly. You should strip the file suffix.\n"
"Please change {} to {}".format(url_page, os.path.splitext(url_page)[0])
)
if any(
url_page.startswith(ii)
for ii in [content_folder_name, os.sep + content_folder_name]
):
raise ValueError(
"It looks like you have a page URL that starts with your content folder's name."
"page URLs should be *relative* to the content folder. Here is the page URL: {}".format(
url_page
)
)
def _prepare_toc(toc):
"""Prepare the TOC for processing."""
# Un-nest the TOC so it's a flat list
new_toc = []
for chapter in toc:
sections = chapter.get('sections', [])
new_toc.append(chapter)
for section in sections:
subsections = section.get('subsections', [])
new_toc.append(section)
new_toc.extend(subsections)
# Omit items that don't have URLs (like dividers) or have an external link
return [
item for item in new_toc
if 'url' in item and not item.get('external', False)
]
def _prepare_url(url):
"""Prep the formatting for a url."""
# Strip suffixes and prefixes of the URL
if not url.startswith("/"):
url = "/" + url
# Standardize the quotes character
url = url.replace('"', "'")
# Make sure it ends in "HTML"
if not url.endswith(".html"):
url = op.splitext(url)[0] + ".html"
return url
def _clean_markdown_cells(ntbk):
"""Clean up cell text of an nbformat NotebookNode."""
# Remove '#' from the end of markdown headers
for cell in ntbk.cells:
if cell.cell_type == "markdown":
cell_lines = cell.source.split("\n")
for ii, line in enumerate(cell_lines):
if line.startswith("#"):
cell_lines[ii] = line.rstrip("#").rstrip()
cell.source = "\n".join(cell_lines)
return ntbk
def _file_newer_than(path1, path2):
"""Check whether file at path1 is newer than path2."""
return os.stat(path1).st_mtime > os.stat(path2).st_mtime
def _check_book_versions(path_book):
"""Check whether the version of a book matches the version of the
CLI that's building it."""
with open(op.join(path_book, "_config.yml"), 'r') as ff:
config_version = yaml.safe_load(ff.read()).get(
"jupyter_book_version"
)
if config_version is None:
raise _error(
"Couldn't find the version for your Jupyter Book.\n"
f"Try upgrading it with `jupyter-book upgrade {path_book}"
)
if config_version != __version__:
raise _error(
f"The version of the book you are modifying doesn't match the\n"
"version of the command-line tool that you're using. Please run\n"
"\n"
f" jupyter-book upgrade {path_book} \n"
"\n"
"to upgrade your book to the CLI version.\n"
"\n"
f"This book's version: {config_version}\n"
f"Your CLI's version: {__version__}\n"
"\n"
"See above for the error message."
)
return True
| StarcoderdataPython |
3365154 | from typing import Union
from flask import Response
from backend.api.handlers.helpers.make_error_response import make_error_response
def handle_404(_e: Union[int, Exception]) -> Response:
return make_error_response(404, {"Error": "Invalid endpoint"})
| StarcoderdataPython |
1699735 | <gh_stars>0
'''Entry point'''
from .app import create_app
APP = create_app() | StarcoderdataPython |
3594943 | <gh_stars>1-10
from sklearn.externals import joblib
from dataset import PennFudanDataset
from processing import process
from classifier import extractor
from filteropt import create_pipeline
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
dataset = PennFudanDataset('dataset/PennFudanPed')
pipeline = create_pipeline(threshold=20)
process(dataset, pipeline)
inputs, targets = extractor.extract(dataset, w=11, N=20000)
model = joblib.load('trained/quick_2.pkl')
predicted = model.predict(inputs)
cm = confusion_matrix(targets, predicted)
print(cm)
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| StarcoderdataPython |
161229 | <reponame>5voltsgc/brush_wear<filename>brush_wear_gui.py
# Import required modules
import tkinter as tk
from tkinter import ttk
import serial
# Global Varibles
Number_samples = 3
red_first_time = True
blue_first_time = True
green_first_time = True
green_brush = ['[0] item Number',
'[1] Fiber Count',
'[2] Fiber Radius',
'[3] Start Length',
'[4] Start weight',
'[5] Current weight',
'[6] Diff from Previous weight',
'[7] Current Length'
]
red_brush = ['[0] item Number',
'[1] Fiber Count',
'[2] Fiber Radius',
'[3] Start Length',
'[4] Start weight',
'[5] Current weight',
'[6] Diff from Previous weight',
'[7] Current Length'
]
blue_brush = ['[0] item Number',
'[1] Fiber Count',
'[2] Fiber Radius',
'[3] Start Length',
'[4] Start weight',
'[5] Current weight',
'[6] Diff from Previous weight',
'[7] Current Length'
]
comm_port = "COM29" # this is the comm port the scale is connected to
# Serial Port - Change port to match serial port on computer device manager
# serialPort = serial.Serial(port=comm_port, baudrate=9600,
# bytesize=8, timeout=2, stopbits=serial.STOPBITS_ONE)
# Main Window
window = tk.Tk()
window.resizable(width=False, height=False)
window.title("Bursh Wear Testing - Lathe")
window.geometry('1100x300')
# Seperator objects
separator1 = ttk.Separator(window, orient='vertical')
separator1.place(relx=0.33, rely=0, relwidth=0.2, relheight=1)
separator2 = ttk.Separator(window, orient='vertical')
separator2.place(relx=0.66, rely=0, relwidth=0.2, relheight=1)
def Weight_read():
serialString = "" # Used to hold data coming over UART
try:
serialString = serialPort.readline()
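        # The raw reading has a leading '+' and a trailing character (likely the unit);
        # strip these before converting the value to float.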
serialString = serialString.decode('Ascii').strip('+').strip()
serialString = serialString[:-1]
return(float(serialString))
except ValueError:
# just return 0 Zero if cant be converted to float, and try again
return(0)
def sample_weight():
average_weight = []
for x in range(Number_samples):
read = Weight_read()
average_weight.append(read)
current_weight = Weight_read()
max_weight = max(average_weight)
min_weight = min(average_weight)
loop_count = 0
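    # Keep replacing the oldest sample with a fresh reading until all samples agree,
    # i.e. the scale has settled on a stable value.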
while max_weight != min_weight:
average_weight.pop(0)
current_weight = Weight_read()
average_weight.append(current_weight)
max_weight = max(average_weight)
min_weight = min(average_weight)
loop_count += 1
if loop_count > 25:
print("check scale! can't get a stable reading")
return(current_weight)
def find_num_fibers(fiber_diameter):
# TODO
num_fibers = fiber_diameter * 500
return(num_fibers)
# Label objects
Blue_lbl = ttk.Label(window, text="Blue Brushes",
background="blue", font=("Helvetica", 16), width=30)
Blue_lbl.grid(column=0, row=4, rowspan=2, columnspan=5)
Red_lbl = ttk.Label(window, text="Red Brushes",
background="red", font=("Helvetica", 16), width=30)
Red_lbl.grid(column=6, row=4, rowspan=2, columnspan=5)
Green_lbl = ttk.Label(window, text="Green Brushes",
background="green", font=("Helvetica", 16), width=30)
Green_lbl.grid(column=12, row=4, rowspan=2, columnspan=5)
# Brush tuple Column 0=Item#, 1=Lenth, 2=Fiber Diameter
Brushes = (
['Not Measured', 0, 0],
['110733-01', 3.00, .010],
['110733-02', 3.00, .012],
['110733-03', 3.00, .015],
['110733-04', 3.19, .010],
['110733-05', 3.19, .012],
['110733-06', 3.19, .015],
['110733-07', 3.25, .010],
['110733-08', 3.25, .012],
['110733-09', 3.25, .015],
['110733-10', 3.34, .010],
['110733-11', 3.34, .012],
['110733-12', 3.34, .015],
['110733-13', 3.47, .010],
['110733-14', 3.47, .012],
['110733-15', 3.47, .015],
['110733-16', 3.53, .012],
['110733-17', 3.28, .012],
['110733-18', 3.65, .015],
['110733-19', 2.32, .008],
['110733-20', 2.32, .010],
['110733-21', 2.32, .012],
['110733-22', 2.50, .010],
['110733-23', 2.50, .012],
['110733-24', 2.50, .015],
['110733-25', 3.88, .012],
['110733-26', 3.65, .010],
['110733-27', 3.65, .012],
['110733-28', 3.65, .019],
['110733-29', 4.28, .010])
# Blue Combobox creation
Blue_combo = ttk.Combobox(window)
Blue_combo['values'] = Brushes
Blue_combo.current(1) # set the selected item
Blue_combo.grid(column=2, row=15)
# Red Combobox creation
Red_combo = ttk.Combobox(window)
Red_combo['values'] = Brushes
Red_combo.current(2) # set the selected item
Red_combo.grid(column=7, row=15)
# Green Combobox creation
Green_combo = ttk.Combobox(window)
Green_combo['values'] = Brushes
Green_combo.current(3) # set the selected item
Green_combo.grid(column=13, row=15)
# Selected Blue Brush
def Blue_clicked():
Blue_Brush = Blue_combo.get()
print(Blue_Brush)
print(Blue_start.get())
BlueButton = tk.Button(window, text='Record', command=Blue_clicked)
BlueButton.grid(column=2, row=50)
# Selected Red Brush
def Red_clicked():
Red_Brush = Red_combo.get() # sting
print(Red_Brush)
RedButton = tk.Button(window, text='Record', command=Red_clicked)
RedButton.grid(column=7, row=50)
# #############################################################################
# GREEN BUTTON
# #############################################################################
# Selected Green Brush
global green_brush
def Green_clicked():
"""
This function will be repeated for the other two buttons.
Collect information: Scale weight, Brush info, previous weight, and do the
calculations. Format this data for the tkinter GUI, and the output file
"""
global green_first_time
global green_brush
# Change button to be sunken, the command can not be run again
GreenButton.config(text='Recorded', relief='sunken', command='')
# Get the current weight from the scale
current_weight = sample_weight()
# Find out if this is the first record
if green_first_time:
green_first_time = False
# read the selected brush then make it grayed out
brush_info = Green_combo.get()
Green_combo.config(relief='sunken') # disabled=True
# TODO regex to parse the brush info into green_brush
green_brush[0] = brush_info[:8]
green_brush[2] = float(brush_info[-5:])/2
green_brush[4] = current_weight
# if green_first_time:
# green_first_time = False
# green_fiber_diamter = float(brush_info[-5:])
# find_num_fibers(green_fiber_diamter)
# G_start.set(current_weight)
# else:
# G_Current.set(G_Current.get())
# TODO add command if desired to change
# Green = sample_weight()
# G_Previous = Green
# G_Previous = find_num_fibers()
# print(G_Previous)
# print(Green)
GreenButton = tk.Button(window, text='Record', command=Green_clicked)
GreenButton.grid(column=13, row=50)
# Blue labels and Text Boxes
Blue_Start_lbl = ttk.Label(window,
text="Start Weight(g)",
font=("Helvetica", 12))
Blue_Start_lbl.grid(column=1, row=44,)
B_start = tk.StringVar()
Blue_start = ttk.Entry(window, width=15, textvariable=B_start)
Blue_start.grid(column=2, row=44)
Blue_Previous_lbl = ttk.Label(window,
text="Previous Weight(g)",
font=("Helvetica", 12))
Blue_Previous_lbl.grid(column=1, row=45,)
B_Previous = tk.StringVar()
Blue_Previous = ttk.Entry(window, width=15, textvariable=B_Previous)
Blue_Previous.grid(column=2, row=45)
Blue_Current_lbl = ttk.Label(window,
text="Current Weight(g)",
font=("Helvetica", 12))
Blue_Current_lbl.grid(column=1, row=46,)
B_Current = tk.StringVar()
Blue_Current = ttk.Entry(window, width=15, textvariable=B_Current)
Blue_Current.grid(column=2, row=46)
Blue_Diff_lbl = ttk.Label(window,
text="Difference Weight (g)",
font=("Helvetica", 12))
Blue_Diff_lbl.grid(column=1, row=47,)
B_diff = tk.StringVar()
Blue_diff = ttk.Entry(window, width=15, textvariable=B_diff)
Blue_diff.grid(column=2, row=47)
Blue_wear_lbl = ttk.Label(window,
text="Wear (mm)",
font=("Helvetica", 12))
Blue_wear_lbl.grid(column=1, row=48)
B_wear = tk.StringVar()
Blue_wear = ttk.Entry(window, width=15, textvariable=B_wear)
Blue_wear.grid(column=2, row=48)
Blue_total_wear_lbl = ttk.Label(window,
text="Total Wear (mm)",
font=("Helvetica", 12))
Blue_total_wear_lbl.grid(column=1, row=49,)
B_total_wear = tk.StringVar()
Blue_total_wear = ttk.Entry(window, width=15, textvariable=B_total_wear)
Blue_total_wear.grid(column=2, row=49)
# Red labels and Text Boxes
Red_Start_lbl = ttk.Label(window,
text="Start Weight(g)",
font=("Helvetica", 12))
Red_Start_lbl.grid(column=6, row=44,)
R_start = tk.StringVar()
Red_start = ttk.Entry(window, width=15, textvariable=R_start)
Red_start.grid(column=7, row=44)
Red_Previous_lbl = ttk.Label(window,
text="Previous Weight(g)",
font=("Helvetica", 12))
Red_Previous_lbl.grid(column=6, row=45,)
R_Previous = tk.StringVar()
Red_Previous = ttk.Entry(window, width=15, textvariable=R_Previous)
Red_Previous.grid(column=7, row=45)
Red_Current_lbl = ttk.Label(window,
text="Current Weight(g)",
font=("Helvetica", 12))
Red_Current_lbl.grid(column=6, row=46,)
R_Current = tk.StringVar()
Red_Current = ttk.Entry(window, width=15, textvariable=R_Current)
Red_Current.grid(column=7, row=46)
Red_Diff_lbl = ttk.Label(window,
text="Difference Weight (g)",
font=("Helvetica", 12))
Red_Diff_lbl.grid(column=6, row=47,)
R_diff = tk.StringVar()
Red_diff = ttk.Entry(window, width=15, textvariable=R_diff)
Red_diff.grid(column=7, row=47)
Red_wear_lbl = ttk.Label(window,
text="Wear (mm)",
font=("Helvetica", 12))
Red_wear_lbl.grid(column=6, row=48)
R_wear = tk.StringVar()
Red_wear = ttk.Entry(window, width=15, textvariable=R_wear)
Red_wear.grid(column=7, row=48)
Red_total_wear_lbl = ttk.Label(window,
text="Total Wear (mm)",
font=("Helvetica", 12))
Red_total_wear_lbl.grid(column=6, row=49,)
R_total_wear = tk.StringVar()
Red_total_wear = ttk.Entry(window, width=15, textvariable=R_total_wear)
Red_total_wear.grid(column=7, row=49)
# Green labels and Text Boxes
Green_Start_lbl = ttk.Label(window,
text="Start Weight(g)",
font=("Helvetica", 12))
Green_Start_lbl.grid(column=12, row=44,)
G_start = tk.StringVar()
Green_start = ttk.Entry(window, width=15, textvariable=G_start)
Green_start.grid(column=13, row=44)
Green_Previous_lbl = ttk.Label(window,
text="Previous Weight(g)",
font=("Helvetica", 12))
Green_Previous_lbl.grid(column=12, row=45,)
G_Previous = tk.StringVar()
Green_Previous = ttk.Entry(window, width=15, textvariable=G_Previous)
Green_Previous.grid(column=13, row=45)
Green_Current_lbl = ttk.Label(window,
text="Current Weight(g)",
font=("Helvetica", 12))
Green_Current_lbl.grid(column=12, row=46,)
G_Current = tk.StringVar()
Green_Current = ttk.Entry(window, width=15, textvariable=G_Current)
Green_Current.grid(column=13, row=46)
Green_Diff_lbl = ttk.Label(window,
text="Difference Weight (g)",
font=("Helvetica", 12))
Green_Diff_lbl.grid(column=12, row=47,)
G_diff = tk.StringVar()
Green_diff = ttk.Entry(window, width=15, textvariable=G_diff)
Green_diff.grid(column=13, row=47)
Green_wear_lbl = ttk.Label(window,
text="Wear (mm)",
font=("Helvetica", 12))
Green_wear_lbl.grid(column=12, row=48)
G_wear = tk.StringVar()
Green_wear = ttk.Entry(window, width=15, textvariable=G_wear)
Green_wear.grid(column=13, row=48)
Green_total_wear_lbl = ttk.Label(window,
text="Total Wear (mm)",
font=("Helvetica", 12))
Green_total_wear_lbl.grid(column=12, row=49,)
G_total_wear = tk.StringVar()
Green_total_wear = ttk.Entry(window, width=15, textvariable=G_total_wear)
Green_total_wear.grid(column=13, row=49)
window.mainloop()
| StarcoderdataPython |
1967112 | <reponame>hongxin001/robustdg
import sys
import numpy as np
import argparse
import copy
import random
import json
import torch
from torch.autograd import grad
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.autograd import Variable
import torch.utils.data as data_utils
from .base_eval import BaseEval
from utils.match_function import get_matched_pairs, perfect_match_score
class MatchEval(BaseEval):
def __init__(self, args, train_dataset, val_dataset, test_dataset, base_res_dir, run, cuda):
super().__init__(args, train_dataset, val_dataset, test_dataset, base_res_dir, run, cuda)
def get_metric_eval(self):
if self.args.match_func_data_case=='train':
dataset= self.train_dataset['data_loader']
total_domains= self.train_dataset['total_domains']
domain_list= self.train_dataset['domain_list']
base_domain_size= self.train_dataset['base_domain_size']
domain_size_list= self.train_dataset['domain_size_list']
elif self.args.match_func_data_case== 'val':
dataset= self.val_dataset['data_loader']
total_domains= self.val_dataset['total_domains']
domain_list= self.val_dataset['domain_list']
base_domain_size= self.val_dataset['base_domain_size']
domain_size_list= self.val_dataset['domain_size_list']
elif self.args.match_func_data_case== 'test':
dataset= self.test_dataset['data_loader']
total_domains= self.test_dataset['total_domains']
domain_list= self.test_dataset['domain_list']
base_domain_size= self.test_dataset['base_domain_size']
domain_size_list= self.test_dataset['domain_size_list']
inferred_match=1
# Self Augmentation Match Function evaluation will always follow perfect matches
if self.args.match_func_aug_case:
perfect_match= 1
else:
perfect_match= self.args.perfect_match
        data_match_tensor, label_match_tensor, indices_matched, perfect_match_rank = get_matched_pairs(
            self.args, self.cuda, dataset, base_domain_size, total_domains,
            domain_size_list, self.phi, self.args.match_case, perfect_match,
            inferred_match)
score= perfect_match_score(indices_matched)
perfect_match_rank= np.array(perfect_match_rank)
self.metric_score['Perfect Match Score']= score
self.metric_score['TopK Perfect Match Score']= 100*np.sum( perfect_match_rank < self.args.top_k )/perfect_match_rank.shape[0]
self.metric_score['Perfect Match Rank']= np.mean(perfect_match_rank)
# Perfect Match Prediction Discrepancy
# perm = torch.randperm(data_match_tensor.size(0))
# data_match_tensor_split= torch.split(data_match_tensor[perm], self.args.batch_size, dim=0)
# label_match_tensor_split= torch.split(label_match_tensor[perm], self.args.batch_size, dim=0)
# total_batches= len(data_match_tensor_split)
# penalty_ws= 0.0
# for batch_idx in range(total_batches):
# curr_batch_size= data_match_tensor_split[batch_idx].shape[0]
# data_match= data_match_tensor_split[batch_idx].to(self.cuda)
# data_match= data_match.view( data_match.shape[0]*data_match.shape[1], data_match.shape[2], data_match.shape[3], data_match.shape[4] )
# feat_match= self.phi( data_match )
# label_match= label_match_tensor_split[batch_idx].to(self.cuda)
# label_match= label_match.view( label_match.shape[0]*label_match.shape[1] )
# # Creating tensor of shape ( domain size, total domains, feat size )
# if len(feat_match.shape) == 4:
# feat_match= feat_match.view( curr_batch_size, len(total_domains), feat_match.shape[1]*feat_match.shape[2]*feat_match.shape[3] )
# else:
# feat_match= feat_match.view( curr_batch_size, len(total_domains), feat_match.shape[1] )
# label_match= label_match.view( curr_batch_size, len(total_domains) )
# # print(feat_match.shape)
# data_match= data_match.view( curr_batch_size, len(total_domains), data_match.shape[1], data_match.shape[2], data_match.shape[3] )
# #Positive Match Loss
# pos_match_counter=0
# for d_i in range(feat_match.shape[1]):
# # if d_i != base_domain_idx:
# # continue
# for d_j in range(feat_match.shape[1]):
# if d_j > d_i:
# if self.args.pos_metric == 'l2':
# wasserstein_loss+= torch.sum( torch.sum( (feat_match[:, d_i, :] - feat_match[:, d_j, :])**2, dim=1 ) )
# elif self.args.pos_metric == 'l1':
# wasserstein_loss+= torch.sum( torch.sum( torch.abs(feat_match[:, d_i, :] - feat_match[:, d_j, :]), dim=1 ) )
# elif self.args.pos_metric == 'cos':
# wasserstein_loss+= torch.sum( cosine_similarity( feat_match[:, d_i, :], feat_match[:, d_j, :] ) )
# pos_match_counter += feat_match.shape[0]
# wasserstein_loss = wasserstein_loss / pos_match_counter
# penalty_ws+= float(wasserstein_loss)
# self.metric_score['Perfect Match Distance']= penalty_ws
print('Perfect Match Score: ', self.metric_score['Perfect Match Score'] )
print('TopK Perfect Match Score: ', self.metric_score['TopK Perfect Match Score'] )
print('Perfect Match Rank: ', self.metric_score['Perfect Match Rank'] )
# print('Perfect Match Distance: ', self.metric_score['Perfect Match Distance'])
return | StarcoderdataPython |
5025144 | import abc
from typing import Optional
class ConfigType(abc.ABC):
name: str
path: Optional[str] = "config/phase0.yaml"
@classmethod
def has_config(cls) -> bool:
"""
Return ``True`` if this ``ConfigType`` has configuration that should be loaded.
"""
return cls.path is not None
class Mainnet(ConfigType):
name = "mainnet"
class Minimal(ConfigType):
name = "minimal"
class General(ConfigType):
"""
``General`` covers the set of tests that function independent of a particular config.
"""
name = "general"
path = None
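# Illustrative usage sketch (not part of the original module): only config
# types that define a ``path`` report that a configuration should be loaded.
#
#     assert Mainnet.has_config() and Minimal.has_config()
#     assert not General.has_config()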
| StarcoderdataPython |
1747333 | from django.contrib import admin
from .models import Website, DataPoint
# Register your models here.
admin.site.register(Website)
admin.site.register(DataPoint)
| StarcoderdataPython |
3545599 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# tensor_models.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
KG Sparse embedding
"""
import os
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as functional
import torch.nn.init as INIT
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from _thread import start_new_thread
import traceback
from functools import wraps
from .. import *
logsigmoid = functional.logsigmoid
def get_dev(gpu):
return th.device('cpu') if gpu < 0 else th.device('cuda:' + str(gpu))
def get_device(args):
return th.device('cpu') if args.gpu[0] < 0 else th.device('cuda:' + str(args.gpu[0]))
none = lambda x : x
norm = lambda x, p: x.norm(p=p)**p
get_scalar = lambda x: x.detach().item()
reshape = lambda arr, x, y: arr.view(x, y)
cuda = lambda arr, gpu: arr.cuda(gpu)
def l2_dist(x, y, pw=False):
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return -th.norm(x-y, p=2, dim=-1)
def l1_dist(x, y, pw=False):
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return -th.norm(x-y, p=1, dim=-1)
def dot_dist(x, y, pw=False):
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return th.sum(x * y, dim=-1)
def cosine_dist(x, y, pw=False):
score = dot_dist(x, y, pw)
x = x.norm(p=2, dim=-1)
y = y.norm(p=2, dim=-1)
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return score / (x * y)
def extended_jaccard_dist(x, y, pw=False):
score = dot_dist(x, y, pw)
x = x.norm(p=2, dim=-1)**2
y = y.norm(p=2, dim=-1)**2
if pw is False:
x = x.unsqueeze(1)
y = y.unsqueeze(0)
return score / (x + y - score)
def floor_divide(input, other):
return th.floor_divide(input, other)
def thread_wrapped_func(func):
"""Wrapped func for torch.multiprocessing.Process.
    With this wrapper we can use OMP threads in subprocesses;
    otherwise, OMP_NUM_THREADS=1 is mandatory.
How to use:
@thread_wrapped_func
def func_to_wrap(args ...):
"""
@wraps(func)
def decorated_function(*args, **kwargs):
queue = Queue()
def _queue_result():
exception, trace, res = None, None, None
try:
res = func(*args, **kwargs)
except Exception as e:
exception = e
trace = traceback.format_exc()
queue.put((res, exception, trace))
start_new_thread(_queue_result, ())
result, exception, trace = queue.get()
if exception is None:
return result
else:
assert isinstance(exception, Exception)
raise exception.__class__(trace)
return decorated_function
@thread_wrapped_func
def async_update(args, emb, queue):
"""Asynchronous embedding update for entity embeddings.
How it works:
    1. trainer process pushes entity embedding update requests into the queue.
    2. async_update process pulls requests from the queue, calculates
       the gradient state and gradient and writes them into entity embeddings.
Parameters
----------
args :
        Global configs.
emb : ExternalEmbedding
The entity embeddings.
queue:
The request queue.
"""
th.set_num_threads(args.num_thread)
while True:
(grad_indices, grad_values, gpu_id) = queue.get()
clr = emb.args.lr
if grad_indices is None:
return
with th.no_grad():
grad_sum = (grad_values * grad_values).mean(1)
device = emb.state_sum.device
if device != grad_indices.device:
grad_indices = grad_indices.to(device)
if device != grad_sum.device:
grad_sum = grad_sum.to(device)
emb.state_sum.index_add_(0, grad_indices, grad_sum)
std = emb.state_sum[grad_indices] # _sparse_mask
if gpu_id >= 0:
std = std.cuda(gpu_id)
std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
tmp = (-clr * grad_values / std_values)
if tmp.device != device:
tmp = tmp.to(device)
emb.emb.index_add_(0, grad_indices, tmp)
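# Illustrative sketch of the request protocol described in the docstring
# above (variable names are hypothetical): the trainer side pushes
# (indices, gradients, gpu_id) tuples, and a (None, None, None) sentinel
# shuts the worker down (see ExternalEmbedding.create_async_update() and
# finish_async_update() below).
#
#     queue.put((grad_indices, grad_values, gpu_id))   # one update request
#     queue.put((None, None, None))                    # stop the worker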
class InferEmbedding:
def __init__(self, device):
self.device = device
def load(self, path, name):
"""Load embeddings.
Parameters
----------
path : str
Directory to load the embedding.
name : str
Embedding name.
"""
file_name = os.path.join(path, name+'.npy')
self.emb = th.Tensor(np.load(file_name))
def load_emb(self, emb_array):
"""Load embeddings from numpy array.
Parameters
----------
emb_array : numpy.array or torch.tensor
Embedding array in numpy array or torch.tensor
"""
if isinstance(emb_array, np.ndarray):
self.emb = th.Tensor(emb_array)
else:
self.emb = emb_array
def __call__(self, idx):
return self.emb[idx].to(self.device)
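# Illustrative usage sketch for InferEmbedding (the checkpoint path and
# embedding name below are hypothetical): load a saved embedding table from
# ``<path>/<name>.npy`` and slice rows for a batch of ids on the target device.
#
#     infer_emb = InferEmbedding(th.device('cpu'))
#     infer_emb.load('ckpts', 'TransE_l2_entity')
#     batch_vecs = infer_emb(th.tensor([0, 1, 2]))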
class ExternalEmbedding:
"""Sparse Embedding for Knowledge Graph
It is used to store both entity embeddings and relation embeddings.
Parameters
----------
args :
Global configs.
num : int
Number of embeddings.
dim : int
        Embedding dimension size.
device : th.device
Device to store the embedding.
"""
def __init__(self, args, num, dim, device):
self.gpu = args.gpu
self.args = args
self.num = num
self.trace = []
self.emb = th.empty(num, dim, dtype=th.float32, device=device)
self.state_sum = self.emb.new().resize_(self.emb.size(0)).zero_()
self.state_step = 0
self.has_cross_rel = False
# queue used by asynchronous update
self.async_q = None
# asynchronous update process
self.async_p = None
def init(self, emb_init):
"""Initializing the embeddings.
Parameters
----------
emb_init : float
            The initial embedding range should be [-emb_init, emb_init].
"""
INIT.uniform_(self.emb, -emb_init, emb_init)
INIT.zeros_(self.state_sum)
def setup_cross_rels(self, cross_rels, global_emb):
cpu_bitmap = th.zeros((self.num,), dtype=th.bool)
for i, rel in enumerate(cross_rels):
cpu_bitmap[rel] = 1
self.cpu_bitmap = cpu_bitmap
self.has_cross_rel = True
self.global_emb = global_emb
def get_noncross_idx(self, idx):
cpu_mask = self.cpu_bitmap[idx]
gpu_mask = ~cpu_mask
return idx[gpu_mask]
def share_memory(self):
"""Use torch.tensor.share_memory_() to allow cross process tensor access
"""
self.emb.share_memory_()
self.state_sum.share_memory_()
def __call__(self, idx, gpu_id=-1, trace=True):
""" Return sliced tensor.
Parameters
----------
idx : th.tensor
Slicing index
gpu_id : int
Which gpu to put sliced data in.
trace : bool
If True, trace the computation. This is required in training.
If False, do not trace the computation.
Default: True
"""
if self.has_cross_rel:
cpu_idx = idx.cpu()
cpu_mask = self.cpu_bitmap[cpu_idx]
cpu_idx = cpu_idx[cpu_mask]
cpu_idx = th.unique(cpu_idx)
if cpu_idx.shape[0] != 0:
cpu_emb = self.global_emb.emb[cpu_idx]
self.emb[cpu_idx] = cpu_emb.cuda(gpu_id)
s = self.emb[idx]
if gpu_id >= 0:
s = s.cuda(gpu_id)
# During the training, we need to trace the computation.
# In this case, we need to record the computation path and compute the gradients.
if trace:
data = s.clone().detach().requires_grad_(True)
self.trace.append((idx, data))
else:
data = s
return data
def update(self, gpu_id=-1):
""" Update embeddings in a sparse manner
        Sparse embeddings are updated in mini-batches. We maintain gradient states for
        each embedding so they can be updated separately.
Parameters
----------
gpu_id : int
            Which gpu to accelerate the calculation. If -1 is provided, the CPU is used.
"""
self.state_step += 1
with th.no_grad():
for idx, data in self.trace:
grad = data.grad.data
clr = self.args.lr
#clr = self.args.lr / (1 + (self.state_step - 1) * group['lr_decay'])
# the update is non-linear so indices must be unique
grad_indices = idx
grad_values = grad
if self.async_q is not None:
grad_indices.share_memory_()
grad_values.share_memory_()
self.async_q.put((grad_indices, grad_values, gpu_id))
else:
grad_sum = (grad_values * grad_values).mean(1)
device = self.state_sum.device
if device != grad_indices.device:
grad_indices = grad_indices.to(device)
if device != grad_sum.device:
grad_sum = grad_sum.to(device)
if self.has_cross_rel:
cpu_mask = self.cpu_bitmap[grad_indices]
cpu_idx = grad_indices[cpu_mask]
if cpu_idx.shape[0] > 0:
cpu_grad = grad_values[cpu_mask]
cpu_sum = grad_sum[cpu_mask].cpu()
cpu_idx = cpu_idx.cpu()
self.global_emb.state_sum.index_add_(0, cpu_idx, cpu_sum)
std = self.global_emb.state_sum[cpu_idx]
if gpu_id >= 0:
std = std.cuda(gpu_id)
std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
tmp = (-clr * cpu_grad / std_values)
tmp = tmp.cpu()
self.global_emb.emb.index_add_(0, cpu_idx, tmp)
self.state_sum.index_add_(0, grad_indices, grad_sum)
std = self.state_sum[grad_indices] # _sparse_mask
if gpu_id >= 0:
std = std.cuda(gpu_id)
std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
tmp = (-clr * grad_values / std_values)
if tmp.device != device:
tmp = tmp.to(device)
# TODO(zhengda) the overhead is here.
self.emb.index_add_(0, grad_indices, tmp)
self.trace = []
def create_async_update(self):
"""Set up the async update subprocess.
"""
self.async_q = Queue(1)
self.async_p = mp.Process(target=async_update, args=(self.args, self, self.async_q))
self.async_p.start()
def finish_async_update(self):
"""Notify the async update subprocess to quit.
"""
self.async_q.put((None, None, None))
self.async_p.join()
def curr_emb(self):
"""Return embeddings in trace.
"""
data = [data for _, data in self.trace]
return th.cat(data, 0)
def save(self, path, name):
"""Save embeddings.
Parameters
----------
path : str
Directory to save the embedding.
name : str
Embedding name.
"""
file_name = os.path.join(path, name+'.npy')
np.save(file_name, self.emb.cpu().detach().numpy())
def load(self, path, name):
"""Load embeddings.
Parameters
----------
path : str
Directory to load the embedding.
name : str
Embedding name.
"""
file_name = os.path.join(path, name+'.npy')
self.emb = th.Tensor(np.load(file_name))
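# Illustrative sketch of the ExternalEmbedding training cycle (argument
# values are hypothetical). A call with trace=True returns a detached copy
# that requires grad; after backward(), update() applies a sparse
# Adagrad-style step (state_sum accumulates squared gradients) back into
# the shared table.
#
#     entity_emb = ExternalEmbedding(args, n_entities, args.hidden_dim, th.device('cpu'))
#     entity_emb.init(emb_init=1.0 / args.hidden_dim)
#     vecs = entity_emb(idx, gpu_id=-1, trace=True)
#     loss = score_func(vecs).mean()
#     loss.backward()
#     entity_emb.update(gpu_id=-1)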
| StarcoderdataPython |
3564617 | """Write a function that takes in two numbers and recursively multiplies them together
"""
def recursive_multiplication(n,m):
# base case
if n == 1 and m == 1:
return 1
elif n == 1:
return m
elif m == 1:
return n
elif n == 0 or m == 0:
return 0
# recursive case
else:
return recursive_multiplication(n-1,m) + m
# print(recursive_multiplication(12,3))
"""Write a function that takes in a base and an exp and recursively computes base**exp. You are not allowed to
use the ** operator!
"""
def exponent(base,exp):
# base cases
if exp == 0:
return 1
# recursive cases:
else:
return base * exponent(base,exp-1)
# print(exponent(2,4))
"""3. Write a function using recursion to print numbers from n to 0.
"""
def print_nums(n):
# base case
if n == 0:
print(n)
return 0
elif n > 0 :
print(n)
return print_nums(n-1)
elif n < 0:
print(n)
return print_nums(n+1)
# print_nums(-9)
"""4. Write a function using recursion to print numbers from 0 to n (you just need to change one line in the program
of problem 3).
"""
def countup(n,m):
if m == n:
print(m)
return m
else:
print(m)
return countup(n,m+1)
# countup(5,0)
"""5. Write a function using recursion that takes in a string and returns a reversed copy of the string. The only
string operation you are allowed to use is string concatenation.
"""
"""As others have pointed out, this is not the way you would usually do this in Python. An iterative solution is going to be faster, and using slicing to do it is going to be faster still.
Additionally, Python imposes a limit on stack size, and there's no tail call optimization, so a recursive solution would be limited to reversing strings of only about a thousand characters. You can increase Python's stack size, but there would still be a fixed limit, while other solutions can always handle a string of any length.
"""
# def reversed_string(str):
# reversed = ""
# for char in str:
# reversed = char + reversed
# return reversed
# str= "shalom"
# print(reversed_string(str))
def reversed_string(str):
if len(str) == 0:
return ""
else:
# return str[-1] + reversed_string(str[:-1])
return reversed_string(str[1:]) + str[0]
# print(reversed_string("shalom"))
"""6. Write a function using recursion to check if a number n is prime (you have to check whether n is divisible by
any number below n).
"""
def is_prime(n, m=2):
    if n < 2:
        print("{} is not a prime number".format(n))
        return False
    if n == 2:
        print("{} is a prime number".format(n))
        return True
    if n % m == 0:
        print("{} is not a prime number".format(n))
        return False
    if m * m > n:
        print("{} is a prime number".format(n))
        return True
    return is_prime(n, m + 1)
# print(is_prime(15))
"""7. Write a recursive function that takes in one argument n and computes Fn, the nth value of the Fibonacci
sequence. Recall that the Fibonacci sequence is defined by the relation
F(n) = F(n-1) + F(n-2), with F(0) = 0 and F(1) = 1.
"""
def Fibonacci(n):
if n==0:
return 0
if n==1:
return 1
else:
return Fibonacci(n-1) + Fibonacci(n-2)
print(Fibonacci(6))
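# A memoised variant (illustrative, not part of the exercise) avoids the
# exponential recomputation of the naive recursion above:
# def fibonacci_memo(n, cache={0: 0, 1: 1}):
#     if n not in cache:
#         cache[n] = fibonacci_memo(n - 1) + fibonacci_memo(n - 2)
#     return cache[n]
# print(fibonacci_memo(6))  # 8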
| StarcoderdataPython |
245616 | from dataclasses import dataclass
from sqlalchemy import func
from pycroft import config
from pycroft.model.finance import Split
from pycroft.model.user import PreMember, User, Membership
@dataclass
class OverviewStats:
member_requests: int
users_in_db: int
members: int
not_paid_all: int
not_paid_members: int
def overview_stats():
return OverviewStats(
member_requests=PreMember.q.count(),
users_in_db=User.q.count(),
members=User.q
.join(Membership)
.filter(Membership.group == config.member_group,
Membership.active_during.contains(func.current_timestamp()))
.count(),
not_paid_all=User.q
.join(User.account)
.join(Split)
.group_by(User.id)
.having(func.sum(Split.amount) > 0)
.count(),
not_paid_members=User.q
.join(Membership)
.filter(Membership.group == config.member_group,
Membership.active_during.contains(func.current_timestamp()))
.join(User.account)
.join(Split)
.group_by(User.id)
.having(func.sum(Split.amount) > 0)
.count(),
)
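# Illustrative usage sketch (e.g. from a dashboard view; names are as
# defined above):
#
#     stats = overview_stats()
#     print(stats.members, "members,", stats.member_requests, "open requests")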
| StarcoderdataPython |
5102894 | #!/usr/bin/env python3
from sklearn.metrics.cluster import normalized_mutual_info_score
import numpy as np
from subprocess import call
from np_loader import *
from path_tools import *
from ism import *
from orthogonal_optimization import *
from DimGrowth import *
import itertools
#from acc import *
import socket
import torch
import pickle
import random
import string
import os
class test_base():
def __init__(self, db):
if db['run_only_validation']:
db['data_file_name'] = './datasets/' + db['data_name'] + '_validation.csv'
db['label_file_name'] = './datasets/' + db['data_name'] + '_label_validation.csv'
else:
db['data_file_name'] = './datasets/' + db['data_name'] + '.csv'
db['label_file_name'] = './datasets/' + db['data_name'] + '_label.csv'
db['validation_data_file_name'] = './datasets/' + db['data_name'] + '_validation.csv'
db['validation_label_file_name'] = './datasets/' + db['data_name'] + '_label_validation.csv'
db['best_path'] = '../version9/pre_trained_weights/Best_pk/'
db['learning_rate'] = 0.001
db['center_and_scale'] = True
db['kernel_type'] = 'rbf' #rbf, linear, rbf_slow
db['poly_power'] = 3
db['poly_constant'] = 1
self.db = db
tmp_path = './tmp/' + db['data_name'] + '/'
db_output_path = tmp_path + 'db_files/'
batch_output_path = tmp_path + 'batch_outputs/'
ensure_path_exists('./tmp')
ensure_path_exists('./results')
ensure_path_exists(tmp_path)
ensure_path_exists(db_output_path)
ensure_path_exists(batch_output_path)
def remove_tmp_files(self):
db = self.db
file_in_tmp = os.listdir('./tmp/' + db['data_name'] + '/db_files/')
for i in file_in_tmp:
if i.find(db['data_name']) == 0:
os.remove('./tmp/' + db['data_name'] + '/db_files/' + i)
def output_db_to_text(self):
db = self.db
db['db_file'] = './tmp/' + db['data_name'] + '/db_files/' + db['data_name'] + '_' + str(int(10000*np.random.rand())) + '.txt'
fin = open(db['db_file'], 'w')
for i,j in db.items():
if type(j) == str:
fin.write('db["' + i + '"]="' + str(j) + '"\n')
elif type(j) == bool:
fin.write('db["' + i + '"]=' + str(j) + '\n')
elif type(j) == type:
fin.write('db["' + i + '"]=' + j.__name__ + '\n')
elif type(j) == float:
fin.write('db["' + i + '"]=' + str(j) + '\n')
elif type(j) == int:
fin.write('db["' + i + '"]=' + str(j) + '\n')
elif j is None:
fin.write('db["' + i + '"]=None\n')
else:
raise ValueError('unrecognized type : ' + str(type(j)) + ' found.')
fin.close()
return db['db_file']
def export_bash_file(self, i, test_name, export_db):
run_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(2))
cmd = ''
cmd += "#!/bin/bash\n"
cmd += "\n#set a job name "
cmd += "\n#SBATCH --job-name=%d_%s_%s"%(i, test_name, run_name)
cmd += "\n################# "
cmd += "\n#a file for job output, you can check job progress"
cmd += "\n#SBATCH --output=./tmp/%s/batch_outputs/%d_%s_%s.out"%(test_name, i, test_name, run_name)
cmd += "\n#################"
cmd += "\n# a file for errors from the job"
cmd += "\n#SBATCH --error=./tmp/%s/batch_outputs/%d_%s_%s.err"%(test_name, i, test_name, run_name)
cmd += "\n#################"
cmd += "\n#time you think you need; default is one day"
cmd += "\n#in minutes in this case, hh:mm:ss"
cmd += "\n#SBATCH --time=24:00:00"
cmd += "\n#################"
cmd += "\n#number of tasks you are requesting"
cmd += "\n#SBATCH -N 1"
cmd += "\n#SBATCH --exclusive"
cmd += "\n#################"
cmd += "\n#partition to use"
cmd += "\n#SBATCH --partition=general"
cmd += "\n#SBATCH --mem=120Gb"
cmd += "\n#################"
cmd += "\n#number of nodes to distribute n tasks across"
cmd += "\n#################"
cmd += "\n"
cmd += "\npython ./src/hsic_algorithms.py " + export_db
fin = open('execute_combined.bash','w')
fin.write(cmd)
fin.close()
def batch_run(self):
count = 0
db = self.db
output_list = self.parameter_ranges()
every_combination = list(itertools.product(*output_list))
for count, single_instance in enumerate(every_combination):
[W_optimize_technique] = single_instance
db['W_optimize_technique'] = W_optimize_technique
fname = self.output_db_to_text()
self.export_bash_file(count, db['data_name'], fname)
if socket.gethostname().find('login') != -1:
call(["sbatch", "execute_combined.bash"])
else:
os.system("bash ./execute_combined.bash")
def batch_file_names(self):
count = 0
db = self.db
output_list = self.file_name_ranges()
every_combination = list(itertools.product(*output_list))
for count, single_instance in enumerate(every_combination):
[data_name, W_optimize_technique] = single_instance
db['data_name'] = data_name
db['W_optimize_technique'] = W_optimize_technique
tmp_path = './tmp/' + db['data_name'] + '/'
db_output_path = tmp_path + 'db_files/'
batch_output_path = tmp_path + 'batch_outputs/'
ensure_path_exists('./tmp')
ensure_path_exists(tmp_path)
ensure_path_exists(db_output_path)
ensure_path_exists(batch_output_path)
fname = self.output_db_to_text()
self.export_bash_file(count, db['data_name'], fname)
if socket.gethostname().find('login') != -1:
call(["sbatch", "execute_combined.bash"])
else:
os.system("bash ./execute_combined.bash")
def basic_run(self):
self.remove_tmp_files()
fname = self.output_db_to_text()
call(["./src/hsic_algorithms.py", fname])
| StarcoderdataPython |
8072156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Module containing a class which allows access to disparate Python neural
network implementations and architectures, united through a common interface.
This interface is modelled on the scikit-learn interface.
'''
import warnings
import math
import timeit
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier as SKL_MLP
from sknn.mlp import Classifier as sknn_MLPClassifier, Layer
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.optimizers import SGD, Adam, Adadelta
from keras.regularizers import l2
from keras.callbacks import LearningRateScheduler
class UnifiedMLP(object):
""" Unified interface to compare neural network modules and hyperparameters.
The module is initialised with arguments that associate it with a dataset.
Then, neural networks from multiple packages with specified hyperparameters
can be trained to this dataset and the results compared.
After initialisation, a dict self.benchmark in the results dict format is
accessible. The stratified random approach of randomly assigning outcomes
with correct class weights is used. :ref:`results-dict`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Vectors of features for each sample, where there are n_samples vectors
each with n_features elements.
Y : array-like, shape (n_samples, n_classes)
Vectors of labelled outcomes for each sample. UnifiedMLP expects a
boolean or binary array specifying membership to each of n_classes
classes.
split : tuple of 3 entries, summing to 1.0 or less.
The split of data between training, validation and testing. Training
data is passed to fit() methods, validation data is used to track
fitting progress and can be used for early stopping, and test data is
used for the final evaluation of model quality.
Examples
--------
>>> nn = UnifiedMLP(X, Ys).set_hypers(
... learning_rate=0.001,
... batch_size=24,
... module='keras',
... dropout=0.5,
... max_epoch=50,
... )
    >>> results, hypers, model = nn.run_test()
"""
_default_hypers = {
'module': 'keras',
'frac_training': 1.0,
##################
# Architecture #
##################
'hidden_units': 15,
'activation': 'relu',
####################
# Regularisation #
####################
'alpha': 0.0000, # L2 penalty. 0.0 = turned off.
'dropout': 0.0, # Dropout between hidden and output layers.
##############
# Learning #
##############
'learning_rate': 0.001, # Adam and SGD only
'algorithm': 'sgd',
'batch_size': 16,
# SGD only
'momentum': 0.9,
'nesterov': False,
# Adam only (Scikit-learn and Keras only)
'beta_1': 0.9,
'beta_2': 0.999,
'epsilon': 1e-8,
###############################################
# Iteration/epoch settings - can be changed #
###############################################
# Epochs to run for if no convergence.
'max_epoch': 100,
# Max decline in loss between epochs to consider converged. (Ratio)
'epoch_tol': 0.001,
# Number of consecutive epochs considered converged before stopping.
'n_stable': 3,
# For SGD, decay in learning rate between epochs. 0 = no decay.
'learning_decay': 0.000,
# Terminate before the loss stops improving if the accuracy score
# on the validation stops improving. Uses epoch_tol and n_stable.
'early_stopping': True,
#######################
# Consistent output # (for developing and debugging)
#######################
'random_state': 1,
}
# For settings which take a categorical value, provided is a dict of
# which settings should work in which of the Python modules.
# This dict exists only for reference. It is not used for computaton.
supported_settings = {
'activation': {
'relu': ['sklearn', 'sknn', 'keras'],
'linear': ['sknn', 'keras'],
'logistic': ['sklearn', 'sknn', 'keras'],
'tanh': ['sklearn', 'sknn', 'keras']
},
'algorithm': {
'sgd': ['sklearn', 'sknn', 'keras'],
'adam': ['sklearn', 'keras'],
'adadelta': ['sknn', 'keras']
}
}
def __init__(self, X, Y, split=(0.70, 0.15, 0.15)):
# Normalise inputs and split data
self.X_train, self.X_valid, self.X_test, self.Y_train, self.Y_valid, self.Y_test = \
self._prepare_data(X, Y, split)
self.n_features = X.shape[1]
self.n_classes = Y.shape[1]
# Help Scikit-learn support multi-label classification probabilities
self.n_labels_sklearn = self.Y_train.sum(axis=1).mean()
self._benchmark() # Stratified random
# Apply the default settings
self._nn_hypers = {}
self.set_hypers(**UnifiedMLP._default_hypers)
def _benchmark(self):
classifier = _StratifiedRandomClassifier().fit(self.X_train, self.Y_train)
Y_test_pred = classifier.predict(self.X_test, self.Y_test)
accuracy, F1, accuracy_all, F1_all = getScores(self.Y_test, Y_test_pred)
self.benchmark = {
'F1': F1,
'accuracy': accuracy,
'F1_all': F1_all,
'accuracy_all': accuracy_all,
'time_all': 0,
'n_epochs_all': 0,
'loss_all' : 0
}
@staticmethod
def _prepare_data(X, Y, split):
X = np.array(X).astype('float64')
Y = np.array(Y).astype(bool)
try:
assert(X.shape[0] == Y.shape[0])
except AssertionError:
raise AssertionError("Number of samples differs between X and Y.")
split_randint = 0
leftover = 1.0 - sum(split)
if leftover > 0.0:
warnings.warn("Suggested data split doesn't use full dataset.")
if leftover < 0.0:
raise ValueError("Specified data split sums to over 1.0.")
# Assign test and validation data before training data.
# This ensures training data size can be varied and other
# datasets will stay identical.
X, X_test, Y, Y_test = train_test_split(
X, Y, test_size=split[2], random_state=split_randint)
X, X_valid, Y, Y_valid = train_test_split(
X, Y, test_size=split[1] / (split[1] + split[0] + leftover),
random_state=split_randint)
try:
_, X_train, _, Y_train = train_test_split(
X, Y, test_size=split[0] / (split[0] + leftover),
random_state=split_randint)
except ValueError:
# scikit-learn doesn't like test_size=1.0
X_train, Y_train = X, Y
# Train the normaliser on training data only
normaliser = preprocessing.StandardScaler().fit(X_train)
X_train = normaliser.transform(X_train)
X_valid = normaliser.transform(X_valid)
X_test = normaliser.transform(X_test)
return X_train, X_valid, X_test, Y_train, Y_valid, Y_test
def _validate_settings(self):
''' Some basic compatibility checks between settings. Doesn't check
module-specific validity, e.g. whether sklearn supports an algorithm.
'''
for hyper_name in self._nn_hypers.keys():
try:
assert (hyper_name in self._default_hypers.keys())
except AssertionError:
raise AssertionError("The setting name \"" + hyper_name +
" \" is unknown")
if self._nn_hypers['algorithm'] != 'sgd' and self._nn_hypers['learning_decay'] != 0.0:
raise KeyError(
"The learning_decay option is for the sgd algorithm only.")
def get_hypers(self):
''' Return neural network hyperparameters.
Returns
-------
nn_settings : dict
'''
return dict(self._nn_hypers)
def set_hypers(self, **new_settings):
''' Set the hyperparameters with which neural networks are built.
Takes keyword arguments setting neural network hyperparameters.
:doc:`Hyperparameters reference guide<../hyperparameter_reference>`.
Returns
-------
self
'''
# Modules often choke on numpy types
for key in new_settings.keys():
if type(new_settings[key]) == np.float64 or\
type(new_settings[key]) == np.float32:
new_settings[key] = float(new_settings[key])
elif type(new_settings[key]) == np.string_:
new_settings[key] = str(new_settings[key])
elif type(new_settings[key]) == np.bool_:
new_settings[key] = bool(new_settings[key])
self._nn_hypers.update(new_settings)
self._validate_settings()
return self
def run_test(self):
""" Build, train and test a neural network architecture.
        Guarantee: If settings incompatible with a specific module are passed,
``KeyError`` is raised.
Returns
-------
results : dict
Results of the test. :ref:`results-dict`.
hypers : dict
Complete dict of hyperparameters used.
model : object
The MLP object resulting from running the test, with a class
dependent on the module which was used.
"""
module = self.get_hypers()['module']
modules = {
'sklearn': self._sklearn,
'sknn': self._sknn,
'keras': self._keras
}
training, performance, model = modules[module]()
hypers = self.get_hypers()
results = {
'training': {
'loss_all': training[0],
'accuracy': training[1],
'F1': training[2],
'time_all': training[3],
'accuracy_all': training[4],
'F1_all': training[5]
},
'performance': {
'accuracy': performance[0],
'F1': performance[1],
'time_all': performance[2],
'accuracy_all': performance[3],
'F1_all': performance[4],
'n_epochs_all': performance[5]
}
}
return results, hypers, model
def _keras(self):
np.random.seed(self._nn_hypers['random_state'])
#########################
# Settings conversion #
#########################
activation_dict = {'relu': 'relu', 'linear': 'linear',
'logistic': 'sigmoid', 'tanh': 'tanh'}
try:
activation = activation_dict[self._nn_hypers['activation']]
except KeyError:
err = "Activation function \"" + self._nn_hypers['activation']
err += "\" unsupported."
raise KeyError(err)
# Callback for SGD learning rate decline
ii_epoch = [0]
def learning_schedule(epoch):
init = self._nn_hypers['learning_rate']
factor = (1 - self._nn_hypers['learning_decay'])**ii_epoch[0]
lr = factor * init
return lr
###############
# Create NN #
###############
keras_nn = Sequential()
keras_nn.add(Dense(
self._nn_hypers['hidden_units'],
input_dim=self.n_features,
init='lecun_uniform',
W_regularizer=l2(self._nn_hypers['alpha']),
activation=activation)
)
keras_nn.add(Dropout(self._nn_hypers['dropout']))
keras_nn.add(Dense(
self.n_classes,
init='lecun_uniform',
W_regularizer=l2(self._nn_hypers['alpha']),
activation='sigmoid')
)
if self._nn_hypers['algorithm'] == 'sgd':
optimiser = SGD(
lr=self._nn_hypers['learning_rate'],
decay=0.0,
momentum=self._nn_hypers['momentum'],
nesterov=self._nn_hypers['nesterov'],
)
callbacks = [LearningRateScheduler(learning_schedule)]
elif self._nn_hypers['algorithm'] == 'adam':
optimiser = Adam(
lr=self._nn_hypers['learning_rate'],
beta_1=self._nn_hypers['beta_1'],
beta_2=self._nn_hypers['beta_2'],
epsilon=self._nn_hypers['epsilon']
)
callbacks = []
elif self._nn_hypers['algorithm'] == 'adadelta':
optimiser = Adadelta() # Recommended to use the default values
callbacks = []
else:
err = "Learning algorithm \"" + self._nn_hypers['algorithm']
err += "\" not implemented."
raise KeyError(err)
keras_nn.compile(loss='binary_crossentropy', optimizer=optimiser)
##############
# Train NN #
##############
loss_curve = []
acc_curve, acc_all_curve = [], []
F1_curve, F1_all_curve = [], []
time_curve = []
n_loss = [0]
n_valid = [0]
stop_reason = 0
X_train, Y_train = self._trim_data(self._nn_hypers['frac_training'],
self.X_train, self.Y_train)
for i_epoch in range(self._nn_hypers['max_epoch']):
ii_epoch[0] = i_epoch
start_time = timeit.default_timer()
history = keras_nn.fit(
X_train, Y_train,
nb_epoch=10,
batch_size=self._nn_hypers['batch_size'],
verbose=0,
callbacks=callbacks
)
end_time = timeit.default_timer()
time_curve.append(end_time - start_time)
####################
# Track progress #
####################
loss_curve.append(history.history['loss'][1])
valid_proba = keras_nn.predict_proba(self.X_valid, verbose=0)
valid_predict = self._predict_from_proba(valid_proba)
valid_accuracy, valid_F1, valid_accuracy_all, valid_F1_all =\
getScores(self.Y_valid, valid_predict)
acc_curve.append(valid_accuracy)
F1_curve.append(valid_F1)
acc_all_curve.append(valid_accuracy_all)
F1_all_curve.append(valid_F1_all)
#############################
# Check stopping criteria #
#############################
if self._converged(loss_curve, n_loss):
stop_reason = 1
break
if self._nn_hypers['early_stopping'] and\
self._converged(acc_all_curve, n_valid, invert=True):
stop_reason = 2
break
test_proba = keras_nn.predict_proba(self.X_test, verbose=0)
test_predict = self._predict_from_proba(test_proba)
test_acc, test_F1, test_acc_all, test_F1_all =\
getScores(self.Y_test, test_predict)
n_epochs = i_epoch + 1
training = (loss_curve, acc_curve, F1_curve,
time_curve, acc_all_curve, F1_all_curve)
performance = (test_acc, test_F1, np.mean(
time_curve), test_acc_all, test_F1_all, n_epochs)
return training, performance, keras_nn
@staticmethod
def _predict_from_proba(proba, thres=0.5):
return (proba > thres)
def _sklearn(self):
#####################################################
# Strip settings that are unrecognised by sklearn #
#####################################################
unsupported_keys = ['dropout']
bad_settings = [self._nn_hypers[key] > 0 for key in unsupported_keys]
if any(bad_settings):
err = "Unsupported settings: "
for i, key in enumerate(unsupported_keys):
if bad_settings[i]:
err += key + ", "
raise KeyError(err[:-2])
valid_keys = [
'activation', 'alpha', 'batch_size', 'random_state', 'shuffle',
'verbose', 'momentum', 'beta_1', 'beta_2', 'epsilon', 'algorithm'
]
sklearn_settings = {key: val for key, val in self._nn_hypers.items()
if key in valid_keys}
sklearn_settings.update({
'n_labels': self.n_labels_sklearn,
'hidden_layer_sizes': (self._nn_hypers['hidden_units']),
'learning_rate_init': self._nn_hypers['learning_rate'],
'nesterovs_momentum': self._nn_hypers['nesterov'],
'learning_rate': 'constant',
'max_iter': 1,
'warm_start': True
})
###############
# Create NN #
###############
sklearn_nn = _SKL_Multilabel_MLP(**sklearn_settings)
##############
# Train NN #
##############
loss_curve = []
acc_curve, acc_all_curve = [], []
F1_curve, F1_all_curve = [], []
time_curve = []
n_loss = [0]
n_valid = [0]
stop_reason = 0
learning_rate = sklearn_settings['learning_rate_init']
X_train, Y_train = self._trim_data(self._nn_hypers['frac_training'],
self.X_train, self.Y_train)
for i_epoch in range(self._nn_hypers['max_epoch']):
try:
start_time = timeit.default_timer()
sklearn_nn.fit(X_train, Y_train)
end_time = timeit.default_timer()
time_curve.append(end_time - start_time)
except ValueError as e:
raise KeyError(e.message)
loss_curve = sklearn_nn.loss_curve_ # sklearn itself keeps a list across fits
learning_rate *= (1.0 - self._nn_hypers['learning_decay'])
sklearn_nn.set_params(learning_rate_init=learning_rate)
valid_proba = sklearn_nn.predict_proba(self.X_valid)
valid_predict = self._predict_from_proba(valid_proba)
valid_accuracy, valid_F1, valid_accuracy_all, valid_F1_all =\
getScores(self.Y_valid, valid_predict)
acc_curve.append(valid_accuracy)
F1_curve.append(valid_F1)
acc_all_curve.append(valid_accuracy_all)
F1_all_curve.append(valid_F1_all)
#############################
# Check stopping criteria #
#############################
if self._converged(loss_curve, n_loss):
stop_reason = 1
break
if self._nn_hypers['early_stopping'] and\
self._converged(acc_all_curve, n_valid, invert=True):
stop_reason = 2
break
test_proba = sklearn_nn.predict_proba(self.X_test)
test_predict = self._predict_from_proba(test_proba)
test_acc, test_F1, test_acc_all, test_F1_all =\
getScores(self.Y_test, test_predict)
n_epochs = i_epoch + 1
loss_curve = [loss / self.n_classes for loss in loss_curve]
training = (loss_curve, acc_curve, F1_curve,
time_curve, acc_all_curve, F1_all_curve)
performance = (test_acc, test_F1, np.mean(
time_curve), test_acc_all, test_F1_all, n_epochs)
return training, performance, sklearn_nn
def _sknn(self):
#########################
# Settings conversion #
#########################
activation_dict = {
'relu': 'Rectifier', 'linear': 'Linear', 'logistic': 'Sigmoid', 'tanh': 'Tanh'}
try:
activation = activation_dict[self._nn_hypers['activation']]
except KeyError:
err = "Activation function \"" + self._nn_hypers['activation']
err += "\" not supported."
raise KeyError(err)
if self._nn_hypers['algorithm'] == 'sgd':
learning_rate = self._nn_hypers['learning_rate']
if self._nn_hypers['momentum'] == 0.0:
learning_rule = 'sgd'
elif self._nn_hypers['nesterov'] is True:
learning_rule = 'nesterov'
else:
learning_rule = 'momentum'
elif self._nn_hypers['algorithm'] == 'adadelta':
learning_rule = 'adadelta'
learning_rate = 1.0 # Recommended to always use default values here
else:
err = "The algorithm " + self._nn_hypers['algorithm'] +\
" is not supported."
raise KeyError(err)
if self._nn_hypers['dropout'] != 0 and self._nn_hypers['alpha'] != 0:
err = "The combined use of dropout and L2 is not supported."
raise KeyError(err)
if self._nn_hypers['learning_decay'] != 0.0:
raise KeyError("SGD learning decay not supported.")
# The contents of a mutable variable can be changed in a closure.
batch_loss = [0]
# SKNN doesn't give access to the loss in the end-of-epoch callback,
# only in the end-of-batch callback.
def batch_callback(**variables):
batch_loss[0] = variables['loss'] / variables['count']
###############
# Create NN #
###############
sknn_nn = sknn_MLPClassifier(
# Architecture
layers=[Layer(activation, units=self._nn_hypers['hidden_units'],),
Layer("Softmax", units=2 * self.n_classes)],
# Learning settings
loss_type='mcc',
learning_rate=learning_rate,
learning_rule=learning_rule,
learning_momentum=self._nn_hypers['momentum'],
batch_size=self._nn_hypers['batch_size'],
n_iter=1,
# Regularisation
weight_decay=self._nn_hypers['alpha'],
dropout_rate=self._nn_hypers['dropout'],
random_state=self._nn_hypers['random_state'],
# Callback to get loss
callback={'on_batch_finish': batch_callback},
# verbose=1
)
##############
# Train NN #
##############
loss_curve = []
acc_curve, acc_all_curve = [], []
F1_curve, F1_all_curve = [], []
time_curve = []
n_loss = [0]
n_valid = [0]
stop_reason = 0
X_train, Y_train = self._trim_data(self._nn_hypers['frac_training'],
self.X_train, self.Y_train)
for i_epoch in range(self._nn_hypers['max_epoch']):
start_time = timeit.default_timer()
sknn_nn.fit(X_train, Y_train)
end_time = timeit.default_timer()
time_curve.append(end_time - start_time)
# Normalise the same as the other loss curves.
loss_curve.append(batch_loss[0] / (4 * self.n_classes))
# NOTE: predict_proba returns 2 entries per binary class, which are
# True and False, adding to 1.0. We take the probability of True.
valid_proba = sknn_nn.predict_proba(self.X_valid)[:, 1::2]
valid_predict = self._predict_from_proba(valid_proba)
valid_accuracy, valid_F1, valid_accuracy_all, valid_F1_all =\
getScores(self.Y_valid, valid_predict)
acc_curve.append(valid_accuracy)
F1_curve.append(valid_F1)
acc_all_curve.append(valid_accuracy_all)
F1_all_curve.append(valid_F1_all)
# Use change in loss_curve to evaluate stability
if self._converged(loss_curve, n_loss):
stop_reason = 1
break
if self._nn_hypers['early_stopping'] and\
self._converged(acc_all_curve, n_valid, invert=True):
stop_reason = 2
break
test_proba = sknn_nn.predict_proba(self.X_test)[:, 1::2]
test_predict = self._predict_from_proba(test_proba)
test_acc, test_F1, test_acc_all, test_F1_all =\
getScores(self.Y_test, test_predict)
n_epochs = i_epoch + 1
training = (loss_curve, acc_curve, F1_curve,
time_curve, acc_all_curve, F1_all_curve)
performance = (test_acc, test_F1, np.mean(
time_curve), test_acc_all, test_F1_all, n_epochs)
return training, performance, sknn_nn
@staticmethod
def _trim_data(frac, X, Y):
n_samples = X.shape[0]
n_trimmed_samples = int(round(n_samples * frac))
return X[:n_trimmed_samples], Y[:n_trimmed_samples]
def _converged(self, objective, n_objective, invert=False):
''' Check if a quantity in a list has stopped decreasing within tol
Parameters
----------
objective : list
Loss or validation score at end of each epoch until now
n_objective : list of int
Length of previous stability streak
invert : bool
Check if quantity has stopped increasing instead of decreasing
Returns
-------
True if converged according to settings
False if not converged according to settings
Updates n_objective as a side-effect.
'''
try:
change = (objective[-1] / objective[-2]) - 1.0
except IndexError:
return False
change = change if invert else change * -1.0
if change < self._nn_hypers['epoch_tol']:
n_objective[0] += 1
else:
n_objective[0] = 0
if n_objective[0] == self._nn_hypers['n_stable']:
return True
else:
return False
class _SKL_Multilabel_MLP(SKL_MLP):
''' Wrapper for Scikit-learn enabling multi-label probability output. '''
def __init__(self, hidden_layer_sizes=(100,), activation="relu",
algorithm='adam', alpha=0.0001,
batch_size=200, learning_rate="constant",
learning_rate_init=0.001, power_t=0.5, max_iter=200,
shuffle=True, random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, n_labels=1):
self.n_labels = n_labels
sup = super(_SKL_Multilabel_MLP, self)
sup.__init__(hidden_layer_sizes=hidden_layer_sizes,
activation=activation, algorithm=algorithm, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : array-like, shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
proba = super(_SKL_Multilabel_MLP, self).predict_proba(X)
return proba * self.n_labels
class _StratifiedRandomClassifier(object):
''' Benchmarking classifier with consistent behaviour.
Randomly assigns class predictions with the correct balance of True and
False predictions per class. Deterministic: there is no variance in the
accuracy of the answers to the same problem. In other words, the
classification accuracy is equal to the expected value of the
accuracy in scikit-learn's DummyClassifier(strategy='stratified')
'''
def fit(self, X, Y):
self.weights = Y.mean(axis=0)
return self
def getAccuracy(self):
''' Analytically assess the expected accuracy.
accuracy = correct_predictions/all_predictions
'''
return (self.weights**2 + (1.0 - self.weights)**2)
def predict(self, X, Y):
''' Peeks at the correct answer in order to assign predictions which
exactly match the expected quality of predictions.
'''
n_samples, n_classes = Y.shape
predictions = np.zeros([n_samples, n_classes], dtype=bool)
for i_class in range(n_classes):
weight = self.weights[i_class]
true_idxs = np.where(Y[:, i_class] == True)
false_idxs = np.where(Y[:, i_class] == False)
n_true = true_idxs[0].shape[0]
n_false = false_idxs[0].shape[0]
n_true_assign_true = int(round(weight * n_true))
n_false_assign_true = int(round(weight * n_false))
predictions[true_idxs[0][:n_true_assign_true], i_class] = True
predictions[false_idxs[0][:n_false_assign_true], i_class] = True
return predictions
def getScores(answers, predictions):
''' Returns the F1 score and simple accuracy score (percent correct).
Requires predictions and answers in 0 and 1 int or bool format.
Returns arrays giving score per class.
'''
predicted_positives = (predictions == 1)
true_positives = (predicted_positives & answers)
correct_predictions = (predictions == answers)
precision = true_positives.sum(axis=0).astype(float) /\
predicted_positives.sum(axis=0)
recall = true_positives.sum(axis=0).astype(float) /\
answers.sum(axis=0)
F1 = (2 * precision * recall) / (precision + recall)
precision_all = float(true_positives.sum()) / predicted_positives.sum()
recall_all = float(true_positives.sum()) / answers.sum()
F1_all = (2 * precision_all * recall_all) / (precision_all + recall_all)
accuracy = correct_predictions.sum(
axis=0).astype(float) / predictions.shape[0]
return accuracy, F1, accuracy.mean(), F1_all
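# Illustrative sketch (toy data) of the benchmark workflow: fit the
# stratified-random classifier, predict, and score with getScores(). X is
# unused by this classifier, so None is passed for it.
#
#     Y = np.array([[1, 0], [1, 1], [0, 1], [1, 0]], dtype=bool)
#     clf = _StratifiedRandomClassifier().fit(None, Y)
#     Y_pred = clf.predict(None, Y)
#     accuracy, F1, accuracy_all, F1_all = getScores(Y, Y_pred)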
| StarcoderdataPython |
3437 | <reponame>mlandriau/surveysim
"""Simulate stochastic observing weather conditions.
The simulated conditions include seeing, transparency and the dome-open fraction.
"""
from __future__ import print_function, division, absolute_import
from datetime import datetime
import numpy as np
import astropy.time
import astropy.table
import astropy.units as u
import desiutil.log
import desimodel.weather
import desisurvey.config
import desisurvey.ephem
import desisurvey.utils
class Weather(object):
"""Simulate weather conditions affecting observations.
The start/stop date range is taken from the survey config.
Seeing and transparency values are stored with 32-bit floats to save
some memory.
Parameters
----------
seed : int
Random number seed to use to generate stochastic conditions.
The seed determines the same seeing and transparency realization
independent of the value of ``replay``.
replay : str
Either 'random' or a comma-separated list of years whose
historical weather should be replayed, e.g. 'Y2010,Y2012'.
Replayed weather will be used cyclically if necessary.
        Random weather will be a bootstrap sampling of all available
years with historical weather data. Use 'Y2015' for the
worst-case weather scenario.
time_step : float or :class:`astropy.units.Quantity`, optional
        Time step used for calculating updates. Must evenly divide 24 hours.
If unitless float, will be interpreted as minutes.
restore : filename or None
Restore an existing weather simulation from the specified file name.
All other parameters are ignored when this is provided. A relative path
name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
extra_downtime : float
Additionally close the dome completely on some nights. Nights are
chosen randomly, with the chance of the night being closed equal to
        extra_downtime. This is intended to include margin.
"""
def __init__(self, seed=1, replay='random', time_step=5, restore=None,
extra_downtime=0):
if not isinstance(time_step, u.Quantity):
time_step = time_step * u.min
self.log = desiutil.log.get_logger()
config = desisurvey.config.Configuration()
ephem = desisurvey.ephem.get_ephem()
if restore is not None:
fullname = config.get_path(restore)
self._table = astropy.table.Table.read(fullname)
self.start_date = desisurvey.utils.get_date(
self._table.meta['START'])
self.stop_date = desisurvey.utils.get_date(
self._table.meta['STOP'])
self.num_nights = self._table.meta['NIGHTS']
self.steps_per_day = self._table.meta['STEPS']
self.replay = self._table.meta['REPLAY']
self.log.info('Restored weather from {}.'.format(fullname))
return
else:
self.log.info('Generating random weather with seed={} replay="{}".'
.format(seed, replay))
gen = np.random.RandomState(seed)
# Use our config to set any unspecified dates.
start_date = config.first_day()
stop_date = config.last_day()
num_nights = (stop_date - start_date).days
if num_nights <= 0:
raise ValueError('Expected start_date < stop_date.')
# Check that the time step evenly divides 24 hours.
steps_per_day = int(round((1 * u.day / time_step).to(1).value))
if not np.allclose((steps_per_day * time_step).to(u.day).value, 1.):
raise ValueError(
'Requested time_step does not evenly divide 24 hours: {0}.'
.format(time_step))
# Calculate the number of times where we will tabulate the weather.
num_rows = num_nights * steps_per_day
meta = dict(START=str(start_date), STOP=str(stop_date),
NIGHTS=num_nights, STEPS=steps_per_day, REPLAY=replay)
self._table = astropy.table.Table(meta=meta)
# Initialize column of MJD timestamps.
t0 = desisurvey.utils.local_noon_on_date(start_date)
times = t0 + (np.arange(num_rows) / float(steps_per_day)) * u.day
self._table['mjd'] = times.mjd
# Generate a random atmospheric seeing time series.
dt_sec = 24 * 3600. / steps_per_day
self._table['seeing'] = desimodel.weather.sample_seeing(
num_rows, dt_sec=dt_sec, gen=gen).astype(np.float32)
# Generate a random atmospheric transparency time series.
self._table['transparency'] = desimodel.weather.sample_transp(
num_rows, dt_sec=dt_sec, gen=gen).astype(np.float32)
if replay == 'random':
# Generate a bootstrap sampling of the historical weather years.
years_to_simulate = config.last_day().year - config.first_day().year + 1
history = ['Y{}'.format(year) for year in range(2007, 2018)]
replay = ','.join(gen.choice(history, years_to_simulate, replace=True))
# Lookup the dome closed fractions for each night of the survey.
# This step is deterministic and only depends on the config weather
# parameter, which specifies which year(s) of historical daily
# weather to replay during the simulation.
dome_closed_frac = desimodel.weather.dome_closed_fractions(
start_date, stop_date, replay=replay)
r = gen.uniform(size=num_nights)
r2 = gen.uniform(size=num_nights)
dome_closed_frac[r2 < extra_downtime] = 1.
# Convert fractions of scheduled time to hours per night.
ilo, ihi = (start_date - ephem.start_date).days, (stop_date - ephem.start_date).days
bright_dusk = ephem._table['brightdusk'].data[ilo:ihi]
bright_dawn = ephem._table['brightdawn'].data[ilo:ihi]
dome_closed_time = dome_closed_frac * (bright_dawn - bright_dusk)
# Randomly pick between three scenarios for partially closed nights:
# 1. closed from dusk, then open the rest of the night.
# 2. open at dusk, then closed for the rest of the night.
# 3. open and dusk and dawn, with a closed period during the night.
# Pick scenarios 1+2 with probability equal to the closed fraction.
# Use a fixed number of random numbers to decouple from the seeing
# and transparency sampling below.
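        # For example, with dome_closed_frac[i] = 0.4 the night is closed from dusk
        # with probability 0.2, closed until dawn with probability 0.2, and closed
        # for a mid-night interval with probability 0.6 (assumed reading of the
        # branches below).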
self._table['open'] = np.ones(num_rows, bool)
for i in range(num_nights):
sl = slice(i * steps_per_day, (i + 1) * steps_per_day)
night_mjd = self._table['mjd'][sl]
# Dome is always closed before dusk and after dawn.
closed = (night_mjd < bright_dusk[i]) | (night_mjd >= bright_dawn[i])
if dome_closed_frac[i] == 0:
# Dome open all night.
pass
elif dome_closed_frac[i] == 1:
# Dome closed all night. This occurs with probability frac / 2.
closed[:] = True
elif r[i] < 0.5 * dome_closed_frac[i]:
# Dome closed during first part of the night.
# This occurs with probability frac / 2.
closed |= (night_mjd < bright_dusk[i] + dome_closed_time[i])
elif r[i] < dome_closed_frac[i]:
# Dome closed during last part of the night.
# This occurs with probability frac / 2.
closed |= (night_mjd > bright_dawn[i] - dome_closed_time[i])
else:
# Dome closed during the middle of the night.
# This occurs with probability 1 - frac. Use the value of r[i]
# as the fractional time during the night when the dome reopens.
dome_open_at = bright_dusk[i] + r[i] * (bright_dawn[i] - bright_dusk[i])
dome_closed_at = dome_open_at - dome_closed_time[i]
closed |= (night_mjd >= dome_closed_at) & (night_mjd < dome_open_at)
self._table['open'][sl][closed] = False
self.start_date = start_date
self.stop_date = stop_date
self.num_nights = num_nights
self.steps_per_day = steps_per_day
self.replay = replay
def save(self, filename, overwrite=True):
"""Save the generated weather to a file.
The saved file can be restored using the constructor `restore`
parameter.
Parameters
----------
filename : str
Name of the file where the weather should be saved. A
relative path name refers to the :meth:`configuration output path
<desisurvey.config.Configuration.get_path>`.
overwrite : bool
Silently overwrite any existing file when this is True.
"""
config = desisurvey.config.Configuration()
filename = config.get_path(filename)
self._table.write(filename, overwrite=overwrite)
self.log.info('Saved weather to {0}.'.format(filename))
def get(self, time):
"""Get the weather conditions at the specified time(s).
Returns the conditions at the closest tabulated time, rather than
using interpolation.
Parameters
----------
time : astropy.time.Time
Time(s) when the simulated weather is requested.
Returns
-------
table slice
Slice of precomputed table containing row(s) corresponding
to the requested time(s).
"""
offset = np.floor(
(time.mjd - self._table['mjd'][0]) * self.steps_per_day + 0.5
).astype(int)
        if np.any(offset < 0) or np.any(offset >= len(self._table)):
raise ValueError('Cannot get weather beyond tabulated range.')
return self._table[offset]
| StarcoderdataPython |
12819511 | import requests
import mimetypes
# -----------------------------------------------------------------------------
# Globals
BASE_URL = "<YOUR_DOMAIN>/rest/api/content"
SPACE_NAME = "<YOUR_SPACE_NAME>"
USERNAME = "<YOUR_USERNAME>"
PASSWORD = "<<PASSWORD>>"
def upload_attachment(page_id, filepath):
url = BASE_URL + "/" + page_id + "/child/attachment/"
headers = {"X-Atlassian-Token": "no-check"} # no content-type here!
print(f"URL: {url}")
filename = filepath
# determine content-type
content_type, encoding = mimetypes.guess_type(filename)
if content_type is None:
content_type = "multipart/form-data"
# provide content-type explicitly
files = {"file": (filename, open(filename, "rb"), content_type)}
print(f"FILES: {files}")
auth = (USERNAME, PASSWORD)
r = requests.post(url, headers=headers, files=files, auth=auth)
r.raise_for_status()
def find_parent_name_of_page(name):
idp = find_page_id(name)
url = BASE_URL + "/" + idp + "?expand=ancestors"
print(f"URL: {url}")
auth = (USERNAME, PASSWORD)
r = requests.get(url, auth=auth)
r.raise_for_status()
response_json = r.json()
if response_json:
print(f"ID: {response_json['ancestors'][0]['title']}")
return response_json
else:
print("PAGE DOES NOT EXIST")
return None
def find_page_id(name):
name_confl = name.replace(" ", "+")
url = BASE_URL + "?title=" + name_confl + "&spaceKey=" + SPACE_NAME + "&expand=history"
print(f"URL: {url}")
auth = (USERNAME, PASSWORD)
r = requests.get(url, auth=auth)
r.raise_for_status()
response_json = r.json()
if response_json["results"]:
print(f"ID: {response_json['results']}")
return response_json["results"]
else:
print("PAGE DOES NOT EXIST")
return None
def add_page(page_name, parent_page_id):
url = BASE_URL + "/"
print(f"URL: {url}")
headers = {"Content-Type": "application/json"}
auth = (USERNAME, PASSWORD)
data = {
"type": "page",
"title": page_name,
"space": {"key": SPACE_NAME},
"ancestors": [{"id": parent_page_id}],
"body": {"storage": {"value": "<p>This is a new page</p>", "representation": "storage"}},
}
r = requests.post(url, json=data, headers=headers, auth=auth)
r.raise_for_status()
print(r.json())
def update_page(page_name):
page_id = find_page_id(page_name)
if page_id:
page_version = find_page_version(page_name)
page_version = page_version + 1
print(f"PAGE ID: {page_id}, PAGE NAME: {page_name}")
url = BASE_URL + "/" + page_id
print(f"URL: {url}")
headers = {"Content-Type": "application/json"}
auth = (USERNAME, PASSWORD)
data = {
"type": "page",
"space": {"key": SPACE_NAME},
"body": {"storage": {"value": "<p>Let the dragons out!</p>", "representation": "storage"}},
"version": {"number": page_version},
}
data["id"] = page_id
data["title"] = page_name
print(data)
r = requests.put(url, json=data, headers=headers, auth=auth)
r.raise_for_status()
print(r.json())
else:
print("PAGE DOES NOT EXIST. CREATING WITH DEFAULT BODY")
add_page(page_name)
def find_page_version(name):
name_confl = name.replace(" ", "+")
url = BASE_URL + "?title=" + name_confl + "&spaceKey=" + SPACE_NAME + "&expand=version"
print(f"URL: {url}")
auth = (USERNAME, PASSWORD)
r = requests.get(url, auth=auth)
r.raise_for_status()
response_json = r.json()
if response_json["results"]:
print(f"VERSION: {response_json['results'][0]['version']['number']}")
return response_json["results"][0]["version"]["number"]
else:
print("PAGE DOES NOT EXISTS")
return None
# add_page()
# update_page("Test Page")
# find_page_version("Test Page")
# find_parent_name_of_page("Test Parent Page")
# find_page_id("Test Page")
# upload_attachment()
| StarcoderdataPython |
47331 | <filename>launch.py<gh_stars>1-10
#!/usr/bin/env python
import sys
from PyQt5.QtWidgets import QApplication
from cad.application import Application
if __name__ == '__main__':
app = QApplication(sys.argv)
workspace = Application()
workspace.show()
sys.exit(app.exec_())
| StarcoderdataPython |
198501 | <filename>wcics/utils/files.py
# -*- coding: utf-8 -*-
# Return the contents of a file
def load_file(filename):
with open(filename, "r") as f:
return f.read()
# Write contents to a file
def write_file(filename, content):
with open(filename, "w+") as f:
f.write(content)
# Append contents to a file
def append_file(filename, content):
with open(filename, "a+") as f:
f.write(content)
| StarcoderdataPython |
3404291 | <gh_stars>100-1000
import torch
import numpy as np
import pickle
import os
img_size=32
classes={
'train': [1, 2, 3, 4, 5, 6, 9, 10, 15, 17, 18, 19],
'val': [8, 11, 13, 16],
'test': [0, 7, 12, 14]
}
def _get_file_path(filename=""):
return os.path.join('./data', "cifar-100-python/", filename)
def _unpickle(filename):
"""
Unpickle the given file and return the data.
Note that the appropriate dir-name is prepended the filename.
"""
# Create full path for the file.
file_path = _get_file_path(filename)
print("Loading data: " + file_path)
with open(file_path, mode='rb') as file:
# In Python 3.X it is important to set the encoding,
# otherwise an exception is raised here.
data = pickle.load(file, encoding='latin1')
return data
# import IPython
# IPython.embed()
meta = _unpickle('meta')
train = _unpickle('train')
test = _unpickle('test')
data = np.concatenate([train['data'], test['data']])
labels = np.array(train['fine_labels'] + test['fine_labels'])
filts = np.array(train['coarse_labels'] + test['coarse_labels'])
cifar_data = {}
cifar_label = {}
for k, v in classes.items():
data_filter = np.zeros_like(filts)
for i in v: data_filter += ( filts == i )
assert data_filter.max() == 1
cifar_data[k] = data[data_filter == 1]
cifar_label[k] = labels[data_filter == 1]
torch.save({'data': cifar_data, 'label': cifar_label}, './data/cifar100.pth')
| StarcoderdataPython |
358903 | from transcode.containers import basereader
import ass
import numpy
from fractions import Fraction as QQ
from itertools import islice
from transcode.util import Packet
class Track(basereader.Track):
def __getstate__(self):
state = super().__getstate__()
state["index"] = self.index
state["sizes"] = self.sizes
state["durations"] = self.durations
return state
def __setstate__(self, state):
self.index = state.get("index")
super().__setstate__(state)
@property
def extradata(self):
sections = []
for head, section in self.container.assfile.sections.items():
if head == "Events":
sections.append(
"\n".join([f"[{head}]", ", ".join(section.field_order)]))
break
sections.append("\n".join(section.dump()))
return "\n\n".join(sections).encode("utf8")
@property
def type(self):
return "subtitle"
@property
def codec(self):
return "ass"
@property
def time_base(self):
return self.container.time_base
def iterPackets(self, start=0, whence="pts"):
if whence == "pts":
start = self.frameIndexFromPts(start)
if whence == "seconds":
start = self.frameIndexFromPts(int(start/self.time_base))
elif whence == "framenumber":
pass
fields = [
field
for field in self.container.assfile.events.field_order
if field not in ("Start", "End")]
for k, event in enumerate(
self.container.assfile.events[start:], start):
data = f"{k},{event.dump(fields)}".encode("utf8")
yield Packet(
data=data,
pts=int(event.start.total_seconds()/self.time_base),
duration=int(
(event.end - event.start).total_seconds()/self.time_base),
keyframe=True, time_base=self.time_base)
def iterFrames(self, start=0, end=None, whence="pts"):
if whence == "pts":
start = self.frameIndexFromPts(start)
try:
end = end and self.frameIndexFromPts(end)
except IndexError:
end = None
elif whence == "seconds":
start = self.frameIndexFromPts(start/self.time_base)
try:
end = end and self.frameIndexFromPts(end/self.time_base)
except IndexError:
end = None
return islice(self.container.assfile.events, start, end)
@property
def pts(self):
num = self.time_base.numerator
den = self.time_base.denominator
return numpy.int0([
event.start.total_seconds()*den/num
for event in self.container.assfile.events])
@pts.setter
def pts(self, value):
pass
@property
def sizes(self):
fields = [
field
for field in self.container.assfile.events.field_order
if field not in ("Start", "End")]
return numpy.int0([
len(f"{k+1},{event.dump(fields)}".encode("utf8"))
for k, event in enumerate(self.container.assfile.events)])
@sizes.setter
def sizes(self, value):
pass
@property
def durations(self):
num = self.time_base.numerator
den = self.time_base.denominator
return numpy.int0([
(event.end.total_seconds()
- event.start.total_seconds())*den/num
for event in self.container.assfile.events])
@durations.setter
def durations(self, value):
pass
class SubstationAlphaReader(basereader.BaseReader):
trackclass = Track
extensions = (".ass", ".ssa", ".assa")
fmtname = "Substation Alpha/Advanced Substation Alpha"
def _open(self):
self.assfile = ass.document.Document.parse_file(
open(self.inputpath, "r", encoding="utf_8_sig"))
def _populatetracks(self):
self.tracks = [Track()]
self.tracks[0].container = self
self.scan()
def scan(self, notifystart=None, notifyprogress=None, notifyfinish=None):
track = self.tracks[0]
track.index = None
@property
def time_base(self):
return QQ(1, 100)
| StarcoderdataPython |
3244065 | import collections
import typing
import numpy as np
class TrainConfig(typing.NamedTuple):
T: int
train_size: int
batch_size: int
loss_func: typing.Callable
class TrainData(typing.NamedTuple):
feats: np.ndarray
targs: np.ndarray
DaRnnNet = collections.namedtuple("DaRnnNet", ["encoder", "decoder", "enc_opt", "dec_opt"])
| StarcoderdataPython |
6536585 | import time
import numpy as np
from dbscan import DBScan
from sklearn import datasets
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from itertools import cycle, islice
np.random.seed(0)
iris = datasets.load_iris()
X = iris.data[:, :2] # Looking at only Sepal Length and Width for now
y = iris.target
plt.figure()
t0 = time.time()
scanner = DBScan(.2, 4)
labels = scanner.fit(X)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(labels) + 1))))
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[labels])
t1 = time.time()
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plt.show() | StarcoderdataPython |
6465984 | <reponame>EmersonAires/Introducao_a_ciencia_da_computacao_com_Python<gh_stars>0
def cria_matriz():
m = int(input("Digite um número inteiro: "))
n = int(input("Digite um número inteiro: "))
matriz = []
for i in range(m):
linha = []
for j in range(n):
linha.append(int(input("Digite o elemento (" + str(i) + "," + str(j) + ") : ")))
matriz.append(linha)
linhas_nulas = 0
colunas_nulas = 0
for linha in matriz:
soma_linha = 0
for elemento in linha:
soma_linha = soma_linha + elemento
if soma_linha == 0:
linhas_nulas = linhas_nulas + 1
for j in range(n):
soma_coluna = 0
for i in range(m):
soma_coluna = soma_coluna + matriz[i][j]
if soma_coluna ==0:
colunas_nulas = colunas_nulas + 1
print("Linhas nulas: ", linhas_nulas)
print("Colunas nulas: ", colunas_nulas)
cria_matriz() | StarcoderdataPython |
9624789 | #!/usr/bin/env python3
import sys, json
from collections import Counter
data_providers = []
with open(sys.argv[1]) as f:
for line in f:
rec = json.loads(line)
#for record in rec:
data_providers.append(rec['dataProvider'])
counts = Counter(data_providers)
for item in list(counts):
print(item, ': ', counts[item])
| StarcoderdataPython |
12806784 | <filename>admin_list_controls/tests/test_views.py
from django.contrib.auth import get_user_model
from django.test import RequestFactory
from wagtail.contrib.modeladmin.options import ModelAdmin
from django_webtest import WebTest
from shop.models import Product
from admin_list_controls.views import ListControlsIndexView
User = get_user_model()
class TestViews(WebTest):
def setUp(self):
self.factory = RequestFactory()
self.superuser = User.objects.create_superuser(
username='test',
email='<EMAIL>',
password='<PASSWORD>',
)
def tearDown(self):
User.objects.all().delete()
def test_view_init_and_context(self):
class TestView(ListControlsIndexView):
pass
view = self.list_view_class_to_view_function(TestView)
response = view(self.create_superuser_request('/'))
self.assertIn('admin_list_controls', response.context_data)
self.assertIsInstance(response.context_data['admin_list_controls'], dict)
def create_superuser_request(self, url):
request = self.factory.get(url)
request.user = self.superuser
return request
def instantiate_list_view_class(self, list_view_class) -> ListControlsIndexView:
class TestModelAdmin(ModelAdmin):
model = Product
index_view_class = list_view_class
return list_view_class(model_admin=TestModelAdmin())
def list_view_class_to_view_function(self, list_view_class):
class TestModelAdmin(ModelAdmin):
model = Product
index_view_class = list_view_class
request = self.factory.get('/')
request.user = self.superuser
return TestModelAdmin().index_view
| StarcoderdataPython |
8110268 | import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from utils import load_citation, sgc_precompute, set_seed
from models import get_model
from metrics import accuracy
import pickle as pkl
from args import get_citation_args
from time import perf_counter
import pygraph as pg
import kernel
import torch.utils.dlpack
from time import perf_counter
# Arguments
args = get_citation_args()
if args.tuned:
if args.model == "SGC":
with open("{}-tuning/{}.txt".format(args.model, args.dataset), 'rb') as f:
args.weight_decay = pkl.load(f)['weight_decay']
print("using tuned weight decay: {}".format(args.weight_decay))
else:
raise NotImplemented
# setting random seeds
set_seed(args.seed, args.cuda)
adj, features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, args.normalization, args.cuda)
#pygraph code starts
def memoryview_to_np(memview, nebr_dt):
arr = np.array(memview, copy=False)
#a = arr.view(nebr_dt).reshape(nebr_reader.get_degree())
a = arr.view(nebr_dt)
return a
edge_dt = np.dtype([('src', np.int32), ('dst', np.int32), ('val',np.float32)])
flags = pg.enumGraph.eUdir
outdir = ""
graph = pg.init(1,1, outdir, 1, 2) # Indicate one pgraph, and one vertex type
tid0 = graph.init_vertex_type(adj.shape[0], False, "gtype"); # initiate the vertex type
pgraph = graph.create_schema(flags, tid0, "friend", edge_dt); #initiate the pgraph
dd = np.zeros(10000, edge_dt)
tempGraph = adj.coalesce()
rowList = tempGraph.indices()[1].tolist()
colList = tempGraph.indices()[0].tolist()
valList = tempGraph.values().tolist()
feat = features.tolist()
feattensor = torch.tensor(feat, dtype=torch.float32)
edge_count = 0
for i in range(0,len(rowList)):
dd[edge_count]= (rowList[i], colList[i], valList[i])
edge_count += 1
if (edge_count == 10000):
pgraph.add_edges(dd, edge_count)
edge_count = 0
pgraph.add_edges(dd, edge_count)
pgraph.wait()
offset_csr1, offset_csc1, nebrs_csr1, nebrs_csc1 = pg.create_csr_view(pgraph)
offset_dt = np.dtype([('offset', np.int32)])
csr_dt = np.dtype([('dst', np.int32),('val',np.float32)])
offset_csr = memoryview_to_np(offset_csr1, offset_dt)
offset_csc = memoryview_to_np(offset_csc1, offset_dt)
nebrs_csr = memoryview_to_np(nebrs_csr1, csr_dt)
nebrs_csc = memoryview_to_np(nebrs_csc1, csr_dt)
flag = 0
G = kernel.init_graph(offset_csr, nebrs_csr, offset_csc, nebrs_csc, flag, adj.shape[0])
X_dl = torch.utils.dlpack.to_dlpack(feattensor)
res = torch.zeros(features.shape[0], features.shape[1])
res_dl = torch.utils.dlpack.to_dlpack(res)
#sgc_precompute with kernel
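# Each pass multiplies the current feature matrix by the (normalized) adjacency via
# the custom spmm kernel, so after `degree` iterations `res` holds roughly
# A^degree @ X -- the same propagation sgc_precompute performs with the PyTorch API.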
t = perf_counter()
for i in range(args.degree):
kernel.spmm(G, X_dl, res_dl)
if (i<args.degree-1):
X_dl = torch.utils.dlpack.to_dlpack(res)
res = torch.zeros(features.shape[0], features.shape[1])
res_dl = torch.utils.dlpack.to_dlpack(res)
print("kernel spmm time: "+"{:.4f}s".format(perf_counter()-t))
if (args.cuda):
res = res.to(device='cuda:0')
#pygraph code ends
model = get_model(args.model, features.size(1), labels.max().item()+1, args.hidden, args.dropout, args.cuda)
if args.model == "SGC" or args.model == "nSGC": features, precompute_time = sgc_precompute(features, adj, args.degree)
print("pytorch spmm api time: "+"{:.4f}s".format(precompute_time))
#uses the output feature from the kernel instead of the python api; comment this out if you want the original
features = res
def train_regression(model,
train_features, train_labels,
val_features, val_labels,
epochs=args.epochs, weight_decay=args.weight_decay,
lr=args.lr, dropout=args.dropout):
optimizer = optim.Adam(model.parameters(), lr=lr,
weight_decay=weight_decay)
t = perf_counter()
for epoch in range(epochs):
model.train()
optimizer.zero_grad()
output = model(train_features)
loss_train = F.cross_entropy(output, train_labels)
loss_train.backward()
optimizer.step()
train_time = perf_counter()-t
with torch.no_grad():
model.eval()
output = model(val_features)
acc_val = accuracy(output, val_labels)
return model, acc_val, train_time
def test_regression(model, test_features, test_labels):
model.eval()
return accuracy(model(test_features), test_labels)
if args.model == "SGC" or args.model == "nSGC":
model, acc_val, train_time = train_regression(model, features[idx_train], labels[idx_train], features[idx_val], labels[idx_val],
args.epochs, args.weight_decay, args.lr, args.dropout)
acc_test = test_regression(model, features[idx_test], labels[idx_test])
print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val, acc_test))
print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
| StarcoderdataPython |
4881247 | <reponame>shahbagdadi/py-algo-n-ds
from typing import List
import bisect
class Solution:
def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:
window = sorted(nums[:k])
medians = []
for a, b in zip(nums, nums[k:] + [0]):
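            # window[k//2] and window[~(k//2)] are the two middle slots of the sorted
            # window (the same slot when k is odd): e.g. k=3 -> indices 1 and -2,
            # k=4 -> indices 2 and -3, so their mean is the median either way.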
medians.append((window[k//2] + window[~(k//2)]) / 2.0)
window.remove(a)
bisect.insort(window, b)
return medians
s = Solution()
ip = [1,3,-1,-3,5,3,6,7]
ans = s.medianSlidingWindow(ip,3)
print(ans) | StarcoderdataPython |
3474669 | <reponame>akshayka/gavel
import os, sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import cvxpy as cp
import numpy as np
from policy import Policy, PolicyWithPacking
class ThroughputSumWithPerf(Policy):
def __init__(self, solver):
self._name = 'ThroughputSumWithPerf'
self._policy = ThroughputNormalizedByCostSumWithPerfSLOs(solver)
def get_allocation(self, unflattened_throughputs, scale_factors,
cluster_spec):
return self._policy.get_allocation(unflattened_throughputs,
scale_factors,
cluster_spec)
class ThroughputNormalizedByCostSumWithPerf(Policy):
def __init__(self, solver):
self._name = 'ThroughputNormalizedByCostSum_Perf'
self._policy = ThroughputNormalizedByCostSumWithPerfSLOs(solver)
def get_allocation(self, unflattened_throughputs, scale_factors,
cluster_spec, instance_costs):
return self._policy.get_allocation(unflattened_throughputs,
scale_factors,
cluster_spec,
instance_costs=instance_costs)
class ThroughputNormalizedByCostSumWithPerfSLOs(Policy):
def __init__(self, solver):
Policy.__init__(self, solver)
self._name = 'ThroughputNormalizedByCostSum_PerfSLOs'
def get_allocation(self, unflattened_throughputs, scale_factors,
cluster_spec, instance_costs=None, SLOs={},
num_steps_remaining={}):
throughputs, index = super().flatten(unflattened_throughputs,
cluster_spec)
if throughputs is None: return None
(m, n) = throughputs.shape
(job_ids, worker_types) = index
scale = 1.0 / throughputs.sum(axis=1)
# Row i of scale_factors_array is the scale_factor of job
# combination i repeated len(worker_types) times.
scale_factors_array = self.scale_factors_array(
scale_factors, job_ids, m, n)
x = cp.Variable(throughputs.shape)
instance_costs_array = np.ones((1, n))
if instance_costs is not None:
for i in range(n):
instance_costs_array[0, i] = instance_costs[worker_types[i]]
objective = \
cp.Maximize(cp.sum(cp.sum(cp.multiply(throughputs /
instance_costs_array, x),
axis=1)))
# Make sure that a given job is not over-allocated resources.
constraints = self.get_base_constraints(x, scale_factors_array)
SLO_constraints = []
for job_id in SLOs:
i = job_ids.index(job_id)
assert(job_id in num_steps_remaining)
SLO_constraints.append(
cp.sum(cp.multiply(throughputs[i], x[i])) >=
(num_steps_remaining[job_id] / SLOs[job_id])
)
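        # Each SLO constraint requires the job's effective throughput (summed over
        # worker types) to be at least num_steps_remaining / SLO, e.g. 1000 steps
        # left with an SLO of 10000 time units needs >= 0.1 steps per unit time
        # (illustrative numbers only).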
cvxprob = cp.Problem(objective, constraints + SLO_constraints)
result = cvxprob.solve(solver=self._solver)
if cvxprob.status != "optimal":
print('WARNING: Allocation returned by policy not optimal!')
if x.value is None:
print('WARNING: No allocation possible with provided SLOs!')
cvxprob = cp.Problem(objective, constraints)
result = cvxprob.solve(solver=self._solver)
return super().unflatten(x.value.clip(min=0.0).clip(max=1.0), index)
class ThroughputNormalizedByCostSumWithPackingSLOs(PolicyWithPacking):
def __init__(self, solver):
Policy.__init__(self, solver)
self._name = 'ThroughputNormalizedByCostSum_PackingSLOs'
def get_allocation(self, unflattened_throughputs, scale_factors, cluster_spec,
instance_costs=None, SLOs={}, num_steps_remaining={}):
all_throughputs, index = super().flatten(unflattened_throughputs,
cluster_spec)
if all_throughputs is None or len(all_throughputs) == 0: return None
(m, n) = all_throughputs[0].shape
(job_ids, single_job_ids, worker_types, relevant_combinations) = index
# Row i of scale_factors_array is the scale_factor of job
# combination i repeated len(worker_types) times.
scale_factors_array = self.scale_factors_array(
scale_factors, job_ids, m, n)
x = cp.Variable((m, n))
instance_costs_array = np.ones((m, n))
if instance_costs is not None:
for i in range(m):
for j in range(n):
instance_costs_array[i,j] = \
instance_costs[worker_types[j]]
objective_terms = []
for i in range(len(single_job_ids)):
indexes = relevant_combinations[single_job_ids[i]]
objective_terms.append(cp.sum(cp.multiply(
all_throughputs[i][indexes] /\
instance_costs_array[indexes], x[indexes])))
if len(objective_terms) == 1:
objective = cp.Maximize(objective_terms[0])
else:
objective = cp.Maximize(cp.sum(cp.hstack(objective_terms)))
# Make sure that a given job is not over-allocated resources.
constraints = self.get_base_constraints(x, single_job_ids,
scale_factors_array,
relevant_combinations)
SLO_constraints = []
per_job_throughputs = []
per_job_SLOs = []
for job_id in SLOs:
i = job_ids.index(job_id)
assert(job_id in num_steps_remaining)
indexes = relevant_combinations[single_job_ids[i]]
throughput = cp.sum(cp.multiply(
all_throughputs[i][indexes], x[indexes]))
per_job_throughputs.append(throughput)
per_job_SLOs.append(num_steps_remaining[job_id] / SLOs[job_id])
if len(per_job_throughputs) > 0:
SLO_constraints.append(cp.vstack(per_job_throughputs) >=
cp.vstack(per_job_SLOs))
cvxprob = cp.Problem(objective, constraints + SLO_constraints)
result = cvxprob.solve(solver=self._solver)
if x.value is None:
print('WARNING: No allocation possible with provided SLOs!')
cvxprob = cp.Problem(objective, constraints)
result = cvxprob.solve(solver=self._solver)
if cvxprob.status != "optimal":
print('WARNING: Allocation returned by policy not optimal!')
return super().unflatten(x.value.clip(min=0.0).clip(max=1.0), index)
| StarcoderdataPython |
6626100 | <filename>trains/utilities/check_updates.py<gh_stars>0
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
import requests
import six
if six.PY3:
from math import inf
else:
inf = float('inf')
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class Version(_BaseVersion):
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_local_version_separators = re.compile(r"[\._-]")
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=self._parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=self._parse_letter_version(
match.group("post_l") or '', match.group("post_n1") or match.group("post_n2") or ''
),
dev=self._parse_letter_version(match.group("dev_l") or '', match.group("dev_n") or ''),
local=self._parse_local_version(match.group("local") or ''),
)
# Generate a key which will be used for sorting
self._key = self._cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(".post{0}".format(self.post))
# Development release
if self.dev is not None:
parts.append(".dev{0}".format(self.dev))
# Local version segment
if self.local is not None:
parts.append("+{0}".format(self.local))
return "".join(parts)
@property
def epoch(self):
return self._version.epoch
@property
def release(self):
return self._version.release
@property
def pre(self):
return self._version.pre
@property
def post(self):
return self._version.post[1] if self._version.post else None
@property
def dev(self):
return self._version.dev[1] if self._version.dev else None
@property
def local(self):
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self):
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self):
return self.post is not None
@property
def is_devrelease(self):
return self.dev is not None
@staticmethod
def _parse_letter_version(letter, number):
if not letter and not number:
return None
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
@classmethod
def _parse_local_version(cls, local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
local = tuple(
part.lower() if not part.isdigit() else int(part)
for part in cls._local_version_separators.split(local)
)
if not local or not local[0]:
return None
return local
return None
@staticmethod
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll use a reverse the list, drop all the now
# leading zeros until we come to something non zero, then take the rest
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
# release = tuple(
# reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
# )
# Versions without a pre-release (except as noted above) should sort after
# those with one.
if not pre:
pre = inf
elif pre:
pre = pre[1]
# Versions without a post segment should sort before those with one.
if not post:
post = -inf
else:
post = post[1]
# Versions without a development segment should sort after those with one.
if not dev:
dev = inf
else:
dev = dev[1]
if not local:
# Versions without a local segment should sort before those with one.
local = inf
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = local[1]
return epoch, release, pre, post, dev, local
class CheckPackageUpdates(object):
_package_version_checked = False
@classmethod
def check_new_package_available(cls, only_once=False):
"""
:return: true if there is a newer package in PyPI
"""
if only_once and cls._package_version_checked:
return None
# noinspection PyBroadException
try:
cls._package_version_checked = True
releases = requests.get('https://pypi.python.org/pypi/trains/json').json()['releases'].keys()
releases = [Version(r) for r in releases]
latest_version = sorted(releases)
from ..version import __version__
cur_version = Version(__version__)
if not cur_version.is_devrelease and not cur_version.is_prerelease:
latest_version = [r for r in latest_version if not r.is_devrelease and not r.is_prerelease]
if cur_version >= latest_version[-1]:
return None
not_patch_upgrade = latest_version[-1].release[:2] != cur_version.release[:2]
return str(latest_version[-1]), not_patch_upgrade
except Exception:
return None
| StarcoderdataPython |
4937720 | <filename>programs/pyeos/tests/python/cryptokitties/clockauctionbase.py
from backend import *
from basement import *
from auction import Auction
from erc721 import ERC721
from backyard.storage import SDict
# @title Auction Core
# @dev Contains models, variables, and internal methods for the auction.
# @notice We omit a fallback function to prevent accidental sends to this contract.
class ClockAuctionBase:
def __init__(self):
#Reference to contract tracking NFT ownership
#ERC721 public nonFungibleContract;
self.nonFungibleContract = ERC721()
#Cut owner takes on each auction, measured in basis points (1/100 of a percent).
#Values 0-10,000 map to 0%-100%
#uint256 public ownerCut;
self.ownerCut = uint256(0)
#Map from token ID to their corresponding auction.
#mapping (uint256 => Auction) tokenIdToAuction;
self.tokenIdToAuction = SDict(key_type = uint256, value_type = Auction)
@event
def AuctionCreated(self, tokenId: uint256, startingPrice: uint256, endingPrice: uint256, duration: uint256): pass
@event
def AuctionSuccessful(self, tokenId: uint256, totalPrice: uint256, winner: address): pass
@event
def AuctionCancelled(self, tokenId: uint256): pass
# @dev Returns true if the claimant owns the token.
# @param _claimant - Address claiming to own the token.
# @param _tokenId - ID of token whose ownership to verify.
def _owns(self, _claimant: address, _tokenId: uint256) -> bool:
return self.nonFungibleContract.ownerOf(_tokenId) == _claimant
# @dev Escrows the NFT, assigning ownership to this contract.
# Throws if the escrow fails.
# @param _owner - Current owner address of token to escrow.
# @param _tokenId - ID of token whose approval to verify.
def _escrow(self, _owner: address, _tokenId: uint256):
#it will throw if transfer fails
#FIXME this
self.nonFungibleContract.transferFrom(_owner, this, _tokenId)
# @dev Transfers an NFT owned by this contract to another address.
# Returns true if the transfer succeeds.
# @param _receiver - Address to transfer NFT to.
# @param _tokenId - ID of token to transfer.
def _transfer(self, _receiver: address, _tokenId: uint256):
#it will throw if transfer fails
self.nonFungibleContract.transfer(_receiver, _tokenId)
# @dev Adds an auction to the list of open auctions. Also fires the
# AuctionCreated event.
# @param _tokenId The ID of the token to be put on auction.
# @param _auction Auction to add.
def _addAuction(self, _tokenId: uint256, _auction: Auction):
#Require that all auctions have a duration of
#at least one minute. (Keeps our math from getting hairy!)
# require(_auction.duration >= 1 minutes)
require(_auction.duration >= 60)
self.tokenIdToAuction[_tokenId] = _auction;
self.AuctionCreated(
uint256(_tokenId),
uint256(_auction.startingPrice),
uint256(_auction.endingPrice),
uint256(_auction.duration)
)
# @dev Cancels an auction unconditionally.
def _cancelAuction(self, _tokenId: uint256, _seller: address):
self._removeAuction(_tokenId)
self._transfer(_seller, _tokenId)
self.AuctionCancelled(_tokenId)
# @dev Computes the price and transfers winnings.
# Does NOT transfer ownership of token.
def _bid(self, _tokenId: uint256, _bidAmount: uint256) -> uint256:
#Get a reference to the auction struct
auction = self.tokenIdToAuction[_tokenId];
#Explicitly check that this auction is currently live.
#(Because of how Ethereum mappings work, we can't just count
#on the lookup above failing. An invalid _tokenId will just
#return an auction object that is all zeros.)
require(self._isOnAuction(auction));
#Check that the bid is greater than or equal to the current price
price = self._currentPrice(auction);
require(_bidAmount >= price);
#Grab a reference to the seller before the auction struct
#gets deleted.
seller = auction.seller;
#The bid is good! Remove the auction before sending the fees
#to the sender so we can't have a reentrancy attack.
self._removeAuction(_tokenId);
#Transfer proceeds to seller (if there are any!)
if price > 0:
#Calculate the auctioneer's cut.
#(NOTE: _computeCut() is guaranteed to return a
#value <= price, so this subtraction can't go negative.)
auctioneerCut = self._computeCut(price);
sellerProceeds = price - auctioneerCut;
#NOTE: Doing a transfer() in the middle of a complex
#method like this is generally discouraged because of
#reentrancy attacks and DoS attacks if the seller is
#a contract with an invalid fallback function. We explicitly
#guard against reentrancy attacks by removing the auction
#before calling transfer(), and the only thing the seller
#can DoS is the sale of their own asset! (And if it's an
#accident, they can call cancelAuction(). )
seller.transfer(sellerProceeds)
#Calculate any excess funds included with the bid. If the excess
#is anything worth worrying about, transfer it back to bidder.
#NOTE: We checked above that the bid amount is greater than or
#equal to the price so this cannot underflow.
bidExcess = _bidAmount - price
#Return the funds. Similar to the previous transfer, this is
#not susceptible to a re-entry attack because the auction is
#removed before any transfers occur.
msg.sender.transfer(bidExcess)
#Tell the world!
self.AuctionSuccessful(_tokenId, price, msg.sender)
return price;
# @dev Removes an auction from the list of open auctions.
# @param _tokenId - ID of NFT on auction.
def _removeAuction(self, _tokenId: uint256):
del self.tokenIdToAuction[_tokenId]
# @dev Returns true if the NFT is on auction.
# @param _auction - Auction to check.
def _isOnAuction(self, _auction: Auction) -> bool:
return _auction.startedAt > 0
# @dev Returns current price of an NFT on auction. Broken into two
# functions (this one, that computes the duration from the auction
# structure, and the other that does the price computation) so we
# can easily test that the price computation works correctly.
def _currentPrice(self, _auction: Auction) -> uint256:
self.secondsPassed = 0;
#A bit of insurance against negative values (or wraparound).
#Probably not necessary (since Ethereum guarnatees that the
#now variable doesn't ever go backwards).
if now() > _auction.startedAt:
self.secondsPassed = now() - _auction.startedAt;
return self._computeCurrentPrice(
_auction.startingPrice,
_auction.endingPrice,
_auction.duration,
self.secondsPassed
)
# @dev Computes the current price of an auction. Factored out
# from _currentPrice so we can run extensive unit tests.
# When testing, make this function public and turn on
# `Current price computation` test suite.
def _computeCurrentPrice(self, _startingPrice: uint256, _endingPrice: uint256, _duration: uint256, _secondsPassed: uint256) -> uint256:
#NOTE: We don't use SafeMath (or similar) in this function because
# all of our public functions carefully cap the maximum values for
# time (at 64-bits) and currency (at 128-bits). _duration is
# also known to be non-zero (see the require() statement in
# _addAuction())
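        # Illustrative numbers: startingPrice=100, endingPrice=0, duration=1000 and
        # secondsPassed=250 give totalPriceChange=-100, currentPriceChange=-25 and a
        # current price of 75, i.e. a simple linear walk from start to end price.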
if _secondsPassed >= _duration:
#We've reached the end of the dynamic pricing portion
#of the auction, just return the end price.
return _endingPrice;
else:
#Starting price can be higher than ending price (and often is!), so
#this delta can be negative.
self.totalPriceChange = int256(_endingPrice) - int256(_startingPrice);
#This multiplication can't overflow, _secondsPassed will easily fit within
#64-bits, and totalPriceChange will easily fit within 128-bits, their product
#will always fit within 256-bits.
self.currentPriceChange = self.totalPriceChange * int256(_secondsPassed) / int256(_duration);
#currentPriceChange can be negative, but if so, will have a magnitude
            #less than _startingPrice. Thus, this result will always end up positive.
self.currentPrice = int256(_startingPrice) + self.currentPriceChange;
return uint256(self.currentPrice);
# @dev Computes owner's cut of a sale.
# @param _price - Sale price of NFT.
def _computeCut(self, _price: uint256) -> uint256:
#NOTE: We don't use SafeMath (or similar) in this function because
# all of our entry functions carefully cap the maximum values for
# currency (at 128-bits), and ownerCut <= 10000 (see the require()
# statement in the ClockAuction constructor). The result of this
# function is always guaranteed to be <= _price.
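        # E.g. with ownerCut = 375 basis points (3.75%), a sale price of 10000
        # yields a cut of 10000 * 375 / 10000 = 375 (numbers are only an example).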
return _price * self.ownerCut / 10000;
| StarcoderdataPython |
11209253 | """
Revision ID: 0304a_merge
Revises: 0304_remove_org_to_service, 0303a_merge
Create Date: 2019-07-29 16:18:27.467361
"""
# revision identifiers, used by Alembic.
revision = "0304a_merge"
down_revision = ("0304_remove_org_to_service", "0303a_merge")
branch_labels = None
import sqlalchemy as sa
from alembic import op
def upgrade():
pass
def downgrade():
pass
| StarcoderdataPython |
8121653 | """
genotype.__main__
~~~~~~~~~~~~~~~~~~~~~
__
____ ____ ____ _____/ |_ ___.__.______ ____
/ ___\_/ __ \ / \ / _ \ __< | |\____ \_/ __ \
/ /_/ > ___/| | ( <_> ) | \___ || |_> > ___/
\___ / \___ >___| /\____/|__| / ____|| __/ \___ >
/_____/ \/ \/ \/ |__| \/
The main entry point for the command line interface.
Invoke as ``genotype`` (if installed)
or ``python -m genotype`` (no install required).
"""
import sys
from genotype.cli.base_cmd import root
if __name__ == "__main__":
# exit using whatever exit code the CLI returned
sys.exit(root())
| StarcoderdataPython |
6454495 | <filename>tests/integrationtests/integrator/connection/inmemory/sqlite/test_connection.py
from unittest import TestCase
from pdip.integrator.connection.domain.enums import ConnectorTypes
from pdip.integrator.connection.types.inmemory.base import InMemoryProvider
from pdip.integrator.connection.types.sql.base import SqlProvider
class TestMssqlConnection(TestCase):
def setUp(self):
pass
def tearDown(self):
return super().tearDown()
def test_mssql_connection(self):
try:
self.database_context = InMemoryProvider().get_context(
connector_type=ConnectorTypes.SqLite,
database='test_pdi.db'
)
self.database_context.connector.connect()
except Exception as ex:
print(ex)
raise
def test_create_and_drop_table(self):
try:
self.database_context = InMemoryProvider().get_context(
connector_type=ConnectorTypes.SqLite,
database='test_pdi'
)
self.database_context.execute('''CREATE TABLE main.test_source (
Id INT NULL,
Name varchar(100) NULL
)''')
except Exception as ex:
print(ex)
raise
finally:
self.database_context.execute('''DROP TABLE main.test_source''')
def test_data(self):
try:
self.database_context = InMemoryProvider().get_context(
connector_type=ConnectorTypes.SqLite,
database='test_pdi'
)
self.database_context.execute('''CREATE TABLE main.test_source (
Id INT NULL,
Name varchar(100) NULL
)''')
self.database_context.execute('''insert into main.test_source(Id,Name) values(1,'test')''')
data = self.database_context.fetch_query('''select * from main.test_source''')
assert len(data) == 1
assert data[0]["Id"] == 1
assert data[0]["Name"] == 'test'
self.database_context.execute('''update main.test_source set Name='Update' where Id=1''')
data = self.database_context.fetch_query('''select * from main.test_source''')
assert data[0]["Id"] == 1
assert data[0]["Name"] == 'Update'
self.database_context.execute('''delete from main.test_source where Id=1''')
data = self.database_context.fetch_query('''select * from main.test_source''')
assert len(data) == 0
except Exception as ex:
print(ex)
raise
finally:
self.database_context.execute('''DROP TABLE main.test_source''')
| StarcoderdataPython |
8186420 | <reponame>NumberAI/python-bandwidth-iris
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
from future.builtins import super
from iris_sdk.models.base_resource import BaseResource
from iris_sdk.models.data.dlda_order_response import DldaOrderResponseData
XPATH_DLDA_ORDER_RESPONSE = "/{}"
class DldaOrderResponse(BaseResource, DldaOrderResponseData):
""" DLDA order response """
_xpath = XPATH_DLDA_ORDER_RESPONSE
@property
def id(self):
return self.dlda_order.order_id
@id.setter
def id(self, order_id):
self.dlda_order.order_id = order_id
@property
def dlda_order(self):
return self._dlda_order
@dlda_order.setter
def dlda_order(self, dlda_order):
self._dlda_order = dlda_order
def __init__(self, parent=None, client=None):
super().__init__(parent, client)
DldaOrderResponseData.__init__(self)
def get(self, id=None, params=None):
return self._get_data((id or self.id), params=params) | StarcoderdataPython |
1821616 | # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Wikitext-103 datasets."""
import re
from typing import NamedTuple, List
from absl import logging
import numpy as np
from wikigraphs.data import dataset
from wikigraphs.data import tokenizers
from wikigraphs.data import tools
# The data directory that contains subdirectories `wikitext-103` and
# `wikitext-103-raw`.
DATA_ROOT = '/tmp/data/wikitext-103'
class WikitextArticle(NamedTuple):
title: str
text: str
def articles_from_file(file_path: str) -> List[WikitextArticle]:
"""Read wikitext articles from file.
Args:
file_path: path to the input `.tokens` file.
Returns:
A list of `WikitextArticle` tuples.
"""
with open(file_path, mode='rb') as f:
content = f.read()
content = content.decode('utf-8')
title_re = re.compile(r'(\n = ([^=].*) = \n \n)')
parts = title_re.split(content)
# Skip the first part which is empty
return [WikitextArticle(title=parts[i+1], text=parts[i] + parts[i+2])
for i in range(1, len(parts), 3)]
class RawDataset(dataset.Dataset):
"""Raw text dataset for wikitext-103."""
def __init__(self,
subset: str = 'train',
shuffle_data: bool = False,
data_dir: str = None,
version: str = 'tokens'):
"""Constructor.
Args:
subset: which subset to load, one of {"train", "valid", "test"}.
shuffle_data: if set to True the data will be randomly shuffled.
data_dir: if provided will be used instead of the default `DATA_ROOT` as
the directory that contains the data.
version: one of {'tokens', 'raw'}
"""
super().__init__()
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir or DATA_ROOT
self._dataset = None
allowed_versions = ('tokens', 'raw')
if version not in allowed_versions:
raise ValueError(f'Version must be one of {allowed_versions}.')
self._version = version
def _load_data(self):
"""Prepare data for another pass through the dataset."""
if self._dataset is None:
data_root = self._data_dir + ('-raw' if self._version == 'raw' else '')
self._dataset = articles_from_file(
f'{data_root}/wiki.{self._subset}.{self._version}')
def source():
n_articles = len(self._dataset)
if self._shuffle_data:
idx = np.random.permutation(n_articles)
else:
idx = np.arange(n_articles)
for i in range(n_articles):
yield self._dataset[idx[i]]
return source()
def normalize_title(title: str) -> str:
"""Normalize the wikitext article title by handling special characters."""
return title.replace(
'@-@', '-').replace('@,@', ',').replace('@.@', '.').replace(' ', '')
class WikitextDataset(dataset.Dataset):
"""Tokenized dataset for wikitext-103."""
def __init__(self,
tokenizer: tokenizers.Tokenizer,
batch_size: int = 1,
timesteps: int = 128,
subset: str = 'train',
shuffle_data: bool = True,
data_dir: str = None,
repeat: bool = False,
debug: bool = False,
**kwargs):
"""Constructor.
Args:
tokenizer: a tokenizer for text data.
batch_size: number of sequences to put into a batch.
timesteps: length of the sequences.
subset: which subset to load, one of {"train", "valid", "test"}.
shuffle_data: if set to True the data will be randomly shuffled.
data_dir: if provided will be used instead of the default `DATA_ROOT` as
the directory that contains the data.
repeat: set to False to go through the data only once, otherwise go
through the data indefinitely.
debug: set to True to only load a small amount of data for fast debugging.
**kwargs: other arguments (for interface compatibility).
"""
super().__init__()
self._tokenizer = tokenizer
self._batch_size = batch_size
self._timesteps = timesteps
self._subset = subset
self._shuffle_data = shuffle_data
self._data_dir = data_dir
self._repeat = repeat
self._debug = debug
self._dataset = None
def _load_data(self):
"""Prepare data for one pass through the dataset."""
# Pre-tokenize everything in our dataset so we don't have to when going
# through the data more than once.
if not self._dataset:
raw_dataset = RawDataset(
subset=self._subset, shuffle_data=False, data_dir=self._data_dir)
if self._debug:
# Load a small number of examples for debugging.
self._dataset = [
self._tokenizer.encode(next(raw_dataset).text, prepend_bos=True)
for _ in range(5)]
else:
self._dataset = [self._tokenizer.encode(item.text, prepend_bos=True)
for item in raw_dataset]
logging.info('%s set loaded, total %d examples.',
self._subset, len(self._dataset))
def source():
idx = np.random.permutation(len(self._dataset))
for i in idx:
yield self._dataset[i]
def repeated_source():
if self._repeat:
while True:
yield from source()
else:
yield from source()
data_iter = tools.dynamic_batch(
repeated_source(),
self._batch_size,
self._timesteps + 1, # Extra token to count for the overlap.
return_incomplete_batch=True,
pad=True,
pad_value=self._tokenizer.pad_token())
data_iter = map(lambda x: dict( # pylint: disable=g-long-lambda
obs=x['obs'][:, :-1],
target=x['obs'][:, 1:],
should_reset=x['should_reset'][:, :-1],
mask=(x['obs'][:, 1:] != self._tokenizer.pad_token()).astype(
np.float32),
), data_iter)
return data_iter
def return_faux_batch(self):
"""Return a fake batch with the right shapes and dtypes."""
obs = np.zeros((self._batch_size, self._timesteps), dtype=np.int32)
target = np.zeros_like(obs, dtype=np.int32)
should_reset = np.zeros_like(obs, dtype=np.float32)
mask = np.zeros_like(obs, dtype=np.float32)
return dict(obs=obs, target=target, should_reset=should_reset, mask=mask)
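# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of pulling tokenized batches with the classes above. It
# assumes the wikitext-103 files are present under DATA_ROOT and that a
# `tokenizers.Tokenizer` instance is available; the names below are
# hypothetical and only show the expected call pattern.
def _example_wikitext_batches(tokenizer, num_batches=2):
  ds = WikitextDataset(tokenizer=tokenizer, batch_size=4, timesteps=128,
                       subset='valid', shuffle_data=False, repeat=False)
  batch_iter = ds._load_data()  # the loader defined above
  # Each batch is a dict with 'obs', 'target', 'should_reset' and 'mask',
  # each shaped [batch_size, timesteps].
  return [next(batch_iter) for _ in range(num_batches)]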
| StarcoderdataPython |
11231332 | import cv2
from time import sleep
crop_x,crop_y,crop_w,crop_h = 142,265,338,70
#crop_x,crop_y,crop_w,crop_h = 95,275,330,67
x,y=0,0
img = cv2.imread("/home/pi/Desktop/132710.jpg")
cv2.rectangle(img, (x+crop_x, y+crop_y), (x+crop_x + crop_w, y+crop_y + crop_h), (255, 0, 0), 2)
cv2.imshow("Test",img)
#sleep(500)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
364650 | from lib.mlp import NeuralNetwork
import numpy as np
if __name__ == "__main__":
print("MLP Test usin XOR gate")
filename = "XOR.dat"
'''
@dataset: array of arrays
[ [x1, x1, x2, ..., xn, y],
[x1, x1, x2, ..., xn, y],
[x1, x1, x2, ..., xn, y] ]
'''
dataset = np.loadtxt(open(filename, "rb"), delimiter=" ")
input_size = dataset.shape[1] - 1
output_size = 1
nn_size = [input_size, 2, output_size]
print("DataSet: {}".format(dataset))
print("NN SIZE {}".format(nn_size))
mlp = NeuralNetwork(layer_size=nn_size, debug_string=True)
mlp.train(dataset, eta=0.1, threshold=1e-3, max_iterations=100000)
outputs, output = mlp.classify(np.array([0,0]))
print(mlp)
x = np.array([0,0])
outputs, output = mlp.classify(x)
print("==========================")
# print("Z: {}".format(outputs))
print("x: {}, ŷ: {}".format(x, output))
x = np.array([0,1])
outputs, output = mlp.classify(x)
print("==========================")
# print("Z: {}".format(outputs))
print("x: {}, ŷ: {}".format(x, output))
x = np.array([1,0])
outputs, output = mlp.classify(x)
print("==========================")
# print("Z: {}".format(outputs))
print("x: {}, ŷ: {}".format(x, output))
x = np.array([1,1])
outputs, output = mlp.classify(x)
print("==========================")
# print("Z: {}".format(outputs))
print("x: {}, ŷ: {}".format(x, output))
| StarcoderdataPython |
6445008 | <reponame>wangji1/test-framework-and-suites-for-android<filename>acs/acs/Device/DeviceLogger/SerialLogger/SerialAnalyzerThread.py
"""
:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: This file expose the phone interface IPhone
:since: 06/05/2011
:author: sfusilie
"""
from Queue import Queue, Empty
import threading
import time
class SerialAnalyzerThread():
"""
SerialAnalyzerThread: Thread to analyze every lines that are read from
Serial port.
The mechanism is similar to LogCatAnalyzerThread
"""
def __init__(self, logger):
# Analyzer thread stop condition
self.__start_analyze = False
# Messages to trigger
self.__messages_to_trigger = {}
# Message to received
self.__message_to_receive = None
self.__message_received = None
self.__is_message_received = False
# Lock object
self.__lock_message_triggered = threading.RLock()
self.__lock_message_received = threading.RLock()
# Internal buffer
self.__queue = Queue()
self.__analyzer_thread = None
# Logger
self._logger = logger
def stop(self):
"""
Stop the Thread
"""
self.__start_analyze = False
if self.__analyzer_thread is not None:
try:
self.__analyzer_thread.join(5)
except Exception: # pylint: disable=W0703
pass
finally:
del self.__analyzer_thread
self.__analyzer_thread = None
return
def start(self):
"""
Start the thread
"""
self.__analyzer_thread = threading.Thread(target=self.__run)
self.__analyzer_thread.name = "SerialAnalyzerThread"
self.__analyzer_thread.daemon = True
self.__analyzer_thread.start()
def push(self, line):
"""
Method to receive the line that are read from the serial port.
This method is used by the SerialReaderThread
:type line: String
:param line: Line read from the serial port
"""
self.__queue.put_nowait(line)
def __run(self):
"""
Overloaded method that contains the instructions to run
when the thread is started
"""
self.__start_analyze = True
while self.__start_analyze:
while not self.__queue.empty():
try:
line = self.__queue.get_nowait()
if len(line) > 0:
self.__analyse_line(line.rstrip('\r\n'))
except Empty:
pass
time.sleep(1)
def __analyse_line(self, line):
"""
Sub method to analyse every line read by the SerialReaderThread
and store them if they match one of the trigger message
:type line: String
:param line: Line read from the serial port
"""
# For each line to analyze
# for line in lines:
# Check all messages to be triggered
self.__lock_message_triggered.acquire()
for trig_message in self.__messages_to_trigger:
if line.find(trig_message) != -1:
# Message received, store log line
self.__messages_to_trigger[trig_message].append(line)
self.__lock_message_triggered.release()
# Check message to be received
self.__lock_message_received.acquire()
if self.__message_to_receive is not None:
if line.find(self.__message_to_receive) != -1:
self.__message_received.append(line)
self.__is_message_received = True
self.__lock_message_received.release()
def add_trigger_messages(self, messages):
""" Trigger a list of messages
:type messages: Array
:param messages: messages to be triggered
"""
for message in messages:
self.add_trigger_message(message)
def add_trigger_message(self, message):
""" Trigger a message
:type message: string
:param message: message to be triggered
"""
self.__lock_message_triggered.acquire()
self.__messages_to_trigger[message] = list()
self.__lock_message_triggered.release()
def remove_trigger_message(self, message):
""" Remove a triggered message
:type message: string
:param message: message to be removed
"""
if message in self.__messages_to_trigger:
self.__lock_message_triggered.acquire()
del self.__messages_to_trigger[message]
self.__lock_message_triggered.release()
def is_message_received(self, message, timeout):
""" Check if a message is received
:type message: string
:param message: message that we look for
:type timeout: int
:param timeout: time limit where we expect to receive the message
:return: Array of message received, empty array if nothing
:rtype: list
"""
self.__lock_message_received.acquire()
self.__is_message_received = False
self.__message_received = list()
self.__message_to_receive = message
self.__lock_message_received.release()
time_count = 0
while (not self.__is_message_received) and (time_count <= timeout):
time.sleep(1)
time_count += 1
self.__is_message_received = False
self.__message_to_receive = None
return self.__message_received
def get_message_triggered_status(self, message):
""" Get the status of a message triggered
:type message: string
:param message: message triggered
:rtype: list of string
:return: Array of message received, empty array if nothing
"""
if message in self.__messages_to_trigger:
return self.__messages_to_trigger[message]
else:
return None
def reset_trigger_message(self, message):
""" Reset triggered message
:type message: string
:param message: message to be reseted
"""
if message in self.__messages_to_trigger:
self.__lock_message_received.acquire()
self.__messages_to_trigger[message] = list()
self.__lock_message_received.release()
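# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of how the analyzer is typically driven: a reader thread
# pushes every raw serial line, while the caller either blocks for a single
# expected message or collects all lines matching a registered trigger. The
# `logger` and `read_line` callable below are hypothetical placeholders.
def _example_serial_analyzer(logger, read_line):
    analyzer = SerialAnalyzerThread(logger)
    analyzer.start()
    analyzer.add_trigger_message("BOOT COMPLETE")
    # A SerialReaderThread would normally call push() for every line it reads.
    analyzer.push(read_line())
    matched_lines = analyzer.is_message_received("login:", timeout=10)
    boot_lines = analyzer.get_message_triggered_status("BOOT COMPLETE")
    analyzer.stop()
    return matched_lines, boot_lines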
| StarcoderdataPython |
3519641 | <filename>boxio.py<gh_stars>0
import functools
import itertools
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
from boxsdk import BoxAPIException
from boxsdk.object import folder as boxfolder
import boxapi
import fileio
logger = logging.getLogger(__file__)
def _get_project_names_from_dmux_folder(folder: Path):
""" Infers the project names in a sequencing run from the folder names after dmuxing."""
folders = [i.name for i in folder.iterdir() if i.is_dir()]
logging.debug(str(folders))
logging.debug(str(folder.iterdir()))
project_names = [i for i in folders if i not in {'Reports', 'Stats'}]
return project_names
@functools.lru_cache(maxsize = None)
def get_project_files_on_box(project_name: str) -> List:
project_folder = get_box_folder(boxapi.FOLDER, project_name)
containers = project_folder.item_collection['entries']
containers = [i.get(fields = None, etag = None) for i in containers]
project_files = list()
for container in containers:
container_samples: List[Any] = [i.get(fields = None, etag = None) for i in container.item_collection['entries']]
container_files: List[List[Any]] = [sample.item_collection['entries'] for sample in container_samples if hasattr(sample, 'item_collection')]
project_files += list(itertools.chain.from_iterable(container_files))
# pprint(project_files)
# project_files = list(itertools.chain.from_iterable(project_files))
return project_files
def file_exists(project_name: str, file_name: Union[str, Path]) -> bool:
if isinstance(file_name, Path):
file_name = file_name.name
project_files = get_project_files_on_box(project_name)
project_filenames = [i.name for i in project_files]
return file_name in project_filenames
def item_in_folder(folder: boxfolder, item_name: str) -> bool:
folder_items = folder.item_collection['entries']
folder_item_names = [i.name for i in folder_items]
return item_name in folder_item_names
def get_box_folder(parent_folder: boxfolder, item_name: str):
""" Attempts to find an existing project folder on box.com that is in the parent_folder.
If no folder is found, create one.
"""
existing_items = parent_folder.item_collection['entries']
for existing_item in existing_items:
if existing_item.name == item_name:
subfolder = existing_item
break
else:
# Could not locate the folder on box.com. create one.
subfolder = parent_folder.create_subfolder(item_name)
# Retrieve the folder properties.
subfolder = subfolder.get(fields = None, etag = None)
return subfolder
def upload_project_to_box(project_folder: Path, container_id: str, project_name: Optional[str] = None) -> str:
"""
Uploads all files for a project to box.com and returns a sharable link to the project folder.
Parameters
----------
project_folder
container_id
project_name
Returns
-------
"""
if project_name is None:
project_name = project_folder.name
# Create a folder for the selected project
project_box_folder = get_box_folder(boxapi.FOLDER, project_name)
# Create a subfolder for the current sequencing run.
container_folder = get_box_folder(project_box_folder, container_id)
# Upload the file checksums.
checksum_filename = project_folder / "checksums.tsv"
if checksum_filename.exists() and not file_exists(project_name, 'checksums.tsv'):
try:
container_folder.upload(str(checksum_filename))
except:
pass
# Upload sample folders
sample_folders = fileio.get_project_samples(project_folder)
for sample_folder in sample_folders:
print("\t Uploading ", sample_folder.name)
# Need to create a subfolder for each sample.
sample_box_folder = get_box_folder(container_folder, sample_folder.name)
upload_project_samples_to_box(sample_folder, sample_box_folder)
return project_box_folder.get_shared_link()
def upload_project_samples_to_box(samples: fileio.Sample, sample_box_folder: Any):
existing_files = [i.name for i in sample_box_folder.item_collection['entries']]
for filename in samples:
if filename.name in existing_files: continue
try:
if filename.stat().st_size > 50E6:
# The chunked API raises an error if the filesize is less than 20MB.
chunked_uploader = sample_box_folder.get_chunked_uploader(str(filename))
uploaded_file = chunked_uploader.start()
else:
uploaded_file = sample_box_folder.upload(str(filename))
logger.info(f"Uploaded {filename}\t{uploaded_file}")
except BoxAPIException:
logger.error(f"Could not upload {filename}")
if __name__ == "__main__":
pass
| StarcoderdataPython |
49575 | import unittest
from rdbtools3.intset import unpack_intset
from rdbtools3.exceptions import RDBValueError
class TestIntset(unittest.TestCase):
def test_3x2bytes(self):
val = (b'\x02\x00\x00\x00' # int size
b'\x03\x00\x00\x00' # set length
b'\x01\x00' # item 1
b'\x02\x00' # item 2
b'\x00\x01') # item 3
ret = list(unpack_intset(val))
self.assertEqual([
1, 2, 256
], ret)
def test_2x4bytes(self):
val = (b'\x04\x00\x00\x00'
b'\x02\x00\x00\x00'
b'\x01\x00\x00\x00'
b'\x00\x00\x00\x80')
ret = list(unpack_intset(val))
self.assertEqual([
1, 2**31
], ret)
def test_2x8bytes(self):
val = (b'\x08\x00\x00\x00'
b'\x02\x00\x00\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x80')
ret = list(unpack_intset(val))
self.assertEqual([
1, 2**63
], ret)
def test_bad_length(self):
val = (b'\x02\x00\x00\x00'
b'\x01\x00\x00\x00'
b'\x01\x00'
b'\x02\x00\x00')
test = getattr(self, 'assertRaisesRegex',
getattr(self, 'assertRaisesRegexp'))
with test(RDBValueError, "Bad content size 5 \(expected 2\)"):
list(unpack_intset(val))
def test_bad_size_encoding(self):
val = (b'\x03\x00\x00\x00'
b'\x01\x00\x00\x00'
b'\x00\x00\x00')
test = getattr(self, 'assertRaisesRegex',
getattr(self, 'assertRaisesRegexp'))
with test(RDBValueError, "Unexpected size encoding 0x3"):
list(unpack_intset(val))
def test_zero_len(self):
val = (b'\x02\x00\x00\x00'
b'\x00\x00\x00\x00')
ret = list(unpack_intset(val))
self.assertEqual([], ret)
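# --- Illustrative helper (editor's sketch, not part of the original tests) ---
# The wire format exercised above is: a 4-byte little-endian integer width
# (2, 4 or 8), a 4-byte little-endian element count, then the packed elements.
# For example, _build_intset(2, [1, 2, 256]) reproduces the payload used in
# test_3x2bytes.
def _build_intset(int_size, values):
    import struct
    width_format = {2: '<H', 4: '<I', 8: '<Q'}[int_size]
    blob = struct.pack('<I', int_size) + struct.pack('<I', len(values))
    for value in values:
        blob += struct.pack(width_format, value)
    return blob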
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
9633749 | <reponame>PartiallyTyped/Hyperactive<gh_stars>100-1000
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import cross_val_score
from rgf.sklearn import RGFClassifier
from hyperactive import Hyperactive
data = load_breast_cancer()
X, y = data.data, data.target
def model(opt):
rgf = RGFClassifier(
max_leaf=opt["max_leaf"],
reg_depth=opt["reg_depth"],
min_samples_leaf=opt["min_samples_leaf"],
algorithm="RGF_Sib",
test_interval=100,
verbose=False,
)
scores = cross_val_score(rgf, X, y, cv=3)
return scores.mean()
search_space = {
"max_leaf": list(range(10, 2000, 10)),
"reg_depth": list(range(1, 21)),
"min_samples_leaf": list(range(1, 21)),
}
hyper = Hyperactive()
hyper.add_search(model, search_space, n_iter=10)
hyper.run()
| StarcoderdataPython |
3276557 | <reponame>satra/nibabel
""" Testing reading DICOM files
"""
import numpy as np
from .. import dicomreaders as didr
from .test_dicomwrappers import (dicom_test,
EXPECTED_AFFINE,
EXPECTED_PARAMS,
IO_DATA_PATH,
DATA)
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
@dicom_test
def test_read_dwi():
img = didr.mosaic_to_nii(DATA)
arr = img.get_data()
assert_equal(arr.shape, (128,128,48))
assert_array_almost_equal(img.get_affine(), EXPECTED_AFFINE)
@dicom_test
def test_read_dwis():
data, aff, bs, gs = didr.read_mosaic_dwi_dir(IO_DATA_PATH, '*.dcm.gz')
assert_equal(data.ndim, 4)
assert_array_almost_equal(aff, EXPECTED_AFFINE)
assert_array_almost_equal(bs, (0, EXPECTED_PARAMS[0]))
assert_array_almost_equal(gs,
(np.zeros((3,)) + np.nan,
EXPECTED_PARAMS[1]))
assert_raises(IOError, didr.read_mosaic_dwi_dir, 'improbable')
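# --- Illustrative usage (editor's sketch, not part of the original tests) ---
# A minimal example of the reader API exercised above, assuming a directory of
# gzipped Siemens mosaic DICOMs; `dicom_dir` is a hypothetical path.
def _example_read_mosaics(dicom_dir):
    data, affine, b_values, gradients = didr.read_mosaic_dwi_dir(dicom_dir, '*.dcm.gz')
    return data.shape, affine, b_values, gradients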
| StarcoderdataPython |
9607325 | <reponame>mpetyx/pyrif<gh_stars>0
from rdflib import Graph, RDF, URIRef
from FuXi.Syntax.InfixOWL import OWL_NS, Class
# local source:
# galenGraph = Graph().parse(
# os.path.join(os.path.dirname(__file__), 'GALEN-CABG-Segment.owl'))
# remote source:
galenGraph = Graph().parse(
location="http://python-dlp.googlecode.com/" + \
"svn/trunk/InfixOWL/GALEN-CABG-Segment.owl",
format="xml")
graph = galenGraph
with open('GALEN-CABG-Segment.asc', 'w') as fp:
for c in graph.subjects(predicate=RDF.type, object=OWL_NS.Class):
if isinstance(c, URIRef):
fp.write(Class(c, graph=graph).__repr__(True) + "\n\n")
print("Done")
| StarcoderdataPython |
398143 | #!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import cupy
from numba import cuda
@cuda.jit
def compute_norms(data, norms):
"""Compute norms
Args:
data (matrix): matrix with data and samples in rows
norms (matrix): matrix for norms
"""
i = cuda.grid(1)
norms[i] = len(data[i])
for j in range(len(data[i])):
if data[i][j] != 0:
value = j + 1
data[i][j] = value
norms[i] = norms[i] + (value ** 2)
if norms[i] != 0:
norms[i] = math.sqrt(norms[i])
@cuda.jit
def compute_tanimoto_similarity_matrix(data, norms, dist_array):
"""Numba kernel to calculate tanimoto similarity according to the wikipedia definition
Args:
data (matrix): data with samples in rows
norms (matrix): matrix with samples in rows
dist_array (matrix): square matrix to hold pairwise distance
"""
x = cuda.grid(1)
rows = len(data)
i = x // rows
j = x % rows
if i == j:
dist_array[i][j] = 1.0
return
a = data[i]
b = data[j]
prod = 0
for k in range(len(a)):
prod = prod + (a[k] * b[k])
a_norm = norms[i]
b_norm = norms[j]
dist_array[i][j] = (prod / ((a_norm ** 2 + b_norm ** 2) - prod))
@cuda.jit
def compute_rdkit_tanimoto_similarity_matrix(data, dist_array):
"""Numba kernel to calculate tanimoto similarity according to the RDKit definition
Args:
data (matrix): data with samples in rows
dist_array (matrix): square matrix to hold pairwise distance
"""
x = cuda.grid(1)
rows = len(data)
i = x // rows
j = x % rows
if i == j:
dist_array[i][j] = 1.0
return
a = data[i]
b = data[j]
intersections = 0
total = 0
for k in range(len(a)):
if a[k] and b[k]:
intersections += 1
total += 2
elif a[k] or b[k]:
total += 1
dist_array[i][j] = intersections / float(total - intersections)
def tanimoto_calculate(fp, calc_distance=False):
"""Calculate tanimoto similarity or distance
Args:
fp (cupy array or cudf dataframe): fingerprints with samples in rows
calc_distance (bool, optional): Calculate distance metric. Defaults to False.
Returns:
array: pairwise tanimoto distance
"""
dist_array = cupy.zeros((fp.shape[0], fp.shape[0]), cupy.float32)
compute_rdkit_tanimoto_similarity_matrix.forall(fp.shape[0] * fp.shape[0], 1)(fp, dist_array)
if calc_distance:
dist_array = 1.0 - dist_array
return dist_array
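# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example computing the pairwise Tanimoto matrix for a few random
# binary fingerprints; the fingerprint count and length are arbitrary.
def _example_tanimoto():
    fingerprints = cupy.random.randint(0, 2, size=(8, 512)).astype(cupy.float32)
    similarity = tanimoto_calculate(fingerprints)
    distance = tanimoto_calculate(fingerprints, calc_distance=True)  # 1 - similarity
    return similarity, distance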
| StarcoderdataPython |
3232103 | """
Copyright (c) 2020 Cisco Systems Inc or its affiliates.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------
Name: fmc.py
Purpose: This is contains FMC class having REST methods
"""
import time
import requests
import logging
import json
import utility as utl
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Setup Logging
logger = utl.setup_logging(utl.e_var['DebugDisable'])
class FirepowerManagementCenter:
def __init__(self):
self.server = 'https://' + utl.e_var['FmcIp']
self.username = utl.e_var['FmcUserName']
self.password = utl.e_var['FmcPassword']
self.headers = []
self.domain_uuid = ""
self.authTokenTimestamp = 0
self.authTokenMaxAge = 15*60 # seconds - 30 minutes is the max without using refresh
self.accessPolicyName = utl.j_var['AccessPolicyName']
def rest_get(self, url):
"""
Purpose: Issue REST get to the specified URL
Parameters: url
Returns: r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises:
"""
# if the token is too old then get another
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
        r = None
        try:
# REST call with SSL verification turned off:
logging.debug("Request: " + url)
r = requests.get(url, headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.get(url, headers=headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.debug("Response status_code: " + str(status_code))
logging.debug("Response body: " + str(resp))
if 200 <= status_code <= 300:
# logging.debug("GET successful. Response data --> ")
# json_resp = json.loads(resp)
# logging.debug(json.dumps(json_resp,sort_keys=True,indent=4, separators=(',', ': ')))
pass
else:
r.raise_for_status()
raise Exception("Error occurred in Get -->"+resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def rest_post(self, url, post_data):
"""
Purpose: Issue REST post to the specified url with the post_data provided
Parameters: url, post data
Returns: This function will return 'r' which is the response from the post:
r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises: Error occurred in post
"""
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
        r = None
        try:
# REST call with SSL verification turned off:
logging.debug("Request: " + url)
logging.debug("Post_data " + str(post_data))
r = requests.post(url, data=json.dumps(post_data), headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.post(url,data=json.dumps(post_data), headers=self.headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.info("Response status_code: " + str(status_code))
logging.info("Response body: " + str(resp))
# logging.debug("Status code is: "+str(status_code))
if 201 <= status_code <= 202:
# json_resp = json.loads(resp)
# logging.debug(json.dumps(json_resp,sort_keys=True,indent=4, separators=(',', ': ')))
pass
else:
r.raise_for_status()
raise Exception("Error occurred in POST --> "+resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def rest_put(self, url, put_data):
"""
Purpose: Issue REST put to specific url with the put_data provided
Parameters: url, put data
Returns: This function will return 'r' which is the response from the put:
r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises:
"""
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
        r = None
        try:
# REST call with SSL verification turned off:
logging.info("Request: " + url)
logging.info("Put_data: " + str(put_data))
r = requests.put(url, data=json.dumps(put_data), headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.put(url, data=json.dumps(put_data), headers=headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.info("Response status_code: " + str(status_code))
logging.info("Response body: " + str(resp))
if status_code == 200:
pass
else:
r.raise_for_status()
raise Exception("Error occurred in put -->" + resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def rest_delete(self, url):
"""
Purpose: Issue REST delete to the specified URL
Parameters: url
Returns: This function will return 'r' which is the response to the request:
r.text is the text response (r.json() is a python dict version of the json response)
r.status_code = 2xx on success
Raises:
"""
if time.time() > self.authTokenMaxAge + self.authTokenTimestamp:
logging.debug("Getting a new authToken")
self.get_auth_token()
        r = None
        try:
# REST call with SSL verification turned off:
logging.debug("Request: " + url)
r = requests.delete(url, headers=self.headers, verify=False)
# REST call with SSL verification turned on:
# r = requests.delete(url, headers=headers, verify='/path/to/ssl_certificate')
status_code = r.status_code
resp = r.text
logging.info("Response status_code: " + str(status_code))
logging.info("Response body: " + str(resp))
if 200 <= status_code <= 300:
# logging.debug("GET successful. Response data --> ")
# json_resp = json.loads(resp)
# logging.debug(json.dumps(json_resp,sort_keys=True,indent=4, separators=(',', ': ')))
pass
else:
r.raise_for_status()
raise Exception("Error occurred in Delete -->"+resp)
except requests.exceptions.HTTPError as err:
raise Exception("Error in connection --> "+str(err))
finally:
if r: r.close()
return r
def get_auth_token(self):
"""
Purpose: get a new REST authentication token
update the 'headers' variable
set a timestamp for the header (tokens expire)
Parameters:
Returns:
Raises:
"""
self.headers = {'Content-Type': 'application/json'}
api_auth_path = "/api/fmc_platform/v1/auth/generatetoken"
auth_url = self.server + api_auth_path
try:
# 2 ways of making a REST call are provided:
# One with "SSL verification turned off" and the other with "SSL verification turned on".
# The one with "SSL verification turned off" is commented out. If you like to use that then
# uncomment the line where verify=False and comment the line with =verify='/path/to/ssl_certificate'
# REST call with SSL verification turned off:
r = requests.post(auth_url, headers=self.headers,
auth=requests.auth.HTTPBasicAuth(self.username, self.password), verify=False)
# REST call with SSL verification turned on: Download SSL certificates
# from your FMC first and provide its path for verification.
# r = requests.post(auth_url, headers=self.headers,
# auth=requests.auth.HTTPBasicAuth(username,password), verify='/path/to/ssl_certificate')
auth_headers = r.headers
auth_token = auth_headers.get('X-auth-access-token', default=None)
self.domain_uuid = auth_headers.get('domain_uuid', default=None)
self.headers['X-auth-access-token'] = auth_token
self.authTokenTimestamp = int(time.time())
# logging.debug("Acquired AuthToken: " + auth_token)
# logging.debug("domain_uuid: " + domain_uuid)
if auth_token is None:
logging.debug("auth_token not found. Exiting...")
# raise Exception("Error occurred in get auth token ")
except Exception as err:
logger.error("Error in generating auth token --> " + str(err))
return
def get_device_grp_id_by_name(self, name):
"""
Purpose: To get device group id by passing name of the group
Parameters: Name of device group
Returns: Group Id or None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devicegroups/devicegrouprecords"
url = self.server + api_path + '?offset=0&limit=9000'
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
return None
def get_security_objectid_by_name(self, name):
"""
Purpose: Get Zone ID from it's name
Parameters: Zone Name
Returns: Zone ID, None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/securityzones"
url = self.server + api_path + '?offset=0&limit=9000'
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
return None
# Get network objects (all network and host objects)
def get_network_objectid_by_name(self, name):
"""
Purpose: Get Network object Id by its name
Parameters: Object Name
Returns: Object Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/networkaddresses"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
for item in r.json()['items']:
if item['type'] == 'Network' and item['name'] == name:
return str(item['id'])
# raise Exception('network object with name ' + name + ' was not found')
return ''
def get_port_objectid_by_name(self, name):
"""
Purpose: Get Port object Id by its name
Parameters: Object Name
Returns: Object Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/protocolportobjects"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
for item in r.json()['items']:
if item['type'] == 'ProtocolPortObject' and item['name'] == name:
return str(item['id'])
# raise Exception('network port with name ' + name + ' was not found')
return ''
def get_host_objectid_by_name(self, name):
"""
Purpose: Get Host object Id by Name
Parameters: Object Name
Returns: Object Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/object/hosts"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
for item in r.json()['items']:
if item['type'] == 'Host' and item['name'] == name:
return str(item['id'])
# raise Exception('host object with name ' + name + ' was not found')
return ''
def get_device_id_by_name(self, name):
"""
Purpose: Get Device Id by its name
Parameters: Device Name
Returns: Device Id
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
# or return empty string
return ''
def get_access_policy_id_by_name(self, name):
"""
Purpose: Get Access Policy Id by its name
Parameters: Access policy name
Returns: Access Policy Id, None
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/policy/accesspolicies"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
# Search for policy by name
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == name:
return str(item['id'])
return None
def get_nic_id_by_name(self, device_name, nic_name):
"""
Purpose: Get Nic Id by device & nic name
Parameters: Device Name, Nic name
Returns: Nic Id, None
Raises:
"""
if nic_name != 'GigabitEthernet0/0' and nic_name != 'GigabitEthernet0/1':
logging.debug("warning - nic name must be GigabitEthernet0/0 or GigabitEthernet0/1. "
"The argument name was " + nic_name)
device_id = self.get_device_id_by_name(device_name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces"
url = self.server + api_path
r = self.rest_get(url)
if 'items' in r.json():
for item in r.json()['items']:
if item['name'] == nic_name:
return str(item['id'])
return None
def get_time_stamp(self):
"""
Purpose: Get time stamp
Parameters:
Returns: Audit time stamp
Raises:
"""
api_path = "/api/fmc_platform/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/audit/auditrecords"
url = self.server + api_path
r = self.rest_get(url)
return r.json()['items'][0]['time']*1000
def get_deployable_devices(self):
"""
Purpose: Get list of deployable devices
Parameters:
Returns: List of devices, pending to be deployed
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/deployment/deployabledevices"
url = self.server + api_path
r = self.rest_get(url)
logging.debug("deployable devices:" + str(r.json()))
device_list = []
if 'items' in r.json():
for item in r.json()['items']:
if item['type'] == 'DeployableDevice':
device_list.append(item['name'])
return device_list
def get_nic_status(self, device_id, nic, nic_id, ifname, zone_id, ip=None):
"""
Purpose: To check whether Nic is configured or not configured
Parameters: Device Id, Nic, Nic Id, Interface Name, Zone Id, Ip
Returns: CONFIGURED, MIS-CONFIGURED, UN-CONFIGURED
Raises:
"""
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces/" + nic_id
url = self.server + api_path
r = self.rest_get(url)
flag1, flag2 = 0, 0
try:
if 'ipv4' in r.json():
item = dict.copy(r.json()['ipv4']['static'])
if item['address'] == ip:
flag1 = 1
except:
try:
if 'ipv4' in r.json():
item = dict.copy(r.json()['ipv4']['dhcp'])
flag1 = 1
except:
flag1 = 0
try:
if r.json()['name'] == nic:
if r.json()['ifname'] == ifname:
flag2 = 1
if r.json()['securityZone']['id'] != zone_id:
flag2 = 0
except:
flag2 = 0
if flag1 == 1 and flag2 == 1:
return "CONFIGURED"
elif (flag1 == 1 and flag2 == 0) or (flag1 == 0 and flag2 == 1):
logger.critical("Interface Mis-Configured! ")
return "UN-CONFIGURED"
def check_static_route(self, device, interface_name, network_name, host_object_name_gw):
"""
Purpose: Check if a static route exists on a device
Parameters: Device, Interface name, Network, Gateway
Returns: CONFIGURED, UN-CONFIGURED
Raises:
"""
ngfwid = self.get_device_id_by_name(device)
if ngfwid == '':
return "NO-DEVICE"
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + ngfwid + \
"/routing/ipv4staticroutes"
url = self.server + api_path + '?offset=0&limit=10000'
r = self.rest_get(url)
if 'items' in r.json():
for key1 in r.json()['items']:
id = key1['id']
url = self.server + api_path + '/' + id
r = self.rest_get(url)
if r.json()['interfaceName'] == interface_name:
for key2 in r.json()['selectedNetworks']:
if key2['name'] == network_name:
try:
element = dict.copy(r.json()['gateway']['object'])
if element['name'] == host_object_name_gw:
return "CONFIGURED"
except:
pass
try:
element = dict.copy(r.json()['gateway']['literal'])
if element['value'] == host_object_name_gw:
return "CONFIGURED"
except:
pass
return "UN-CONFIGURED"
def configure_nic_dhcp(self, device_name, nic, nic_name, zone, mtu):
"""
Purpose: Configure an Nic interface as DHCP
Parameters: Device Name, Nic, Nic name, Zone, MTU
Returns: REST put response
Raises:
"""
device_id = self.get_device_id_by_name(device_name)
nic_id = self.get_nic_id_by_name(device_name, nic)
zone_id = self.get_security_objectid_by_name(zone)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces/" + nic_id
url = self.server + api_path
put_data = {
"type": "PhysicalInterface",
"managementOnly": "false",
"MTU": int(mtu),
"ipv4": {
"dhcp": {
"enableDefaultRouteDHCP": "false",
"dhcpRouteMetric": 1
}
},
"securityZone": {
"id": zone_id,
"type": "SecurityZone"
},
"mode": "NONE",
"ifname": nic_name,
"enabled": "true",
"name": nic,
"id": nic_id
}
r = self.rest_put(url, put_data)
return r
def configure_nic_static(self, device_name, nic, nic_name, zone, mtu, ip, netmask):
"""
Purpose: Configure an Nic interface as Static
Parameters: Device Name, Nic, Nic name, Zone, IP, Netmask
Returns: REST put response
Raises:
"""
device_id = self.get_device_id_by_name(device_name)
nic_id = self.get_nic_id_by_name(device_name, nic)
zone_id = self.get_security_objectid_by_name(zone)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
device_id + "/physicalinterfaces/" + nic_id
url = self.server + api_path
put_data = {
"type": "PhysicalInterface",
"managementOnly": "false",
"MTU": mtu,
"ipv4": {
"static": {
"address": ip,
"netmask": netmask
}
},
"securityZone": {
"id": zone_id,
"type": "SecurityZone"
},
"mode": "NONE",
"ifname": nic_name,
"enabled": "true",
"name": nic,
"id": nic_id
}
r = self.rest_put(url, put_data)
return r
def create_static_network_route(self, device, interface_name, network_object_name, host_object_name_gw, metric):
"""
Purpose: To create static network route on device
Parameters: Device, Interface Name, Network, Gateway, Metric
Returns: REST response
Raises:
"""
ngfwid = self.get_device_id_by_name(device)
network_object_id = self.get_network_objectid_by_name(network_object_name)
host_object_id_gw = self.get_host_objectid_by_name(host_object_name_gw)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + ngfwid + \
"/routing/ipv4staticroutes"
url = self.server + api_path
if host_object_id_gw != '':
gate_way = {
"object": {
"type": "Host",
"id": host_object_id_gw,
"name": host_object_name_gw
}
}
else:
gate_way = {
"literal": {
"type": "Host",
"value": host_object_name_gw
}
}
post_data = {
"interfaceName": interface_name,
"selectedNetworks": [
{
"type": "Network",
"id": network_object_id,
"name": network_object_name
}
],
"gateway": gate_way,
"metricValue": metric,
"type": "IPv4StaticRoute",
"isTunneled": False
}
r = self.rest_post(url, post_data)
return r
def create_static_host_route(self, device, interface_name, host_object_name, host_object_name_gw, metric):
"""
Purpose: To create static host route on device
Parameters: Device, Interface Name, Host, Gateway, Metric
Returns: REST response
Raises:
"""
ngfwid = self.get_device_id_by_name(device)
host_object_id = self.get_host_objectid_by_name(host_object_name)
host_object_id_gw = self.get_host_objectid_by_name(host_object_name_gw)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/" + \
ngfwid + "/routing/ipv4staticroutes" # param
url = self.server + api_path
if host_object_id_gw != '':
gate_way = {
"object": {
"type": "Host",
"id": host_object_id_gw,
"name": host_object_name_gw
}
}
else:
gate_way = {
"literal": {
"type": "Host",
"value": host_object_name_gw
}
}
post_data = {
"interfaceName": interface_name,
"selectedNetworks": [
{
"type": "Host",
"id": host_object_id,
"name": host_object_name
}
],
"gateway": gate_way,
"metricValue": metric,
"type": "IPv4StaticRoute",
"isTunneled": False
}
r = self.rest_post(url, post_data)
return r
def register_device(self, name, mgmt_ip, policy_id, reg_id, nat_id, license_caps, device_grp_id):
"""
Purpose: Register the device to FMC
Parameters: Name of device, Mgmt ip, Access Policy Id, Registration & NAT id, Licenses Caps, Group Id
Returns: REST post response
Raises:
"""
logger.info("Registering: "+name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords"
url = self.server + api_path
post_data = {
"name": name,
"hostName": mgmt_ip,
"regKey": reg_id,
"natID": nat_id,
"type": "Device",
"license_caps": license_caps,
"accessPolicy": {
"id": policy_id,
"type": "AccessPolicy"
},
"deviceGroup": {
"id": device_grp_id,
"type": "DeviceGroup"
}
}
r = self.rest_post(url, post_data)
return r
def deregister_device(self, name):
"""
Purpose: De-registers the device from FMC
Parameters: Device Name
Returns: REST delete response
Raises:
"""
logger.info("De-registering: " + name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/devices/devicerecords/"
dev_id = self.get_device_id_by_name(name)
url = self.server + api_path + dev_id
r = self.rest_delete(url)
return r
def start_deployment(self, device_name):
"""
Purpose: Deploys policy changes on device
Parameters: Device name
Returns: Task Id
Raises:
"""
logger.info("Deploy called for: " + device_name)
device_list = self.get_deployable_devices()
logging.debug("Device List = " + str(device_list))
if device_name in device_list:
logging.debug("deploying on device: " + device_name)
api_path = "/api/fmc_config/v1/domain/e276abec-e0f2-11e3-8169-6d9ed49b625f/deployment/deploymentrequests"
url = self.server + api_path
post_data = {
"type": "DeploymentRequest",
"version": str(self.get_time_stamp()),
"forceDeploy": True,
"ignoreWarning": True,
"deviceList": [self.get_device_id_by_name(device_name)]
}
r = self.rest_post(url, post_data)
if 'type' in r.json():
if r.json()['type'] == 'DeploymentRequest':
return r.json()['metadata']['task']['id']
return ''
def register_ftdv(self, vm_name, mgmtip, reg_id, nat_id, license_caps, device_grp_id):
"""
Purpose: Register the device to FMC
Parameters: Device Name, Mgmgt Ip, Registration & NAT id, Licenses cap, grp id
Returns: Task id, None
Raises:
"""
try:
vm_policy_id = self.get_access_policy_id_by_name(self.accessPolicyName)
except Exception as e:
logger.warn("%s policy doesn't exist in FMC!" % self.accessPolicyName)
logger.debug(str(e))
return None
else:
if vm_policy_id is not None:
logger.info("Registering FTDv: " + vm_name + " to FMC with policy id: " + vm_policy_id)
r = self.register_device(vm_name, mgmtip, vm_policy_id, reg_id, nat_id, license_caps, device_grp_id)
logger.debug("Register response was: " + str(r.json()))
if 'type' in r.json():
if r.json()['type'] == 'Device':
logger.info("NGWFv: " + vm_name + " registration started and task ID is: " +
r.json()['metadata']['task']['id'])
return r.json()['metadata']['task']['id']
else:
logger.warn("%s policy doesn't exist in FMC" % self.accessPolicyName)
return None
def check_reg_status_from_fmc(self, vm_name):
"""
Purpose: Checks if device is registered to FMC
Parameters: Device Name
Returns: SUCCESS, FAILED
Raises:
"""
try:
device_id = self.get_device_id_by_name(vm_name)
except Exception as e:
logger.debug(str(e))
else:
if device_id != '':
return "SUCCESS"
else:
return "FAILED"
def check_deploy_status(self, vm_name):
"""
Purpose: Checks if any deployment pending for device
Parameters: Device name
Returns: DEPLOYED, NOT-DEPLOYED
Raises:
"""
r = self.get_deployable_devices()
for device in r:
if device == vm_name:
logger.debug("Policies not deployed on " + vm_name)
return "NOT-DEPLOYED"
logger.debug("Policies deployed on " + vm_name)
return "DEPLOYED"
def check_object_fmc(self, obj_name):
"""
Purpose: Checks for Object inn FMC
Parameters: Object name
Returns: Object Id
Raises:
"""
obj_id = self.get_network_objectid_by_name(obj_name)
if obj_id == '':
obj_id = self.get_host_objectid_by_name(obj_name)
if obj_id == '':
obj_id = self.get_port_objectid_by_name(obj_name)
if obj_id == '':
logger.error("Unable to find object %s" % obj_name)
return ''
return obj_id
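# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of the typical register-then-deploy flow built from the
# methods above. The device name, management IP, registration/NAT ids, license
# capabilities and device-group name are hypothetical placeholders.
def _example_register_and_deploy():
    fmc = FirepowerManagementCenter()
    fmc.get_auth_token()
    grp_id = fmc.get_device_grp_id_by_name("AWS-Cluster")
    task_id = fmc.register_ftdv("ftdv-01", "10.0.0.10", "cisco123", "cisco123",
                                ["BASE", "MALWARE", "THREAT"], grp_id)
    # Registration is asynchronous; in practice the task/status would be polled.
    if task_id and fmc.check_reg_status_from_fmc("ftdv-01") == "SUCCESS":
        fmc.start_deployment("ftdv-01")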
| StarcoderdataPython |
11273263 | from datastore.reader.flask_frontend.routes import Route
from datastore.shared.flask_frontend import ERROR_CODES
from tests import assert_error_response
from tests.reader.system.util import setup_data
from tests.util import assert_success_response
data = {
"a/1": {
"fqid": "a/1",
"field_1": "data",
"field_2": 42,
"field_3": [1, 2, 3],
"meta_position": 1,
},
"a/2": {
"fqid": "a/2",
"field_1": "test",
"field_2": 42,
"field_3": [1, 2, 3],
"meta_position": 2,
},
"b/1": {
"fqid": "b/1",
"field_4": "data",
"field_5": 42,
"field_6": [1, 2, 3],
"meta_position": 3,
},
}
def test_0(json_client, db_connection, db_cur):
setup_data(db_connection, db_cur, data)
response = json_client.post(
Route.COUNT.URL,
{
"collection": "a",
"filter": {"field": "field_1", "operator": "=", "value": "invalid"},
},
)
assert_success_response(response)
assert response.json == {
"count": 0,
"position": 3,
}
def test_1(json_client, db_connection, db_cur):
setup_data(db_connection, db_cur, data)
response = json_client.post(
Route.COUNT.URL,
{
"collection": "a",
"filter": {"field": "field_1", "operator": "=", "value": "data"},
},
)
assert_success_response(response)
assert response.json == {
"count": 1,
"position": 3,
}
def test_2(json_client, db_connection, db_cur):
setup_data(db_connection, db_cur, data)
response = json_client.post(
Route.COUNT.URL,
{
"collection": "a",
"filter": {"field": "field_2", "operator": "=", "value": 42},
},
)
assert_success_response(response)
assert response.json == {
"count": 2,
"position": 3,
}
def test_invalid_collection(json_client):
response = json_client.post(
Route.COUNT.URL,
{
"collection": "not valid",
"filter": {"field": "field", "operator": "=", "value": "data"},
},
)
assert_error_response(response, ERROR_CODES.INVALID_FORMAT)
def test_invalid_field(json_client):
response = json_client.post(
Route.COUNT.URL,
{
"collection": "a",
"filter": {"field": "not valid", "operator": "=", "value": "data"},
},
)
assert_error_response(response, ERROR_CODES.INVALID_FORMAT)
def test_invalid_operator(json_client):
response = json_client.post(
Route.COUNT.URL,
{
"collection": "a",
"filter": {"field": "field", "operator": "invalid", "value": "data"},
},
)
assert_error_response(response, ERROR_CODES.INVALID_REQUEST)
| StarcoderdataPython |
1989292 | import random
print('-=-' * 40)
print('I am going to think of a number from 0 to 10; try to guess which number I picked.')
print('-=-' * 40)
r = random.randint(0,10)
num = 11
cont = 0
while num != r:
    num = int(input('What is your guess? '))
if num > r:
        print('Lower... Try again!')
if num < r:
        print('Higher... Try again')
cont += 1
if num == r:
        print('Congratulations, you guessed it on attempt {}!'.format(cont))
| StarcoderdataPython |
1782483 | # -*- coding: utf-8 -*-
"""
Created on Mon May 11 12:17:33 2020
@author: jatin
"""
import os
import cv2
#File Paths
filedir = os.path.dirname(os.path.realpath(__file__))
#Get User Name
name = input("Enter Name: ")
#Create directory
directory = os.path.join(filedir, "dataset", name)
if not os.path.exists(directory):
os.makedirs(directory)
print("Directory _images\{} Successfully Created".format(name))
else:
print("Directory Already Exists. Continuing With Capture")
#Capture Images
print("Starting Webcam...")
capture = cv2.VideoCapture(0)
image_counter = 1
while True:
_, frame = capture.read()
    cv2.imshow('image', frame)
k = cv2.waitKey(100) & 0xff
if k == 27:
# ESC pressed
print("Escape hit. Closing Webcam...")
break
elif k == 32:
# SPACE pressed
print("writing file")
image_name = "opencv_frame_{}.png".format(image_counter)
cv2.imwrite(os.path.join(directory, image_name), frame)
print("{} written!".format(image_name))
image_counter += 1
capture.release()
cv2.destroyAllWindows()
| StarcoderdataPython |
1743203 | from typing import Optional
import arrow
from hypothesis import given, infer, settings
from hypothesis.provisional import urls
from hypothesis.strategies import builds, fixed_dictionaries, lists, none, text
from pydantic import HttpUrl, ValidationError
from rsserpent.models.rss import Category, Enclosure, Feed, Guid, Image, Item, Source
from tests.conftest import Times
class TestCategory:
"""Test the `Category` class."""
@settings(max_examples=Times.SOME)
@given(builds(Category, url=urls() | none()))
def test(self, category: Category) -> None:
"""Test if the `Category` class works properly."""
assert category is not None
class TestEnclosure:
"""Test the `Enclosure` class."""
@settings(max_examples=Times.ONCE)
@given(builds(Enclosure, url=urls()))
def test(self, enclosure: Enclosure) -> None:
"""Test if the `Enclosure` class works properly."""
assert enclosure is not None
@settings(max_examples=Times.SOME)
@given(type_=infer, url=urls())
def test_default_length(self, type_: str, url: str) -> None:
"""Test the default value of `length` in the `Enclosure` class."""
assert Enclosure(type=type_, url=url).length == 0
class TestGUID:
"""Test the `GUID` class."""
@settings(max_examples=Times.ONCE)
@given(builds(Guid))
def test(self, guid: Guid) -> None:
"""Test if the `GUID` class works properly."""
assert guid is not None
@settings(max_examples=Times.SOME)
@given(value=infer)
def test_default_is_perma_link(self, value: str) -> None:
"""Test the default value of `is_perma_link` in the `GUID` class."""
assert Guid(value=value).is_perma_link is True
class TestImage:
"""Test the `Image` class."""
@settings(max_examples=Times.ONCE)
@given(builds(Image, url=urls(), link=urls()))
def test(self, image: Image) -> None:
"""Test if the `Image` class works properly."""
assert image is not None
@settings(max_examples=Times.SOME)
@given(url=urls(), title=infer, link=urls(), description=infer)
def test_default_width_and_height(
self, url: str, title: str, link: str, description: Optional[str]
) -> None:
"""Test the default value of `width` & `height` in the `Image` class."""
image = Image(url=url, title=title, link=link, description=description)
assert image.width == 88
assert image.height == 31
class TestSource:
"""Test the `Source` class."""
@settings(max_examples=Times.SOME)
@given(builds(Source, url=urls() | none()))
def test(self, source: Source) -> None:
"""Test if the `Source` class works properly."""
assert source is not None
assert isinstance(source.url, HttpUrl) or (source.url is None)
class TestItem:
"""Test the `Item` class."""
@settings(max_examples=Times.SOME)
@given(
builds(
Item,
title=text(),
link=urls() | none(),
description=infer,
author=infer,
categories=lists(fixed_dictionaries({"name": text()})) | none(),
comments=urls() | none(),
enclosure=builds(Enclosure, url=urls()) | none(),
guid=builds(Guid) | none(),
pub_date=builds(arrow.utcnow) | none(),
source=builds(Source, url=urls() | none()) | none(),
)
)
def test(self, item: Item) -> None:
"""Test if the `Item` class works properly."""
assert item is not None
@settings(max_examples=Times.THOROUGH)
@given(title=infer, description=infer)
def test_validation(self, title: Optional[str], description: Optional[str]) -> None:
"""Test if the `@root_validator` of `Item` class works properly."""
try:
Item(title=title, description=description)
except ValidationError:
assert title is None and description is None
class TestFeed:
"""Test the `Feed` class."""
@settings(max_examples=Times.SOME)
@given(
builds(
Feed,
title=infer,
link=urls(),
description=infer,
language=infer,
copyright=infer,
managing_editor=infer,
web_master=infer,
pub_date=builds(arrow.utcnow) | none(),
last_build_date=builds(arrow.utcnow) | none(),
categories=lists(fixed_dictionaries({"name": text()})) | none(),
generator=infer,
docs=urls() | none(),
ttl=infer,
image=builds(Image, url=urls(), link=urls()) | none(),
items=lists(fixed_dictionaries({"title": text()})) | none(),
)
)
def test(self, feed: Feed) -> None:
"""Test if the `Feed` lass works properly."""
assert feed is not None
@settings(max_examples=Times.SOME)
@given(title=infer, link=urls(), description=infer)
def test_default_values(self, title: str, link: str, description: str) -> None:
"""Test the default value of `ttl`in the `Feed` class."""
assert Feed(title=title, link=link, description=description).ttl == 60
| StarcoderdataPython |
11379791 | from unittest.mock import MagicMock, patch
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import Signal
from django.test import TestCase, override_settings
from pynotify.helpers import (DeletedRelatedObject, SecureRelatedObject, autoload, get_from_context, get_import_path,
process_task, register, signal_map)
from .test_app.signals import autoload_signal
# MOCK OBJECTS ------------------------------------------------------------------------------------
test_signal = Signal(providing_args=['abc'])
class MockHandler1(MagicMock):
signal_kwargs = None
def __init__(self):
MockHandler1.signal_kwargs = None
def handle(self, signal_kwargs):
MockHandler1.signal_kwargs = signal_kwargs
class MockHandler2(MagicMock):
pass
class MockSerializer(MagicMock):
deserialize = MagicMock(side_effect=lambda value: value)
class MockRelatedObject:
a = 123
b = 456
def say_hello(self):
return 'Hello!'
def __str__(self):
return 'Related object'
# TESTS -------------------------------------------------------------------------------------------
class HelpersTestCase(TestCase):
def tearDown(self):
signal_map.remove(test_signal)
def test_signal_map_should_return_empty_list_for_unknown_signal(self):
self.assertEqual(signal_map.get(test_signal), [])
def test_signal_map_should_return_list_values_for_known_signal(self):
signal_map.add(test_signal, MockHandler1)
self.assertEqual(signal_map.get(test_signal), [MockHandler1])
signal_map.add(test_signal, MockHandler2)
self.assertEqual(signal_map.get(test_signal), [MockHandler1, MockHandler2])
def test_register_should_add_handler_class_and_allowed_senders_to_signal_map(self):
register(test_signal, MockHandler1, self.__class__)
self.assertEqual(signal_map.get(test_signal), [(MockHandler1, self.__class__)])
def test_register_should_connect_receive_function_to_the_signal(self):
register(test_signal, MockHandler1)
# not very clever test, but patching here is problematic
self.assertEqual(test_signal.receivers[0][0][0], 'pynotify')
@override_settings(PYNOTIFY_RECEIVER='abc123')
def test_receive_should_fail_if_receiver_is_misconfigured(self):
register(test_signal, MockHandler1)
with self.assertRaises(ImproperlyConfigured):
test_signal.send(sender='abc', abc=123)
@override_settings(PYNOTIFY_RECEIVER='pynotify.receivers.SynchronousReceiver')
def test_receive_should_pass_signal_kwargs_to_handler_through_receiver(self):
register(test_signal, MockHandler1)
test_signal.send(sender='abc', abc=123)
self.assertEqual(MockHandler1.signal_kwargs, {'abc': 123})
@override_settings(PYNOTIFY_ENABLED=False)
def test_receive_should_not_call_handler_if_pynotify_not_enabled(self):
MockHandler1.signal_kwargs = 'constant'
register(test_signal, MockHandler1)
test_signal.send(sender='abc', abc=123)
self.assertEqual(MockHandler1.signal_kwargs, 'constant')
@override_settings(PYNOTIFY_RECEIVER='pynotify.receivers.SynchronousReceiver')
def test_receive_should_not_call_handler_if_disallowed_sender_sent_the_signal(self):
MockHandler1.signal_kwargs = 'constant'
register(test_signal, MockHandler1, allowed_senders='abc')
test_signal.send(sender='def', abc=123)
self.assertEqual(MockHandler1.signal_kwargs, 'constant')
def test_process_task_should_call_handler(self):
process_task(
handler_class=get_import_path(MockHandler1),
serializer_class=get_import_path(MockSerializer),
signal_kwargs={'abc': 1234}
)
MockSerializer.deserialize.assert_called_once_with({'abc': 1234})
self.assertEqual(MockHandler1.signal_kwargs, {'abc': 1234})
def test_get_import_path_should_return_import_path_of_the_class(self):
self.assertEqual(get_import_path(MockHandler1), 'example.tests.test_helpers.MockHandler1')
@override_settings(PYNOTIFY_AUTOLOAD_MODULES=('example.tests.test_app.handlers',))
def test_handlers_should_be_autoloaded_from_specified_apps(self):
self.assertEqual(signal_map.get(autoload_signal), [])
autoload()
handler, _ = signal_map.get(autoload_signal)[0]
from .test_app.handlers import AutoloadHandler
self.assertEqual(handler, AutoloadHandler)
signal_map.remove(autoload_signal)
@override_settings(PYNOTIFY_AUTOLOAD_MODULES=('example.tests.test_app2.handlers',))
def test_if_autoload_fails_it_should_be_logged(self):
with patch('pynotify.helpers.logger') as logger:
autoload()
self.assertEqual(signal_map.get(autoload_signal), [])
logger.exception.assert_called_once()
@override_settings(PYNOTIFY_RELATED_OBJECTS_ALLOWED_ATTRIBUTES={})
def test_related_object_proxy_should_allow_only_string_representation(self):
obj = SecureRelatedObject(MockRelatedObject())
self.assertEqual(str(obj), 'Related object')
self.assertRaises(AttributeError, lambda: obj.a)
self.assertRaises(AttributeError, lambda: obj.b)
self.assertRaises(AttributeError, lambda: obj.xyz)
self.assertRaises(AttributeError, lambda: obj.say_hello())
@override_settings(PYNOTIFY_RELATED_OBJECTS_ALLOWED_ATTRIBUTES={'a', 'say_hello'})
def test_related_object_proxy_should_allow_only_defined_allowed_attributes(self):
obj = SecureRelatedObject(MockRelatedObject())
self.assertEqual(str(obj), 'Related object')
self.assertEqual(obj.a, 123)
self.assertEqual(obj.say_hello(), 'Hello!')
self.assertRaises(AttributeError, lambda: obj.b)
self.assertRaises(AttributeError, lambda: obj.xyz)
def test_deleted_related_object_should_have_string_representation_same_for_any_attribute(self):
obj = DeletedRelatedObject()
self.assertEqual(str(obj), '[DELETED]')
self.assertEqual(str(obj.x), '[DELETED]')
self.assertEqual(str(obj.x.y), '[DELETED]')
def test_get_from_context_should_return_variable_value_or_none(self):
self.assertIsNone(get_from_context('a', {}))
self.assertIsNone(get_from_context('a.b', {}))
self.assertEqual(get_from_context('a', {'a': 1}), 1)
self.assertIsNone(get_from_context('a.b', {'a': 1}))
self.assertEqual(get_from_context('a', {'a': {'b': 1}}), {'b': 1})
self.assertEqual(get_from_context('a.b', {'a': {'b': 1}}), 1)
related_object = MockRelatedObject()
self.assertEqual(get_from_context('obj', {'obj': related_object}), related_object)
self.assertEqual(get_from_context('obj.b', {'obj': related_object}), 456)
self.assertIsNone(get_from_context('obj.non_sense', {'obj': related_object}))
| StarcoderdataPython |
344602 | from django.db import models
from core.models.base import BaseModel
from core.models.taxonomic_order import TaxonomicOrder
class TaxonomicFamily(BaseModel):
class Meta:
app_label = 'core'
default_permissions = ()
db_table = 'taxonomic_families'
taxonomic_order = models.ForeignKey(TaxonomicOrder, on_delete=models.CASCADE)
name = models.CharField(max_length=45, unique=True)
def __str__(self):
return self.name
| StarcoderdataPython |
9707239 | from robocup_env.envs.base.robocup import RoboCup
import logging
import numpy as np
import torch
from ddpg import DDPG
from wrappers.normalized_actions import NormalizedActions
save_dir = "./saved_models"
log_dir = "./runs"
env_name = "collect"
seed = 1337
gamma = 0.99
tau = 0.001
noise_stddev = 0.5
hidden_size = [400, 300]
render_eval = True
n_test_cycles = 50 # Num. of episodes in the evaluation phases
def create_runs_dir():
from pathlib import Path
Path("./runs").mkdir(parents=True, exist_ok=True)
def get_run_num() -> int:
return 1
# from os import listdir
# from os.path import isdir, join
# import re
#
# folders = [f for f in listdir(log_dir) if isdir(join(log_dir, f))]
# run_num_pattern = f"{env_name}_([0-9]+)"
# for f in folders:
# result = re.search(run_num_pattern, f)
# if result is not None:
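# Hedged sketch (added, not in the original): a completed version of the
# commented-out run-number logic above, assuming the intent is to return
# 1 + the highest existing "<env_name>_<n>" run folder found in log_dir.
def get_run_num_sketch() -> int:
    from os import listdir
    from os.path import isdir, join
    import re
    if not isdir(log_dir):
        return 1
    folders = [f for f in listdir(log_dir) if isdir(join(log_dir, f))]
    run_num_pattern = f"{env_name}_([0-9]+)"
    nums = [int(m.group(1)) for f in folders
            for m in [re.search(run_num_pattern, f)] if m is not None]
    return max(nums) + 1 if nums else 1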
def main():
checkpoint_dir = f"{save_dir}/{env_name}"
create_runs_dir()
env = NormalizedActions(RoboCup())
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Defined and build a DDPG agent
observation_space_size = 10
agent = DDPG(gamma, tau, hidden_size,
# env.observation_space.shape[0],
observation_space_size,
env.action_space,
checkpoint_dir=checkpoint_dir)
# checkpoint_path = None
checkpoint_path = "saved_models/collect/ep_2886522.pth.tar"
start_step, memory = agent.load_checkpoint(checkpoint_path)
# Create logger
logger = logging.getLogger('train')
logger.setLevel(logging.INFO)
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info("Using {}".format(device))
# Start training
logger.info('Testing agent on {} env'.format(env_name))
test_rewards = []
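    # Added comment: run n_test_cycles evaluation episodes with the deterministic
    # policy (calc_action is called without exploration noise) and report the
    # mean episode return at the end.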
for j in range(n_test_cycles):
state = torch.Tensor([env.reset()]).to(device)
test_reward = 0
while True:
if render_eval:
env.render()
action = agent.calc_action(state) # Selection without noise
next_state, reward, done, _ = env.step(action.cpu().numpy()[0])
test_reward += reward
next_state = torch.Tensor([next_state]).to(device)
state = next_state
if done:
break
test_rewards.append(test_reward)
logger.info("Mean test reward {}".format(np.mean(test_rewards)))
env.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
11257295 | <filename>tests/test_utils.py
from audacitorch.utils import save_model
import torch
import audacitorch
def test_metadata(broken_metadata, metadata):
success, msg = audacitorch.utils.validate_metadata(broken_metadata)
assert not success
success, msg = audacitorch.utils.validate_metadata(metadata)
assert success
def test_save_model(wav2wavmodel, metadata):
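    # Added comment: round-trip check. Script the model, save it together with its
    # metadata, reload the serialized TorchScript artifact and verify it matches
    # the in-memory scripted model on the example inputs.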
from pathlib import Path
jit_model = torch.jit.script(wav2wavmodel)
path = Path('./test_save_model/')
path.mkdir(exist_ok=True, parents=True)
save_model(jit_model, metadata, path)
assert (path / 'model.pt').exists()
assert (path / 'metadata.json').exists()
loaded_model = torch.jit.load(path / 'model.pt')
for x in audacitorch.utils.get_example_inputs():
assert torch.allclose(loaded_model(x), jit_model(x)) | StarcoderdataPython |
140612 | #!/usr/bin/env python
import os
import os.path as op
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import seaborn as sns
# 1. load a dataset from a file
# 2. "organize" that file, so we can access columns *or* rows of it easily
# 3. compute some "summary statisics" about the dataset
# 4. print those summary statistics
# 1. load a dataset
# 1a. accept arbitrary filename as argument
from argparse import ArgumentParser
parser = ArgumentParser(description = 'A CSV reader + stats maker')
parser.add_argument('csvfile',
type = str,
help = 'path to the input csv file.')
parsed_args = parser.parse_args()
my_csv_file = parsed_args.csvfile
assert op.isfile(my_csv_file), "Please give us a real file, thx"
print('woot, the file exists')
#*********************************************************
# Load data, Organize dataset and Add header to the dataframe
#*********************************************************
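# Added note: sep=r'\s+|,' is a regular expression, so the input may be either
# whitespace- or comma-delimited; engine='python' is needed because the C parser
# does not support regex separators, and header=None plus names=[...] attaches
# the WDBC column names since the raw data file has no header row.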
data = pd.read_csv(my_csv_file, sep=r'\s+|,', header=None, engine='python',
names = ['ID number', 'Diagnosis','Radius_M', 'Texture_M', 'Perimeter_M', 'Area_M','Smoothness_M', 'Compactness_M', 'Concavity_M', 'ConcavePoints_M', 'Symmetry_M', 'FractalDimension_M',
'Radius_SE', 'Texture_SE', 'Perimeter_SE', 'Area_SE','Smoothness_SE', 'Compactness_SE', 'Concavity_SE', 'ConcavePoints_SE', 'Symmetry_SE', 'FractalDimension_SE',
'Radius_W', 'Texture_W', 'Perimeter_W', 'Area_W','Smoothness_W', 'Compactness_W', 'Concavity_W', 'ConcavePoints_W', 'Symmetry_W', 'FractalDimension_W'])
data.drop(['ID number'], axis=1, inplace=True)
#print(data.head())
#print(data.shape)
#print(data.dtypes)
#print(data.info())
print(data.columns)
#print(data.corr())
# Find missing values
print('Missing values:\n{}'.format(data.isnull().sum()))
# Find duplicated records
print('\nNumber of duplicated records: {}'.format(data.duplicated().sum()))
# Find the unique values of 'diagnosis'
print('\nUnique values of "Diagnosis": {}'.format(data['Diagnosis'].unique()))
# Access any columns (all rows, columns 3-4)
#print(data.iloc[:,3:5])
# Access any rows (rows 32-34, all columns)
#print(data.iloc[32:35,:])
#Access any value
#print(data.iloc[6,7])
# Compute Mean and STD
#print(np.mean(data))
#print(np.std(data))
def plotHistogram(data):
for i, column in enumerate(data.columns):
plt.figure(i)
plt.hist(data[column], bins = 'auto', alpha = 0.5, label = 'x')
plt.savefig('Hist_{}.png'.format(column))
#plt.show()
def plotScatter(data):
i = 1
    columncount = len(data.columns)
    columncount = 4 # override: only scatter the first few columns to avoid generating too many plots
    while i < columncount - 1:
        j = i + 1
        while j < columncount:
            plt.figure()  # new figure per column pair so points do not accumulate across saved plots
            plt.scatter(x = data.iloc[:, i], y = data.iloc[:, j])
            plt.savefig('Scatt_{}.png'.format(data.columns[i] + ' ' + data.columns[j]))
            #plt.show()
            j = j + 1
        i = i + 1
def plotHeatmap(data):
features_mean = ['Radius_M', 'Texture_M', 'Perimeter_M', 'Area_M','Smoothness_M', 'Compactness_M', 'Concavity_M', 'ConcavePoints_M', 'Symmetry_M', 'FractalDimension_M']
plt.figure(figsize=(15,15))
heat = sns.heatmap(data[features_mean].corr(), vmax = 1, square = True, annot = True)
figure = heat.get_figure()
figure.savefig('Heat.png', dpi = 400)
#plt.show()
plotHistogram(data)
plotHeatmap(data)
plotScatter(data)
| StarcoderdataPython |
6560892 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# THE VALUES IN THE ENUM ARE AUTO-GENERATED. DO NOT EDIT THIS MANUALLY.
# --------------------------------------------------------------------------------------------
from enum import Enum
# pylint: disable=line-too-long
class SystemEventNames(str, Enum):
"""
This enum represents the names of the various event types for the system events published to
Azure Event Grid. To check the list of recognizable system topics,
visit https://docs.microsoft.com/azure/event-grid/system-topics.
"""
# These names at the top are 'corrected' aliases of duplicate values that appear below, which are
# deprecated but maintained for backwards compatibility.
AcsChatMemberAddedToThreadWithUserEventName = 'Microsoft.Communication.ChatMemberAddedToThreadWithUser'
ResourceWriteFailureEventName = 'Microsoft.Resources.ResourceWriteFailure'
IoTHubDeviceDeletedEventName = 'Microsoft.Devices.DeviceDeleted'
IoTHubDeviceDisconnectedEventName = 'Microsoft.Devices.DeviceDisconnected'
ResourceDeleteFailureEventName = 'Microsoft.Resources.ResourceDeleteFailure'
ResourceDeleteCancelEventName = 'Microsoft.Resources.ResourceDeleteCancel'
AcsChatThreadParticipantAddedEventName = 'Microsoft.Communication.ChatThreadParticipantAdded'
ResourceDeleteSuccessEventName = 'Microsoft.Resources.ResourceDeleteSuccess'
EventGridSubscriptionValidationEventName = 'Microsoft.EventGrid.SubscriptionValidationEvent'
ResourceWriteSuccessEventName = 'Microsoft.Resources.ResourceWriteSuccess'
ResourceActionSuccessEventName = 'Microsoft.Resources.ResourceActionSuccess'
ResourceWriteCancelEventName = 'Microsoft.Resources.ResourceWriteCancel'
ResourceActionFailureEventName = 'Microsoft.Resources.ResourceActionFailure'
AcsChatMemberRemovedFromThreadWithUserEventName = 'Microsoft.Communication.ChatMemberRemovedFromThreadWithUser'
IoTHubDeviceConnectedEventName = 'Microsoft.Devices.DeviceConnected'
EventGridSubscriptionDeletedEventName = 'Microsoft.EventGrid.SubscriptionDeletedEvent'
AcsChatThreadParticipantRemovedEventName = 'Microsoft.Communication.ChatThreadParticipantRemoved'
ResourceActionCancelEventName = 'Microsoft.Resources.ResourceActionCancel'
IoTHubDeviceCreatedEventName = 'Microsoft.Devices.DeviceCreated'
# Aliases end here
AcsChatMessageDeletedEventName = 'Microsoft.Communication.ChatMessageDeleted'
AcsChatMessageDeletedInThreadEventName = 'Microsoft.Communication.ChatMessageDeletedInThread'
AcsChatMessageEditedEventName = 'Microsoft.Communication.ChatMessageEdited'
AcsChatMessageEditedInThreadEventName = 'Microsoft.Communication.ChatMessageEditedInThread'
AcsChatMessageReceivedEventName = 'Microsoft.Communication.ChatMessageReceived'
AcsChatMessageReceivedInThreadEventName = 'Microsoft.Communication.ChatMessageReceivedInThread'
AcsChatParticipantAddedToThreadEventName = 'Microsoft.Communication.ChatThreadParticipantAdded'
AcsChatParticipantAddedToThreadWithUserEventName = 'Microsoft.Communication.ChatParticipantAddedToThreadWithUser'
AcsChatParticipantRemovedFromThreadEventName = 'Microsoft.Communication.ChatThreadParticipantRemoved'
AcsChatParticipantRemovedFromThreadWithUserEventName = 'Microsoft.Communication.ChatParticipantRemovedFromThreadWithUser'
AcsChatThreadCreatedEventName = 'Microsoft.Communication.ChatThreadCreated'
AcsChatThreadCreatedWithUserEventName = 'Microsoft.Communication.ChatThreadCreatedWithUser'
AcsChatThreadDeletedEventName = 'Microsoft.Communication.ChatThreadDeleted'
AcsChatThreadPropertiesUpdatedEventName = 'Microsoft.Communication.ChatThreadPropertiesUpdated'
AcsChatThreadPropertiesUpdatedPerUserEventName = 'Microsoft.Communication.ChatThreadPropertiesUpdatedPerUser'
AcsChatThreadWithUserDeletedEventName = 'Microsoft.Communication.ChatThreadWithUserDeleted'
AcsRecordingFileStatusUpdatedEventName = 'Microsoft.Communication.RecordingFileStatusUpdated'
AcsSmsDeliveryReportReceivedEventName = 'Microsoft.Communication.SMSDeliveryReportReceived'
AcsSmsReceivedEventName = 'Microsoft.Communication.SMSReceived'
AcsUserDisconnectedEventName = 'Microsoft.Communication.UserDisconnected'
ApiManagementApiCreatedEventName = 'Microsoft.ApiManagement.ApiCreated'
ApiManagementApiDeletedEventName = 'Microsoft.ApiManagement.ApiDeleted'
ApiManagementApiReleaseCreatedEventName = 'Microsoft.ApiManagement.ApiReleaseCreated'
ApiManagementApiReleaseDeletedEventName = 'Microsoft.ApiManagement.ApiReleaseDeleted'
ApiManagementApiReleaseUpdatedEventName = 'Microsoft.ApiManagement.ApiReleaseUpdated'
ApiManagementApiUpdatedEventName = 'Microsoft.ApiManagement.ApiUpdated'
ApiManagementProductCreatedEventName = 'Microsoft.ApiManagement.ProductCreated'
ApiManagementProductDeletedEventName = 'Microsoft.ApiManagement.ProductDeleted'
ApiManagementProductUpdatedEventName = 'Microsoft.ApiManagement.ProductUpdated'
ApiManagementSubscriptionCreatedEventName = 'Microsoft.ApiManagement.SubscriptionCreated'
ApiManagementSubscriptionDeletedEventName = 'Microsoft.ApiManagement.SubscriptionDeleted'
ApiManagementSubscriptionUpdatedEventName = 'Microsoft.ApiManagement.SubscriptionUpdated'
ApiManagementUserCreatedEventName = 'Microsoft.ApiManagement.UserCreated'
ApiManagementUserDeletedEventName = 'Microsoft.ApiManagement.UserDeleted'
ApiManagementUserUpdatedEventName = 'Microsoft.ApiManagement.UserUpdated'
AppConfigurationKeyValueDeletedEventName = 'Microsoft.AppConfiguration.KeyValueDeleted'
AppConfigurationKeyValueModifiedEventName = 'Microsoft.AppConfiguration.KeyValueModified'
ContainerRegistryChartDeletedEventName = 'Microsoft.ContainerRegistry.ChartDeleted'
ContainerRegistryChartPushedEventName = 'Microsoft.ContainerRegistry.ChartPushed'
ContainerRegistryImageDeletedEventName = 'Microsoft.ContainerRegistry.ImageDeleted'
ContainerRegistryImagePushedEventName = 'Microsoft.ContainerRegistry.ImagePushed'
ContainerServiceNewKubernetesVersionAvailableEventName = 'Microsoft.ContainerService.NewKubernetesVersionAvailable'
EventHubCaptureFileCreatedEventName = 'Microsoft.EventHub.CaptureFileCreated'
HealthcareFhirResourceCreatedEventName = 'Microsoft.HealthcareApis.FhirResourceCreated'
HealthcareFhirResourceDeletedEventName = 'Microsoft.HealthcareApis.FhirResourceDeleted'
HealthcareFhirResourceUpdatedEventName = 'Microsoft.HealthcareApis.FhirResourceUpdated'
IotHubDeviceConnectedEventName = 'Microsoft.Devices.DeviceConnected'
IotHubDeviceCreatedEventName = 'Microsoft.Devices.DeviceCreated'
IotHubDeviceDeletedEventName = 'Microsoft.Devices.DeviceDeleted'
IotHubDeviceDisconnectedEventName = 'Microsoft.Devices.DeviceDisconnected'
IotHubDeviceTelemetryEventName = 'Microsoft.Devices.DeviceTelemetry'
KeyVaultCertificateExpiredEventName = 'Microsoft.KeyVault.CertificateExpired'
KeyVaultCertificateNearExpiryEventName = 'Microsoft.KeyVault.CertificateNearExpiry'
KeyVaultCertificateNewVersionCreatedEventName = 'Microsoft.KeyVault.CertificateNewVersionCreated'
KeyVaultKeyExpiredEventName = 'Microsoft.KeyVault.KeyExpired'
KeyVaultKeyNearExpiryEventName = 'Microsoft.KeyVault.KeyNearExpiry'
KeyVaultKeyNewVersionCreatedEventName = 'Microsoft.KeyVault.KeyNewVersionCreated'
KeyVaultSecretExpiredEventName = 'Microsoft.KeyVault.SecretExpired'
KeyVaultSecretNearExpiryEventName = 'Microsoft.KeyVault.SecretNearExpiry'
KeyVaultSecretNewVersionCreatedEventName = 'Microsoft.KeyVault.SecretNewVersionCreated'
KeyVaultVaultAccessPolicyChangedEventName = 'Microsoft.KeyVault.VaultAccessPolicyChanged'
MachineLearningServicesDatasetDriftDetectedEventName = 'Microsoft.MachineLearningServices.DatasetDriftDetected'
MachineLearningServicesModelDeployedEventName = 'Microsoft.MachineLearningServices.ModelDeployed'
MachineLearningServicesModelRegisteredEventName = 'Microsoft.MachineLearningServices.ModelRegistered'
MachineLearningServicesRunCompletedEventName = 'Microsoft.MachineLearningServices.RunCompleted'
MachineLearningServicesRunStatusChangedEventName = 'Microsoft.MachineLearningServices.RunStatusChanged'
MapsGeofenceEnteredEventName = 'Microsoft.Maps.GeofenceEntered'
MapsGeofenceExitedEventName = 'Microsoft.Maps.GeofenceExited'
MapsGeofenceResultEventName = 'Microsoft.Maps.GeofenceResult'
MediaJobCanceledEventName = 'Microsoft.Media.JobCanceled'
MediaJobCancelingEventName = 'Microsoft.Media.JobCanceling'
MediaJobErroredEventName = 'Microsoft.Media.JobErrored'
MediaJobFinishedEventName = 'Microsoft.Media.JobFinished'
MediaJobOutputCanceledEventName = 'Microsoft.Media.JobOutputCanceled'
MediaJobOutputCancelingEventName = 'Microsoft.Media.JobOutputCanceling'
MediaJobOutputErroredEventName = 'Microsoft.Media.JobOutputErrored'
MediaJobOutputFinishedEventName = 'Microsoft.Media.JobOutputFinished'
MediaJobOutputProcessingEventName = 'Microsoft.Media.JobOutputProcessing'
MediaJobOutputProgressEventName = 'Microsoft.Media.JobOutputProgress'
MediaJobOutputScheduledEventName = 'Microsoft.Media.JobOutputScheduled'
MediaJobOutputStateChangeEventName = 'Microsoft.Media.JobOutputStateChange'
MediaJobProcessingEventName = 'Microsoft.Media.JobProcessing'
MediaJobScheduledEventName = 'Microsoft.Media.JobScheduled'
MediaJobStateChangeEventName = 'Microsoft.Media.JobStateChange'
MediaLiveEventChannelArchiveHeartbeatEventName = 'Microsoft.Media.LiveEventChannelArchiveHeartbeat'
MediaLiveEventConnectionRejectedEventName = 'Microsoft.Media.LiveEventConnectionRejected'
MediaLiveEventEncoderConnectedEventName = 'Microsoft.Media.LiveEventEncoderConnected'
MediaLiveEventEncoderDisconnectedEventName = 'Microsoft.Media.LiveEventEncoderDisconnected'
MediaLiveEventIncomingDataChunkDroppedEventName = 'Microsoft.Media.LiveEventIncomingDataChunkDropped'
MediaLiveEventIncomingStreamReceivedEventName = 'Microsoft.Media.LiveEventIncomingStreamReceived'
MediaLiveEventIncomingStreamsOutOfSyncEventName = 'Microsoft.Media.LiveEventIncomingStreamsOutOfSync'
MediaLiveEventIncomingVideoStreamsOutOfSyncEventName = 'Microsoft.Media.LiveEventIncomingVideoStreamsOutOfSync'
MediaLiveEventIngestHeartbeatEventName = 'Microsoft.Media.LiveEventIngestHeartbeat'
MediaLiveEventTrackDiscontinuityDetectedEventName = 'Microsoft.Media.LiveEventTrackDiscontinuityDetected'
PolicyInsightsPolicyStateChangedEventName = 'Microsoft.PolicyInsights.PolicyStateChanged'
PolicyInsightsPolicyStateCreatedEventName = 'Microsoft.PolicyInsights.PolicyStateCreated'
PolicyInsightsPolicyStateDeletedEventName = 'Microsoft.PolicyInsights.PolicyStateDeleted'
RedisExportRDBCompletedEventName = 'Microsoft.Cache.ExportRDBCompleted'
RedisImportRDBCompletedEventName = 'Microsoft.Cache.ImportRDBCompleted'
RedisPatchingCompletedEventName = 'Microsoft.Cache.PatchingCompleted'
RedisScalingCompletedEventName = 'Microsoft.Cache.ScalingCompleted'
ResourceActionCancelName = 'Microsoft.Resources.ResourceActionCancel'
ResourceActionFailureName = 'Microsoft.Resources.ResourceActionFailure'
ResourceActionSuccessName = 'Microsoft.Resources.ResourceActionSuccess'
ResourceDeleteCancelName = 'Microsoft.Resources.ResourceDeleteCancel'
ResourceDeleteFailureName = 'Microsoft.Resources.ResourceDeleteFailure'
ResourceDeleteSuccessName = 'Microsoft.Resources.ResourceDeleteSuccess'
ResourceWriteCancelName = 'Microsoft.Resources.ResourceWriteCancel'
ResourceWriteFailureName = 'Microsoft.Resources.ResourceWriteFailure'
ResourceWriteSuccessName = 'Microsoft.Resources.ResourceWriteSuccess'
ServiceBusActiveMessagesAvailablePeriodicNotificationsEventName = 'Microsoft.ServiceBus.ActiveMessagesAvailablePeriodicNotifications'
ServiceBusActiveMessagesAvailableWithNoListenersEventName = 'Microsoft.ServiceBus.ActiveMessagesAvailableWithNoListeners'
ServiceBusDeadletterMessagesAvailablePeriodicNotificationsEventName = 'Microsoft.ServiceBus.DeadletterMessagesAvailablePeriodicNotifications'
ServiceBusDeadletterMessagesAvailableWithNoListenersEventName = 'Microsoft.ServiceBus.DeadletterMessagesAvailableWithNoListeners'
SignalRServiceClientConnectionConnectedEventName = 'Microsoft.SignalRService.ClientConnectionConnected'
SignalRServiceClientConnectionDisconnectedEventName = 'Microsoft.SignalRService.ClientConnectionDisconnected'
StorageAsyncOperationInitiatedEventName = 'Microsoft.Storage.AsyncOperationInitiated'
StorageBlobCreatedEventName = 'Microsoft.Storage.BlobCreated'
StorageBlobDeletedEventName = 'Microsoft.Storage.BlobDeleted'
StorageBlobInventoryPolicyCompletedEventName = 'Microsoft.Storage.BlobInventoryPolicyCompleted'
StorageBlobRenamedEventName = 'Microsoft.Storage.BlobRenamed'
StorageBlobTierChangedEventName = 'Microsoft.Storage.BlobTierChanged'
StorageDirectoryCreatedEventName = 'Microsoft.Storage.DirectoryCreated'
StorageDirectoryDeletedEventName = 'Microsoft.Storage.DirectoryDeleted'
StorageDirectoryRenamedEventName = 'Microsoft.Storage.DirectoryRenamed'
StorageLifecyclePolicyCompletedEventName = 'Microsoft.Storage.LifecyclePolicyCompleted'
SubscriptionDeletedEventName = 'Microsoft.EventGrid.SubscriptionDeletedEvent'
SubscriptionValidationEventName = 'Microsoft.EventGrid.SubscriptionValidationEvent'
WebAppServicePlanUpdatedEventName = 'Microsoft.Web.AppServicePlanUpdated'
WebAppUpdatedEventName = 'Microsoft.Web.AppUpdated'
WebBackupOperationCompletedEventName = 'Microsoft.Web.BackupOperationCompleted'
WebBackupOperationFailedEventName = 'Microsoft.Web.BackupOperationFailed'
WebBackupOperationStartedEventName = 'Microsoft.Web.BackupOperationStarted'
WebRestoreOperationCompletedEventName = 'Microsoft.Web.RestoreOperationCompleted'
WebRestoreOperationFailedEventName = 'Microsoft.Web.RestoreOperationFailed'
WebRestoreOperationStartedEventName = 'Microsoft.Web.RestoreOperationStarted'
WebSlotSwapCompletedEventName = 'Microsoft.Web.SlotSwapCompleted'
WebSlotSwapFailedEventName = 'Microsoft.Web.SlotSwapFailed'
WebSlotSwapStartedEventName = 'Microsoft.Web.SlotSwapStarted'
WebSlotSwapWithPreviewCancelledEventName = 'Microsoft.Web.SlotSwapWithPreviewCancelled'
WebSlotSwapWithPreviewStartedEventName = 'Microsoft.Web.SlotSwapWithPreviewStarted'
ContainerRegistryArtifactEventName = 'Microsoft.AppConfiguration.KeyValueModified'
KeyVaultAccessPolicyChangedEventName = 'Microsoft.KeyVault.VaultAccessPolicyChanged'
ContainerRegistryEventName = 'Microsoft.ContainerRegistry.ChartPushed'
ServiceBusDeadletterMessagesAvailableWithNoListenerEventName = 'Microsoft.ServiceBus.DeadletterMessagesAvailableWithNoListeners'
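# Hedged usage sketch (added, not part of the SDK source): members are plain
# strings, so they compare directly against the event type field of a received
# Event Grid event. The event object and handler below are illustrative only.
#
#     if event.event_type == SystemEventNames.StorageBlobCreatedEventName:
#         handle_blob_created(event)   # hypothetical handler
assert SystemEventNames.StorageBlobCreatedEventName == 'Microsoft.Storage.BlobCreated'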
| StarcoderdataPython |
164098 | <filename>python/dynamic_graph/sot/torque_control/identification/identify_motor_with_current.py<gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
import sys
#import dynamic_graph.sot.torque_control.utils.plot_utils as plot_utils
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 4;
import matplotlib.pyplot as plt
from motor_model import Motor_model
from dynamic_graph.sot.torque_control.hrp2.control_manager_conf import IN_OUT_GAIN
from identify_motor_static import identify_motor_static
from identify_motor_low_level import identify_motor_low_level
from identify_motor_vel import identify_motor_vel
from identify_motor_acc import identify_motor_acc
from identification_utils import jID
'''
motor model :
i(t) = Kt*tau(t) + Kv*dq(t) + Ka*ddq(t) + Kf*Sign(dq)
'''
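# Added illustration (hedged): a minimal sketch of the formula in the docstring
# above. This is not the project's Motor_model class imported above, just the
# model written out directly; parameter names follow the docstring.
def model_current_sketch(tau_t, dq_t, ddq_t, Kt, Kv, Ka, Kf):
    """Return i(t) = Kt*tau(t) + Kv*dq(t) + Ka*ddq(t) + Kf*sign(dq(t)) for scalars or numpy arrays."""
    return Kt * tau_t + Kv * dq_t + Ka * ddq_t + Kf * np.sign(dq_t)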
#~ DZ=70
DZ = 0.0
dt = 0.001
ZERO_VEL_THRESHOLD = 0.1
POSITIVE_VEL_THRESHOLD = 0.001
ZERO_ACC_THRESHOLD = 0.2
ZERO_JERK_THRESHOLD = 3.0
CURRENT_SATURATION = 9.5
Nvel = 10
SHOW_THRESHOLD_EFFECT = True
USING_CONTROL_AS_CURRENT_MEASURE = False
INVERT_CURRENT = False
result_dir = '../../../../../results/hrp2_motor_identification/'
IDENTIFICATION_MODE ='static'
IDENTIFICATION_MODE ='vel'
#IDENTIFICATION_MODE ='acc'
#IDENTIFICATION_MODE ='test_model' #Compare Model Vs Measurment
#JOINT_NAME = 'rhy';
#JOINT_NAME = 'rhr';
#JOINT_NAME = 'rhp';
#JOINT_NAME = 'rk';
#JOINT_NAME = 'rap';
#JOINT_NAME = 'rar';
#JOINT_NAME = 'lhy'; #USING_CONTROL_AS_CURRENT_MEASURE = True # 6
#JOINT_NAME = 'lhr'; #USING_CONTROL_AS_CURRENT_MEASURE = True # 7
JOINT_NAME = 'lhp'; #USING_CONTROL_AS_CURRENT_MEASURE = True # 8
#JOINT_NAME = 'lk'; # 9 ok
#JOINT_NAME = 'lap'; # 10 ok
#JOINT_NAME = 'lar'; # 11 ok
#data_folder='../../results/20161114_153220_rk_vel/'
#data_folder='../../results/20161114_151812_rhp_vel/'
#data_folder='../../results/20170203_164133_com_sin_z_001/'
#JOINT_NAME = 'rhp'
#data_folder='../../results/20170203_164133_com_sin_z_001/'
#data_folder= '../../results/20161114_152706_rk_acc/' ; INVERT_CURRENT = True
#data_folder = result_dir + '../20170911172000_com_sin_3cm/';
if (IDENTIFICATION_MODE != 'test_model') :
if(JOINT_NAME == 'rhy' ):
INVERT_CURRENT = True
Nvel = 9
data_folder_static = result_dir+'20161114_135332_rhy_static/';
data_folder_vel = result_dir+'20161114_143152_rhy_vel/';
data_folder_acc = result_dir+'20161114_142351_rhy_acc/';
if(JOINT_NAME == 'rhr' ):
INVERT_CURRENT = True
Nvel = 10
data_folder_static = result_dir+'20161114_144232_rhr_static/';
data_folder_vel = result_dir+'20161114_150356_rhr_vel/';
data_folder_acc = result_dir+'20161114_145456_rhr_acc/';
if(JOINT_NAME == 'rhp' ):
data_folder_static = result_dir+'20161114_150722_rhp_static/';
data_folder_vel = result_dir+'20161114_151812_rhp_vel/';
data_folder_acc = result_dir+'20161114_151259_rhp_acc/';
if(JOINT_NAME == 'rk' ):
INVERT_CURRENT = True
data_folder_static = result_dir+'20161114_152140_rk_static/';
# data_folder_static = result_dir+'../20170918_180023_rk_current_chirp/'
data_folder_vel = result_dir+'20161114_153220_rk_vel/';
data_folder_acc = result_dir+'20161114_152706_rk_acc/';
if(JOINT_NAME == 'rap' ):
INVERT_CURRENT = True
data_folder_static = result_dir+'20161114_153739_rap_static/';
data_folder_vel = result_dir+'20161114_154559_rap_vel/';
data_folder_acc = result_dir+'20161114_154316_rap_acc/';
if(JOINT_NAME == 'rar' ):
data_folder_static = result_dir+'20161114_154945_rar_static/';
data_folder_vel = result_dir+'20161114_160038_rar_vel/';
data_folder_acc = result_dir+'20161114_155545_rar_acc/';
if(JOINT_NAME == 'lhy' ):
data_folder_static = result_dir+'20171002_163413_lhy_static/';
data_folder_vel = result_dir+'/20171002_151718_lhy_vel/';
data_folder_acc = result_dir+'20170113_144710_lhy_const_acc/';
if(JOINT_NAME == 'lhr' ):
data_folder_static = result_dir+'20171002_164436_lhr_static/';
data_folder_vel = result_dir+'20171002_153334_lhr_vel/';
data_folder_acc = result_dir+'20170113_145826_lhr_const_acc/';
if(JOINT_NAME == 'lhp' ):
data_folder_static = result_dir+'20171002_165335_lhp_static/';
data_folder_vel = result_dir+'20171002_154449_lhp_vel/';
data_folder_acc = result_dir+'20170113_151103_lhp_const_acc/';
if(JOINT_NAME == 'lk' ):
data_folder_static = result_dir+'20170113_151748_lk_static/';
data_folder_vel = result_dir+'20170113_152924_lk_const_vel/';
data_folder_acc = result_dir+'20170113_152606_lk_const_acc/';
if(JOINT_NAME == 'lap' ):
data_folder_static = result_dir+'20170113_154007_lap_static/';
data_folder_vel = result_dir+'20170113_154834_lap_const_vel/';
data_folder_acc = result_dir+'20170113_154303_lap_const_acc/';
if(JOINT_NAME == 'lar' ):
data_folder_static = result_dir+'20170113_155150_lar_static/';
data_folder_vel = result_dir+'20170113_160057_lar_const_vel/';
data_folder_acc = result_dir+'20170113_155706_lar_const_acc/';
if (IDENTIFICATION_MODE=='static') : data_folder = data_folder_static
if (IDENTIFICATION_MODE=='vel') : data_folder = data_folder_vel
if (IDENTIFICATION_MODE=='acc') : data_folder = data_folder_acc
JOINT_ID = jID[JOINT_NAME]
DATA_FILE_NAME = 'data_j'+str(JOINT_ID)+'.npz';
''' Load data from file '''
try:
data = np.load(data_folder+DATA_FILE_NAME);
enc = np.squeeze(data['enc']);
dq = np.squeeze(data['dq']);
ddq = np.squeeze(data['ddq']);
tau = np.squeeze(data['tau']);
ctrl = np.squeeze(data['ctrl']);
current = np.squeeze(data['current']);
except IOError:
print "Impossible to read data file %s" % (data_folder+DATA_FILE_NAME);
sys.exit("Run script compress_identification_data.py to generate data file from tracer log files.");
maskSaturation=np.logical_and( (current>-CURRENT_SATURATION) , (current<CURRENT_SATURATION))
if USING_CONTROL_AS_CURRENT_MEASURE:
maskCtrlNotInDZ = np.logical_and( ctrl >= (DZ *0.8) ,ctrl <= -(DZ*0.8) )
maskSaturation=np.logical_or(maskSaturation, maskCtrlNotInDZ)
if INVERT_CURRENT:
current = - current
enc = enc [maskSaturation];
dq = dq [maskSaturation];
ddq = ddq [maskSaturation];
tau = tau [maskSaturation];
ctrl = ctrl [maskSaturation];
current = current[maskSaturation];
if USING_CONTROL_AS_CURRENT_MEASURE:
current_est = current.copy()
#~ maskUpDZ = ctrl > DZ
#~ maskDnDZ = ctrl < DZ
#~ maskInDZ = np.logical_and( ctrl <= DZ ,ctrl >= -DZ )
current = ctrl / IN_OUT_GAIN
#~ current[maskUpDZ] = (ctrl[maskUpDZ]-DZ) /IN_OUT_GAIN
#~ current[maskDnDZ] = (ctrl[maskDnDZ]+DZ) /IN_OUT_GAIN
#~ current[maskInDZ] = 0.0
#IDENTIFICATION_MODE='low_level'
if(IDENTIFICATION_MODE=='low_level'):
identify_motor_low_level(dq, ctrl, current);
#Ktau,Tau0 Identification
if(IDENTIFICATION_MODE=='static'):
(Ktp, Ktn, Ks, DZ) = identify_motor_static(enc, dq, ctrl, current, tau, JOINT_ID, JOINT_NAME, ZERO_VEL_THRESHOLD,
POSITIVE_VEL_THRESHOLD, SHOW_THRESHOLD_EFFECT);
#save parameters for next identification level**********************
np.savez(data_folder+'motor_param_'+JOINT_NAME+'.npz',Ktp=Ktp, Ktn=Ktn, Ks=Ks, DZ=DZ)
plt.savefig(data_folder+"static_"+JOINT_NAME+".jpg")
plt.show()
if(IDENTIFICATION_MODE=='vel' or IDENTIFICATION_MODE=='acc'):
#load parameters from last identification level*********************
try:
data_motor_param = np.load(data_folder_static+'motor_param_'+JOINT_NAME+'.npz')
Ktp=(data_motor_param['Ktp'].item())
Ktn=(data_motor_param['Ktn'].item())
except IOError:
print "Impossible to read data file %s" % (data_folder_static+'motor_param_'+JOINT_NAME+'.npz');
sys.exit("Run identification on static experiments.");
#Kd Identification
if(IDENTIFICATION_MODE=='vel'):
Ks =(data_motor_param['Ks'].item())
(Kvp, Kvn, Kfp, Kfn, DeadZone, K_bemf) = identify_motor_vel(dt, dq, ddq, ctrl, current, tau, Ktp, Ktn, Ks,
ZERO_VEL_THRESHOLD, ZERO_ACC_THRESHOLD,
Nvel, SHOW_THRESHOLD_EFFECT);
np.savez(data_folder+'motor_param_'+JOINT_NAME+'.npz', Ktp=Ktp, Ktn=Ktn, Kvp=Kvp, Kvn=Kvn,
Kfp=Kfp, Kfn=Kfn, DeadZone=DeadZone, K_bemf=K_bemf)
warning = ""
if USING_CONTROL_AS_CURRENT_MEASURE :
warning = " (Current sensor not used)"
print "cur_sens_gains[%d]= %f #Using %s"% (JOINT_ID, Ks, data_folder_static.split('/')[-2] + warning);
print "deadzone[%d] = %f #Using %s"% (JOINT_ID, data_motor_param['DZ'].item(), data_folder_static.split('/')[-2] + warning);
print "deadzone[%d] = %f #Using %s"% (JOINT_ID, DeadZone, data_folder_vel.split('/')[-2] + warning);
print "K_bemf[%d] = %f # [Amp/Rad.s-1] Using %s"% (JOINT_ID, K_bemf, data_folder_vel.split('/')[-2] + warning);
print 'Kt_p[%d] = %f #Using %s' % (JOINT_ID, Ktp, data_folder_static.split('/')[-2] + warning);
print 'Kt_n[%d] = %f #Using %s' % (JOINT_ID, Ktn, data_folder_static.split('/')[-2] + warning);
print 'Kv_p[%d] = %f #Using %s' % (JOINT_ID, Kvp, data_folder_vel.split('/')[-2] + warning);
print 'Kv_n[%d] = %f #Using %s' % (JOINT_ID, Kvn, data_folder_vel.split('/')[-2] + warning);
print 'Kf_p[%d] = %f #Using %s' % (JOINT_ID, Kfp, data_folder_vel.split('/')[-2] + warning);
print 'Kf_n[%d] = %f #Using %s' % (JOINT_ID, Kfn, data_folder_vel.split('/')[-2] + warning);
plt.savefig(data_folder+"vel_"+JOINT_NAME+".jpg")
#J Identification
if(IDENTIFICATION_MODE=='acc'):
Kvp=(data_motor_param['Kvp'].item())
(Kap, Kan, Kfp, Kfn) = identify_motor_acc(dt, dq, ddq, current, tau, Ktp, Kvp,
POSITIVE_VEL_THRESHOLD, ZERO_JERK_THRESHOLD,
SHOW_THRESHOLD_EFFECT);
print 'Ka_p[%d] = %f' % (JOINT_ID,Kap);
print 'Ka_n[%d] = %f' % (JOINT_ID,Kan);
print 'Kf_p[%d] = %f' % (JOINT_ID,Kfp);
print 'Kf_n[%d] = %f' % (JOINT_ID,Kfn);
#model vs measurement
if (IDENTIFICATION_MODE=='test_model'):
#load motor parameters
import dynamic_graph.sot.torque_control.hrp2.motors_parameters as hrp2_motors_parameters
Kt_p = hrp2_motors_parameters.Kt_p
Kt_n = hrp2_motors_parameters.Kt_n
Kf_p = hrp2_motors_parameters.Kf_p
Kf_n = hrp2_motors_parameters.Kf_n
Kv_p = hrp2_motors_parameters.Kv_p
Kv_n = hrp2_motors_parameters.Kv_n
Ka_p = hrp2_motors_parameters.Ka_p
Ka_n = hrp2_motors_parameters.Ka_n
motor = Motor_model(Kt_p[JOINT_ID], Kt_n[JOINT_ID],
Kf_p[JOINT_ID], Kf_n[JOINT_ID],
Kv_p[JOINT_ID], Kv_n[JOINT_ID],
Ka_p[JOINT_ID], Ka_n[JOINT_ID],dqThreshold=0.01)
tau_motor=np.zeros(len(tau))
tau_motor_current=np.zeros(len(tau))
tau_motor_vel=np.zeros(len(tau))
tau_motor_acc=np.zeros(len(tau))
i_motor=np.zeros(len(current))
for idx in range(len(tau)):
tau_motor[idx] =motor.getTorque (current[idx], dq[idx], ddq[idx])
tau_motor_current[idx] =motor.getTorque (current[idx], 0.0, 0.0)
tau_motor_vel[idx] =motor.getTorque (0.0, dq[idx], 0.0)
tau_motor_acc[idx] =motor.getTorque (0.0, 0.0, ddq[idx])
i_motor[idx] =motor.getCurrent (tau[idx], dq[idx], ddq[idx])
plt.figure()
alpha = 0.7
plt.plot(tau, alpha=alpha, label='torque from dynamic model')
plt.plot(tau_motor, alpha=alpha, label='torque from motor model')
plt.plot(tau_motor_current, alpha=alpha, label='torque from motor model (cur only)')
plt.plot(tau_motor_vel, alpha=alpha, label='torque from motor model (vel only)')
plt.plot(tau_motor_acc, alpha=alpha, label='torque from motor model (acc only)')
plt.legend()
plt.figure()
plt.subplot(211)
plt.plot(dq, label='dq')
plt.legend()
plt.subplot(212)
plt.plot(current)
plt.plot(i_motor)
plt.legend(['measured current','Estimated current with model'])
plt.figure()
plt.plot(current, label='current')
plt.plot(ctrl/IN_OUT_GAIN, label='ctrl')
plt.legend()
plt.show()
| StarcoderdataPython |
9756568 | import pyomo.environ as pyomo
from pyomo.network import Port
from pyomo.environ import units as u
from hybrid.dispatch.dispatch import Dispatch
class PowerSourceDispatch(Dispatch):
"""
"""
def __init__(self,
pyomo_model: pyomo.ConcreteModel,
index_set: pyomo.Set,
system_model,
financial_model,
block_set_name: str = 'generator'):
super().__init__(pyomo_model,
index_set,
system_model,
financial_model,
block_set_name=block_set_name)
@staticmethod
def dispatch_block_rule(gen):
##################################
# Parameters #
##################################
gen.time_duration = pyomo.Param(
doc="Time step [hour]",
default=1.0,
within=pyomo.NonNegativeReals,
mutable=True,
units=u.hr)
gen.cost_per_generation = pyomo.Param(
doc="Generation cost for generator [$/MWh]",
default=0.0,
within=pyomo.NonNegativeReals,
mutable=True,
units=u.USD / u.MWh)
gen.available_generation = pyomo.Param(
doc="Available generation for the generator [MW]",
default=0.0,
within=pyomo.NonNegativeReals,
mutable=True,
units=u.MW)
##################################
# Variables #
##################################
gen.generation = pyomo.Var(
doc="Power generation of generator [MW]",
domain=pyomo.NonNegativeReals,
bounds=(0, gen.available_generation),
units=u.MW)
gen.generation_cost = pyomo.Var(
doc="Cost of generation [$]",
domain=pyomo.NonNegativeReals,
units=u.USD)
##################################
# Constraints #
##################################
gen.generation_cost_calc = pyomo.Constraint(
doc="Calculation of generation cost for objective function",
expr=gen.generation_cost == gen.time_duration * gen.cost_per_generation * gen.generation)
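        # Worked example (added, illustrative only): with time_duration = 1 hr,
        # cost_per_generation = 2 $/MWh and generation = 50 MW, this constraint
        # fixes generation_cost = 1 * 2 * 50 = 100 $.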
##################################
# Ports #
##################################
gen.port = Port()
gen.port.add(gen.generation)
gen.port.add(gen.generation_cost)
def initialize_dispatch_model_parameters(self):
self.cost_per_generation = self._financial_model.value("om_capacity")[0]*1e3/8760
def update_time_series_dispatch_model_parameters(self, start_time: int):
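        # Added comment: build the available-generation forecast for the dispatch
        # horizon starting at start_time; if the horizon runs past the end of the
        # stored generation profile, wrap around to the start of the profile.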
n_horizon = len(self.blocks.index_set())
generation = self._system_model.value("gen")
if start_time + n_horizon > len(generation):
horizon_gen = list(generation[start_time:])
horizon_gen.extend(list(generation[0:n_horizon - len(horizon_gen)]))
else:
horizon_gen = generation[start_time:start_time + n_horizon]
if len(horizon_gen) < len(self.blocks):
raise RuntimeError(f"Dispatch parameter update error at start_time {start_time}: System model "
f"{type(self._system_model)} generation profile should have at least {len(self.blocks)} "
f"length but has only {len(generation)}")
self.available_generation = [gen_kw / 1e3 for gen_kw in horizon_gen]
@property
def cost_per_generation(self) -> float:
for t in self.blocks.index_set():
return self.blocks[t].cost_per_generation.value
@cost_per_generation.setter
def cost_per_generation(self, om_dollar_per_mwh: float):
for t in self.blocks.index_set():
self.blocks[t].cost_per_generation.set_value(round(om_dollar_per_mwh, self.round_digits))
@property
def available_generation(self) -> list:
return [self.blocks[t].available_generation.value for t in self.blocks.index_set()]
@available_generation.setter
def available_generation(self, resource: list):
if len(resource) == len(self.blocks):
for t, gen in zip(self.blocks, resource):
self.blocks[t].available_generation.set_value(round(gen, self.round_digits))
else:
raise ValueError(f"'resource' list ({len(resource)}) must be the same length as time horizon ({len(self.blocks)})")
@property
def generation(self) -> list:
return [round(self.blocks[t].generation.value, self.round_digits) for t in self.blocks.index_set()]
@property
def generation_cost(self) -> list:
return [round(self.blocks[t].generation_cost.value, self.round_digits) for t in self.blocks.index_set()]
| StarcoderdataPython |
3477499 | <reponame>deepakkt/aasaan<gh_stars>0
# Generated by Django 2.0.4 on 2018-05-08 06:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('notify', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='notifier',
name='centers',
),
]
| StarcoderdataPython |