sentence1 | sentence2 | label
---|---|---|
def Mx(mt, x):
""" Return the Mx """
n = len(mt.Cx)
sum1 = 0
for j in range(x, n):
k = mt.Cx[j]
sum1 += k
return sum1 | Return Mx: the sum of the commutation values Cx from age x to the end of the table. | entailment |
def nEx(mt, x, n):
""" nEx : Returns the EPV of a pure endowment (deferred capital).
Pure endowment benefits are conditional on the survival of the policyholder. (v^n * npx) """
return mt.Dx[x + n] / mt.Dx[x] | nEx : Returns the EPV of a pure endowment (deferred capital).
Pure endowment benefits are conditional on the survival of the policyholder. (v^n * npx) | entailment |
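A minimal self-contained sketch of why the Dx ratio equals v^n * npx, assuming only that Dx is defined as v^x * lx (the toy table class, survivor counts and interest rate below are illustrative, not the library's API):

import math

lx = [1000.0, 990.0, 979.0, 967.0, 954.0]    # hypothetical survivors l_x
i = 0.04                                     # hypothetical interest rate
v = 1.0 / (1.0 + i)

class SimpleTable(object):
    def __init__(self, lx, i):
        # Commutation function Dx = v^x * lx
        self.Dx = [(1.0 / (1.0 + i)) ** x * l for x, l in enumerate(lx)]

mt = SimpleTable(lx, i)
x, n = 1, 3
epv = mt.Dx[x + n] / mt.Dx[x]            # nEx via commutation functions
direct = v ** n * (lx[x + n] / lx[x])    # v^n * npx, computed directly
assert math.isclose(epv, direct)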
def Axn(mt, x, n):
""" (A^1)x:n : Returns the EPV (net single premium) of a term insurance. """
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] | (A^1)x:n : Returns the EPV (net single premium) of a term insurance. | entailment |
def AExn(mt, x, n):
""" AExn : Returns the EPV of a endowment insurance.
An endowment insurance provides a combination of a term insurance and a pure endowment
"""
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] + mt.Dx[x + n] / mt.Dx[x] | AExn : Returns the EPV of an endowment insurance.
An endowment insurance provides a combination of a term insurance and a pure endowment | entailment |
def tAx(mt, x, t):
""" n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance. """
return mt.Mx[x + t] / mt.Dx[x] | n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance. | entailment |
def qAx(mt, x, q):
""" This function evaluates the APV of a geometrically increasing annual annuity-due """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return Ax(mtj, x) | qAx : Returns the EPV of a geometrically increasing whole life insurance (benefit grows at rate q), evaluated at the adjusted rate j. | entailment |
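The substitution works because each year's payment grows by (1 + q) while discounting divides by (1 + i), so the effective per-year discount factor is (1 + q)/(1 + i) = 1/(1 + j) with j = (i - q)/(1 + q). A quick numeric check:

i, q = 0.05, 0.03
j = (i - q) / (1 + q)   # adjusted rate used for the shifted table
assert abs((1 + q) / (1 + i) - 1 / (1 + j)) < 1e-12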
def aaxn(mt, x, n, m=1):
""" Γ€xn : Return the actuarial present value of a (immediate) temporal (term certain) annuity:
n-year temporary life annuity-anticipatory. Payable 'm' per year at the beginning of the period
"""
if m == 1:
return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x]
else:
return (mt.Nx[x] - mt.Nx[x + n]) / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, n))) | äxn : Return the actuarial present value of an n-year temporary (term) annuity:
n-year temporary life annuity-due. Payable 'm' times per year at the beginning of each period | entailment |
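The (m - 1)/(2m) term is the classical UDD-style approximation for m-thly payments: for a whole life annuity-due, ä(m) ≈ ä - (m - 1)/(2m), and for an n-year temporary annuity the correction is scaled by (1 - nEx) because payments stop at n. For example, with monthly payments:

m = 12
correction = (m - 1) / (2.0 * m)   # 11/24, about 0.4583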
def aax(mt, x, m=1):
""" Γ€x : Returns the actuarial present value of an (immediate) annuity of 1 per time period
(whole life annuity-anticipatory). Payable 'm' per year at the beginning of the period
"""
return mt.Nx[x] / mt.Dx[x] - (float(m - 1) / float(m * 2)) | äx : Returns the actuarial present value of a whole life annuity of 1 per time period
(whole life annuity-due). Payable 'm' times per year at the beginning of each period | entailment |
def ax(mt, x, m=1):
""" ax : Returns the actuarial present value of an (immediate) annuity of 1 per time period
(whole life annuity-late). Payable 'm' per year at the ends of the period
"""
return (mt.Nx[x] / mt.Dx[x] - 1) + (float(m - 1) / float(m * 2)) | ax : Returns the actuarial present value of a whole life annuity of 1 per time period
(whole life annuity-immediate). Payable 'm' times per year at the end of each period | entailment |
def taax(mt, x, t, m=1):
""" n/Γ€x : Return the actuarial present value of a deferred annuity (deferred n years):
n-year deferred whole life annuity-anticipatory. Payable 'm' per year at the beginning of the period
"""
return mt.Nx[x + t] / mt.Dx[x] - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t))) | t/äx : Return the actuarial present value of a deferred annuity (deferred t years):
t-year deferred whole life annuity-due. Payable 'm' times per year at the beginning of each period | entailment |
def Iaaxn(mt, x, n, *args):
""" during a term certain, IAn """
return (Sx(mt, x) - Sx(nt, x + n) - n * Nx(nt, x + n)) / Dx(nt, x) | during a term certain, IAn | entailment |
def Iaxn(mt, x, n, *args):
""" during a term certain, IAn """
return (Sx(mt, x + 1) - Sx(mt, x + n + 1) - n * Nx(mt, x + n + 1)) / Dx(mt, x) | during a term certain, IAn | entailment |
def Iaax(mt, x, *args):
""" (IΓ€)x : Returns the present value of annuity-certain at the beginning of the first year
and increasing linerly. Arithmetically increasing annuity-anticipatory
"""
return Sx(mt, x) / Dx(mt, x) | (Iä)x : Returns the EPV of a whole life annuity-due whose payments start at 1
in the first year and increase linearly (arithmetically increasing annuity-due) | entailment |
def Iax(mt, x, *args):
""" (Ia)x : Returns the present value of annuity-certain at the end of the first year
and increasing linerly. Arithmetically increasing annuity-late
"""
return Sx(mt, x + 1) / Dx(mt, x) | (Ia)x : Returns the EPV of a whole life annuity-immediate whose payments start at 1
at the end of the first year and increase linearly (arithmetically increasing annuity-immediate) | entailment |
def Itaax(mt, x, t):
""" deffered t years """
return (Sx(mt, x) - Sx(mt, x + t)) / Dx(mt, x) | deffered t years | entailment |
def Itax(mt, x, t):
""" deffered t years """
return (Sx(mt, x + 1) - Sx(mt, x + t + 1)) / Dx(mt, x) | deffered t years | entailment |
def qax(mt, x, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return ax(mtj, x, m) | Geometrically increasing whole life annuity-immediate. | entailment |
def qaax(mt, x, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return aax(mtj, x, m) | Geometrically increasing whole life annuity-due. | entailment |
def qaxn(mt, x, n, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return axn(mtj, x, n, m) | Geometrically increasing n-year temporary annuity-immediate. | entailment |
def qaaxn(mt, x, n, q, m=1):
""" Geometrically increasing n-year temporary annuity-due. """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return aaxn(mtj, x, n, m) | Geometrically increasing n-year temporary annuity-due. | entailment |
def qtax(mt, x, t, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return tax(mtj, x, t) + ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t))) | Geometrically increasing t-year deferred whole life annuity-immediate. | entailment |
def qtaax(mt, x, t, q, m=1):
""" geometrica """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return taax(mtj, x, t) - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t))) | Geometrically increasing t-year deferred whole life annuity-due. | entailment |
def annuity(mt, x, n, p, m=1 , *args):
"""Syntax: annuity(nt, x, n, p, m, ['a/g', q], -d)
Args:
mt = the mortality table
x = the age as integer number.
n = A integer number (term of insurance in years) or 'w' = whole-life.
(Also, 99 years is defined to be whole-life).
p = Moment of payment. Syntaxis: 0 = begining of each period (prepaid), 1 = end of each period (postpaid),
Optional variables:
m = Payable 'm' per year (frational payments). Default = 1 (annually)
a or g = a: Arithmetical / g: Geometrical
q = The increase rate. Syntax: ['g',q] or ['a',q]. For example, ['g',0.03]
Deferring period:
-d = The n-years deferring period as negative number.
"""
l = len(args)
post = False
incr = False
deff = False
arit = False
wh_l = False
if isinstance(n,str) or n == 99:
wh_l = True
else:
pass
if isinstance(m,int) and m >=0 and l == 0:
pass
elif l == 0 and isinstance(m,list):
args = (m,)
m = 1
incr = True
elif l == 0 and int(m) < 0:
args = False
deff = True
t = int(m) * -1
m = 1
elif l == 1:
if isinstance(args[0], list):
incr = True
elif isinstance(args[0], int):
if isinstance(m, list):
deff = True
incr = True
t = int(args[0]) * -1
args = (m, )
m = 1
else:
deff = True
t = int(args[0]) * -1
args = False
else:
pass
elif l == 2:
if isinstance(args[0], list):
deff = True
t = int(args[1]) * -1
incr = True
elif isinstance(args[0], int):
deff = True
t = int(args[0]) * -1
args = args[1]
else:
pass
else:
pass
if p == 1:
post = True
elif p == 0:
pass
else:
print('Error: payment value must be 0 or 1')
if incr:
if 'a' in args[0]:
arit = True
incr = False
elif 'g' in args[0]:
incr = True
q = args[0][1]
else:
return "Error: increasing value is 'a' or 'g'"
else:
pass
if not incr and not deff and not wh_l and not post:
return aaxn(mt, x, n, m)
elif not incr and not deff and not wh_l and post:
return axn(mt, x, n, m)
elif not incr and not deff and wh_l and not post:
return aax(mt, x, m)
elif not incr and not deff and wh_l and post:
return ax(mt, x, m)
elif not incr and deff and not wh_l and not post:
return taaxn(mt, x, n, t, m)
elif not incr and deff and not wh_l and post:
return taxn(mt, x, n, t, m)
elif not incr and deff and wh_l and not post:
return taax(mt, x, t, m)
elif not incr and deff and wh_l and post:
return tax(mt, x, t, m)
elif incr and not deff and not wh_l and not post:
return qaaxn(mt, x, n, q, m)
elif incr and not deff and not wh_l and post:
return qaxn(mt, x, n, q, m)
elif incr and not deff and wh_l and not post:
return qaax(mt, x, q, m)
elif incr and not deff and wh_l and post:
return qax(mt, x, q, m)
elif incr and deff and not wh_l and not post:
return qtaaxn(mt, x, n, t, q, m)
elif incr and deff and not wh_l and post:
return qtaxn(mt, x, n, t, q, m)
elif incr and deff and wh_l and not post:
return qtaax(mt, x, t, q, m)
else:
#elif incr and deff and wh_l and post:
return Itax(mt, x, t) | Syntax: annuity(mt, x, n, p, m, ['a/g', q], -d)
Args:
mt = the mortality table
x = the age as an integer number.
n = An integer number (term of the annuity in years) or 'w' = whole-life.
(Also, 99 years is defined to be whole-life.)
p = Moment of payment. Syntax: 0 = beginning of each period (prepaid), 1 = end of each period (postpaid).
Optional variables:
m = Payable 'm' times per year (fractional payments). Default = 1 (annually)
a or g = a: Arithmetical / g: Geometrical
q = The increase rate. Syntax: ['g',q] or ['a',q]. For example, ['g',0.03]
Deferring period:
-d = The n-years deferring period as a negative number. | entailment |
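Illustrative calls showing how the dispatcher's flags map onto the helper functions (`mt` stands for an already-built Actuarial table object, so these lines are a sketch rather than a runnable script):

annuity(mt, 50, 10, 0)                # 10-year temporary annuity-due      -> aaxn
annuity(mt, 50, 'w', 1, 12)           # whole life, monthly, postpaid      -> ax
annuity(mt, 50, 10, 0, ['g', 0.03])   # geometric increase at 3%, prepaid  -> qaaxn
annuity(mt, 50, 'w', 0, -5)           # deferred 5 years, annuity-due      -> taax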
def _meanvalueattr(self,v):
"""
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
"""
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars) | find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median. | entailment |
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
number of crossings (while barycenter can in theory be worse by a factor on the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]] | find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
number of crossings (while barycenter can in theory be worse by a factor on the order of |V|) | entailment |
def _neighbors(self,v):
"""
neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state).
"""
assert self.layout.dag
dirv = self.layout.dirv
grxv = self.layout.grx[v]
try: #(cache)
return grxv.nvs[dirv]
except AttributeError:
grxv.nvs={-1:v.N(-1),+1:v.N(+1)}
if grxv.dummy: return grxv.nvs[dirv]
# v is real; v.N gives graph neighbors, but we need layer neighbors
for d in (-1,+1):
tr=grxv.rank+d
for i,x in enumerate(v.N(d)):
if self.layout.grx[x].rank==tr:continue
e=v.e_with(x)
dum = self.layout.ctrls[e][tr]
grxv.nvs[d][i]=dum
return grxv.nvs[dirv] | neighbors refer to upper/lower adjacent nodes.
Note that v.N() provides neighbors of v in the graph, while
this method provides the Vertex and DummyVertex adjacent to v in the
upper or lower layer (depending on layout.dirv state). | entailment |
def _crossings(self):
"""
counts (inefficiently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[]))
"""
g=self.layout.grx
P=[]
for v in self:
P.append([g[x].pos for x in self._neighbors(v)])
for i,p in enumerate(P):
candidates = sum(P[i+1:],[])
for j,e in enumerate(p):
p[j] = len([nx for nx in candidates if nx < e])  # Python 3: filter() returns an iterator, so use a list comprehension
del candidates
return P | counts (inefficiently but at least accurately) the number of
crossing edges between layer l and l+dirv.
P[i][j] counts the number of crossings from j-th edge of vertex i.
The total count of crossings is the sum of flattened P:
x = sum(sum(P,[])) | entailment |
def _cc(self):
"""
implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
"""
g=self.layout.grx
P=[]
for v in self:
P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
# count inversions in P:
s = []
count = 0
for i,p in enumerate(P):
j = bisect(s,p)
if j<i: count += (i-j)
s.insert(j,p)
return count | implementation of the efficient bilayer cross counting by insert-sort
(see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting") | entailment |
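The same insert-sort inversion count as a standalone sketch, runnable on a plain list of neighbor positions (illustrative and independent of the layout classes):

from bisect import bisect

def count_inversions(P):
    s, count = [], 0
    for i, p in enumerate(P):
        j = bisect(s, p)
        count += i - j       # elements already inserted that are greater than p
        s.insert(j, p)
    return count

assert count_inversions([2, 0, 1]) == 2   # edges at positions (2,0) and (2,1) cross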
def init_all(self,roots=None,inverted_edges=None,optimize=False):
"""initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False)
"""
if self.initdone: return
# For layered sugiyama algorithm, the input graph must be acyclic,
# so we must provide a list of root nodes and a list of inverted edges.
if roots is None:
roots = [v for v in self.g.sV if len(v.e_in())==0]
if inverted_edges is None:
self.g.get_scs_with_feedback(roots)
inverted_edges = [x for x in self.g.sE if x.feedback]
self.alt_e = inverted_edges
# assign rank to all vertices:
self.rank_all(roots,optimize)
# add dummy vertex/edge for 'long' edges:
for e in self.g.E():
self.setdummies(e)
# precompute some layers values:
for l in self.layers: l.setup(self)
self.initdone = True | initializes the layout algorithm by computing roots (unless provided),
inverted edges (unless provided), vertices ranks and creates all dummy
vertices and layers.
Parameters:
roots (list[Vertex]): set *root* vertices (layer 0)
inverted_edges (list[Edge]): set edges to invert to have a DAG.
optimize (bool): optimize ranking if True (default False) | entailment |
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
"""
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges() | compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing. | entailment |
def rank_all(self,roots,optimize=False):
"""Computes rank of all vertices.
add provided roots to rank 0 vertices,
otherwise update ranking from provided roots.
The initial rank is based on precedence relationships,
optimal ranking may be derived from network flow (simplex).
"""
self._edge_inverter()
r = [x for x in self.g.sV if (len(x.e_in())==0 and x not in roots)]
self._rank_init(roots+r)
if optimize: self._rank_optimize()
self._edge_inverter() | Computes the rank of all vertices.
The provided roots, together with any other vertex that has no
in-edges, are assigned rank 0; ranking then propagates from them.
The initial rank is based on precedence relationships;
an optimal ranking may be derived from network flow (simplex). | entailment |
def _rank_init(self,unranked):
"""Computes rank of provided unranked list of vertices and all
their children. A vertex will be asign a rank when all its
inward edges have been *scanned*. When a vertex is asigned
a rank, its outward edges are marked *scanned*.
"""
assert self.dag
scan = {}
# set rank of unranked based on its in-edges vertices ranks:
while len(unranked)>0:
l = []
for v in unranked:
self.setrank(v)
# mark out-edges as scanned:
for e in v.e_out(): scan[e]=True
# check if out-vertices are rank-able:
for x in v.N(+1):
if not (False in [scan.get(e,False) for e in x.e_in()]):
if x not in l: l.append(x)
unranked=l | Computes the rank of the provided unranked list of vertices and all
their children. A vertex is assigned a rank when all its
inward edges have been *scanned*. When a vertex is assigned
a rank, its outward edges are marked *scanned*. | entailment |
def _rank_optimize(self):
"""optimize ranking by pushing long edges toward lower layers as much as possible.
see other interersting network flow solver to minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf)
"""
assert self.dag
for l in reversed(self.layers):
for v in l:
gv = self.grx[v]
for x in v.N(-1):
if all((self.grx[y].rank>=gv.rank for y in x.N(+1))):
gx = self.grx[x]
self.layers[gx.rank].remove(x)
gx.rank = gv.rank-1
self.layers[gv.rank-1].append(x) | optimize ranking by pushing long edges toward lower layers as much as possible.
see also other interesting network-flow solvers that minimize total edge length
(http://jgaa.info/accepted/2005/EiglspergerSiebenhallerKaufmann2005.9.3.pdf) | entailment |
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v])) | set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank. | entailment |
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
"""
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv | creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex. | entailment |
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
"""
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in range(r0+1,r1):
self.dummyctrl(r,ctrl) | creates and defines all needed dummy vertices for edge e. | entailment |
def draw_step(self):
"""iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient! Use it only for "animation" or debugging purposes.
"""
ostep = self.ordering_step()
for s in ostep:
self.setxy()
self.draw_edges()
yield s | iterator that computes all vertices coordinates and edge routing after
just one step (one layer after the other from top to bottom to top).
Purely inefficient! Use it only for "animation" or debugging purposes. | entailment |
def ordering_step(self,oneway=False):
"""iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True).
"""
self.dirv=-1
crossings = 0
for l in self.layers:
mvmt = l.order()
crossings += mvmt
yield (l,mvmt)
if oneway or (crossings == 0):
return
self.dirv=+1
while l:
mvmt = l.order()
yield (l,mvmt)
l = l.nextlayer() | iterator that computes all vertices ordering in their layers
(one layer after the other from top to bottom, to top again unless
oneway is True). | entailment |
def setxy(self):
"""computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf.
"""
self._edge_inverter()
self._detect_alignment_conflicts()
inf = float('infinity')
# initialize vertex coordinates attributes:
for l in self.layers:
for v in l:
self.grx[v].root = v
self.grx[v].align = v
self.grx[v].sink = v
self.grx[v].shift = inf
self.grx[v].X = None
self.grx[v].x = [0.0]*4
curvh = self.dirvh # save current dirvh value
for dirvh in range(4):
self.dirvh = dirvh
self._coord_vertical_alignment()
self._coord_horizontal_compact()
self.dirvh = curvh # restore it
# vertical coordinate assignment of all nodes:
Y = 0
for l in self.layers:
dY = max([v.view.h/2. for v in l])
for v in l:
vx = sorted(self.grx[v].x)
# mean of the 2 medians out of the 4 x-coord computed above:
avgm = (vx[1]+vx[2])/2.
# final xy-coordinates :
v.view.xy = (avgm,Y+dY)
Y += 2*dY+self.yspace
self._edge_inverter() | computes all vertex coordinates (x,y) using
an algorithm by Brandes & Kopf. | entailment |
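The "mean of the 2 medians" step can be seen in isolation: each vertex ends up with four candidate x-coordinates (one per dirvh pass), and averaging the two middle values discards the two extreme candidates (numbers below are illustrative):

vx = sorted([3.0, 10.0, 4.0, 5.0])   # four hypothetical x-candidates
avgm = (vx[1] + vx[2]) / 2.0         # -> 4.5, robust against the outlier 10.0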
def _detect_alignment_conflicts(self):
"""mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase)
"""
curvh = self.dirvh # save current dirvh value
self.dirvh=0
self.conflicts = []
for L in self.layers:
last = len(L)-1
prev = L.prevlayer()
if not prev: continue
k0=0
k1_init=len(prev)-1
l=0
for l1,v in enumerate(L):
if not self.grx[v].dummy: continue
if l1==last or v.inner(-1):
k1=k1_init
if v.inner(-1):
k1=self.grx[v.N(-1)[-1]].pos
for vl in L[l:l1+1]:
for vk in L._neighbors(vl):
k = self.grx[vk].pos
if (k<k0 or k>k1):
self.conflicts.append((vk,vl))
l=l1+1
k0=k1
self.dirvh = curvh | mark conflicts between edges:
inner edges are edges between dummy nodes
type 0 is regular crossing regular (or sharing vertex)
type 1 is inner crossing regular (targeted crossings)
type 2 is inner crossing inner (avoided by reduce_crossings phase) | entailment |
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
"""
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m | performs vertical alignment according to current dirvh internal state. | entailment |
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = range(r0+1,r1)
else:
ranks = range(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l) | Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view. | entailment |
def pyprf(strCsvCnfg, lgcTest=False, varRat=None, strPathHrf=None):
"""
Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used.
"""
# *************************************************************************
# *** Check time
print('---pRF analysis')
varTme01 = time.time()
# *************************************************************************
# *************************************************************************
# *** Preparations
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# Conditional imports:
if cfg.strVersion == 'gpu':
from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu
if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')):
from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu
# Convert preprocessing parameters (for temporal smoothing)
# from SI units (i.e. [s]) into units of data array (volumes):
cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr)
# *************************************************************************
# *************************************************************************
# *** Create or load pRF time course models
# Create model time courses. Also return logical for inclusion of model
# parameters which will be needed later when we create model parameters
# in degree.
aryPrfTc, lgcMdlInc = model_creation(dicCnfg, varRat=varRat,
strPathHrf=strPathHrf)
# Deduce the number of features from the pRF time course models array
cfg.varNumFtr = aryPrfTc.shape[1]
# *************************************************************************
# *************************************************************************
# *** Preprocessing
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
# The functional data will be masked and demeaned:
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# set the precision of the header to np.float32 so that the prf results
# will be saved in this precision later
hdrMsk.set_data_dtype(np.float32)
# *************************************************************************
# *************************************************************************
# *** Checks
# Make sure that if gpu fitting is used, the number of cross-validations is
# set to 1, not higher
if cfg.strVersion == 'gpu':
strErrMsg = 'Stopping program. ' + \
'Cross-validation on GPU is currently not supported. ' + \
'Set varNumXval equal to 1 in csv file in order to continue. '
assert cfg.varNumXval == 1, strErrMsg
# For the GPU version, we need to set down the parallelisation to 1 now,
# because no separate CPU threads are to be created. We may still use CPU
# parallelisation for preprocessing, which is why the parallelisation
# factor is only reduced now, not earlier.
if cfg.strVersion == 'gpu':
cfg.varPar = 1
# Make sure that if cython is used, the number of features is 1 or 2,
# not higher
if cfg.strVersion == 'cython':
strErrMsg = 'Stopping program. ' + \
'Cython is not supported for more features than 1. ' + \
'Set strVersion equal \'numpy\'.'
assert cfg.varNumFtr in [1, 2], strErrMsg
# Check whether we need to crossvalidate
if np.greater(cfg.varNumXval, 1):
cfg.lgcXval = True
elif np.equal(cfg.varNumXval, 1):
cfg.lgcXval = False
strErrMsg = 'Stopping program. ' + \
'Set numXval (number of crossvalidation folds) to 1 or higher'
assert np.greater_equal(cfg.varNumXval, 1), strErrMsg
# *************************************************************************
# *** Find pRF models for voxel time courses
print('------Find pRF models for voxel time courses')
# Number of voxels for which pRF finding will be performed:
cfg.varNumVoxInc = aryFunc.shape[0]
print('---------Number of voxels on which pRF finding will be performed: '
+ str(cfg.varNumVoxInc))
print('---------Number of features pRF finding will be performed with: '
+ str(cfg.varNumFtr))
print('---------Preparing parallel pRF model finding')
# Get array with all possible model parameter combination:
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Empty list for results (parameters of best fitting pRF model):
lstPrfRes = [None] * cfg.varPar
# Empty list for processes:
lstPrcs = [None] * cfg.varPar
# Create a queue to put the results in:
queOut = mp.Queue()
# Create list with chunks of functional data for the parallel processes:
lstFunc = np.array_split(aryFunc, cfg.varPar)
# We don't need the original array with the functional data anymore:
del(aryFunc)
# Prepare dictionary to pass as kwargs to find_prf_cpu
dctKw = {'lgcRstr': None,
'lgcPrint': True}
# CPU version (using numpy or cython for pRF finding):
if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')):
print('---------pRF finding on CPU')
print('---------Creating parallel processes')
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu,
args=(idxPrc,
lstFunc[idxPrc],
aryPrfTc,
aryMdlParams,
cfg.strVersion,
cfg.lgcXval,
cfg.varNumXval,
queOut),
kwargs=dctKw,
)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].daemon = True
# GPU version (using tensorflow for pRF finding):
elif cfg.strVersion == 'gpu':
print('---------pRF finding on GPU')
# Create processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu,
args=(idxPrc,
aryMdlParams,
lstFunc[idxPrc],
aryPrfTc,
queOut),
kwargs=dctKw,
)
# Daemon (kills processes when exiting):
lstPrcs[idxPrc].daemon = True
# Start processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].start()
# Delete reference to list with functional data (the data continues to exist
# in child process):
del(lstFunc)
# Collect results from queue:
for idxPrc in range(0, cfg.varPar):
lstPrfRes[idxPrc] = queOut.get(True)
# Join processes:
for idxPrc in range(0, cfg.varPar):
lstPrcs[idxPrc].join()
# *************************************************************************
# *************************************************************************
# *** Prepare pRF finding results for export
print('---------Prepare pRF finding results for export')
# Put output into correct order:
lstPrfRes = sorted(lstPrfRes)
# collect results from parallelization
aryBstXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D')
aryBstYpos = joinRes(lstPrfRes, cfg.varPar, 2, inFormat='1D')
aryBstSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
aryBstR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
aryBstBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
if np.greater(cfg.varNumXval, 1):
aryBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6, inFormat='2D')
# Delete unneeded large objects:
del(lstPrfRes)
# *************************************************************************
# *************************************************************************
# Calculate polar angle map:
aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
# Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
aryEcc = np.sqrt(np.add(np.square(aryBstXpos),
np.square(aryBstYpos)))
# *************************************************************************
# *************************************************************************
# Export each map of best parameters as a 3D nii file
print('---------Exporting results')
# Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# Concatenate all the best voxel maps
aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2,
aryPlrAng, aryEcc], axis=1)
# List with name suffixes of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# Export map results as separate 3D nii files
export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='3D')
# *************************************************************************
# *************************************************************************
# Save beta parameter estimates for every feature:
# List with name suffixes of output images:
lstNiiNames = ['_Betas']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export beta parameter as a single 4D nii file
export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# Save R2 maps from crossvalidation (saved for every run) as nii:
if np.greater(cfg.varNumXval, 1):
# truncate extremely negative R2 values
aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0
# List with name suffixes of output images:
lstNiiNames = ['_R2_single']
# Append ratio to nii file name, if fitting was done with sup surround
if varRat is not None:
lstNiiNames = [strNii + '_' + str(varRat) for strNii in
lstNiiNames]
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export R2 maps as a single 4D nii file
export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar,
tplNiiShp, aryAff, hdrMsk, outFormat='4D')
# *************************************************************************
# *************************************************************************
# *** Report time
varTme02 = time.time()
varTme03 = varTme02 - varTme01
print('---Elapsed time: ' + str(varTme03) + ' s')
print('---Done.') | Main function for pRF mapping.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, the absolute path of the pyprf library
will be prepended to config file paths.
varRat : float, default None
Ratio of size suppressive surround to size of center pRF
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters will be used. | entailment |
def make_request(cls, url, method, params=None, basic_auth=None, timeout=600):
""" Makes a cURL POST request to the given URL, specifying the data to be passed in as
{"method": method, "params": parameters}
:param str url: URL to connect to.
:param str method: The API method to call.
:param dict params: Dictionary object of the parameters associated with the `method` given. None by default.
:param list | tuple basic_auth: List containing your username and password as ['username', 'password'].
This is empty by default, however it is required by all of the `lbrycrd` methods
:param float timeout: Amount of seconds to wait for the server's response before we timeout.
:raises LBRYException: If the request returns an error when calling the API
:return: A tuple of the JSON 'result' member of the response and the `requests.Response` object ((None, None) on failure)
:rtype: (dict, requests.Response)
"""
# Default parameters
params = {} if params is None else params
# Increment the request ID
cls.request_id += 1
# Weed out all the None valued params
params = {k: v for (k, v) in params.items() if v is not None}
# This is the data to be sent
data = {"method": method, "params": params, "jsonrpc": "2.0", "id": cls.request_id}
headers = {"Content-Type": "application/json-rpc", # sends the request as a json
"user-agent": "LBRY python3-api"} # Sets the user agent
# You could create a request object and then make a prepared request object
# And then be able to print the Request that will be sent
request = requests.Request('POST', url, json=data, headers=headers, auth=basic_auth)
prepared = request.prepare()
try:
# Create a session object
sesh = requests.Session()
# Send the prepared request object through
response = sesh.send(prepared, timeout=timeout)
response_json = response.json()
# Successful request was made
if 'result' in response_json:
# Returns the Result sub-JSON formatted as a dict
return response_json['result'], response
# If the response we received from the LBRY http post had an error
elif 'error' in response_json:
raise LBRYUtils.LBRYException("POST Request made to LBRY received an error",
response_json, response.status_code, prepared)
except requests.HTTPError as HE:
print(HE)
return None, None
except requests.RequestException as RE:
# Print the Request Exception given
print(RE)
print("Printing Request Created:\n")
LBRYUtils.print_request(prepared)
return None, None | Makes a JSON-RPC POST request (via the requests library) to the given URL, specifying the data to be passed in as
{"method": method, "params": parameters}
:param str url: URL to connect to.
:param str method: The API method to call.
:param dict params: Dictionary object of the parameters associated with the `method` given. None by default.
:param list | tuple basic_auth: List containing your username and password as ['username', 'password'].
This is empty by default, however it is required by all of the `lbrycrd` methods
:param float timeout: Amount of seconds to wait for the server's response before we timeout.
:raises LBRYException: If the request returns an error when calling the API
:return: A tuple of the JSON 'result' member of the response and the `requests.Response` object ((None, None) on failure)
:rtype: (dict, requests.Response) | entailment |
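Illustrative use, assuming the enclosing class is LBRYUtils and a local lbrynet daemon; the URL and method name are placeholders, not part of the source:

result, response = LBRYUtils.make_request('http://localhost:5279', 'status')
if result is not None:
    print(result)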
def load_png(varNumVol, strPathPng, tplVslSpcSze=(200, 200), varStrtIdx=0,
varZfill=3):
"""
Load PNGs with stimulus information for pRF model creation.
Parameters
----------
varNumVol : int
Number of PNG files.
strPathPng : str
Parent directory of PNG files. PNG files need to be organised in
numerical order (e.g. `file_001.png`, `file_002.png`, etc.).
tplVslSpcSze : tuple
Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it
is useful to sample at a lower resolution than the original.
varStrtIdx : int
Start index of PNG files. For instance, `varStrtIdx = 0` if the name of
the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is
`file_001.png`.
varZfill : int
Zero padding of PNG file names. For instance, `varZfill = 3` if the
name of PNG files is `file_007.png`, or `varZfill = 4` if it is
`file_0007.png`.
Returns
-------
aryPngData : np.array
3D Numpy array with the following structure:
aryPngData[x-pixel-index, y-pixel-index, PngNumber]
Notes
-----
Part of py_pRF_mapping library.
"""
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng +
str(idx01 + varStrtIdx).zfill(varZfill) +
'.png')
# The png data will be saved in a numpy array of the following order:
# aryPngData[x-pixel, y-pixel, PngNumber].
aryPngData = np.zeros((tplVslSpcSze[0],
tplVslSpcSze[1],
varNumVol))
# Open first image in order to check dimensions (greyscale or RGB, i.e. 2D
# or 3D).
objIm = Image.open(lstPngPaths[0])
aryTest = np.array(objIm.resize((objIm.size[0], objIm.size[1]),
Image.ANTIALIAS))
varNumDim = aryTest.ndim
del(aryTest)
# Loop trough PNG files:
for idx01 in range(0, varNumVol):
# Old version of reading images with scipy
# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0]
# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :]
# Load & resize image:
objIm = Image.open(lstPngPaths[idx01])
objIm = objIm.resize((tplVslSpcSze[0],
tplVslSpcSze[1]),
resample=Image.NEAREST)
# Casting of array depends on dimensionality (greyscale or RGB, i.e. 2D
# or 3D):
if varNumDim == 2:
aryPngData[:, :, idx01] = np.array(objIm.resize(
(objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :]
elif varNumDim == 3:
aryPngData[:, :, idx01] = np.array(objIm.resize(
(objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :, 0]
else:
# Error message:
strErrMsg = ('ERROR: PNG files for model creation need to be RGB '
+ 'or greyscale.')
raise ValueError(strErrMsg)
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 200).astype(np.int8)
return aryPngData | Load PNGs with stimulus information for pRF model creation.
Parameters
----------
varNumVol : int
Number of PNG files.
strPathPng : str
Parent directory of PNG files. PNG files need to be organised in
numerical order (e.g. `file_001.png`, `file_002.png`, etc.).
tplVslSpcSze : tuple
Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it
is useful to sample at a lower resolution than the original.
varStrtIdx : int
Start index of PNG files. For instance, `varStrtIdx = 0` if the name of
the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is
`file_001.png`.
varZfill : int
Zero padding of PNG file names. For instance, `varZfill = 3` if the
name of PNG files is `file_007.png`, or `varZfill = 4` if it is
`file_0007.png`.
Returns
-------
aryPngData : np.array
3D Numpy array with the following structure:
aryPngData[x-pixel-index, y-pixel-index, PngNumber]
Notes
-----
Part of py_pRF_mapping library. | entailment |
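An illustrative call (paths and counts are placeholders): this would load /data/stim/frame_000.png through frame_099.png, resample each to 200 x 200 pixels, and binarise them:

aryPngData = load_png(100, '/data/stim/frame_', tplVslSpcSze=(200, 200),
                      varStrtIdx=0, varZfill=3)
print(aryPngData.shape)   # -> (200, 200, 100)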
def load_ev_txt(strPthEv):
"""Load information from event text file.
Parameters
----------
strPthEv : str
Path to event text file
Returns
-------
aryEvTxt : 2d numpy array, shape [n_measurements, 3]
Array with info about conditions: type, onset, duration
Notes
-----
Part of py_pRF_mapping library.
"""
aryEvTxt = np.loadtxt(strPthEv, dtype='float', comments='#', delimiter=' ',
skiprows=0, usecols=(0, 1, 2))
return aryEvTxt | Load information from event text file.
Parameters
----------
strPthEv : str
Path to event text file
Returns
-------
aryEvTxt : 2d numpy array, shape [n_measurements, 3]
Array with info about conditions: type, onset, duration
Notes
-----
Part of py_pRF_mapping library. | entailment |
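The expected file is space-separated with one event per line: condition type, onset in seconds, duration in seconds. The file name and values below are illustrative:

# contents of events_run01.txt:
#   1 0.0 30.0
#   2 30.0 30.0
#   1 60.0 30.0
aryEvTxt = load_ev_txt('events_run01.txt')   # -> array of shape (n_events, 3)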
def adjust_status(info: dict) -> dict:
"""Apply status mapping to a raw API result."""
modified_info = deepcopy(info)
modified_info.update({
'level':
get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])),
'level2':
STATUS_MAP[99] if info['level2'] is None else
get_nearest_by_numeric_key(STATUS_MAP, int(info['level2']))
})
return modified_info | Apply status mapping to a raw API result. | entailment |
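`get_nearest_by_numeric_key` is not defined in this section; a plausible sketch of such a helper (hypothetical, shown only to make the mapping concrete) is:

def get_nearest_by_numeric_key(data: dict, key: int):
    """Hypothetical helper: return the value whose integer key is nearest to `key`."""
    return data[min(data, key=lambda k: abs(k - key))]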
async def status_by_coordinates(
self, latitude: float, longitude: float) -> dict:
"""Return the CDC status for the provided latitude/longitude."""
cdc_data = await self.raw_cdc_data()
nearest = await self.nearest_by_coordinates(latitude, longitude)
return adjust_status(cdc_data[nearest['state']['name']]) | Return the CDC status for the provided latitude/longitude. | entailment |
async def status_by_state(self, state: str) -> dict:
"""Return the CDC status for the specified state."""
data = await self.raw_cdc_data()
try:
info = next((v for k, v in data.items() if state in k))
except StopIteration:
return {}
return adjust_status(info) | Return the CDC status for the specified state. | entailment |
def brief_exception_text(exception, secret_values):
"""
Returns the Exception class and the message of the exception as string.
:param exception: The exception to format
:param secret_values: Values to hide in output
"""
exception_text = _hide_secret_values(str(exception), secret_values)
return '[{}]\n{}'.format(type(exception).__name__, exception_text) | Returns the Exception class and the message of the exception as string.
:param exception: The exception to format
:param secret_values: Values to hide in output | entailment |
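Example use (the secret value is illustrative):

try:
    raise ValueError('bad token: s3cr3t')
except ValueError as exc:
    text = brief_exception_text(exc, ['s3cr3t'])
    # text starts with '[ValueError]' followed by the masked message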
def print_exception(exception, secret_values=None):
"""
Prints the exception message and the name of the exception class to stderr.
:param exception: The exception to print
:param secret_values: Values to hide in output
"""
print(brief_exception_text(exception, secret_values), file=sys.stderr) | Prints the exception message and the name of the exception class to stderr.
:param exception: The exception to print
:param secret_values: Values to hide in output | entailment |
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
"""
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors | Saves the Document to the database if it is valid.
Returns errors otherwise. | entailment |
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
"""
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors | Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise. | entailment |
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
"""
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
) | Deletes the document if it is saved in the collection. | entailment |
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
"""
return cls.collection.find_one(filter, *args, **kwargs) | Returns one document dict if one passes the filter.
Returns None otherwise. | entailment |
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
"""
return list(cls.collection.find(*args, **kwargs)) | Returns all document dicts that pass the filter | entailment |
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
"""
return list(cls.collection.aggregate(pipeline or [], **kwargs)) | Returns the document dicts returned from the Aggregation Pipeline | entailment |
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
"""
return cls.collection.insert_many(documents, ordered).inserted_ids | Inserts a list of documents into the Collection and returns their _ids | entailment |
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_one(filter, update, upsert).raw_result | Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered | entailment |
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result | Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered | entailment |
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result | Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered | entailment |
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
"""
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None | Returns a Document if any document is filtered, returns None otherwise | entailment |
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
"""
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document] | Returns a list of Documents if any document is filtered | entailment |
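An illustrative sketch of how these pieces combine (the `User` collection and its field are hypothetical; `Document` stands for the base class these methods belong to):

class User(Document):
    pass

user = User({'name': 'ada'})
result = user.insert()             # the saved document dict, or validation errors
found = User.get({'name': 'ada'})  # a User instance, or None if nothing matched
docs = User.documents({})          # list of validated User instances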
def in_file(self, fn: str) -> Iterator[Statement]:
"""
Returns an iterator over all of the statements belonging to a file.
"""
yield from self.__file_to_statements.get(fn, []) | Returns an iterator over all of the statements belonging to a file. | entailment |
def at_line(self, line: FileLine) -> Iterator[Statement]:
"""
Returns an iterator over all of the statements located at a given line.
"""
num = line.num
for stmt in self.in_file(line.filename):
if stmt.location.start.line == num:
yield stmt | Returns an iterator over all of the statements located at a given line. | entailment |
def funcFindPrfGpu(idxPrc, vecMdlXpos, vecMdlYpos, vecMdlSd, aryFunc, # noqa
aryPrfTc, varL2reg, queOut, lgcPrint=True):
"""
Find best pRF model for voxel time course.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
vecMdlXpos : np.array
1D array with pRF model x positions.
vecMdlYpos : np.array
1D array with pRF model y positions.
vecMdlSd : np.array
1D array with pRF model sizes (SD of Gaussian).
aryFunc : np.array
2D array with functional MRI data, with shape aryFunc[voxel, time].
aryPrfTc : np.array
Array with pRF model time courses, with shape
aryPrfTc[x-pos, y-pos, SD, motion-direction, time]
varL2reg : float
L2 regularisation factor for ridge regression.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
lstOut : list
List containing the following objects:
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0.
vecBstXpos : np.array
1D array with best fitting x-position for each voxel, with shape
vecBstXpos[voxel].
vecBstYpos : np.array
1D array with best fitting y-position for each voxel, with shape
vecBstYpos[voxel].
vecBstSd : np.array
1D array with best fitting pRF size for each voxel, with shape
vecBstSd[voxel].
vecBstR2 : np.array
1D array with R2 value of 'winning' pRF model for each voxel, with
shape vecBstR2[voxel].
dummy : np.array
2D array that is supposed to contain the beta values of 'winning'
pRF models for each voxel, with shape aryBeta[voxel, beta]. AT THE
MOMENT, CONTAINS EMPTY DUMMY ARRAY (np.zeros).
Notes
-----
Uses a queue that runs in a separate thread to put model time courses on
the computational graph.
"""
# -------------------------------------------------------------------------
# *** Queue-feeding-function that will run in extra thread
def funcPlcIn():
"""Place data on queue."""
# Iteration counter:
idxCnt = 0
while True:
# Feed example to Tensorflow placeholder
aryTmp02 = lstPrfTc[idxCnt]
dicIn = {objPlcHld01: aryTmp02}
# Push to the queue:
objSess.run(objEnQ, feed_dict=dicIn)
idxCnt += 1
# Stop if coordinator says stop:
if objCoord.should_stop():
break
# Stop if all data has been put on the queue:
elif idxCnt == varNumMdls:
break
# -------------------------------------------------------------------------
# *** Prepare pRF model time courses for graph
if lgcPrint:
print('------Prepare pRF model time courses for graph')
# Information about pRF model parameters:
varNumX = np.shape(vecMdlXpos)[0]
varNumY = np.shape(vecMdlYpos)[0]
varNumPrfSizes = np.shape(vecMdlSd)[0]
# Number of predictors (betas):
varNumBeta = aryPrfTc.shape[3]
# At this point, aryPrfTc has the following dimensions:
# aryPrfTc[x-pos, y-pos, SD, motion-direction, time]
# Reshape pRF model time courses:
aryPrfTc = np.reshape(aryPrfTc,
((aryPrfTc.shape[0]
* aryPrfTc.shape[1]
* aryPrfTc.shape[2]),
aryPrfTc.shape[3],
aryPrfTc.shape[4]))
# Now, aryPrfTc has the following dimensions:
# aryPrfTc[(x-pos * y-pos * SD), motion-direction, time]
# Original total number of pRF time course models (before removing models
# with zero variance):
varNumMdlsTtl = aryPrfTc.shape[0]
# Change type to float 32:
aryPrfTc = aryPrfTc.astype(np.float32)
# The pRF model is fitted only if variance along time dimension is not
# zero. Get variance along time dimension:
vecVarPrfTc = np.var(aryPrfTc, axis=2)
# Zero with float32 precision for comparison:
varZero32 = np.array(([0.0])).astype(np.float32)[0]
# Boolean array for models with variance greater than zero for at least
# one motion direction:
vecLgcVar = np.max(
np.greater(vecVarPrfTc,
varZero32),
axis=1
)
# Take models with zero variance out of the array:
aryPrfTc = aryPrfTc[vecLgcVar, :, :]
# Swap axes, so that
# aryPrfTc[(x-pos * y-pos * SD), time, motion-direction]
aryPrfTc = np.swapaxes(aryPrfTc, 1, 2)
# Add constant term (ones):
# aryPrfTc = np.concatenate((aryPrfTc,
# np.ones((aryPrfTc.shape[0],
# aryPrfTc.shape[1],
# 1)).astype(np.float32)),
# axis=2)
# Size of pRF time courses in MB:
varSzePrf = np.divide(float(aryPrfTc.nbytes),
1000000.0)
if lgcPrint:
print(('---------Size of pRF time courses: '
+ str(np.around(varSzePrf))
+ ' MB'))
# Put pRF model time courses into list:
lstPrfTc = [None] * aryPrfTc.shape[0]
for idxMdl in range(int(aryPrfTc.shape[0])):
lstPrfTc[idxMdl] = aryPrfTc[idxMdl, :, :]
del(aryPrfTc)
# Total number of pRF models to fit:
varNumMdls = len(lstPrfTc)
# -------------------------------------------------------------------------
# *** Prepare functional data for graph
if lgcPrint:
print('------Prepare functional data for graph')
# Number of voxels to be fitted:
varNumVox = aryFunc.shape[0]
# Number of volumes:
varNumVol = aryFunc.shape[1]
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFunc = aryFunc.T
# Change type to float 32:
aryFunc = aryFunc.astype(np.float32)
# We cannot commit the entire functional data to GPU memory, we need to
# create chunks. Establish the limit (maximum size) of one chunk (in MB):
varSzeMax = 50.0 # 20.0
# Size of functional data in MB:
varSzeFunc = np.divide(float(aryFunc.nbytes),
1000000.0)
if lgcPrint:
print(('---------Size of functional data: '
+ str(np.around(varSzeFunc))
+ ' MB'))
# Number of chunks to create:
varNumChnk = int(np.ceil(np.divide(varSzeFunc, varSzeMax)))
if lgcPrint:
print(('---------Functional data will be split into '
+ str(varNumChnk)
+ ' batches'))
# Vector with the indicies at which the functional data will be separated
# in order to be chunked up for the parallel processes:
vecIdxChnks = np.linspace(0,
varNumVox,
num=varNumChnk,
endpoint=False)
vecIdxChnks = np.hstack((vecIdxChnks, varNumVox))
# List into which the chunks of functional data are put:
lstFunc = [None] * varNumChnk
# Put functional data into chunks:
for idxChnk in range(0, varNumChnk):
# Index of first voxel to be included in current chunk:
varChnkStr = int(vecIdxChnks[idxChnk])
# Index of last voxel to be included in current chunk:
varChnkEnd = int(vecIdxChnks[(idxChnk+1)])
# Put voxel array into list:
lstFunc[idxChnk] = aryFunc[:, varChnkStr:varChnkEnd]
# We will delete the original array holding the functional data to
# conserve memory. Before doing so, we need to calculate the mean (which
# will be needed for the calculation of R2).
# After finding the best fitting model for each voxel, we still have to
# calculate the coefficient of determination (R-squared) for each voxel. We
# start by calculating the total sum of squares (i.e. the deviation of the
# data from the mean). The mean of each time course:
vecFuncMean = np.mean(aryFunc, axis=0)
# Deviation from the mean for each datapoint:
vecFuncDev = np.subtract(aryFunc, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(vecFuncDev,
2.0),
axis=0)
# We don't need the original array with the functional data anymore (the
# above seems to have created a hard copy):
del(vecFuncDev)
del(aryFunc)
# -------------------------------------------------------------------------
# *** Miscellaneous preparations
# Vector for minimum squared residuals:
vecResSsMin = np.zeros((varNumVox), dtype=np.float32)
# Vector for indices of models with minimum residuals:
vecResSsMinIdx = np.zeros((varNumVox), dtype=np.int32)
# Multiply L2 regularization factor with identity matrix:
aryL2reg = np.multiply(np.eye(varNumBeta),
varL2reg).astype(np.float32)
# Reduce logging verbosity:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# -------------------------------------------------------------------------
# *** Prepare status indicator
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
(varNumMdls * varNumChnk),
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# -------------------------------------------------------------------------
# *** Loop through chunks
if lgcPrint:
print('------Run graph')
for idxChnk in range(varNumChnk):
if lgcPrint:
print(('---------Chunk: ' + str(idxChnk)))
print('lstPrfTc[0].shape')
print(lstPrfTc[0].shape)
# Define session:
# objSess = tf.Session()
with tf.Graph().as_default(), tf.Session() as objSess:
# -----------------------------------------------------------------
# *** Prepare queue
if lgcPrint:
print('------Define computational graph, queue & session')
# Queue capacity:
varCapQ = 10
# Dimensions of placeholder have to be determined outside of the
# tensor object, otherwise the object on which the size is
# calculated is loaded into GPU memory.
varDim01 = lstPrfTc[0].shape[0]
varDim02 = lstPrfTc[0].shape[1]
# The queue:
objQ = tf.FIFOQueue(capacity=varCapQ,
dtypes=[tf.float32],
shapes=[(varDim01, varDim02)])
# Method for getting queue size:
objSzeQ = objQ.size()
# Placeholder that is used to put design matrix on computational
# graph:
objPlcHld01 = tf.placeholder(tf.float32,
shape=[varDim01, varDim02])
# The enqueue operation that puts data on the graph.
objEnQ = objQ.enqueue([objPlcHld01])
# Number of threads that will be created:
varNumThrd = 1
# The queue runner creates threads that repeatedly run the enqueue
# operation to feed the queue.
objRunQ = tf.train.QueueRunner(objQ, [objEnQ] * varNumThrd)
tf.train.add_queue_runner(objRunQ)
# The tensor object that is retrieved from the queue. Functions
# like placeholders for the data in the queue when defining the
# graph.
objDsng = objQ.dequeue()
# Coordinator needs to be initialised:
objCoord = tf.train.Coordinator()
# -----------------------------------------------------------------
# *** Fill queue
# Buffer size (number of samples to put on queue before starting
# execution of graph):
varBuff = 10
# Define & run extra thread with graph that places data on queue:
objThrd = threading.Thread(target=funcPlcIn)
objThrd.setDaemon(True)
objThrd.start()
# Stay in this while loop until the specified number of samples
# (varBuff) have been placed on the queue.
varTmpSzeQ = 0
while varTmpSzeQ < varBuff:
varTmpSzeQ = objSess.run(objSzeQ)
# -----------------------------------------------------------------
# *** Prepare & run the graph
# Chunk of functional data:
aryTmp01 = np.copy(lstFunc[idxChnk])
with tf.device('/gpu:0'):
objFunc = tf.Variable(aryTmp01)
# Regularisation factor matrix:
with tf.device('/gpu:0'):
objL2reg = tf.Variable(aryL2reg)
# The computational graph. Operation that solves matrix (in the
# least squares sense), and calculates residuals along time
# dimension. There are two versions: (1) The number of measurements
# (e.g. volumes) is greater than or equal to the number of
# predictors (betas). (2) The number of measurements is less than
# the number of predictors.
# (1) Number of measurements greater/equal to number of predictors:
if np.greater_equal(varNumVol, varNumBeta):
objMatSlve = tf.reduce_sum(
tf.squared_difference(
objFunc,
tf.matmul(
objDsng,
tf.matmul(
tf.matmul(
tf.matrix_inverse(
tf.add(
tf.matmul(
objDsng,
objDsng,
transpose_a=True,
transpose_b=False
),
objL2reg
)
),
objDsng,
transpose_a=False,
transpose_b=True
),
objFunc
)
),
),
axis=0
)
# (2) Number of measurements less than number of predictors:
else:
objMatSlve = tf.reduce_sum(
tf.squared_difference(
objFunc,
tf.matmul(
objDsng,
tf.matmul(
tf.matmul(
objDsng,
tf.matrix_inverse(
tf.add(
tf.matmul(
objDsng,
objDsng,
transpose_a=False,
transpose_b=True
),
objL2reg
)
),
transpose_a=True,
transpose_b=False
),
objFunc
)
),
),
axis=0
)
# Variables need to be (re-)initialised:
objSess.run(tf.global_variables_initializer())
# Mark graph as read-only (adding further ops to the graph, a common
# cause of memory leaks, would then raise an error):
objSess.graph.finalize()
# Index of first voxel in current chunk (needed to assign results):
varChnkStr = int(vecIdxChnks[idxChnk])
# Index of last voxel in current chunk (needed to assign results):
varChnkEnd = int(vecIdxChnks[(idxChnk+1)])
# Array for results of current chunk:
aryTmpRes = np.zeros((varNumMdls,
lstFunc[idxChnk].shape[1]),
dtype=np.float32)
# Loop through models:
for idxMdl in range(varNumMdls):
# Run main computational graph and put results in list:
# varTme01 = time.time()
aryTmpRes[idxMdl, :] = objSess.run(objMatSlve)
# print(('---------Time for graph call: '
# + str(time.time() - varTme01)))
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Number of elements on queue:
varTmpSzeQ = objSess.run(objSzeQ)
# Prepare status message:
strStsMsg = ('---------Progress: '
+ str(vecStatPrc[varCntSts01])
+ ' % --- Number of elements on queue: '
+ str(varTmpSzeQ))
if lgcPrint:
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# Stop threads.
objCoord.request_stop()
# objSess.close()
# Get indices of models with minimum residuals (minimum along
# model-space) for current chunk:
vecResSsMinIdx[varChnkStr:varChnkEnd] = np.argmin(aryTmpRes, axis=0)
# Get minimum residuals of those models:
vecResSsMin[varChnkStr:varChnkEnd] = np.min(aryTmpRes, axis=0)
# -------------------------------------------------------------------------
# *** Post-process results
if lgcPrint:
print('------Post-processing results')
# Array for model parameters. At the moment, we have the indices of the
# best fitting models, so we need an array that tells us what model
# parameters these indices refer to.
aryMdl = np.zeros((varNumMdlsTtl, 3), dtype=np.float32)
# Model parameter can be represented as float32 as well:
vecMdlXpos = vecMdlXpos.astype(np.float32)
vecMdlYpos = vecMdlYpos.astype(np.float32)
vecMdlSd = vecMdlSd.astype(np.float32)
# The first column is to contain model x positions:
aryMdl[:, 0] = np.repeat(vecMdlXpos, int(varNumY * varNumPrfSizes))
# The second column is to contain model y positions:
aryMdl[:, 1] = np.repeat(
np.tile(vecMdlYpos,
varNumX),
varNumPrfSizes
)
# The third column is to contain model pRF sizes:
aryMdl[:, 2] = np.tile(vecMdlSd, int(varNumX * varNumY))
# Earlier, we had removed models with a variance of zero. Thus, those
# models were ignored and are not present in the results. We remove them
# from the model-parameter-array:
aryMdl = aryMdl[vecLgcVar]
# Retrieve model parameters of 'winning' model for all voxels:
vecBstXpos = aryMdl[:, 0][vecResSsMinIdx]
vecBstYpos = aryMdl[:, 1][vecResSsMinIdx]
vecBstSd = aryMdl[:, 2][vecResSsMinIdx]
# Coefficient of determination (1 - ratio of (residual sum of squares by
# total sum of squares)):
vecBstR2 = np.subtract(1.0,
np.divide(vecResSsMin,
vecSsTot)
)
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
np.zeros((varNumVox, (varNumBeta))).astype(np.float32)]
queOut.put(lstOut) | Find best pRF model for voxel time course.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
vecMdlXpos : np.array
1D array with pRF model x positions.
vecMdlYpos : np.array
1D array with pRF model y positions.
vecMdlSd : np.array
1D array with pRF model sizes (SD of Gaussian).
aryFunc : np.array
2D array with functional MRI data, with shape aryFunc[voxel, time].
aryPrfTc : np.array
Array with pRF model time courses, with shape
aryPrfTc[x-pos, y-pos, SD, motion-direction, time]
varL2reg : float
L2 regularisation factor for ridge regression.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
lstOut : list
List containing the following objects:
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0.
vecBstXpos : np.array
1D array with best fitting x-position for each voxel, with shape
vecBstXpos[voxel].
vecBstYpos : np.array
1D array with best fitting y-position for each voxel, with shape
vecBstYpos[voxel].
vecBstSd : np.array
1D array with best fitting pRF size for each voxel, with shape
vecBstSd[voxel].
vecBstR2 : np.array
1D array with R2 value of 'winning' pRF model for each voxel, with
shape vecBstR2[voxel].
dummy : np.array
2D array that is supposed to contain the beta values of 'winning'
pRF models for each voxel, with shape aryBeta[voxel, beta]. AT THE
MOMENT, CONTAINS EMPTY DUMMY ARRAY (np.zeros).
Notes
-----
Uses a queue that runs in a separate thread to put model time courses on
the computational graph. | entailment |
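The two graph branches above encode the closed-form ridge solution. As a minimal NumPy sketch of the primal form (used when there are at least as many time points as predictors; the names below are illustrative, not part of the module):
import numpy as np

def ridge_residuals(aryDsgn, aryFunc, varLambda):
    """Per-voxel sum of squared residuals of a ridge fit (illustrative)."""
    # beta = (X.T X + lambda * I)^-1 X.T Y
    varNumBeta = aryDsgn.shape[1]
    aryBeta = np.linalg.solve(
        aryDsgn.T.dot(aryDsgn) + varLambda * np.eye(varNumBeta),
        aryDsgn.T.dot(aryFunc))
    # Residuals summed over the time axis, one value per voxel:
    return np.sum((aryFunc - aryDsgn.dot(aryBeta)) ** 2, axis=0)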
def wrap(text, width=70, **kwargs):
"""Wrap multiple paragraphs of text, returning a list of wrapped lines.
Reformat the multiple paragraphs 'text' so they fit in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See ParagraphWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = ParagraphWrapper(width=width, **kwargs)
return w.wrap(text) | Wrap multiple paragraphs of text, returning a list of wrapped lines.
Reformat the multiple paragraphs 'text' so they fit in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See ParagraphWrapper class for available keyword args to customize
wrapping behaviour. | entailment |
def fill(text, width=70, **kwargs):
"""Fill multiple paragraphs of text, returning a new string.
Reformat multiple paragraphs in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped text. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See ParagraphWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = ParagraphWrapper(width=width, **kwargs)
return w.fill(text) | Fill multiple paragraphs of text, returning a new string.
Reformat multiple paragraphs in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped text. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See ParagraphWrapper class for
available keyword args to customize wrapping behaviour. | entailment |
def split(cls, text):
"""split(text : string) -> [string]
Splits 'text' into multiple paragraphs and returns a list of each
paragraph.
"""
result = [line.strip('\n') for line in cls.parasep_re.split(text)]
if result == ['', '']:
result = ['']
return result | split(text : string) -> [string]
Splits 'text' into multiple paragraphs and returns a list of each
paragraph. | entailment |
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the multiple paragraphs in 'text' so they fit in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
lines = []
linewrap = partial(textwrap.TextWrapper.wrap, self)
for para in self.split(text):
lines.extend(linewrap(para))
lines.append('') # Add newline between paragraphs
# Remove trailing newline
lines = lines[:-1]
return lines | wrap(text : string) -> [string]
Reformat the multiple paragraphs in 'text' so they fit in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space. | entailment |
def getSenderNumberMgtURL(self, CorpNum, UserID):
""" ν©μ€ μ μ‘λ΄μ νμ
URL
args
CorpNum : νμ μ¬μ
μλ²νΈ
UserID : νμ νλΉμμ΄λ
return
30μ΄ λ³΄μ ν ν°μ ν¬ν¨ν url
raise
PopbillException
"""
result = self._httpget('/FAX/?TG=SENDER', CorpNum, UserID)
return result.url | Fax sender number management popup URL
args
CorpNum : member company registration number
UserID : Popbill member ID
return
URL containing a 30-second security token
raise
PopbillException | entailment
def getUnitCost(self, CorpNum):
""" ν©μ€ μ μ‘ λ¨κ° νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
return
μ μ‘ λ¨κ° by float
raise
PopbillException
"""
result = self._httpget('/FAX/UnitCost', CorpNum)
return int(result.unitCost) | Check the fax transmission unit cost
args
CorpNum : Popbill member company registration number
return
transmission unit cost (returned as an int)
raise
PopbillException | entailment
def getFaxResult(self, CorpNum, ReceiptNum, UserID=None):
""" ν©μ€ μ μ‘κ²°κ³Ό μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ReceiptNum : μ μ‘μμ²μ λ°κΈλ°μ μ μλ²νΈ
UserID : νλΉνμ μμ΄λ
return
ν©μ€μ μ‘μ 보 as list
raise
PopbillException
"""
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "The receipt number is not valid.")
return self._httpget('/FAX/' + ReceiptNum, CorpNum, UserID) | Get fax transmission result
args
CorpNum : Popbill member company registration number
ReceiptNum : receipt number issued at the time of the transmission request
UserID : Popbill member ID
return
fax transmission information as list
raise
PopbillException | entailment
def getFaxResultRN(self, CorpNum, RequestNum, UserID=None):
""" ν©μ€ μ μ‘κ²°κ³Ό μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
UserID : νλΉνμ μμ΄λ
return
ν©μ€μ μ‘μ 보 as list
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "The request number was not entered.")
return self._httpget('/FAX/Get/' + RequestNum, CorpNum, UserID) | Get fax transmission result
args
CorpNum : Popbill member company registration number
RequestNum : request number assigned at the time of the transmission request
UserID : Popbill member ID
return
fax transmission information as list
raise
PopbillException | entailment
def sendFax(self, CorpNum, SenderNum, ReceiverNum, ReceiverName, FilePath, ReserveDT=None, UserID=None,
SenderName=None, adsYN=False, title=None, RequestNum=None):
""" ν©μ€ λ¨κ±΄ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
SenderNum : λ°μ μ λ²νΈ
ReceiverNum : μμ μ λ²νΈ
ReceiverName : μμ μ λͺ
FilePath : λ°μ νμΌκ²½λ‘
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
SenderName : λ°μ μλͺ
(λ보μ μ‘μ©)
adsYN : κ΄κ³ ν©μ€ μ¬λΆ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException
"""
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.sendFax_multi(CorpNum, SenderNum, receivers, FilePath, ReserveDT, UserID, SenderName, adsYN, title,
RequestNum) | Send a single fax
args
CorpNum : Popbill member company registration number
SenderNum : sender number
ReceiverNum : receiver number
ReceiverName : receiver name
FilePath : path of the file to send
ReserveDT : reservation time (format yyyyMMddHHmmss)
UserID : Popbill member ID
SenderName : sender name (for bulk sending)
adsYN : whether this is an advertising fax
title : fax title
RequestNum : request number to assign to the transmission request
return
receipt number (receiptNum)
raise
PopbillException | entailment
def sendFax_multi(self, CorpNum, SenderNum, Receiver, FilePath, ReserveDT=None, UserID=None, SenderName=None,
adsYN=False, title=None, RequestNum=None):
""" ν©μ€ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
SenderNum : λ°μ μ λ²νΈ (λ보μ μ‘μ©)
Receiver : μμ μ λ²νΈ(λ보μ μ‘μ©)
FilePath : λ°μ νμΌκ²½λ‘
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
SenderName : λ°μ μλͺ
(λ보μ μ‘μ©)
adsYN : κ΄κ³ ν©μ€ μ¬λΆ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException
"""
if SenderNum == None or SenderNum == "":
    raise PopbillException(-99999999, "The sender number was not entered.")
if Receiver == None:
    raise PopbillException(-99999999, "The receiver information was not entered.")
if not (type(Receiver) is str or type(Receiver) is FaxReceiver or type(Receiver) is list):
    raise PopbillException(-99999999, "'Receiver' argument type error. 'FaxReceiver' or List of 'FaxReceiver'.")
if FilePath == None:
    raise PopbillException(-99999999, "The path of the file to send was not entered.")
if not (type(FilePath) is str or type(FilePath) is list):
    raise PopbillException(-99999999, "Only a file path or a list of file paths can be given for the file to send.")
if type(FilePath) is list and (len(FilePath) < 1 or len(FilePath) > 20):
    raise PopbillException(-99999999, "Between 1 and 20 files can be sent.")
req = {"snd": SenderNum, "sndnm": SenderName, "fCnt": 1 if type(FilePath) is str else len(FilePath), "rcvs": [],
"sndDT": None}
if (type(Receiver) is str):
Receiver = FaxReceiver(receiveNum=Receiver)
if (type(Receiver) is FaxReceiver):
Receiver = [Receiver]
if adsYN:
req['adsYN'] = True
for r in Receiver:
req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName})
if ReserveDT != None:
req['sndDT'] = ReserveDT
if title != None:
req['title'] = title
if RequestNum != None:
req['requestNum'] = RequestNum
postData = self._stringtify(req)
if (type(FilePath) is str):
FilePath = [FilePath]
files = []
for filePath in FilePath:
with open(filePath, "rb") as f:
files.append(File(fieldName='file',
fileName=f.name,
fileData=f.read())
)
result = self._httppost_files('/FAX', postData, files, CorpNum, UserID)
return result.receiptNum | Send a fax
args
CorpNum : Popbill member company registration number
SenderNum : sender number (for bulk sending)
Receiver : receiver number(s) (for bulk sending)
FilePath : path of the file(s) to send
ReserveDT : reservation time (format yyyyMMddHHmmss)
UserID : Popbill member ID
SenderName : sender name (for bulk sending)
adsYN : whether this is an advertising fax
title : fax title
RequestNum : request number to assign to the transmission request
return
receipt number (receiptNum)
raise
PopbillException | entailment
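A hedged usage sketch; the credentials, numbers and file path below are placeholders, and a valid Popbill account would be needed for the call to succeed:
from popbill import FaxService, FaxReceiver, PopbillException

faxService = FaxService('LinkID', 'SecretKey')  # placeholder credentials

try:
    receiptNum = faxService.sendFax_multi(
        CorpNum='1234567890',  # placeholder company registration number
        SenderNum='07000000000',
        Receiver=[FaxReceiver(receiveNum='07011111111',
                              receiveName='Receiver')],
        FilePath='/tmp/fax.pdf')  # placeholder file
    print(receiptNum)
except PopbillException as pe:
    print(pe.code, pe.message)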
def resendFax(self, CorpNum, ReceiptNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
""" ν©μ€ λ¨κ±΄ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ReceiptNum : ν©μ€ μ μλ²νΈ
SenderNum : λ°μ μ λ²νΈ
SenderName : λ°μ μλͺ
ReceiverNum : μμ λ²νΈ
ReceiverName : μμ μλͺ
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException
"""
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFax_multi(CorpNum, ReceiptNum, SenderNum, SenderName, receivers, ReserveDT, UserID, title,
RequestNum) | Resend a single fax
args
CorpNum : Popbill member company registration number
ReceiptNum : fax receipt number
SenderNum : sender number
SenderName : sender name
ReceiverNum : receiver number
ReceiverName : receiver name
ReserveDT : reservation time (format yyyyMMddHHmmss)
UserID : Popbill member ID
title : fax title
RequestNum : request number to assign to the transmission request
return
receipt number (receiptNum)
raise
PopbillException | entailment
def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
""" ν©μ€ λ¨κ±΄ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
OrgRequestNum : μλ³Έ ν©μ€ μ μ‘μ ν λΉν μ μ‘μμ²λ²νΈ
ReceiptNum : ν©μ€ μ μλ²νΈ
SenderNum : λ°μ μ λ²νΈ
SenderName : λ°μ μλͺ
ReceiverNum : μμ λ²νΈ
ReceiverName : μμ μλͺ
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException
"""
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT,
UserID, title, RequestNum) | Resend a single fax (by request number)
args
CorpNum : Popbill member company registration number
OrgRequestNum : request number assigned to the original fax transmission
SenderNum : sender number
SenderName : sender name
ReceiverNum : receiver number
ReceiverName : receiver name
ReserveDT : reservation time (format yyyyMMddHHmmss)
UserID : Popbill member ID
title : fax title
RequestNum : request number to assign to the transmission request
return
receipt number (receiptNum)
raise
PopbillException | entailment
def resendFaxRN_multi(self, CorpNum, OrgRequestNum, SenderNum, SenderName, Receiver, ReserveDT=None, UserID=None,
title=None, RequestNum=None):
""" ν©μ€ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
OrgRequestNum : μλ³Έ ν©μ€ μ μ‘μ ν λΉν μ μ‘μμ²λ²νΈ
SenderNum : λ°μ μ λ²νΈ
SenderName : λ°μ μλͺ
Receiver : μμ μμ 보 λ°°μ΄
ReserveDT : μμ½μκ°(νμ yyyyMMddHHmmss)
UserID : νλΉνμ μμ΄λ
title : ν©μ€μ λͺ©
RequestNum : μ μ‘μμ²μ ν λΉν μ μ‘μμ²λ²νΈ
return
μ μλ²νΈ (receiptNum)
raise
PopbillException
"""
req = {}
if not OrgRequestNum:
raise PopbillException(-99999999, "The original fax request number was not entered.")
if SenderNum != "":
req['snd'] = SenderNum
if SenderName != "":
req['sndnm'] = SenderName
if ReserveDT != None:
req['sndDT'] = ReserveDT
if title != None:
req['title'] = title
if RequestNum != None:
req['requestNum'] = RequestNum
if Receiver != None:
req['rcvs'] = []
if (type(Receiver) is str):
Receiver = FaxReceiver(receiveNum=Receiver)
if (type(Receiver) is FaxReceiver):
Receiver = [Receiver]
for r in Receiver:
req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName})
postData = self._stringtify(req)
return self._httppost('/FAX/Resend/' + OrgRequestNum, postData, CorpNum, UserID).receiptNum | Resend a fax (by request number)
args
CorpNum : Popbill member company registration number
OrgRequestNum : request number assigned to the original fax transmission
SenderNum : sender number
SenderName : sender name
Receiver : array of receiver information
ReserveDT : reservation time (format yyyyMMddHHmmss)
UserID : Popbill member ID
title : fax title
RequestNum : request number to assign to the transmission request
return
receipt number (receiptNum)
raise
PopbillException | entailment
def getPreviewURL(self, CorpNum, ReceiptNum, UserID):
""" ν©μ€ λ°μ λ²νΈ λͺ©λ‘ νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. list of SenderNumber
raise
PopbillException
"""
return self._httpget('/FAX/Preview/' + ReceiptNum, CorpNum, UserID).url | Fax preview popup URL
args
CorpNum : Popbill member company registration number
ReceiptNum : fax receipt number
UserID : Popbill member ID
return
URL of the fax preview popup
raise
PopbillException | entailment
def prepare_outdir(outdir):
"""
Creates the output directory if it does not exist.
If outdir is None, nothing happens.
:param outdir: The output directory to create.
"""
if outdir:
outdir = os.path.expanduser(outdir)
if not os.path.isdir(outdir):
try:
os.makedirs(outdir)
except os.error as e:
raise JobExecutionError('Failed to create outdir "{}".\n{}'.format(outdir, str(e))) | Creates the output directory if it does not exist.
If outdir is None, nothing happens.
:param outdir: The output directory to create. | entailment |
def requestJob(self, CorpNum, Type, SDate, EDate, UserID=None):
""" μμ§ μμ²
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
Type : λ¬Έμνν, SELL-λ§€μΆ, BUY-λ§€μ
,
SDate : μμμΌμ, νμνμ(yyyyMMdd)
EDate : μ’
λ£μΌμ, νμνμ(yyyyMMdd)
UserID : νλΉνμ μμ΄λ
return
μμ
μμ΄λ (jobID)
raise
PopbillException
"""
if Type == None or Type == '':
    raise PopbillException(-99999999, "The document type was not entered.")
if SDate == None or SDate == '':
    raise PopbillException(-99999999, "The start date was not entered.")
if EDate == None or EDate == '':
    raise PopbillException(-99999999, "The end date was not entered.")
uri = '/HomeTax/Cashbill/' + Type
uri += '?SDate=' + SDate
uri += '&EDate=' + EDate
return self._httppost(uri, "", CorpNum, UserID).jobID | Request a collection job
args
CorpNum : Popbill member company registration number
Type : document type, SELL - sales, BUY - purchases
SDate : start date (format yyyyMMdd)
EDate : end date (format yyyyMMdd)
UserID : Popbill member ID
return
job ID (jobID)
raise
PopbillException | entailment
def search(self, CorpNum, JobID, TradeType, TradeUsage, Page, PerPage, Order, UserID=None):
""" μμ§ κ²°κ³Ό μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
TradeType : λ¬Έμνν λ°°μ΄, N-μΌλ° νκΈμμμ¦, C-μ·¨μ νκΈμμμ¦
TradeUsage : κ±°λκ΅¬λΆ λ°°μ΄, P-μλ±κ³΅μ μ©, C-μ§μΆμ¦λΉμ©
Page : νμ΄μ§ λ²νΈ
PerPage : νμ΄μ§λΉ λͺ©λ‘ κ°μ, μ΅λ 1000κ°
Order : μ λ ¬ λ°©ν₯, D-λ΄λ¦Όμ°¨μ, A-μ€λ¦μ°¨μ
UserID : νλΉνμ μμ΄λ
return
μμ§ κ²°κ³Ό μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "The job ID (jobID) is not valid.")
uri = '/HomeTax/Cashbill/' + JobID
uri += '?TradeType=' + ','.join(TradeType)
uri += '&TradeUsage=' + ','.join(TradeUsage)
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
return self._httpget(uri, CorpNum, UserID) | Query collection results
args
CorpNum : Popbill member company registration number
JobID : job ID
TradeType : document type array, N - normal cash receipt, C - cancelled cash receipt
TradeUsage : trade usage array, P - personal income-deduction use, C - business expense proof
Page : page number
PerPage : number of items per page, up to 1000
Order : sort direction, D - descending, A - ascending
UserID : Popbill member ID
return
collection result information
raise
PopbillException | entailment
def summary(self, CorpNum, JobID, TradeType, TradeUsage, UserID=None):
""" μμ§ κ²°κ³Ό μμ½μ 보 μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
JobID : μμ
μμ΄λ
TradeType : λ¬Έμνν λ°°μ΄, N-μΌλ° νκΈμμμ¦, C-μ·¨μ νκΈμμμ¦
TradeUsage : κ±°λκ΅¬λΆ λ°°μ΄, P-μλ±κ³΅μ μ©, C-μ§μΆμ¦λΉμ©
UserID : νλΉνμ μμ΄λ
return
μμ§ κ²°κ³Ό μμ½μ 보
raise
PopbillException
"""
if JobID == None or len(JobID) != 18:
raise PopbillException(-99999999, "The job ID (jobID) is not valid.")
uri = '/HomeTax/Cashbill/' + JobID + '/Summary'
uri += '?TradeType=' + ','.join(TradeType)
uri += '&TradeUsage=' + ','.join(TradeUsage)
return self._httpget(uri, CorpNum, UserID) | Query collection result summary
args
CorpNum : Popbill member company registration number
JobID : job ID
TradeType : document type array, N - normal cash receipt, C - cancelled cash receipt
TradeUsage : trade usage array, P - personal income-deduction use, C - business expense proof
UserID : Popbill member ID
return
collection result summary information
raise
PopbillException | entailment
def registDeptUser(self, CorpNum, DeptUserID, DeptUserPWD, UserID=None):
""" ννμ€ νκΈμμμ¦ λΆμμ¬μ©μ κ³μ λ±λ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
DeptUserID : ννμ€ λΆμμ¬μ©μ κ³μ μμ΄λ
DeptUserPWD : ννμ€ λΆμμ¬μ©μ κ³μ λΉλ°λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if DeptUserID == None or len(DeptUserID) == 0:
    raise PopbillException(-99999999, "The Hometax department user account ID was not entered.")
if DeptUserPWD == None or len(DeptUserPWD) == 0:
    raise PopbillException(-99999999, "The Hometax department user account password was not entered.")
req = {}
req["id"] = DeptUserID
req["pwd"] = DeptUserPWD
postData = self._stringtify(req)
return self._httppost("/HomeTax/Cashbill/DeptUser", postData, CorpNum, UserID) | Register a Hometax cash receipt department user account
args
CorpNum : Popbill member company registration number
DeptUserID : Hometax department user account ID
DeptUserPWD : Hometax department user account password
UserID : Popbill member ID
return
processing result, consisting of code and message
raise
PopbillException | entailment
def model_node(**kwargs):
"""
Decorates a ``schematics.Model`` class to add it as a field
of type ``schematics.types.ModelType``.
Keyword arguments are passed to ``schematics.types.ModelType``.
Example:
.. code-block:: python
:emphasize-lines: 8,13
from schematics import Model, types
from rafter.contrib.schematics.helpers import model_node
class MyModel(Model):
name = types.StringType()
@model_node()
class options(Model):
status = types.IntType()
# With arguments and another name
@model_node(serialized_name='extra', required=True)
class _extra(Model):
test = types.StringType()
"""
kwargs.setdefault('default', {})
def decorator(model):
return types.ModelType(model, **kwargs)
return decorator | Decorates a ``schematics.Model`` class to add it as a field
of type ``schematics.types.ModelType``.
Keyword arguments are passed to ``schematics.types.ModelType``.
Example:
.. code-block:: python
:emphasize-lines: 8,13
from schematics import Model, types
from rafter.contrib.schematics.helpers import model_node
class MyModel(Model):
name = types.StringType()
@model_node()
class options(Model):
status = types.IntType()
# With arguments and another name
@model_node(serialized_name='extra', required=True)
class _extra(Model):
test = types.StringType() | entailment |
def for_each_file(base_dir, func):
"""
Calls func(filename) for every file under base_dir.
:param base_dir: A directory containing files
:param func: The function to call with every file.
"""
for dir_path, _, file_names in os.walk(base_dir):
for filename in file_names:
func(os.path.join(dir_path, filename)) | Calls func(filename) for every file under base_dir.
:param base_dir: A directory containing files
:param func: The function to call with every file. | entailment |
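For example, summing file sizes under a tree (the path is hypothetical):
import os

sizes = []
for_each_file('/tmp/data', lambda p: sizes.append(os.path.getsize(p)))
print(sum(sizes))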
def make_file_read_only(file_path):
"""
Removes the write permissions for the given file for owner, groups and others.
:param file_path: The file whose privileges are revoked.
:raise FileNotFoundError: If the given file does not exist.
"""
old_permissions = os.stat(file_path).st_mode
os.chmod(file_path, old_permissions & ~WRITE_PERMISSIONS) | Removes the write permissions for the given file for owner, groups and others.
:param file_path: The file whose privileges are revoked.
:raise FileNotFoundError: If the given file does not exist. | entailment |
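WRITE_PERMISSIONS is defined elsewhere in this module; a plausible definition, assuming the standard stat constants, would be:
import stat

# Write bits for owner, group and others:
WRITE_PERMISSIONS = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH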
def load_config(strCsvCnfg, lgcTest=False, lgcPrint=True):
"""
Load py_pRF_mapping config file.
Parameters
----------
strCsvCnfg : string
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, absolute path of this function
will be prepended to config file paths.
lgcPrint : Boolean
Print config parameters?
Returns
-------
dicCnfg : dict
Dictionary containing parameter names (as keys) and parameter values
(as values). For example, `dicCnfg['varTr']` contains a float, such as
`2.94`.
"""
# Dictionary with config information:
dicCnfg = {}
# Open file with parameter configuration:
# fleConfig = open(strCsvCnfg, 'r')
with open(strCsvCnfg, 'r') as fleConfig:
# Read file with ROI information:
csvIn = csv.reader(fleConfig,
delimiter='\n',
skipinitialspace=True)
# Loop through csv object to fill list with csv data:
for lstTmp in csvIn:
# Skip comments (i.e. lines starting with '#') and empty lines.
# Note: Indexing the list (i.e. lstTmp[0][0]) does not work for
# empty lines. However, if the first condition is no fullfilled
# (i.e. line is empty and 'if lstTmp' evaluates to false), the
# second logical test (after the 'and') is not actually carried
# out.
if lstTmp and not (lstTmp[0][0] == '#'):
# Name of current parameter (e.g. 'varTr'):
strParamKey = lstTmp[0].split(' = ')[0]
# print(strParamKey)
# Current parameter value (e.g. '2.94'):
strParamVlu = lstTmp[0].split(' = ')[1]
# print(strParamVlu)
# Put paramter name (key) and value (item) into dictionary:
dicCnfg[strParamKey] = strParamVlu
# Are model parameters in cartesian or polar coordinates?
# set either pol (polar) or crt (cartesian)
dicCnfg['strKwCrd'] = ast.literal_eval(dicCnfg['strKwCrd'])
if lgcPrint:
print('---Model coordinates are in: ' + str(dicCnfg['strKwCrd']))
# Number of x- or radial positions to model:
dicCnfg['varNum1'] = int(dicCnfg['varNum1'])
# Number of y- or angular positions to model:
dicCnfg['varNum2'] = int(dicCnfg['varNum2'])
if lgcPrint:
if dicCnfg['strKwCrd'] == 'crt':
print('---Number of x-positions to model: ' +
str(dicCnfg['varNum1']))
print('---Number of y-positions to model: ' +
str(dicCnfg['varNum2']))
elif dicCnfg['strKwCrd'] == 'pol':
print('---Number of radial positions to model: ' +
str(dicCnfg['varNum1']))
print('---Number of angular positions to model: ' +
str(dicCnfg['varNum2']))
# Number of pRF sizes to model:
dicCnfg['varNumPrfSizes'] = int(dicCnfg['varNumPrfSizes'])
if lgcPrint:
print('---Number of pRF sizes to model: '
+ str(dicCnfg['varNumPrfSizes']))
# Extent of visual space from centre of the screen in negative x-direction
# (i.e. from the fixation point to the left end of the screen) in degrees
# of visual angle.
dicCnfg['varExtXmin'] = float(dicCnfg['varExtXmin'])
if lgcPrint:
print('---Extent of visual space in negative x-direction: '
+ str(dicCnfg['varExtXmin']))
# Extent of visual space from centre of the screen in positive x-direction
# (i.e. from the fixation point to the right end of the screen) in degrees
# of visual angle.
dicCnfg['varExtXmax'] = float(dicCnfg['varExtXmax'])
if lgcPrint:
print('---Extent of visual space in positive x-direction: '
+ str(dicCnfg['varExtXmax']))
# Extent of visual space from centre of the screen in negative y-direction
# (i.e. from the fixation point to the lower end of the screen) in degrees
# of visual angle.
dicCnfg['varExtYmin'] = float(dicCnfg['varExtYmin'])
if lgcPrint:
print('---Extent of visual space in negative y-direction: '
+ str(dicCnfg['varExtYmin']))
# Extent of visual space from centre of the screen in positive y-direction
# (i.e. from the fixation point to the upper end of the screen) in degrees
# of visual angle.
dicCnfg['varExtYmax'] = float(dicCnfg['varExtYmax'])
if lgcPrint:
print('---Extent of visual space in positive y-direction: '
+ str(dicCnfg['varExtYmax']))
# Minimum pRF model size (standard deviation of 2D Gaussian) [degrees of
# visual angle]:
dicCnfg['varPrfStdMin'] = float(dicCnfg['varPrfStdMin'])
if lgcPrint:
print('---Minimum pRF model size: ' + str(dicCnfg['varPrfStdMin']))
# Maximum pRF model size (standard deviation of 2D Gaussian) [degrees of
# visual angle]:
dicCnfg['varPrfStdMax'] = float(dicCnfg['varPrfStdMax'])
if lgcPrint:
print('---Maximum pRF model size: ' + str(dicCnfg['varPrfStdMax']))
# Volume TR of input data [s]:
dicCnfg['varTr'] = float(dicCnfg['varTr'])
if lgcPrint:
print('---Volume TR of input data [s]: ' + str(dicCnfg['varTr']))
# Voxel resolution of fMRI data [mm]:
dicCnfg['varVoxRes'] = float(dicCnfg['varVoxRes'])
if lgcPrint:
print('---Voxel resolution of fMRI data [mm]: '
+ str(dicCnfg['varVoxRes']))
# Number of fMRI volumes and png files to load:
dicCnfg['varNumVol'] = int(dicCnfg['varNumVol'])
if lgcPrint:
print('---Total number of fMRI volumes and png files: '
+ str(dicCnfg['varNumVol']))
# Extent of temporal smoothing for fMRI data and pRF time course models
# [standard deviation of the Gaussian kernel, in seconds]:
# same temporal smoothing will be applied to pRF model time courses
dicCnfg['varSdSmthTmp'] = float(dicCnfg['varSdSmthTmp'])
if lgcPrint:
print('---Extent of temporal smoothing (Gaussian SD in [s]): '
+ str(dicCnfg['varSdSmthTmp']))
# Number of processes to run in parallel:
dicCnfg['varPar'] = int(dicCnfg['varPar'])
if lgcPrint:
print('---Number of processes to run in parallel: '
+ str(dicCnfg['varPar']))
# Size of space model in which the pRF models are
# created (x- and y-dimension).
dicCnfg['tplVslSpcSze'] = tuple([int(dicCnfg['varVslSpcSzeX']),
int(dicCnfg['varVslSpcSzeY'])])
if lgcPrint:
print('---Size of visual space model (x & y): '
+ str(dicCnfg['tplVslSpcSze']))
# Path(s) of functional data:
dicCnfg['lstPathNiiFunc'] = ast.literal_eval(dicCnfg['lstPathNiiFunc'])
if lgcPrint:
print('---Path(s) of functional data:')
for strTmp in dicCnfg['lstPathNiiFunc']:
print(' ' + str(strTmp))
# Path of mask (to restrict pRF model finding):
dicCnfg['strPathNiiMask'] = ast.literal_eval(dicCnfg['strPathNiiMask'])
if lgcPrint:
print('---Path of mask (to restrict pRF model finding):')
print(' ' + str(dicCnfg['strPathNiiMask']))
# Output basename:
dicCnfg['strPathOut'] = ast.literal_eval(dicCnfg['strPathOut'])
if lgcPrint:
print('---Output basename:')
print(' ' + str(dicCnfg['strPathOut']))
# Which version to use for pRF finding. 'numpy' or 'cython' for pRF finding
# on CPU, 'gpu' for using GPU.
dicCnfg['strVersion'] = ast.literal_eval(dicCnfg['strVersion'])
if lgcPrint:
print('---Version (numpy, cython, or gpu): '
+ str(dicCnfg['strVersion']))
# Create pRF time course models?
dicCnfg['lgcCrteMdl'] = (dicCnfg['lgcCrteMdl'] == 'True')
if lgcPrint:
print('---Create pRF time course models: '
+ str(dicCnfg['lgcCrteMdl']))
# Path to npy file with pRF time course models (to save or laod). Without
# file extension.
dicCnfg['strPathMdl'] = ast.literal_eval(dicCnfg['strPathMdl'])
if lgcPrint:
print('---Path to npy file with pRF time course models (to save '
+ 'or load):')
print(' ' + str(dicCnfg['strPathMdl']))
# switch to determine which hrf functions should be used
# 1: canonical, 2: can and temp derivative, 3: can, temp and spat deriv
dicCnfg['switchHrfSet'] = ast.literal_eval(dicCnfg['switchHrfSet'])
if lgcPrint:
print('---Switch to determine which hrf functions should be used: '
+ str(dicCnfg['switchHrfSet']))
# should model fitting be based on k-fold cross-validation?
# if not, set to 1
dicCnfg['varNumXval'] = ast.literal_eval(dicCnfg['varNumXval'])
if lgcPrint:
print('---Model fitting will have this number of folds for xval: '
+ str(dicCnfg['varNumXval']))
# If we create new pRF time course models, the following parameters have to
# be provided:
if dicCnfg['lgcCrteMdl']:
# Name of the npy that holds spatial info about conditions
dicCnfg['strSptExpInf'] = ast.literal_eval(dicCnfg['strSptExpInf'])
if lgcPrint:
print('---Path to npy file with spatial condition info: ')
print(' ' + str(dicCnfg['strSptExpInf']))
# Name of the npy that holds temporal info about conditions
dicCnfg['strTmpExpInf'] = ast.literal_eval(dicCnfg['strTmpExpInf'])
if lgcPrint:
print('---Path to npy file with temporal condition info: ')
print(' ' + str(dicCnfg['strTmpExpInf']))
# Factor by which time courses and HRF will be upsampled for the
# convolutions
dicCnfg['varTmpOvsmpl'] = ast.literal_eval(dicCnfg['varTmpOvsmpl'])
if lgcPrint:
print('---Factor by which time courses and HRF will be upsampled: '
+ str(dicCnfg['varTmpOvsmpl']))
# Is this a test?
if lgcTest:
# Prepend absolute path of this file to config file paths:
dicCnfg['strPathNiiMask'] = (strDir + dicCnfg['strPathNiiMask'])
dicCnfg['strPathOut'] = (strDir + dicCnfg['strPathOut'])
dicCnfg['strPathMdl'] = (strDir + dicCnfg['strPathMdl'])
dicCnfg['strSptExpInf'] = (strDir + dicCnfg['strSptExpInf'])
dicCnfg['strTmpExpInf'] = (strDir + dicCnfg['strTmpExpInf'])
# Loop through functional runs:
varNumRun = len(dicCnfg['lstPathNiiFunc'])
for idxRun in range(varNumRun):
dicCnfg['lstPathNiiFunc'][idxRun] = (
strDir
+ dicCnfg['lstPathNiiFunc'][idxRun]
)
return dicCnfg | Load py_pRF_mapping config file.
Parameters
----------
strCsvCnfg : string
Absolute file path of config file.
lgcTest : Boolean
Whether this is a test (pytest). If yes, absolute path of this function
will be prepended to config file paths.
lgcPrint : Boolean
Print config parameters?
Returns
-------
dicCnfg : dict
Dictionary containing parameter names (as keys) and parameter values
(as values). For example, `dicCnfg['varTr']` contains a float, such as
`2.94`. | entailment |
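The parser above expects one `name = value` pair per line, with '#' starting a comment; an illustrative config excerpt (values are examples only) might look like:
# Volume TR of input data [s]:
varTr = 2.94
# Number of x-positions to model:
varNum1 = 40
# Model coordinates ('crt' or 'pol'):
strKwCrd = 'crt'
# Path(s) of functional data:
lstPathNiiFunc = ['/data/func_run01.nii.gz']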
async def status_by_coordinates(
self, latitude: float, longitude: float) -> dict:
"""Get symptom data for the location nearest to the user's lat/lon."""
return await self.nearest_by_coordinates(latitude, longitude) | Get symptom data for the location nearest to the user's lat/lon. | entailment |
async def status_by_zip(self, zip_code: str) -> dict:
"""Get symptom data for the provided ZIP code."""
try:
location = next((
d for d in await self.user_reports()
if d['zip'] == zip_code))
except StopIteration:
return {}
return await self.status_by_coordinates(
float(location['latitude']), float(location['longitude'])) | Get symptom data for the provided ZIP code. | entailment |
def print_request(request):
""" Prints a prepared request to give the user info as to what they're sending
:param requests.PreparedRequest request: PreparedRequest object to be printed
:return: Nothing
"""
print('{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
request.method + ' ' + request.url,
'\n'.join('{}: {}'.format(k, v) for k, v in request.headers.items()),
request.body,
)) | Prints a prepared request to give the user info as to what they're sending
:param requests.PreparedRequest request: PreparedRequest object to be printed
:return: Nothing | entailment |
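A usage sketch with the requests library (URL and payload are placeholders):
import requests

req = requests.Request('POST', 'https://example.com/api',
                       headers={'Content-Type': 'application/json'},
                       data='{"key": "value"}')
print_request(req.prepare())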
def filter_validate_schemas(get_response, params):
"""
This filter validates input data against the resource's
``request_schema`` and fills the request's ``validated`` dict.
Data from ``request.params`` and ``request.body`` (when the request body
is of a form type) will be converted using the schema in order to get
proper lists or unique values.
.. important::
The request validation is only effective when a
``request_schema`` has been provided by the resource definition.
"""
request_schema = params.get('request_schema')
if request_schema is None:
return get_response
def _convert_params(schema, data):
for sc in schema.fields.values():
name = sc.serialized_name or sc.name
val = data.getlist(name)
if val is None:
continue
if len(val) == 1 and not isinstance(sc, ListType):
val = val[0]
data[name] = val
async def decorated_filter(request, *args, **kwargs):
data = {
'headers': CIDict(request.headers),
'path': request.app.router.get(request)[2],
'params': RequestParameters(request.args),
'body': {}
}
if request.body:
# Get body if we have something there
if request.form:
data['body'] = RequestParameters(request.form)
else:
# will raise 400 if cannot parse json
data['body'] = deepcopy(request.json)
if hasattr(request_schema, 'body') and request.form:
_convert_params(request_schema.body, data['body'])
if hasattr(request_schema, 'params') and data['params']:
_convert_params(request_schema.params, data['params'])
# Now, validate the whole thing
try:
model = request_schema(data, strict=False, validate=False)
model.validate()
request.validated = model.to_native()
except BaseError as e:
raise ValidationErrors(e.to_primitive())
return await get_response(request, *args, **kwargs)
return decorated_filter | This filter validates input data against the resource's
``request_schema`` and fills the request's ``validated`` dict.
Data from ``request.params`` and ``request.body`` (when the request body
is of a form type) will be converted using the schema in order to get
proper lists or unique values.
.. important::
The request validation is only effective when a
``request_schema`` has been provided by the resource definition. | entailment |
def filter_validate_response(get_response, params):
"""
This filter processes the returned response. It does two things:
- If the response is a ``sanic.response.HTTPResponse`` and not a
:class:`rafter.http.Response`, return it immediately.
- It processes, validates and serializes this response when a schema
is provided.
That means that you can always return a normal Sanic's HTTPResponse
and thus, bypass the validation process when you need to do so.
.. important::
The response validation is only effective when:
- A ``response_schema`` has been provided by the resource definition
- The resource returns a :class:`rafter.http.Response` instance
or arbitrary data.
"""
schema = params.get('response_schema')
async def decorated_filter(request, *args, **kwargs):
response = await get_response(request, *args, **kwargs)
if isinstance(response, HTTPResponse) and \
not isinstance(response, Response):
return response
if not isinstance(response, Response):
raise TypeError('response is not an instance '
'of rafter.http.Response.')
if schema:
data = {
'body': response.data,
'headers': response.headers
}
try:
model = schema(data, strict=False, validate=False)
model.validate()
result = model.to_primitive()
response.body = result.get('body', None)
response.headers.update(result.get('headers', {}))
except BaseError as e:
log.exception(e)
abort(500, 'Wrong data output')
return response
return decorated_filter | This filter processes the returned response. It does two things:
- If the response is a ``sanic.response.HTTPResponse`` and not a
:class:`rafter.http.Response`, return it immediately.
- It processes, validates and serializes this response when a schema
is provided.
That means that you can always return a normal Sanic's HTTPResponse
and thus, bypass the validation process when you need to do so.
.. important::
The response validation is only effective when:
- A ``response_schema`` has been provided by the resource definition
- The resource returns a :class:`rafter.http.Response` instance
or arbitrary data. | entailment |
def make_EPUB(parsed_article,
output_directory,
input_path,
image_directory,
config_module=None,
epub_version=None,
batch=False):
"""
Standard workflow for creating an EPUB document.
make_EPUB is used to produce an EPUB file from a parsed article. In addition
to the article it also requires a path to the appropriate image directory
which it will insert into the EPUB file, as well the output directory
location for the EPUB file.
Parameters
----------
parsed_article : openaccess_epub.article.Article instance
`parsed_article` is an Article instance for the XML document to be
converted to EPUB.
output_directory : str
`output_directory` is a string path to the directory in which the EPUB
will be produced. The name of the directory will be used as the EPUB's
filename.
input_path : str
`input_path` is a string absolute path to the input XML file, used to
locate input-relative images.
image_directory : str
`image_directory` is a string path indicating an explicit image
directory. If supplied, other image input methods will not be used.
config_module : config module, optional
`config_module` is a pre-loaded config module for OpenAccess_EPUB; if
not used then this function will load the global config file. Might be
useful in certain cases to dynamically alter configuration.
epub_version : {None, 2, 3}
`epub_version` dictates which version of EPUB to be created. An error
will be raised if the specified version is not supported for the
publisher. If left to the default, the created version will defer to the
publisher default version.
batch : bool, optional
`batch` indicates that batch creation is being used (such as with the
`oaepub batch` command). In this case, directory conflicts will be
automatically resolved (in favor of keeping previous data, skipping
creation of EPUB).
Returns False in the case of a fatal error, True if successful.
"""
#command_log.info('Creating {0}.epub'.format(output_directory))
if config_module is None:
config_module = openaccess_epub.utils.load_config_module()
if epub_version not in (None, 2, 3):
log.error('Invalid EPUB version: {0}'.format(epub_version))
raise ValueError('Invalid EPUB version. Should be 2 or 3')
if epub_version is None:
epub_version = parsed_article.publisher.epub_default
#Handle directory output conflicts
if os.path.isdir(output_directory):
if batch: # No user prompt, default to protect previous data
log.error('Directory conflict during batch conversion, skipping.')
return False
else: # User prompting
openaccess_epub.utils.dir_exists(output_directory)
else:
try:
os.makedirs(output_directory)
except OSError as err:
if err.errno != 17:
log.exception('Unable to recursively create output directories')
#Copy over the basic epub directory
make_epub_base(output_directory)
#Get the images, if possible, fail gracefully if not
success = openaccess_epub.utils.images.get_images(output_directory,
image_directory,
input_path,
config_module,
parsed_article)
if not success:
log.critical('Images for the article were not located! Aborting!')
return False
#Instantiate Navigation and Package
epub_nav = Navigation()
epub_package = Package()
#Process the article for navigation and package info
epub_nav.process(parsed_article)
epub_package.process(parsed_article)
#Render the content using publisher-specific methods
parsed_article.publisher.render_content(output_directory, epub_version)
if epub_version == 2:
epub_nav.render_EPUB2(output_directory)
epub_package.render_EPUB2(output_directory)
elif epub_version == 3:
epub_nav.render_EPUB3(output_directory)
epub_package.render_EPUB3(output_directory)
#Zip the directory into EPUB
epub_zip(output_directory)
return True | Standard workflow for creating an EPUB document.
make_EPUB is used to produce an EPUB file from a parsed article. In addition
to the article it also requires a path to the appropriate image directory
which it will insert into the EPUB file, as well the output directory
location for the EPUB file.
Parameters
----------
parsed_article : openaccess_epub.article.Article instance
`parsed_article` is an Article instance for the XML document to be
converted to EPUB.
output_directory : str
`output_directory` is a string path to the directory in which the EPUB
will be produced. The name of the directory will be used as the EPUB's
filename.
input_path : str
`input_path` is a string absolute path to the input XML file, used to
locate input-relative images.
image_directory : str
`image_directory` is a string path indicating an explicit image
directory. If supplied, other image input methods will not be used.
config_module : config module, optional
`config_module` is a pre-loaded config module for OpenAccess_EPUB; if
not used then this function will load the global config file. Might be
useful in certain cases to dynamically alter configuration.
epub_version : {None, 2, 3}
`epub_version` dictates which version of EPUB to be created. An error
will be raised if the specified version is not supported for the
publisher. If left to the default, the created version will defer to the
publisher default version.
batch : bool, optional
`batch` indicates that batch creation is being used (such as with the
`oaepub batch` command). In this case, directory conflicts will be
automatically resolved (in favor of keeping previous data, skipping
creation of EPUB).
Returns False in the case of a fatal error, True if successful. | entailment |
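A hypothetical invocation, shown as comments because it needs a parsed Article object produced elsewhere in the package; paths are placeholders:
# success = make_EPUB(parsed_article,
#                     output_directory='/tmp/out/my_article',
#                     input_path='/tmp/in/my_article.xml',
#                     image_directory=None,
#                     epub_version=3)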
def make_epub_base(location):
"""
Creates the base structure for an EPUB file in a specified location.
This function creates constant components for the structure of the EPUB in
a specified directory location.
Parameters
----------
location : str
A path string to a local directory in which the EPUB is to be built
"""
log.info('Making EPUB base files in {0}'.format(location))
with open(os.path.join(location, 'mimetype'), 'w') as out: # mimetype file
out.write('application/epub+zip')
#Create EPUB and META-INF directorys
os.mkdir(os.path.join(location, 'META-INF'))
os.mkdir(os.path.join(location, 'EPUB'))
os.mkdir(os.path.join(location, 'EPUB', 'css'))
with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out:
out.write('''\
<?xml version="1.0" encoding="UTF-8"?>
<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
<rootfiles>
<rootfile full-path="EPUB/package.opf" media-type="application/oebps-package+xml"/>
</rootfiles>
</container>''')
with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out:
out.write(bytes(DEFAULT_CSS, 'UTF-8')) | Creates the base structure for an EPUB file in a specified location.
This function creates constant components for the structure of the EPUB in
a specified directory location.
Parameters
----------
location : str
A path string to a local directory in which the EPUB is to be built | entailment |
def epub_zip(outdirect):
"""
Zips up the input file directory into an EPUB file.
"""
def recursive_zip(zipf, directory, folder=None):
if folder is None:
folder = ''
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
zipf.write(os.path.join(directory, item),
os.path.join(directory, item))
elif os.path.isdir(os.path.join(directory, item)):
recursive_zip(zipf, os.path.join(directory, item),
os.path.join(folder, item))
log.info('Zipping up the directory {0}'.format(outdirect))
epub_filename = outdirect + '.epub'
epub = zipfile.ZipFile(epub_filename, 'w')
current_dir = os.getcwd()
os.chdir(outdirect)
epub.write('mimetype')
log.info('Recursively zipping META-INF and EPUB')
for item in os.listdir('.'):
if item == 'mimetype':
continue
recursive_zip(epub, item)
os.chdir(current_dir)
epub.close() | Zips up the input file directory into an EPUB file. | entailment |
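One detail worth noting: the EPUB container format requires 'mimetype' to be the first archive entry, stored uncompressed. zipfile's default compression is ZIP_STORED and the function writes 'mimetype' first, so both constraints hold; to make the intent explicit one could write:
epub.write('mimetype', compress_type=zipfile.ZIP_STORED)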
def _write_int(fname, data, append=True):
"""Write data to CSV file with validation."""
# pylint: disable=W0705
data_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
fos_ex = pexdoc.exh.addex(
OSError, "File *[fname]* could not be created: *[reason]*"
)
data_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
try:
pmisc.make_dir(fname)
mode = "w" if append is False else "a"
if sys.hexversion < 0x03000000: # pragma: no cover, no branch
with open(fname, mode) as file_handle:
csv.writer(file_handle, delimiter=",").writerows(data)
else: # pragma: no cover
with open(fname, mode, newline="") as file_handle:
csv.writer(file_handle, delimiter=",").writerows(data)
except (IOError, OSError) as eobj:
fos_ex(True, _MF("fname", fname, "reason", eobj.strerror)) | Write data to CSV file with validation. | entailment |
def _input_directory_description(input_identifier, arg_item, input_dir):
"""
Produces a directory description. A directory description is a dictionary containing the following information.
- 'path': An array containing the paths to the specified directories.
- 'debugInfo': A field to possibly provide debug information.
- 'found': A boolean that indicates, if the directory exists in the local filesystem.
- 'listing': A listing that shows which files are in the given directory. This could be None.
:param input_identifier: The input identifier in the cwl description file
:param arg_item: The corresponding job information
:param input_dir: Base directory against which relative input paths are resolved.
:return: A directory description
:raise DirectoryError: If the given directory does not exist or is not a directory.
"""
description = {
'path': None,
'found': False,
'debugInfo': None,
'listing': None,
'basename': None
}
try:
path = location(input_identifier, arg_item)
if input_dir and not os.path.isabs(path):
path = os.path.join(os.path.expanduser(input_dir), path)
description['path'] = path
if not os.path.exists(path):
raise DirectoryError('path does not exist')
if not os.path.isdir(path):
raise DirectoryError('path is not a directory')
description['listing'] = arg_item.get('listing')
description['basename'] = os.path.basename(path)
description['found'] = True
except:
description['debugInfo'] = exception_format()
return description | Produces a directory description. A directory description is a dictionary containing the following information.
- 'path': An array containing the paths to the specified directories.
- 'debugInfo': A field to possibly provide debug information.
- 'found': A boolean that indicates, if the directory exists in the local filesystem.
- 'listing': A listing that shows which files are in the given directory. This could be None.
:param input_identifier: The input identifier in the cwl description file
:param arg_item: The corresponding job information
:param input_dir: Base directory against which relative input paths are resolved.
:return: A directory description
:raise DirectoryError: If the given directory does not exist or is not a directory. | entailment |
def _check_input_directory_listing(base_directory, listing):
"""
Raises an DirectoryError if files or directories, given in the listing, could not be found in the local filesystem.
:param base_directory: The path to the directory to check
:param listing: A listing given as dictionary
:raise DirectoryError: If the given base directory does not contain all of the subdirectories and subfiles given in
the listing.
"""
for sub in listing:
path = os.path.join(base_directory, sub['basename'])
if sub['class'] == 'File':
if not os.path.isfile(path):
raise DirectoryError('File \'{}\' not found but specified in listing.'.format(path))
if sub['class'] == 'Directory':
if not os.path.isdir(path):
raise DirectoryError('Directory \'{}\' not found but specified in listing'.format(path))
sub_listing = sub.get('listing')
if sub_listing:
_check_input_directory_listing(path, sub_listing) | Raises an DirectoryError if files or directories, given in the listing, could not be found in the local filesystem.
:param base_directory: The path to the directory to check
:param listing: A listing given as dictionary
:raise DirectoryError: If the given base directory does not contain all of the subdirectories and subfiles given in
the listing. | entailment |
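An illustrative listing structure the checker walks (basenames are hypothetical):
listing = [
    {'class': 'File', 'basename': 'input.csv'},
    {'class': 'Directory', 'basename': 'images', 'listing': [
        {'class': 'File', 'basename': 'frame_001.png'},
    ]},
]
# Raises DirectoryError if any entry is missing under the base directory:
# _check_input_directory_listing('/data/job', listing)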