def _validate_rfilter(self, rfilter, letter="d"):
    """Validate that all columns in filter are in header."""
    argname = "dfilter" if letter == "d" else "rfilter"
    pexdoc.exh.addai(
        argname,
        (
            (not self._has_header)
            and any(not isinstance(item, int) for item in rfilter.keys())
        ),
    )
    for key in rfilter:
        self._in_header(key)
        rfilter[key] = (
            [rfilter[key]] if isinstance(rfilter[key], str) else rfilter[key]
        )
def data(self, filtered=False, no_empty=False):
r"""
Return (filtered) file data.
    The returned object is a list; each item is a sub-list corresponding
    to a row of data, and each item in the sub-lists contains data
    corresponding to a particular column.
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
:param no_empty: Flag that indicates whether rows with empty columns
should be filtered out (True) or not (False)
:type no_empty: bool
:rtype: list
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.data
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`no_empty\` is not valid)
.. [[[end]]]
"""
    return self._apply_filter(filtered, no_empty)
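# A minimal usage sketch for data(), not part of the class above: it assumes
# the pcsv package and a hypothetical "example.csv" with a header row that
# contains a "Ctrl" column.
import pcsv

obj = pcsv.CsvFile("example.csv", dfilter={"Ctrl": 2})
print(obj.data())                               # every row
print(obj.data(filtered=True))                  # only rows where Ctrl == 2
print(obj.data(filtered=True, no_empty=True))   # also drop rows with empty cells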
def dsort(self, order):
r"""
Sort rows.
:param order: Sort order
:type order: :ref:`CsvColFilter`
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.dsort
:raises:
* RuntimeError (Argument \`order\` is not valid)
* RuntimeError (Invalid column specification)
* ValueError (Column *[column_identifier]* not found)
.. [[[end]]]
"""
# Make order conforming to a list of dictionaries
order = order if isinstance(order, list) else [order]
norder = [{item: "A"} if not isinstance(item, dict) else item for item in order]
# Verify that all columns exist in file
self._in_header([list(item.keys())[0] for item in norder])
# Get column indexes
clist = []
for nitem in norder:
for key, value in nitem.items():
clist.append(
(
key
if isinstance(key, int)
else self._header_upper.index(key.upper()),
value.upper() == "D",
)
)
# From the Python documentation:
# "Starting with Python 2.3, the sort() method is guaranteed to be
# stable. A sort is stable if it guarantees not to change the
# relative order of elements that compare equal - this is helpful
# for sorting in multiple passes (for example, sort by department,
# then by salary grade)."
# This means that the sorts have to be done from "minor" column to
# "major" column
for (cindex, rvalue) in reversed(clist):
fpointer = operator.itemgetter(cindex)
        self._data.sort(key=fpointer, reverse=rvalue)
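# Self-contained sketch of the multi-pass idea behind dsort: sort by the
# minor key first, then by the major key; Python's stable sort preserves the
# minor order within ties. The sample rows are illustrative only.
import operator

rows = [["eng", 3], ["hr", 1], ["eng", 1], ["hr", 2]]
rows.sort(key=operator.itemgetter(1), reverse=True)  # minor key, descending
rows.sort(key=operator.itemgetter(0))                # major key, ascending
print(rows)  # [['eng', 3], ['eng', 1], ['hr', 2], ['hr', 1]]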
def header(self, filtered=False):
r"""
Return data header.
    When the raw (input) data is used, the data header is a list of the
    comma-separated values file header if the file is loaded with a header
    (each list item is a column header), or a list of column numbers if the
    file is loaded without a header (column zero is the leftmost column).
    When filtered data is used, the data header is the active column filter,
    if any; otherwise it is the same as the raw (input) data header.
:param filtered: Flag that indicates whether the raw (input) data
should be used (False) or whether filtered data
should be used (True)
:type filtered: boolean
:rtype: list of strings or integers
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.header
:raises: RuntimeError (Argument \`filtered\` is not valid)
.. [[[end]]]
"""
    return (
        self._header
        if (not filtered) or (self._cfilter is None)
        else self._cfilter
    )
def replace(self, rdata, filtered=False):
r"""
Replace data.
:param rdata: Replacement data
:type rdata: list of lists
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
.. [[[cog cog.out(exobj.get_sphinx_autodoc(width=63)) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.replace
:raises:
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`rdata\` is not valid)
* ValueError (Number of columns mismatch between input and
replacement data)
* ValueError (Number of rows mismatch between input and
replacement data)
.. [[[end]]]
"""
# pylint: disable=R0914
rdata_ex = pexdoc.exh.addai("rdata")
rows_ex = pexdoc.exh.addex(
ValueError, "Number of rows mismatch between input and replacement data"
)
cols_ex = pexdoc.exh.addex(
ValueError, "Number of columns mismatch between input and replacement data"
)
rdata_ex(any([len(item) != len(rdata[0]) for item in rdata]))
    # Verify column names; has to be done before getting data
    col_num = len(self._data[0]) - 1
    odata = self._apply_filter(filtered)
    # Use all columns if no specification has been given
    cfilter = (
        self._cfilter if filtered in [True, "B", "b", "C", "c"] else self._header
    )
col_index = [
self._header_upper.index(col_id.upper())
if isinstance(col_id, str)
else col_id
for col_id in cfilter
]
rows_ex(len(odata) != len(rdata))
cols_ex(len(odata[0]) != len(rdata[0]))
df_tuples = self._format_rfilter(self._rfilter)
rnum = 0
for row in self._data:
if (not filtered) or (
filtered
and all([row[col_num] in col_value for col_num, col_value in df_tuples])
):
for col_num, new_data in zip(col_index, rdata[rnum]):
row[col_num] = new_data
            rnum += 1
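# Hedged usage sketch for replace(): it assumes the pcsv package and a
# hypothetical file whose active filter selects exactly two rows and one
# column, so that rdata matches the filtered data's shape.
import pcsv

obj = pcsv.CsvFile("example.csv", dfilter=({"Ctrl": 2}, "Ref"))
obj.replace(rdata=[[1.0], [2.0]], filtered=True)  # shapes must match or ValueError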
def write(self, fname=None, filtered=False, header=True, append=False):
r"""
Write (processed) data to a specified comma-separated values (CSV) file.
:param fname: Name of the comma-separated values file to be
written. If None the file from which the data originated
is overwritten
:type fname: FileName_
:param filtered: Filtering type
:type filtered: :ref:`CsvFiltered`
:param header: If a list, column headers to use in the file. If
boolean, flag that indicates whether the input column
headers should be written (True) or not (False)
:type header: string, list of strings or boolean
    :param append: Flag that indicates whether data is added to an
                   existing file (or a new file is created if it does not
                   exist) (True), or whether data overwrites the file
                   contents (if the file exists) or creates a new file if
                   the file does not exist (False)
    :type append: boolean
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. pcsv.csv_file.CsvFile.write
:raises:
* OSError (File *[fname]* could not be created: *[reason]*)
* RuntimeError (Argument \`append\` is not valid)
* RuntimeError (Argument \`filtered\` is not valid)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`header\` is not valid)
* RuntimeError (Argument \`no_empty\` is not valid)
* ValueError (There is no data to save to file)
.. [[[end]]]
"""
# pylint: disable=R0913
write_ex = pexdoc.exh.addex(ValueError, "There is no data to save to file")
fname = self._fname if fname is None else fname
data = self.data(filtered=filtered)
write_ex((len(data) == 0) or ((len(data) == 1) and (len(data[0]) == 0)))
if header:
header = [header] if isinstance(header, str) else header
cfilter = self._gen_col_index(filtered=filtered)
filtered_header = (
[self._header[item] for item in cfilter]
if self._has_header
else cfilter
)
file_header = filtered_header if isinstance(header, bool) else header
# Convert None's to ''
data = [["''" if item is None else item for item in row] for row in data]
    _write_int(fname, [file_header] + data if header else data, append=append)
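# Hedged usage sketch for write(), assuming the pcsv package; the file names
# below are hypothetical.
import pcsv

obj = pcsv.CsvFile("example.csv", dfilter={"Ctrl": 2})
obj.write(fname="filtered.csv", filtered=True, header=True)  # filtered copy
obj.write(fname="log.csv", append=True)                      # append all rows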
def model_creation(dicCnfg, varRat=None, strPathHrf=None):
"""
Create or load pRF model time courses.
Parameters
----------
dicCnfg : dict
Dictionary containing config parameters.
    varRat : float, default None
        Ratio of the size of the suppressive surround to the size of the
        center pRF.
    strPathHrf : str or None
        Path to npy file with custom hrf parameters. If None, default
        parameters will be used.
Returns
-------
    aryPrfTc : np.array
        4D numpy array with pRF time course models, with the following
        dimensions: 'aryPrfTc[x-position, y-position, SD, volume]'.
lgcMdlInc : np.array, boolean
Logical to only include models with pRF center on stimulated area.
"""
# *************************************************************************
# *** Load parameters from config file
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# *************************************************************************
if cfg.lgcCrteMdl:
# *********************************************************************
# *** Load spatial condition information
print('------Load spatial condition information')
arySptExpInf = np.load(cfg.strSptExpInf)
# Here we assume scientific convention and orientation of images where
# the origin should fall in the lower left corner, the x-axis occupies
# the width and the y-axis occupies the height dimension of the screen.
        # We also assume that the first dimension that the user provides
        # indexes x and the second indexes the y-axis. Since python indexes
        # the vertical (row) dimension first and the horizontal (column)
        # dimension second, we need to rotate arySptExpInf by 90 degrees
        # rightward. This will ensure that with the 0th axis we index the
        # scientific x-axis and higher values move us to the right on that
        # x-axis. It will also ensure that the 1st python axis indexes the
        # scientific y-axis and higher values will move us up.
arySptExpInf = np.rot90(arySptExpInf, k=3)
# Calculate the areas that were stimulated during the experiment
        aryStimArea = np.sum(arySptExpInf, axis=-1).astype(bool)
# *********************************************************************
# *********************************************************************
# *** Load temporal condition information
print('------Load temporal condition information')
aryTmpExpInf = np.load(cfg.strTmpExpInf)
# add fourth column to make it appropriate for pyprf_feature
if aryTmpExpInf.shape[-1] == 3:
print('---------Added fourth column')
vecNewCol = np.greater(aryTmpExpInf[:, 0], 0).astype(np.float16)
aryTmpExpInf = np.concatenate(
(aryTmpExpInf, np.expand_dims(vecNewCol, axis=1)), axis=1)
# *********************************************************************
# *********************************************************************
# *** Create model parameter combination, for now in pixel.
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax,
cfg.varNum2, cfg.varExtYmin,
cfg.varExtYmax, cfg.varNumPrfSizes,
cfg.varPrfStdMin, cfg.varPrfStdMax,
kwUnt='pix', kwCrd=cfg.strKwCrd)
# If desired by user, also create model parameters for supp surround
if varRat is not None:
aryMdlParamsSur = np.copy(aryMdlParams)
aryMdlParamsSur[:, 2] = aryMdlParamsSur[:, 2] * varRat
# Exclude model parameters whose prf center would lie outside the
# stimulated area
print('------Exclude model params with prf center outside stim area')
varNumMdlBfr = aryMdlParams.shape[0]
# Get logical for model inclusion
lgcMdlInc = aryStimArea[aryMdlParams[:, 0].astype(np.int32),
aryMdlParams[:, 1].astype(np.int32)]
# Exclude models with prf center outside stimulated area
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Also apply the logical to the surround parameters, if they exist
if varRat is not None:
aryMdlParamsSur = aryMdlParamsSur[lgcMdlInc, :]
print('---------Number of models excluded: ' +
str(varNumMdlBfr-aryMdlParams.shape[0]))
# *********************************************************************
# *********************************************************************
# *** Create 2D Gauss model responses to spatial conditions.
print('------Create 2D Gauss model responses to spatial conditions')
aryMdlRsp = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParams, cfg.varPar)
# If desired by user, also create model responses for supp surround
if varRat is not None:
aryMdlRspSur = crt_mdl_rsp(arySptExpInf, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
aryMdlParamsSur, cfg.varPar)
# Delete array to save memory
        del arySptExpInf
# *********************************************************************
# *********************************************************************
# *** Create prf time course models
print('------Create prf time course models')
# Check whether path to npy file with hrf parameters was provided
if strPathHrf is not None:
print('---------Load custom hrf parameters')
aryCstPrm = np.load(strPathHrf)
            dctPrm = {'peak_delay': aryCstPrm[0],
                      'under_delay': aryCstPrm[1],
                      'peak_disp': aryCstPrm[2],
                      'under_disp': aryCstPrm[3],
                      'p_u_ratio': aryCstPrm[4]}
# If not, set dctPrm to None, which will result in default hrf params
else:
print('---------Use default hrf parameters')
dctPrm = None
aryPrfTc = crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, cfg.varNumVol,
cfg.varTr, cfg.varTmpOvsmpl,
cfg.switchHrfSet, (int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm)
# If desired by user, create prf time course models for supp surround
if varRat is not None:
print('---------Add suppressive surround')
aryPrfTcSur = crt_prf_ftr_tc(aryMdlRspSur, aryTmpExpInf,
cfg.varNumVol, cfg.varTr,
cfg.varTmpOvsmpl, cfg.switchHrfSet,
(int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)),
cfg.varPar, dctPrm=dctPrm)
# Concatenate aryPrfTc and aryPrfTcSur
aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcSur), axis=1)
# *********************************************************************
# *********************************************************************
# *** Save pRF time course models, corresponding params and responses
print('------Save pRF time course models to disk')
# Prepare file name extensions
strNmeExtMdl = ''
strNmeExtPrm = '_params'
strNmeExtRsp = '_mdlRsp'
strNmeExtMdlInc = '_lgcMdlInc'
# Check whether extensions need to be modified with ratio name
if varRat is not None:
strNmeExtMdl = strNmeExtMdl + '_' + str(varRat)
strNmeExtPrm = strNmeExtPrm + '_' + str(varRat)
strNmeExtRsp = strNmeExtRsp + '_' + str(varRat)
            # Also include the model parameters and responses of the surround
# For the pRF time course models, the surround is included above
aryMdlParams = np.stack((aryMdlParams, aryMdlParamsSur),
axis=1)
aryMdlRsp = np.stack((aryMdlRsp, aryMdlRspSur),
axis=1)
            # Append the npy file name for model exclusion in the
            # unstimulated area with the general _supsur suffix, since it
            # does not depend on a specific surround
strNmeExtMdlInc = '_supsur' + strNmeExtMdlInc
# Save pRF time course models
np.save(cfg.strPathMdl + strNmeExtMdl, aryPrfTc)
# Save the corresponding model parameters
np.save(cfg.strPathMdl + strNmeExtPrm, aryMdlParams)
# Save the corresponding model responses
np.save(cfg.strPathMdl + strNmeExtRsp, aryMdlRsp)
# Save logical for parameter exclusion in unstimulated area
np.save(cfg.strPathMdl + strNmeExtMdlInc, lgcMdlInc)
        del aryMdlParams
        del aryMdlRsp
# *********************************************************************
else:
# *********************************************************************
# %% Load existing pRF time course models
print('------Load pRF time course models from disk')
# Load the file:
aryPrfTc = np.load((cfg.strPathMdl + '.npy'))
# Check whether pRF time course model matrix has the expected
# dimensions:
vecPrfTcShp = aryPrfTc.shape
# Logical test for correct dimensions:
strErrMsg = ('---Error: Dimensions of specified pRF time course ' +
'models do not agree with specified model parameters')
assert vecPrfTcShp[0] == cfg.varNum1 * \
cfg.varNum2 * cfg.varNumPrfSizes, strErrMsg
assert vecPrfTcShp[-1] == cfg.varNumVol, strErrMsg
        # Check the number of features. If fitting is performed with a
        # suppressive surround, the number of features will be twice that of
        # simple fitting.
if varRat is None:
assert vecPrfTcShp[1] == cfg.switchHrfSet, strErrMsg
else:
assert vecPrfTcShp[1] == cfg.switchHrfSet*2, strErrMsg
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# *************************************************************************
    return aryPrfTc, lgcMdlInc
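# Self-contained sketch of the np.rot90(k=3) orientation step used above:
# k=3 rotates the array 90 degrees clockwise, so the axis the user provided
# first ends up indexing the horizontal direction of the screen.
import numpy as np

ary = np.array([[1, 2],
                [3, 4]])
print(np.rot90(ary, k=3))  # [[3 1]
                           #  [4 2]]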
def registIssue(self, CorpNum, cashbill, Memo, UserID=None):
    """ Register and immediately issue a cash receipt.
    args
        CorpNum : Popbill member business registration number
        cashbill : cash receipt object to register, made with Cashbill(...)
        Memo : memo attached on issuance
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if cashbill is None:
        raise PopbillException(-99999999, "Cash receipt information was not entered.")
    if Memo is not None and Memo != '':
        cashbill.memo = Memo
    postData = self._stringtify(cashbill)
    return self._httppost('/Cashbill', postData, CorpNum, UserID, "ISSUE")
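# Hedged usage sketch for registIssue -- not from the source. It assumes the
# popbill Python SDK's CashbillService/Cashbill classes; the credentials,
# business number, and Cashbill field values are placeholders.
from popbill import Cashbill, CashbillService, PopbillException

service = CashbillService("YOUR_LINK_ID", "YOUR_SECRET_KEY")
cashbill = Cashbill(mgtKey="20230101-001",        # document management key
                    tradeType="승인거래",          # approval transaction
                    tradeUsage="소득공제용",        # income deduction
                    taxationType="과세",           # taxable
                    totalAmount="11000",
                    supplyCost="10000",
                    tax="1000",
                    serviceFee="0",
                    identityNum="010-1234-5678")   # buyer identity number
try:
    result = service.registIssue("1234567890", cashbill, "issued via API")
    print(result.code, result.message)
except PopbillException as pe:
    print(pe.code, pe.message)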
def register(self, CorpNum, cashbill, UserID=None):
    """ Register a cash receipt.
    args
        CorpNum : Popbill member business registration number
        cashbill : cash receipt object to register, made with Cashbill(...)
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if cashbill is None:
        raise PopbillException(-99999999, "Cash receipt information was not entered.")
    postData = self._stringtify(cashbill)
    return self._httppost('/Cashbill', postData, CorpNum, UserID)
def revokeRegistIssue(self, CorpNum, mgtKey, orgConfirmNum, orgTradeDate, smssendYN=False, memo=None, UserID=None,
                      isPartCancel=False, cancelType=None, supplyCost=None, tax=None, serviceFee=None,
                      totalAmount=None):
    """ Register and immediately issue a cancellation cash receipt.
    args
        CorpNum : Popbill member business registration number
        mgtKey : document management key of the cash receipt
        orgConfirmNum : approval number of the original cash receipt
        orgTradeDate : trade date of the original cash receipt
        smssendYN : whether to send an SMS issuance notification
        memo : memo
        UserID : Popbill member user ID
        isPartCancel : whether this is a partial cancellation
        cancelType : cancellation reason
        supplyCost : [cancellation] supply value
        tax : [cancellation] tax amount
        serviceFee : [cancellation] service fee
        totalAmount : [cancellation] total amount
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    postData = self._stringtify({
        "mgtKey": mgtKey,
        "orgConfirmNum": orgConfirmNum,
        "orgTradeDate": orgTradeDate,
        "smssendYN": smssendYN,
        "memo": memo,
        "isPartCancel": isPartCancel,
        "cancelType": cancelType,
        "supplyCost": supplyCost,
        "tax": tax,
        "serviceFee": serviceFee,
        "totalAmount": totalAmount,
    })
    return self._httppost('/Cashbill', postData, CorpNum, UserID, "REVOKEISSUE")
def update(self, CorpNum, MgtKey, cashbill, UserID=None):
    """ Update a cash receipt.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key of the original cash receipt
        cashbill : cash receipt object with the updated values, made with Cashbill(...)
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    if cashbill is None:
        raise PopbillException(-99999999, "Cash receipt information was not entered.")
    postData = self._stringtify(cashbill)
    return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "PATCH")
def issue(self, CorpNum, MgtKey, Memo=None, UserID=None):
    """ Issue a cash receipt.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key of the original cash receipt
        Memo : issuance memo
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    req = {}
    if Memo is not None and Memo != '':
        req["memo"] = Memo
    postData = self._stringtify(req)
    return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "ISSUE")
def delete(self, CorpNum, MgtKey, UserID=None):
    """ Delete a cash receipt.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key of the original cash receipt
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    return self._httppost('/Cashbill/' + MgtKey, '', CorpNum, UserID, "DELETE")
def search(self, CorpNum, DType, SDate, EDate, State, TradeType, TradeUsage, TaxationType, Page, PerPage, Order,
           UserID=None, QString=None, TradeOpt=None):
    """ Search the list of cash receipts.
    args
        CorpNum : Popbill member business registration number
        DType : date type, one of R-registration date, T-trade date, I-issue date
        SDate : start date, format yyyyMMdd
        EDate : end date, format yyyyMMdd
        State : array of state codes; wildcard (*) allowed in the 2nd and 3rd positions
        TradeType : array of document types, N-normal cash receipt, C-cancellation cash receipt
        TradeUsage : array of trade usages, P-income deduction, C-expenditure proof
        TaxationType : array of taxation types, T-taxable, N-tax-free
        Page : page number
        PerPage : number of results per page
        Order : sort order, D-descending, A-ascending
        UserID : Popbill member user ID
        QString : cash receipt identification number; searches all if omitted
        TradeOpt : trade option, N-normal, B-books/performances, T-public transportation
    """
    if DType is None or DType == '':
        raise PopbillException(-99999999, "Date type was not entered.")
    if SDate is None or SDate == '':
        raise PopbillException(-99999999, "Start date was not entered.")
    if EDate is None or EDate == '':
        raise PopbillException(-99999999, "End date was not entered.")
    uri = '/Cashbill/Search'
    uri += '?DType=' + DType
    uri += '&SDate=' + SDate
    uri += '&EDate=' + EDate
    uri += '&State=' + ','.join(State)
    uri += '&TradeUsage=' + ','.join(TradeUsage)
    uri += '&TradeType=' + ','.join(TradeType)
    uri += '&TaxationType=' + ','.join(TaxationType)
    uri += '&Page=' + str(Page)
    uri += '&PerPage=' + str(PerPage)
    uri += '&Order=' + Order
    if QString is not None:
        uri += '&QString=' + QString
    if TradeOpt is not None:
        uri += '&TradeOpt=' + ','.join(TradeOpt)
    return self._httpget(uri, CorpNum, UserID)
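# Hedged usage sketch for search(), assuming the popbill SDK; the credentials
# and dates are placeholders, and the response field names are assumptions.
from popbill import CashbillService

service = CashbillService("YOUR_LINK_ID", "YOUR_SECRET_KEY")
response = service.search("1234567890",
                          DType="T",                  # by trade date
                          SDate="20230101", EDate="20230131",
                          State=["3**"],              # any issued state
                          TradeType=["N", "C"],
                          TradeUsage=["P", "C"],
                          TaxationType=["T", "N"],
                          Page=1, PerPage=50, Order="D")
print(response.total)  # assumed attribute; check the SDK's search result type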
def getInfo(self, CorpNum, MgtKey):
    """ Get the state/summary information of a cash receipt.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key
    return
        document state/summary information object
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    return self._httpget('/Cashbill/' + MgtKey, CorpNum)
def getInfos(self, CorpNum, MgtKeyList):
    """ Get state information in bulk, up to 1000 items.
    args
        CorpNum : member business registration number
        MgtKeyList : list of document management keys
    return
        list of state information objects
    raise
        PopbillException
    """
    if MgtKeyList is None or len(MgtKeyList) < 1:
        raise PopbillException(-99999999, "Management keys were not entered.")
    postData = self._stringtify(MgtKeyList)
    return self._httppost('/Cashbill/States', postData, CorpNum)
def getDetailInfo(self, CorpNum, MgtKey):
    """ Get the detail information of a cash receipt.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key
    return
        document detail information object
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    return self._httpget('/Cashbill/' + MgtKey + '?Detail', CorpNum)
def sendEmail(self, CorpNum, MgtKey, ReceiverEmail, UserID=None):
    """ Resend the notification email.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key
        ReceiverEmail : recipient email address
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    if ReceiverEmail is None or ReceiverEmail == "":
        raise PopbillException(-99999999, "Recipient email address was not entered.")
    postData = self._stringtify({"receiver": ReceiverEmail})
    return self._httppost('/Cashbill/' + MgtKey, postData, CorpNum, UserID, "EMAIL")
def getLogs(self, CorpNum, MgtKey):
    """ Get the document history.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key
    return
        list of document history entries
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    return self._httpget('/Cashbill/' + MgtKey + '/Logs', CorpNum)
def getEPrintURL(self, CorpNum, MgtKey, UserID=None):
    """ Get the print URL for the recipient.
    args
        CorpNum : Popbill member business registration number
        MgtKey : document management key
        UserID : Popbill member user ID
    return
        Popbill URL as str
    raise
        PopbillException
    """
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "Management key was not entered.")
    result = self._httpget('/Cashbill/' + MgtKey + '?TG=EPRINT', CorpNum, UserID)
    return result.url
def updateEmailConfig(self, Corpnum, EmailType, SendYN, UserID=None):
    """ Update the notification email sending settings.
    args
        CorpNum : Popbill member business registration number
        EmailType : email sending type
        SendYN : whether to send (True-send, False-do not send)
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if EmailType is None or EmailType == '':
        raise PopbillException(-99999999, "Email sending type was not entered.")
    if SendYN is None or SendYN == '':
        raise PopbillException(-99999999, "Send flag was not entered.")
    uri = "/Cashbill/EmailSendConfig?EmailType=" + EmailType + "&SendYN=" + str(SendYN)
    return self._httppost(uri, "", Corpnum, UserID)
def list(self, request):
"""
Returns the list of documents found on the collection
"""
pipeline = [{'$match': request.args.pop('match', {})}]
sort = request.args.pop('sort', {})
if sort:
pipeline.append({'$sort': sort})
project = request.args.pop('project', {})
if project:
pipeline.append({'$project': project})
    return Response(serialize(self.collection.aggregate(pipeline)))
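# Illustrative sketch (not from the source) of the aggregation pipeline that
# list() builds for a hypothetical request carrying match, sort, and project
# arguments:
pipeline = [
    {'$match': {'age': {'$gt': 21}}},
    {'$sort': {'name': 1}},
    {'$project': {'name': 1, '_id': 0}},
]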
def create(self, request):
"""
Creates a new document based on the given data
"""
document = self.collection(request.json)
document.created_at = datetime.utcnow()
document.updated_at = document.created_at
created = document.insert()
return Response(
response=serialize(created),
status=(
201 if not all(
key in created for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
    )
def retrieve(self, request, _id):
"""
Returns the document containing the given _id or 404
"""
_id = deserialize(_id)
retrieved = self.collection.find_one({'_id': _id})
if retrieved:
return Response(serialize(retrieved))
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
            status=404
        )
def update(self, request, _id):
"""
Updates the document with the given _id using the given data
"""
_id = deserialize(_id)
to_update = self.collection.find_one({'_id': _id})
if to_update:
document = self.collection(dict(to_update, **request.json))
document.updated_at = datetime.utcnow()
updated = document.update()
return Response(
response=serialize(updated),
status=(
200 if not all(
key in updated for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=400
        )
def delete(self, request, _id):
"""
Deletes the document with the given _id if it exists
"""
_id = deserialize(_id)
to_delete = self.collection.get({'_id': _id})
if to_delete:
deleted = to_delete.delete()
return Response(
response=serialize(deleted),
status=(
200 if not all(
key in deleted for key in [
'error_code', 'error_type', 'error_message'
]
) else 400
)
)
else:
return Response(
response=serialize(
DocumentNotFoundError(self.collection.__name__, _id)
),
status=404
        )
def with_reactor(*dec_args, **dec_kwargs):
"""
Decorator for test functions that require a running reactor.
Can be used like this::
@with_reactor
def test_connect_to_server(self):
...
Or like this::
@with_reactor(timeout=10)
def test_connect_to_server(self):
...
If the test function returns a deferred then the test will
be successful if the deferred resolves to a value or unsuccessful
if the deferred errbacks.
    The test must not leave any connections or the like open; doing so
    results in a reactor-unclean failure of the test.
If there is a function called `twisted_setup()` in the same class
as the test function is defined, then this function will be invoked
before the test, but already in the context of the reactor. Note that
the regular setup function provided by the testing framework will
be executed too, but not in the reactor context.
Accordingly, if there is a `twisted_teardown()` it executes after the
test function, even if the test failed.
    If the test, including `twisted_setup` and `twisted_teardown`, has
    not completed within the timeout, the test fails. The timeout defaults
    to two minutes. A timeout duration of zero disables the timeout.
"""
# This method takes care of the decorator protocol, it
# distinguishes between using the decorator with brackets
# and without brackets. It then calls `_twisted_test_sync()`.
if len(dec_args) == 1 and callable(dec_args[0]) and not dec_kwargs:
# decorator used without brackets:
# @twisted_test
# def test_xxx():
# ....
        callee = dec_args[0]
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs)
return wrapper
else:
# decorator used with brackets:
# @twisted_test(*dec_args, **dec_args)
# def test_xxx():
# ....
def decorator(callee):
@functools.wraps(callee)
def wrapper(*call_args, **call_kwargs):
return _twisted_test_sync(callee, call_args, call_kwargs, *dec_args, **dec_kwargs)
return wrapper
        return decorator
def _ensure_reactor_running():
"""
Starts the twisted reactor if it is not running already.
The reactor is started in a new daemon-thread.
Has to perform dirty hacks so that twisted can register
signals even if it is not running in the main-thread.
"""
if not reactor.running:
# Some of the `signal` API can only be called
# from the main-thread. So we do a dirty workaround.
#
        # `signal.signal()` and `signal.set_wakeup_fd()`
        # are temporarily monkey-patched while the reactor is
        # starting.
#
# The patched functions record the invocations in
# `signal_registrations`.
#
# Once the reactor is started, the main-thread
# is used to playback the recorded invocations.
signal_registrations = []
# do the monkey patching
def signal_capture(*args, **kwargs):
signal_registrations.append((orig_signal, args, kwargs))
def set_wakeup_fd_capture(*args, **kwargs):
signal_registrations.append((orig_set_wakeup_fd, args, kwargs))
orig_signal = signal.signal
signal.signal = signal_capture
orig_set_wakeup_fd = signal.set_wakeup_fd
signal.set_wakeup_fd = set_wakeup_fd_capture
# start the reactor in a daemon-thread
reactor_thread = threading.Thread(target=reactor.run, name="reactor")
reactor_thread.daemon = True
reactor_thread.start()
while not reactor.running:
time.sleep(0.01)
# Give the reactor a moment to register the signals.
# Apparently the 'running' flag is set before that.
time.sleep(0.01)
        # Undo the monkey-patching
signal.signal = orig_signal
signal.set_wakeup_fd = orig_set_wakeup_fd
# Playback the recorded calls
for func, args, kwargs in signal_registrations:
            func(*args, **kwargs)
def _timeoutDeferred(deferred, timeout):
"""
Cancels the given deferred after the given time, if it has not yet callbacked/errbacked it.
"""
delayedCall = reactor.callLater(timeout, deferred.cancel)
def gotResult(result):
if delayedCall.active():
delayedCall.cancel()
return result
    deferred.addBoth(gotResult)
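# Hedged usage sketch for _timeoutDeferred, assuming a running twisted
# reactor; the deferred below is hypothetical.
from twisted.internet import defer

d = defer.Deferred()
_timeoutDeferred(d, 5.0)  # cancel d if it has not fired within 5 seconds
d.addErrback(lambda failure: print("cancelled or failed:", failure))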
def find_prf_cpu(idxPrc, aryFuncChnk, aryPrfTc, aryMdlParams, strVersion,
lgcXval, varNumXval, queOut, lgcRstr=None, lgcPrint=True):
"""
Find best fitting pRF model for voxel time course, using the CPU.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
    aryFuncChnk : np.array
        2D array with functional MRI data, with shape aryFuncChnk[voxel, time].
aryPrfTc : np.array
Array with pRF model time courses, with shape
        aryPrfTc[x-pos*y-pos*SD, number of features, number of volumes]
aryMdlParams : np.array
2D array with all pRF model parameter combinations.
strVersion : str
Which version to use for pRF finding; 'numpy' or 'cython'.
lgcXval: boolean
Logical to determine whether we cross-validate.
varNumXval: int
Number of folds for k-fold cross-validation.
queOut : multiprocessing.queues.Queue
Queue to put the results on.
lgcRstr : boolean numpy array or None, default None
Logical to restrict certain models to particular voxels.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
lstOut : list
List containing the following objects:
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0.
vecBstXpos : np.array
1D array with best fitting x-position for each voxel, with shape
vecBstXpos[voxel].
vecBstYpos : np.array
1D array with best fitting y-position for each voxel, with shape
vecBstYpos[voxel].
vecBstSd : np.array
1D array with best fitting pRF size for each voxel, with shape
vecBstSd[voxel].
vecBstR2 : np.array
1D array with R2 value of 'winning' pRF model for each voxel, with
shape vecBstR2[voxel].
aryBstBts : np.array
2D array with beta parameter estimates of 'winning' pRF model for
            each voxel, with shape aryBstBts[voxel, features].
Notes
-----
The list with results is not returned directly, but placed on a
multiprocessing queue. This version performs the model finding on the CPU,
using numpy or cython (depending on the value of `strVersion`).
"""
# Number of models in the visual space:
varNumMdls = aryPrfTc.shape[0]
    # Number of features
varNumFtr = aryPrfTc.shape[1]
# Number of voxels to be fitted in this chunk:
varNumVoxChnk = aryFuncChnk.shape[0]
# Vectors for pRF finding results [number-of-voxels times one]:
# make sure they have the same precision as aryMdlParams, since this
# is important for later comparison
vecBstXpos = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
vecBstYpos = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
vecBstSd = np.zeros(varNumVoxChnk, dtype=aryMdlParams.dtype)
    # Vector for best residual value. For each model fit, the residual is
    # compared to this value and updated if the new fit is better than the
    # best-fitting solution so far. We initialise with an arbitrarily high
    # value
vecBstRes = np.add(np.zeros(varNumVoxChnk), np.inf).astype(np.float32)
# array for best beta values. If we update the residual value above because
# it is lower, we also update the beta values of these voxels
aryBstBts = np.zeros((varNumVoxChnk, varNumFtr)).astype(np.float32)
# In case we cross-validate we also save and replace the best
# residual values for every fold (not only mean across folds):
if lgcXval:
aryBstResFlds = np.zeros((varNumVoxChnk, varNumXval), dtype=np.float32)
# We reshape the voxel time courses, so that time goes down the column,
# i.e. from top to bottom.
aryFuncChnk = aryFuncChnk.T
# Change type to float 32:
aryFuncChnk = aryFuncChnk.astype(np.float32)
aryPrfTc = aryPrfTc.astype(np.float32)
# if lgc for Xval is true we already prepare indices for xvalidation
if lgcXval:
# obtain iterator for cross-validation
itXval = KFold(n_splits=varNumXval)
vecSplts = np.arange(aryPrfTc.shape[-1], dtype=np.int32)
# prepare lists that will hold indices for xvalidation
lstIdxTrn = []
lstIdxtst = []
        # Loop over the cross-validation folds to put indices in arrays
for idxTrn, idxTst in itXval.split(vecSplts):
lstIdxTrn.append(idxTrn)
lstIdxtst.append(idxTst)
        # Turn lists into arrays
aryIdxTrn = np.stack(lstIdxTrn, axis=-1).astype(np.int32)
aryIdxTst = np.stack(lstIdxtst, axis=-1).astype(np.int32)
# Prepare status indicator if this is the first of the parallel processes:
if idxPrc == 0:
# We create a status indicator for the time consuming pRF model finding
# algorithm. Number of steps of the status indicator:
varStsStpSze = 20
# Vector with pRF values at which to give status feedback:
vecStatPrf = np.linspace(0,
varNumMdls,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrf = np.ceil(vecStatPrf)
vecStatPrf = vecStatPrf.astype(int)
# Vector with corresponding percentage values at which to give status
# feedback:
vecStatPrc = np.linspace(0,
100,
num=(varStsStpSze+1),
endpoint=True)
vecStatPrc = np.ceil(vecStatPrc)
vecStatPrc = vecStatPrc.astype(int)
# Counter for status indicator:
varCntSts01 = 0
varCntSts02 = 0
# If user does not restrict model space for particular voxels, select
# all voxels
if lgcRstr is None:
lgcVxl = np.arange(varNumVoxChnk, dtype=np.int32)
# There can be pRF model time courses with a variance of zero (i.e. pRF
# models that are not actually responsive to the stimuli). For time
# efficiency, and in order to avoid division by zero, we ignore these
# model time courses.
aryPrfTcVar = np.var(aryPrfTc, axis=-1)
# Zero with float32 precision for comparison:
varZero32 = np.array(([0.0])).astype(np.float32)[0]
# Loop through pRF models:
for idxMdl in range(0, varNumMdls):
# If desired by user, restrict the model fitting such that certain
# models are restricted to particular voxels
if lgcRstr is not None:
# Apply flatnonzero, so we can use cascaded integer indexing later
lgcVxl = np.flatnonzero(lgcRstr[:, idxMdl])
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Status indicator:
if varCntSts02 == vecStatPrf[varCntSts01]:
# Prepare status message:
strStsMsg = ('------------Progress: ' +
str(vecStatPrc[varCntSts01]) +
' % --- ' +
str(vecStatPrf[varCntSts01]) +
' pRF models out of ' +
str(varNumMdls))
if lgcPrint:
print(strStsMsg)
# Only increment counter if the last value has not been
# reached yet:
if varCntSts01 < varStsStpSze:
varCntSts01 = varCntSts01 + int(1)
# Only fit pRF model if:
# 1) all feature predictors have a variance greater than zero AND
# 2) at least one voxel is being tested
if np.logical_and(np.all(np.greater(aryPrfTcVar[idxMdl], varZero32),
axis=0),
np.greater(lgcVxl.size, 0)):
# Get predictor time courses for this specific model
vecMdl = aryPrfTc[idxMdl, :, :].T
# Check whether we need to crossvalidate
if lgcXval:
# We do crossvalidate. In this case, we loop through
# the different folds of the crossvalidation and
# calculate the cross-validation error for the current
# model for all voxel time courses.
# Cython version:
if strVersion == 'cython':
# A cython function is used to calculate the residuals and
# beta parameter estimates of the current model:
if varNumFtr == 1:
                        # For time course with one predictor
aryResXval = cy_lst_sq_xval_one(np.squeeze(vecMdl),
aryFuncChnk[:, lgcVxl],
aryIdxTrn,
aryIdxTst)
elif varNumFtr == 2:
# For time course with two predictors
aryResXval = cy_lst_sq_xval_two(vecMdl,
aryFuncChnk[:, lgcVxl],
aryIdxTrn,
aryIdxTst)
else:
if lgcPrint:
print('Cython currently not implemented for ' +
'more than two predictors.')
# Numpy version:
elif strVersion == 'numpy':
aryResXval = np_lst_sq_xval(vecMdl, aryFuncChnk[:, lgcVxl],
aryIdxTrn, aryIdxTst)
# calculate the average cross validation error across
# all folds
vecTmpRes = np.mean(aryResXval, axis=1)
else:
# We do not crossvalidate. In this case, we calculate
# the ratio of the explained variance (R squared)
# for the current model for all voxel time courses.
# Cython version:
if strVersion == 'cython':
# A cython function is used to calculate the residuals and
# beta parameter estimates of the current model:
if varNumFtr == 1:
# For time course with one predictor
aryTmpBts, vecTmpRes = cy_lst_sq_one(
np.squeeze(vecMdl), aryFuncChnk[:, lgcVxl])
elif varNumFtr == 2:
# For time course with two predictors
aryTmpBts, vecTmpRes = \
cy_lst_sq_two(vecMdl, aryFuncChnk[:, lgcVxl])
else:
if lgcPrint:
print('Cython currently not implemented for ' +
                                  'more than two predictors.')
# Numpy version:
elif strVersion == 'numpy':
# Numpy linalg.lstsq is used to calculate the
# beta values and residuals of the current model:
aryTmpBts, vecTmpRes = np_lst_sq(vecMdl,
aryFuncChnk[:, lgcVxl])
# Check whether current crossvalidation error (xval=True)
# or residuals (xval=False) are lower than previously
# calculated ones:
vecLgcTmpRes = np.less(vecTmpRes, vecBstRes[lgcVxl])
# Apply np.flatnonzero for cascaded integer-indexing
vecLgcTmpRes = np.flatnonzero(vecLgcTmpRes)
# Replace best x and y position values, and SD values:
vecBstXpos[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 0]
vecBstYpos[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 1]
vecBstSd[lgcVxl[vecLgcTmpRes]] = aryMdlParams[idxMdl, 2]
# Replace best mean residual values:
vecBstRes[lgcVxl[vecLgcTmpRes]] = vecTmpRes[vecLgcTmpRes]
if not lgcXval:
# Replace best beta values:
aryBstBts[lgcVxl[vecLgcTmpRes], :] = \
aryTmpBts[:, vecLgcTmpRes].T
# In case we cross-validate we also save and replace the best
# residual values for every fold (not only mean across folds):
if lgcXval:
aryBstResFlds[lgcVxl[vecLgcTmpRes], :] = \
aryResXval[vecLgcTmpRes, :]
# Status indicator (only used in the first of the parallel
# processes):
if idxPrc == 0:
# Increment status indicator counter:
varCntSts02 = varCntSts02 + 1
# After finding the best fitting model for each voxel, we still have to
# calculate the average correlation coefficient between predicted and
# measured time course (xval=True) or the coefficient of determination
# (xval=False) for each voxel.
if lgcXval:
# create vector that allows to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryFuncChnk.shape[1])
# Since we did not do this during finding the best model, we still need
# to calculate deviation from a mean model for every voxel and fold
# arySsTotXval as well as calculate the best betas for the full model
# concatenate vectors with best x, y, sigma params
aryBstPrm = np.stack((vecBstXpos, vecBstYpos, vecBstSd), axis=1)
# Find unique rows
aryUnqRows = fnd_unq_rws(aryBstPrm, return_index=False,
return_inverse=False)
# Calculate deviation from a mean model for every voxel and fold
arySsTotXval = np.zeros((aryBstResFlds.shape),
dtype=aryBstResFlds.dtype)
# Loop over all best-fitting model parameter combinations found
for vecPrm in aryUnqRows:
# Get logical for voxels for which this prm combi was the best
lgcPrm = np.isclose(aryBstPrm, vecPrm, atol=1e-04).all(axis=1)
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcIndMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=1e-04).all(axis=1))[0][0]
if np.all(np.invert(lgcPrm)):
if lgcPrint:
print('------------No voxel found, process ' + str(idxPrc))
# Mark those voxels that were visited
vecVxlTst[lgcPrm] += 1
# Get voxel time course
aryVxlTc = aryFuncChnk[:, lgcPrm]
# Get model time courses
aryMdlTc = aryPrfTc[lgcIndMdl, :, :].T
# Calculate beta parameter estimates for entire model
aryBstBts[lgcPrm, :] = np.linalg.lstsq(aryMdlTc,
aryVxlTc,
rcond=-1)[0].T
# loop over cross-validation folds
for idxXval in range(varNumXval):
# Get functional data for tst:
aryFuncChnkTst = aryVxlTc[
aryIdxTst[:, idxXval], :]
# Deviation from the mean for each datapoint:
aryFuncDev = np.subtract(aryFuncChnkTst,
np.mean(aryFuncChnkTst,
axis=0)[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(aryFuncDev,
2.0),
axis=0)
arySsTotXval[lgcPrm, idxXval] = vecSsTot
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for SStot calc'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# Calculate coefficient of determination by comparing:
# aryBstResFlds vs. arySsTotXval
# get logical to check that arySsTotXval is greater than zero in all
# voxels and folds
lgcExclZeros = np.all(np.greater(arySsTotXval, np.array([0.0])),
axis=1)
if lgcPrint:
print('------------Nr of voxels: ' + str(len(lgcExclZeros)))
            print('------------Nr of voxels above 0: ' +
str(np.sum(lgcExclZeros)))
        # Calculate R2 for every cross-validation fold separately
aryBstR2fld = np.subtract(
1.0, np.divide(aryBstResFlds,
arySsTotXval))
# Calculate mean R2 across folds here
vecBstR2 = np.subtract(
1.0, np.mean(np.divide(aryBstResFlds,
arySsTotXval),
axis=1))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
aryBstBts,
aryBstR2fld]
queOut.put(lstOut)
else:
# To calculate the coefficient of determination, we start with the
# total sum of squares (i.e. the deviation of the data from the mean).
# The mean of each time course:
vecFuncMean = np.mean(aryFuncChnk, axis=0)
# Deviation from the mean for each datapoint:
aryFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :])
# Sum of squares:
vecSsTot = np.sum(np.power(aryFuncDev,
2.0),
axis=0)
# Coefficient of determination:
vecBstR2 = np.subtract(1.0,
np.divide(vecBstRes,
vecSsTot))
# Output list:
lstOut = [idxPrc,
vecBstXpos,
vecBstYpos,
vecBstSd,
vecBstR2,
aryBstBts]
        queOut.put(lstOut)
def check(schema, rev_id, page_id=None, radius=defaults.RADIUS,
before=None, window=None):
"""
Checks the revert status of a revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
        schema : :class:`mwdb.Schema`
            A database schema to make use of
rev_id : int
the ID of the revision to check
page_id : int
the ID of the page the revision occupies (slower if not provided)
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
:Example:
>>> import mwdb
>>> import mwreverts.api
>>>
>>> schema = mwdb.Schema("mysql+pymysql://enwiki.labsdb/enwiki_p" +
"?read_default_file=~/replica.my.cnf")
>>>
>>> def print_revert(revert):
... if revert is None:
... print(None)
... else:
... print(revert.reverting['rev_id'],
... [r['rev_id'] for r in revert.reverteds],
... revert.reverted_to['rev_id'])
...
>>> reverting, reverted, reverted_to = \\
... mwreverts.db.check(schema, 679778587)
>>> print_revert(reverting)
None
>>> print_revert(reverted)
679778743 [679778587] 679742862
>>> print_revert(reverted_to)
None
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
page_id = int(page_id) if page_id is not None else None
before = Timestamp(before) if before is not None else None
# If we don't have the page_id, we're going to need to look them up
if page_id is None:
page_id = get_page_id(schema, rev_id)
# Load history and current rev
current_and_past_revs = list(n_edits_before(
schema, rev_id + 1, page_id, n=radius + 1))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, page_id))
current_rev, past_revs = (
current_and_past_revs[-1], # Current rev is the last one returned
current_and_past_revs[:-1] # The rest are past revs
)
if current_rev.rev_id != rev_id:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, page_id))
if window is not None and before is None:
before = Timestamp(current_rev.rev_timestamp) + window
# Load future revisions
future_revs = list(n_edits_after(
schema, rev_id, page_id, n=radius, before=before))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | Checks the revert status of a revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to query
rev_id : int
the ID of the revision to check
page_id : int
the ID of the page the revision occupies (slower if not provided)
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
:Example:
>>> import mwdb
>>> import mwreverts.api
>>>
>>> schema = mwdb.Schema("mysql+pymysql://enwiki.labsdb/enwiki_p" +
"?read_default_file=~/replica.my.cnf")
>>>
>>> def print_revert(revert):
... if revert is None:
... print(None)
... else:
... print(revert.reverting['rev_id'],
... [r['rev_id'] for r in revert.reverteds],
... revert.reverted_to['rev_id'])
...
>>> reverting, reverted, reverted_to = \\
... mwreverts.db.check(schema, 679778587)
>>> print_revert(reverting)
None
>>> print_revert(reverted)
679778743 [679778587] 679742862
>>> print_revert(reverted_to)
None | entailment |
def check_archive(schema, rev_id, namespace=None, title=None, timestamp=None,
radius=defaults.RADIUS,
before=None, window=None):
"""
Checks the revert status of an archived revision (from a deleted page).
With this method, you can determine whether an edit is a 'reverting'
edit, was 'reverted' by another edit and/or was 'reverted_to' by
another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to query
rev_id : int
the ID of the revision to check
namespace : int
the namespace ID of the page the revision exists in
title : str
the title of the page the revision exists in
timestamp : :class:`mwtypes.Timestamp`
the timestamp that the revision for `rev_id` was saved
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
:Returns:
A triple :class:`mwreverts.Revert`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
namespace = int(namespace) if namespace is not None else None
title = str(title) if title is not None else None
timestamp = Timestamp(timestamp) if timestamp is not None else None
before = Timestamp(before) if before is not None else None
# If we don't have the page_id, we're going to need to look them up
if namespace is None or title is None or timestamp is None:
namespace, title, timestamp = \
get_archived_namespace_title_and_timestamp(schema, rev_id)
# Load history and current rev
current_and_past_revs = list(n_archived_edits_before(
schema, rev_id + 1, namespace, title, timestamp + 1, n=radius + 1))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}(ns={2}) @ {3}."
.format(rev_id, title, namespace, timestamp))
current_rev, past_revs = (
current_and_past_revs[-1], # Current rev is the last one returned
current_and_past_revs[:-1] # The rest are past revs
)
if current_rev.ar_rev_id != rev_id:
raise KeyError("Revision {0} not found in page {1}(ns={2}) @ {3}."
.format(rev_id, title, namespace, timestamp))
if window is not None and before is None:
before = Timestamp(current_rev.ar_timestamp) + window
# Load future revisions
future_revs = list(n_archived_edits_after(
schema, rev_id, namespace, title, timestamp, n=radius, before=before))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | Checks the revert status of an archived revision (from a deleted page).
With this method, you can determine whether an edit is a 'reverting'
edit, was 'reverted' by another edit and/or was 'reverted_to' by
another edit.
:Parameters:
schema : :class:`mwdb.Schema`
A database schema to query
rev_id : int
the ID of the revision to check
namespace : int
the namespace ID of the page the revision exists in
title : str
the title of the page the revision exists in
timestamp : :class:`mwtypes.Timestamp`
the timestamp that the revision for `rev_id` was saved
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
:Returns:
A triple :class:`mwreverts.Revert`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit | entailment |
def _get_json(value):
"""Convert the given value to a JSON object."""
if hasattr(value, 'replace'):
value = value.replace('\n', ' ')
try:
return json.loads(value)
except json.JSONDecodeError:
# Escape double quotes.
if hasattr(value, 'replace'):
value = value.replace('"', '\\"')
# try putting the value into a string
return json.loads('"{}"'.format(value)) | Convert the given value to a JSON object. | entailment |
def save_list(key, *values):
"""Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters.
"""
return json.dumps({key: [_get_json(value) for value in values]}) | Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters. | entailment |
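For illustration, a minimal sketch of how mixed inputs serialize (the key name 'fastq' is hypothetical; the helpers above are assumed to be in scope):
# Numeric strings parse as JSON scalars; everything else falls back to a plain string.
print(save_list('fastq', '1.5', 'reads.fq'))
# prints: {"fastq": [1.5, "reads.fq"]}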
def save_file(key, file_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isfile(file_path):
return error("Output '{}' set to a missing file: '{}'.".format(key, file_path))
result = {key: {"file": file_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]['refs'] = refs
return json.dumps(result) | Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}} | entailment |
def save_file_list(key, *files_refs):
"""Convert the given parameters to a special JSON object.
Each parameter is a file-refs specification of the form:
<file-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}}
"""
file_list = []
for file_refs in files_refs:
if ':' in file_refs:
try:
file_name, refs = file_refs.split(':')
except ValueError:
return error("Only one colon ':' allowed in file-refs specification.")
else:
file_name, refs = file_refs, None
if not os.path.isfile(file_name):
return error(
"Output '{}' set to a missing file: '{}'.".format(key, file_name)
)
file_obj = {'file': file_name}
if refs:
refs = [ref_path.strip() for ref_path in refs.split(',')]
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
file_obj['refs'] = refs
file_list.append(file_obj)
return json.dumps({key: file_list}) | Convert the given parameters to a special JSON object.
Each parameter is a file-refs specification of the form:
<file-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"file": file_path}}, or
{ key: {"file": file_path, "refs": [refs[0], refs[1], ... ]}} | entailment |
def save_dir(key, dir_path, *refs):
"""Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
result = {key: {"dir": dir_path}}
if refs:
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
result[key]["refs"] = refs
return json.dumps(result) | Convert the given parameters to a special JSON object.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}} | entailment |
def save_dir_list(key, *dirs_refs):
"""Convert the given parameters to a special JSON object.
Each parameter is a dir-refs specification of the form:
<dir-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}}
"""
dir_list = []
for dir_refs in dirs_refs:
if ':' in dir_refs:
try:
dir_path, refs = dir_refs.split(':')
except ValueError:
return error("Only one colon ':' allowed in dir-refs specification.")
else:
dir_path, refs = dir_refs, None
if not os.path.isdir(dir_path):
return error(
"Output '{}' set to a missing directory: '{}'.".format(key, dir_path)
)
dir_obj = {'dir': dir_path}
if refs:
refs = [ref_path.strip() for ref_path in refs.split(',')]
missing_refs = [
ref for ref in refs if not (os.path.isfile(ref) or os.path.isdir(ref))
]
if len(missing_refs) > 0:
return error(
"Output '{}' set to missing references: '{}'.".format(
key, ', '.join(missing_refs)
)
)
dir_obj['refs'] = refs
dir_list.append(dir_obj)
return json.dumps({key: dir_list}) | Convert the given parameters to a special JSON object.
Each parameter is a dir-refs specification of the form:
<dir-path>:<reference1>,<reference2>, ...,
where the colon ':' and the list of references are optional.
JSON object is of the form:
{ key: {"dir": dir_path}}, or
{ key: {"dir": dir_path, "refs": [refs[0], refs[1], ... ]}} | entailment |
def progress(progress):
"""Convert given progress to a JSON object.
Check that progress can be represented as float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress}
"""
if isinstance(progress, int) or isinstance(progress, float):
progress = float(progress)
else:
try:
progress = float(json.loads(progress))
except (TypeError, ValueError):
return warning("Progress must be a float.")
if not 0 <= progress <= 1:
return warning("Progress must be a float between 0 and 1.")
return json.dumps({'proc.progress': progress}) | Convert given progress to a JSON object.
Check that progress can be represented as float between 0 and 1 and
return it in JSON of the form:
{"proc.progress": progress} | entailment |
def checkrc(rc, *args):
"""Check if ``rc`` (return code) meets requirements.
Check if ``rc`` is 0 or is in ``args`` list that contains
acceptable return codes.
Last argument of ``args`` can optionally be error message that
is printed if ``rc`` doesn't meet requirements.
Output is JSON of the form:
{"proc.rc": <rc>,
"proc.error": "<error_msg>"},
where "proc.error" entry is omitted if empty.
"""
try:
rc = int(rc)
except (TypeError, ValueError):
return error("Invalid return code: '{}'.".format(rc))
acceptable_rcs = []
error_msg = ""
if len(args):
for code in args[:-1]:
try:
acceptable_rcs.append(int(code))
except (TypeError, ValueError):
return error("Invalid return code: '{}'.".format(code))
try:
acceptable_rcs.append(int(args[-1]))
except (TypeError, ValueError):
error_msg = args[-1]
if rc in acceptable_rcs:
rc = 0
ret = {'proc.rc': rc}
if rc and error_msg:
ret['proc.error'] = error_msg
return json.dumps(ret) | Check if ``rc`` (return code) meets requirements.
Check if ``rc`` is 0 or is in ``args`` list that contains
acceptable return codes.
Last argument of ``args`` can optionally be error message that
is printed if ``rc`` doesn't meet requirements.
Output is JSON of the form:
{"proc.rc": <rc>,
"proc.error": "<error_msg>"},
where "proc.error" entry is omitted if empty. | entailment |
def export_file(file_path):
"""Prepend the given parameter with ``export``"""
if not os.path.isfile(file_path):
return error("Referenced file does not exist: '{}'.".format(file_path))
return "export {}".format(file_path) | Prepend the given parameter with ``export`` | entailment |
def import_file(
src,
file_name,
imported_format=ImportedFormat.BOTH,
progress_from=0.0,
progress_to=None,
):
"""Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given)
"""
if progress_to is not None:
if not isinstance(progress_from, float) or not isinstance(progress_to, float):
raise ValueError("Progress_from and progress_to must be float")
if progress_from < 0 or progress_from > 1:
raise ValueError("Progress_from must be between 0 and 1")
if progress_to < 0 or progress_to > 1:
raise ValueError("Progress_to must be between 0 and 1")
if progress_from >= progress_to:
raise ValueError("Progress_to must be higher than progress_from")
print("Importing and compressing {}...".format(file_name))
def importGz():
"""Import gzipped file.
The file_name must have .gz extension.
"""
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
with open(file_name[:-3], 'wb') as f_out, gzip.open(src, 'rb') as f_in:
try:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
else: # Extracted file not-required
# Verify the compressed file.
with gzip.open(src, 'rb') as f:
try:
while f.read(CHUNK_SIZE) != b'':
pass
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
if imported_format == ImportedFormat.COMPRESSED:
return file_name
else:
return file_name[:-3]
def import7z():
"""Import compressed file in various formats.
Supported extensions: .bz2, .zip, .rar, .7z, .tar.gz, and .tar.bz2.
"""
extracted_name, _ = os.path.splitext(file_name)
destination_name = extracted_name
temp_dir = 'temp_{}'.format(extracted_name)
cmd = '7z x -y -o{} {}'.format(shlex.quote(temp_dir), shlex.quote(src))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as err:
if err.returncode == 2:
raise ValueError("Failed to extract file: {}".format(file_name))
else:
raise
paths = os.listdir(temp_dir)
if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])):
# Single file in archive.
temp_file = os.path.join(temp_dir, paths[0])
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(temp_file, 'rb') as f_in, gzip.open(
extracted_name + '.gz', 'wb'
) as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
shutil.move(temp_file, './{}'.format(extracted_name))
if extracted_name.endswith('.tar'):
with tarfile.open(extracted_name) as tar:
tar.extractall()
os.remove(extracted_name)
destination_name, _ = os.path.splitext(extracted_name)
else:
destination_name = extracted_name + '.gz'
else:
# Directory or several files in archive.
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with tarfile.open(extracted_name + '.tar.gz', 'w:gz') as tar:
for fname in glob.glob(os.path.join(temp_dir, '*')):
tar.add(fname, os.path.basename(fname))
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
for path in os.listdir(temp_dir):
shutil.move(os.path.join(temp_dir, path), './{}'.format(path))
else:
destination_name = extracted_name + '.tar.gz'
shutil.rmtree(temp_dir)
return destination_name
def importUncompressed():
"""Import uncompressed file."""
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(src, 'rb') as f_in, gzip.open(file_name + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
return (
file_name + '.gz'
if imported_format == ImportedFormat.COMPRESSED
else file_name
)
# Large file download from Google Drive requires cookie and token.
try:
response = None
if re.match(
r'^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
session = requests.Session()
response = session.get(src, stream=True)
token = None
for key, value in response.cookies.items():
if key.startswith('download_warning'):
token = value
break
if token is not None:
params = {'confirm': token}
response = session.get(src, params=params, stream=True)
elif re.match(
r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
response = requests.get(src, stream=True)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to {}".format(src))
if response:
with open(file_name, 'wb') as f:
total = response.headers.get('content-length')
total = float(total) if total else None
downloaded = 0
current_progress = 0
for content in response.iter_content(chunk_size=CHUNK_SIZE):
f.write(content)
if total is not None and progress_to is not None:
downloaded += len(content)
progress_span = progress_to - progress_from
next_progress = progress_from + progress_span * downloaded / total
next_progress = round(next_progress, 2)
if next_progress > current_progress:
print(progress(next_progress))
current_progress = next_progress
# Check if a temporary file exists.
if not os.path.isfile(file_name):
raise ValueError("Downloaded file not found {}".format(file_name))
src = file_name
else:
if not os.path.isfile(src):
raise ValueError("Source file not found {}".format(src))
# Decide which import should be used.
if re.search(r'\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$', file_name):
destination_file_name = import7z()
elif file_name.endswith('.gz'):
destination_file_name = importGz()
else:
destination_file_name = importUncompressed()
if progress_to is not None:
print(progress(progress_to))
return destination_file_name | Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given) | entailment |
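A hedged usage sketch (the URL and file name below are hypothetical placeholders; network access is required and ImportedFormat must be importable):
# Download an archive, keep both the extracted and compressed copies, and
# report progress over the first half of the progress range.
dest = import_file(
    'https://example.org/sample.tar.gz',  # hypothetical source URL
    'sample.tar.gz',
    imported_format=ImportedFormat.BOTH,
    progress_from=0.0,
    progress_to=0.5,
)
print(dest)  # the extracted path is returned when both formats are kept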
def loadNiiData(lstNiiFls,
strPathNiiMask=None,
strPathNiiFunc=None):
"""load nii data.
Parameters
----------
lstNiiFls : list, list of str with nii file names
strPathNiiMask : str, path to nii file with mask (optional)
strPathNiiFunc : str, parent path to nii files (optional)
Returns
-------
aryFunc : np.array
Nii data
"""
print('---------Loading nii data')
# check whether a mask is available
if strPathNiiMask is not None:
aryMask = nb.load(strPathNiiMask).get_data().astype('bool')
# check whether a parent path is available that needs to be prepended to the nii file names
if strPathNiiFunc is not None:
lstNiiFls = [os.path.join(strPathNiiFunc, i) for i in lstNiiFls]
aryFunc = []
for idx, path in enumerate(lstNiiFls):
print('------------Loading run: ' + str(idx+1))
# Load 4D nii data:
niiFunc = nb.load(path).get_data()
# append to list
if strPathNiiMask is not None:
aryFunc.append(niiFunc[aryMask, :])
else:
aryFunc.append(niiFunc)
# concatenate arrays in list along time dimension
aryFunc = np.concatenate(aryFunc, axis=-1)
# set to type float32
aryFunc = aryFunc.astype('float32')
return aryFunc | load nii data.
Parameters
----------
lstNiiFls : list, list of str with nii file names
strPathNiiMask : str, path to nii file with mask (optional)
strPathNiiFunc : str, parent path to nii files (optional)
Returns
-------
aryFunc : np.array
Nii data | entailment |
def calcR2(predTst, yTest, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
return 1 - rss/tss | calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2 | entailment |
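A small worked example (assuming calcR2 above is in scope):
import numpy as np
rng = np.random.RandomState(0)
yTest = rng.randn(100)
predGood = yTest + 0.1 * rng.randn(100)       # near-perfect prediction
predMean = np.full_like(yTest, yTest.mean())  # baseline: predict the mean
print(calcR2(predGood, yTest))  # close to 1.0
print(calcR2(predMean, yTest))  # exactly 0.0 by construction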
def calcFstats(predTst, yTest, p, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted reponse for yTest
yTest : np.array, acxtually observed response for yTest
p: float, number of predictors
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
# derive number of measurements
n = yTest.shape[0]
# calculate Fvalues
vecFvals = ((tss - rss)/p)/(rss/(n-p-1))
# calculate corresponding p values
df1 = p - 1
df2 = n-1
vecPvals = stats.f.cdf(vecFvals, df1, df2)
return vecFvals, vecPvals | calculate F statistics and corresponding p values. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
p: float, number of predictors
Returns
-------
vecFvals, vecPvals : np.array
F values and corresponding p values | entailment |
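The F value above relates to R2 via F = (R2 / p) / ((1 - R2) / (n - p - 1)); note also that stats.f.cdf returns the cumulative probability, so a conventional right-tail p value would use stats.f.sf instead. A small sketch (assuming calcFstats above is in scope):
import numpy as np
rng = np.random.RandomState(1)
yTest = rng.randn(50)
predTst = 0.8 * yTest + 0.2 * rng.randn(50)  # informative prediction
vecFvals, vecPvals = calcFstats(predTst, yTest, p=3)
print(vecFvals, vecPvals)  # large F for a good fit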
def calcMse(predTst, yTest, axis=0):
"""calculate mean squared error. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
MSE
"""
return np.mean((yTest - predTst) ** 2, axis=axis) | calculate mean squared error. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
MSE | entailment |
def n_day(date_string):
"""
date_string string in format "(number|a) day(s) ago"
"""
today = datetime.date.today()
match = re.match(r'(\d{1,3}|a) days? ago', date_string)
if match:  # re.match returns None when the string does not fit the pattern
decrement = match.group(1)
if decrement == 'a':
decrement = 1
return today - datetime.timedelta(days=int(decrement))
return None | date_string string in format "(number|a) day(s) ago" | entailment |
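For illustration (assuming n_day above is in scope):
print(n_day('a day ago'))    # yesterday's date
print(n_day('14 days ago'))  # the date two weeks back
print(n_day('tomorrow'))     # no match, returns None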
def detect(checksum_revisions, radius=defaults.RADIUS):
"""
Detects reverts that occur in a sequence of revisions. Note that the
`revision` metadata is simply returned as part of any detected revert.
This function serves as a convenience wrapper around calls to
:class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process`
method.
:Parameters:
checksum_revisions : `iterable` ( (checksum, revision) )
an iterable over tuples of checksum and revision meta data
radius : int
a positive integer indicating the maximum revision distance that a
revert can span.
:Return:
an iterator over :class:`mwreverts.Revert`
:Example:
>>> import mwreverts
>>>
>>> checksum_revisions = [
... ("aaa", {'rev_id': 1}),
... ("bbb", {'rev_id': 2}),
... ("aaa", {'rev_id': 3}),
... ("ccc", {'rev_id': 4})
... ]
>>>
>>> list(mwreverts.detect(checksum_revisions))
[Revert(reverting={'rev_id': 3},
reverteds=[{'rev_id': 2}],
reverted_to={'rev_id': 1})]
"""
revert_detector = Detector(radius)
for checksum, revision in checksum_revisions:
revert = revert_detector.process(checksum, revision)
if revert is not None:
yield revert | Detects reverts that occur in a sequence of revisions. Note that the
`revision` metadata is simply returned as part of any detected revert.
This function serves as a convenience wrapper around calls to
:class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process`
method.
:Parameters:
checksum_revisions : `iterable` ( (checksum, revision) )
an iterable over tuples of checksum and revision meta data
radius : int
a positive integer indicating the maximum revision distance that a
revert can span.
:Return:
an iterator over :class:`mwreverts.Revert`
:Example:
>>> import mwreverts
>>>
>>> checksum_revisions = [
... ("aaa", {'rev_id': 1}),
... ("bbb", {'rev_id': 2}),
... ("aaa", {'rev_id': 3}),
... ("ccc", {'rev_id': 4})
... ]
>>>
>>> list(mwreverts.detect(checksum_revisions))
[Revert(reverting={'rev_id': 3},
reverteds=[{'rev_id': 2}],
reverted_to={'rev_id': 1})] | entailment |
def getChargeInfo(self, CorpNum, ItemCode, UserID=None):
""" κ³ΌκΈμ 보 νμΈ
args
CorpNum : νμ μ¬μ
μλ²νΈ
ItemCode : μ μλͺ
μΈμ μ’
λ₯μ½λ
UserID : νλΉ νμμμ΄λ
return
κ³ΌκΈμ 보 κ°μ²΄
raise
PopbillException
"""
if ItemCode == None or ItemCode == '':
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/Statement/ChargeInfo/' + ItemCode, CorpNum, UserID) | κ³ΌκΈμ 보 νμΈ
args
CorpNum : νμ μ¬μ
μλ²νΈ
ItemCode : μ μλͺ
μΈμ μ’
λ₯μ½λ
UserID : νλΉ νμμμ΄λ
return
κ³ΌκΈμ 보 κ°μ²΄
raise
PopbillException | entailment |
def getUnitCost(self, CorpNum, ItemCode):
""" μ μλͺ
μΈμ λ°νλ¨κ° νμΈ.
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
return
λ°νλ¨κ° by float
raise
PopbillException
"""
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
result = self._httpget('/Statement/' + str(ItemCode) + '?cfg=UNITCOST', CorpNum)
return float(result.unitCost) | μ μλͺ
μΈμ λ°νλ¨κ° νμΈ.
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
return
λ°νλ¨κ° by float
raise
PopbillException | entailment |
def FAXSend(self, CorpNum, statement, SendNum, ReceiveNum, UserID=None):
""" μ ν©μ€ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
statement : μ μλͺ
μΈμ κ°μ²΄
SendNum : ν©μ€ λ°μ λ²νΈ
ReceiveNum : ν©μ€ μμ λ²νΈ
UserID : νλΉνμ μμ΄λ
return
ν©μ€μ μ‘ μ μλ²νΈ(receiptNum)
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "μ μ‘ν μ μλͺ
μΈμ μ λ³΄κ° μ
λ ₯λμ§ μμμ΅λλ€.")
if SendNum == None or SendNum == '':
raise PopbillException(-99999999, "ν©μ€μ μ‘ λ°μ λ²νΈκ° μ¬λ°λ₯΄μ§ μμμ΅λλ€.")
if ReceiveNum == None or ReceiveNum == '':
raise PopbillException(-99999999, "ν©μ€μ μ‘ μμ λ²νΈκ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")
statement.sendNum = SendNum
statement.receiveNum = ReceiveNum
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID, "FAX").receiptNum | μ ν©μ€ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
statement : μ μλͺ
μΈμ κ°μ²΄
SendNum : ν©μ€ λ°μ λ²νΈ
ReceiveNum : ν©μ€ μμ λ²νΈ
UserID : νλΉνμ μμ΄λ
return
ν©μ€μ μ‘ μ μλ²νΈ(receiptNum)
raise
PopbillException | entailment |
def registIssue(self, CorpNum, statement, Memo=None, UserID=None):
""" μ¦μλ°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
statement : λ±λ‘ν μ μλͺ
μΈμ object. made with Statement(...)
Memo : μ¦μλ°νλ©λͺ¨
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "λ±λ‘ν μ μλͺ
μΈμ μ λ³΄κ° μ
λ ₯λμ§ μμμ΅λλ€.")
if Memo != None or Memo != '':
statement.memo = Memo
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID, "ISSUE") | μ¦μλ°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
statement : λ±λ‘ν μ μλͺ
μΈμ object. made with Statement(...)
Memo : μ¦μλ°νλ©λͺ¨
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
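A heavily hedged usage sketch; the service construction and every field value below are illustrative placeholders, not values verified against the real Popbill SDK:
# Hypothetical: a StatementService authenticated with partner credentials.
svc = StatementService('LinkID', 'SecretKey')   # placeholder credentials
stmt = Statement(itemCode=121,                  # transaction statement
                 mgtKey='20240101-001',         # partner-side document key
                 writeDate='20240101')
result = svc.registIssue('1234567890', stmt, Memo='issued immediately')
print(result.code, result.message)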
def register(self, CorpNum, statement, UserID=None):
""" μμμ μ₯
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
statement : λ±λ‘ν μ μλͺ
μΈμ object. made with Statement(...)
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if statement == None:
raise PopbillException(-99999999, "λ±λ‘ν μ μλͺ
μΈμ μ λ³΄κ° μ
λ ₯λμ§ μμμ΅λλ€.")
postData = self._stringtify(statement)
return self._httppost('/Statement', postData, CorpNum, UserID) | μμμ μ₯
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
statement : λ±λ‘ν μ μλͺ
μΈμ object. made with Statement(...)
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def update(self, CorpNum, ItemCode, MgtKey, Statement, UserID=None):
""" μμμ μ₯
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Statement : λ±λ‘ν μ μλͺ
μΈμ object. made with Statement(...)
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if Statement == None:
raise PopbillException(-99999999, "λ±λ‘ν μ μλͺ
μΈμ μ λ³΄κ° μ
λ ₯λμ§ μμμ΅λλ€.")
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
postData = self._stringtify(Statement)
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, 'PATCH') | μμμ μ₯
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Statement : λ±λ‘ν μ μλͺ
μΈμ object. made with Statement(...)
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def issue(self, CorpNum, ItemCode, MgtKey, Memo=None, EmailSubject=None, UserID=None):
""" λ°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Memo : μ²λ¦¬λ©λͺ¨
EmailSubject : λ°νλ©μΌ μ λͺ©(λ―ΈκΈ°μ¬μ κΈ°λ³ΈμμμΌλ‘ μ μ‘)
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
req = {}
postData = ""
if Memo != None and Memo != '':
req["memo"] = Memo
if EmailSubject != None and EmailSubject != '':
req["emailSubject"] = EmailSubject
postData = self._stringtify(req)
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "ISSUE") | λ°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Memo : μ²λ¦¬λ©λͺ¨
EmailSubject : λ°νλ©μΌ μ λͺ©(λ―ΈκΈ°μ¬μ κΈ°λ³ΈμμμΌλ‘ μ μ‘)
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def cancel(self, CorpNum, ItemCode, MgtKey, Memo=None, UserID=None):
""" λ°νμ·¨μ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Memo : μ²λ¦¬λ©λͺ¨
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
postData = ''
if Memo != None and Memo != '':
postData = self._stringtify({"memo": Memo})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "CANCEL") | λ°νμ·¨μ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Memo : μ²λ¦¬λ©λͺ¨
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def delete(self, CorpNum, ItemCode, MgtKey, UserID=None):
""" μμ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, '', CorpNum, UserID, "DELETE") | μμ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def search(self, CorpNum, DType, SDate, EDate, State, ItemCode, Page, PerPage, Order, UserID=None, QString=None):
""" λͺ©λ‘ μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
DType : μΌμμ ν, R-λ±λ‘μΌμ, W-μμ±μΌμ, I-λ°νμΌμ μ€ ν 1
SDate : μμμΌμ, νμνμ(yyyyMMdd)
EDate : μ’
λ£μΌμ, νμνμ(yyyyMMdd)
State : μνμ½λ, 2,3λ²μ§Έ μ리μ μμΌλμΉ΄λ(*) μ¬μ©κ°λ₯
ItemCode : λͺ
μΈμ μ’
λ₯μ½λ λ°°μ΄, 121-λͺ
μΈμ, 122-μ²κ΅¬μ, 123-견μ μ, 124-λ°μ£Όμ 125-μ
κΈν, 126-μμμ¦
Page : νμ΄μ§λ²νΈ
PerPage : νμ΄μ§λΉ λͺ©λ‘κ°μ
Order : μ λ ¬λ°©ν₯, D-λ΄λ¦Όμ°¨μ, A-μ€λ¦μ°¨μ
QString : κ±°λμ² μ 보, κ±°λμ² μνΈ λλ μ¬μ
μλ±λ‘λ²νΈ κΈ°μ¬, λ―ΈκΈ°μ¬μ μ 체쑰ν
UserID : νλΉ νμμμ΄λ
"""
if DType == None or DType == '':
raise PopbillException(-99999999, "μΌμμ νμ΄ μ
λ ₯λμ§ μμμ΅λλ€.")
if SDate == None or SDate == '':
raise PopbillException(-99999999, "μμμΌμκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "μ’
λ£μΌμκ° μ
λ ₯λμ§ μμμ΅λλ€.")
uri = '/Statement/Search'
uri += '?DType=' + DType
uri += '&SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
uri += '&ItemCode=' + ','.join(ItemCode)
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if QString is not None:
uri += '&QString=' + QString
return self._httpget(uri, CorpNum, UserID) | λͺ©λ‘ μ‘°ν
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
DType : μΌμμ ν, R-λ±λ‘μΌμ, W-μμ±μΌμ, I-λ°νμΌμ μ€ ν 1
SDate : μμμΌμ, νμνμ(yyyyMMdd)
EDate : μ’
λ£μΌμ, νμνμ(yyyyMMdd)
State : μνμ½λ, 2,3λ²μ§Έ μ리μ μμΌλμΉ΄λ(*) μ¬μ©κ°λ₯
ItemCode : λͺ
μΈμ μ’
λ₯μ½λ λ°°μ΄, 121-λͺ
μΈμ, 122-μ²κ΅¬μ, 123-견μ μ, 124-λ°μ£Όμ 125-μ
κΈν, 126-μμμ¦
Page : νμ΄μ§λ²νΈ
PerPage : νμ΄μ§λΉ λͺ©λ‘κ°μ
Order : μ λ ¬λ°©ν₯, D-λ΄λ¦Όμ°¨μ, A-μ€λ¦μ°¨μ
QString : κ±°λμ² μ 보, κ±°λμ² μνΈ λλ μ¬μ
μλ±λ‘λ²νΈ κΈ°μ¬, λ―ΈκΈ°μ¬μ μ 체쑰ν
UserID : νλΉ νμμμ΄λ | entailment |
def getInfo(self, CorpNum, ItemCode, MgtKey):
""" μν/μμ½ μ 보 νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
λ¬Έμ μν/μμ½μ 보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey, CorpNum) | μν/μμ½ μ 보 νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
λ¬Έμ μν/μμ½μ 보 object
raise
PopbillException | entailment |
def getDetailInfo(self, CorpNum, ItemCode, MgtKey):
""" μ μλͺ
μΈμ μμΈμ 보 νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
λ¬Έμ μμΈμ 보 object
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '?Detail', CorpNum) | μ μλͺ
μΈμ μμΈμ 보 νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
λ¬Έμ μμΈμ 보 object
raise
PopbillException | entailment |
def sendSMS(self, CorpNum, ItemCode, MgtKey, Sender, Receiver, Contents, UserID=None):
""" μλ¦Όλ¬Έμ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Sender : λ°μ λ²νΈ
Receiver : μμ λ²νΈ
Contents : λ¬Έμλ©μμ§ λ΄μ©(μ΅λ 90Byte), μ΅λκΈΈμ΄λ₯Ό μ΄κ³Όνκ²½μ° κΈΈμ΄κ° μ‘°μ λμ΄ μ μ‘λ¨
UserID : νλΉ νμμμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
postData = self._stringtify({
"sender": Sender,
"receiver": Receiver,
"contents": Contents
})
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey, postData, CorpNum, UserID, "SMS") | μλ¦Όλ¬Έμ μ μ‘
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
Sender : λ°μ λ²νΈ
Receiver : μμ λ²νΈ
Contents : λ¬Έμλ©μμ§ λ΄μ©(μ΅λ 90Byte), μ΅λκΈΈμ΄λ₯Ό μ΄κ³Όνκ²½μ° κΈΈμ΄κ° μ‘°μ λμ΄ μ μ‘λ¨
UserID : νλΉ νμμμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def getLogs(self, CorpNum, ItemCode, MgtKey):
""" μ μλͺ
μΈμ λ¬Έμμ΄λ ₯ λͺ©λ‘ νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
λ¬Έμμ΄λ ₯ μ 보 λͺ©λ‘ as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Logs', CorpNum) | μ μλͺ
μΈμ λ¬Έμμ΄λ ₯ λͺ©λ‘ νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
λ¬Έμμ΄λ ₯ μ 보 λͺ©λ‘ as List
raise
PopbillException | entailment |
def attachFile(self, CorpNum, ItemCode, MgtKey, FilePath, UserID=None):
""" νμΌ μ²¨λΆ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
FilePath : 첨λΆνμΌμ κ²½λ‘
UserID : νλΉ νμμμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if FilePath == None or FilePath == "":
raise PopbillException(-99999999, "νμΌκ²½λ‘κ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
files = []
try:
with open(FilePath, "rb") as F:
files = [File(fieldName='Filedata',
fileName=F.name,
fileData=F.read())]
except IOError:
raise PopbillException(-99999999, "ν΄λΉκ²½λ‘μ νμΌμ΄ μκ±°λ μ½μ μ μμ΅λλ€.")
return self._httppost_files('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', None, files, CorpNum,
UserID) | νμΌ μ²¨λΆ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
FilePath : 첨λΆνμΌμ κ²½λ‘
UserID : νλΉ νμμμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def getFiles(self, CorpNum, ItemCode, MgtKey):
""" 첨λΆνμΌ λͺ©λ‘ νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
첨λΆνμΌ λͺ©λ‘ as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', CorpNum) | 첨λΆνμΌ λͺ©λ‘ νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
return
첨λΆνμΌ λͺ©λ‘ as List
raise
PopbillException | entailment |
def deleteFile(self, CorpNum, ItemCode, MgtKey, FileID, UserID=None):
""" 첨λΆνμΌ μμ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
FileID : νμΌμμ΄λ, 첨λΆνμΌ λͺ©λ‘νμΈ(getFiles) API μλ΅μ λ¬Έμ AttachedFile λ³μκ°
UserID : νλΉνμ μμ΄λ
return
첨λΆνμΌ μ 보 λͺ©λ‘ as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if FileID == None or FileID == "":
raise PopbillException(-99999999, "νμΌμμ΄λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
postData = ''
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files/' + FileID, postData, CorpNum,
UserID, 'DELETE') | 첨λΆνμΌ μμ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKey : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ
FileID : νμΌμμ΄λ, 첨λΆνμΌ λͺ©λ‘νμΈ(getFiles) API μλ΅μ λ¬Έμ AttachedFile λ³μκ°
UserID : νλΉνμ μμ΄λ
return
첨λΆνμΌ μ 보 λͺ©λ‘ as List
raise
PopbillException | entailment |
def getMassPrintURL(self, CorpNum, ItemCode, MgtKeyList, UserID=None):
""" λ€λ μΈμ URL νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKeyList : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ λͺ©λ‘
UserID : νλΉνμ μμ΄λ
return
νλΉ URL as str
raise
PopbillException
"""
if MgtKeyList == None:
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈ λ°°μ΄μ΄ μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
postData = self._stringtify(MgtKeyList)
result = self._httppost('/Statement/' + str(ItemCode) + '?Print', postData, CorpNum, UserID)
return result.url | λ€λ μΈμ URL νμΈ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : λͺ
μΈμ μ’
λ₯ μ½λ
[121 - κ±°λλͺ
μΈμ], [122 - μ²κ΅¬μ], [123 - 견μ μ],
[124 - λ°μ£Όμ], [125 - μ
κΈν], [126 - μμμ¦]
MgtKeyList : ννΈλ λ¬Έμκ΄λ¦¬λ²νΈ λͺ©λ‘
UserID : νλΉνμ μμ΄λ
return
νλΉ URL as str
raise
PopbillException | entailment |
def attachStatement(self, CorpNum, ItemCode, MgtKey, SubItemCode, SubMgtKey, UserID=None):
""" λ€λ₯Έ μ μλͺ
μΈμ 첨λΆ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : μ μλͺ
μΈμ μ’
λ₯μ½λ, 121-λͺ
μΈμ, 122-μ²κ΅¬μ, 123-견μ μ, 124-λ°μ£Όμ 125-μ
κΈν, 126-μμμ¦
MgtKey : μ μλͺ
μΈμ λ¬Έμκ΄λ¦¬λ²νΈ
SubItemCode : 첨λΆν λͺ
μΈμ μ’
λ₯μ½λ, 121-λͺ
μΈμ, 122-μ²κ΅¬μ, 123-견μ μ, 124-λ°μ£Όμ 125-μ
κΈν, 126-μμμ¦
SubMgtKey : 첨λΆν μ μλͺ
μΈμ λ¬Έμκ΄λ¦¬λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "κ΄λ¦¬λ²νΈκ° μ
λ ₯λμ§ μμμ΅λλ€.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "λͺ
μΈμ μ’
λ₯ μ½λκ° μ
λ ₯λμ§ μμμ΅λλ€.")
uri = '/Statement/' + ItemCode + '/' + MgtKey + '/AttachStmt'
postData = self._stringtify({"ItemCode": ItemCode, "MgtKey": SubMgtKey})
return self._httppost(uri, postData, CorpNum, UserID) | λ€λ₯Έ μ μλͺ
μΈμ 첨λΆ
args
CorpNum : νλΉνμ μ¬μ
μλ²νΈ
ItemCode : μ μλͺ
μΈμ μ’
λ₯μ½λ, 121-λͺ
μΈμ, 122-μ²κ΅¬μ, 123-견μ μ, 124-λ°μ£Όμ 125-μ
κΈν, 126-μμμ¦
MgtKey : μ μλͺ
μΈμ λ¬Έμκ΄λ¦¬λ²νΈ
SubItemCode : 첨λΆν λͺ
μΈμ μ’
λ₯μ½λ, 121-λͺ
μΈμ, 122-μ²κ΅¬μ, 123-견μ μ, 124-λ°μ£Όμ 125-μ
κΈν, 126-μμμ¦
SubMgtKey : 첨λΆν μ μλͺ
μΈμ λ¬Έμκ΄λ¦¬λ²νΈ
UserID : νλΉνμ μμ΄λ
return
μ²λ¦¬κ²°κ³Ό. consist of code and message
raise
PopbillException | entailment |
def process(self, article):
"""
Ingests an article and processes it for metadata and elements to provide
proper references in the EPUB spine.
This method may only be called once unless the Package was instantiated
in collection mode using ``Package(collection=True)``. It places entries
in an internal spine list for the Main Content Document, the
Bibliographic Content Document (if there are ref elements in Back), and
the Tables Content Document (if there are table elements). It then
employs the publisher specific methods for extracting article metadata
using the article's publisher attribute (an instance of a Publisher
class).
Parameters
----------
article : openaccess_epub.article.Article instance
An article to be included in the EPUB, to be processed for metadata
and appropriate content document references.
"""
if self.article is not None and not self.collection:
log.warning('Could not process additional article. Package only \
handles one article unless collection mode is set.')
return False
if article.publisher is None:
log.error('''Package cannot be generated for an Article \
without a publisher!''')
return
self.article = article
self.article_doi = self.article.doi.split('/')[1]
self.all_dois.append(self.article.doi)
#Analyze the article to add entries to the spine
dash_doi = self.article_doi.replace('.', '-')
#Entry for the main content document
main_idref = 'main-{0}-xhtml'.format(dash_doi)
self.spine_list.append(spine_item(main_idref, True))
#Entry for the biblio content document
biblio_idref = 'biblio-{0}-xhtml'.format(dash_doi)
if self.article.root.xpath('./back/ref-list/ref'):
self.spine_list.append(spine_item(biblio_idref, True))
#Entry for the tables content document
tables_idref = 'tables-{0}-xhtml'.format(dash_doi)
if self.article.publisher.has_out_of_flow_tables():
self.spine_list.append(spine_item(tables_idref, False))
self.acquire_metadata() | Ingests an article and processes it for metadata and elements to provide
proper references in the EPUB spine.
This method may only be called once unless the Package was instantiated
in collection mode using ``Package(collection=True)``. It places entries
in an internal spine list for the Main Content Document, the
Bibliographic Content Document (if there are ref elements in Back), and
the Tables Content Document (if there are table elements). It then
employs the publisher specific methods for extracting article metadata
using the article's publisher attribute (an instance of a Publisher
class).
Parameters
----------
article : openaccess_epub.article.Article instance
An article to be included in the EPUB, to be processed for metadata
and appropriate content document references. | entailment |
def acquire_metadata(self):
"""
Handles the acquisition of metadata for both collection mode and single
mode, uses the metadata methods belonging to the article's publisher
attribute.
"""
#For space economy
publisher = self.article.publisher
if self.collection: # collection mode metadata gathering
pass
else: # single mode metadata gathering
self.pub_id = publisher.package_identifier()
self.title = publisher.package_title()
for date in publisher.package_date():
self.dates.add(date)
#Common metadata gathering
for lang in publisher.package_language():
self.languages.add(lang) # languages
for contributor in publisher.package_contributors(): # contributors
self.contributors.add(contributor)
self.publishers.add(publisher.package_publisher()) # publisher names
desc = publisher.package_description()
if desc is not None:
self.descriptions.add(desc)
for subj in publisher.package_subject():
self.subjects.add(subj) # subjects
#Rights
art_rights = publisher.package_rights()
self.rights.add(art_rights)
if art_rights not in self.rights_associations:
self.rights_associations[art_rights] = [self.article.doi]
else:
self.rights_associations[art_rights].append(self.article.doi) | Handles the acquisition of metadata for both collection mode and single
mode, uses the metadata methods belonging to the article's publisher
attribute. | entailment |
def file_manifest(self, location):
"""
An iterator through the files in a location which yields item elements
suitable for insertion into the package manifest.
"""
#Maps file extensions to mimetypes
mimetypes = {'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.xml': 'application/xhtml+xml',
'.png': 'image/png',
'.css': 'text/css',
'.ncx': 'application/x-dtbncx+xml',
'.gif': 'image/gif',
'.tif': 'image/tif',
'.pdf': 'application/pdf',
'.xhtml': 'application/xhtml+xml',
'.ttf': 'application/vnd.ms-opentype',
'.otf': 'application/vnd.ms-opentype'}
current_dir = os.getcwd()
os.chdir(location)
for dirpath, _dirnames, filenames in os.walk('.'):
dirpath = dirpath[2:] # A means to avoid dirpath prefix of './'
for fn in filenames:
fn_ext = os.path.splitext(fn)[-1]
item = etree.Element('item')
#Here we set three attributes: href, media-type, and id
if not dirpath:
item.attrib['href'] = fn
else:
item.attrib['href'] = '/'.join([dirpath, fn])
item.attrib['media-type'] = mimetypes[fn_ext]
#Special handling for common image types
if fn_ext in ['.jpg', '.png', '.tif', '.jpeg']:
#the following lines assume we are using the convention
#where the article doi is prefixed by 'images-'
item.attrib['id'] = '-'.join([dirpath[7:],
fn.replace('.', '-')])
else:
item.attrib['id'] = fn.replace('.', '-')
yield item
os.chdir(current_dir) | An iterator through the files in a location which yields item elements
suitable for insertion into the package manifest. | entailment |
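A sketch of the manifest items this generator yields (assumes pkg is a Package instance, './epub' contains the content files, and every file extension appears in the mimetypes map above):
from lxml import etree
for item in pkg.file_manifest('./epub'):
    print(etree.tostring(item))
# e.g. b'<item href="css/main.css" media-type="text/css" id="main-css"/>'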
def get_contrib_names(self, contrib):
"""
Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well.
"""
collab = contrib.find('collab')
anon = contrib.find('anonymous')
if collab is not None:
proper_name = serialize(collab, strip=True)
file_as_name = proper_name
elif anon is not None:
proper_name = 'Anonymous'
file_as_name = proper_name
else:
name = contrib.find('name')
surname = name.find('surname').text
given = name.find('given-names')
if given is not None:
if given.text: # Sometimes these tags are empty
proper_name = ' '.join([surname, given.text])
#File-as name is <surname>, <given-initial-char>
file_as_name = ', '.join([surname, given.text[0]])
else:
proper_name = surname
file_as_name = proper_name
else:
proper_name = surname
file_as_name = proper_name
return proper_name, file_as_name | Returns an appropriate Name and File-As-Name for a contrib element.
This code was refactored out of nav_contributors and
package_contributors to provide a single definition point for a common
job. This is a useful utility that may be well-employed for other
publishers as well. | entailment |
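A sketch with a hand-built contrib element (assumes pub is an instance exposing the method above):
from lxml import etree
contrib = etree.fromstring(
    '<contrib><name><surname>Doe</surname>'
    '<given-names>Jane</given-names></name></contrib>')
print(pub.get_contrib_names(contrib))  # ('Doe Jane', 'Doe, J')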
def package_description(self):
"""
Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article.
"""
abstract = self.article.root.xpath('./front/article-meta/abstract')
return serialize(abstract[0], strip=True) if abstract else None | Given an Article class instance, this is responsible for returning an
article description. For this method I have taken the approach of
serializing the article's first abstract, if it has one. This results
in 0 or 1 descriptions per article. | entailment |
def heading_title(self):
"""
Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter
"""
art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title = deepcopy(art_title)
article_title.tag = 'h1'
article_title.attrib['id'] = 'title'
article_title.attrib['class'] = 'article-title'
return article_title | Makes the Article Title for the Heading.
Metadata element, content derived from FrontMatter | entailment |
def make_heading_authors(self, authors):
"""
Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter
"""
author_element = etree.Element('h3', {'class': 'authors'})
#Construct content for the author element
first = True
for author in authors:
if first:
first = False
else:
append_new_text(author_element, ',', join_str='')
collab = author.find('collab')
anon = author.find('anonymous')
if collab is not None:
append_all_below(author_element, collab)
elif anon is not None: # If anonymous, just add "Anonymous"
append_new_text(author_element, 'Anonymous')
else: # Author is neither Anonymous or a Collaboration
author_name, _ = self.get_contrib_names(author)
append_new_text(author_element, author_name)
#TODO: Handle author footnote references, also put footnotes in the ArticleInfo
#Example: journal.pbio.0040370.xml
first = True
for xref in author.xpath("./xref[@ref-type='corresp' or @ref-type='aff']"):
_sup = xref.find('sup')
sup_text = all_text(_sup) if _sup is not None else ''
auth_sup = etree.SubElement(author_element, 'sup')
sup_link = etree.SubElement(auth_sup,
'a',
{'href': self.main_fragment.format(xref.attrib['rid'])})
sup_link.text = sup_text
if first:
first = False
else:
append_new_text(auth_sup, ', ', join_str='')
#for xref in author.findall('xref'):
#if xref.attrs['ref-type'] in ['corresp', 'aff']:
#try:
#sup_element = xref.sup[0].node
#except IndexError:
#sup_text = ''
#else:
#sup_text = all_text(sup_element)
#new_sup = etree.SubElement(author_element, 'sup')
#sup_link = etree.SubElement(new_sup, 'a')
#sup_link.attrib['href'] = self.main_fragment.format(xref.attrs['rid'])
#sup_link.text = sup_text
#if first:
#first = False
#else:
#new_sup.text = ','
return author_element | Constructs the Authors content for the Heading. This should display
directly after the Article Title.
Metadata element, content derived from FrontMatter | entailment |
def make_heading_affiliations(self, heading_div):
"""
Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter
"""
#Get all of the aff element tuples from the metadata
affs = self.article.root.xpath('./front/article-meta/aff')
#Create a list of all those pertaining to the authors
author_affs = [i for i in affs if 'aff' in i.attrib['id']]
#Count them, used for formatting
if len(author_affs) == 0:
return None
else:
affs_list = etree.SubElement(heading_div,
'ul',
{'id': 'affiliations',
'class': 'simple'})
for aff in author_affs:
#Create a span element to accept extracted content
aff_item = etree.SubElement(affs_list, 'li')
aff_item.attrib['id'] = aff.attrib['id']
#Get the first label node and the first addr-line node
label = aff.find('label')
addr_line = aff.find('addr-line')
if label is not None:
bold = etree.SubElement(aff_item, 'b')
bold.text = all_text(label) + ' '
if addr_line is not None:
append_new_text(aff_item, all_text(addr_line))
else:
append_new_text(aff_item, all_text(aff)) | Makes the content for the Author Affiliations, displays after the
Authors segment in the Heading.
Metadata element, content derived from FrontMatter | entailment |
def make_heading_abstracts(self, heading_div):
"""
An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter
"""
for abstract in self.article.root.xpath('./front/article-meta/abstract'):
#Make a copy of the abstract
abstract_copy = deepcopy(abstract)
abstract_copy.tag = 'div'
#Abstracts are a rather diverse bunch, keep an eye on them!
title_text = abstract_copy.xpath('./title[1]/text()')
for title in abstract_copy.findall('.//title'):
remove(title)
#Create a header for the abstract
abstract_header = etree.Element('h2')
remove_all_attributes(abstract_copy)
#Set the header text and abstract id according to abstract type
abstract_type = abstract.attrib.get('abstract-type')
        log.debug('Handling Abstract with abstract-type="{0}"'.format(abstract_type))
if abstract_type == 'summary':
abstract_header.text = 'Author Summary'
abstract_copy.attrib['id'] = 'author-summary'
elif abstract_type == 'editors-summary':
abstract_header.text = 'Editors\' Summary'
abstract_copy.attrib['id'] = 'editor-summary'
elif abstract_type == 'synopsis':
abstract_header.text = 'Synopsis'
abstract_copy.attrib['id'] = 'synopsis'
elif abstract_type == 'alternate':
#Right now, these will only be included if there is a title to
#give it
if title_text:
                abstract_header.text = title_text[0]
abstract_copy.attrib['id'] = 'alternate'
else:
continue
elif abstract_type is None:
abstract_header.text = 'Abstract'
abstract_copy.attrib['id'] = 'abstract'
elif abstract_type == 'toc': # We don't include these
continue
else: # Warn about these, then skip
log.warning('No handling for abstract-type="{0}"'.format(abstract_type))
continue
#abstract_header.text = abstract_type
#abstract_copy.attrib['id'] = abstract_type
heading_div.append(abstract_header)
heading_div.append(abstract_copy) | An article may contain data for various kinds of abstracts. This method
works on those that are included in the Heading. This is displayed
after the Authors and Affiliations.
Metadata element, content derived from FrontMatter | entailment |
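The abstract-type dispatch above can be read as a lookup table; a sketch of the same mapping (the 'alternate' case depends on the title text, so it stays imperative in the method):

# Maps abstract-type to (header text, div id); None means "do not include".
ABSTRACT_HEADINGS = {
    'summary': ('Author Summary', 'author-summary'),
    'editors-summary': ("Editors' Summary", 'editor-summary'),
    'synopsis': ('Synopsis', 'synopsis'),
    None: ('Abstract', 'abstract'),   # no abstract-type attribute at all
    'toc': None,                      # deliberately excluded
}

def heading_for(abstract_type):
    try:
        return ABSTRACT_HEADINGS[abstract_type]
    except KeyError:
        return None  # unknown types are skipped (and logged, as above)

print(heading_for(None))   # ('Abstract', 'abstract')
print(heading_for('toc'))  # None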
def make_article_info(self):
"""
The Article Info contains the (self) Citation, Editors, Dates,
Copyright, Funding Statement, Competing Interests Statement,
Correspondence, and Footnotes. Maybe more...
This content follows the Heading and precedes the Main segment in the
output.
This function accepts the receiving_node argument, which will receive
all generated output as new childNodes.
"""
body = self.main.getroot().find('body')
#Create a div for ArticleInfo, exposing it to linking and formatting
article_info_div = etree.Element('div', {'id': 'ArticleInfo'})
body.insert(1, article_info_div)
#Creation of the self Citation
article_info_div.append(self.make_article_info_citation())
#Creation of the Editors
editors = self.article.root.xpath("./front/article-meta/contrib-group/contrib[@contrib-type='editor']")
self.make_article_info_editors(editors, article_info_div)
#Creation of the important Dates segment
article_info_div.append(self.make_article_info_dates())
#Creation of the Copyright statement
self.make_article_info_copyright(article_info_div)
#Creation of the Funding statement
self.make_article_info_funding(article_info_div)
#Creation of the Competing Interests statement
self.make_article_info_competing_interests(article_info_div)
#Creation of the Correspondences (contact information) for the article
self.make_article_info_correspondences(article_info_div)
#Creation of the Footnotes (other) for the ArticleInfo
self.make_article_info_footnotes_other(article_info_div) | The Article Info contains the (self) Citation, Editors, Dates,
Copyright, Funding Statement, Competing Interests Statement,
Correspondence, and Footnotes. Maybe more...
This content follows the Heading and precedes the Main segment in the
output.
This function accepts the receiving_node argument, which will receive
all generated output as new childNodes. | entailment |
def make_article_info_citation(self):
"""
Creates a self citation node for the ArticleInfo of the article.
This method uses code from this page as a reference implementation:
https://github.com/PLOS/ambra/blob/master/base/src/main/resources/articleTransform-v3.xsl
"""
citation_div = etree.Element('div', {'id': 'article-citation'})
b = etree.SubElement(citation_div, 'b')
b.text = 'Citation: '
#Add author stuff to the citation
authors = self.article.root.xpath("./front/article-meta/contrib-group/contrib[@contrib-type='author']")
for author in authors:
author_index = authors.index(author)
#At the 6th author, simply append an et al., then stop iterating
if author_index == 5:
append_new_text(citation_div, 'et al.', join_str='')
break
else:
#Check if the author contrib has a collab
collab = author.find('collab')
if collab is not None:
                collab_copy = deepcopy(collab)
                for contrib_group in collab_copy.findall('contrib-group'):
                    remove(contrib_group)
                append_all_below(citation_div, collab_copy, join_str='')
else: # Author element is not a collab
name = author.find('name')
#Note that this does not support eastern names
#Grab the surname information
surname = name.find('surname')
given_names = name.find('given-names')
suffix = name.find('suffix')
append_new_text(citation_div, surname.text, join_str='')
#Make initials from the given-name information
if given_names is not None:
#Add a space
append_new_text(citation_div, ' ', join_str='')
#Split by whitespace and take first character
given_initials = [i[0] for i in given_names.text.split() if i]
for initial in given_initials:
append_new_text(citation_div, initial, join_str='')
#If there is a suffix, add its text, but don't include the
#trailing period if there is one
if suffix is not None:
#Add a space
append_new_text(citation_div, ' ', join_str='')
suffix_text = suffix.text
#Check for the trailing period
if suffix_text[-1] == '.':
suffix_text = suffix_text[:-1]
append_new_text(citation_div, suffix_text, join_str='')
                #If this is not the last author to be added, add a ", "
                #The break at the 6th author means this test only ever runs
                #for the first five authors
                if author_index < len(authors) - 1:
append_new_text(citation_div, ', ', join_str='')
#Add Publication Year to the citation
#Find pub-date elements, use pub-type=collection, or else pub-type=ppub
d = './front/article-meta/pub-date'
coll = self.article.root.xpath(d + "[@pub-type='collection']")
ppub = self.article.root.xpath(d + "[@pub-type='ppub']")
if coll:
pub_year = coll[0].find('year').text
elif ppub:
pub_year = ppub[0].find('year').text
append_new_text(citation_div, ' ({0}) '.format(pub_year), join_str='')
#Add the Article Title to the Citation
#As best as I can tell from the reference implementation, they
#serialize the article title to text-only, and expunge redundant spaces
#This might need later review
article_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0]
article_title_text = serialize(article_title)
normalized = ' '.join(article_title_text.split()) # Remove redundant whitespace
#Add a period unless there is some other valid punctuation
if normalized[-1] not in '.?!':
normalized += '.'
append_new_text(citation_div, normalized + ' ', join_str='')
#Add the article's journal name using the journal-id of type "nlm-ta"
journal = self.article.root.xpath("./front/journal-meta/journal-id[@journal-id-type='nlm-ta']")
append_new_text(citation_div, journal[0].text + ' ', join_str='')
#Add the article's volume, issue, and elocation_id values
volume = self.article.root.xpath('./front/article-meta/volume')[0].text
issue = self.article.root.xpath('./front/article-meta/issue')[0].text
elocation_id = self.article.root.xpath('./front/article-meta/elocation-id')[0].text
form = '{0}({1}): {2}. '.format(volume, issue, elocation_id)
append_new_text(citation_div, form, join_str='')
append_new_text(citation_div, 'doi:{0}'.format(self.article.doi), join_str='')
return citation_div | Creates a self citation node for the ArticleInfo of the article.
This method uses code from this page as a reference implementation:
https://github.com/PLOS/ambra/blob/master/base/src/main/resources/articleTransform-v3.xsl | entailment |
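The initials logic inside the citation builder, isolated for clarity; splitting on whitespace means a hyphenated given name contributes a single initial:

def initials(given_names):
    # First character of each whitespace-separated part, as in the loop above
    return ''.join(part[0] for part in given_names.split() if part)

print(initials('Jane Q.'))   # JQ
print(initials('Jean-Luc'))  # J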
def make_article_info_dates(self):
"""
Makes the section containing important dates for the article: typically
Received, Accepted, and Published.
"""
dates_div = etree.Element('div', {'id': 'article-dates'})
d = './front/article-meta/history/date'
received = self.article.root.xpath(d + "[@date-type='received']")
accepted = self.article.root.xpath(d + "[@date-type='accepted']")
if received:
b = etree.SubElement(dates_div, 'b')
b.text = 'Received: '
dt = self.date_tuple_from_date(received[0], 'Received')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
if accepted:
b = etree.SubElement(dates_div, 'b')
b.text = 'Accepted: '
dt = self.date_tuple_from_date(accepted[0], 'Accepted')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
#Published date is required
pub_date = self.article.root.xpath("./front/article-meta/pub-date[@pub-type='epub']")[0]
b = etree.SubElement(dates_div, 'b')
b.text = 'Published: '
dt = self.date_tuple_from_date(pub_date, 'Published')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string)
return dates_div | Makes the section containing important dates for the article: typically
Received, Accepted, and Published. | entailment |
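date_tuple_from_date and format_date_string are defined elsewhere in the class; these are hypothetical stand-ins with assumed shapes, included only to make the flow above concrete:

from lxml import etree

def date_tuple_from_date(date_el, event):
    # Assumed shape: (day, month, year, event name)
    return (date_el.findtext('day'), date_el.findtext('month'),
            date_el.findtext('year'), event)

def format_date_string(date_tuple):
    day, month, year, _ = date_tuple
    months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    return '{0} {1} {2}'.format(day, months[int(month)], year)

received = etree.fromstring(
    b'<date date-type="received"><day>4</day><month>6</month><year>2006</year></date>')
print(format_date_string(date_tuple_from_date(received, 'Received')))  # 4 June 2006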
def make_article_info_copyright(self, article_info_div):
"""
Makes the copyright section for the ArticleInfo. For PLoS, this means
handling the information contained in the metadata <permissions>
element.
"""
perm = self.article.root.xpath('./front/article-meta/permissions')
if not perm:
return
copyright_div = etree.SubElement(article_info_div, 'div', {'id': 'copyright'})
cp_bold = etree.SubElement(copyright_div, 'b')
cp_bold.text = 'Copyright: '
copyright_string = '\u00A9 '
copyright_holder = perm[0].find('copyright-holder')
if copyright_holder is not None:
copyright_string += all_text(copyright_holder) + '. '
lic = perm[0].find('license')
if lic is not None:
copyright_string += all_text(lic.find('license-p'))
append_new_text(copyright_div, copyright_string) | Makes the copyright section for the ArticleInfo. For PLoS, this means
handling the information contained in the metadata <permissions>
element. | entailment |
def make_article_info_funding(self, article_info_div):
"""
Creates the element for declaring Funding in the article info.
"""
funding_group = self.article.root.xpath('./front/article-meta/funding-group')
if funding_group:
funding_div = etree.SubElement(article_info_div,
'div',
{'id': 'funding'})
funding_b = etree.SubElement(funding_div, 'b')
funding_b.text = 'Funding: '
#As far as I can tell, PLoS only uses one funding-statement
funding_statement = funding_group[0].find('funding-statement')
append_all_below(funding_div, funding_statement) | Creates the element for declaring Funding in the article info. | entailment |
def make_article_info_competing_interests(self, article_info_div):
"""
Creates the element for declaring competing interests in the article
info.
"""
#Check for author-notes
con_expr = "./front/article-meta/author-notes/fn[@fn-type='conflict']"
conflict = self.article.root.xpath(con_expr)
if not conflict:
return
conflict_div = etree.SubElement(article_info_div,
'div',
{'id': 'conflict'})
b = etree.SubElement(conflict_div, 'b')
b.text = 'Competing Interests: '
fn_p = conflict[0].find('p')
if fn_p is not None:
append_all_below(conflict_div, fn_p) | Creates the element for declaring competing interests in the article
info. | entailment |
def make_article_info_correspondences(self, article_info_div):
"""
Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content.
"""
corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
if corresps:
corresp_div = etree.SubElement(article_info_div,
'div',
{'id': 'correspondence'})
for corresp in corresps:
sub_div = etree.SubElement(corresp_div,
'div',
{'id': corresp.attrib['id']})
append_all_below(sub_div, corresp) | Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content. | entailment |
def make_article_info_footnotes_other(self, article_info_div):
"""
This will catch all of the footnotes of type 'other' in the <fn-group>
of the <back> element.
"""
other_fn_expr = "./back/fn-group/fn[@fn-type='other']"
other_fns = self.article.root.xpath(other_fn_expr)
if other_fns:
other_fn_div = etree.SubElement(article_info_div,
'div',
{'class': 'back-fn-other'})
for other_fn in other_fns:
append_all_below(other_fn_div, other_fn) | This will catch all of the footnotes of type 'other' in the <fn-group>
of the <back> element. | entailment |
def make_back_matter(self):
"""
The <back> element may have 0 or 1 <label> elements and 0 or 1 <title>
elements. Then it may have any combination of the following: <ack>,
<app-group>, <bio>, <fn-group>, <glossary>, <ref-list>, <notes>, and
<sec>. <sec> is employed here as a catch-all for material that does not
fall under the other categories.
The Back should generally be thought of as a non-linear element, though
some of its content will be parsed to the linear flow of the document.
This can be thought of as critically important meta-information that
should accompany the main text (e.g. Acknowledgments and Contributions)
Because the content of <back> contains a set of tags that intersects
with that of the Body, this method should always be called before the
general post-processing steps; keep in mind that this is also the
opportunity to permit special handling of content in the Back
"""
#Back is technically metadata content that needs to be interpreted to
#presentable content
body = self.main.getroot().find('body')
if self.article.root.find('back') is None:
return
#The following things are ordered in such a way to adhere to what
#appears to be a consistent presentation order for PLoS
#Acknowledgments
back_ack = self.make_back_acknowledgments()
if back_ack is not None:
body.append(back_ack)
#Author Contributions
self.make_back_author_contributions(body)
#Glossaries
self.make_back_glossary(body)
#Notes
self.make_back_notes(body) | The <back> element may have 0 or 1 <label> elements and 0 or 1 <title>
elements. Then it may have any combination of the following: <ack>,
<app-group>, <bio>, <fn-group>, <glossary>, <ref-list>, <notes>, and
<sec>. <sec> is employed here as a catch-all for material that does not
fall under the other categories.
The Back should generally be thought of as a non-linear element, though
some of its content will be parsed to the linear flow of the document.
This can be thought of as critically important meta-information that
should accompany the main text (e.g. Acknowledgments and Contributions)
Because the content of <back> contains a set of tags that intersects
with that of the Body, this method should always be called before the
general post-processing steps; keep in mind that this is also the
opportunity to permit special handling of content in the Back | entailment |
def move_back_boxed_texts(self):
"""
The only intended use for this function is to patch a problem seen in
at least one PLoS article (journal.pgen.0020002). This will move any
<boxed-text> elements over to the receiving element, which is probably
the main body.
"""
body = self.main.getroot().find('body')
back = self.article.root.find('back')
if back is None:
return
boxed_texts = back.xpath('.//boxed-text')
for boxed_text in boxed_texts:
body.append(deepcopy(boxed_text)) | The only intended use for this function is to patch a problem seen in
at least one PLoS article (journal.pgen.0020002). This will move any
<boxed-text> elements over to the receiving element, which is probably
the main body. | entailment |
def make_back_acknowledgments(self):
"""
    The <ack> is an important piece of back matter information, and will be
    included immediately after the main text.
    This element should only occur once, optionally, for PLoS; if a need
    becomes known, then multiple instances may be supported.
"""
acks = self.article.root.xpath('./back/ack')
if not acks:
return
ack = deepcopy(acks[0])
#Modify the tag to div
ack.tag = 'div'
#Give it an id
ack.attrib['id'] = 'acknowledgments'
#Give it a title element--this is not an EPUB element but doing so will
#allow it to later be depth-formatted by self.convert_div_titles()
ack_title = etree.Element('title')
ack_title.text = 'Acknowledgments'
ack.insert(0, ack_title) # Make it the first element
    return ack | The <ack> is an important piece of back matter information, and will be
included immediately after the main text.
This element should only occur once, optionally, for PLoS; if a need
becomes known, then multiple instances may be supported. | entailment
def make_back_author_contributions(self, body):
"""
Though this goes in the back of the document with the rest of the back
matter, it is not an element found under <back>.
I don't expect to see more than one of these. Compare this method to
make_article_info_competing_interests()
"""
cont_expr = "./front/article-meta/author-notes/fn[@fn-type='con']"
contribution = self.article.root.xpath(cont_expr)
if contribution:
author_contrib = deepcopy(contribution[0])
remove_all_attributes(author_contrib)
author_contrib.tag = 'div'
author_contrib.attrib['id'] = 'author-contributions'
#This title element will be parsed later
title = etree.Element('title')
title.text = 'Author Contributions'
author_contrib.insert(0, title)
body.append(author_contrib) | Though this goes in the back of the document with the rest of the back
matter, it is not an element found under <back>.
I don't expect to see more than one of these. Compare this method to
make_article_info_competing_interests() | entailment |
def make_back_glossary(self, body):
"""
Glossaries are a fairly common item in papers for PLoS, but it also
seems that they are rarely incorporated into the PLoS web-site or PDF
formats. They are included in the ePub output however because they are
helpful and because we can.
"""
for glossary in self.article.root.xpath('./back/glossary'):
gloss_copy = deepcopy(glossary)
gloss_copy.tag = 'div'
gloss_copy.attrib['class'] = 'back-glossary'
body.append(gloss_copy) | Glossaries are a fairly common item in papers for PLoS, but it also
seems that they are rarely incorporated into the PLoS web-site or PDF
formats. They are included in the ePub output however because they are
helpful and because we can. | entailment |
def make_back_notes(self, body):
"""
The notes element in PLoS articles can be employed for posting notices
of corrections or adjustments in proof. The <notes> element has a very
diverse content model, but PLoS practice appears to be fairly
consistent: a single <sec> containing a <title> and a <p>
"""
for notes in self.article.root.xpath('./back/notes'):
notes_sec = deepcopy(notes.find('sec'))
notes_sec.tag = 'div'
notes_sec.attrib['class'] = 'back-notes'
body.append(notes_sec) | The notes element in PLoS articles can be employed for posting notices
of corrections or adjustments in proof. The <notes> element has a very
diverse content model, but PLoS practice appears to be fairly
consistent: a single <sec> containing a <title> and a <p> | entailment |
def convert_disp_formula_elements(self):
"""
<disp-formula> elements must be converted to conforming elements
"""
for disp in self.main.getroot().findall('.//disp-formula'):
#find label element
label_el = disp.find('label')
graphic_el = disp.find('graphic')
if graphic_el is None: # No graphic, assume math as text instead
text_span = etree.Element('span', {'class': 'disp-formula'})
if 'id' in disp.attrib:
text_span.attrib['id'] = disp.attrib['id']
append_all_below(text_span, disp)
#Insert the text span before the disp-formula
insert_before(disp, text_span)
#If a label exists, modify and insert before text_span
if label_el is not None:
label_el.tag = 'b'
insert_before(text_span, label_el)
#Remove the disp-formula
remove(disp)
#Skip the rest, which deals with the graphic element
continue
#The graphic element is present
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the img element
img_element = etree.Element('img', {'alt': 'A Display Formula',
'class': 'disp-formula',
'src': img_path})
#Transfer the id attribute
if 'id' in disp.attrib:
img_element.attrib['id'] = disp.attrib['id']
#Insert the img element
insert_before(disp, img_element)
#Create content for the label
if label_el is not None:
label_el.tag = 'b'
insert_before(img_element, label_el)
#Remove the old disp-formula element
remove(disp) | <disp-formula> elements must be converted to conforming elements | entailment |
def convert_inline_formula_elements(self):
"""
<inline-formula> elements must be converted to be conforming
These elements may contain <inline-graphic> elements, textual content,
or both.
"""
for inline in self.main.getroot().findall('.//inline-formula'):
#inline-formula elements will be modified in situ
remove_all_attributes(inline)
inline.tag = 'span'
inline.attrib['class'] = 'inline-formula'
inline_graphic = inline.find('inline-graphic')
if inline_graphic is None:
# Do nothing more if there is no graphic
continue
        #Need to convert the inline-graphic element to an img element
inline_graphic.tag = 'img'
#Get a copy of the attributes, then remove them
inline_graphic_attributes = copy(inline_graphic.attrib)
remove_all_attributes(inline_graphic)
#Create a file reference for the image
xlink_href = ns_format(inline_graphic, 'xlink:href')
graphic_xlink_href = inline_graphic_attributes[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Set the source to the image path
inline_graphic.attrib['src'] = img_path
inline_graphic.attrib['class'] = 'inline-formula'
inline_graphic.attrib['alt'] = 'An Inline Formula' | <inline-formula> elements must be converted to be conforming
These elements may contain <inline-graphic> elements, textual content,
or both. | entailment |
def convert_disp_quote_elements(self):
"""
Extract or extended quoted passage from another work, usually made
typographically distinct from surrounding text
<disp-quote> elements have a relatively complex content model, but PLoS
appears to employ either <p>s or <list>s.
"""
for disp_quote in self.main.getroot().findall('.//disp-quote'):
if disp_quote.getparent().tag == 'p':
elevate_element(disp_quote)
disp_quote.tag = 'div'
disp_quote.attrib['class'] = 'disp-quote' | Extract or extended quoted passage from another work, usually made
typographically distinct from surrounding text
<disp-quote> elements have a relatively complex content model, but PLoS
appears to employ either <p>s or <list>s. | entailment |
def convert_boxed_text_elements(self):
"""
Textual material that is part of the body of text but outside the
flow of the narrative text, for example, a sidebar, marginalia, text
insert (whether enclosed in a box or not), caution, tip, note box, etc.
<boxed-text> elements for PLoS appear to all contain a single <sec>
element which frequently contains a <title> and various other content.
This method will elevate the <sec> element, adding class information as
well as processing the title.
"""
for boxed_text in self.main.getroot().findall('.//boxed-text'):
sec_el = boxed_text.find('sec')
if sec_el is not None:
sec_el.tag = 'div'
title = sec_el.find('title')
if title is not None:
title.tag = 'b'
sec_el.attrib['class'] = 'boxed-text'
if 'id' in boxed_text.attrib:
sec_el.attrib['id'] = boxed_text.attrib['id']
replace(boxed_text, sec_el)
else:
div_el = etree.Element('div', {'class': 'boxed-text'})
if 'id' in boxed_text.attrib:
div_el.attrib['id'] = boxed_text.attrib['id']
append_all_below(div_el, boxed_text)
replace(boxed_text, div_el) | Textual material that is part of the body of text but outside the
flow of the narrative text, for example, a sidebar, marginalia, text
insert (whether enclosed in a box or not), caution, tip, note box, etc.
<boxed-text> elements for PLoS appear to all contain a single <sec>
element which frequently contains a <title> and various other content.
This method will elevate the <sec> element, adding class information as
well as processing the title. | entailment |
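A runnable sketch of the <sec> promotion branch above, using lxml's addprevious/remove in place of the package's replace helper:

from lxml import etree

root = etree.fromstring(
    b'<body><p>before</p><boxed-text id="box1"><sec>'
    b'<title>Note</title><p>Sidebar text.</p></sec></boxed-text></body>')
boxed = root.find('boxed-text')
sec = boxed.find('sec')
sec.tag = 'div'
sec.find('title').tag = 'b'
sec.attrib['class'] = 'boxed-text'
sec.attrib['id'] = boxed.attrib['id']
boxed.addprevious(sec)  # lxml moves the element in front of its old wrapper
root.remove(boxed)      # drop the now-empty <boxed-text>
print(etree.tostring(root))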
def convert_supplementary_material_elements(self):
"""
Supplementary material are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
    reasonably capable of the external handling of diverse file formats,
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
    material will be employed; this will require an internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure.
"""
for supplementary in self.main.getroot().findall('.//supplementary-material'):
#Create a div element to hold the supplementary content
suppl_div = etree.Element('div')
if 'id' in supplementary.attrib:
suppl_div.attrib['id'] = supplementary.attrib['id']
insert_before(supplementary, suppl_div)
#Get the sub elements
label = supplementary.find('label')
caption = supplementary.find('caption')
#Get the external resource URL for the supplementary information
ns_xlink_href = ns_format(supplementary, 'xlink:href')
xlink_href = supplementary.attrib[ns_xlink_href]
resource_url = self.fetch_single_representation(xlink_href)
if label is not None:
label.tag = 'a'
label.attrib['href'] = resource_url
append_new_text(label, '. ', join_str='')
suppl_div.append(label)
if caption is not None:
title = caption.find('title')
paragraphs = caption.findall('p')
if title is not None:
title.tag = 'b'
suppl_div.append(title)
for paragraph in paragraphs:
suppl_div.append(paragraph)
#This is a fix for odd articles with <p>s outside of <caption>
#See journal.pctr.0020006, PLoS themselves fail to format this for
#the website, though the .pdf is good
#It should be noted that journal.pctr.0020006 does not pass
#validation because it places a <p> before a <caption>
#By placing this at the end of the method, it conforms to the spec
#by expecting such p tags after caption. This causes a hiccup in
#the rendering for journal.pctr.0020006, but it's better than
#skipping the data entirely AND it should also work for conforming
#articles.
for paragraph in supplementary.findall('p'):
suppl_div.append(paragraph)
remove(supplementary) | Supplementary material are not, nor are they generally expected to be,
packaged into the epub file. Though this is a technical possibility,
and certain epub reading systems (such as those run on a PC) might be
reasonably capable of the external handling of diverse file formats,
I presume that supplementary material will remain separate from the
document. So special cases aside, external links to supplementary
material will be employed; this will require an internet connection for
access.
As for content in <supplementary-material>, they appear to strictly
contain 1 <label> element, followed by a <caption><title><p></caption>
substructure. | entailment |
def fetch_single_representation(self, item_xlink_href):
"""
    This function will render a formatted URL for accessing the PLoS server's
SingleRepresentation of an object.
"""
#A dict of URLs for PLoS subjournals
journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
'pcbi': 'http://www.ploscompbiol.org/article/{0}',
'ppat': 'http://www.plospathogens.org/article/{0}',
'pntd': 'http://www.plosntds.org/article/{0}',
'pmed': 'http://www.plosmedicine.org/article/{0}',
'pbio': 'http://www.plosbiology.org/article/{0}',
'pone': 'http://www.plosone.org/article/{0}',
'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
    #Identify the subjournal name for the base URL
subjournal_name = self.article.doi.split('.')[2]
base_url = journal_urls[subjournal_name]
#Compose the address for fetchSingleRepresentation
resource = 'fetchSingleRepresentation.action?uri=' + item_xlink_href
    return base_url.format(resource) | This function will render a formatted URL for accessing the PLoS server's
SingleRepresentation of an object. | entailment |
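Worked example of the address construction above with an assumed PLoS ONE DOI; splitting the DOI on '.' puts the subjournal key at index 2:

doi = '10.1371/journal.pone.0012345'  # assumed example DOI
print(doi.split('.'))                 # ['10', '1371/journal', 'pone', '0012345']
base_url = 'http://www.plosone.org/article/{0}'
resource = ('fetchSingleRepresentation.action?uri='
            'info:doi/10.1371/journal.pone.0012345.s001')
print(base_url.format(resource))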
def convert_fig_elements(self):
"""
Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited.
"""
for fig in self.main.getroot().findall('.//fig'):
if fig.getparent().tag == 'p':
elevate_element(fig)
for fig in self.main.getroot().findall('.//fig'):
#self.convert_fn_elements(fig)
#self.convert_disp_formula_elements(fig)
#Find label and caption
label_el = fig.find('label')
caption_el = fig.find('caption')
#Get the graphic node, this should be mandatory later on
graphic_el = fig.find('graphic')
#Create a file reference for the image
xlink_href = ns_format(graphic_el, 'xlink:href')
graphic_xlink_href = graphic_el.attrib[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Create the content: using image path, label, and caption
img_el = etree.Element('img', {'alt': 'A Figure', 'src': img_path,
'class': 'figure'})
if 'id' in fig.attrib:
img_el.attrib['id'] = fig.attrib['id']
insert_before(fig, img_el)
#Create content for the label and caption
if caption_el is not None or label_el is not None:
img_caption_div = etree.Element('div', {'class': 'figure-caption'})
img_caption_div_b = etree.SubElement(img_caption_div, 'b')
if label_el is not None:
append_all_below(img_caption_div_b, label_el)
append_new_text(img_caption_div_b, '. ', join_str='')
if caption_el is not None:
caption_title = caption_el.find('title')
if caption_title is not None:
append_all_below(img_caption_div_b, caption_title)
append_new_text(img_caption_div_b, ' ', join_str='')
for each_p in caption_el.findall('p'):
append_all_below(img_caption_div, each_p)
insert_before(fig, img_caption_div)
#Remove the original <fig>
remove(fig) | Responsible for the correct conversion of JPTS 3.0 <fig> elements to
EPUB xhtml. Aside from translating <fig> to <img>, the content model
must be edited. | entailment |
def convert_verse_group_elements(self):
"""
A song, poem, or verse
    Implementor's Note: No attempt has been made to retain the look or
visual form of the original poetry.
This unusual element, <verse-group> is used to convey poetry and is
recursive in nature (it may contain further <verse-group> elements).
Examples of these tags are sparse, so it remains difficult to ensure
full implementation. This method will attempt to handle the label,
title, and subtitle elements correctly, while converting <verse-lines>
to italicized lines.
"""
for verse_group in self.main.getroot().findall('.//verse-group'):
#Find some possible sub elements for the heading
label = verse_group.find('label')
title = verse_group.find('title')
subtitle = verse_group.find('subtitle')
#Modify the verse-group element
verse_group.tag = 'div'
verse_group.attrib['id'] = 'verse-group'
#Create a title for the verse_group
if label is not None or title is not None or subtitle is not None:
new_verse_title = etree.Element('b')
#Insert it at the beginning
verse_group.insert(0, new_verse_title)
#Induct the title elements into the new title
if label is not None:
append_all_below(new_verse_title, label)
remove(label)
if title is not None:
append_all_below(new_verse_title, title)
remove(title)
if subtitle is not None:
append_all_below(new_verse_title, subtitle)
remove(subtitle)
for verse_line in verse_group.findall('verse-line'):
verse_line.tag = 'p'
verse_line.attrib['class'] = 'verse-line' | A song, poem, or verse
Implementor's Note: No attempt has been made to retain the look or
visual form of the original poetry.
This unusual element, <verse-group> is used to convey poetry and is
recursive in nature (it may contain further <verse-group> elements).
Examples of these tags are sparse, so it remains difficult to ensure
full implementation. This method will attempt to handle the label,
title, and subtitle elements correctly, while converting <verse-lines>
to italicized lines. | entailment |
def convert_fn_elements(self):
"""
<fn> elements may be used in the main text body outside of tables and
figures for purposes such as erratum notes. It appears that PLoS
practice is to not show erratum notes in the web or pdf formats after
the appropriate corrections have been made to the text. The erratum
notes are thus the only record that an error was made.
This method will attempt to display footnotes unless the note can be
identified as an Erratum, in which case it will be removed in
accordance with PLoS' apparent guidelines.
"""
for footnote in self.main.getroot().findall('.//fn'):
#Use only the first paragraph
paragraph = footnote.find('p')
#If no paragraph, move on
if paragraph is None:
remove(footnote)
continue
#Simply remove corrected errata items
paragraph_text = str(etree.tostring(paragraph, method='text', encoding='utf-8'), encoding='utf-8')
if paragraph_text.startswith('Erratum') and 'Corrected' in paragraph_text:
remove(footnote)
continue
#Transfer some attribute information from the fn element to the paragraph
if 'id' in footnote.attrib:
paragraph.attrib['id'] = footnote.attrib['id']
if 'fn-type' in footnote.attrib:
            paragraph.attrib['class'] = 'fn-type-{0}'.format(footnote.attrib['fn-type'])
        else:
            paragraph.attrib['class'] = 'fn'
        #Replace the fn element with its first paragraph
replace(footnote, paragraph) | <fn> elements may be used in the main text body outside of tables and
figures for purposes such as erratum notes. It appears that PLoS
practice is to not show erratum notes in the web or pdf formats after
the appropriate corrections have been made to the text. The erratum
notes are thus the only record that an error was made.
This method will attempt to display footnotes unless the note can be
identified as an Erratum, in which case it will be removed in
accordance with PLoS' apparent guidelines. | entailment |
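The erratum test above reduces to two string checks on the footnote paragraph's text content; a sketch:

from lxml import etree

p = etree.fromstring(b'<p>Erratum: Figure 2 was wrong. Corrected 2006-01-01.</p>')
text = etree.tostring(p, method='text', encoding='utf-8').decode('utf-8')
if text.startswith('Erratum') and 'Corrected' in text:
    print('footnote would be removed')  # this branch fires for the sample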
def convert_list_elements(self):
"""
A sequence of two or more items, which may or may not be ordered.
The <list> element has an optional <label> element and optional <title>
    element, followed by one or more <list-item> elements. This element
is recursive as the <list-item> elements may contain further <list> or
<def-list> elements. Much of the potential complexity in dealing with
lists comes from this recursion.
"""
#I have yet to gather many examples of this element, and may have to
#write a recursive method for the processing of lists depending on how
#PLoS produces their XML, for now this method is ignorant of nesting
#TODO: prefix-words, one possible solution would be to have this method
#edit the CSS to provide formatting support for arbitrary prefixes...
#This is a block level element, so elevate it if found in p
for list_el in self.main.getroot().findall('.//list'):
if list_el.getparent().tag == 'p':
elevate_element(list_el)
#list_el is used instead of list (list is reserved)
for list_el in self.main.getroot().findall('.//list'):
if 'list-type' not in list_el.attrib:
list_el_type = 'order'
else:
list_el_type = list_el.attrib['list-type']
#Unordered lists
if list_el_type in ['', 'bullet', 'simple']:
list_el.tag = 'ul'
#CSS must be used to recognize the class and suppress bullets
if list_el_type == 'simple':
list_el.attrib['class'] = 'simple'
#Ordered lists
else:
list_el.tag = 'ol'
list_el.attrib['class'] = list_el_type
#Convert the list-item element tags to 'li'
for list_item in list_el.findall('list-item'):
list_item.tag = 'li'
remove_all_attributes(list_el, exclude=['id', 'class']) | A sequence of two or more items, which may or may not be ordered.
The <list> element has an optional <label> element and optional <title>
element, followed by one or more <list-item> elements. This element
is recursive as the <list-item> elements may contain further <list> or
<def-list> elements. Much of the potential complexity in dealing with
lists comes from this recursion. | entailment |
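A self-contained run of the list conversion above on a toy <list>; a missing list-type is treated as ordered, exactly as in the method:

from lxml import etree

jpts_list = etree.fromstring(
    b'<list list-type="simple">'
    b'<list-item><p>alpha</p></list-item>'
    b'<list-item><p>beta</p></list-item></list>')
list_type = jpts_list.attrib.get('list-type', 'order')
if list_type in ('', 'bullet', 'simple'):
    jpts_list.tag = 'ul'
    if list_type == 'simple':
        jpts_list.attrib['class'] = 'simple'  # CSS suppresses the bullets
else:
    jpts_list.tag = 'ol'
    jpts_list.attrib['class'] = list_type
for item in jpts_list.findall('list-item'):
    item.tag = 'li'
del jpts_list.attrib['list-type']  # mimics remove_all_attributes(exclude=['id', 'class'])
print(etree.tostring(jpts_list))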