Columns: sentence1 (string, lengths 52–3.87M) · sentence2 (string, lengths 1–47.2k) · label (stringclasses, 1 value: entailment)
def convert_def_list_elements(self):
    """
    A list in which each item consists of two parts: a word, phrase, term,
    graphic, chemical structure, or equation paired with one or more
    descriptions, discussions, explanations, or definitions of it.

    <def-list> elements are lists of <def-item> elements, which are in turn
    composed of a pair of term (<term>) and definition (<def>). This method
    converts the <def-list> to a classed <div> with a styled format for the
    terms and definitions.
    """
    for def_list in self.main.getroot().findall('.//def-list'):
        #Remove the attributes, excepting id
        remove_all_attributes(def_list, exclude=['id'])
        #Modify the def-list element
        def_list.tag = 'div'
        def_list.attrib['class'] = 'def-list'
        for def_item in def_list.findall('def-item'):
            #Get the term being defined, modify it
            term = def_item.find('term')
            term.tag = 'p'
            term.attrib['class'] = 'def-item-term'
            #Insert it before its parent def_item
            insert_before(def_item, term)
            #Get the definition, handle missing with a warning
            definition = def_item.find('def')
            if definition is None:
                log.warning('Missing def element in def-item')
                remove(def_item)
                continue
            #PLoS appears to consistently place all definition text in a
            #paragraph subelement of the def element
            def_para = definition.find('p')
            def_para.attrib['class'] = 'def-item-def'
            #Replace the def-item element with the p element
            replace(def_item, def_para)
A list in which each item consists of two parts: a word, phrase, term, graphic, chemical structure, or equation paired with one or more descriptions, discussions, explanations, or definitions of it. <def-list> elements are lists of <def-item> elements, which are in turn composed of a pair of term (<term>) and definition (<def>). This method converts the <def-list> to a classed <div> with a styled format for the terms and definitions.
entailment
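The conversion methods in these rows lean on a handful of small XML helper utilities (remove_all_attributes, insert_before, replace, remove, append_all_below). Below is a minimal sketch of what such helpers could look like over lxml trees; these are illustrative reconstructions written for this note, not the project's actual implementations.

    from lxml import etree

    def insert_before(node, new_node):
        #Insert new_node as a sibling immediately before node
        parent = node.getparent()
        parent.insert(parent.index(node), new_node)

    def replace(old, new):
        #Swap old for new at the same position in the tree
        old.getparent().replace(old, new)

    def remove(node):
        #Detach node (and its subtree) from the tree
        node.getparent().remove(node)

    def remove_all_attributes(node, exclude=None):
        #Strip attributes from node, keeping any named in exclude
        exclude = exclude or []
        for key in list(node.attrib):
            if key not in exclude:
                del node.attrib[key]

    def append_all_below(destination, source):
        #Move source's text and child elements under destination
        if source.text:
            destination.text = (destination.text or '') + source.text
        for child in list(source):
            destination.append(child)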
def convert_ref_list_elements(self):
    """
    List of references (citations) for an article, which is often called
    “References”, “Bibliography”, or “Additional Reading”.

    No distinction is made between lists of cited references and lists of
    suggested references. This method should not be confused with the
    method(s) employed for the formatting of a proper bibliography, though
    they are related. This is also an area of ongoing development, as I lack
    access to PLOS' algorithm for proper citation formatting.
    """
    #TODO: Handle nested ref-lists
    for ref_list in self.main.getroot().findall('.//ref-list'):
        remove_all_attributes(ref_list)
        ref_list.tag = 'div'
        ref_list.attrib['class'] = 'ref-list'
        label = ref_list.find('label')
        if label is not None:
            label.tag = 'h3'
        for ref in ref_list.findall('ref'):
            #Render each reference as plain text in a paragraph
            ref_p = etree.Element('p')
            ref_p.text = str(etree.tostring(ref, method='text', encoding='utf-8'),
                             encoding='utf-8')
            replace(ref, ref_p)
List of references (citations) for an article, which is often called “References”, “Bibliography”, or “Additional Reading”. No distinction is made between lists of cited references and lists of suggested references. This method should not be confused with the method(s) employed for the formatting of a proper bibliography, though they are related. This is also an area of ongoing development, as I lack access to PLOS' algorithm for proper citation formatting.
entailment
def convert_table_wrap_elements(self):
    """
    Responsible for the correct conversion of JPTS 3.0 <table-wrap> elements
    to EPUB content. The 'id' attribute is treated as mandatory by this
    method.
    """
    for table_wrap in self.main.getroot().findall('.//table-wrap'):
        table_div = etree.Element('div', {'id': table_wrap.attrib['id']})
        label = table_wrap.find('label')
        caption = table_wrap.find('caption')
        alternatives = table_wrap.find('alternatives')
        graphic = table_wrap.find('graphic')
        table = table_wrap.find('table')
        if graphic is None:
            if alternatives is not None:
                graphic = alternatives.find('graphic')
        if table is None:
            if alternatives is not None:
                table = alternatives.find('table')
        #Handling the label and caption
        if label is not None or caption is not None:
            caption_div = etree.Element('div', {'class': 'table-caption'})
            caption_div_b = etree.SubElement(caption_div, 'b')
            if label is not None:
                append_all_below(caption_div_b, label)
            if caption is not None:
                #Find the optional title element and paragraph elements
                caption_title = caption.find('title')
                if caption_title is not None:
                    append_all_below(caption_div_b, caption_title)
                caption_ps = caption.findall('p')
                #For the title and each paragraph, give children to the div
                for caption_p in caption_ps:
                    append_all_below(caption_div, caption_p)
            #Add this to the table div
            table_div.append(caption_div)

        ### Practical Description ###
        #A table-wrap may have both, one of, or neither of graphic and table.
        #The different combinations should be handled, but a table-wrap with
        #neither should fail with an error.
        #
        #If there is both an image and a table, the image should be placed in
        #the text flow with a link to the html table.
        #
        #If there is an image and no table, the image should be placed in the
        #text flow without a link to an html table.
        #
        #If there is a table with no image, then the table should be placed
        #in the text flow.

        if graphic is not None:
            #Create the image path for the graphic
            xlink_href = ns_format(graphic, 'xlink:href')
            graphic_xlink_href = graphic.attrib[xlink_href]
            file_name = graphic_xlink_href.split('.')[-1] + '.png'
            img_dir = 'images-' + self.doi_suffix()
            img_path = '/'.join([img_dir, file_name])
            #Create the new img element
            img_element = etree.Element('img', {'alt': 'A Table',
                                                'src': img_path,
                                                'class': 'table'})
            #Add this to the table div
            table_div.append(img_element)
            #If there is also a table, add it to the tables list and link to it
            if table is not None:  # Both graphic and table
                #The label is just a means of transmitting some plaintext
                #which will be used for the labeling in the html tables file
                div = etree.SubElement(self.tables.find('body'),
                                       'div',
                                       {'id': table_wrap.attrib['id']})
                if label is not None:
                    bold_label = etree.SubElement(div, 'b')
                    append_all_below(bold_label, label)
                #Add the table to the tables list
                div.append(deepcopy(table))
                #Also add the table's foot if it exists
                table_wrap_foot = table_wrap.find('table-wrap-foot')
                if table_wrap_foot is not None:
                    table_wrap_foot.tag = 'div'
                    table_wrap_foot.attrib['class'] = 'table-wrap-foot'
                    div.append(table_wrap_foot)
                #Create a link to the html version of the table
                html_table_link = etree.Element('a')
                html_table_link.attrib['href'] = self.tables_fragment.format(
                    table_wrap.attrib['id'])
                html_table_link.text = 'Go to HTML version of this table'
                #Add this to the table div
                table_div.append(html_table_link)
                remove(table)
        elif table is not None:  # Table only
            #Simply append the table to the table div
            table_div.append(table)
        elif graphic is None and table is None:
            sys.exit('Encountered table-wrap element with neither graphic nor table. Exiting.')
        #Replace the original table-wrap with the newly constructed div
        replace(table_wrap, table_div)
Responsible for the correct conversion of JPTS 3.0 <table-wrap> elements to EPUB content. The 'id' attribute is treated as mandatory by this method.
entailment
def convert_graphic_elements(self):
    """
    This is a method for the odd special cases where <graphic> elements are
    standalone, or rather, not a part of a standard graphical element such
    as a figure or a table. This method should always be employed after the
    standard cases have already been handled.
    """
    for graphic in self.main.getroot().findall('.//graphic'):
        graphic.tag = 'img'
        graphic.attrib['alt'] = 'unowned-graphic'
        ns_xlink_href = ns_format(graphic, 'xlink:href')
        if ns_xlink_href in graphic.attrib:
            xlink_href = graphic.attrib[ns_xlink_href]
            file_name = xlink_href.split('.')[-1] + '.png'
            img_dir = 'images-' + self.doi_suffix()
            img_path = '/'.join([img_dir, file_name])
            graphic.attrib['src'] = img_path
        remove_all_attributes(graphic, exclude=['id', 'class', 'alt', 'src'])
This is a method for the odd special cases where <graphic> elements are standalone, or rather, not a part of a standard graphical element such as a figure or a table. This method should always be employed after the standard cases have already been handled.
entailment
def getChargeInfo(self, CorpNum, MsgType, UserID=None):
    """
    Check billing information.
    args
        CorpNum : member's business registration number
        MsgType : message send type
        UserID : Popbill member ID
    return
        charge info object
    raise
        PopbillException
    """
    if MsgType is None or MsgType == "":
        raise PopbillException(-99999999, "μ „μ†‘μœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Message/ChargeInfo?Type=' + MsgType, CorpNum, UserID)
Check billing information. args CorpNum : member's business registration number MsgType : message send type UserID : Popbill member ID return charge info object raise PopbillException
entailment
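A minimal usage sketch for the billing call above, assuming a MessageService client constructed with partner credentials as in the Popbill Python SDK; the LinkID, SecretKey, business number, and printed fields are placeholders.

    from popbill import MessageService, PopbillException

    messageService = MessageService("LinkID", "SecretKey")  # placeholder credentials
    try:
        chargeInfo = messageService.getChargeInfo("1234567890", "SMS")
        print(chargeInfo.unitCost, chargeInfo.chargeMethod)  # field names illustrative
    except PopbillException as PE:
        print(PE.code, PE.message)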
def sendSMS_multi(self, CorpNum, Sender, Contents, Messages, reserveDT,
                  adsYN=False, UserID=None, RequestNum=None):
    """
    Send SMS (short) messages in bulk.
    args
        CorpNum : Popbill member's business registration number
        Sender : sender number (for broadcast of identical content)
        Contents : message content (for broadcast of identical content)
        Messages : array of per-recipient message info
        reserveDT : reserved send time (format: yyyyMMddHHmmss)
        UserID : Popbill member ID
        RequestNum : send request number
    return
        receipt number (receiptNum)
    raise
        PopbillException
    """
    return self.sendMessage("SMS", CorpNum, Sender, '', '', Contents, Messages,
                            reserveDT, adsYN, UserID, RequestNum)
단문 λ¬Έμžλ©”μ‹œμ§€ λ‹€λŸ‰μ „μ†‘ args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ Sender : λ°œμ‹ μžλ²ˆν˜Έ (λ™λ³΄μ „μ†‘μš©) Contents : 문자 λ‚΄μš© (λ™λ³΄μ „μ†‘μš©) Messages : κ°œλ³„μ „μ†‘μ •λ³΄ λ°°μ—΄ reserveDT : μ˜ˆμ•½μ „μ†‘μ‹œκ°„ (ν˜•μ‹. yyyyMMddHHmmss) UserID : νŒλΉŒνšŒμ› 아이디 RequestNum : μ „μ†‘μš”μ²­λ²ˆν˜Έ return μ ‘μˆ˜λ²ˆν˜Έ (receiptNum) raise PopbillException
entailment
def sendLMS(self, CorpNum, Sender, Receiver, ReceiverName, Subject, Contents,
            reserveDT, adsYN=False, UserID=None, SenderName=None,
            RequestNum=None):
    """
    Send a single LMS (long) message.
    args
        CorpNum : Popbill member's business registration number
        Sender : sender number
        Receiver : receiver number
        ReceiverName : receiver name
        Subject : message subject
        Contents : message content (trimmed to length if over 2000 bytes)
        reserveDT : reserved send time (format: yyyyMMddHHmmss)
        UserID : Popbill member ID
        SenderName : sender name
        RequestNum : send request number
    return
        receipt number (receiptNum)
    raise
        PopbillException
    """
    Messages = []
    Messages.append(MessageReceiver(snd=Sender,
                                    sndnm=SenderName,
                                    rcv=Receiver,
                                    rcvnm=ReceiverName,
                                    msg=Contents,
                                    sjt=Subject))
    return self.sendMessage("LMS", CorpNum, Sender, '', Subject, Contents,
                            Messages, reserveDT, adsYN, UserID, RequestNum)
μž₯λ¬Έ λ¬Έμžλ©”μ‹œμ§€ 단건 전솑 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ Sender : λ°œμ‹ λ²ˆν˜Έ Receiver : μˆ˜μ‹ λ²ˆν˜Έ ReceiverName : μˆ˜μ‹ μžλͺ… Subject : λ©”μ‹œμ§€ 제λͺ© Contents : λ©”μ‹œμ§€ λ‚΄μš©(2000Byte μ΄ˆκ³Όμ‹œ 길이가 μ‘°μ •λ˜μ–΄ 전솑됨) reserveDT : μ˜ˆμ•½μ „μ†‘μ‹œκ°„ (ν˜•μ‹. yyyyMMddHHmmss) UserID : νŒλΉŒνšŒμ› 아이디 SenderName : λ°œμ‹ μžλͺ… RequestNum = μ „μ†‘μš”μ²­λ²ˆν˜Έ return μ ‘μˆ˜λ²ˆν˜Έ (receiptNum) raise PopbillException
entailment
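A usage sketch for the single-send call above (phone and business numbers are placeholders); an empty reserveDT omits the sndDT field in sendMessage below, which the service treats as an immediate send.

    receiptNum = messageService.sendLMS(
        "1234567890",        # CorpNum (placeholder)
        "07000001234",       # Sender (placeholder)
        "01000005678",       # Receiver (placeholder)
        "Receiver Name",
        "Subject line",
        "A long message body that may exceed the 90-byte SMS limit.",
        "")                  # reserveDT: empty for immediate send
    print(receiptNum)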
def sendLMS_multi(self, CorpNum, Sender, Subject, Contents, Messages,
                  reserveDT, adsYN=False, UserID=None, RequestNum=None):
    """
    Send LMS (long) messages in bulk.
    args
        CorpNum : Popbill member's business registration number
        Sender : sender number (for broadcast of identical content)
        Subject : long-message subject (for broadcast of identical content)
        Contents : long-message content (for broadcast of identical content)
        Messages : array of per-recipient message info
        reserveDT : reserved send time (format: yyyyMMddHHmmss)
        UserID : Popbill member ID
        RequestNum : send request number
    return
        receipt number (receiptNum)
    raise
        PopbillException
    """
    return self.sendMessage("LMS", CorpNum, Sender, '', Subject, Contents,
                            Messages, reserveDT, adsYN, UserID, RequestNum)
μž₯λ¬Έ λ¬Έμžλ©”μ‹œμ§€ λ‹€λŸ‰μ „μ†‘ args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ Sender : λ°œμ‹ μžλ²ˆν˜Έ (λ™λ³΄μ „μ†‘μš©) Subject : μž₯λ¬Έ λ©”μ‹œμ§€ 제λͺ© (λ™λ³΄μ „μ†‘μš©) Contents : μž₯λ¬Έ 문자 λ‚΄μš© (λ™λ³΄μ „μ†‘μš©) Messages : κ°œλ³„μ „μ†‘μ •λ³΄ λ°°μ—΄ reserveDT : μ˜ˆμ•½μ‹œκ°„ (ν˜•μ‹. yyyyMMddHHmmss) UserID : νŒλΉŒνšŒμ› 아이디 RequestNum = μ „μ†‘μš”μ²­λ²ˆν˜Έ return μ ‘μˆ˜λ²ˆν˜Έ (receiptNum) raise PopbillException
entailment
def sendMMS_Multi(self, CorpNum, Sender, Subject, Contents, Messages, FilePath,
                  reserveDT, adsYN=False, UserID=None, RequestNum=None):
    """
    Send MMS (multimedia) messages in bulk.
    args
        CorpNum : Popbill member's business registration number
        Sender : sender number (for broadcast of identical content)
        Subject : long-message subject (for broadcast of identical content)
        Contents : long-message content (for broadcast of identical content)
        Messages : array of per-recipient message info
        FilePath : path of the file to send
        reserveDT : reserved send time (format: yyyyMMddHHmmss)
        UserID : Popbill member ID
        RequestNum : send request number
    return
        receipt number (receiptNum)
    raise
        PopbillException
    """
    if Messages is None or len(Messages) < 1:
        raise PopbillException(-99999999, "전솑할 λ©”μ‹œμ§€κ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")

    req = {}
    #Only include fields that were actually provided
    if Sender is not None and Sender != '':
        req['snd'] = Sender
    if Contents is not None and Contents != '':
        req['content'] = Contents
    if Subject is not None and Subject != '':
        req['subject'] = Subject
    if reserveDT is not None and reserveDT != '':
        req['sndDT'] = reserveDT
    if Messages is not None and len(Messages) > 0:
        req['msgs'] = Messages
    if RequestNum is not None and RequestNum != '':
        req['requestNum'] = RequestNum
    if adsYN:
        req['adsYN'] = True

    postData = self._stringtify(req)

    files = []
    try:
        with open(FilePath, "rb") as F:
            files = [File(fieldName='file', fileName=F.name, fileData=F.read())]
    except IOError:
        raise PopbillException(-99999999, "ν•΄λ‹Ήκ²½λ‘œμ— 파일이 μ—†κ±°λ‚˜ 읽을 수 μ—†μŠ΅λ‹ˆλ‹€.")

    result = self._httppost_files('/MMS', postData, files, CorpNum, UserID)
    return result.receiptNum
λ©€ν‹° λ¬Έμžλ©”μ‹œμ§€ λ‹€λŸ‰ 전솑 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ Sender : λ°œμ‹ μžλ²ˆν˜Έ (λ™λ³΄μ „μ†‘μš©) Subject : μž₯λ¬Έ λ©”μ‹œμ§€ 제λͺ© (λ™λ³΄μ „μ†‘μš©) Contents : μž₯λ¬Έ 문자 λ‚΄μš© (λ™λ³΄μ „μ†‘μš©) Messages : κ°œλ³„μ „μ†‘μ •λ³΄ λ°°μ—΄ FilePath : μ „μ†‘ν•˜κ³ μž ν•˜λŠ” 파일 경둜 reserveDT : μ˜ˆμ•½μ „μ†‘μ‹œκ°„ (ν˜•μ‹. yyyyMMddHHmmss) UserID : νŒλΉŒνšŒμ› 아이디 RequestNum = μ „μ†‘μš”μ²­λ²ˆν˜Έ return μ ‘μˆ˜λ²ˆν˜Έ (receiptNum) raise PopbillException
entailment
def sendMessage(self, MsgType, CorpNum, Sender, SenderName, Subject, Contents,
                Messages, reserveDT, adsYN=False, UserID=None, RequestNum=None):
    """
    Send text messages.
    args
        MsgType : message type (SMS: short, LMS: long, XMS: short/long automatic)
        CorpNum : Popbill member's business registration number
        Sender : sender number (for broadcast of identical content)
        SenderName : sender name (for broadcast of identical content)
        Subject : long-message subject (for broadcast of identical content)
        Contents : long-message content (for broadcast of identical content)
        Messages : array of per-recipient message info
        reserveDT : reserved send time (format: yyyyMMddHHmmss)
        UserID : Popbill member ID
        RequestNum : send request number
    return
        receipt number (receiptNum)
    raise
        PopbillException
    """
    if MsgType is None or MsgType == '':
        raise PopbillException(-99999999, "문자 전솑 μœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if Messages is None or len(Messages) < 1:
        raise PopbillException(-99999999, "전솑할 λ©”μ‹œμ§€κ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")

    req = {}
    #Only include fields that were actually provided
    if Sender is not None and Sender != '':
        req['snd'] = Sender
    if SenderName is not None and SenderName != '':
        req['sndnm'] = SenderName
    if Contents is not None and Contents != '':
        req['content'] = Contents
    if Subject is not None and Subject != '':
        req['subject'] = Subject
    if reserveDT is not None and reserveDT != '':
        req['sndDT'] = reserveDT
    if Messages is not None and len(Messages) > 0:
        req['msgs'] = Messages
    if RequestNum is not None and RequestNum != '':
        req['requestnum'] = RequestNum
    if adsYN:
        req['adsYN'] = True

    postData = self._stringtify(req)
    result = self._httppost('/' + MsgType, postData, CorpNum, UserID)
    return result.receiptNum
Send text messages. args MsgType : message type (SMS: short, LMS: long, XMS: short/long automatic) CorpNum : Popbill member's business registration number Sender : sender number (for broadcast of identical content) SenderName : sender name (for broadcast of identical content) Subject : long-message subject (for broadcast of identical content) Contents : long-message content (for broadcast of identical content) Messages : array of per-recipient message info reserveDT : reserved send time (format: yyyyMMddHHmmss) UserID : Popbill member ID RequestNum : send request number return receipt number (receiptNum) raise PopbillException
entailment
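The per-recipient Messages array that the bulk senders above expect is built from MessageReceiver objects, whose keyword names (snd, sndnm, rcv, rcvnm, msg, sjt) appear in sendLMS above. A sketch with placeholder numbers follows; the fallback noted in the comment is an assumption about the service's broadcast behavior.

    messages = [
        MessageReceiver(snd="07000001234", sndnm="Sender", rcv="01000005678",
                        rcvnm="Alice", msg="Custom text for Alice", sjt="Hi"),
        MessageReceiver(snd="07000001234", sndnm="Sender", rcv="01000009012",
                        rcvnm="Bob", msg="", sjt=""),  # empty msg presumably falls back to the broadcast Contents
    ]
    receiptNum = messageService.sendSMS_multi(
        "1234567890", "07000001234", "Broadcast fallback text", messages, "")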
def getMessages(self, CorpNum, ReceiptNum, UserID=None):
    """
    Get message send results.
    args
        CorpNum : Popbill member's business registration number
        ReceiptNum : receipt number issued at send request
        UserID : Popbill member ID
    return
        send info as list
    raise
        PopbillException
    """
    if ReceiptNum is None or len(ReceiptNum) != 18:
        raise PopbillException(-99999999, "μ ‘μˆ˜λ²ˆν˜Έκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Message/' + ReceiptNum, CorpNum, UserID)
Get message send results. args CorpNum : Popbill member's business registration number ReceiptNum : receipt number issued at send request UserID : Popbill member ID return send info as list raise PopbillException
entailment
def getMessagesRN(self, CorpNum, RequestNum, UserID=None):
    """
    Get message send results.
    args
        CorpNum : Popbill member's business registration number
        RequestNum : request number assigned at send request
        UserID : Popbill member ID
    return
        send info as list
    raise
        PopbillException
    """
    if RequestNum is None or RequestNum == '':
        raise PopbillException(-99999999, "μš”μ²­λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Message/Get/' + RequestNum, CorpNum, UserID)
Get message send results. args CorpNum : Popbill member's business registration number RequestNum : request number assigned at send request UserID : Popbill member ID return send info as list raise PopbillException
entailment
def cancelReserveRN(self, CorpNum, RequestNum, UserID=None):
    """
    Cancel a reserved message send.
    args
        CorpNum : Popbill member's business registration number
        RequestNum : request number assigned at send request
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if RequestNum is None or RequestNum == '':
        raise PopbillException(-99999999, "μš”μ²­λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Message/Cancel/' + RequestNum, CorpNum, UserID)
Cancel a reserved message send. args CorpNum : Popbill member's business registration number RequestNum : request number assigned at send request UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def getURL(self, CorpNum, UserID, ToGo):
    """
    Popbill URL for text messaging.
    args
        CorpNum : Popbill member's business registration number
        UserID : Popbill member ID
        ToGo : BOX (send-history popup)
    return
        Popbill URL
    raise
        PopbillException
    """
    if ToGo is None or ToGo == '':
        raise PopbillException(-99999999, "TOGO값이 μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    result = self._httpget('/Message/?TG=' + ToGo, CorpNum, UserID)
    return result.url
Popbill URL for text messaging. args CorpNum : Popbill member's business registration number UserID : Popbill member ID ToGo : BOX (send-history popup) return Popbill URL raise PopbillException
entailment
def getStates(self, Corpnum, reciptNumList, UserID=None):
    """
    Check summary info for sent messages.
    args
        Corpnum : Popbill member's business registration number
        reciptNumList : array of message receipt numbers
        UserID : Popbill member ID
    return
        send info as list
    raise
        PopbillException
    """
    if reciptNumList is None or len(reciptNumList) < 1:
        raise PopbillException(-99999999, "μ ‘μˆ˜λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    postData = self._stringtify(reciptNumList)
    return self._httppost('/Message/States', postData, Corpnum, UserID)
Check summary info for sent messages. args Corpnum : Popbill member's business registration number reciptNumList : array of message receipt numbers UserID : Popbill member ID return send info as list raise PopbillException
entailment
def funcGauss1D(x, mu, sig):
    """
    Create 1D Gaussian.
    Source: http://mathworld.wolfram.com/GaussianFunction.html
    """
    arrOut = np.exp(-np.power((x - mu)/sig, 2.)/2)
    # normalize
    arrOut = arrOut/(np.sqrt(2.*np.pi)*sig)
    return arrOut
Create 1D Gaussian. Source: http://mathworld.wolfram.com/GaussianFunction.html
entailment
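A quick numeric sanity check of the normalization above (a sketch; assumes numpy is imported as np, as the function body already does): a normalized Gaussian should have unit area under the curve.

    import numpy as np

    x = np.linspace(-10.0, 10.0, 2001)
    y = funcGauss1D(x, mu=0.0, sig=1.5)
    print(y.sum() * (x[1] - x[0]))  # ~1.0, confirming unit area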
def absolute_path(user_path):
    """
    Some paths must be made absolute; this will attempt to convert them.
    """
    if os.path.isabs(user_path):
        return unix_path_coercion(user_path)
    else:
        try:
            openaccess_epub.utils.evaluate_relative_path(relative=user_path)
        except Exception:
            raise ValidationError('This path could not be rendered as absolute')
Some paths must be made absolute; this will attempt to convert them.
entailment
def configure(default=None, dev=None):
    """
    The inner control loops for user interaction during quickstart
    configuration.
    """
    cache_loc = openaccess_epub.utils.cache_location()
    config_loc = openaccess_epub.utils.config_location()

    #Make the cache directory
    openaccess_epub.utils.mkdir_p(cache_loc)

    defaults = {'now': time.asctime(),
                'oae-version': openaccess_epub.__version__,
                'cache-location': unix_path_coercion(cache_loc),
                'input-relative-images': 'images-*',
                'use-input-relative-images': 'y',
                'image-cache': os.path.join(cache_loc, 'img_cache'),
                'use-image-cache': 'n',
                'use-image-fetching': 'y',
                'default-output': '.',
                'input-relative-css': '.',
                'epubcheck-jarfile': os.path.join(cache_loc, 'epubcheck-3.0',
                                                  'epubcheck-3.0.jar')}

    if default or dev:  # Skip interactive and apply defaults
        #Pass through the validation/modification steps
        if dev:  # The only current difference between dev and default
            defaults['use-image-cache'] = 'y'
        defaults['input-relative-images'] = list_opts(defaults['input-relative-images'])
        defaults['use-input-relative-images'] = boolean(defaults['use-input-relative-images'])
        defaults['image-cache'] = absolute_path(defaults['image-cache'])
        defaults['use-image-cache'] = boolean(defaults['use-image-cache'])
        defaults['use-image-fetching'] = boolean(defaults['use-image-fetching'])
        defaults['default-output'] = nonempty(defaults['default-output'])
        defaults['input-relative-css'] = nonempty(defaults['input-relative-css'])
        defaults['epubcheck-jarfile'] = absolute_path(defaults['epubcheck-jarfile'])

        config = config_formatter(CONFIG_TEXT, defaults)
        with open(config_loc, 'wb') as conf_out:
            conf_out.write(bytes(config, 'UTF-8'))
        print('The config file has been written to {0}'.format(config_loc))
        return

    config_dict = {'now': time.asctime(),
                   'oae-version': openaccess_epub.__version__,
                   'cache-location': unix_path_coercion(cache_loc)}

    print('\nWelcome to the interactive configuration for OpenAccess_EPUB')

    print('''
Please enter values for the following settings. To accept the default value
for the settings, shown in brackets, just push Enter.
-------------------------------------------------------------------------------''')

    print('''
OpenAccess_EPUB defines a default cache location for the storage of various
data (and the global config.py file), this location is:\n\n{0}
'''.format(cache_loc))

    input('Press Enter to start...')

    #Image Configuration
    print('''
 -- Configure Image Behavior --

When OpenAccess_EPUB is executed using the oaepub script, it can find the
images for the input articles using the following strategies (in order of
preference):

  Input-Relative: a path relative to the input file
  Cached Images: locate the images in a cache
  Fetched Online: attempts to download from the Internet (may fail)

We'll configure some values for each of these, and you'll also have the
option to turn them off.''')

    #Input-relative image details
    print('''
Where should OpenAccess_EPUB look for images relative to the input file?
A star "*" may be used as a wildcard to match the name of the input file.
Multiple path values may be specified if separated by commas.''')

    user_prompt(config_dict, 'input-relative-images', 'Input-relative images?:',
                default=defaults['input-relative-images'], validator=list_opts)

    print('''
Should OpenAccess_EPUB look for images relative to the input file by default?''')

    user_prompt(config_dict, 'use-input-relative-images',
                'Use input-relative images?: (Y/n)',
                default=defaults['use-input-relative-images'],
                validator=boolean)

    #Image cache details
    print('''
Where should OpenAccess_EPUB place the image cache?''')

    user_prompt(config_dict, 'image-cache', 'Image cache?:',
                default=defaults['image-cache'],
                validator=absolute_path)

    print('''
Should OpenAccess_EPUB use the image cache by default? This feature is
intended for developers and testers without local access to the image files
and will consume extra disk space for storage.''')

    user_prompt(config_dict, 'use-image-cache', 'Use image cache?: (y/N)',
                default=defaults['use-image-cache'],
                validator=boolean)

    #Image fetching online details
    print('''
Should OpenAccess_EPUB attempt to download the images from the Internet?
This is not supported for all publishers and not 100% guaranteed to succeed,
you may need to download them manually if this does not work.''')

    user_prompt(config_dict, 'use-image-fetching', 'Attempt image download?: (Y/n)',
                default=defaults['use-image-fetching'],
                validator=boolean)

    #Output configuration
    print('''
 -- Configure Output Behavior --

OpenAccess_EPUB produces ePub and log files as output. The following options
will determine what is done with these.

Where should OpenAccess_EPUB place the output ePub and log files? If you
supply a relative path, the output path will be relative to the input; if
you supply an absolute path, the output will always be placed there. The
default behavior is to place them in the same directory as the input.''')

    user_prompt(config_dict, 'default-output', 'Output path?:',
                default=defaults['default-output'],
                validator=nonempty)

    print('''
 -- Configure CSS Behavior --

ePub files use CSS for improved styling, and ePub-readers must support a
basic subset of CSS functions. OpenAccess_EPUB provides a default CSS file,
but a manual one may be supplied, relative to the input. Please define an
appropriate input-relative path.''')

    user_prompt(config_dict, 'input-relative-css', 'Input-relative CSS path?:',
                default=defaults['input-relative-css'],
                validator=nonempty)

    print('''
 -- Configure EpubCheck --

EpubCheck is a program written and maintained by the IDPF as a tool to
validate ePub. In order to use it, your system must have Java installed and
it is recommended to use the latest version. Downloads of this program are
found here:

  https://github.com/IDPF/epubcheck/releases

Once you have downloaded the zip file for the program, unzip the archive and
write a path to the .jar file here.''')

    user_prompt(config_dict, 'epubcheck-jarfile', 'Absolute path to epubcheck?:',
                default=defaults['epubcheck-jarfile'],
                validator=absolute_path)

    #Write the config.py file
    config = config_formatter(CONFIG_TEXT, config_dict)
    with open(config_loc, 'wb') as conf_out:
        conf_out.write(bytes(config, 'UTF-8'))
    print('''
Done configuring OpenAccess_EPUB!''')
The inner control loops for user interaction during quickstart configuration.
entailment
def getCertificateExpireDate(self, CorpNum):
    """
    Check the expiration date of the registered public certificate; also
    usable to check whether a certificate is registered at all.
    args
        CorpNum : business registration number of the member to check
    return
        expiration date if registered; raises the corresponding
        PopbillException if not registered
    raise
        PopbillException
    """
    result = self._httpget('/Taxinvoice?cfg=CERT', CorpNum)
    return datetime.strptime(result.certificateExpiration, '%Y%m%d%H%M%S')
κ³΅μΈμΈμ¦μ„œ 만료일 확인, 등둝여뢀 ν™•μΈμš©λ„λ‘œ ν™œμš©κ°€λŠ₯ args CorpNum : 확인할 νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ return λ“±λ‘μ‹œ 만료일자, λ―Έλ“±λ‘μ‹œ ν•΄λ‹Ή PopbillException raise. raise PopbillException
entailment
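A usage sketch for the certificate check above, assuming a TaxinvoiceService client constructed with partner credentials as in the Popbill Python SDK (credentials and business number are placeholders):

    from popbill import TaxinvoiceService, PopbillException

    taxinvoiceService = TaxinvoiceService("LinkID", "SecretKey")  # placeholders
    try:
        expiry = taxinvoiceService.getCertificateExpireDate("1234567890")
        print(expiry)  # datetime parsed from the 'yyyyMMddHHmmss' field
    except PopbillException as PE:
        print(PE.code, PE.message)  # raised when no certificate is registered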
def checkMgtKeyInUse(self, CorpNum, MgtKeyType, MgtKey):
    """
    Check whether a partner management key is in use.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
    return
        in-use status as True/False
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    try:
        result = self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, CorpNum)
        return result.itemKey is not None and result.itemKey != ""
    except PopbillException as PE:
        if PE.code == -11000005:
            return False
        raise PE
Check whether a partner management key is in use. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKey : partner management key return in-use status as True/False raise PopbillException
entailment
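A usage sketch for the in-use check above (business number and key are placeholders); note that the method converts the service's "not found" error code (-11000005) into False rather than raising:

    inUse = taxinvoiceService.checkMgtKeyInUse("1234567890", "SELL", "20230101-001")
    print("key already taken" if inUse else "key available")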
def register(self, CorpNum, taxinvoice, writeSpecification=False, UserID=None):
    """
    Save a tax invoice as a draft.
    args
        CorpNum : member's business registration number
        taxinvoice : tax invoice object to register, made with Taxinvoice(...)
        writeSpecification : whether to also write a transaction statement at
            registration
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if taxinvoice is None:
        raise PopbillException(-99999999, "등둝할 μ„ΈκΈˆκ³„μ‚°μ„œ 정보가 μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if writeSpecification:
        taxinvoice.writeSpecification = True
    postData = self._stringtify(taxinvoice)
    return self._httppost('/Taxinvoice', postData, CorpNum, UserID)
Save a tax invoice as a draft. args CorpNum : member's business registration number taxinvoice : tax invoice object to register, made with Taxinvoice(...) writeSpecification : whether to also write a transaction statement at registration UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def registIssue(self, CorpNum, taxinvoice, writeSpecification=False,
                forceIssue=False, dealInvoiceMgtKey=None, memo=None,
                emailSubject=None, UserID=None):
    """
    Register and issue a tax invoice immediately.
    args
        CorpNum : Popbill member's business registration number
        taxinvoice : tax invoice object
        writeSpecification : whether to also write a transaction statement
        forceIssue : whether to force issuing a late tax invoice
        dealInvoiceMgtKey : transaction statement document management key
        memo : memo
        emailSubject : email subject; if omitted, the default subject is used
        UserID : Popbill member ID
    return
        processing result info
    raise
        PopbillException
    """
    if writeSpecification:
        taxinvoice.writeSpecification = True
    if forceIssue:
        taxinvoice.forceIssue = True
    if dealInvoiceMgtKey is not None and dealInvoiceMgtKey != '':
        taxinvoice.dealInvoiceMgtKey = dealInvoiceMgtKey
    if memo is not None and memo != '':
        taxinvoice.memo = memo
    if emailSubject is not None and emailSubject != '':
        taxinvoice.emailSubject = emailSubject
    postData = self._stringtify(taxinvoice)
    return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "ISSUE")
Register and issue a tax invoice immediately. args CorpNum : Popbill member's business registration number taxinvoice : tax invoice object writeSpecification : whether to also write a transaction statement forceIssue : whether to force issuing a late tax invoice dealInvoiceMgtKey : transaction statement document management key memo : memo emailSubject : email subject; if omitted, the default subject is used UserID : Popbill member ID return processing result info raise PopbillException
entailment
def update(self, CorpNum, MgtKeyType, MgtKey, taxinvoice,
           writeSpecification=False, UserID=None):
    """
    Update a tax invoice.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        taxinvoice : tax invoice object to update, made with Taxinvoice(...)
        writeSpecification : whether to also write a transaction statement at
            registration
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if taxinvoice is None:
        raise PopbillException(-99999999, "μˆ˜μ •ν•  μ„ΈκΈˆκ³„μ‚°μ„œ 정보가 μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if writeSpecification:
        taxinvoice.writeSpecification = True
    postData = self._stringtify(taxinvoice)
    return self._httppost('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, postData,
                          CorpNum, UserID, 'PATCH')
Update a tax invoice. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKey : partner management key taxinvoice : tax invoice object to update, made with Taxinvoice(...) writeSpecification : whether to also write a transaction statement at registration UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def getInfo(self, CorpNum, MgtKeyType, MgtKey):
    """
    Check status info.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, CorpNum)
μƒνƒœμ •λ³΄ 확인 args CorpNum : νšŒμ› μ‚¬μ—…μž 번호 MgtKeyType : κ΄€λ¦¬λ²ˆν˜Έ μœ ν˜• one of ['SELL','BUY','TRUSTEE'] MgtKey : νŒŒνŠΈλ„ˆ κ΄€λ¦¬λ²ˆν˜Έ return 처리결과. consist of code and message raise PopbillException
entailment
def getDetailInfo(self, CorpNum, MgtKeyType, MgtKey):
    """
    Check detailed info.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "?Detail",
                         CorpNum)
Check detailed info. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKey : partner management key return processing result, consisting of code and message raise PopbillException
entailment
def delete(self, CorpNum, MgtKeyType, MgtKey, UserID=None):
    """
    Delete a tax invoice.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, '',
                          CorpNum, UserID, "DELETE")
Delete a tax invoice. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKey : partner management key UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def send(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None,
         UserID=None):
    """
    Request approval.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        Memo : processing memo
        EmailSubject : subject of the notification email; if omitted, the
            default subject is used
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    req = {}
    if Memo is not None and Memo != '':
        req["memo"] = Memo
    if EmailSubject is not None and EmailSubject != '':
        req["emailSubject"] = EmailSubject
    postData = self._stringtify(req)
    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData,
                          CorpNum, UserID, "SEND")
μŠΉμΈμš”μ²­ args CorpNum : νšŒμ› μ‚¬μ—…μž 번호 MgtKeyType : κ΄€λ¦¬λ²ˆν˜Έ μœ ν˜• one of ['SELL','BUY','TRUSTEE'] MgtKey : νŒŒνŠΈλ„ˆ κ΄€λ¦¬λ²ˆν˜Έ Memo : 처리 λ©”λͺ¨ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” return 처리결과. consist of code and message raise PopbillException
entailment
def cancelSend(self, CorpNum, MgtKeyType, MgtKey, Memo=None, UserID=None):
    """
    Cancel an approval request.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        Memo : processing memo
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if Memo is not None and Memo != '':
        postData = self._stringtify({"memo": Memo})
    else:
        postData = ''
    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData,
                          CorpNum, UserID, "CANCELSEND")
μŠΉμΈμš”μ²­ μ·¨μ†Œ args CorpNum : νšŒμ› μ‚¬μ—…μž 번호 MgtKeyType : κ΄€λ¦¬λ²ˆν˜Έ μœ ν˜• one of ['SELL','BUY','TRUSTEE'] MgtKey : νŒŒνŠΈλ„ˆ κ΄€λ¦¬λ²ˆν˜Έ Memo : 처리 λ©”λͺ¨ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” return 처리결과. consist of code and message raise PopbillException
entailment
def issue(self, CorpNum, MgtKeyType, MgtKey, Memo=None, EmailSubject=None,
          ForceIssue=False, UserID=None):
    """
    Issue a tax invoice.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        Memo : processing memo
        EmailSubject : subject of the issuance email
        ForceIssue : whether to force issuing a late tax invoice
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    req = {"forceIssue": ForceIssue}
    if Memo is not None and Memo != '':
        req["memo"] = Memo
    if EmailSubject is not None and EmailSubject != '':
        req["emailSubject"] = EmailSubject
    postData = self._stringtify(req)
    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData,
                          CorpNum, UserID, "ISSUE")
Issue a tax invoice. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKey : partner management key Memo : processing memo EmailSubject : subject of the issuance email ForceIssue : whether to force issuing a late tax invoice UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def registRequest(self, CorpNum, taxinvoice, memo=None, UserID=None):
    """
    Register and request immediately.
    args
        CorpNum : Popbill member's business registration number
        taxinvoice : tax invoice object
        memo : memo
        UserID : Popbill member ID
    return
        processing result info
    raise
        PopbillException
    """
    if memo is not None and memo != '':
        taxinvoice.memo = memo
    postData = self._stringtify(taxinvoice)
    return self._httppost('/Taxinvoice', postData, CorpNum, UserID, "REQUEST")
Register and request immediately. args CorpNum : Popbill member's business registration number taxinvoice : tax invoice object memo : memo UserID : Popbill member ID return processing result info raise PopbillException
entailment
def sendToNTS(self, CorpNum, MgtKeyType, MgtKey, UserID=None):
    """
    Send to the National Tax Service immediately.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    postData = ''
    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, postData,
                          CorpNum, UserID, "NTS")
Send to the National Tax Service immediately. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKey : partner management key UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def getLogs(self, CorpNum, MgtKeyType, MgtKey):
    """
    Check the document history list of a tax invoice.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
    return
        document history info list as List
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Logs",
                         CorpNum)
μ„ΈκΈˆκ³„μ‚°μ„œ λ¬Έμ„œμ΄λ ₯ λͺ©λ‘ 확인 args CorpNum : νšŒμ› μ‚¬μ—…μž 번호 MgtKeyType : κ΄€λ¦¬λ²ˆν˜Έ μœ ν˜• one of ['SELL','BUY','TRUSTEE'] MgtKey : νŒŒνŠΈλ„ˆ κ΄€λ¦¬λ²ˆν˜Έ return λ¬Έμ„œμ΄λ ₯ 정보 λͺ©λ‘ as List raise PopbillException
entailment
def getFiles(self, CorpNum, MgtKeyType, MgtKey):
    """
    Check the attachment list.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
    return
        attachment info list as List
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    return self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Files",
                         CorpNum)
μ²¨λΆ€νŒŒμΌ λͺ©λ‘ 확인 args CorpNum : νšŒμ› μ‚¬μ—…μž 번호 MgtKeyType : κ΄€λ¦¬λ²ˆν˜Έ μœ ν˜• one of ['SELL','BUY','TRUSTEE'] MgtKey : νŒŒνŠΈλ„ˆ κ΄€λ¦¬λ²ˆν˜Έ return μ²©λΆ€νŒŒμΌ 정보 λͺ©λ‘ as List raise PopbillException
entailment
def deleteFile(self, CorpNum, MgtKeyType, MgtKey, FileID, UserID=None):
    """
    Delete an attachment.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        FileID : file ID
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if FileID is None or FileID == "":
        raise PopbillException(-99999999, "νŒŒμΌμ•„μ΄λ””κ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    postData = ''
    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey +
                          "/Files/" + FileID, postData, CorpNum, UserID,
                          'DELETE')
μ²¨λΆ€νŒŒμΌ μ‚­μ œ args CorpNum : νšŒμ› μ‚¬μ—…μž 번호 MgtKeyType : κ΄€λ¦¬λ²ˆν˜Έ μœ ν˜• one of ['SELL','BUY','TRUSTEE'] MgtKey : νŒŒνŠΈλ„ˆ κ΄€λ¦¬λ²ˆν˜Έ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” return 처리결과. consist of code and message raise PopbillException
entailment
def getMassPrintURL(self, CorpNum, MgtKeyType, MgtKeyList, UserID=None):
    """
    Get the bulk print URL.
    args
        CorpNum : member's business registration number
        MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE']
        MgtKeyList : list of partner management keys
        UserID : Popbill member ID
    return
        Popbill URL as str
    raise
        PopbillException
    """
    if MgtKeyList is None or len(MgtKeyList) < 1:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    postData = self._stringtify(MgtKeyList)
    Result = self._httppost('/Taxinvoice/' + MgtKeyType + "?Print", postData,
                            CorpNum, UserID)
    return Result.url
Get the bulk print URL. args CorpNum : member's business registration number MgtKeyType : management key type, one of ['SELL','BUY','TRUSTEE'] MgtKeyList : list of partner management keys UserID : Popbill member ID return Popbill URL as str raise PopbillException
entailment
def search(self, CorpNum, MgtKeyType, DType, SDate, EDate, State, Type, TaxType,
           LateOnly, TaxRegIDYN, TaxRegIDType, TaxRegID, Page, PerPage, Order,
           UserID=None, QString=None, InterOPYN=None, IssueType=None):
    """
    Search the tax invoice list.
    args
        CorpNum : Popbill member's business registration number
        MgtKeyType : tax invoice type, SELL (sales), BUY (purchases),
            TRUSTEE (trustee)
        DType : date type, one of R (registered), W (written), I (issued)
        SDate : start date, format yyyyMMdd
        EDate : end date, format yyyyMMdd
        State : state codes; a wildcard (*) may be used in the 2nd and 3rd
            positions
        Type : document type array, N (regular tax invoice), M (amended
            tax invoice)
        TaxType : tax type array, T (taxable), N (exempt), Z (zero-rated)
        LateOnly : late issuance, blank (all), 0 (issued on time),
            1 (issued late)
        TaxRegIDYN : sub-business-site number presence, blank (all),
            0 (absent), 1 (present)
        TaxRegIDType : sub-business-site number party type, S (supplier),
            B (buyer), T (trustee)
        TaxRegID : sub-business-site numbers, comma-separated,
            e.g. '0001,1234'
        Page : page number
        PerPage : items per page
        Order : sort direction, D (descending), A (ascending)
        UserID : Popbill member ID
        QString : counterparty info, company name or business registration
            number; if omitted, all are searched
        InterOPYN : interop-document flag, blank (all), 0 (regular
            documents), 1 (interop documents)
        IssueType : issue type array, N (normal), R (reverse), T (trustee)
    return
        search list object
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if DType is None or DType == '':
        raise PopbillException(-99999999, "μΌμžμœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if SDate is None or SDate == '':
        raise PopbillException(-99999999, "μ‹œμž‘μΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if EDate is None or EDate == '':
        raise PopbillException(-99999999, "μ’…λ£ŒμΌμžκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")

    uri = '/Taxinvoice/' + MgtKeyType
    uri += '?DType=' + DType
    uri += '&SDate=' + SDate
    uri += '&EDate=' + EDate
    uri += '&State=' + ','.join(State)
    uri += '&Type=' + ','.join(Type)
    uri += '&TaxType=' + ','.join(TaxType)
    uri += '&TaxRegIDType=' + TaxRegIDType
    uri += '&TaxRegID=' + TaxRegID
    uri += '&Page=' + str(Page)
    uri += '&PerPage=' + str(PerPage)
    uri += '&Order=' + Order

    #Optional filters are only appended when provided
    if LateOnly is not None and LateOnly != '':
        uri += '&LateOnly=' + LateOnly
    if TaxRegIDYN is not None and TaxRegIDYN != '':
        uri += '&TaxRegIDYN=' + TaxRegIDYN
    if InterOPYN is not None and InterOPYN != '':
        uri += '&InterOPYN=' + InterOPYN
    if QString is not None:
        uri += '&QString=' + QString
    if IssueType is not None:
        uri += '&IssueType=' + ','.join(IssueType)

    return self._httpget(uri, CorpNum, UserID)
λͺ©λ‘ 쑰회 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ MgtKeyType : μ„ΈκΈˆκ³„μ‚°μ„œμœ ν˜•, SELL-맀좜, BUY-λ§€μž…, TRUSTEE-μœ„μˆ˜νƒ DType : μΌμžμœ ν˜•, R-λ“±λ‘μΌμ‹œ, W-μž‘μ„±μΌμž, I-λ°œν–‰μΌμ‹œ 쀑 택 1 SDate : μ‹œμž‘μΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) EDate : μ’…λ£ŒμΌμž, ν‘œμ‹œν˜•μ‹(yyyyMMdd) State : μƒνƒœμ½”λ“œ, 2,3번째 μžλ¦¬μ— μ™€μΌλ“œμΉ΄λ“œ(*) μ‚¬μš©κ°€λŠ₯ Type : λ¬Έμ„œν˜•νƒœ λ°°μ—΄, N-μΌλ°˜μ„ΈκΈˆκ³„μ‚°μ„œ, M-μˆ˜μ •μ„ΈκΈˆκ³„μ‚°μ„œ TaxType : κ³Όμ„Έν˜•νƒœ λ°°μ—΄, T-κ³Όμ„Έ, N-λ©΄μ„Έ, Z-μ˜μ„Έ LateOnly : μ§€μ—°λ°œν–‰, 곡백-μ „μ²΄μ‘°νšŒ, 0-μ •μƒλ°œν–‰μ‘°νšŒ, 1-μ§€μ—°λ°œν–‰ 쑰회 TaxRegIdYN : 쒅사업μž₯번호 유무, 곡백-μ „μ²΄μ‘°νšŒ, 0-쒅사업μž₯번호 μ—†μŒ 1-쒅사업μž₯번호 있음 TaxRegIDType : 쒅사업μž₯번호 μ‚¬μ—…μžμœ ν˜•, S-κ³΅κΈ‰μž, B-κ³΅κΈ‰λ°›λŠ”μž, T-μˆ˜νƒμž TaxRegID : 쒅사업μž₯번호, 콀마(,)둜 κ΅¬λΆ„ν•˜μ—¬ ꡬ성 ex)'0001,1234' Page : νŽ˜μ΄μ§€λ²ˆν˜Έ PerPage : νŽ˜μ΄μ§€λ‹Ή λͺ©λ‘κ°œμˆ˜ Order : μ •λ ¬λ°©ν–₯, D-λ‚΄λ¦Όμ°¨μˆœ, A-μ˜€λ¦„μ°¨μˆœ UserID : 팝빌 νšŒμ›μ•„μ΄λ”” QString : 거래처 정보, 거래처 μƒν˜Έ λ˜λŠ” μ‚¬μ—…μžλ“±λ‘λ²ˆν˜Έ 기재, λ―ΈκΈ°μž¬μ‹œ μ „μ²΄μ‘°νšŒ InterOPYN : μ—°λ™λ¬Έμ„œ μ—¬λΆ€, 곡백-μ „μ²΄μ‘°νšŒ, 0-μΌλ°˜λ¬Έμ„œ 쑰회, 1-μ—°λ™λ¬Έμ„œ 쑰회 IssueType : λ°œν–‰ν˜•νƒœ λ°°μ—΄, N-μ •λ°œν–‰, R-μ—­λ°œν–‰, T-μœ„μˆ˜νƒ return 쑰회λͺ©λ‘ Object raise PopbillException
entailment
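A sketch of a search call using the parameter codes documented above (business number and printed field are placeholders):

    response = taxinvoiceService.search(
        "1234567890", "SELL", "W", "20230101", "20230131",
        State=["3**", "6**"],          # wildcard state codes
        Type=["N", "M"], TaxType=["T", "N", "Z"],
        LateOnly="", TaxRegIDYN="", TaxRegIDType="S", TaxRegID="",
        Page=1, PerPage=50, Order="D")
    print(response.total)  # field name is illustrative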
def attachStatement(self, CorpNum, MgtKeyType, MgtKey, ItemCode, StmtMgtKey,
                    UserID=None):
    """
    Attach an electronic statement to a tax invoice.
    args
        CorpNum : Popbill member's business registration number
        MgtKeyType : tax invoice type, SELL (sales), BUY (purchases),
            TRUSTEE (trustee)
        MgtKey : tax invoice document management key
        ItemCode : statement type code, 121 (statement), 122 (bill),
            123 (quote), 124 (order), 125 (deposit slip), 126 (receipt)
        StmtMgtKey : electronic statement document management key
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έ ν˜•νƒœκ°€ μ˜¬λ°”λ₯΄μ§€ μ•ŠμŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    uri = '/Taxinvoice/' + MgtKeyType + '/' + MgtKey + '/AttachStmt'
    postData = self._stringtify({"ItemCode": ItemCode, "MgtKey": StmtMgtKey})
    return self._httppost(uri, postData, CorpNum, UserID)
μ „μžλͺ…μ„Έμ„œ 첨뢀 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ MgtKeyType : μ„ΈκΈˆκ³„μ‚°μ„œ μœ ν˜•, SELL-맀좜, BUY-λ§€μž…, TRUSTEE-μœ„μˆ˜νƒ MgtKey : μ„ΈκΈˆκ³„μ‚°μ„œ λ¬Έμ„œκ΄€λ¦¬λ²ˆν˜Έ StmtCode : λͺ…μ„Έμ„œ μ’…λ₯˜μ½”λ“œ, 121-λͺ…μ„Έμ„œ, 122-μ²­κ΅¬μ„œ, 123-κ²¬μ μ„œ, 124-λ°œμ£Όμ„œ 125-μž…κΈˆν‘œ, 126-영수증 StmtMgtKey : μ „μžλͺ…μ„Έμ„œ λ¬Έμ„œκ΄€λ¦¬λ²ˆν˜Έ UserID : νŒλΉŒνšŒμ› 아이디 return 처리결과. consist of code and message raise PopbillException
entailment
def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None):
    """
    Assign a management key to a tax invoice.
    args
        CorpNum : Popbill member's business registration number
        MgtKeyType : tax invoice type, SELL (sales), BUY (purchases),
            TRUSTEE (trustee)
        ItemKey : item key (can be looked up with the Search API)
        MgtKey : partner management key to assign to the tax invoice
        UserID : Popbill member ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    if MgtKeyType is None or MgtKeyType == '':
        raise PopbillException(-99999999, "μ„ΈκΈˆκ³„μ‚°μ„œ λ°œν–‰μœ ν˜•μ΄ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if ItemKey is None or ItemKey == '':
        raise PopbillException(-99999999, "μ•„μ΄ν…œν‚€κ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    if MgtKey is None or MgtKey == '':
        raise PopbillException(-99999999, "κ΄€λ¦¬λ²ˆν˜Έκ°€ μž…λ ₯λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
    postData = "MgtKey=" + MgtKey
    return self._httppost('/Taxinvoice/' + ItemKey + '/' + MgtKeyType, postData,
                          CorpNum, UserID, "",
                          "application/x-www-form-urlencoded; charset=utf-8")
Assign a management key to a tax invoice. args CorpNum : Popbill member's business registration number MgtKeyType : tax invoice type, SELL (sales), BUY (purchases), TRUSTEE (trustee) ItemKey : item key (can be looked up with the Search API) MgtKey : partner management key to assign to the tax invoice UserID : Popbill member ID return processing result, consisting of code and message raise PopbillException
entailment
def getSealURL(self, CorpNum, UserID):
    """
    Popbill URL for registering a seal and attached documents.
    args
        CorpNum : member's business registration number
        UserID : Popbill member ID
    return
        URL containing a 30-second security token
    raise
        PopbillException
    """
    result = self._httpget('/?TG=SEAL', CorpNum, UserID)
    return result.url
Popbill URL for registering a seal and attached documents. args CorpNum : member's business registration number UserID : Popbill member ID return URL containing a 30-second security token raise PopbillException
entailment
def getTaxCertURL(self, CorpNum, UserID):
    """
    URL for registering a public certificate.
    args
        CorpNum : member's business registration number
        UserID : Popbill member ID
    return
        URL containing a 30-second security token
    raise
        PopbillException
    """
    result = self._httpget('/?TG=CERT', CorpNum, UserID)
    return result.url
κ³΅μΈμΈμ¦μ„œ 등둝 URL args CorpNum : νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ UserID : νšŒμ› νŒλΉŒμ•„μ΄λ”” return 30초 λ³΄μ•ˆ 토큰을 ν¬ν•¨ν•œ url raise PopbillException
entailment
def encloses(self,
             location: FileLocation
             ) -> Optional[FunctionDesc]:
    """
    Returns the function, if any, that encloses a given location.
    """
    for func in self.in_file(location.filename):
        if location in func.location:
            return func
    return None
Returns the function, if any, that encloses a given location.
entailment
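A usage sketch for the two lookups in this row and the next, assuming a populated function table and a FileLocation built from a filename plus a position; the constructor arguments, the `functions` variable, and the printed attributes are illustrative:

    loc = FileLocation('src/main.c', 42)   # hypothetical constructor
    func = functions.encloses(loc)
    if func is not None:
        print('enclosed by', func.name)    # attribute name is illustrative
    for f in functions.in_file('src/main.c'):
        print(f.location)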
def in_file(self, filename: str) -> Iterator[FunctionDesc]:
    """
    Returns an iterator over all of the function definitions that are
    contained within a given file.
    """
    yield from self.__filename_to_functions.get(filename, [])
Returns an iterator over all of the function definitions that are contained within a given file.
entailment
def model_creation_opt(dicCnfg, aryMdlParams, strPathHrf=None, varRat=None,
                       lgcPrint=True):
    """
    Create or load pRF model time courses.

    Parameters
    ----------
    dicCnfg : dict
        Dictionary containing config parameters.
    aryMdlParams : numpy arrays
        x, y and sigma parameters.
    strPathHrf : str or None
        Path to npy file with custom hrf parameters. If None, default
        parameters will be used.
    varRat : float, default None
        Ratio of size of suppressive surround to size of center pRF.
    lgcPrint : boolean
        Whether print statements should be executed.

    Returns
    -------
    aryPrfTc : np.array
        4D numpy array with pRF time course models, with following
        dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
    """
    # *************************************************************************
    # *** Load parameters from config file

    # Load config parameters from dictionary into namespace:
    cfg = cls_set_config(dicCnfg)
    # *************************************************************************

    if cfg.lgcCrteMdl:
        # *********************************************************************
        # *** Load spatial condition information

        arySptExpInf = np.load(cfg.strSptExpInf)

        # Here we assume scientific convention and orientation of images where
        # the origin should fall in the lower left corner, the x-axis occupies
        # the width and the y-axis occupies the height dimension of the screen.
        # We also assume that the first dimension that the user provides
        # indexes x and the second indexes the y-axis. Since python is column
        # major (i.e. first indexes columns, only then rows), we need to rotate
        # arySptExpInf by 90 degrees rightward. This will ensure that with the
        # 0th axis we index the scientific x-axis and higher values move us to
        # the right on that x-axis. It will also ensure that the 1st python
        # axis indexes the scientific y-axis and higher values will move us up.
        arySptExpInf = np.rot90(arySptExpInf, k=3)
        # *********************************************************************

        # *********************************************************************
        # *** Load temporal condition information

        # Load temporal information about presented stimuli:
        aryTmpExpInf = np.load(cfg.strTmpExpInf)
        # Add fourth column to make it appropriate for pyprf_feature:
        if aryTmpExpInf.shape[-1] == 3:
            vecNewCol = np.greater(aryTmpExpInf[:, 0], 0).astype(np.float16)
            aryTmpExpInf = np.concatenate(
                (aryTmpExpInf, np.expand_dims(vecNewCol, axis=1)), axis=1)
        # *********************************************************************

        # *********************************************************************
        # If desired by user, also create model parameters for supp surround:
        if varRat is not None:
            aryMdlParamsSur = np.copy(aryMdlParams)
            aryMdlParamsSur[:, 2] = aryMdlParamsSur[:, 2] * varRat
        # *********************************************************************

        # *********************************************************************
        # *** Create 2D Gauss model responses to spatial conditions.

        aryMdlRsp = crt_mdl_rsp(arySptExpInf,
                                (int(cfg.varVslSpcSzeX),
                                 int(cfg.varVslSpcSzeY)),
                                aryMdlParams, cfg.varPar, lgcPrint=lgcPrint)
        # If desired by user, also create model responses for supp surround:
        if varRat is not None:
            aryMdlRspSur = crt_mdl_rsp(arySptExpInf,
                                       (int(cfg.varVslSpcSzeX),
                                        int(cfg.varVslSpcSzeY)),
                                       aryMdlParamsSur, cfg.varPar,
                                       lgcPrint=lgcPrint)
        del(arySptExpInf)
        # *********************************************************************

        # *********************************************************************
        # *** Create pRF time course models

        # Check whether path to npy file with hrf parameters was provided:
        if strPathHrf is not None:
            if lgcPrint:
                print('---------Load custom hrf parameters')
            aryCstPrm = np.load(strPathHrf)
            dctPrm = {}
            dctPrm['peak_delay'] = aryCstPrm[0]
            dctPrm['under_delay'] = aryCstPrm[1]
            dctPrm['peak_disp'] = aryCstPrm[2]
            dctPrm['under_disp'] = aryCstPrm[3]
            dctPrm['p_u_ratio'] = aryCstPrm[4]
        # If not, set dctPrm to None, which will result in default hrf params:
        else:
            if lgcPrint:
                print('---------Use default hrf parameters')
            dctPrm = None

        aryPrfTc = crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, cfg.varNumVol,
                                  cfg.varTr, cfg.varTmpOvsmpl,
                                  cfg.switchHrfSet,
                                  (int(cfg.varVslSpcSzeX),
                                   int(cfg.varVslSpcSzeY)),
                                  cfg.varPar, dctPrm=dctPrm, lgcPrint=lgcPrint)

        # If desired by user, create pRF time course models for supp surround:
        if varRat is not None:
            if lgcPrint:
                print('---------Add suppressive surround')
            aryPrfTcSur = crt_prf_ftr_tc(aryMdlRspSur, aryTmpExpInf,
                                         cfg.varNumVol, cfg.varTr,
                                         cfg.varTmpOvsmpl, cfg.switchHrfSet,
                                         (int(cfg.varVslSpcSzeX),
                                          int(cfg.varVslSpcSzeY)),
                                         cfg.varPar, dctPrm=dctPrm,
                                         lgcPrint=lgcPrint)
            # Concatenate aryPrfTc and aryPrfTcSur:
            aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcSur), axis=1)
        # *********************************************************************

    return aryPrfTc
Create or load pRF model time courses. Parameters ---------- dicCnfg : dict Dictionary containing config parameters. aryMdlParams : numpy arrays x, y and sigma parameters. strPathHrf : str or None Path to npy file with custom hrf parameters. If None, default parameters will be used. varRat : float, default None Ratio of size of suppressive surround to size of center pRF. lgcPrint : boolean Whether print statements should be executed. Returns ------- aryPrfTc : np.array 4D numpy array with pRF time course models, with following dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
entailment
def parse_config(args):
    """
    Try to load config, to load other journal locations.
    Otherwise, return the default location.

    Returns journal location
    """
    # Try user config or return default location early
    config_path = path.expanduser(args.config_file)
    if not path.exists(config_path):
        # Complain if they provided a non-existent config
        if args.config_file != DEFAULT_JOURNAL_RC:
            print("journal: error: config file '" + args.config_file +
                  "' not found")
            sys.exit()
        else:
            # If no config file, use default journal location
            return DEFAULT_JOURNAL

    # If we get here, assume valid config file
    config = ConfigParser.SafeConfigParser({
        'journal': {'default': '__journal'},
        '__journal': {'location': DEFAULT_JOURNAL}
    })
    config.read(config_path)

    journal_location = config.get(config.get('journal', 'default'), 'location')
    if args.journal:
        journal_location = config.get(args.journal, 'location')

    return journal_location
Try to load config, to load other journal locations Otherwise, return default location Returns journal location
entailment
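For orientation, a sketch of the INI layout parse_config reads: a [journal] section naming the default journal, plus one section per journal holding a location option. Section names and paths are hypothetical.

[journal]
default = work

[work]
location = ~/journals/work

[personal]
location = ~/journals/personal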
def record_entries(journal_location, entries): """ args entry - list of entries to record """ check_journal_dest(journal_location) current_date = datetime.datetime.today() date_header = current_date.strftime("%a %H:%M:%S %Y-%m-%d") + "\n" with open(build_journal_path(journal_location, current_date), "a") as date_file: entry_output = date_header # old style # for entry in entries: # entry_output += "-" + entry + "\n" # new style entry_output += '-' + ' '.join(entries) + "\n" entry_output += "\n" date_file.write(entry_output)
args entry - list of entries to record
entailment
def get_entry(journal_location, date): """ args date - date object returns entry text or None if entry doesn't exist """ if not isinstance(date, datetime.date): return None try: with open(build_journal_path(journal_location, date), "r") as entry_file: return entry_file.read() except IOError: return None
args date - date object returns entry text or None if entry doesn't exist
entailment
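A hedged round-trip sketch for the two journal helpers above; the journal directory is hypothetical, and the module's own check_journal_dest/build_journal_path helpers are assumed to be in scope.

import datetime

journal_location = "/tmp/journal"  # hypothetical journal directory
record_entries(journal_location, ["fixed the build", "and shipped it"])
print(get_entry(journal_location, datetime.date.today()))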
def replace( fname1, fname2, dfilter1, dfilter2, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None, ): r""" Replace data in one file with data from another file. :param fname1: Name of the input comma-separated values file, the file that contains the columns to be replaced :type fname1: FileNameExists_ :param fname2: Name of the replacement comma-separated values file, the file that contains the replacement data :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the input file :type dfilter1: :ref:`CsvDataFilter` :param dfilter2: Row and/or column filter for the replacement file :type dfilter2: :ref:`CsvDataFilter` :param has_header1: Flag that indicates whether the input comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the replacement comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: Input comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Replacement comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the input file data but with some columns replaced with data from the replacement file. If None the input file is replaced "in place" :type ofname: FileName_ :param ocols: Names of the replaced columns in the output comma-separated values file. If None the column names in the input file are used if **has_header1** is True, otherwise no header is used :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.replace.replace :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * RuntimeError (Number of input and output columns are different) * RuntimeError (Number of input and replacement columns are different) * ValueError (Column *[column_identifier]* not found) * ValueError (Number of rows mismatch between input and replacement data) .. 
[[[end]]] """ # pylint: disable=R0913,R0914 irmm_ex = pexdoc.exh.addex( RuntimeError, "Number of input and replacement columns are different" ) iomm_ex = pexdoc.exh.addex( RuntimeError, "Number of input and output columns are different" ) # Read and validate input data iobj = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1) # Read and validate replacement data robj = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2) # Assign output data structure ofname = fname1 if ofname is None else ofname icfilter = iobj.header() if iobj.cfilter is None else iobj.cfilter rcfilter = robj.header() if robj.cfilter is None else robj.cfilter ocols = icfilter if ocols is None else ocols # Miscellaneous data validation irmm_ex(len(icfilter) != len(rcfilter)) iomm_ex(len(icfilter) != len(ocols)) # Replace data iobj.replace(rdata=robj.data(filtered=True), filtered=True) iheader_upper = [ item.upper() if isinstance(item, str) else item for item in iobj.header() ] icfilter_index = [ iheader_upper.index(item.upper() if isinstance(item, str) else item) for item in icfilter ] # Create new header orow = [] if has_header1: for col_num, idata in enumerate(iobj.header()): orow.append( ocols[icfilter_index.index(col_num)] if col_num in icfilter_index else idata ) # Write (new) file iobj.write(fname=ofname, header=orow if orow else False, append=False)
r""" Replace data in one file with data from another file. :param fname1: Name of the input comma-separated values file, the file that contains the columns to be replaced :type fname1: FileNameExists_ :param fname2: Name of the replacement comma-separated values file, the file that contains the replacement data :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the input file :type dfilter1: :ref:`CsvDataFilter` :param dfilter2: Row and/or column filter for the replacement file :type dfilter2: :ref:`CsvDataFilter` :param has_header1: Flag that indicates whether the input comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the replacement comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: Input comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Replacement comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the input file data but with some columns replaced with data from the replacement file. If None the input file is replaced "in place" :type ofname: FileName_ :param ocols: Names of the replaced columns in the output comma-separated values file. If None the column names in the input file are used if **has_header1** is True, otherwise no header is used :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.replace.replace :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * RuntimeError (Number of input and output columns are different) * RuntimeError (Number of input and replacement columns are different) * ValueError (Column *[column_identifier]* not found) * ValueError (Number of rows mismatch between input and replacement data) .. [[[end]]]
entailment
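A hedged usage sketch of replace(); the file and column names are hypothetical, and passing a single column name as the data filter is an assumption about the CsvDataFilter format. It swaps the 'Temp' column of data.csv for the 'TempCal' column of calibration.csv and writes out.csv.

replace(
    "data.csv", "calibration.csv",
    dfilter1="Temp", dfilter2="TempCal",
    ofname="out.csv",
)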
def spmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6): """Normalized SPM HRF function from sum of two gamma PDFs Parameters ---------- t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF function as used in SPM. It has the following defaults: - delay of response (relative to onset) : 6s - delay of undershoot (relative to onset) : 16s - dispersion of response : 1s - dispersion of undershoot : 1s - ratio of response to undershoot : 6s - onset : 0s - length of kernel : 32s References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation """ return spm_hrf_compat(t, peak_delay=peak_delay, under_delay=under_delay, peak_disp=peak_disp, under_disp=under_disp, p_u_ratio=p_u_ratio, normalize=True)
Normalized SPM HRF function from sum of two gamma PDFs Parameters ---------- t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF function as used in SPM. It has the following defaults: - delay of response (relative to onset) : 6s - delay of undershoot (relative to onset) : 16s - dispersion of response : 1s - dispersion of undershoot : 1s - ratio of response to undershoot : 6s - onset : 0s - length of kernel : 32s References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation
entailment
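A quick sampling sketch, assuming spmt and numpy are importable; it samples the normalized HRF over the default 32 s kernel.

import numpy as np

t = np.arange(0, 32, 0.1)            # 32 s kernel at 0.1 s resolution
hrf = spmt(t)                        # canonical, normalized SPM HRF
print(hrf.shape, t[np.argmax(hrf)])  # peak around 5 s for default parameters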
def dspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6): """ SPM canonical HRF derivative, HRF derivative values for time values `t` Parameters ---------- t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF derivative function as used in SPM. [2] It is the numerical difference of the HRF sampled at time `t` minus the values sampled at time `t` -1 References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation """ t = np.asarray(t) aryRsp1 = spmt(t, peak_delay=peak_delay, under_delay=under_delay, peak_disp=peak_disp, under_disp=under_disp, p_u_ratio=p_u_ratio) aryRsp2 = spmt(t-1, peak_delay=peak_delay, under_delay=under_delay, peak_disp=peak_disp, under_disp=under_disp, p_u_ratio=p_u_ratio) return aryRsp1 - aryRsp2
SPM canonical HRF derivative, HRF derivative values for time values `t` Parameters ---------- t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF derivative function as used in SPM. [2] It is the numerical difference of the HRF sampled at time `t` minus the values sampled at time `t` -1 References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation
entailment
def ddspmt(t, peak_delay=6, under_delay=16, peak_disp=1, under_disp=1, p_u_ratio=6): """ SPM canonical HRF dispersion derivative, values for time values `t` Parameters ---------- t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF dispersion derivative function as used in SPM [2] It is the numerical difference between the HRF sampled at time `t`, and values at `t` for another HRF shape with a small change in the peak dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`). References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation """ _spm_dd_func = partial(spmt, peak_delay=peak_delay, under_delay=under_delay, under_disp=under_disp, p_u_ratio=p_u_ratio, peak_disp=1.01) return (spmt(t) - _spm_dd_func(t)) / 0.01
SPM canonical HRF dispersion derivative, values for time values `t` Parameters ---------- t : array-like vector of times at which to sample HRF Returns ------- hrf : array vector length ``len(t)`` of samples from HRF at times `t` Notes ----- [1] This is the canonical HRF dispersion derivative function as used in SPM [2] It is the numerical difference between the HRF sampled at time `t`, and values at `t` for another HRF shape with a small change in the peak dispersion parameter (``peak_disp`` in func:`spm_hrf_compat`). References: ----- [1] http://nipy.org/ [2] https://github.com/fabianp/hrf_estimation
entailment
def create_boxcar(aryCnd, aryOns, aryDrt, varTr, varNumVol, aryExclCnd=None,
                  varTmpOvsmpl=1000.):
    """
    Creation of condition time courses in temporally upsampled space.

    Parameters
    ----------
    aryCnd : np.array
        1D array with condition identifiers (every condition has its own int)
    aryOns : np.array, same len as aryCnd
        1D array with condition onset times in seconds.
    aryDrt : np.array, same len as aryCnd
        1D array with condition durations of different conditions in seconds.
    varTr : float, positive
        Time to repeat (TR) of the (fMRI) experiment.
    varNumVol : float, positive
        Number of volumes of the (fMRI) data.
    aryExclCnd : array
        1D array containing condition identifiers for conditions to be excluded
    varTmpOvsmpl : float, positive
        Factor by which the time courses should be temporally upsampled.

    Returns
    -------
    aryBxCrOut : np.array, float16
        Condition time courses in temporally upsampled space.

    References:
    -----
    [1] https://github.com/fabianp/hrf_estimation
    """
    if aryExclCnd is not None:
        for cond in aryExclCnd:
            aryOns = aryOns[aryCnd != cond]
            aryDrt = aryDrt[aryCnd != cond]
            aryCnd = aryCnd[aryCnd != cond]

    resolution = varTr / float(varTmpOvsmpl)
    aryCnd = np.asarray(aryCnd)
    aryOns = np.asarray(aryOns, dtype=float)
    unique_conditions = np.sort(np.unique(aryCnd))
    boxcar = []

    for c in unique_conditions:
        tmp = np.zeros(int(varNumVol * varTr/resolution))
        onset_c = aryOns[aryCnd == c]
        duration_c = aryDrt[aryCnd == c]
        onset_idx = np.round(onset_c / resolution).astype(int)
        duration_idx = np.round(duration_c / resolution).astype(int)
        aux = np.arange(int(varNumVol * varTr/resolution))
        for start, dur in zip(onset_idx, duration_idx):
            lgc = np.logical_and(aux >= start, aux < start + dur)
            tmp = tmp + lgc
        assert np.all(np.less(tmp, 2))
        boxcar.append(tmp)

    aryBxCrOut = np.array(boxcar).T

    if aryBxCrOut.shape[1] == 1:
        aryBxCrOut = np.squeeze(aryBxCrOut)

    return aryBxCrOut.astype('float16')
Creation of condition time courses in temporally upsampled space. Parameters ---------- aryCnd : np.array 1D array with condition identifiers (every condition has its own int) aryOns : np.array, same len as aryCnd 1D array with condition onset times in seconds. aryDrt : np.array, same len as aryCnd 1D array with condition durations of different conditions in seconds. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varNumVol : float, positive Number of volumes of the (fMRI) data. aryExclCnd : array 1D array containing condition identifiers for conditions to be excluded varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. Returns ------- aryBxCrOut : np.array, float16 Condition time courses in temporally upsampled space. References: ----- [1] https://github.com/fabianp/hrf_estimation
entailment
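A worked example under the stated conventions: two conditions, three events, a 2 s TR, six volumes and tenfold temporal upsampling yield a 60 x 2 boxcar array (one column per condition).

import numpy as np

aryCnd = np.array([1, 2, 1])
aryOns = np.array([0., 4., 8.])    # onsets in seconds
aryDrt = np.array([2., 2., 2.])    # durations in seconds
aryBxCr = create_boxcar(aryCnd, aryOns, aryDrt, varTr=2., varNumVol=6,
                        varTmpOvsmpl=10.)
print(aryBxCr.shape)               # (60, 2): upsampled time x conditions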
def cnvl_tc(idxPrc, aryPrfTcChunk, lstHrf, varTr, varNumVol, varTmpOvsmpl,
            queOut, varHrfLen=32., dctPrm=None):
    """Convolution of time courses with HRF model.

    Parameters
    ----------
    idxPrc : int, positive
        Process ID of the process calling this function (for CPU
        multi-threading). In GPU version, this parameter is 0 (just one thread
        on CPU).
    aryPrfTcChunk : np.array
        2D array with model time course to be convolved with HRF.
    lstHrf : list
        List containing the different HRF functions.
    varTr : float, positive
        Time to repeat (TR) of the (fMRI) experiment.
    varNumVol : float, positive
        Number of volumes of the (fMRI) data.
    varTmpOvsmpl : float, positive
        Factor by which the time courses should be temporally upsampled.
    queOut : multiprocessing.queues.Queue
        Queue to put the results on.
    varHrfLen : float, positive, default=32
        Length of the HRF time course in seconds.
    dctPrm : dictionary, default None
        Dictionary with customized hrf parameters. If this is None, default
        hrf parameters will be used.

    Returns
    -------
    lstOut : list
        int, positive : Process ID of the process calling this function.
        2D np.array, float16 : Model time course convolved with HRF.

    References:
    -----
    [1] https://github.com/fabianp/hrf_estimation
    """
    # Adjust the input, if necessary, such that input is 2D, with last dim time
    tplInpShp = aryPrfTcChunk.shape
    aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))

    # Prepare list to collect hrf basis functions
    lstBse = []
    # Prepare array that contains time intervals
    aryTme = np.linspace(0, varHrfLen, int((varHrfLen // varTr) * varTmpOvsmpl))
    for fnHrf in lstHrf:
        # If hrf parameter dictionary is None, run with default parameters
        if dctPrm is None:
            vecTmpBse = fnHrf(aryTme)
        # Otherwise, run with custom parameters
        else:
            vecTmpBse = fnHrf(aryTme, **dctPrm)
        # Normalise HRF so that the sum of values is 1 (see FSL)
        # otherwise, after convolution values for predictors are very high
        vecTmpBse = np.divide(vecTmpBse, np.sum(vecTmpBse))

        lstBse.append(vecTmpBse)

    # Get frame times, i.e. start point of every volume in seconds
    vecFrms = np.arange(0, varTr * varNumVol, varTr)
    # Get supersampled frames times, i.e. start point of every volume in
    # upsampled res, since convolution takes place in temp. upsampled space
    vecFrmTms = np.arange(0, varTr * varNumVol, varTr / varTmpOvsmpl)

    # Prepare an empty array for output
    aryConv = np.zeros((aryPrfTcChunk.shape[0], len(lstHrf), varNumVol),
                       dtype=np.float16)

    # Each time course is convolved with the HRF separately, because the
    # numpy convolution function can only be used on one-dimensional data.
    # Thus, we have to loop through time courses:
    for idxTc in range(0, aryConv.shape[0]):

        # Extract the current time course (already in upsampled space):
        vecTcUps = aryPrfTcChunk[idxTc, :]

        # *** convolve
        for indBase, base in enumerate(lstBse):
            # Make sure base and vecTcUps are float64 to avoid overflow
            base = base.astype(np.float64)
            vecTcUps = vecTcUps.astype(np.float64)
            # Perform the convolution (previously: np.convolve)
            col = fftconvolve(base, vecTcUps, mode='full')[:vecTcUps.size]
            # Get function for downsampling
            f = interp1d(vecFrmTms, col)
            # Downsample to original resolution to match res of data
            # take the value from the centre of each volume's period (see FSL)
            aryConv[idxTc, indBase, :] = f(vecFrms + varTr/2.
).astype(np.float16) # Determine output shape tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (varNumVol, ) if queOut is None: # if user is not using multiprocessing, return the array directly return aryConv.reshape(tplOutShp) else: # Create list containing the convolved timecourses, and the process ID: lstOut = [idxPrc, aryConv.reshape(tplOutShp)] # Put output to queue: queOut.put(lstOut)
Convolution of time courses with HRF model. Parameters ---------- idxPrc : int, positive Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0 (just one thread on CPU). aryPrfTcChunk : np.array 2D array with model time course to be convolved with HRF. lstHrf : list List containing the different HRF functions. varTr : float, positive Time to repeat (TR) of the (fMRI) experiment. varNumVol : float, positive Number of volumes of the (fMRI) data. varTmpOvsmpl : float, positive Factor by which the time courses should be temporally upsampled. queOut : multiprocessing.queues.Queue Queue to put the results on. varHrfLen : float, positive, default=32 Length of the HRF time course in seconds. dctPrm : dictionary, default None Dictionary with customized hrf parameters. If this is None, default hrf parameters will be used. Returns ------- lstOut : list int, positive : Process ID of the process calling this function. 2D np.array, float16 : Model time course convolved with HRF. References: ----- [1] https://github.com/fabianp/hrf_estimation
entailment
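A single-process sketch of cnvl_tc (with queOut=None the convolved array is returned directly); the spmt function from above is reused as the HRF basis, and the module-level fftconvolve/interp1d imports the function relies on are assumed to be in place.

import numpy as np

# 4 model time courses, already in upsampled space (30 volumes x 10 oversampling)
aryTc = np.random.rand(4, 300).astype(np.float16)
aryConv = cnvl_tc(0, aryTc, [spmt], varTr=2., varNumVol=30,
                  varTmpOvsmpl=10., queOut=None)
print(aryConv.shape)  # (4, 1, 30): models x HRF bases x volumes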
def create_inputs_to_reference(job_data, input_files, input_directories): """ Creates a dictionary with the summarized information in job_data, input_files and input_directories :param job_data: The job data specifying input parameters other than files and directories. :param input_files: A dictionary describing the input files. :param input_directories: A dictionary describing the input directories. :return: A summarized dictionary containing information about all given inputs. """ return {**deepcopy(job_data), **deepcopy(input_files), **deepcopy(input_directories)}
Creates a dictionary with the summarized information in job_data, input_files and input_directories :param job_data: The job data specifying input parameters other than files and directories. :param input_files: A dictionary describing the input files. :param input_directories: A dictionary describing the input directories. :return: A summarized dictionary containing information about all given inputs.
entailment
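A toy example; the nested file structure is an assumption inferred from _resolve_file below (an isArray flag plus a files list of attribute dicts).

job_data = {'threshold': 0.5}
input_files = {'my_file': {'isArray': False,
                           'files': [{'basename': 'a.txt'}]}}
inputs = create_inputs_to_reference(job_data, input_files, {})
print(sorted(inputs))  # ['my_file', 'threshold']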
def _partition_all_internal(s, sep): """ Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep """ parts = list(s.partition(sep)) # if sep found if parts[1] == sep: new_parts = partition_all(parts[2], sep) parts.pop() parts.extend(new_parts) return [p for p in parts if p] else: if parts[0]: return [parts[0]] else: return []
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. :param s: The string to split. :param sep: A separator string. :return: A list of parts split by sep
entailment
def partition_all(s, sep): """ Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. If sep is a list, all separators are evaluated. :param s: The string to split. :param sep: A separator string or a list of separator strings. :return: A list of parts split by sep """ if isinstance(sep, list): parts = _partition_all_internal(s, sep[0]) sep = sep[1:] for s in sep: tmp = [] for p in parts: tmp.extend(_partition_all_internal(p, s)) parts = tmp return parts else: return _partition_all_internal(s, sep)
Uses str.partition() to split every occurrence of sep in s. The returned list does not contain empty strings. If sep is a list, all separators are evaluated. :param s: The string to split. :param sep: A separator string or a list of separator strings. :return: A list of parts split by sep
entailment
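A quick check of the splitting behaviour with a list of separators; every separator occurrence becomes its own list element.

print(partition_all('a.b(c)', ['.', '(', ')']))
# -> ['a', '.', 'b', '(', 'c', ')']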
def split_input_references(to_split):
    """
    Returns the given string in normal strings and unresolved input references.
    An input reference is identified as something of the following form $(...).

    Example:
    split_input_references("a$(b)cde()$(fg)") == ["a", "$(b)", "cde()", "$(fg)"]

    :param to_split: The string to split
    :raise InvalidInputReference: If an input reference is not closed and a new reference starts or the string ends.
    :return: A list of normal strings and unresolved input references.
    """
    parts = partition_all(to_split, [INPUT_REFERENCE_START, INPUT_REFERENCE_END])

    result = []

    part = []
    in_reference = False
    for p in parts:
        if in_reference:
            if p == INPUT_REFERENCE_START:
                raise InvalidInputReference('A new input reference has been started, although the old input '
                                            'reference has not yet been completed.\n{}'.format(to_split))
            elif p == ")":
                part.append(")")
                result.append(''.join(part))
                part = []
                in_reference = False
            else:
                part.append(p)
        else:
            if p == INPUT_REFERENCE_START:
                if part:
                    result.append(''.join(part))
                part = [INPUT_REFERENCE_START]
                in_reference = True
            else:
                part.append(p)

    if in_reference:
        raise InvalidInputReference('Input reference not closed.\n{}'.format(to_split))
    elif part:
        result.append(''.join(part))

    return result
Returns the given string in normal strings and unresolved input references.
An input reference is identified as something of the following form $(...).

Example:
split_input_references("a$(b)cde()$(fg)") == ["a", "$(b)", "cde()", "$(fg)"]

:param to_split: The string to split
:raise InvalidInputReference: If an input reference is not closed and a new reference starts or the string ends.
:return: A list of normal strings and unresolved input references.
entailment
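Reproducing the docstring example, assuming INPUT_REFERENCE_START is '$(' and INPUT_REFERENCE_END is ')':

print(split_input_references('a$(b)cde()$(fg)'))
# -> ['a', '$(b)', 'cde()', '$(fg)']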
def split_all(reference, sep): """ Splits a given string at a given separator or list of separators. :param reference: The reference to split. :param sep: Separator string or list of separator strings. :return: A list of split strings """ parts = partition_all(reference, sep) return [p for p in parts if p not in sep]
Splits a given string at a given separator or list of separators. :param reference: The reference to split. :param sep: Separator string or list of separator strings. :return: A list of split strings
entailment
def _resolve_file(attributes, input_file, input_identifier, input_reference): """ Returns the attributes in demand of the input file. :param attributes: A list of attributes to get from the input_file. :param input_file: The file from which to get the attributes. :param input_identifier: The input identifier of the given file. :param input_reference: The reference string :return: The attribute in demand """ if input_file['isArray']: raise InvalidInputReference('Input References to Arrays of input files are currently not supported.\n' '"{}" is an array of files and can not be resolved for input references:' '\n{}'.format(input_identifier, input_reference)) single_file = input_file['files'][0] try: return _get_dict_element(single_file, attributes) except KeyError: raise InvalidInputReference('Could not get attributes "{}" from input file "{}", needed in input reference:' '\n{}'.format(attributes, input_identifier, input_reference))
Returns the attributes in demand of the input file. :param attributes: A list of attributes to get from the input_file. :param input_file: The file from which to get the attributes. :param input_identifier: The input identifier of the given file. :param input_reference: The reference string :return: The attribute in demand
entailment
def _resolve_directory(attributes, input_directory, input_identifier, input_reference):
    """
    Returns the attributes in demand of the input directory.

    :param attributes: A list of attributes to get from the input directory.
    :param input_directory: The directory from which to get the attributes.
    :param input_identifier: The input identifier of the given directory.
    :param input_reference: The reference string
    :return: The attribute in demand
    """
    if input_directory['isArray']:
        raise InvalidInputReference('Input References to Arrays of input directories are currently not supported.\n'
                                    'input directory "{}" is an array of directories and can not be resolved for '
                                    'input references:\n{}'.format(input_identifier, input_reference))

    single_directory = input_directory['directories'][0]

    try:
        return _get_dict_element(single_directory, attributes)
    except KeyError:
        raise InvalidInputReference('Could not get attributes "{}" from input directory "{}", needed in input '
                                    'reference:\n{}'.format(attributes, input_identifier, input_reference))
Returns the attributes in demand of the input directory. :param attributes: A list of attributes to get from the input directory. :param input_directory: The directory from which to get the attributes. :param input_identifier: The input identifier of the given directory. :param input_reference: The reference string :return: The attribute in demand
entailment
def resolve_input_reference(reference, inputs_to_reference):
    """
    Replaces a given input_reference by a string extracted from inputs_to_reference.

    :param reference: The input reference to resolve.
    :param inputs_to_reference: A dictionary containing information about the given inputs.
    :raise InvalidInputReference: If the given input reference could not be resolved.
    :return: A string which is the resolved input reference.
    """
    if not reference.startswith('{}inputs.'.format(INPUT_REFERENCE_START)):
        raise InvalidInputReference('An input reference must have the following form '
                                    '"$(inputs.<input_name>[.<attribute>])".\n'
                                    'The invalid reference is: "{}"'.format(reference))
    # remove "$(" and ")"
    reference = reference[2:-1]
    parts = split_all(reference, ATTRIBUTE_SEPARATOR_SYMBOLS)

    if len(parts) < 2:
        raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following '
                                    'input reference does not comply with it:\n{}'.format(reference))
    elif parts[0] != "inputs":
        raise InvalidInputReference('InputReference should at least contain "$(inputs.identifier)". The following '
                                    'input reference does not comply with it:\n$({})'.format(reference))
    else:
        input_identifier = parts[1]
        input_to_reference = inputs_to_reference.get(input_identifier)
        if input_to_reference is None:
            raise InvalidInputReference('Input identifier "{}" not found in inputs, but needed in input reference:\n{}'
                                        .format(input_identifier, reference))
        elif isinstance(input_to_reference, dict):
            if 'files' in input_to_reference:
                return _resolve_file(parts[2:], input_to_reference, input_identifier, reference)
            elif 'directories' in input_to_reference:
                return _resolve_directory(parts[2:], input_to_reference, input_identifier, reference)
            else:
                raise InvalidInputReference('Unknown input type for input identifier "{}"'.format(input_identifier))
        else:
            if len(parts) > 2:
                raise InvalidInputReference('Attribute "{}" of input reference "{}" could not be resolved'
                                            .format(parts[2], reference))
            else:
                return parts[1]
Replaces a given input_reference by a string extracted from inputs_to_reference. :param reference: The input reference to resolve. :param inputs_to_reference: A dictionary containing information about the given inputs. :raise InvalidInputReference: If the given input reference could not be resolved. :return: A string which is the resolved input reference.
entailment
def resolve_input_references(to_resolve, inputs_to_reference): """ Resolves input references given in the string to_resolve by using the inputs_to_reference. See http://www.commonwl.org/user_guide/06-params/index.html for more information. Example: "$(inputs.my_file.nameroot).md" -> "filename.md" :param to_resolve: The path to match :param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename). :return: A string in which the input references are replaced with actual values. """ splitted = split_input_references(to_resolve) result = [] for part in splitted: if is_input_reference(part): result.append(str(resolve_input_reference(part, inputs_to_reference))) else: result.append(part) return ''.join(result)
Resolves input references given in the string to_resolve by using the inputs_to_reference. See http://www.commonwl.org/user_guide/06-params/index.html for more information. Example: "$(inputs.my_file.nameroot).md" -> "filename.md" :param to_resolve: The path to match :param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename). :return: A string in which the input references are replaced with actual values.
entailment
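An end-to-end sketch of the docstring example; the inputs structure follows the shape _resolve_file expects and is otherwise an assumption.

inputs = {'my_file': {'isArray': False,
                      'files': [{'nameroot': 'filename',
                                 'basename': 'filename.txt'}]}}
print(resolve_input_references('$(inputs.my_file.nameroot).md', inputs))
# -> filename.md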
def data(self):
    """
    Returns a dictionary containing all the passed data and an item
    ``error_list`` which holds the result of :attr:`error_list`.
    """
    res = {'error_list': self.error_list}
    res.update(super(ValidationErrors, self).data)
    return res
Returns a dictionary containing all the passed data and an item
``error_list`` which holds the result of :attr:`error_list`.
entailment
def circDiff(length, ary1, ary2):
    """calculate the circular difference between two paired arrays.
    This function will return the difference between pairs of numbers;
    however the difference that is output will be minimal in the sense
    that if we assume an array with length = 4: [0, 1, 2, 3], the
    difference between 0 and 3 will not be 3, but 1 (i.e. circular
    difference)"""

    x = np.arange(length)
    mod = length % 2
    if mod == 0:
        temp = np.ones(length)
        temp[length//2:] = -1
    else:
        x = x - np.floor(length/2)
        temp = np.copy(x)
        temp[np.less(x, 0)] = 1
        temp[np.greater(x, 0)] = -1

    x = np.cumsum(temp)

    diagDiffmat = np.empty((length, length))
    for idx in np.arange(length):
        x = np.roll(x, 1)
        diagDiffmat[idx, :] = x

    # return diagDiffmat[ary1][ary2]
    flat = diagDiffmat.flatten()
    ind = ary1*diagDiffmat.shape[0] + ary2
    ind = ind.astype('int')
    return flat[ind]
calculate the circular difference between two paired arrays. This function will return the difference between pairs of numbers; however the difference that is output will be minimal in the sense that if we assume an array with length = 4: [0, 1, 2, 3], the difference between 0 and 3 will not be 3, but 1 (i.e. circular difference)
entailment
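A quick numeric check of the wrap-around behaviour for length = 4, matching the docstring example:

import numpy as np

print(circDiff(4, np.array([0, 1]), np.array([3, 3])))
# -> [1. 2.]  (0 to 3 wraps around to a distance of 1)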
def getPartnerURL(self, CorpNum, TOGO):
    """ Get Popbill partner page URL
        args
            CorpNum : Popbill member's business registration number
            TOGO : "CHRG"
        return
            URL
        raise
            PopbillException
    """
    try:
        return linkhub.getPartnerURL(self._getToken(CorpNum), TOGO)
    except LinkhubException as LE:
        raise PopbillException(LE.code, LE.message)
팝빌 νšŒμ› μž”μ—¬ν¬μΈνŠΈ 확인 args CorpNum : νŒλΉŒνšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ TOGO : "CHRG" return URL raise PopbillException
entailment
def getBalance(self, CorpNum):
    """ Check a Popbill member's remaining point balance
        args
            CorpNum : business registration number of the member to check
        return
            remaining points as float
        raise
            PopbillException
    """
    try:
        return linkhub.getBalance(self._getToken(CorpNum))
    except LinkhubException as LE:
        raise PopbillException(LE.code, LE.message)
팝빌 νšŒμ› μž”μ—¬ν¬μΈνŠΈ 확인 args CorpNum : ν™•μΈν•˜κ³ μž ν•˜λŠ” νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ return μž”μ—¬ν¬μΈνŠΈ by float raise PopbillException
entailment
def getAccessURL(self, CorpNum, UserID):
    """ Popbill login URL
        args
            CorpNum : member's business registration number
            UserID : member's Popbill ID
        return
            URL containing a 30-second security token
        raise
            PopbillException
    """
    result = self._httpget('/?TG=LOGIN', CorpNum, UserID)
    return result.url
Popbill login URL
args
    CorpNum : member's business registration number
    UserID : member's Popbill ID
return
    URL containing a 30-second security token
raise
    PopbillException
entailment
def getChargeURL(self, CorpNum, UserID):
    """ Popbill linked member point charge URL
        args
            CorpNum : member's business registration number
            UserID : member's Popbill ID
        return
            URL containing a 30-second security token
        raise
            PopbillException
    """
    result = self._httpget('/?TG=CHRG', CorpNum, UserID)
    return result.url
팝빌 μ—°λ™νšŒμ› 포인트 μΆ©μ „ URL args CorpNum : νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ UserID : νšŒμ› νŒλΉŒμ•„μ΄λ”” return 30초 λ³΄μ•ˆ 토큰을 ν¬ν•¨ν•œ url raise PopbillException
entailment
def checkIsMember(self, CorpNum):
    """ Check whether a company is registered as a Popbill member
        args
            CorpNum : member's business registration number
        return
            membership status as True/False
        raise
            PopbillException
    """
    if CorpNum is None or CorpNum == '':
        raise PopbillException(-99999999, "The business registration number was not entered.")

    return self._httpget('/Join?CorpNum=' + CorpNum + '&LID=' + self.__linkID, None, None)
νšŒμ›κ°€μž…μ—¬λΆ€ 확인 args CorpNum : νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ return νšŒμ›κ°€μž…μ—¬λΆ€ True/False raise PopbillException
entailment
def joinMember(self, JoinInfo):
    """ Popbill membership registration
        args
            JoinInfo : membership registration info. Reference JoinForm class
        return
            processing result. consists of code and message
        raise
            PopbillException
    """
    JoinInfo.LinkID = self.__linkID
    postData = self._stringtify(JoinInfo)
    return self._httppost('/Join', postData)
팝빌 νšŒμ›κ°€μž… args JoinInfo : νšŒμ›κ°€μž…μ •λ³΄. Reference JoinForm class return 처리결과. consist of code and message raise PopbillException
entailment
def updateContact(self, CorpNum, ContactInfo, UserID=None):
    """ Update contact information
        args
            CorpNum : member's business registration number
            ContactInfo : contact information, Reference ContactInfo class
            UserID : member's Popbill ID
        return
            processing result. consists of code and message
        raise
            PopbillException
    """
    postData = self._stringtify(ContactInfo)
    return self._httppost('/IDs', postData, CorpNum, UserID)
λ‹΄λ‹Ήμž 정보 μˆ˜μ • args CorpNum : νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ ContactInfo : λ‹΄λ‹Ήμž 정보, Reference ContactInfo class UserID : νšŒμ› 아이디 return 처리결과. consist of code and message raise PopbillException
entailment
def updateCorpInfo(self, CorpNum, CorpInfo, UserID=None):
    """ Update company information
        args
            CorpNum : member's business registration number
            CorpInfo : company information, Reference CorpInfo class
            UserID : member's Popbill ID
        return
            processing result. consists of code and message
        raise
            PopbillException
    """
    postData = self._stringtify(CorpInfo)
    return self._httppost('/CorpInfo', postData, CorpNum, UserID)
λ‹΄λ‹Ήμž 정보 μˆ˜μ • args CorpNum : νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ CorpInfo : νšŒμ‚¬ 정보, Reference CorpInfo class UserID : νšŒμ› 아이디 return 처리결과. consist of code and message raise PopbillException
entailment
def registContact(self, CorpNum, ContactInfo, UserID=None):
    """ Register an additional contact
        args
            CorpNum : member's business registration number
            ContactInfo : contact information, Reference ContactInfo class
            UserID : member's Popbill ID
        return
            processing result. consists of code and message
        raise
            PopbillException
    """
    postData = self._stringtify(ContactInfo)
    return self._httppost('/IDs/New', postData, CorpNum, UserID)
λ‹΄λ‹Ήμž μΆ”κ°€ args CorpNum : νšŒμ› μ‚¬μ—…μžλ²ˆν˜Έ ContactInfo : λ‹΄λ‹Ήμž 정보, Reference ContactInfo class UserID : νšŒμ› 아이디 return 처리결과. consist of code and message raise PopbillException
entailment
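A hedged sketch tying the member-management calls above together; svc stands for an already-constructed Popbill service instance (its constructor is not shown in this section) and the business registration number is hypothetical.

CorpNum = "1234567890"  # hypothetical business registration number

if svc.checkIsMember(CorpNum):
    print("balance:", svc.getBalance(CorpNum))
    print("login:", svc.getAccessURL(CorpNum, "testuser"))
else:
    print("not registered yet; see joinMember()")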
def TemplateValidator(value): """Try to compile a string into a Django template""" try: Template(value) except Exception as e: raise ValidationError( _("Cannot compile template (%(exception)s)"), params={"exception": e} )
Try to compile a string into a Django template
entailment
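Exercising the validator directly (a hedged sketch: this assumes it runs inside a configured Django project); a well-formed template passes silently, while an unclosed block tag fails to compile and surfaces as a ValidationError.

from django.core.exceptions import ValidationError

TemplateValidator('{% if user %}Hi{% endif %}')  # compiles fine, returns None

try:
    TemplateValidator('{% if user %}')           # unclosed block tag
except ValidationError as exc:
    print(exc)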
def merge( fname1, fname2, dfilter1=None, dfilter2=None, has_header1=True, has_header2=True, frow1=0, frow2=0, ofname=None, ocols=None, ): r""" Merge two comma-separated values files. Data columns from the second file are appended after data columns from the first file. Empty values in columns are used if the files have different number of rows :param fname1: Name of the first comma-separated values file, the file whose columns appear first in the output file :type fname1: FileNameExists_ :param fname2: Name of the second comma-separated values file, the file whose columns appear last in the output file :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the first file. If None no data filtering is done on the file :type dfilter1: :ref:`CsvDataFilter` or None :param dfilter2: Row and/or column filter for the second file. If None no data filtering is done on the file :type dfilter2: :ref:`CsvDataFilter` or None :param has_header1: Flag that indicates whether the first comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the second comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: First comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Second comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the data from the first and second files. If None the first file is replaced "in place" :type ofname: FileName_ or None :param ocols: Column names of the output comma-separated values file. If None the column names in the first and second files are used if **has_header1** and/or **has_header2** are True. The column labels :code:`'Column [column_number]'` are used when one of the two files does not have a header, where :code:`[column_number]` is an integer representing the column number (column 0 is the leftmost column). No header is used if **has_header1** and **has_header2** are False :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.merge.merge :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (Combined columns in data files and output columns are different) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. 
    [[[end]]]
    """
    # pylint: disable=R0913,R0914
    iomm_ex = pexdoc.exh.addex(
        RuntimeError, "Combined columns in data files and output columns are different"
    )
    # Read and validate file 1
    obj1 = CsvFile(fname=fname1, dfilter=dfilter1, has_header=has_header1, frow=frow1)
    # Read and validate file 2
    obj2 = CsvFile(fname=fname2, dfilter=dfilter2, has_header=has_header2, frow=frow2)
    # Assign output data structure
    ofname = fname1 if ofname is None else ofname
    cfilter1 = obj1.header() if obj1.cfilter is None else obj1.cfilter
    cfilter2 = obj2.header() if obj2.cfilter is None else obj2.cfilter
    # Create new header
    cols1 = len(cfilter1)
    cols2 = len(cfilter2)
    if (ocols is None) and has_header1 and has_header2:
        ocols = [cfilter1 + cfilter2]
    elif (ocols is None) and has_header1 and (not has_header2):
        ocols = [
            cfilter1
            + [
                "Column {0}".format(item)
                for item in range(cols1 + 1, cols1 + cols2 + 1)
            ]
        ]
    elif (ocols is None) and (not has_header1) and has_header2:
        ocols = [["Column {0}".format(item) for item in range(1, cols1 + 1)] + cfilter2]
    elif ocols is None:
        ocols = []
    else:
        iomm_ex(cols1 + cols2 != len(ocols))
        ocols = [ocols]
    # Even out rows
    delta = obj1.rows(filtered=True) - obj2.rows(filtered=True)
    data1 = obj1.data(filtered=True)
    data2 = obj2.data(filtered=True)
    if delta > 0:
        row = [cols2 * [None]]
        data2 += delta * row
    elif delta < 0:
        row = [cols1 * [None]]
        data1 += abs(delta) * row
    data = ocols
    for item1, item2 in zip(data1, data2):
        data.append(item1 + item2)
    write(fname=ofname, data=data, append=False)
r""" Merge two comma-separated values files. Data columns from the second file are appended after data columns from the first file. Empty values in columns are used if the files have different number of rows :param fname1: Name of the first comma-separated values file, the file whose columns appear first in the output file :type fname1: FileNameExists_ :param fname2: Name of the second comma-separated values file, the file whose columns appear last in the output file :type fname2: FileNameExists_ :param dfilter1: Row and/or column filter for the first file. If None no data filtering is done on the file :type dfilter1: :ref:`CsvDataFilter` or None :param dfilter2: Row and/or column filter for the second file. If None no data filtering is done on the file :type dfilter2: :ref:`CsvDataFilter` or None :param has_header1: Flag that indicates whether the first comma-separated values file has column headers in its first line (True) or not (False) :type has_header1: boolean :param has_header2: Flag that indicates whether the second comma-separated values file has column headers in its first line (True) or not (False) :type has_header2: boolean :param frow1: First comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow1: NonNegativeInteger_ :param frow2: Second comma-separated values file first data row (starting from 1). If 0 the row where data starts is auto-detected as the first row that has a number (integer of float) in at least one of its columns :type frow2: NonNegativeInteger_ :param ofname: Name of the output comma-separated values file, the file that will contain the data from the first and second files. If None the first file is replaced "in place" :type ofname: FileName_ or None :param ocols: Column names of the output comma-separated values file. If None the column names in the first and second files are used if **has_header1** and/or **has_header2** are True. The column labels :code:`'Column [column_number]'` are used when one of the two files does not have a header, where :code:`[column_number]` is an integer representing the column number (column 0 is the leftmost column). No header is used if **has_header1** and **has_header2** are False :type ocols: list or None .. [[[cog cog.out(exobj.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for pcsv.merge.merge :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \`dfilter1\` is not valid) * RuntimeError (Argument \`dfilter2\` is not valid) * RuntimeError (Argument \`fname1\` is not valid) * RuntimeError (Argument \`fname2\` is not valid) * RuntimeError (Argument \`frow1\` is not valid) * RuntimeError (Argument \`frow2\` is not valid) * RuntimeError (Argument \`ocols\` is not valid) * RuntimeError (Argument \`ofname\` is not valid) * RuntimeError (Column headers are not unique in file *[fname]*) * RuntimeError (Combined columns in data files and output columns are different) * RuntimeError (File *[fname]* has no valid data) * RuntimeError (File *[fname]* is empty) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]]
entailment
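A minimal usage sketch with hypothetical file names; the columns of b.csv are appended after those of a.csv, and the shorter file is padded with empty values.

merge("a.csv", "b.csv", ofname="ab.csv")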
def pyprf_opt_brute(strCsvCnfg, objNspc, lgcTest=False, strPathHrf=None, varRat=None): """ Function for optimizing given pRF paramaters using brute-force grid search. Parameters ---------- strCsvCnfg : str Absolute file path of config file. objNspc : object Name space from command line arguments. lgcTest : Boolean Whether this is a test (pytest). If yes, absolute path of pyprf libary will be prepended to config file paths. strPathHrf : str or None: Path to npy file with custom hrf parameters. If None, default parameters will be used. varRat : float, default None Ratio of size suppressive surround to size of center pRF """ # ************************************************************************* # *** Check time print('---pRF analysis') varTme01 = time.time() # ************************************************************************* # ************************************************************************* # *** Preparations # Load config parameters from csv file into dictionary: dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest) # Load config parameters from dictionary into namespace: cfg = cls_set_config(dicCnfg) # Conditional imports: if cfg.strVersion == 'gpu': from pyprf_feature.analysis.find_prf_gpu import find_prf_gpu if ((cfg.strVersion == 'cython') or (cfg.strVersion == 'numpy')): from pyprf_feature.analysis.find_prf_cpu import find_prf_cpu # Convert preprocessing parameters (for temporal smoothing) # from SI units (i.e. [s]) into units of data array (volumes): cfg.varSdSmthTmp = np.divide(cfg.varSdSmthTmp, cfg.varTr) # ************************************************************************* # ************************************************************************* # *** Preprocessing # The functional data will be masked and demeaned: aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func( cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100.) # set the precision of the header to np.float32 so that the prf results # will be saved in this precision later hdrMsk.set_data_dtype(np.float32) print('---Number of voxels included in analysis: ' + str(np.sum(aryLgcVar))) # ************************************************************************* # *** Checks # Make sure that if gpu fitting is used, the number of cross-validations is # set to 1, not higher if cfg.strVersion == 'gpu': strErrMsg = 'Stopping program. ' + \ 'Cross-validation on GPU is currently not supported. ' + \ 'Set varNumXval equal to 1 in csv file in order to continue. ' assert cfg.varNumXval == 1, strErrMsg # For the GPU version, we need to set down the parallelisation to 1 now, # because no separate CPU threads are to be created. We may still use CPU # parallelisation for preprocessing, which is why the parallelisation # factor is only reduced now, not earlier. if cfg.strVersion == 'gpu': cfg.varPar = 1 # check whether we need to crossvalidate if np.greater(cfg.varNumXval, 1): cfg.lgcXval = True elif np.equal(cfg.varNumXval, 1): cfg.lgcXval = False strErrMsg = 'Stopping program. 
' + \ 'Set numXval (number of crossvalidation folds) to 1 or higher' assert np.greater_equal(cfg.varNumXval, 1), strErrMsg # derive number of feature for fitting if varRat is not None: # since there will be a beta parameter estimate both for the center and # the surround, we multiply by 2 varNumFtr = int(2*cfg.switchHrfSet) else: varNumFtr = cfg.switchHrfSet # ************************************************************************* # ************************************************************************* # Load previous pRF fitting results print('---String to prior results provided by user:') print(objNspc.strPthPrior) # Load the x, y, sigma winner parameters from pyprf_feature lstWnrPrm = [objNspc.strPthPrior + '_x_pos.nii.gz', objNspc.strPthPrior + '_y_pos.nii.gz', objNspc.strPthPrior + '_SD.nii.gz', objNspc.strPthPrior + '_eccentricity.nii.gz'] lstPrmInt, objHdr, aryAff = load_res_prm(lstWnrPrm, lstFlsMsk=[cfg.strPathNiiMask]) # Convert list to array assert len(lstPrmInt) == 1 aryIntGssPrm = lstPrmInt[0] del(lstPrmInt) # Some voxels were excluded because they did not have sufficient mean # and/or variance - exclude their nitial parameters, too aryIntGssPrm = aryIntGssPrm[aryLgcVar, :] # ************************************************************************* # ************************************************************************* # *** Sort voxels by polar angle/previous parameters # Calculate the polar angles that were found in independent localiser aryPlrAng = np.arctan2(aryIntGssPrm[:, 1], aryIntGssPrm[:, 0]) # Calculate the unique polar angles that are expected from grid search aryUnqPlrAng = np.linspace(0.0, 2*np.pi, objNspc.varNumOpt2, endpoint=False) # Expected polar angle values are range from 0 to 2*pi, while # the calculated angle values will range from -pi to pi # Thus, bring empirical values from range -pi, pi to range 0, 2pi aryPlrAng = (aryPlrAng + 2 * np.pi) % (2 * np.pi) # For every empirically found polar angle get the index of the nearest # theoretically expected polar angle, this is to offset small imprecisions aryUnqPlrAngInd, aryDstPlrAng = find_near_pol_ang(aryPlrAng, aryUnqPlrAng) # Make sure that the maximum distance from a found polar angle to a grid # point is smaller than the distance between two neighbor grid points assert np.max(aryDstPlrAng) < np.divide(2*np.pi, objNspc.varNumOpt2) # Update unique polar angles such that it contains only the ones which # were found in data aryUnqPlrAng = aryUnqPlrAng[np.unique(aryUnqPlrAngInd)] # Update indices aryUnqPlrAngInd, aryDstPlrAng = find_near_pol_ang(aryPlrAng, aryUnqPlrAng) # Get logical arrays that index voxels with particular polar angle lstLgcUnqPlrAng = [] for indPlrAng in range(len(aryUnqPlrAng)): lstLgcUnqPlrAng.append([aryUnqPlrAngInd == indPlrAng][0]) print('---Number of radial position options provided by user: ' + str(objNspc.varNumOpt1)) print('---Number of angular position options provided by user: ' + str(objNspc.varNumOpt2)) print('---Number of unique polar angles found in prior estimates: ' + str(len(aryUnqPlrAng))) print('---Maximum displacement in radial direction that is allowed: ' + str(objNspc.varNumOpt3)) print('---Fitted modelled are restricted to stimulated area: ' + str(objNspc.lgcRstrCentre)) # ************************************************************************* # *** Perform prf fitting # Create array for collecting winner parameters aryBstXpos = np.zeros((aryPlrAng.shape[0])) aryBstYpos = np.zeros((aryPlrAng.shape[0])) aryBstSd = np.zeros((aryPlrAng.shape[0])) aryBstR2 = 
np.zeros((aryPlrAng.shape[0])) aryBstBts = np.zeros((aryPlrAng.shape[0], varNumFtr)) if np.greater(cfg.varNumXval, 1): aryBstR2Single = np.zeros((aryPlrAng.shape[0], len(cfg.lstPathNiiFunc))) # loop over all found instances of polar angle/previous parameters for indPlrAng in range(len(aryUnqPlrAng)): print('------Polar angle number ' + str(indPlrAng+1) + ' out of ' + str(len(aryUnqPlrAng))) # get the polar angle for the current voxel batch varPlrAng = np.array(aryUnqPlrAng[indPlrAng]) # get logical array to index voxels with this particular polar angle lgcUnqPlrAng = lstLgcUnqPlrAng[indPlrAng] # get prior eccentricities for current voxel batch vecPrrEcc = aryIntGssPrm[lgcUnqPlrAng, 3] print('---------Number of voxels of this polar angle: ' + str(np.sum(lgcUnqPlrAng))) # ********************************************************************* # ********************************************************************* # *** Create time course models for this particular polar angle # Vector with the radial position: vecRad = np.linspace(0.0, cfg.varExtXmax, objNspc.varNumOpt1, endpoint=True) # Get all possible combinations on the grid, using matrix indexing ij # of output aryRad, aryTht = np.meshgrid(vecRad, varPlrAng, indexing='ij') # Flatten arrays to be able to combine them with meshgrid vecRad = aryRad.flatten() vecTht = aryTht.flatten() # Convert from polar to cartesian vecX, vecY = map_pol_to_crt(vecTht, vecRad) # Vector with standard deviations pRF models (in degree of vis angle): vecPrfSd = np.linspace(cfg.varPrfStdMin, cfg.varPrfStdMax, cfg.varNumPrfSizes, endpoint=True) # Create model parameters varNumMdls = len(vecX) * len(vecPrfSd) aryMdlParams = np.zeros((varNumMdls, 3), dtype=np.float32) varCntMdlPrms = 0 # Loop through x-positions: for idxXY in range(0, len(vecX)): # Loop through standard deviations (of Gaussian pRF models): for idxSd in range(0, len(vecPrfSd)): # Place index and parameters in array: aryMdlParams[varCntMdlPrms, 0] = vecX[idxXY] aryMdlParams[varCntMdlPrms, 1] = vecY[idxXY] aryMdlParams[varCntMdlPrms, 2] = vecPrfSd[idxSd] # Increment parameter index: varCntMdlPrms += 1 # Convert winner parameters from degrees of visual angle to pixel vecIntX, vecIntY, vecIntSd = rmp_deg_pixel_xys(aryMdlParams[:, 0], aryMdlParams[:, 1], aryMdlParams[:, 2], cfg.tplVslSpcSze, cfg.varExtXmin, cfg.varExtXmax, cfg.varExtYmin, cfg.varExtYmax) aryMdlParamsPxl = np.column_stack((vecIntX, vecIntY, vecIntSd)) if objNspc.lgcRstrCentre: # Calculate the areas that were stimulated during the experiment arySptExpInf = np.load(cfg.strSptExpInf) arySptExpInf = np.rot90(arySptExpInf, k=3) aryStimArea = np.sum(arySptExpInf, axis=-1).astype(np.bool) # Get logical to exclude models with pRF centre outside stim area lgcMdlInc = aryStimArea[aryMdlParamsPxl[:, 0].astype(np.int32), aryMdlParamsPxl[:, 1].astype(np.int32)] # Exclude models with prf center outside stimulated area aryMdlParams = aryMdlParams[lgcMdlInc, :] aryMdlParamsPxl = aryMdlParamsPxl[lgcMdlInc, :] # Create model time courses aryPrfTc = model_creation_opt(dicCnfg, aryMdlParamsPxl, strPathHrf=strPathHrf, varRat=varRat, lgcPrint=False) # The model time courses will be preprocessed such that they are # smoothed (temporally) with same factor as the data and that they will # be z-scored: aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp, lgcPrint=False) # ********************************************************************* # *** Create logical to restrict model fitting in radial direction if objNspc.varNumOpt3 is not None: # Calculate 
eccentricity of currently tested model parameters vecMdlEcc = np.sqrt(np.add(np.square(aryMdlParams[:, 0]), np.square(aryMdlParams[:, 1]))) # Compare model eccentricity against prior eccentricity vecPrrEccGrd, vecMdlEccGrd = np.meshgrid(vecPrrEcc, vecMdlEcc, indexing='ij') # Consider allowed eccentricity shift as specified by user lgcRstr = np.logical_and(np.less_equal(vecMdlEccGrd, np.add(vecPrrEccGrd, objNspc.varNumOpt3)), np.greater(vecMdlEccGrd, np.subtract(vecPrrEccGrd, objNspc.varNumOpt3) ) ) else: lgcRstr = np.ones((np.sum(lgcUnqPlrAng), aryMdlParams.shape[0]), dtype=np.bool) # ********************************************************************* # *** Check for every voxel there is at least one model being tried # Is there at least 1 model for each voxel? lgcMdlPerVxl = np.greater(np.sum(lgcRstr, axis=1), 0) print('---------Number of voxels fitted: ' + str(np.sum(lgcMdlPerVxl))) # Those voxels for which no model would be tried, for example because # the pRF parameters estimated in the prior were outside the stimulated # area, are escluded from model fitting by setting their logical False lgcUnqPlrAng[lgcUnqPlrAng] = lgcMdlPerVxl # We need to update the index table for restricting model fitting lgcRstr = lgcRstr[lgcMdlPerVxl, :] # ********************************************************************* # *** Find best model for voxels with this particular polar angle # Only perform the fitting if there are voxels with models to optimize if np.any(lgcUnqPlrAng): # Empty list for results (parameters of best fitting pRF model): lstPrfRes = [None] * cfg.varPar # Empty list for processes: lstPrcs = [None] * cfg.varPar # Create a queue to put the results in: queOut = mp.Queue() # Put logical for model restriction in list lstRst = np.array_split(lgcRstr, cfg.varPar) del(lgcRstr) # Create list with chunks of func data for parallel processes: lstFunc = np.array_split(aryFunc[lgcUnqPlrAng, :], cfg.varPar) # CPU version (using numpy or cython for pRF finding): if ((cfg.strVersion == 'numpy') or (cfg.strVersion == 'cython')): # Create processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc] = mp.Process(target=find_prf_cpu, args=(idxPrc, lstFunc[idxPrc], aryPrfTc, aryMdlParams, cfg.strVersion, cfg.lgcXval, cfg.varNumXval, queOut), kwargs={'lgcRstr': lstRst[idxPrc], 'lgcPrint': False}, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # GPU version (using tensorflow for pRF finding): elif cfg.strVersion == 'gpu': # Create processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc] = mp.Process(target=find_prf_gpu, args=(idxPrc, aryMdlParams, lstFunc[idxPrc], aryPrfTc, queOut), kwargs={'lgcRstr': lstRst[idxPrc], 'lgcPrint': False}, ) # Daemon (kills processes when exiting): lstPrcs[idxPrc].Daemon = True # Start processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc].start() # Delete reference to list with function data (the data continues # to exists in child process): del(lstFunc) # Collect results from queue: for idxPrc in range(0, cfg.varPar): lstPrfRes[idxPrc] = queOut.get(True) # Join processes: for idxPrc in range(0, cfg.varPar): lstPrcs[idxPrc].join() # ***************************************************************** # ***************************************************************** # *** Prepare pRF finding results for export # Put output into correct order: lstPrfRes = sorted(lstPrfRes) # collect results from parallelization aryBstTmpXpos = joinRes(lstPrfRes, cfg.varPar, 1, inFormat='1D') aryBstTmpYpos = joinRes(lstPrfRes, cfg.varPar, 2, 
aryBstTmpSd = joinRes(lstPrfRes, cfg.varPar, 3, inFormat='1D')
            aryBstTmpR2 = joinRes(lstPrfRes, cfg.varPar, 4, inFormat='1D')
            aryBstTmpBts = joinRes(lstPrfRes, cfg.varPar, 5, inFormat='2D')
            if np.greater(cfg.varNumXval, 1):
                aryTmpBstR2Single = joinRes(lstPrfRes, cfg.varPar, 6,
                                            inFormat='2D')

            # Delete unneeded large objects:
            del(lstPrfRes)

            # *****************************************************************

            # *****************************************************************
            # Put findings for voxels with specific polar angle into array with
            # results for all voxels
            aryBstXpos[lgcUnqPlrAng] = aryBstTmpXpos
            aryBstYpos[lgcUnqPlrAng] = aryBstTmpYpos
            aryBstSd[lgcUnqPlrAng] = aryBstTmpSd
            aryBstR2[lgcUnqPlrAng] = aryBstTmpR2
            aryBstBts[lgcUnqPlrAng, :] = aryBstTmpBts
            if np.greater(cfg.varNumXval, 1):
                aryBstR2Single[lgcUnqPlrAng, :] = aryTmpBstR2Single
            # *****************************************************************

    # *************************************************************************
    # Calculate polar angle map:
    aryPlrAng = np.arctan2(aryBstYpos, aryBstXpos)
    # Calculate eccentricity map (r = sqrt( x^2 + y^2 ) ):
    aryEcc = np.sqrt(np.add(np.square(aryBstXpos),
                            np.square(aryBstYpos)))

    # It is possible that after optimization the pRF has moved to location 0, 0
    # In these cases, the polar angle parameter is arbitrary and will be
    # assigned either 0 or pi. To preserve smoothness of the map, assign the
    # initial polar angle value from the independent localiser
    lgcMvdOrgn = np.logical_and(aryBstXpos == 0.0, aryBstYpos == 0.0)
    lgcMvdOrgn = np.logical_and(lgcMvdOrgn, aryBstSd > 0)
    aryIntPlrAng = np.arctan2(aryIntGssPrm[:, 1], aryIntGssPrm[:, 0])
    aryPlrAng[lgcMvdOrgn] = np.copy(aryIntPlrAng[lgcMvdOrgn])

    # *************************************************************************

    # *************************************************************************
    # Export each map of best parameters as a 3D nii file
    print('---------Exporting results')

    # Append 'hrf' to cfg.strPathOut, if fitting was done with custom hrf
    if strPathHrf is not None:
        cfg.strPathOut = cfg.strPathOut + '_hrf'

    # Concatenate all the best voxel maps
    aryBstMaps = np.stack([aryBstXpos, aryBstYpos, aryBstSd, aryBstR2,
                           aryPlrAng, aryEcc], axis=1)

    # List with name suffixes of output images:
    lstNiiNames = ['_x_pos_brute',
                   '_y_pos_brute',
                   '_SD_brute',
                   '_R2_brute',
                   '_polar_angle_brute',
                   '_eccentricity_brute']

    if varRat is not None:
        lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]

    # Create full path names from nii file names and output path
    lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
                   lstNiiNames]

    # Export map results as separate 3D nii files
    export_nii(aryBstMaps, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
               aryAff, hdrMsk, outFormat='3D')

    # *************************************************************************

    # *************************************************************************
    # Save beta parameter estimates for every feature:

    # List with name suffixes of output images:
    lstNiiNames = ['_Betas_brute']

    if varRat is not None:
        lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames]

    # Create full path names from nii file names and output path
    lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
                   lstNiiNames]

    # Export beta parameters as a single 4D nii file
    export_nii(aryBstBts, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
               aryAff, hdrMsk, outFormat='4D')

    # *************************************************************************

    # 
************************************************************************* # Save R2 maps from crossvalidation (saved for every run) as nii: if np.greater(cfg.varNumXval, 1): # truncate extremely negative R2 values aryBstR2Single[np.where(np.less_equal(aryBstR2Single, -1.0))] = -1.0 # List with name suffices of output images: lstNiiNames = ['_R2_single_brute'] if varRat is not None: lstNiiNames = [strNii + '_' + str(varRat) for strNii in lstNiiNames] # Create full path names from nii file names and output path lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in lstNiiNames] # export R2 maps as a single 4D nii file export_nii(aryBstR2Single, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff, hdrMsk, outFormat='4D') # ************************************************************************* # ************************************************************************* # *** Report time varTme02 = time.time() varTme03 = varTme02 - varTme01 print('---Elapsed time: ' + str(varTme03) + ' s') print('---Done.')
Function for optimizing given pRF parameters using brute-force grid search.

Parameters
----------
strCsvCnfg : str
    Absolute file path of config file.
objNspc : object
    Name space from command line arguments.
lgcTest : boolean
    Whether this is a test (pytest). If yes, the absolute path of the pyprf
    library will be prepended to config file paths.
strPathHrf : str or None
    Path to npy file with custom hrf parameters. If None, default
    parameters will be used.
varRat : float, default None
    Ratio of size of suppressive surround to size of center pRF.
entailment
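The function above batches voxels by the polar angle of their prior estimates before fitting. Below is a minimal, self-contained sketch of just that grouping step; all array names and values here are hypothetical, not taken from the code above:

import numpy as np

# Hypothetical prior estimates: x- and y-position per voxel
aryX = np.array([1.0, 0.0, -1.0, 0.0, 1.0])
aryY = np.array([0.0, 1.0, 0.0, -1.0, 0.0])

# Polar angle per voxel, computed as in the function above
aryPlrAng = np.arctan2(aryY, aryX)

# Unique polar angles and, per angle, a logical index into the voxels
aryUnqPlrAng = np.unique(aryPlrAng)
lstLgcUnqPlrAng = [np.equal(aryPlrAng, varAng) for varAng in aryUnqPlrAng]

for varAng, lgcVxl in zip(aryUnqPlrAng, lstLgcUnqPlrAng):
    # Each batch of voxels sharing a prior angle is fitted together
    print(varAng, np.sum(lgcVxl))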
def get_lbry_api_function_docs(url=LBRY_API_RAW_JSON_URL):
    """ Fetches the LBRY API documentation, which is served as JSON, from the
    given URL

    :param str url: URL to the documentation we need to obtain,
     pybry.constants.LBRY_API_RAW_JSON_URL by default
    :return: List of functions retrieved from the `url` given
    :rtype: list
    """

    try:
        # Grab the page content
        docs_page = urlopen(url)

        # Read the contents of the actual url we grabbed and decode them into UTF-8
        contents = docs_page.read().decode("utf-8")

        # Return the contents loaded as JSON
        return loads(contents)

    # If an error occurs, print it and fall through to return an empty list
    except URLError as UE:
        print(UE)

    except Exception as E:
        print(E)

    return []
Fetches the LBRY API documentation, which is served as JSON, from the given URL

:param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default
:return: List of functions retrieved from the `url` given
:rtype: list
entailment
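A hedged usage sketch (assuming the module-level default LBRY_API_RAW_JSON_URL points at a reachable copy of the docs; on failure the function returns an empty list):

functions = get_lbry_api_function_docs()

# Each entry is a dict describing one API call
for func in functions:
    print(func["name"])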
def generate_method_definition(func):
    """ Generates the body for the given function

    :param dict func: dict of a JSON-Formatted function as defined by the API
     docs
    :return: A String containing the definition for the function as it should
     be written in code
    :rtype: str
    """
    indent = 4

    # initial definition
    method_definition = (" " * indent) + "def " + func["name"]

    # Here we just create a queue and put all the parameters
    # into the queue in the order that they were given,
    params_required = [
        param for param in func["arguments"] if param["is_required"]
    ]
    params_optional = [
        param for param in func["arguments"]
        if not param["is_required"]
    ]

    # Open the parameter definitions
    method_definition += "(self, "

    for param in params_required:
        # Put the parameter into the queue
        method_definition += param["name"]
        method_definition += ", "

    for param in params_optional:
        method_definition += param["name"]
        # Optional parameters default to None
        method_definition += "=None, "

    # Peel off the final ", " and close off the parameter definition
    method_definition = method_definition.rstrip(", ") + "):\n"

    indent += 4

    # re-indent
    method_definition += " " * indent

    # Begin with description.
    method_definition += '"""' + func["description"]

    # re-indent
    method_definition += "\n\n" + " " * indent

    # Go through each parameter and insert description & type hint
    for param in params_required + params_optional:
        # Add the type
        method_definition += ":param " + DTYPE_MAPPING[param["type"].lower()]

        # Add the name
        method_definition += " " + param["name"] + ": "

        # Add the description
        method_definition += param["description"]

        # Add optionality & reindent
        method_definition += "\n" if param["is_required"] else " (Optional)\n"

        method_definition += " " * indent

    open_index = func["returns"].find('(')
    close_index = func["returns"].find(')',
                                       (open_index if open_index > -1 else 0))

    func["returns"] = func["returns"].replace("\t", " " * 4)
    return_string = func["returns"].replace("\n", "")

    if open_index < close_index and func["returns"][open_index + 1:close_index] in DTYPE_MAPPING:
        method_definition += ":rtype: " + DTYPE_MAPPING[
            func["returns"][open_index + 1:close_index]]

        func["returns"] = func["returns"].replace(
            func["returns"][open_index:close_index + 1], "")

        method_definition += "\n" + " " * indent

    # Write the return description in chunks so no line exceeds 80 characters
    method_definition += ":return: "
    for i in range(0, len(return_string) + 1, 80 - (indent + 2)):
        method_definition += return_string[i:i + (80 - (indent + 2))] \
                             + "\n" + " " * indent

    # Close it off & reindent
    method_definition += '"""' + "\n" + " " * indent

    # Create the params map
    params_map = "__params_map = {"

    # Save the indent
    params_indent, num_params = len(params_map), \
        len(params_required) + len(params_optional)

    # Append the map to the method_definition
    method_definition += params_map

    # Go through the required parameters first, then the optional ones
    for i, param in enumerate(params_required + params_optional):
        # append the methods to the map
        method_definition += "'" + param["name"] + "': " + param["name"]

        if not param["is_required"]:
            method_definition += " if " + param["name"] \
                                 + " is not None else None"

        # add commas or ending bracket if needed & reindent correctly
        method_definition += ",\n" + " " * indent + ' ' * params_indent \
            if i + 1 < num_params else ""

    method_definition += '}\n\n' + ' ' * indent

    method_definition += "return self.make_request(SERVER_ADDRESS, '" + func["name"] + "', " \
                         + params_map.rstrip(" = {") + ", timeout=self.timeout)\n\n"

    return method_definition
Generates the body for the given function :param dict func: dict of a JSON-Formatted function as defined by the API docs :return: A String containing the definition for the function as it should be written in code :rtype: str
entailment
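To make the expected input concrete, here is a toy `func` dict containing the fields the generator actually reads; the values are invented, and DTYPE_MAPPING must contain the listed type strings:

func = {
    "name": "resolve",
    "description": "Resolve a name.",
    "returns": "(dict) resolved data",
    "arguments": [
        {"name": "uri", "type": "str", "is_required": True,
         "description": "uri to resolve"},
        {"name": "timeout", "type": "int", "is_required": False,
         "description": "request timeout"},
    ],
}

print(generate_method_definition(func))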
def generate_lbryd_wrapper(url=LBRY_API_RAW_JSON_URL, read_file=__LBRYD_BASE_FPATH__, write_file=LBRYD_FPATH): """ Generates the actual functions for lbryd_api.py based on lbry's documentation :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :param str read_file: This is the path to the file from which we will be reading :param str write_file: Path from project root to the file we'll be writing to. """ functions = get_lbry_api_function_docs(url) # Open the actual file for appending with open(write_file, 'w') as lbry_file: lbry_file.write("# This file was generated at build time using the generator function\n") lbry_file.write("# You may edit but do so with caution\n") with open(read_file, 'r') as template: header = template.read() lbry_file.write(header) # Iterate through all the functions we retrieved for func in functions: method_definition = generate_method_definition(func) # Write to file lbry_file.write(method_definition) try: from yapf.yapflib.yapf_api import FormatFile # Now we should format the file using the yapf formatter FormatFile(write_file, in_place=True) except ImportError as IE: print("[Warning]: yapf is not installed, so the generated code will not follow an easy-to-read standard") print(IE)
Generates the actual functions for lbryd_api.py based on lbry's documentation :param str url: URL to the documentation we need to obtain, pybry.constants.LBRY_API_RAW_JSON_URL by default :param str read_file: This is the path to the file from which we will be reading :param str write_file: Path from project root to the file we'll be writing to.
entailment
def load_nii(strPathIn, varSzeThr=5000.0):
    """
    Load nii file.

    Parameters
    ----------
    strPathIn : str
        Path to nii file to load.
    varSzeThr : float
        If the nii file is larger than this threshold (in MB), the file is
        loaded volume-by-volume in order to prevent memory overflow. Default
        threshold is 5000 MB.

    Returns
    -------
    aryNii : np.array
        Array containing nii data. 32 bit floating point precision.
    objHdr : header object
        Header of nii file.
    aryAff : np.array
        Array containing 'affine', i.e. information about spatial positioning
        of nii data.

    Notes
    -----
    If the nii file is larger than the specified threshold (`varSzeThr`), the
    file is loaded volume-by-volume in order to prevent memory overflow. The
    reason for this is that nibabel imports data at float64 precision, which
    can lead to a memory overflow even for relatively small files.
    """
    # Load nii file (this does not load the data into memory yet):
    objNii = nb.load(strPathIn)

    # Get size of nii file:
    varNiiSze = os.path.getsize(strPathIn)

    # Convert to MB:
    varNiiSze = np.divide(float(varNiiSze), 1000000.0)

    # Load volume-by-volume or all at once, depending on file size:
    if np.greater(varNiiSze, float(varSzeThr)):

        # Load large nii file
        print(('---------Large file size ('
               + str(np.around(varNiiSze))
               + ' MB), reading volume-by-volume'))

        # Get image dimensions:
        tplSze = objNii.shape

        # Create empty array for nii data:
        aryNii = np.zeros(tplSze, dtype=np.float32)

        # Loop through volumes:
        for idxVol in range(tplSze[3]):
            aryNii[..., idxVol] = np.asarray(
                  objNii.dataobj[..., idxVol]).astype(np.float32)

    else:

        # Load small nii file. The file proxy was already created above, so
        # here we only need to load the data into memory:
        aryNii = np.asarray(objNii.dataobj).astype(np.float32)

    # Get headers:
    objHdr = objNii.header

    # Get 'affine':
    aryAff = objNii.affine

    # Output nii data (as numpy array), header, and 'affine':
    return aryNii, objHdr, aryAff
Load nii file.

Parameters
----------
strPathIn : str
    Path to nii file to load.
varSzeThr : float
    If the nii file is larger than this threshold (in MB), the file is loaded
    volume-by-volume in order to prevent memory overflow. Default threshold
    is 5000 MB.

Returns
-------
aryNii : np.array
    Array containing nii data. 32 bit floating point precision.
objHdr : header object
    Header of nii file.
aryAff : np.array
    Array containing 'affine', i.e. information about spatial positioning of
    nii data.

Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which can
lead to a memory overflow even for relatively small files.
entailment
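A short usage sketch (the file path is hypothetical):

aryNii, objHdr, aryAff = load_nii('/path/to/func.nii.gz')
print(aryNii.shape, aryNii.dtype)  # e.g. (64, 64, 32, 300) float32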
def load_res_prm(lstFunc, lstFlsMsk=None):
    """Load result parameters from multiple nii files, with optional mask.

    Parameters
    ----------
    lstFunc : list,
        list of str with file names of 3D or 4D nii files
    lstFlsMsk : list, optional
        list of str with paths to 3D nii files that can act as mask/s

    Returns
    -------
    lstPrmAry : list
        The list will contain as many numpy arrays as masks were provided.
        Each array is 2D with shape [nr voxel in mask, nr nii files in
        lstFunc]
    objHdr : header object
        Header of nii file.
    aryAff : np.array
        Array containing 'affine', i.e. information about spatial positioning
        of nii data.

    """
    # load parameter/functional maps into a list
    lstPrm = []
    for ind, path in enumerate(lstFunc):
        aryFnc = load_nii(path)[0].astype(np.float32)
        if aryFnc.ndim == 3:
            lstPrm.append(aryFnc)
        # handle cases where the nii array is 4D; in this case split the
        # array up into 3D arrays and append those
        elif aryFnc.ndim == 4:
            for indAx in range(aryFnc.shape[-1]):
                lstPrm.append(aryFnc[..., indAx])

    # load mask/s if available
    if lstFlsMsk is not None:
        lstMsk = [None] * len(lstFlsMsk)
        for ind, path in enumerate(lstFlsMsk):
            aryMsk = load_nii(path)[0].astype(bool)
            lstMsk[ind] = aryMsk
    else:
        print('------------No masks were provided')

    if lstFlsMsk is None:
        # if no mask was provided we just flatten all parameter arrays in the
        # list and return the resulting list
        lstPrmAry = [ary.flatten() for ary in lstPrm]
    else:
        # if masks are available, we loop over masks and then over parameter
        # maps to extract selected voxels and parameters
        lstPrmAry = [None] * len(lstFlsMsk)
        for indLst, aryMsk in enumerate(lstMsk):
            # prepare array that will hold parameter values of selected voxels
            aryPrmSel = np.empty((np.sum(aryMsk), len(lstPrm)),
                                 dtype=np.float32)
            # loop over different parameter maps
            for indAry, aryPrm in enumerate(lstPrm):
                # get voxels specific to this mask
                aryPrmSel[:, indAry] = aryPrm[aryMsk, ...]
            # put the array away in the list; if only one parameter map was
            # provided the output will be squeezed
            lstPrmAry[indLst] = aryPrmSel

    # also get header object and affine array
    # we simply take them from the first functional nii file, because that is
    # the only file that has to be provided by necessity
    objHdr, aryAff = load_nii(lstFunc[0])[1:]

    return lstPrmAry, objHdr, aryAff
Load result parameters from multiple nii files, with optional mask. Parameters ---------- lstFunc : list, list of str with file names of 3D or 4D nii files lstFlsMsk : list, optional list of str with paths to 3D nii files that can act as mask/s Returns ------- lstPrmAry : list The list will contain as many numpy arrays as masks were provided. Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc] objHdr : header object Header of nii file. aryAff : np.array Array containing 'affine', i.e. information about spatial positioning of nii data.
entailment
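A usage sketch with two parameter maps and one mask (all paths are hypothetical):

lstPrmAry, objHdr, aryAff = load_res_prm(
    ['/path/to/R2.nii.gz', '/path/to/SD.nii.gz'],
    lstFlsMsk=['/path/to/mask.nii.gz'])

# One entry per mask, shaped [nr voxel in mask, nr of maps]
print(lstPrmAry[0].shape)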
def export_nii(ary2dNii, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff,
               hdrMsk, outFormat='3D'):
    """
    Export nii file(s).

    Parameters
    ----------
    ary2dNii : numpy array
        Numpy array with results to be exported to nii.
    lstNiiNames : list
        List that contains strings with the complete file names.
    aryLgcMsk : numpy array
        3D numpy array with logical values. Externally supplied mask (e.g.
        grey matter mask). Voxels that are `False` in the mask are excluded.
    aryLgcVar : np.array
        1D numpy array containing logical values. One value per voxel after
        mask has been applied. If `True`, the variance and mean of the
        voxel's time course are greater than the provided thresholds in all
        runs and the voxel is included in the output array (`aryFunc`). If
        `False`, the variance or mean of the voxel's time course is lower
        than threshold in at least one run and the voxel has been excluded
        from the output (`aryFunc`). This is to avoid problems in the
        subsequent model fitting. This array is necessary to put results into
        original dimensions after model fitting.
    tplNiiShp : tuple
        Tuple that describes the 3D shape of the output volume
    aryAff : np.array
        Array containing 'affine', i.e. information about spatial positioning
        of nii data.
    hdrMsk : nibabel-header-object
        Nii header of mask.
    outFormat : string, either '3D' or '4D'
        String specifying whether images will be saved as separate 3D nii
        files or one 4D nii file

    Notes
    -----
    [1] This function does not return any arrays but instead saves to disk.
    [2] Depending on whether outFormat is '3D' or '4D' images will be saved
        as separate 3D nii files or one 4D nii file.
    """
    # Number of voxels that were included in the mask:
    varNumVoxMsk = np.sum(aryLgcMsk)

    # Number of maps in ary2dNii
    varNumMaps = ary2dNii.shape[-1]

    # Place voxels based on low-variance exclusion:
    aryPrfRes01 = np.zeros((varNumVoxMsk, varNumMaps), dtype=np.float32)
    for indMap in range(varNumMaps):
        aryPrfRes01[aryLgcVar, indMap] = ary2dNii[:, indMap]

    # Total number of voxels:
    varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])

    # Place voxels based on mask-exclusion:
    aryPrfRes02 = np.zeros((varNumVoxTlt, aryPrfRes01.shape[-1]),
                           dtype=np.float32)
    for indDim in range(aryPrfRes01.shape[-1]):
        aryPrfRes02[aryLgcMsk, indDim] = aryPrfRes01[:, indDim]

    # Reshape pRF finding results into original image dimensions:
    aryPrfRes = np.reshape(aryPrfRes02,
                           [tplNiiShp[0],
                            tplNiiShp[1],
                            tplNiiShp[2],
                            aryPrfRes01.shape[-1]])

    if outFormat == '3D':
        # Save nii results:
        for idxOut in range(0, aryPrfRes.shape[-1]):
            # Create nii object for results:
            niiOut = nb.Nifti1Image(aryPrfRes[..., idxOut],
                                    aryAff,
                                    header=hdrMsk
                                    )
            # Save nii:
            strTmp = lstNiiNames[idxOut]
            nb.save(niiOut, strTmp)

    elif outFormat == '4D':

        # adjust header
        hdrMsk.set_data_shape(aryPrfRes.shape)

        # Create nii object for results:
        niiOut = nb.Nifti1Image(aryPrfRes,
                                aryAff,
                                header=hdrMsk
                                )
        # Save nii:
        strTmp = lstNiiNames[0]
        nb.save(niiOut, strTmp)
Export nii file(s).

Parameters
----------
ary2dNii : numpy array
    Numpy array with results to be exported to nii.
lstNiiNames : list
    List that contains strings with the complete file names.
aryLgcMsk : numpy array
    3D numpy array with logical values. Externally supplied mask (e.g. grey
    matter mask). Voxels that are `False` in the mask are excluded.
aryLgcVar : np.array
    1D numpy array containing logical values. One value per voxel after mask
    has been applied. If `True`, the variance and mean of the voxel's time
    course are greater than the provided thresholds in all runs and the voxel
    is included in the output array (`aryFunc`). If `False`, the variance or
    mean of the voxel's time course is lower than threshold in at least one
    run and the voxel has been excluded from the output (`aryFunc`). This is
    to avoid problems in the subsequent model fitting. This array is
    necessary to put results into original dimensions after model fitting.
tplNiiShp : tuple
    Tuple that describes the 3D shape of the output volume
aryAff : np.array
    Array containing 'affine', i.e. information about spatial positioning of
    nii data.
hdrMsk : nibabel-header-object
    Nii header of mask.
outFormat : string, either '3D' or '4D'
    String specifying whether images will be saved as separate 3D nii files
    or one 4D nii file

Notes
-----
[1] This function does not return any arrays but instead saves to disk.
[2] Depending on whether outFormat is '3D' or '4D' images will be saved as
    separate 3D nii files or one 4D nii file.
entailment
def joinRes(lstPrfRes, varPar, idxPos, inFormat='1D'): """Join results from different processing units (here cores). Parameters ---------- lstPrfRes : list Output of results from parallelization. varPar : integer, positive Number of cores that were used during parallelization idxPos : integer, positive List position index that we expect the results to be collected to have. inFormat : string Specifies whether input will be 1d or 2d. Returns ------- aryOut : numpy array Numpy array with results collected from different cores """ if inFormat == '1D': # initialize output array aryOut = np.zeros((0,)) # gather arrays from different processing units for idxRes in range(0, varPar): aryOut = np.append(aryOut, lstPrfRes[idxRes][idxPos]) elif inFormat == '2D': # initialize output array aryOut = np.zeros((0, lstPrfRes[0][idxPos].shape[-1])) # gather arrays from different processing units for idxRes in range(0, varPar): aryOut = np.concatenate((aryOut, lstPrfRes[idxRes][idxPos]), axis=0) return aryOut
Join results from different processing units (here cores). Parameters ---------- lstPrfRes : list Output of results from parallelization. varPar : integer, positive Number of cores that were used during parallelization idxPos : integer, positive List position index that we expect the results to be collected to have. inFormat : string Specifies whether input will be 1d or 2d. Returns ------- aryOut : numpy array Numpy array with results collected from different cores
entailment
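A minimal sketch of how results coming off the queue are ordered and joined; the fake per-process lists below mimic the [process_id, result, ...] layout used by the fitting functions above:

import numpy as np

# Fake output of two processes: [process_id, some_1D_result]
lstPrfRes = [[1, np.array([3.0, 4.0])],
             [0, np.array([1.0, 2.0])]]

# Sorting by process id restores the order, as done before joinRes is called
lstPrfRes = sorted(lstPrfRes)

vecOut = joinRes(lstPrfRes, varPar=2, idxPos=1, inFormat='1D')
print(vecOut)  # [1. 2. 3. 4.]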
def cmp_res_R2(lstRat, lstNiiNames, strPathOut, strPathMdl, lgcSveMdlTc=True,
               lgcDel=False, strNmeExt=''):
    """Compare results for different exponents and create winner nii.

    Parameters
    ----------
    lstRat : list
        List of floats containing the ratios that were tested for surround
        suppression.
    lstNiiNames : list
        List of names of the different pRF maps (e.g. xpos, ypos, SD)
    strPathOut : string
        Path to the parent directory where the results should be saved.
    strPathMdl : string
        Path to the parent directory where pRF models should be saved.
    lgcSveMdlTc : boolean
        Should model time courses be saved as npy file?
    lgcDel : boolean
        Should in-between results (in form of nii files) be deleted?
    strNmeExt : string
        Extra name appendix to denominate experiment name. If undesired,
        provide empty string.

    Notes
    -----
    [1] This function does not return any arrays but instead saves to disk.

    """
    print('---Compare results for different ratios')

    # Extract the index position for R2 and Betas map in lstNiiNames
    indPosR2 = [ind for ind, item in enumerate(lstNiiNames) if 'R2' in item]
    indPosBetas = [ind for ind, item in enumerate(lstNiiNames) if
                   'Betas' in item]
    # Check that only one index was found
    msgError = 'More than one nii file was provided that could serve as R2 map'
    assert len(indPosR2) == 1, msgError
    assert len(indPosBetas) == 1, msgError
    # Turn the lists into indices
    indPosR2 = indPosR2[0]
    indPosBetas = indPosBetas[0]

    # Get the names of the nii files with in-between results
    lstCmpRes = []
    for indRat in range(len(lstRat)):
        # Get strExpSve
        strExpSve = '_' + str(lstRat[indRat])
        # If ratio is marked with 1.0, set empty string to find results.
        # 1.0 is the key for fitting without a surround.
        if lstRat[indRat] == 1.0:
            strExpSve = ''
        # Create full path names from nii file names and output path
        lstPthNames = [strPathOut + strNii + strNmeExt + strExpSve +
                       '.nii.gz' for strNii in lstNiiNames]
        # Append list to list that contains nii names for all exponents
        lstCmpRes.append(lstPthNames)

    print('------Find ratio that yielded highest R2 per voxel')

    # Initialize winner R2 map with R2 values from fit without surround
    aryWnrR2 = load_nii(lstCmpRes[0][indPosR2])[0]
    # Initialize ratio map with 1 where no-surround model was fit, otherwise 0
    aryRatMap = np.zeros(aryWnrR2.shape)
    aryRatMap[np.nonzero(aryWnrR2)] = 1.0

    # Loop over R2 maps to establish which exponent wins
    # Skip the first ratio, since this is the reference ratio (no surround)
    # and is reflected already in the initialized arrays - aryWnrR2 & aryRatMap
    for indRat, lstMaps in zip(lstRat[1:], lstCmpRes[1:]):
        # Load R2 map for this particular exponent
        aryTmpR2 = load_nii(lstMaps[indPosR2])[0]
        # Load beta values for this particular exponent
        aryTmpBetas = load_nii(lstMaps[indPosBetas])[0]
        # Get logical that tells us where current R2 map is greater than
        # previous ones
        aryLgcWnr = np.greater(aryTmpR2, aryWnrR2)
        # Get logical that tells us where the beta parameter estimate for the
        # centre is positive and the estimate for the surround is negative
        aryLgcCtrSur1 = np.logical_and(np.greater(aryTmpBetas[..., 0], 0.0),
                                       np.less(aryTmpBetas[..., 1], 0.0))
        # Get logical that tells us where the absolute beta parameter estimate
        # for the surround is less than beta parameter estimate for the center
        aryLgcCtrSur2 = np.less(np.abs(aryTmpBetas[..., 1]),
                                np.abs(aryTmpBetas[..., 0]))
        # Combine the two logicals
        aryLgcCtrSur = np.logical_and(aryLgcCtrSur1, aryLgcCtrSur2)
        # Combine logical for winner R2 and center-surround conditions
        aryLgcWnr = np.logical_and(aryLgcWnr, aryLgcCtrSur)
        # Replace values of R2,
where current R2 map was greater aryWnrR2[aryLgcWnr] = np.copy(aryTmpR2[aryLgcWnr]) # Remember the index of the exponent that gave rise to this new R2 aryRatMap[aryLgcWnr] = indRat # Initialize list with winner maps. The winner maps are initialized with # the same shape as the maps that the last tested ratio maps had. lstRatMap = [] for strPthMaps in lstCmpRes[-1]: lstRatMap.append(np.zeros(nb.load(strPthMaps).shape)) # Compose other maps by assigning map value from the map that resulted from # the exponent that won for particular voxel for indRat, lstMaps in zip(lstRat, lstCmpRes): # Find out where this exponent won in terms of R2 lgcWinnerMap = [aryRatMap == indRat][0] # Loop over all the maps for indMap, _ in enumerate(lstMaps): # Load map for this particular ratio aryTmpMap = load_nii(lstMaps[indMap])[0] # Handle exception: beta map will be 1D, if from ratio 1.0 # In this case we want to make it 2D. In particular, the second # set of beta weights should be all zeros, so that later when # forming the model time course, the 2nd predictors contributes 0 if indRat == 1.0 and indMap == indPosBetas: aryTmpMap = np.concatenate((aryTmpMap, np.zeros(aryTmpMap.shape)), axis=-1) # Load current winner map from array aryCrrWnrMap = np.copy(lstRatMap[indMap]) # Assign values in temporary map to current winner map for voxels # where this ratio won aryCrrWnrMap[lgcWinnerMap] = np.copy(aryTmpMap[lgcWinnerMap]) lstRatMap[indMap] = aryCrrWnrMap print('------Export results as nii') # Save winner maps as nii files # Get header and affine array hdrMsk, aryAff = load_nii(lstMaps[indPosR2])[1:] # Loop over all the maps for indMap, aryMap in enumerate(lstRatMap): # Create nii object for results: niiOut = nb.Nifti1Image(aryMap, aryAff, header=hdrMsk ) # Save nii: strTmp = strPathOut + '_supsur' + lstNiiNames[indMap] + strNmeExt + \ '.nii.gz' nb.save(niiOut, strTmp) # Save map with best ratios as nii niiOut = nb.Nifti1Image(aryRatMap, aryAff, header=hdrMsk ) # Save nii: strTmp = strPathOut + '_supsur' + '_Ratios' + strNmeExt + '.nii.gz' nb.save(niiOut, strTmp) if lgcSveMdlTc: print('------Save model time courses/parameters/responses for ' + 'centre and surround, across all ratios') # Get the names of the npy files with inbetween model responses lstCmpMdlRsp = [] for indRat in range(len(lstRat)): # Get strExpSve strExpSve = '_' + str(lstRat[indRat]) # If ratio is marked with 0, set empty string to find results. # This is the code for fitting without a surround. 
if lstRat[indRat] == 1.0: strExpSve = '' # Create full path names from npy file names and output path lstPthNames = [strPathMdl + strNpy + strNmeExt + strExpSve + '.npy' for strNpy in ['', '_params', '_mdlRsp']] # Append list to list that contains nii names for all exponents lstCmpMdlRsp.append(lstPthNames) # Load tc/parameters/responses for different ratios, for now skip "0.0" # ratio because its tc/parameters/responses differs in shape lstPrfTcSur = [] lstMdlParamsSur = [] lstMdlRspSur = [] for indNpy, lstNpy in enumerate(lstCmpMdlRsp[1:]): lstPrfTcSur.append(np.load(lstNpy[0])) lstMdlParamsSur.append(np.load(lstNpy[1])) lstMdlRspSur.append(np.load(lstNpy[2])) # Turn into arrays aryPrfTcSur = np.stack(lstPrfTcSur, axis=2) aryMdlParamsSur = np.stack(lstMdlParamsSur, axis=2) aryMdlRspSur = np.stack(lstMdlRspSur, axis=2) # Now handle the "1.0" ratio # Load the tc/parameters/responses of the "1.0" ratio aryPrfTc = np.load(lstCmpMdlRsp[0][0]) aryMdlParams = np.load(lstCmpMdlRsp[0][1]) aryMdlRsp = np.load(lstCmpMdlRsp[0][2]) # Make 2nd row of time courses all zeros so they get no weight in lstsq aryPrfTc = np.concatenate((aryPrfTc, np.zeros(aryPrfTc.shape)), axis=1) # Make 2nd row of parameters the same as first row aryMdlParams = np.stack((aryMdlParams, aryMdlParams), axis=1) # Make 2nd row of responses all zeros so they get no weight in lstsq aryMdlRsp = np.stack((aryMdlRsp, np.zeros(aryMdlRsp.shape)), axis=1) # Add the "1.0" ratio to tc/parameters/responses of other ratios aryPrfTcSur = np.concatenate((np.expand_dims(aryPrfTc, axis=2), aryPrfTcSur), axis=2) aryMdlParamsSur = np.concatenate((np.expand_dims(aryMdlParams, axis=2), aryMdlParamsSur), axis=2) aryMdlRspSur = np.concatenate((np.expand_dims(aryMdlRsp, axis=2), aryMdlRspSur), axis=2) # Save parameters/response for centre and surround, for all ratios np.save(strPathMdl + '_supsur' + '', aryPrfTcSur) np.save(strPathMdl + '_supsur' + '_params', aryMdlParamsSur) np.save(strPathMdl + '_supsur' + '_mdlRsp', aryMdlRspSur) # Delete all the inbetween results, if desired by user, skip "0.0" ratio if lgcDel: lstCmpRes = [item for sublist in lstCmpRes[1:] for item in sublist] print('------Delete in-between results') for strMap in lstCmpRes[:]: os.remove(strMap) if lgcSveMdlTc: lstCmpMdlRsp = [item for sublist in lstCmpMdlRsp[1:] for item in sublist] for strMap in lstCmpMdlRsp[:]: os.remove(strMap)
Compare results for different exponents and create winner nii.

Parameters
----------
lstRat : list
    List of floats containing the ratios that were tested for surround
    suppression.
lstNiiNames : list
    List of names of the different pRF maps (e.g. xpos, ypos, SD)
strPathOut : string
    Path to the parent directory where the results should be saved.
strPathMdl : string
    Path to the parent directory where pRF models should be saved.
lgcSveMdlTc : boolean
    Should model time courses be saved as npy file?
lgcDel : boolean
    Should in-between results (in form of nii files) be deleted?
strNmeExt : string
    Extra name appendix to denominate experiment name. If undesired, provide
    empty string.

Notes
-----
[1] This function does not return any arrays but instead saves to disk.
entailment
def map_crt_to_pol(aryXCrds, aryYrds): """Remap coordinates from cartesian to polar Parameters ---------- aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values. Returns ------- aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates. """ aryRad = np.sqrt(aryXCrds**2+aryYrds**2) aryTht = np.arctan2(aryYrds, aryXCrds) return aryTht, aryRad
Remap coordinates from cartesian to polar Parameters ---------- aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values. Returns ------- aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates.
entailment
def map_pol_to_crt(aryTht, aryRad): """Remap coordinates from polar to cartesian Parameters ---------- aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates. Returns ------- aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values. """ aryXCrds = aryRad * np.cos(aryTht) aryYrds = aryRad * np.sin(aryTht) return aryXCrds, aryYrds
Remap coordinates from polar to cartesian Parameters ---------- aryTht : 1D numpy array Angle of coordinates aryRad : 1D numpy array Radius of coordinates. Returns ------- aryXCrds : 1D numpy array Array with x coordinate values. aryYrds : 1D numpy array Array with y coordinate values.
entailment
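The two remapping helpers above are inverses of each other; a quick round-trip check:

import numpy as np

vecX = np.array([1.0, 0.0, -2.0])
vecY = np.array([0.0, 3.0, 0.0])

vecTht, vecRad = map_crt_to_pol(vecX, vecY)
vecX2, vecY2 = map_pol_to_crt(vecTht, vecRad)

print(np.allclose(vecX, vecX2), np.allclose(vecY, vecY2))  # True True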
def find_near_pol_ang(aryEmpPlrAng, aryExpPlrAng):
    """Return index of nearest expected polar angle.

    Parameters
    ----------
    aryEmpPlrAng : 1D numpy array
        Empirically found polar angle estimates
    aryExpPlrAng : 1D numpy array
        Theoretically expected polar angle estimates

    Returns
    -------
    vecIdx : 1D numpy array
        Indices of nearest theoretically expected polar angle.
    vecDst : 1D numpy array
        Distances to nearest theoretically expected polar angle.

    """
    dist = np.abs(np.subtract(aryEmpPlrAng[:, None],
                              aryExpPlrAng[None, :]))

    return np.argmin(dist, axis=-1), np.min(dist, axis=-1)
Return index of nearest expected polar angle.

Parameters
----------
aryEmpPlrAng : 1D numpy array
    Empirically found polar angle estimates
aryExpPlrAng : 1D numpy array
    Theoretically expected polar angle estimates

Returns
-------
vecIdx : 1D numpy array
    Indices of nearest theoretically expected polar angle.
vecDst : 1D numpy array
    Distances to nearest theoretically expected polar angle.
entailment
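A small example; note that the distance is computed on the raw angle values, without wrap-around at +/- pi:

import numpy as np

aryEmp = np.array([0.1, 1.6, -3.0])
aryExp = np.array([0.0, np.pi / 2., np.pi])

vecIdx, vecDst = find_near_pol_ang(aryEmp, aryExp)
print(vecIdx)  # [0 1 0] - index of nearest expected angle per voxel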
def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=None,
            varOldAbsMax=None):
    """Remap values in an array from one range to another.

    Parameters
    ----------
    aryVls : 1D numpy array
        Array with values that need to be remapped.
    varNewMin : float
        Desired minimum value of new, remapped array.
    varNewMax : float
        Desired maximum value of new, remapped array.
    varOldThrMin : float
        Theoretical minimum of old distribution. Can be specified if this
        theoretical minimum does not occur in empirical distribution but
        should be considered nonetheless.
    varOldAbsMax : float
        Theoretical maximum of old distribution. Can be specified if this
        theoretical maximum does not occur in empirical distribution but
        should be considered nonetheless.

    Returns
    -------
    aryVls : 1D numpy array
        Array with remapped values.

    """
    if varOldThrMin is None:
        varOldMin = aryVls.min()
    else:
        varOldMin = varOldThrMin
    if varOldAbsMax is None:
        varOldMax = aryVls.max()
    else:
        varOldMax = varOldAbsMax

    aryNewVls = np.empty((aryVls.shape), dtype=aryVls.dtype)
    for ind, val in enumerate(aryVls):
        aryNewVls[ind] = (((val - varOldMin) * (varNewMax - varNewMin)) /
                          (varOldMax - varOldMin)) + varNewMin

    return aryNewVls
Remap values in an array from one range to another.

Parameters
----------
aryVls : 1D numpy array
    Array with values that need to be remapped.
varNewMin : float
    Desired minimum value of new, remapped array.
varNewMax : float
    Desired maximum value of new, remapped array.
varOldThrMin : float
    Theoretical minimum of old distribution. Can be specified if this
    theoretical minimum does not occur in empirical distribution but should
    be considered nonetheless.
varOldAbsMax : float
    Theoretical maximum of old distribution. Can be specified if this
    theoretical maximum does not occur in empirical distribution but should
    be considered nonetheless.

Returns
-------
aryVls : 1D numpy array
    Array with remapped values.
entailment
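A numeric example of the remapping, with and without a theoretical old range:

import numpy as np

aryVls = np.array([2.0, 5.0, 8.0])

# Remap from the empirical range [2, 8] to [0, 1]
print(rmp_rng(aryVls, 0.0, 1.0))  # [0.  0.5 1. ]

# Remap against a theoretical old range [0, 10] instead
print(rmp_rng(aryVls, 0.0, 1.0, varOldThrMin=0.0, varOldAbsMax=10.0))
# [0.2 0.5 0.8]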
def rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize, varExtXmin,
                      varExtXmax, varExtYmin, varExtYmax):
    """Remap x, y, sigma parameters from degrees to pixel.

    Parameters
    ----------
    vecX : 1D numpy array
        Array with possible x parameters in degree
    vecY : 1D numpy array
        Array with possible y parameters in degree
    vecPrfSd : 1D numpy array
        Array with possible sd parameters in degree
    tplPngSize : tuple, 2
        Pixel dimensions of the visual space in pixel (width, height).
    varExtXmin : float
        Extent of visual space from centre in negative x-direction (width)
    varExtXmax : float
        Extent of visual space from centre in positive x-direction (width)
    varExtYmin : float
        Extent of visual space from centre in negative y-direction (height)
    varExtYmax : float
        Extent of visual space from centre in positive y-direction (height)

    Returns
    -------
    aryPrmPxl : 2D numpy array, int32
        Column stack of the x, y and sd parameters, remapped to pixel units.

    """
    # Remap modelled x-positions of the pRFs:
    vecXpxl = rmp_rng(vecX, 0.0, (tplPngSize[0] - 1), varOldThrMin=varExtXmin,
                      varOldAbsMax=varExtXmax)

    # Remap modelled y-positions of the pRFs:
    vecYpxl = rmp_rng(vecY, 0.0, (tplPngSize[1] - 1), varOldThrMin=varExtYmin,
                      varOldAbsMax=varExtYmax)

    # We calculate the scaling factor from degrees of visual angle to
    # pixels separately for the x- and the y-directions (the two should
    # be the same).
    varDgr2PixX = np.divide(tplPngSize[0], (varExtXmax - varExtXmin))
    varDgr2PixY = np.divide(tplPngSize[1], (varExtYmax - varExtYmin))

    # Check whether varDgr2PixX and varDgr2PixY are similar:
    strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
        'stimulus space (in degrees of visual angle) and the ' + \
        'ratio of X and Y dimensions in the upsampled visual space ' + \
        'do not agree'
    assert 0.5 > np.absolute((varDgr2PixX - varDgr2PixY)), strErrMsg

    # Convert pRF sizes from degrees of visual angle to pixel
    vecPrfSdpxl = np.multiply(vecPrfSd, varDgr2PixX)

    # Return new values in column stack.
    # Since values are now in pixel, they should be integer
    return np.column_stack((vecXpxl, vecYpxl, vecPrfSdpxl)).astype(np.int32)
Remap x, y, sigma parameters from degrees to pixel.

Parameters
----------
vecX : 1D numpy array
    Array with possible x parameters in degree
vecY : 1D numpy array
    Array with possible y parameters in degree
vecPrfSd : 1D numpy array
    Array with possible sd parameters in degree
tplPngSize : tuple, 2
    Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
    Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
    Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
    Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
    Extent of visual space from centre in positive y-direction (height)

Returns
-------
aryPrmPxl : 2D numpy array, int32
    Column stack of the x, y and sd parameters, remapped to pixel units.
entailment
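A worked example, assuming a visual space of 8 x 8 degrees rendered at 128 x 128 pixels (so 16 pixels per degree):

import numpy as np

vecX = np.array([0.0, 4.0])
vecY = np.array([0.0, 0.0])
vecSd = np.array([1.0, 2.0])

aryPix = rmp_deg_pixel_xys(vecX, vecY, vecSd, (128, 128),
                           -4.0, 4.0, -4.0, 4.0)
print(aryPix)  # x and y in pixel coordinates; sd scaled to 16 and 32 pixels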
def cnvl_2D_gauss(idxPrc, aryMdlParamsChnk, arySptExpInf, tplPngSize, queOut, strCrd='crt'): """Spatially convolve input with 2D Gaussian model. Parameters ---------- idxPrc : int Process ID of the process calling this function (for CPU multi-threading). In GPU version, this parameter is 0 (just one thread on CPU). aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params] Array with the model parameter combinations for this chunk. arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions] All spatial conditions stacked along second axis. tplPngSize : tuple, 2. Pixel dimensions of the visual space (width, height). queOut : multiprocessing.queues.Queue Queue to put the results on. If this is None, the user is not running multiprocessing but is just calling the function strCrd, string, either 'crt' or 'pol' Whether model parameters are provided in cartesian or polar coordinates Returns ------- data : 2d numpy array, shape [n_models, n_conditions] Closed data. Reference --------- [1] """ # Number of combinations of model parameters in the current chunk: varChnkSze = aryMdlParamsChnk.shape[0] # Number of conditions / time points of the input data varNumLstAx = arySptExpInf.shape[-1] # Output array with results of convolution: aryOut = np.zeros((varChnkSze, varNumLstAx)) # Loop through combinations of model parameters: for idxMdl in range(0, varChnkSze): # Spatial parameters of current model: if strCrd == 'pol': # Position was given in polar coordinates varTmpEcc = aryMdlParamsChnk[idxMdl, 0] varTmpPlrAng = aryMdlParamsChnk[idxMdl, 1] # Convert from polar to to cartesian coordinates varTmpX = varTmpEcc * np.cos(varTmpPlrAng) + tplPngSize[0]/2. varTmpY = varTmpEcc * np.sin(varTmpPlrAng) + tplPngSize[1]/2. elif strCrd == 'crt': varTmpX = aryMdlParamsChnk[idxMdl, 0] varTmpY = aryMdlParamsChnk[idxMdl, 1] # Standard deviation does not depend on coordinate system varTmpSd = aryMdlParamsChnk[idxMdl, 2] # Create pRF model (2D): aryGauss = crt_2D_gauss(tplPngSize[0], tplPngSize[1], varTmpX, varTmpY, varTmpSd) # Multiply pixel-time courses with Gaussian pRF models: aryCndTcTmp = np.multiply(arySptExpInf, aryGauss[:, :, None]) # Calculate sum across x- and y-dimensions - the 'area under the # Gaussian surface'. aryCndTcTmp = np.sum(aryCndTcTmp, axis=(0, 1)) # Put model time courses into function's output with 2d Gaussian # arrray: aryOut[idxMdl, :] = aryCndTcTmp if queOut is None: # if user is not using multiprocessing, return the array directly return aryOut else: # Put column with the indices of model-parameter-combinations into the # output array (in order to be able to put the pRF model time courses # into the correct order after the parallelised function): lstOut = [idxPrc, aryOut] # Put output to queue: queOut.put(lstOut)
Spatially convolve input with 2D Gaussian model.

Parameters
----------
idxPrc : int
    Process ID of the process calling this function (for CPU
    multi-threading). In GPU version, this parameter is 0 (just one thread on
    CPU).
aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params]
    Array with the model parameter combinations for this chunk.
arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
    All spatial conditions stacked along the last axis.
tplPngSize : tuple, 2
    Pixel dimensions of the visual space (width, height).
queOut : multiprocessing.queues.Queue
    Queue to put the results on. If this is None, the user is not running
    multiprocessing but is just calling the function
strCrd, string, either 'crt' or 'pol'
    Whether model parameters are provided in cartesian or polar coordinates

Returns
-------
data : 2d numpy array, shape [n_models, n_conditions]
    Convolved data.
entailment
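A direct call without multiprocessing (passing queOut=None returns the array); this assumes crt_2D_gauss from the same module is importable, and uses random stimulus frames purely for illustration:

import numpy as np

# One model: x=10, y=10, sd=3 (cartesian), on a 20 x 20 pixel space
aryMdlParamsChnk = np.array([[10.0, 10.0, 3.0]])

# Five hypothetical spatial conditions
arySptExpInf = np.random.rand(20, 20, 5)

aryOut = cnvl_2D_gauss(0, aryMdlParamsChnk, arySptExpInf, (20, 20),
                       None, strCrd='crt')
print(aryOut.shape)  # (1, 5): one model response per condition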
def process(self, checksum, revision=None):
    """
    Process a new revision and detect a revert if it occurred.  Note
    that you can pass whatever you like as `revision` and it will be
    returned in the case that a revert occurs.

    :Parameters:
        checksum : str
            Any identity-matchable string-based hash of revision content
        revision : `mixed`
            Revision metadata.  Note that any data will just be returned
            in the case of a revert.

    :Returns:
        a :class:`~mwreverts.Revert` if one occurred or `None`
    """
    revert = None

    if checksum in self:  # potential revert

        reverteds = list(self.up_to(checksum))

        if len(reverteds) > 0:  # If no reverted revisions, this is a noop
            revert = Revert(revision, reverteds, self[checksum])

    self.insert(checksum, revision)
    return revert
Process a new revision and detect a revert if it occurred. Note that you can
pass whatever you like as `revision` and it will be returned in the case that
a revert occurs.

:Parameters:
    checksum : str
        Any identity-matchable string-based hash of revision content
    revision : `mixed`
        Revision metadata. Note that any data will just be returned in the
        case of a revert.

:Returns:
    a :class:`~mwreverts.Revert` if one occurred or `None`
entailment
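A usage sketch with the surrounding detector class (in mwreverts this is Detector, whose radius argument bounds how far back the history is searched; the checksums and metadata below are invented):

detector = Detector(radius=15)

detector.process("aaa", {"rev_id": 1})
detector.process("bbb", {"rev_id": 2})
revert = detector.process("aaa", {"rev_id": 3})  # same checksum as rev 1

if revert is not None:
    print(revert.reverteds)  # [{'rev_id': 2}] - the revision(s) undone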
def main(): """pyprf_feature entry point.""" # %% Print Welcome message strWelcome = 'pyprf_feature ' + __version__ strDec = '=' * len(strWelcome) print(strDec + '\n' + strWelcome + '\n' + strDec) # %% Get list of input arguments # Create parser object: objParser = argparse.ArgumentParser() # Add argument to namespace - config file path: objParser.add_argument('-config', metavar='config.csv', help='Absolute file path of config file with \ parameters for pRF analysis. Ignored if in \ testing mode.' ) # Add argument to namespace -mdl_rsp flag: objParser.add_argument('-strPathHrf', default=None, required=False, metavar='/path/to/custom_hrf_parameter.npy', help='Path to npy file with custom hrf parameters. \ Ignored if in testing mode.') objParser.add_argument('-supsur', nargs='+', help='List of floats that represent the ratio of \ size neg surround to size pos center.', type=float, default=None) # Add argument to namespace -save_tc flag: objParser.add_argument('-save_tc', dest='save_tc', action='store_true', default=False, help='Save fitted and empirical time courses to \ nifti file. Ignored if in testing mode.') # Add argument to namespace -mdl_rsp flag: objParser.add_argument('-mdl_rsp', dest='lgcMdlRsp', action='store_true', default=False, help='When saving fitted and empirical time \ courses, should fitted aperture responses be \ saved as well? Ignored if in testing mode.') # Namespace object containign arguments and values: objNspc = objParser.parse_args() # Get path of config file from argument parser: strCsvCnfg = objNspc.config # %% Decide which action to perform # If no config argument is provided, print info to user. if strCsvCnfg is None: print('Please provide the file path to a config file, e.g.:') print(' pyprf_feature -config /path/to/my_config_file.csv') # If config file is provided, either perform fitting or recreate fitted # and empirical time courses depending on whether save_tc is True or False else: # Signal non-test mode to lower functions (needed for pytest): lgcTest = False # If save_tc true, save fitted and empirical time courses to nifti file # This assumes that fitting has already been run and will throw an # error if the resulting nii files of the fitting cannot be found. if objNspc.save_tc: print('***Mode: Save fitted and empirical time courses***') if objNspc.lgcMdlRsp: print(' ***Also save fitted aperture responses***') # Call to function save_tc_to_nii(strCsvCnfg, lgcTest=lgcTest, lstRat=objNspc.supsur, lgcMdlRsp=objNspc.lgcMdlRsp, strPathHrf=objNspc.strPathHrf) # If save_tc false, perform pRF fitting, either with or without # suppressive surround else: # Perform pRF fitting without suppressive surround if objNspc.supsur is None: print('***Mode: Fit pRF models, no suppressive surround***') # Call to main function, to invoke pRF fitting: pyprf(strCsvCnfg, lgcTest, varRat=None, strPathHrf=objNspc.strPathHrf) # Perform pRF fitting with suppressive surround else: print('***Mode: Fit pRF models, suppressive surround***') # Load config parameters from csv file into dictionary: dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest, lgcPrint=False) # Load config parameters from dictionary into namespace. # We do this on every loop so we have a fresh start in case # variables are redefined during the prf analysis cfg = cls_set_config(dicCnfg) # Make sure that lgcCrteMdl is set to True since we will need # to loop iteratively over pyprf_feature with different ratios # for size surround to size center. 
On every loop models,
                # reflecting the new ratio, need to be created from scratch
                errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.'
                assert cfg.lgcCrteMdl, errorMsg

                # Make sure that switchHrf is set to 1. It would not make
                # sense to find the negative surround for the hrf derivative
                # function
                errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.'
                assert cfg.switchHrfSet == 1, errorMsg

                # Get list with size ratios
                lstRat = objNspc.supsur

                # Make sure that all ratios are larger than 1.0
                errorMsg = 'All provided ratios need to be larger than 1.0'
                assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg

                # Append None as the first entry, so fitting without surround
                # is performed once as well
                lstRat.insert(0, None)

                # Loop over ratios and find best pRF
                for varRat in lstRat:

                    # Print to command line, so the user knows which ratio
                    # is used
                    print('---Ratio surround to center: ' + str(varRat))

                    # Call to main function, to invoke pRF analysis:
                    pyprf(strCsvCnfg, lgcTest=lgcTest, varRat=varRat,
                          strPathHrf=objNspc.strPathHrf)

                # List with name suffixes of output images:
                lstNiiNames = ['_x_pos',
                               '_y_pos',
                               '_SD',
                               '_R2',
                               '_polar_angle',
                               '_eccentricity',
                               '_Betas']

                # Compare results for the different ratios, export nii files
                # based on the results of the comparison and delete in-between
                # results

                # Replace first entry (None) with 1, so it can be saved to nii
                lstRat[0] = 1.0

                # Append 'hrf' to cfg.strPathOut, if fitting was done with
                # custom hrf
                if objNspc.strPathHrf is not None:
                    cfg.strPathOut = cfg.strPathOut + '_hrf'

                cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut,
                           cfg.strPathMdl, lgcDel=True)
pyprf_feature entry point.
entailment
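Illustrative invocations of the command-line interface defined above (paths and values are placeholders):

pyprf_feature -config /path/to/my_config_file.csv
pyprf_feature -config /path/to/my_config_file.csv -supsur 1.5 2.0 3.0
pyprf_feature -config /path/to/my_config_file.csv -save_tc -mdl_rsp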
def funcSmthSpt(aryFuncChnk, varSdSmthSpt): """Apply spatial smoothing to the input data. Parameters ---------- aryFuncChnk : np.array TODO varSdSmthSpt : float (?) Extent of smoothing. Returns ------- aryFuncChnk : np.array Smoothed data. """ varNdim = aryFuncChnk.ndim # Number of time points in this chunk: varNumVol = aryFuncChnk.shape[-1] # Loop through volumes: if varNdim == 4: for idxVol in range(0, varNumVol): aryFuncChnk[:, :, :, idxVol] = gaussian_filter( aryFuncChnk[:, :, :, idxVol], varSdSmthSpt, order=0, mode='nearest', truncate=4.0) elif varNdim == 5: varNumMtnDrctns = aryFuncChnk.shape[3] for idxVol in range(0, varNumVol): for idxMtn in range(0, varNumMtnDrctns): aryFuncChnk[:, :, :, idxMtn, idxVol] = gaussian_filter( aryFuncChnk[:, :, :, idxMtn, idxVol], varSdSmthSpt, order=0, mode='nearest', truncate=4.0) # Output list: return aryFuncChnk
Apply spatial smoothing to the input data. Parameters ---------- aryFuncChnk : np.array TODO varSdSmthSpt : float (?) Extent of smoothing. Returns ------- aryFuncChnk : np.array Smoothed data.
entailment
def funcSmthTmp(aryFuncChnk, varSdSmthTmp):
    """Apply temporal smoothing to fMRI data & pRF time course models.

    Parameters
    ----------
    aryFuncChnk : np.array
        TODO
    varSdSmthTmp : float (?)
        Extent of smoothing.

    Returns
    -------
    aryFuncChnk : np.array
        TODO

    """
    # For the filtering to perform well at the ends of the time series, we
    # set the method to 'nearest' and place a volume with mean intensity
    # (over time) at the beginning and at the end.
    aryFuncChnkMean = np.mean(aryFuncChnk,
                              axis=1,
                              keepdims=True)

    aryFuncChnk = np.concatenate((aryFuncChnkMean,
                                  aryFuncChnk,
                                  aryFuncChnkMean), axis=1)

    # In the input data, time goes from left to right. Therefore, we apply
    # the filter along axis=1.
    aryFuncChnk = gaussian_filter1d(aryFuncChnk,
                                    varSdSmthTmp,
                                    axis=1,
                                    order=0,
                                    mode='nearest',
                                    truncate=4.0)

    # Remove mean-intensity volumes at the beginning and at the end:
    aryFuncChnk = aryFuncChnk[:, 1:-1]

    # Output array:
    return aryFuncChnk
Apply temporal smoothing to fMRI data & pRF time course models.

Parameters
----------
aryFuncChnk : np.array
    TODO
varSdSmthTmp : float (?)
    Extent of smoothing.

Returns
-------
aryFuncChnk : np.array
    TODO
entailment
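A quick sanity check on toy data; the mean-intensity padding volumes added inside the function are stripped again, so the shape is preserved:

import numpy as np

# Ten voxels, 100 time points; smooth with an SD of 2 volumes
aryFuncChnk = np.random.randn(10, 100).astype(np.float32)
arySmth = funcSmthTmp(aryFuncChnk, 2.0)

print(arySmth.shape)  # (10, 100)
print(np.std(arySmth) < np.std(aryFuncChnk))  # True: smoothing damps noise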
def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True): """ Prepare pRF model time courses. Parameters ---------- aryPrfTc : np.array 4D numpy array with pRF time course models, with following dimensions: `aryPrfTc[x-position, y-position, SD, volume]`. varSdSmthTmp : float Extent of temporal smoothing that is applied to functional data and pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`, no temporal smoothing is applied. lgcPrint : boolean Whether print statements should be executed. Returns ------- aryPrfTc : np.array 4D numpy array with prepared pRF time course models, same dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`). """ if lgcPrint: print('------Prepare pRF time course models') # Define temporal smoothing of pRF time course models def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True): """Apply temporal smoothing to fMRI data & pRF time course models. Parameters ---------- aryPrfTc : np.array 4D numpy array with pRF time course models, with following dimensions: `aryPrfTc[x-position, y-position, SD, volume]`. varSdSmthTmp : float, positive Extent of temporal smoothing that is applied to functional data and pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`, no temporal smoothing is applied. lgcPrint : boolean Whether print statements should be executed. Returns ------- aryPrfTc : np.array 4D numpy array with prepared pRF time course models, same dimension as input (`aryPrfTc[x-position, y-position, SD, volume]`). """ # adjust the input, if necessary, such that input is 2D, with last # dim time tplInpShp = aryPrfTc.shape aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1])) # For the filtering to perform well at the ends of the time series, we # set the method to 'nearest' and place a volume with mean intensity # (over time) at the beginning and at the end. aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1) aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean), axis=-1) # In the input data, time goes from left to right. Therefore, we apply # the filter along axis=1. aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp, axis=-1, order=0, mode='nearest', truncate=4.0) # Remove mean-intensity volumes at the beginning and at the end: aryPrfTc = aryPrfTc[..., 1:-1] # Output array: return aryPrfTc.reshape(tplInpShp).astype('float16') # Perform temporal smoothing of pRF time course models if 0.0 < varSdSmthTmp: if lgcPrint: print('---------Temporal smoothing on pRF time course models') print('------------SD tmp smooth is: ' + str(varSdSmthTmp)) aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp) # Z-score the prf time course models if lgcPrint: print('---------Zscore the pRF time course models') # De-mean the prf time course models: aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None]) # Standardize the prf time course models: # In order to avoid devision by zero, only divide those voxels with a # standard deviation greater than zero: aryTmpStd = np.std(aryPrfTc, axis=-1) aryTmpLgc = np.greater(aryTmpStd, np.array([0.0])) aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :], aryTmpStd[aryTmpLgc, None]) return aryPrfTc
Prepare pRF model time courses. Parameters ---------- aryPrfTc : np.array 4D numpy array with pRF time course models, with following dimensions: `aryPrfTc[x-position, y-position, SD, volume]`. varSdSmthTmp : float Extent of temporal smoothing that is applied to functional data and pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`, no temporal smoothing is applied. lgcPrint : boolean Whether print statements should be executed. Returns ------- aryPrfTc : np.array 4D numpy array with prepared pRF time course models, same dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
entailment
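A toy call; after preparation every model time course is demeaned and, where its variance is non-zero, z-scored:

import numpy as np

# 4 x-positions, 4 y-positions, 2 sizes, 50 volumes of random model data
aryPrfTc = np.random.randn(4, 4, 2, 50).astype(np.float32)
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=False)

# Means are (close to) zero along the time axis
print(np.allclose(np.mean(aryPrfTc, axis=-1), 0.0, atol=1e-2))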
def prep_func(strPathNiiMask, lstPathNiiFunc, varAvgThr=100., varVarThr=0.0001, strPrePro='demean'): """ Load & prepare functional data. Parameters ---------- strPathNiiMask: str Path to mask used to restrict pRF model finding. Only voxels with a value greater than zero in the mask are considered. lstPathNiiFunc : list List of paths of functional data (nii files). varAvgThr : float, positive, default = 100. Float. Voxels that have at least one run with a mean lower than this (before demeaning) will be excluded from model fitting. varVarThr : float, positive, default = 0.0001 Float. Voxels that have at least one run with a variance lower than this (after demeaning) will be excluded from model fitting. strPrePro : string, default 'demean' Preprocessing that will be applied to the data. By default they are demeaned. Returns ------- aryLgcMsk : np.array 3D numpy array with logial values. Externally supplied mask (e.g grey matter mask). Voxels that are `False` in the mask are excluded. vecLgcIncl : np.array 1D numpy array containing logical values. One value per voxel after mask has been applied. If `True`, the variance and mean of the voxel's time course are greater than the provided thresholds in all runs and the voxel is included in the output array (`aryFunc`). If `False`, the variance or mean of the voxel's time course is lower than threshold in at least one run and the voxel has been excluded from the output (`aryFunc`). This is to avoid problems in the subsequent model fitting. This array is necessary to put results into original dimensions after model fitting. hdrMsk : nibabel-header-object Nii header of mask. aryAff : np.array Array containing 'affine', i.e. information about spatial positioning of mask nii data. aryFunc : np.array 2D numpy array containing prepared functional data, of the form aryFunc[voxelCount, time]. tplNiiShp : tuple Spatial dimensions of input nii data (number of voxels in x, y, z direction). The data are reshaped during preparation, this information is needed to fit final output into original spatial dimensions. Notes ----- Functional data is loaded from disk. The functional data is reshaped, into the form aryFunc[voxel, time]. A mask is applied (externally supplied, e.g. a grey matter mask). Subsequently, the functional data is de-meaned. 
""" print('------Load & prepare nii data') # Load mask (to restrict model fitting): aryMask, hdrMsk, aryAff = load_nii(strPathNiiMask) # Mask is loaded as float32, but is better represented as integer: aryMask = np.array(aryMask).astype(np.int16) # Number of non-zero voxels in mask: # varNumVoxMsk = int(np.count_nonzero(aryMask)) # Dimensions of nii data: tplNiiShp = aryMask.shape # Total number of voxels: varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2]) # Reshape mask: aryMask = np.reshape(aryMask, varNumVoxTlt) # List for arrays with functional data (possibly several runs): lstFunc = [] # List for averages of the individual runs (before demeaning) lstFuncAvg = [] # List for variances of the individual runs (after demeaning) lstFuncVar = [] # Number of runs: varNumRun = len(lstPathNiiFunc) # Loop through runs and load data: for idxRun in range(varNumRun): print(('---------Prepare run ' + str(idxRun + 1))) # Load 4D nii data: aryTmpFunc, _, _ = load_nii(lstPathNiiFunc[idxRun]) # Dimensions of nii data (including temporal dimension; spatial # dimensions need to be the same for mask & functional data): tplNiiShp = aryTmpFunc.shape # Reshape functional nii data, from now on of the form # aryTmpFunc[voxelCount, time]: aryTmpFunc = np.reshape(aryTmpFunc, [varNumVoxTlt, tplNiiShp[3]]) # Apply mask: print('------------Mask') aryLgcMsk = np.greater(aryMask.astype(np.int16), np.array([0], dtype=np.int16)[0]) aryTmpFunc = aryTmpFunc[aryLgcMsk, :] # save the mean of the run lstFuncAvg.append(np.mean(aryTmpFunc, axis=1, dtype=np.float32)) # also save the variance of the run lstFuncVar.append(np.var(aryTmpFunc, axis=1, dtype=np.float32)) # De-mean functional data: if strPrePro == 'demean': print('------------Demean') aryTmpFunc = np.subtract(aryTmpFunc, np.mean(aryTmpFunc, axis=1, dtype=np.float32)[:, None]) elif strPrePro == 'zscore': print('------------Zscore') aryTmpFunc = np.subtract(aryTmpFunc, np.mean(aryTmpFunc, axis=1, dtype=np.float32)[:, None]) # Standardize the data time courses: # In order to avoid devision by zero, only divide # those voxels with a standard deviation greater # than zero: aryTmpStd = np.std(aryTmpFunc, axis=-1) aryTmpLgc = np.greater(aryTmpStd, np.array([0.0])) aryTmpFunc[aryTmpLgc, :] = np.divide(aryTmpFunc[aryTmpLgc, :], aryTmpStd[aryTmpLgc, None]) # Put prepared functional data of current run into list: lstFunc.append(aryTmpFunc) del(aryTmpFunc) # Put functional data from separate runs into one array. 2D array of the # form aryFunc[voxelCount, time] aryFunc = np.concatenate(lstFunc, axis=1).astype(np.float32, copy=False) del(lstFunc) # Put the averages (before demeaning) from the separate runs into one # array. 2D array of the form aryFuncVar[voxelCount, nr of runs] aryFuncAvg = np.stack(lstFuncAvg, axis=1).astype(np.float32, copy=False) del(lstFuncAvg) # Put the variance (after demeaning) from the separate runs into one array. # 2D array of the form aryFuncVar[voxelCount, nr of runs] aryFuncVar = np.stack(lstFuncVar, axis=1).astype(np.float32, copy=False) del(lstFuncVar) # Especially if data were recorded in different sessions, there can # sometimes be voxels that have close to zero signal in runs from one # session but regular signal in the runs from another session. These voxels # are very few, are located at the edge of the functional and can cause # problems during model fitting. They are therefore excluded. # Is the mean greater than threshold? 
    aryLgcAvg = np.greater(aryFuncAvg,
                           np.array([varAvgThr]).astype(np.float32)[0])

    # Mean needs to be greater than threshold in every single run:
    vecLgcAvg = np.all(aryLgcAvg, axis=1)

    # Voxels that are outside the brain and have no, or very little, signal
    # should not be included in the pRF model finding. We take the variance
    # over time and exclude voxels with a suspiciously low variance, if they
    # have low variance in at least one run. Because the data given into the
    # cython or GPU function has float32 precision, we calculate the variance
    # on data with float32 precision.

    # Is the variance greater than threshold?
    aryLgcVar = np.greater(aryFuncVar,
                           np.array([varVarThr]).astype(np.float32)[0])

    # Variance needs to be greater than threshold in every single run:
    vecLgcVar = np.all(aryLgcVar, axis=1)

    # Are there any nan values in the functional time series?
    vecLgcNan = np.invert(np.any(np.isnan(aryFunc), axis=1))

    # Combine the logical vectors for exclusion resulting from low variance
    # and low mean signal time course:
    vecLgcIncl = np.logical_and(vecLgcAvg, vecLgcVar)

    # Combine logical vectors for mean/variance with vector for nan
    # exclusion:
    vecLgcIncl = np.logical_and(vecLgcIncl, vecLgcNan)

    # Array with functional data for which conditions (mask inclusion and
    # cutoff value) are fulfilled:
    aryFunc = aryFunc[vecLgcIncl, :]

    # Print info about the exclusion of voxels:
    print('---------Minimum mean threshold for voxels applied at: '
          + str(varAvgThr))
    print('---------Minimum variance threshold for voxels applied at: '
          + str(varVarThr))
    print('---------Number of voxels excluded due to low mean or variance: '
          + str(np.sum(np.invert(vecLgcIncl))))

    return aryLgcMsk, vecLgcIncl, hdrMsk, aryAff, aryFunc, tplNiiShp
Load & prepare functional data.

Parameters
----------
strPathNiiMask : str
    Path to mask used to restrict pRF model finding. Only voxels with a
    value greater than zero in the mask are considered.
lstPathNiiFunc : list
    List of paths of functional data (nii files).
varAvgThr : float, positive, default = 100.
    Float. Voxels that have at least one run with a mean lower than this
    (before demeaning) will be excluded from model fitting.
varVarThr : float, positive, default = 0.0001
    Float. Voxels that have at least one run with a variance lower than
    this (after demeaning) will be excluded from model fitting.
strPrePro : string, default 'demean'
    Preprocessing that will be applied to the data. By default they are
    demeaned.

Returns
-------
aryLgcMsk : np.array
    3D numpy array with logical values. Externally supplied mask (e.g.
    grey matter mask). Voxels that are `False` in the mask are excluded.
vecLgcIncl : np.array
    1D numpy array containing logical values. One value per voxel after
    mask has been applied. If `True`, the variance and mean of the voxel's
    time course are greater than the provided thresholds in all runs and
    the voxel is included in the output array (`aryFunc`). If `False`, the
    variance or mean of the voxel's time course is lower than threshold in
    at least one run and the voxel has been excluded from the output
    (`aryFunc`). This is to avoid problems in the subsequent model fitting.
    This array is necessary to put results into original dimensions after
    model fitting.
hdrMsk : nibabel-header-object
    Nii header of mask.
aryAff : np.array
    Array containing 'affine', i.e. information about spatial positioning
    of mask nii data.
aryFunc : np.array
    2D numpy array containing prepared functional data, of the form
    aryFunc[voxelCount, time].
tplNiiShp : tuple
    Spatial dimensions of input nii data (number of voxels in x, y, z
    direction). The data are reshaped during preparation, this information
    is needed to fit final output into original spatial dimensions.

Notes
-----
Functional data is loaded from disk. The functional data is reshaped, into
the form aryFunc[voxel, time]. A mask is applied (externally supplied, e.g.
a grey matter mask). Subsequently, the functional data is de-meaned.
entailment
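The run-wise exclusion logic in prep_func reduces to an np.all over runs: a voxel survives only if its mean and variance exceed the thresholds in every run. A small sketch with made-up means and variances for five voxels in three runs, using the default thresholds from above:

import numpy as np

# Hypothetical per-run means (before demeaning) for 5 voxels, 3 runs:
aryFuncAvg = np.array([[120., 130., 125.],
                       [120., 130., 50.],    # low mean in run 3
                       [150., 140., 160.],
                       [110., 105., 115.],
                       [90., 200., 210.]])   # low mean in run 1
# Hypothetical per-run variances (after demeaning):
aryFuncVar = np.full((5, 3), 1.0)

# A voxel is kept only if mean and variance exceed threshold in every run:
vecLgcAvg = np.all(np.greater(aryFuncAvg, 100.), axis=1)
vecLgcVar = np.all(np.greater(aryFuncVar, 0.0001), axis=1)
vecLgcIncl = np.logical_and(vecLgcAvg, vecLgcVar)
print(vecLgcIncl)  # [ True False  True  True False]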
def get(self, style):
    """ what's the value of a style at the current stack level"""
    level = len(self.stack) - 1
    while level >= 0:
        if style in self.stack[level]:
            return self.stack[level][style]
        level -= 1
    return None
what's the value of a style at the current stack level
entailment
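The fall-through behaviour of get (inner stack levels shadow outer ones) can be reproduced with a plain list of dicts; the style names here are illustrative, not taken from the source:

# Outermost level first, current level last:
stack = [{'bold': 'off', 'size': 'normal'}, {'bold': 'on'}]

def get(style):
    for level in reversed(stack):
        if style in level:
            return level[style]
    return None

print(get('bold'))  # 'on'      (found at the current level)
print(get('size'))  # 'normal'  (falls back to the outer level)
print(get('font'))  # None      (not set at any level)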
def enforce_type(self, attr, val):
    """converts a value to the attribute's type"""
    if attr not in self.types:
        return utfstr(val)
    elif self.types[attr] == 'int':
        return int(float(val))
    elif self.types[attr] == 'float':
        return float(val)
    else:
        return utfstr(val)
converts a value to the attribute's type
entailment
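A standalone sketch of the same coercion logic; the types table below is hypothetical, and str stands in for the module's utfstr helper. Note that int(float(val)) is used so that strings like '2.0' still coerce to an int:

types = {'indent': 'int', 'tabwidth': 'float'}  # illustrative type table

def enforce_type(attr, val):
    if attr not in types:
        return str(val)
    elif types[attr] == 'int':
        return int(float(val))    # int('2.0') would raise; go via float
    elif types[attr] == 'float':
        return float(val)
    else:
        return str(val)

print(enforce_type('indent', '2.0'))    # 2
print(enforce_type('tabwidth', '1.5'))  # 1.5
print(enforce_type('align', 'left'))    # 'left' (unknown attr stays a string)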
def push(self, style={}):
    """push a new level on the stack with a style dictionary containing style:value pairs"""
    _style = {}
    for attr in style:
        if attr in self.cmds and style[attr] not in self.cmds[attr]:
            print('WARNING: ESC/POS PRINTING: ignoring invalid value: '
                  + utfstr(style[attr]) + ' for style: ' + utfstr(attr))
        else:
            _style[attr] = self.enforce_type(attr, style[attr])
    self.stack.append(_style)
push a new level on the stack with a style dictionary containing style:value pairs
entailment
def set(self, style={}):
    """overrides style values at the current stack level"""
    for attr in style:
        if attr in self.cmds and style[attr] not in self.cmds[attr]:
            print('WARNING: ESC/POS PRINTING: ignoring invalid value: '
                  + utfstr(style[attr]) + ' for style: ' + utfstr(attr))
        else:
            self.stack[-1][attr] = self.enforce_type(attr, style[attr])
overrides style values at the current stack level
entailment
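How push and set interact can be sketched without the class: push opens a new level (outer values remain visible through get), while set mutates the current level in place. Validation against self.cmds and the enforce_type coercion are omitted here:

stack = [{'bold': 'off'}]

def push(style):        # new level; outer values still reachable
    stack.append(dict(style))

def set_style(style):   # override at the current level only
    stack[-1].update(style)

push({'bold': 'on'})
set_style({'underline': 'on'})
print(stack)  # [{'bold': 'off'}, {'bold': 'on', 'underline': 'on'}]
stack.pop()   # leaving the level restores the outer style
print(stack)  # [{'bold': 'off'}]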
def to_escpos(self):
    """ converts the current style to an escpos command string """
    cmd = ''
    # Emit the command for each style in its defined '_order':
    ordered_cmds = sorted(self.cmds, key=lambda x: self.cmds[x]['_order'])
    for style in ordered_cmds:
        cmd += self.cmds[style][self.get(style)]
    return cmd
converts the current style to an escpos command string
entailment
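The '_order'-based sorting in to_escpos can be shown on its own; the command table below is made up, and the byte strings are placeholders rather than real ESC/POS sequences:

cmds = {'bold': {'_order': 2, 'on': '\x1bE\x01', 'off': '\x1bE\x00'},
        'align': {'_order': 1, 'left': '\x1ba\x00'}}

# Styles are emitted in ascending '_order', not in dict order:
ordered = sorted(cmds, key=lambda x: cmds[x]['_order'])
print(ordered)  # ['align', 'bold']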