sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def start_inline(self, stylestack=None):
    """Open an inline entity, optionally applying a style definition.

    When unflushed text is pending (dirty), a single space is emitted
    first so consecutive inline entities stay separated.
    """
    self.stack.append('inline')
    if self.dirty:
        self.escpos._raw(' ')
    if stylestack:
        self.style(stylestack)
starts an inline entity with an optional style definition
entailment
def start_block(self, stylestack=None):
    """Open a block entity, optionally applying a style definition.

    Any pending dirty text is terminated with a newline before the
    block begins.
    """
    if self.dirty:
        self.escpos._raw('\n')
        self.dirty = False
    self.stack.append('block')
    if stylestack:
        self.style(stylestack)
starts a block entity with an optional style definition
entailment
def end_entity(self):
    """Close the current entity (the active style is NOT cancelled).

    A block with pending text gets a terminating newline; the bottom of
    the stack is never popped so there is always a current entity.
    """
    if self.dirty and self.stack[-1] == 'block':
        self.escpos._raw('\n')
        self.dirty = False
    if len(self.stack) > 1:
        self.stack = self.stack[:-1]
ends the entity definition. (but does not cancel the active style!)
entailment
def pre(self, text):
    """Emit *text* into the entity with its whitespace kept intact."""
    if not text:
        return
    self.escpos.text(text)
    self.dirty = True
puts a string of text in the entity keeping the whitespace intact
entailment
def text(self, text):
    """Emit *text* into the entity, collapsing all whitespace runs
    (including newlines) to single spaces."""
    if not text:
        return
    collapsed = re.sub('\s+', ' ', utfstr(text).strip())
    if collapsed:
        self.dirty = True
        self.escpos.text(collapsed)
puts text in the entity. Whitespace and newlines are stripped to single spaces.
entailment
def _check_image_size(self, size): """ Check and fix the size of the image to 32 bits """ if size % 32 == 0: return (0, 0) else: image_border = 32 - (size % 32) if (image_border % 2) == 0: return (image_border / 2, image_border / 2) else: return (image_border / 2, (image_border / 2) + 1)
Check and fix the size of the image to 32 bits
entailment
def _print_image(self, line, size):
    """ Print formatted image

    `line` is a string of '0'/'1' pixel bits produced by _convert_image();
    `size` is its [width, height] in pixels.  Bits are packed 8 per byte
    and streamed to the printer as ESC/POS raster data.
    """
    i = 0
    cont = 0
    buffer = ""

    self._raw(S_RASTER_N)
    # Raster header bytes.
    # NOTE(review): `(size[0]/size[1])/8` looks suspect -- bytes-per-line
    # would normally be size[0]/8 (width/8); confirm against the printer
    # raster command spec before touching it.
    buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
    # NOTE(review): str.decode('hex') is Python 2 only; Python 3 would
    # need binascii.unhexlify / bytes.fromhex.
    self._raw(buffer.decode('hex'))
    buffer = ""

    while i < len(line):
        # Pack the next 8 pixel bits into one byte, rendered as hex.
        hex_string = int(line[i:i+8],2)
        buffer += "%02X" % hex_string
        i += 8
        cont += 1
        if cont % 4 == 0:
            # Flush to the device every 4 bytes.
            self._raw(buffer.decode("hex"))
            buffer = ""
            cont = 0
    # NOTE(review): a trailing buffer of fewer than 4 bytes is never
    # flushed -- possible data loss for widths not a multiple of 32 bits.
Print formatted image
entailment
def _raw_print_image(self, line, size, output=None ):
    """ Print formatted image

    Variant of _print_image() that accumulates the raster bytes into a
    string and returns it instead of streaming each chunk.
    @param output: optional callable; NOTE(review): the `__raw` helper
        that would honour it is defined but never called, so `output`
        currently has no effect -- confirm intent.
    """
    i = 0
    cont = 0
    buffer = ""
    raw = ""

    def __raw(string):
        # NOTE(review): dead code -- never invoked below.
        if output:
            output(string)
        else:
            self._raw(string)

    raw += S_RASTER_N
    # Raster header; NOTE(review): same suspicious (size[0]/size[1])/8
    # expression as _print_image(), and str.decode('hex') is Py2-only.
    buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
    raw += buffer.decode('hex')
    buffer = ""

    while i < len(line):
        # Pack 8 pixel bits into one byte (hex-encoded).
        hex_string = int(line[i:i+8],2)
        buffer += "%02X" % hex_string
        i += 8
        cont += 1
        if cont % 4 == 0:
            raw += buffer.decode("hex")
            buffer = ""
            cont = 0

    return raw
Print formatted image
entailment
def _convert_image(self, im):
    """Parse an RGB image and prepare it for printing.

    @param im: a PIL-style RGB image (needs .size and .getpixel)
    @return (tuple): (pix_line, img_size) where pix_line is a string of
        '0'/'1' pixel bits (rows padded to a 32-bit boundary) and
        img_size is the padded [width, height].
    Raises ImageSizeError when the image is taller than 255 pixels.

    Fixes vs. the original: Python 3 `print()` call; border counts
    coerced to int (``_check_image_size`` may return floats under
    Python 3's true division); the inner pattern loop no longer shadows
    the pixel loop variable ``x``.
    """
    pix_line = ""
    switch = 0
    img_size = [0, 0]

    if im.size[0] > 512:
        print("WARNING: Image is wider than 512 and could be truncated at print time ")
    if im.size[1] > 255:
        raise ImageSizeError()

    # Zero-padding on each side so every row is a multiple of 32 bits.
    left_pad, right_pad = (int(b) for b in self._check_image_size(im.size[0]))
    im_left = "0" * left_pad
    im_right = "0" * right_pad

    for y in range(im.size[1]):
        img_size[1] += 1
        pix_line += im_left
        img_size[0] += left_pad
        for x in range(im.size[0]):
            img_size[0] += 1
            RGB = im.getpixel((x, y))
            im_color = (RGB[0] + RGB[1] + RGB[2])
            # Threshold pattern: dark -> '1', mid -> alternating (dither),
            # light -> '0'.
            im_pattern = "1X0"
            pattern_len = len(im_pattern)
            switch = (switch - 1) * (-1)  # toggle 0/1 for the 'X' dither
            for idx in range(pattern_len):
                if im_color <= (255 * 3 / pattern_len * (idx + 1)):
                    if im_pattern[idx] == "X":
                        pix_line += "%d" % switch
                    else:
                        pix_line += im_pattern[idx]
                    break
                elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
                    pix_line += im_pattern[-1]
                    break
        pix_line += im_right
        img_size[0] += right_pad

    return (pix_line, img_size)
Parse image and prepare it to a printable format
entailment
def image(self, path_img):
    """Open the image file at *path_img*, convert it to a printable
    bit pattern and send it to the printer."""
    im = Image.open(path_img).convert("RGB")
    pix_line, img_size = self._convert_image(im)
    self._print_image(pix_line, img_size)
Open image file
entailment
def qr(self, text):
    """Print a QR code encoding the provided string.

    Bug fix: the original converted the QR image to a printable bit
    pattern but discarded the result, so nothing was ever sent to the
    printer.  Mirror image(): convert, then print.
    """
    qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
    qr_code.add_data(text)
    qr_code.make(fit=True)
    qr_img = qr_code.make_image()
    im = qr_img._img.convert("RGB")
    pix_line, img_size = self._convert_image(im)
    self._print_image(pix_line, img_size)
Print QR Code for the provided string
entailment
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
    """Print a barcode.

    @param code: data to encode (must be non-empty)
    @param bc: symbology: UPC-A, UPC-E, EAN13, EAN8, CODE39, ITF, NW7
    @param width: bar width, 1..255
    @param height: bar height, 2..6
    @param pos: text position: 'off', 'both', 'above', 'below' (default)
    @param font: 'a' (default) or 'b'
    Raises BarcodeSizeError, BarcodeTypeError or BarcodeCodeError.
    """
    # Center the barcode
    self._raw(TXT_ALIGN_CT)
    # Height -- bug fix: the original tested `height >= 2 or height <= 6`,
    # which is always true; the intent is clearly a 2..6 range check.
    # NOTE(review): BARCODE_HEIGHT is a fixed constant; the requested
    # height value itself is never transmitted -- confirm intent.
    if 2 <= height <= 6:
        self._raw(BARCODE_HEIGHT)
    else:
        raise BarcodeSizeError()
    # Width -- same always-true `or` bug, fixed to a 1..255 range check.
    if 1 <= width <= 255:
        self._raw(BARCODE_WIDTH)
    else:
        raise BarcodeSizeError()
    # Font
    if font.upper() == "B":
        self._raw(BARCODE_FONT_B)
    else:  # DEFAULT FONT: A
        self._raw(BARCODE_FONT_A)
    # Human-readable text position
    if pos.upper() == "OFF":
        self._raw(BARCODE_TXT_OFF)
    elif pos.upper() == "BOTH":
        self._raw(BARCODE_TXT_BTH)
    elif pos.upper() == "ABOVE":
        self._raw(BARCODE_TXT_ABV)
    else:  # DEFAULT POSITION: BELOW
        self._raw(BARCODE_TXT_BLW)
    # Symbology
    if bc.upper() == "UPC-A":
        self._raw(BARCODE_UPC_A)
    elif bc.upper() == "UPC-E":
        self._raw(BARCODE_UPC_E)
    elif bc.upper() == "EAN13":
        self._raw(BARCODE_EAN13)
    elif bc.upper() == "EAN8":
        self._raw(BARCODE_EAN8)
    elif bc.upper() == "CODE39":
        self._raw(BARCODE_CODE39)
    elif bc.upper() == "ITF":
        self._raw(BARCODE_ITF)
    elif bc.upper() == "NW7":
        self._raw(BARCODE_NW7)
    else:
        raise BarcodeTypeError()
    # Print the data
    if code:
        self._raw(code)
    else:
        # Consistency fix: raise the bare exception class like the other
        # error paths above (original used `exception.BarcodeCodeError()`).
        raise BarcodeCodeError()
Print Barcode
entailment
def receipt(self,xml):
    """ Prints an xml based receipt definition

    Parses *xml* (a utf-8 encodable string) and walks the element tree,
    translating tags (p/div/h1..h5/span/em/b, value, line, ul/ol, pre,
    hr, br, img, barcode, cut, partialcut, cashdraw) into serializer and
    printer calls.  On any error the message + traceback are printed on
    the ticket itself, the paper is cut, and the exception is re-raised.
    """

    def strclean(string):
        # Normalize None to '' and collapse whitespace runs to single spaces.
        if not string:
            string = ''
        string = string.strip()
        string = re.sub('\s+',' ',string)
        return string

    def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
        # Render a numeric value with configurable separators, width and
        # optional currency symbol ('before' or 'after' the number).
        decimals = max(0,int(decimals))
        width = max(0,int(width))
        value = float(value)
        if autoint and math.floor(value) == value:
            # autoint: drop decimals entirely for whole numbers
            decimals = 0
        if width == 0:
            width = ''
        if thousands_separator:
            formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
        else:
            formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
        ret = formatstr.format(value)
        # Swap the standard ','/'.' for the configured separators via
        # placeholder tokens so the two replacements don't collide.
        ret = ret.replace(',','COMMA')
        ret = ret.replace('.','DOT')
        ret = ret.replace('COMMA',thousands_separator)
        ret = ret.replace('DOT',decimals_separator)
        if symbol:
            if position == 'after':
                ret = ret + symbol
            else:
                ret = symbol + ret
        return ret

    def print_elem(stylestack, serializer, elem, indent=0):
        # Recursively render one XML element and its children.
        elem_styles = {
            'h1': {'bold': 'on', 'size':'double'},
            'h2': {'size':'double'},
            'h3': {'bold': 'on', 'size':'double-height'},
            'h4': {'size': 'double-height'},
            'h5': {'bold': 'on'},
            'em': {'font': 'b'},
            'b': {'bold': 'on'},
        }

        # Push a style scope: tag defaults first, then explicit attributes.
        stylestack.push()
        if elem.tag in elem_styles:
            stylestack.set(elem_styles[elem.tag])
        stylestack.set(elem.attrib)

        if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
            # Block-level containers: text, then children, each child's
            # tail rendered in its own inline entity.
            serializer.start_block(stylestack)
            serializer.text(elem.text)
            for child in elem:
                print_elem(stylestack,serializer,child)
                serializer.start_inline(stylestack)
                serializer.text(child.tail)
                serializer.end_entity()
            serializer.end_entity()
        elif elem.tag in ('span','em','b','left','right'):
            # Inline containers: same traversal, inline entity instead.
            serializer.start_inline(stylestack)
            serializer.text(elem.text)
            for child in elem:
                print_elem(stylestack,serializer,child)
                serializer.start_inline(stylestack)
                serializer.text(child.tail)
                serializer.end_entity()
            serializer.end_entity()
        elif elem.tag == 'value':
            # Numeric value formatted per the 'value-*' style properties.
            serializer.start_inline(stylestack)
            serializer.pre(format_value(
                elem.text,
                decimals=stylestack.get('value-decimals'),
                width=stylestack.get('value-width'),
                decimals_separator=stylestack.get('value-decimals-separator'),
                thousands_separator=stylestack.get('value-thousands-separator'),
                autoint=(stylestack.get('value-autoint') == 'on'),
                symbol=stylestack.get('value-symbol'),
                position=stylestack.get('value-symbol-position') ))
            serializer.end_entity()
        elif elem.tag == 'line':
            # A left/right justified line built by a dedicated serializer.
            width = stylestack.get('width')
            if stylestack.get('size') in ('double', 'double-width'):
                # double-width characters halve the usable column count
                width = width / 2
            lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
            serializer.start_block(stylestack)
            for child in elem:
                if child.tag == 'left':
                    print_elem(stylestack,lineserializer,child,indent=indent)
                elif child.tag == 'right':
                    lineserializer.start_right()
                    print_elem(stylestack,lineserializer,child,indent=indent)
            serializer.pre(lineserializer.get_line())
            serializer.end_entity()
        elif elem.tag == 'ul':
            # Unordered list: bullet prefix before each <li>.
            serializer.start_block(stylestack)
            bullet = stylestack.get('bullet')
            for child in elem:
                if child.tag == 'li':
                    serializer.style(stylestack)
                    serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
                print_elem(stylestack,serializer,child,indent=indent+1)
            serializer.end_entity()
        elif elem.tag == 'ol':
            # Ordered list: "1) " style counter, width-aligned.
            cwidth = len(str(len(elem))) + 2
            i = 1
            serializer.start_block(stylestack)
            for child in elem:
                if child.tag == 'li':
                    serializer.style(stylestack)
                    serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
                    i = i + 1
                print_elem(stylestack,serializer,child,indent=indent+1)
            serializer.end_entity()
        elif elem.tag == 'pre':
            # Preformatted text: whitespace preserved.
            serializer.start_block(stylestack)
            serializer.pre(elem.text)
            serializer.end_entity()
        elif elem.tag == 'hr':
            # Horizontal rule spanning the (style-adjusted) width.
            width = stylestack.get('width')
            if stylestack.get('size') in ('double', 'double-width'):
                width = width / 2
            serializer.start_block(stylestack)
            serializer.text('-'*width)
            serializer.end_entity()
        elif elem.tag == 'br':
            serializer.linebreak()
        elif elem.tag == 'img':
            # Only inline base64 data URIs are supported here.
            if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
                self.print_base64_image(elem.attrib['src'])
        elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
            serializer.start_block(stylestack)
            self.barcode(strclean(elem.text),elem.attrib['encoding'])
            serializer.end_entity()
        elif elem.tag == 'cut':
            self.cut()
        elif elem.tag == 'partialcut':
            self.cut(mode='part')
        elif elem.tag == 'cashdraw':
            self.cashdraw(2)
            self.cashdraw(5)

        stylestack.pop()

    try:
        stylestack = StyleStack()
        serializer = XmlSerializer(self)
        root = ET.fromstring(xml.encode('utf-8'))

        # Select slip vs. roll paper handling from the root attribute.
        if 'sheet' in root.attrib and root.attrib['sheet'] == 'slip':
            self._raw(SHEET_SLIP_MODE)
            self.slip_sheet_mode = True
        else:
            self._raw(SHEET_ROLL_MODE)

        self._raw(stylestack.to_escpos())
        print_elem(stylestack,serializer,root)

        if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
            self.cashdraw(2)
            self.cashdraw(5)
        # Cut by default unless the root explicitly sets cut="false";
        # slip mode form-feeds instead of cutting.
        if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
            if self.slip_sheet_mode:
                self._raw(CTL_FF)
            else:
                self.cut()
    except Exception as e:
        # Print the failure on the ticket itself, then re-raise.
        errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
        self.text(errmsg)
        self.cut()
        raise e
Prints an xml based receipt definition
entailment
def text(self,txt):
    """ Print Utf8 encoded alpha-numeric text

    Each character is encoded to the best-matching printer code page,
    prefixing an ESC/POS code-page switch sequence whenever the page
    changes.  Katakana gets special treatment via jcconv when available.
    """
    if not txt:
        return
    # Accept raw byte strings too: try utf-8, then utf-16, else assume
    # already-decoded text.
    try:
        txt = txt.decode('utf-8')
    except:
        try:
            txt = txt.decode('utf-16')
        except:
            pass

    # Count of extra characters introduced by kana conversion, used
    # below to re-shrink the string.
    self.extra_chars = 0

    def encode_char(char):
        """ Encodes a single utf-8 character into a sequence of esc-pos code page change instructions and character declarations """
        char_utf8 = char.encode('utf-8')
        encoded = ''
        encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
        encodings = {
            # TODO use ordering to prevent useless switches
            # TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
            'cp437': TXT_ENC_PC437,
            'cp850': TXT_ENC_PC850,
            'cp852': TXT_ENC_PC852,
            'cp857': TXT_ENC_PC857,
            'cp858': TXT_ENC_PC858,
            'cp860': TXT_ENC_PC860,
            'cp863': TXT_ENC_PC863,
            'cp865': TXT_ENC_PC865,
            'cp866': TXT_ENC_PC866,
            'cp862': TXT_ENC_PC862,
            'cp720': TXT_ENC_PC720,
            'cp936': TXT_ENC_PC936,
            'iso8859_2': TXT_ENC_8859_2,
            'iso8859_7': TXT_ENC_8859_7,
            'iso8859_9': TXT_ENC_8859_9,
            'cp1254' : TXT_ENC_WPC1254,
            'cp1255' : TXT_ENC_WPC1255,
            'cp1256' : TXT_ENC_WPC1256,
            'cp1257' : TXT_ENC_WPC1257,
            'cp1258' : TXT_ENC_WPC1258,
            'katakana' : TXT_ENC_KATAKANA,
        }
        # Code pages not yet tried for this character.
        remaining = copy.copy(encodings)

        if not encoding :
            encoding = 'cp437'

        while True: # Trying all encoding until one succeeds
            try:
                if encoding == 'katakana': # Japanese characters
                    if jcconv:
                        # try to convert japanese text to a half-katakanas
                        kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
                        if kata != char_utf8:
                            self.extra_chars += len(kata.decode('utf-8')) - 1
                            # the conversion may result in multiple characters
                            return encode_str(kata.decode('utf-8'))
                    else:
                        kata = char_utf8

                    if kata in TXT_ENC_KATAKANA_MAP:
                        encoded = TXT_ENC_KATAKANA_MAP[kata]
                        break
                    else:
                        raise ValueError()
                else:
                    encoded = char.encode(encoding)
                    break
            except ValueError: #the encoding failed, select another one and retry
                if encoding in remaining:
                    del remaining[encoding]
                if len(remaining) >= 1:
                    # NOTE(review): dict.items()[0][0] is Python 2 only;
                    # Python 3 would need next(iter(remaining)).
                    encoding = remaining.items()[0][0]
                else:
                    encoding = 'cp437'
                    encoded = '\xb1' # could not encode, output error character
                    break;

        if encoding != self.encoding:
            # if the encoding changed, remember it and prefix the character with
            # the esc-pos encoding change sequence
            self.encoding = encoding
            encoded = encodings[encoding] + encoded

        return encoded

    def encode_str(txt):
        # Encode a whole string character by character.
        buffer = ''
        for c in txt:
            buffer += encode_char(c)
        return buffer

    txt = encode_str(txt)

    # if the utf-8 -> codepage conversion inserted extra characters,
    # remove double spaces to try to restore the original string length
    # and prevent printing alignment issues
    # NOTE(review): given the comment above, the find() argument was
    # probably a double space ('  ') before whitespace got collapsed in
    # this copy -- confirm against the upstream source.
    while self.extra_chars > 0:
        dspace = txt.find(' ')
        if dspace > 0:
            txt = txt[:dspace] + txt[dspace+1:]
            self.extra_chars -= 1
        else:
            break

    self._raw(txt)
Print Utf8 encoded alpha-numeric text
entailment
def set(self, align='left', font='a', type='normal', width=1, height=1):
    """Set text properties: alignment, font, emphasis and size.

    @param align: 'left' (default), 'center' or 'right'
    @param font: 'a' (default) or 'b'
    @param type: 'normal', 'b', 'u', 'u2', 'bu' or 'bu2'
    @param width, height: 1 (normal) or 2 (double)
    """
    # Align
    if align.upper() == "CENTER":
        self._raw(TXT_ALIGN_CT)
    elif align.upper() == "RIGHT":
        self._raw(TXT_ALIGN_RT)
    elif align.upper() == "LEFT":
        self._raw(TXT_ALIGN_LT)
    # Font
    if font.upper() == "B":
        self._raw(TXT_FONT_B)
    else:  # DEFAULT FONT: A
        self._raw(TXT_FONT_A)
    # Type (bold/underline).  Bug fix: the original compared the bound
    # method itself (`type.upper == "NORMAL"`), which is always False,
    # so 'normal' never reset bold/underline.
    t = type.upper()
    if t == "B":
        self._raw(TXT_BOLD_ON)
        self._raw(TXT_UNDERL_OFF)
    elif t == "U":
        self._raw(TXT_BOLD_OFF)
        self._raw(TXT_UNDERL_ON)
    elif t == "U2":
        self._raw(TXT_BOLD_OFF)
        self._raw(TXT_UNDERL2_ON)
    elif t == "BU":
        self._raw(TXT_BOLD_ON)
        self._raw(TXT_UNDERL_ON)
    elif t == "BU2":
        self._raw(TXT_BOLD_ON)
        self._raw(TXT_UNDERL2_ON)
    elif t == "NORMAL":
        self._raw(TXT_BOLD_OFF)
        self._raw(TXT_UNDERL_OFF)
    # Size
    if width == 2 and height != 2:
        self._raw(TXT_NORMAL)
        self._raw(TXT_2WIDTH)
    elif height == 2 and width != 2:
        self._raw(TXT_NORMAL)
        self._raw(TXT_2HEIGHT)
    elif height == 2 and width == 2:
        self._raw(TXT_2WIDTH)
        self._raw(TXT_2HEIGHT)
    else:  # DEFAULT SIZE: NORMAL
        self._raw(TXT_NORMAL)
Set text properties
entailment
def cut(self, mode=''):
    """Cut the paper: full cut by default, partial when mode='part'."""
    # Feed a few lines so the cut lands below the last printed line.
    # TODO: handle this with a proper line-feed command.
    self._raw("\n\n\n\n\n\n")
    cut_cmd = PAPER_PART_CUT if mode.upper() == "PART" else PAPER_FULL_CUT
    self._raw(cut_cmd)
Cut paper
entailment
def cashdraw(self, pin):
    """Send a kick pulse to the cash drawer on connector pin 2 or 5.

    Raises CashDrawerError for any other pin number.
    """
    kick_commands = {2: CD_KICK_2, 5: CD_KICK_5}
    if pin not in kick_commands:
        raise CashDrawerError()
    self._raw(kick_commands[pin])
Send pulse to kick the cash drawer
entailment
def hw(self, hw):
    """Run a hardware operation: INIT, SELECT or RESET.

    Any other value is deliberately a no-op.
    """
    op = hw.upper()
    if op == "INIT":
        self._raw(HW_INIT)
    elif op == "SELECT":
        self._raw(HW_SELECT)
    elif op == "RESET":
        self._raw(HW_RESET)
Hardware operations
entailment
def control(self, ctl):
    """Send a feed-control sequence: LF, FF, CR, HT or VT.

    Unknown values send nothing, matching the original if/elif chain.
    """
    sequences = {
        "LF": CTL_LF,
        "FF": CTL_FF,
        "CR": CTL_CR,
        "HT": CTL_HT,
        "VT": CTL_VT,
    }
    seq = sequences.get(ctl.upper())
    if seq is not None:
        self._raw(seq)
Feed control sequences
entailment
def open(self):
    """Locate the printer on the USB bus and claim it as the escpos device.

    Raises NoDeviceError when the vendor/product pair is not found and
    HandleDeviceError when configuring or claiming the interface fails.
    """
    self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
    if self.device is None:
        raise NoDeviceError()
    try:
        # Detach a kernel driver first, otherwise claiming fails.
        if self.device.is_kernel_driver_active(self.interface):
            self.device.detach_kernel_driver(self.interface)
        self.device.set_configuration()
        usb.util.claim_interface(self.device, self.interface)
    except usb.core.USBError as e:
        raise HandleDeviceError(e)
Search device on USB tree and set it as escpos device
entailment
def _raw(self, msg):
    """Write raw command bytes to the device.

    On a short write, print the configured error text on the ticket and
    raise TicketNotPrinted.
    """
    written = self.device.write(self.out_ep, msg, self.interface)
    if written != len(msg):
        self.device.write(self.out_ep, self.errorText, self.interface)
        raise TicketNotPrinted()
Print any command sent in raw format
entailment
def open(self):
    """Set up the serial port and use it as the escpos device.

    Fix: Python 3 `print()` function calls (the originals were Python 2
    print statements, a SyntaxError on Python 3).
    """
    self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate,
                                bytesize=self.bytesize, parity=serial.PARITY_NONE,
                                stopbits=serial.STOPBITS_ONE, timeout=self.timeout,
                                dsrdtr=True)
    if self.device is not None:
        print("Serial printer enabled")
    else:
        print("Unable to open serial printer on: %s" % self.devfile)
Setup serial port and set it as escpos device
entailment
def open(self):
    """Open a TCP socket to the printer and use it as the escpos device.

    Fix: Python 3 `print()` call.  NOTE(review): socket.socket() never
    returns None and connect() raises on failure, so the check below can
    never trigger; kept for parity with the original.
    """
    self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.device.connect((self.host, self.port))
    if self.device is None:
        print("Could not open socket for %s" % self.host)
Open TCP socket and set it as escpos device
entailment
def _pinyin_generator(chars, format):
    """Yield the pinyin for each character in *chars* (a unicode
    iterable); characters without an entry in the dictionary pass
    through unchanged.

    format: 'strip' (no tone), 'numerical' (tone digit appended) or
    'diacritical' (tone mark placed on the proper vowel).
    Raises ValueError for any other format when the syllable has a tone.
    """
    for char in chars:
        key = "%X" % ord(char)
        pinyin = pinyin_dict.get(key, char)
        tone = pinyin_tone.get(key, 0)
        if tone != 0 and format != "strip":
            if format == "numerical":
                pinyin += str(tone)
            elif format == "diacritical":
                # The mark goes after the first a/e/o, else the first i/u/v.
                vowels = itertools.chain((c for c in pinyin if c in "aeo"),
                                         (c for c in pinyin if c in "iuv"))
                mark_at = pinyin.index(next(vowels)) + 1
                pinyin = pinyin[:mark_at] + tonemarks[tone] + pinyin[mark_at:]
            else:
                raise ValueError("Format must be one of: numerical/diacritical/strip")
        yield unicodedata.normalize('NFC', pinyin)
Generate pinyin for chars, if char is not chinese character, itself will be returned. Chars must be unicode list.
entailment
def get(s, delimiter='', format="diacritical"):
    """Return the pinyin of the unicode string *s*, syllables joined by
    *delimiter*."""
    syllables = _pinyin_generator(u(s), format=format)
    return delimiter.join(syllables)
Return pinyin of string, the string must be unicode
entailment
def get_initial(s, delimiter=' '):
    """Return the first letter of each syllable's pinyin for the unicode
    string *s*, joined by *delimiter*."""
    return delimiter.join(p[0] for p in _pinyin_generator(u(s), format="strip"))
Return the 1st char of pinyin of string, the string must be unicode
entailment
def _add_to_tree(tree, word, meaning): ''' We build word search trees, where we walk down the letters of a word. For example: 你 Good 你好 Hello Would build the tree 你 / \ You 好 \ Hello ''' if len(word) == 0: tree[''] = meaning else: _add_to_tree(tree[word[0]], word[1:], meaning)
We build word search trees, where we walk down the letters of a word. For example: 你 Good 你好 Hello Would build the tree 你 / \ You 好 \ Hello
entailment
def init():
    '''Load the Chinese-English dictionary (takes 1-2 seconds).

    Called lazily by the lookup functions, but public so callers can
    preload.  Populates the module-level `dictionaries` (word -> list of
    meanings, for both scripts) and `trees` (character tries).
    '''
    global dictionaries, trees
    dictionaries = {'traditional': {}, 'simplified': {}}
    trees = {'traditional': Tree(), 'simplified': Tree()}
    lines = gzip.open(
        os.path.join(os.path.dirname(__file__), "cedict.txt.gz"),
        mode='rt',
        encoding='utf-8'
    )
    # CEDICT line format: TRAD SIMP [pinyin] /meaning 1/meaning 2/
    exp = re.compile("^([^ ]+) ([^ ]+) \[(.*)\] /(.+)/")
    for line in lines:
        if line[0] == '#':  # skip comment lines
            continue
        traditional, simplified, pinyin, meaning = exp.match(line).groups()
        meaning = meaning.split('/')
        dictionaries['traditional'][traditional] = meaning
        dictionaries['simplified'][simplified] = meaning
        _add_to_tree(trees['traditional'], traditional, meaning)
        _add_to_tree(trees['simplified'], simplified, meaning)
Load in the Chinese-English dictionary. This takes 1-2 seconds. It is done when the other functions are used, but this is public since preloading sometimes makes sense.
entailment
def translate_word(word, dictionary=('simplified',)):
    '''Return the list of translations for a single character or word,
    or None when it is not in any of the requested dictionaries.

    @param dictionary: iterable of dictionary names to search, in order
        ('simplified' and/or 'traditional').
    Fix: the default was a mutable list (`['simplified']`) -- replaced
    with an equivalent tuple, the standard guard against shared mutable
    defaults; callers passing their own list are unaffected.
    '''
    if not dictionaries:
        init()  # lazy-load the dictionary data on first use
    for d in dictionary:
        if word in dictionaries[d]:
            return dictionaries[d][word]
    return None
Return the set of translations for a single character or word, if available.
entailment
def _words_at_the_beginning(word, tree, prefix=""): ''' We return all portions of the tree corresponding to the beginning of `word`. This is used recursively, so we pass the prefix so we can return meaningful words+translations. ''' l = [] if "" in tree: l.append([prefix, tree[""]]) if len(word) > 0 and word[0] in tree: l.extend(_words_at_the_beginning( word[1:], tree[word[0]], prefix=prefix+word[0] )) return l
We return all portions of the tree corresponding to the beginning of `word`. This is used recursively, so we pass the prefix so we can return meaningful words+translations.
entailment
def all_phrase_translations(phrase):
    '''Yield the translations for all words that can start at each
    position of each whitespace-separated chunk of *phrase*.

    Chinese is sometimes ambiguous; no disambiguation is attempted and
    unknown characters are not handled specially -- full parsing is left
    to upstream logic.

    Bug fix: the original called `phrase.split(string.whitespace)`,
    which treats the entire whitespace string " \\t\\n..." as ONE literal
    separator and therefore almost never splits; `split()` with no
    argument splits on any run of whitespace, as intended.
    '''
    if not trees:
        init()  # lazy-load the dictionary data on first use
    for word in phrase.split():
        for x in range(len(word)):
            for translation in _words_at_the_beginning(
                    word[x+1:],
                    trees['simplified'][word[x]],
                    prefix=word[x]):
                yield translation
Return the set of translations for all possible words in a full phrase. Chinese is sometimes ambiguous. We do not attempt to disambiguate, or handle unknown letters especially well. Full parsing is left to upstream logic.
entailment
def process(self, instance, force=False):
    """Process the field's dependencies for *instance*.

    Triggered by the field's pre_save: runs when the value changed
    (stashed via the descriptor) or when forced.  Synchronous
    dependencies are processed inline; when any async dependency exists,
    the rest are handed to an AsyncHandler.  Failures are recorded via
    failed_processing and re-raised unless they are ProcessingError.

    Fix: `d.async` is a SyntaxError on Python 3.7+ (`async` became a
    reserved keyword), so the attribute is read with getattr().
    """
    if self.should_process and (force or self.has_stashed_value):
        self.set_status(instance, {'state': 'busy'})
        for d in filter(lambda d: d.has_processor(), self.dependencies):
            d.stash_previous_value(instance, d.get_value(instance))
        try:
            if self.has_async:
                # Process the synchronous dependencies now; the async
                # ones are delegated to the handler thread.
                for d in filter(lambda d: not getattr(d, 'async') and d.should_process(),
                                self.dependencies):
                    self._process(d, instance)
                async_handler = AsyncHandler(self, instance)
                async_handler.start()
            else:
                for d in filter(lambda d: d.should_process(), self.dependencies):
                    self._process(d, instance)
                self.finished_processing(instance)
        except BaseException as e:
            self.failed_processing(instance, e)
            if not isinstance(e, ProcessingError):
                raise
    elif self.has_stashed_value:
        self.cleanup_stash()
Processing is triggered by field's pre_save method. It will be executed if field's value has been changed (known through descriptor and stashing logic) or if model instance has never been saved before, i.e. no pk set, because there is a chance that field was initialized through model's `__init__`, hence default value was stashed with pre_init handler.
entailment
def get_status_key(self, instance):
    """Build the cache key used to store this field's status.

    Unsaved instances (no pk) fall back to the object's id() so they
    still get a distinct key.
    """
    if instance.pk is None:
        key_id = "inst_%s" % id(instance)
    else:
        key_id = instance.pk
    return "%s.%s-%s-%s" % (
        instance._meta.app_label,
        get_model_name(instance),
        key_id,
        self.field.name,
    )
Generates a key used to set a status on a field
entailment
def get_status(self, instance):
    """Retrieve the field's status from cache.

    Terminal states ('complete' and 'error') are one-shot: their cache
    entry is deleted as soon as it has been read.
    """
    status_key, status = self._get_status(instance)
    if status['state'] in ('complete', 'error'):
        cache.delete(status_key)
    return status
Retrieves a status of a field from cache. Fields in state 'error' and 'complete' will not retain the status after the call.
entailment
def set_status(self, instance, status):
    """Store *status* for this field in cache for up to 5 minutes."""
    cache.set(self.get_status_key(instance), status, timeout=300)
Sets the field status for up to 5 minutes.
entailment
def get_mode(self, old_mode=None):
    """Return the output mode.

    An explicitly set `self.mode` wins.  Otherwise: keep *old_mode* when
    this format supports it, else pick the supported mode closest to
    *old_mode* in PILLOW_MODES (searching forward first, then backward),
    falling back to the format's first output mode.
    """
    if self.mode is not None:
        return self.mode
    assert self.can_write, "This format does not have a supported output mode."
    if old_mode is None:
        return self.output_modes[0]
    if old_mode in self.output_modes:
        return old_mode
    try:
        idx = PILLOW_MODES.index(old_mode)
    except ValueError:
        # unknown or uncommon mode: just use our best default
        return self.output_modes[0]
    # best supported mode at or after old_mode...
    for candidate in PILLOW_MODES[idx+1:]:
        if candidate in self.output_modes:
            return candidate
    # ...otherwise the closest supported one before it
    for candidate in reversed(PILLOW_MODES[:idx]):
        if candidate in self.output_modes:
            return candidate
Returns output mode. If `mode` not set it will try to guess best mode, or next best mode comparing to old mode
entailment
def token_at_cursor(code, pos=0):
    """Find the token present at position *pos* in the code buffer.

    A token is a maximal run of alphabetic characters around the cursor,
    extended to include a leading '%' (a potential magic).
    :return (tuple): (token, start_position)
    """
    length = len(code)
    start = end = pos
    # extend right over alphabetic characters
    while end < length and code[end].isalpha():
        end += 1
    # extend left over alphabetic characters
    while start > 0 and code[start-1].isalpha():
        start -= 1
    # include a leading '%' (potential magic)
    if start > 0 and code[start-1] == '%':
        start -= 1
    return code[start:end], start
Find the token present at the passed position in the code buffer :return (tuple): a pair (token, start_position)
entailment
def _send(self, data, msg_type='ok', silent=False):
    """Send a response to the frontend and return an execute message.

    @param data: response to send (None sends nothing)
    @param msg_type (str): 'ok', 'raw', 'error' or 'multi'
    @param silent (bool): suppress frontend output
    @return (dict): the execute-reply payload for the kernel
    """
    if data is not None:
        # Log the (possibly truncated) outgoing message; never let
        # logging itself break the reply.
        try:
            self._klog.debug(u"msg to frontend (%d): %.160s...", silent, data)
        except Exception as e:
            self._klog.warn(u"can't log response: %s", e)
        if not silent:
            if msg_type != 'raw':
                data = data_msg(data, mtype=msg_type)
            self.send_response(self.iopub_socket, 'display_data', data)
    # The base class will increment the execution count.
    status = 'error' if msg_type == 'error' else 'ok'
    return {
        'status': status,
        'execution_count': self.execution_count,
        'payload': [],
        'user_expressions': {},
    }
Send a response to the frontend and return an execute message @param data: response to send @param msg_type (str): message type: 'ok', 'raw', 'error', 'multi' @param silent (bool): suppress output @return (dict): the return value for the kernel
entailment
def do_execute(self, code, silent, store_history=True,
               user_expressions=None, allow_stdin=False):
    """Execute a cell: run any leading magic lines, then the SPARQL query."""
    self._klog.info("[%.30s] [%d] [%s]", code, silent, user_expressions)
    # Drop empty lines and comments; strip the rest.
    code_noc = [line.strip() for line in code.split('\n')
                if line and line[0] != '#']
    if not code_noc:
        return self._send(None)
    try:
        # Collect the leading run of magic ('%') lines.
        magic_lines = []
        for line in code_noc:
            if line[0] != '%':
                break
            magic_lines.append(line)
        if magic_lines:
            # Execute the magics, then drop them from the query buffer.
            out = [self._k.magic(line) for line in magic_lines]
            self._send(out, 'multi', silent=silent)
            code = '\n'.join(code_noc[len(magic_lines):])
        # Whatever remains is the SPARQL query proper.
        result = self._k.query(code, num=self.execution_count) if code else None
        return self._send(result, 'raw', silent=silent)
    except Exception as e:
        return self._send(e, 'error', silent=silent)
Method called to execute a cell
entailment
def do_inspect(self, code, cursor_pos, detail_level=0):
    """Handle a help request for the token at the cursor."""
    self._klog.info("{%s}", code[cursor_pos:cursor_pos+10])
    token, start = token_at_cursor(code, cursor_pos)
    self._klog.debug("token={%s} {%d}", token, detail_level)
    # Magics get magic help; everything else is looked up as SPARQL.
    if is_magic(token, start, code):
        info = magic_help if token == '%' else magics.get(token, None)
    else:
        info = sparql_help.get(token.upper(), None)
    if info:
        info = '{} {}\n\n{}'.format(token, *info)
    return {
        'status': 'ok',
        'data': {'text/plain': info},
        'metadata': {},
        'found': info is not None,
    }
Method called on help requests
entailment
def do_complete(self, code, cursor_pos):
    """Handle an autocompletion request for the token at the cursor."""
    self._klog.info("{%s}", code[cursor_pos:cursor_pos+10])
    token, start = token_at_cursor(code, cursor_pos)
    tkn_low = token.lower()
    # Complete against magic names or SPARQL keywords as appropriate.
    if is_magic(token, start, code):
        matches = [name for name in magics.keys() if name.startswith(tkn_low)]
    else:
        matches = [sparql_names[name] for name in sparql_names
                   if name.startswith(tkn_low)]
    self._klog.debug("token={%s} matches={%r}", token, matches)
    if matches:
        return {
            'status': 'ok',
            'cursor_start': start,
            'cursor_end': start + len(token),
            'matches': matches,
        }
Method called on autocompletion requests
entailment
def run(self):
    '''The body of the thread: read lines from the file object and put
    them on the queue until EOF (an empty read).'''
    while True:
        line = self._fd.readline()
        if line == '':
            break
        self._queue.put(line)
The body of the thread: read lines and put them on the queue.
entailment
def escape(x, lb=False):
    """Escape HTML-reserved characters (including double quotes) in *x*.

    With lb=True, a literal backslash-n sequence is also inserted at a
    whitespace position near the middle of strings of length >= 10.
    """
    if lb:
        half = len(x)
        if half >= 10:
            half >>= 1                    # middle of the string
            right = x.find(' ', half)     # first whitespace to the right
            left = x.rfind(' ', 0, half)  # first whitespace to the left
            if left > 0:
                # choose whichever split point is closer to the middle
                cut = left if right < 0 or half - right > left - half else right
                x = x[:cut] + '\\n' + x[cut+1:]
            elif right > 0:
                x = x[:right] + '\\n' + x[right+1:]
    return x.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace('"', "&quot;")
Ensure a string does not contain HTML-reserved characters (including double quotes) Optionally also insert a linebreak if the string is too long
entailment
def div(txt, *args, **kwargs):
    """Create & return an HTML <div> wrapping the passed text buffer.

    @param txt (basestring): the text buffer to use
    @param *args (list): if present, txt is treated as a Python format
        string and the arguments are formatted into it
    @param kwargs (dict): the 'css' field selects the CSS class for the
        <div> (defaults to HTML_DIV_CLASS)
    """
    body = txt.format(*args) if args else txt
    css = kwargs.get('css', HTML_DIV_CLASS)
    return u'<div class="{}">{!s}</div>'.format(css, body)
Create & return an HTML <div> element by wrapping the passed text buffer. @param txt (basestring): the text buffer to use @param *args (list): if present, \c txt is considered a Python format string, and the arguments are formatted into it @param kwargs (dict): the \c css field can contain the CSS class for the <div> element
entailment
def data_msglist( msglist ): """ Return a Jupyter display_data message, in both HTML & text formats, by joining together all passed messages. @param msglist (iterable): an iterable containing a list of tuples (message, css_style) Each message is either a text string, or a list. In the latter case it is assumed to be a format string + parameters. """ txt = html = u'' for msg, css in msglist: if is_collection(msg): msg = msg[0].format(*msg[1:]) html += div( escape(msg).replace('\n','<br/>'), css=css or 'msg' ) txt += msg + "\n" return { 'data': {'text/html' : div(html), 'text/plain' : txt }, 'metadata' : {} }
Return a Jupyter display_data message, in both HTML & text formats, by joining together all passed messages. @param msglist (iterable): an iterable containing a list of tuples (message, css_style) Each message is either a text string, or a list. In the latter case it is assumed to be a format string + parameters.
entailment
def data_msg( msg, mtype=None ): """ Return a Jupyter display_data message, in both HTML & text formats, by formatting a given single message. The passed message may be: * An exception (including a KrnlException): will generate an error message * A list of messages (with \c mtype equal to \c multi) * A single message @param msg (str,list): a string, or a list of format string + args, or an iterable of (msg,mtype) @param mtype (str): the message type (used for the CSS class). If it's \c multi, then \c msg will be treated as a multi-message. If not passed, \c krn-error will be used for exceptions and \c msg for everything else """ if isinstance(msg,KrnlException): return msg() # a KrnlException knows how to format itself elif isinstance(msg,Exception): return KrnlException(msg)() elif mtype == 'multi': return data_msglist( msg ) else: return data_msglist( [ (msg, mtype) ] )
Return a Jupyter display_data message, in both HTML & text formats, by formatting a given single message. The passed message may be: * An exception (including a KrnlException): will generate an error message * A list of messages (with \c mtype equal to \c multi) * A single message @param msg (str,list): a string, or a list of format string + args, or an iterable of (msg,mtype) @param mtype (str): the message type (used for the CSS class). If it's \c multi, then \c msg will be treated as a multi-message. If not passed, \c krn-error will be used for exceptions and \c msg for everything else
entailment
def copyresource( resource, filename, destdir ): """ Copy a resource file to a destination """ data = pkgutil.get_data(resource, os.path.join('resources',filename) ) #log.info( "Installing %s", os.path.join(destdir,filename) ) with open( os.path.join(destdir,filename), 'wb' ) as fp: fp.write(data)
Copy a resource file to a destination
entailment
def install_kernel_resources( destdir, resource=PKGNAME, files=None ): """ Copy the resource files to the kernelspec folder. """ if files is None: files = ['logo-64x64.png', 'logo-32x32.png'] for filename in files: try: copyresource( resource, filename, destdir ) except Exception as e: sys.stderr.write(str(e))
Copy the resource files to the kernelspec folder.
entailment
def install_custom_css( destdir, cssfile, resource=PKGNAME ): """ Add the kernel CSS to custom.css """ ensure_dir_exists( destdir ) custom = os.path.join( destdir, 'custom.css' ) prefix = css_frame_prefix(resource) # Check if custom.css already includes it. If so, let's remove it first exists = False if os.path.exists( custom ): with io.open(custom) as f: for line in f: if line.find( prefix ) >= 0: exists = True break if exists: remove_custom_css( destdir, resource ) # Fetch the CSS file cssfile += '.css' data = pkgutil.get_data( resource, os.path.join('resources',cssfile) ) # get_data() delivers encoded data, str (Python2) or bytes (Python3) # Add the CSS at the beginning of custom.css # io.open uses unicode strings (unicode in Python2, str in Python3) with io.open(custom + '-new', 'wt', encoding='utf-8') as fout: fout.write( u'{}START ======================== */\n'.format(prefix)) fout.write( data.decode('utf-8') ) fout.write( u'{}END ======================== */\n'.format(prefix)) if os.path.exists( custom ): with io.open( custom, 'rt', encoding='utf-8' ) as fin: for line in fin: fout.write( unicode(line) ) os.rename( custom+'-new',custom)
Add the kernel CSS to custom.css
entailment
def remove_custom_css(destdir, resource=PKGNAME ): """ Remove the kernel CSS from custom.css """ # Remove the inclusion in the main CSS if not os.path.isdir( destdir ): return False custom = os.path.join( destdir, 'custom.css' ) copy = True found = False prefix = css_frame_prefix(resource) with io.open(custom + '-new', 'wt') as fout: with io.open(custom) as fin: for line in fin: if line.startswith( prefix + 'START' ): copy = False found = True elif line.startswith( prefix + 'END' ): copy = True elif copy: fout.write( line ) if found: os.rename( custom+'-new',custom) else: os.unlink( custom+'-new') return found
Remove the kernel CSS from custom.css
entailment
def html_elem(e, ct, withtype=False): """ Format a result element as an HTML table cell. @param e (list): a pair \c (value,type) @param ct (str): cell type (th or td) @param withtype (bool): add an additional cell with the element type """ # Header cell if ct == 'th': return '<th>{0}</th><th>{1}</th>'.format(*e) if withtype else '<th>{}</th>'.format(e) # Content cell if e[1] in ('uri', 'URIRef'): html = u'<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>'.format(ct, e[0], escape(e[0])) else: html = u'<{0} class=val>{1}</{0}>'.format(ct, escape(e[0])) # Create the optional cell for the type if withtype: html += u'<{0} class=typ>{1}</{0}>'.format(ct, e[1]) return html
Format a result element as an HTML table cell. @param e (list): a pair \c (value,type) @param ct (str): cell type (th or td) @param withtype (bool): add an additional cell with the element type
entailment
def html_table(data, header=True, limit=None, withtype=False): """ Return a double iterable as an HTML table @param data (iterable): the data to format @param header (bool): if the first row is a header row @param limit (int): maximum number of rows to render (excluding header) @param withtype (bool): if columns are to have an alternating CSS class (even/odd) or not. @return (int,string): a pair <number-of-rendered-rows>, <html-table> """ if header and limit: limit += 1 ct = 'th' if header else 'td' rc = 'hdr' if header else 'odd' # import codecs # import datetime # with codecs.open( '/tmp/dump', 'w', encoding='utf-8') as f: # print( '************', datetime.datetime.now(), file=f ) # for n, row in enumerate(data): # print( '-------', n, file=f ) # for n, c in enumerate(row): # print( type(c), repr(c), file=f ) html = u'<table>' rn = -1 for rn, row in enumerate(data): html += u'<tr class={}>'.format(rc) html += '\n'.join((html_elem(c, ct, withtype) for c in row)) html += u'</tr>' rc = 'even' if rc == 'odd' else 'odd' ct = 'td' if limit: limit -= 1 if not limit: break return (0, '') if rn < 0 else (rn+1-header, html+u'</table>')
Return a double iterable as an HTML table @param data (iterable): the data to format @param header (bool): if the first row is a header row @param limit (int): maximum number of rows to render (excluding header) @param withtype (bool): if columns are to have an alternating CSS class (even/odd) or not. @return (int,string): a pair <number-of-rendered-rows>, <html-table>
entailment
def jtype(c): """ Return the a string with the data type of a value, for JSON data """ ct = c['type'] return ct if ct != 'literal' else '{}, {}'.format(ct, c.get('xml:lang'))
Return the a string with the data type of a value, for JSON data
entailment
def gtype(n): """ Return the a string with the data type of a value, for Graph data """ t = type(n).__name__ return str(t) if t != 'Literal' else 'Literal, {}'.format(n.language)
Return the a string with the data type of a value, for Graph data
entailment
def lang_match_json(row, hdr, accepted_languages): '''Find if the JSON row contains acceptable language data''' if not accepted_languages: return True languages = set([row[c].get('xml:lang') for c in hdr if c in row and row[c]['type'] == 'literal']) return (not languages) or (languages & accepted_languages)
Find if the JSON row contains acceptable language data
entailment
def lang_match_rdf(triple, accepted_languages): '''Find if the RDF triple contains acceptable language data''' if not accepted_languages: return True languages = set([n.language for n in triple if isinstance(n, Literal)]) return (not languages) or (languages & accepted_languages)
Find if the RDF triple contains acceptable language data
entailment
def lang_match_xml(row, accepted_languages): '''Find if the XML row contains acceptable language data''' if not accepted_languages: return True column_languages = set() for elem in row: lang = elem[0].attrib.get(XML_LANG, None) if lang: column_languages.add(lang) return (not column_languages) or (column_languages & accepted_languages)
Find if the XML row contains acceptable language data
entailment
def json_iterator(hdr, rowlist, lang, add_vtype=False): """ Convert a JSON response into a double iterable, by rows and columns Optionally add element type, and filter triples by language (on literals) """ # Return the header row yield hdr if not add_vtype else ((h, 'type') for h in hdr) # Now the data rows for row in rowlist: if lang and not lang_match_json(row, hdr, lang): continue yield ((row[c]['value'], jtype(row[c])) if c in row else ('', '') for c in hdr)
Convert a JSON response into a double iterable, by rows and columns Optionally add element type, and filter triples by language (on literals)
entailment
def rdf_iterator(graph, lang, add_vtype=False): """ Convert a Graph response into a double iterable, by triples and elements. Optionally add element type, and filter triples by language (on literals) """ # Return the header row hdr = ('subject', 'predicate', 'object') yield hdr if not add_vtype else ((h, 'type') for h in hdr) # Now the data rows for row in graph: if lang and not lang_match_rdf(row, lang): continue yield ((unicode(c), gtype(c)) for c in row)
Convert a Graph response into a double iterable, by triples and elements. Optionally add element type, and filter triples by language (on literals)
entailment
def render_json(result, cfg, **kwargs): """ Render to output a result in JSON format """ result = json.loads(result.decode('utf-8')) head = result['head'] if 'results' not in result: if 'boolean' in result: r = u'Result: {}'.format(result['boolean']) else: r = u'Unsupported result: \n' + unicode(result) return {'data': {'text/plain': r}, 'metadata': {}} vars = head['vars'] nrow = len(result['results']['bindings']) if cfg.dis == 'table': j = json_iterator(vars, result['results']['bindings'], set(cfg.lan), add_vtype=cfg.typ) n, data = html_table(j, limit=cfg.lmt, withtype=cfg.typ) data += div('Total: {}, Shown: {}', nrow, n, css="tinfo") data = {'text/html': div(data)} else: result = json.dumps(result, ensure_ascii=False, indent=2, sort_keys=True) data = {'text/plain': unicode(result)} return {'data': data, 'metadata': {}}
Render to output a result in JSON format
entailment
def xml_row(row, lang): ''' Generator for an XML row ''' for elem in row: name = elem.get('name') child = elem[0] ftype = re.sub(r'\{[^}]+\}', '', child.tag) if ftype == 'literal': ftype = '{}, {}'.format(ftype, child.attrib.get(XML_LANG, 'none')) yield (name, (child.text, ftype))
Generator for an XML row
entailment
def xml_iterator(columns, rowlist, lang, add_vtype=False): """ Convert an XML response into a double iterable, by rows and columns Options are: filter triples by language (on literals), add element type """ # Return the header row yield columns if not add_vtype else ((h, 'type') for h in columns) # Now the data rows for row in rowlist: if not lang_match_xml(row, lang): continue rowdata = {nam: val for nam, val in xml_row(row, lang)} yield (rowdata.get(field, ('', '')) for field in columns)
Convert an XML response into a double iterable, by rows and columns Options are: filter triples by language (on literals), add element type
entailment
def render_xml(result, cfg, **kwargs): """ Render to output a result in XML format """ # Raw mode if cfg.dis == 'raw': return {'data': {'text/plain': result.decode('utf-8')}, 'metadata': {}} # Table try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET root = ET.fromstring(result) try: ns = {'ns': re.match(r'\{([^}]+)\}', root.tag).group(1)} except Exception: raise KrnlException('Invalid XML data: cannot get namespace') columns = [c.attrib['name'] for c in root.find('ns:head', ns)] results = root.find('ns:results', ns) nrow = len(results) j = xml_iterator(columns, results, set(cfg.lan), add_vtype=cfg.typ) n, data = html_table(j, limit=cfg.lmt, withtype=cfg.typ) data += div('Total: {}, Shown: {}', nrow, n, css="tinfo") return {'data': {'text/html': div(data)}, 'metadata': {}}
Render to output a result in XML format
entailment
def render_graph(result, cfg, **kwargs): """ Render to output a result that can be parsed as an RDF graph """ # Mapping from MIME types to formats accepted by RDFlib rdflib_formats = {'text/rdf+n3': 'n3', 'text/turtle': 'turtle', 'application/x-turtle': 'turtle', 'text/turtle': 'turtle', 'application/rdf+xml': 'xml', 'text/rdf': 'xml', 'application/rdf+xml': 'xml'} try: got = kwargs.get('format', 'text/rdf+n3') fmt = rdflib_formats[got] except KeyError: raise KrnlException('Unsupported format for graph processing: {!s}', got) g = ConjunctiveGraph() g.load(StringInputSource(result), format=fmt) display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis if display in ('png', 'svg'): try: literal = len(cfg.dis) > 1 and cfg.dis[1].startswith('withlit') opt = {'lang': cfg.lan, 'literal': literal, 'graphviz': []} data, metadata = draw_graph(g, fmt=display, options=opt) return {'data': data, 'metadata': metadata} except Exception as e: raise KrnlException('Exception while drawing graph: {!r}', e) elif display == 'table': it = rdf_iterator(g, set(cfg.lan), add_vtype=cfg.typ) n, data = html_table(it, limit=cfg.lmt, withtype=cfg.typ) data += div('Shown: {}, Total rows: {}', n if cfg.lmt else 'all', len(g), css="tinfo") data = {'text/html': div(data)} elif len(g) == 0: data = {'text/html': div(div('empty graph', css='krn-warn'))} else: data = {'text/plain': g.serialize(format='nt').decode('utf-8')} return {'data': data, 'metadata': {}}
Render to output a result that can be parsed as an RDF graph
entailment
def magic(self, line): """ Read and process magics @param line (str): the full line containing a magic @return (list): a tuple (output-message,css-class), where the output message can be a single string or a list (containing a Python format string and its arguments) """ # The %lsmagic has no parameters if line.startswith('%lsmagic'): return magic_help, 'magic-help' # Split line into command & parameters try: cmd, param = line.split(None, 1) except ValueError: raise KrnlException("invalid magic: {}", line) cmd = cmd[1:].lower() # Process each magic if cmd == 'endpoint': self.srv = SPARQLWrapper.SPARQLWrapper(param) return ['Endpoint set to: {}', param], 'magic' elif cmd == 'auth': auth_data = param.split(None, 2) if auth_data[0].lower() == 'none': self.cfg.aut = None return ['HTTP authentication: None'], 'magic' if auth_data and len(auth_data) != 3: raise KrnlException("invalid %auth magic") self.cfg.aut = auth_data return ['HTTP authentication: {}', auth_data], 'magic' elif cmd == 'qparam': v = param.split(None, 1) if len(v) == 0: raise KrnlException("missing %qparam name") elif len(v) == 1: self.cfg.par.pop(v[0],None) return ['Param deleted: {}', v[0]] else: self.cfg.par[v[0]] = v[1] return ['Param set: {} = {}'] + v, 'magic' elif cmd == 'prefix': v = param.split(None, 1) if len(v) == 0: raise KrnlException("missing %prefix value") elif len(v) == 1: self.cfg.pfx.pop(v[0], None) return ['Prefix deleted: {}', v[0]], 'magic' else: self.cfg.pfx[v[0]] = v[1] return ['Prefix set: {} = {}'] + v, 'magic' elif cmd == 'show': if param == 'all': self.cfg.lmt = None else: try: self.cfg.lmt = int(param) except ValueError as e: raise KrnlException("invalid result limit: {}", e) sz = self.cfg.lmt if self.cfg.lmt is not None else 'unlimited' return ['Result maximum size: {}', sz], 'magic' elif cmd == 'format': fmt_list = {'JSON': SPARQLWrapper.JSON, 'N3': SPARQLWrapper.N3, 'XML': SPARQLWrapper.XML, 'DEFAULT': None, 'ANY': False} try: fmt = param.upper() self.cfg.fmt = 
fmt_list[fmt] except KeyError: raise KrnlException('unsupported format: {}\nSupported formats are: {!s}', param, list(fmt_list.keys())) return ['Return format: {}', fmt], 'magic' elif cmd == 'lang': self.cfg.lan = DEFAULT_TEXT_LANG if param == 'default' else [] if param=='all' else param.split() return ['Label preferred languages: {}', self.cfg.lan], 'magic' elif cmd in 'graph': self.cfg.grh = param if param else None return ['Default graph: {}', param if param else 'None'], 'magic' elif cmd == 'display': v = param.lower().split(None, 2) if len(v) == 0 or v[0] not in ('table', 'raw', 'graph', 'diagram'): raise KrnlException('invalid %display command: {}', param) msg_extra = '' if v[0] not in ('diagram', 'graph'): self.cfg.dis = v[0] self.cfg.typ = len(v) > 1 and v[1].startswith('withtype') if self.cfg.typ and self.cfg.dis == 'table': msg_extra = '\nShow Types: on' elif len(v) == 1: # graph format, defaults self.cfg.dis = ['svg'] else: # graph format, with options if v[1] not in ('png', 'svg'): raise KrnlException('invalid graph format: {}', param) if len(v) > 2: if not v[2].startswith('withlit'): raise KrnlException('invalid graph option: {}', param) msg_extra = '\nShow literals: on' self.cfg.dis = v[1:3] display = self.cfg.dis[0] if is_collection(self.cfg.dis) else self.cfg.dis return ['Display: {}{}', display, msg_extra], 'magic' elif cmd == 'outfile': if param == 'NONE': self.cfg.out = None return ['no output file'], 'magic' else: self.cfg.out = param return ['Output file: {}', os.path.abspath(param)], 'magic' elif cmd == 'log': if not param: raise KrnlException('missing log level') try: lev = param.upper() parent_logger = logging.getLogger(__name__.rsplit('.', 1)[0]) parent_logger.setLevel(lev) return ("Logging set to {}", lev), 'magic' except ValueError: raise KrnlException('unknown log level: {}', param) elif cmd == 'header': if param.upper() == 'OFF': num = len(self.cfg.hdr) self.cfg.hdr = [] return ['All headers deleted ({})', num], 'magic' else: if param 
in self.cfg.hdr: return ['Header skipped (repeated)'], 'magic' self.cfg.hdr.append(param) return ['Header added: {}', param], 'magic' else: raise KrnlException("magic not found: {}", cmd)
Read and process magics @param line (str): the full line containing a magic @return (list): a tuple (output-message,css-class), where the output message can be a single string or a list (containing a Python format string and its arguments)
entailment
def query(self, query, num=0, silent=False): """ Launch an SPARQL query, process & convert results and return them """ if self.srv is None: raise KrnlException('no endpoint defined') # Add to the query all predefined SPARQL prefixes if self.cfg.pfx: prefix = '\n'.join(('PREFIX {} {}'.format(*v) for v in self.cfg.pfx.items())) query = prefix + '\n' + query # Prepend to the query all predefined Header entries # The header should be before the prefix and other sparql commands if self.cfg.hdr: query = '\n'.join(self.cfg.hdr) + '\n' + query if self.log.isEnabledFor(logging.DEBUG): self.log.debug("\n%50s%s", query, '...' if len(query) > 50 else '') # Select requested format if self.cfg.fmt is not None: fmt_req = self.cfg.fmt elif re.search(r'\bselect\b', query, re.I): fmt_req = SPARQLWrapper.JSON elif re.search(r'\b(?:describe|construct)\b', query, re.I): fmt_req = SPARQLWrapper.N3 else: fmt_req = False # Set the query self.srv.resetQuery() if self.cfg.aut: self.srv.setHTTPAuth(self.cfg.aut[0]) self.srv.setCredentials(*self.cfg.aut[1:]) else: self.srv.setCredentials(None, None) self.log.debug(u'request-format: %s display: %s', fmt_req, self.cfg.dis) if fmt_req: self.srv.setReturnFormat(fmt_req) if self.cfg.grh: self.srv.addParameter("default-graph-uri", self.cfg.grh) for p in self.cfg.par.items(): self.srv.addParameter(*p) self.srv.setQuery(query) if not silent or self.cfg.out: try: # Launch query start = datetime.datetime.utcnow() res = self.srv.query() now = datetime.datetime.utcnow() self.log.debug(u'response elapsed=%s', now-start) start = now # See what we got info = res.info() self.log.debug(u'response info: %s', info) fmt_got = info['content-type'].split(';')[0] if 'content-type' in info else None # Check we received a MIME type according to what we requested if fmt_req and fmt_got not in mime_type[fmt_req]: raise KrnlException(u'Unexpected response format: {} (requested: {})', fmt_got, fmt_req) # Get the result data = b''.join((line for line in res)) except 
KrnlException: raise except SPARQLWrapperException as e: raise KrnlException(u'SPARQL error: {}', touc(e)) except Exception as e: raise KrnlException(u'Query processing error: {!s}', e) # Write the raw result to a file if self.cfg.out: try: outname = self.cfg.out % num except TypeError: outname = self.cfg.out with io.open(outname, 'wb') as f: f.write(data) # Render the result into the desired display format try: # Data format we will render fmt = (fmt_req if fmt_req else SPARQLWrapper.JSON if fmt_got in mime_type[SPARQLWrapper.JSON] else SPARQLWrapper.N3 if fmt_got in mime_type[SPARQLWrapper.N3] else SPARQLWrapper.XML if fmt_got in mime_type[SPARQLWrapper.XML] else 'text/plain' if self.cfg.dis == 'raw' else fmt_got if fmt_got in ('text/plain', 'text/html') else 'text/plain') #self.log.debug(u'format: req=%s got=%s rend=%s',fmt_req,fmt_got,fmt) # Can't process? Just write the data as is if fmt in ('text/plain', 'text/html'): out = data.decode('utf-8') if isinstance(data, bytes) else data r = {'data': {fmt: out}, 'metadata': {}} else: f = render_json if fmt == SPARQLWrapper.JSON else render_xml if fmt == SPARQLWrapper.XML else render_graph r = f(data, self.cfg, format=fmt_got) now = datetime.datetime.utcnow() self.log.debug(u'response formatted=%s', now-start) if not silent: return r except Exception as e: raise KrnlException(u'Response processing error: {}', touc(e))
Launch an SPARQL query, process & convert results and return them
entailment
def set_logging( logfilename=None, level=None ): """ Set a logging configuration, with a rolling file appender. If passed a filename, use it as the logfile, else use a default name. The default logfile is \c sparqlkernel.log, placed in the directory given by (in that order) the \c LOGDIR environment variable, the logdir specified upon kernel installation or the default temporal directory. """ if logfilename is None: # Find the logging diectory logdir = os.environ.get( 'LOGDIR' ) if logdir is None: logdir = os.environ.get( 'LOGDIR_DEFAULT', tempfile.gettempdir() ) # Define the log filename basename = __name__.split('.')[-2] logfilename = os.path.join( logdir, basename + '.log' ) LOGCONFIG['handlers']['default']['filename'] = logfilename if level is not None: LOGCONFIG['loggers']['sparqlkernel']['level'] = level dictConfig( LOGCONFIG )
Set a logging configuration, with a rolling file appender. If passed a filename, use it as the logfile, else use a default name. The default logfile is \c sparqlkernel.log, placed in the directory given by (in that order) the \c LOGDIR environment variable, the logdir specified upon kernel installation or the default temporal directory.
entailment
def smartfields_get_field_status(self, field_name): """A way to find out a status of a filed.""" manager = self._smartfields_managers.get(field_name, None) if manager is not None: return manager.get_status(self) return {'state': 'ready'}
A way to find out a status of a filed.
entailment
def get_ext(self, format=None, **kwargs): """Returns new file extension based on a processor's `format` parameter. Overwrite if different extension should be set ex: `'.txt'` or `None` if this processor does not change file's extension. """ try: format = format or self.default_params['format'] return ".%s" % format.lower() except KeyError: pass
Returns new file extension based on a processor's `format` parameter. Overwrite if different extension should be set ex: `'.txt'` or `None` if this processor does not change file's extension.
entailment
def get_output_file(self, in_file, instance, field, **kwargs): """Creates a temporary file. With regular `FileSystemStorage` it does not need to be deleted, instaed file is safely moved over. With other cloud based storage it is a good idea to set `delete=True`.""" return NamedTemporaryFile(mode='rb', suffix='_%s_%s%s' % ( get_model_name(instance), field.name, self.get_ext()), delete=False)
Creates a temporary file. With regular `FileSystemStorage` it does not need to be deleted, instaed file is safely moved over. With other cloud based storage it is a good idea to set `delete=True`.
entailment
def label(x, gr, preferred_languages=None): """ @param x : graph entity @param gr (Graph): RDF graph @param preferred_languages (iterable) Return the best available label in the graph for the passed entity. If a set of preferred languages is given, try them in order. If none is found, an arbitrary language will be chosen """ # Find all labels & their language labels = { l.language : l for labelProp in LABEL_PROPERTIES for l in gr.objects(x,labelProp) } if labels: #return repr(preferred_languages) + repr(labels) #return u'|'.join(preferred_languages) + u' -> ' + u'/'.join( u'{}:{}'.format(*i) for i in labels.items() ) if preferred_languages is not None: for l in preferred_languages: if l in labels: return labels[l] return labels.itervalues().next() # No labels available. Try to generate a QNAME, or else, the string itself try: return gr.namespace_manager.compute_qname(x)[2].replace('_',' ') except: # Attempt to extract the trailing part of an URI m = re.search( '([^/]+)$', x ) return m.group(1).replace('_',' ') if m else x
@param x : graph entity @param gr (Graph): RDF graph @param preferred_languages (iterable) Return the best available label in the graph for the passed entity. If a set of preferred languages is given, try them in order. If none is found, an arbitrary language will be chosen
entailment
def rdf2dot( g, stream, opts={} ): """ Convert the RDF graph to DOT Write the dot output to the stream """ accept_lang = set( opts.get('lang',[]) ) do_literal = opts.get('literal') nodes = {} links = [] def node_id(x): if x not in nodes: nodes[x] = "node%d" % len(nodes) return nodes[x] def qname(x, g): try: q = g.compute_qname(x) return q[0] + ":" + q[2] except: return x def accept( node ): if isinstance( node, (rdflib.URIRef,rdflib.BNode) ): return True if not do_literal: return False return (not accept_lang) or (node.language in accept_lang) stream.write( u'digraph { \n node [ fontname="DejaVu Sans,Tahoma,Geneva,sans-serif" ] ; \n' ) # Write all edges. In the process make a list of all nodes for s, p, o in g: # skip triples for labels if p == rdflib.RDFS.label: continue # Create a link if both objects are graph nodes # (or, if literals are also included, if their languages match) if not (accept(s) and accept(o)): continue # add the nodes to the list sn = node_id(s) on = node_id(o) # add the link q = qname(p,g) if isinstance(p, rdflib.URIRef): opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s", href="%s", target="_other" ] ;\n' % (sn,on,q,p) else: opstr = u'\t%s -> %s [ arrowhead="open", color="#9FC9E560", fontsize=9, fontcolor="#204080", label="%s" ] ;\n'%(sn,on,q) stream.write( opstr ) # Write all nodes for u, n in nodes.items(): lbl = escape( label(u,g,accept_lang), True ) if isinstance(u, rdflib.URIRef): opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s", href="%s", target=_other ] \n' % (n, 'blue', lbl, u ) else: opstr = u'%s [ shape=none, fontsize=10, fontcolor=%s, label="%s" ] \n' % (n, 'black', lbl ) stream.write( u"# %s %s\n" % (u, n) ) stream.write( opstr ) stream.write(u'}\n')
Convert the RDF graph to DOT Write the dot output to the stream
entailment
def draw_graph( g, fmt='svg', prg='dot', options={} ): """ Draw an RDF graph as an image """ # Convert RDF to Graphviz buf = StringIO() rdf2dot( g, buf, options ) gv_options = options.get('graphviz',[]) if fmt == 'png': gv_options += [ '-Gdpi=220', '-Gsize=25,10!' ] metadata = { "width": 5500, "height": 2200, "unconfined" : True } #import codecs #with codecs.open('/tmp/sparqlkernel-img.dot','w',encoding='utf-8') as f: # f.write( buf.getvalue() ) # Now use Graphviz to generate the graph image = run_dot( buf.getvalue(), fmt=fmt, options=gv_options, prg=prg ) #with open('/tmp/sparqlkernel-img.'+fmt,'w') as f: # f.write( image ) # Return it if fmt == 'png': return { 'image/png' : base64.b64encode(image).decode('ascii') }, \ { "image/png" : metadata } elif fmt == 'svg': return { 'image/svg+xml' : image.decode('utf-8').replace('<svg','<svg class="unconfined"',1) }, \ { "unconfined" : True }
Draw an RDF graph as an image
entailment
def get_SZ(self, psd, geometry): """ Compute the scattering matrices for the given PSD and geometries. Returns: The new amplitude (S) and phase (Z) matrices. """ if (self._S_table is None) or (self._Z_table is None): raise AttributeError( "Initialize or load the scattering table first.") if (not isinstance(psd, PSD)) or self._previous_psd != psd: self._S_dict = {} self._Z_dict = {} psd_w = psd(self._psd_D) for geom in self.geometries: self._S_dict[geom] = \ trapz(self._S_table[geom] * psd_w, self._psd_D) self._Z_dict[geom] = \ trapz(self._Z_table[geom] * psd_w, self._psd_D) self._previous_psd = psd return (self._S_dict[geometry], self._Z_dict[geometry])
Compute the scattering matrices for the given PSD and geometries. Returns: The new amplitude (S) and phase (Z) matrices.
entailment
def init_scatter_table(self, tm, angular_integration=False, verbose=False): """Initialize the scattering lookup tables. Initialize the scattering lookup tables for the different geometries. Before calling this, the following attributes must be set: num_points, m_func, axis_ratio_func, D_max, geometries and additionally, all the desired attributes of the Scatterer class (e.g. wavelength, aspect ratio). Args: tm: a Scatterer instance. angular_integration: If True, also calculate the angle-integrated quantities (scattering cross section, extinction cross section, asymmetry parameter). These are needed to call the corresponding functions in the scatter module when PSD integration is active. The default is False. verbose: if True, print information about the progress of the calculation (which may take a while). If False (default), run silently. """ self._psd_D = np.linspace(self.D_max/self.num_points, self.D_max, self.num_points) self._S_table = {} self._Z_table = {} self._previous_psd = None self._m_table = np.empty(self.num_points, dtype=complex) if angular_integration: self._angular_table = {"sca_xsect": {}, "ext_xsect": {}, "asym": {}} else: self._angular_table = None (old_m, old_axis_ratio, old_radius, old_geom, old_psd_integrator) = \ (tm.m, tm.axis_ratio, tm.radius, tm.get_geometry(), tm.psd_integrator) try: # temporarily disable PSD integration to avoid recursion tm.psd_integrator = None for geom in self.geometries: self._S_table[geom] = \ np.empty((2,2,self.num_points), dtype=complex) self._Z_table[geom] = np.empty((4,4,self.num_points)) if angular_integration: for int_var in ["sca_xsect", "ext_xsect", "asym"]: self._angular_table[int_var][geom] = \ np.empty(self.num_points) for (i,D) in enumerate(self._psd_D): if verbose: print("Computing point {i} at D={D}...".format(i=i, D=D)) if self.m_func != None: tm.m = self.m_func(D) if self.axis_ratio_func != None: tm.axis_ratio = self.axis_ratio_func(D) self._m_table[i] = tm.m tm.radius = D/2.0 for geom in 
self.geometries: tm.set_geometry(geom) (S, Z) = tm.get_SZ_orient() self._S_table[geom][:,:,i] = S self._Z_table[geom][:,:,i] = Z if angular_integration: self._angular_table["sca_xsect"][geom][i] = \ scatter.sca_xsect(tm) self._angular_table["ext_xsect"][geom][i] = \ scatter.ext_xsect(tm) self._angular_table["asym"][geom][i] = \ scatter.asym(tm) finally: #restore old values (tm.m, tm.axis_ratio, tm.radius, tm.psd_integrator) = \ (old_m, old_axis_ratio, old_radius, old_psd_integrator) tm.set_geometry(old_geom)
Initialize the scattering lookup tables. Initialize the scattering lookup tables for the different geometries. Before calling this, the following attributes must be set: num_points, m_func, axis_ratio_func, D_max, geometries and additionally, all the desired attributes of the Scatterer class (e.g. wavelength, aspect ratio). Args: tm: a Scatterer instance. angular_integration: If True, also calculate the angle-integrated quantities (scattering cross section, extinction cross section, asymmetry parameter). These are needed to call the corresponding functions in the scatter module when PSD integration is active. The default is False. verbose: if True, print information about the progress of the calculation (which may take a while). If False (default), run silently.
entailment
def save_scatter_table(self, fn, description=""): """Save the scattering lookup tables. Save the state of the scattering lookup tables to a file. This can be loaded later with load_scatter_table. Other variables will not be saved, but this does not matter because the results of the computations are based only on the contents of the table. Args: fn: The name of the scattering table file. description (optional): A description of the table. """ data = { "description": description, "time": datetime.now(), "psd_scatter": (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table, self._angular_table, self._m_table, self.geometries), "version": tmatrix_aux.VERSION } pickle.dump(data, file(fn, 'w'), pickle.HIGHEST_PROTOCOL)
Save the scattering lookup tables. Save the state of the scattering lookup tables to a file. This can be loaded later with load_scatter_table. Other variables will not be saved, but this does not matter because the results of the computations are based only on the contents of the table. Args: fn: The name of the scattering table file. description (optional): A description of the table.
entailment
def load_scatter_table(self, fn): """Load the scattering lookup tables. Load the scattering lookup tables saved with save_scatter_table. Args: fn: The name of the scattering table file. """ data = pickle.load(file(fn)) if ("version" not in data) or (data["version"]!=tmatrix_aux.VERSION): warnings.warn("Loading data saved with another version.", Warning) (self.num_points, self.D_max, self._psd_D, self._S_table, self._Z_table, self._angular_table, self._m_table, self.geometries) = data["psd_scatter"] return (data["time"], data["description"])
Load the scattering lookup tables. Load the scattering lookup tables saved with save_scatter_table. Args: fn: The name of the scattering table file.
entailment
def gaussian_pdf(std=10.0, mean=0.0): """Gaussian PDF for orientation averaging. Args: std: The standard deviation in degrees of the Gaussian PDF mean: The mean in degrees of the Gaussian PDF. This should be a number in the interval [0, 180) Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized Gaussian PDF with the given STD at x (degrees). It is normalized for the interval [0, 180]. """ norm_const = 1.0 def pdf(x): return norm_const*np.exp(-0.5 * ((x-mean)/std)**2) * \ np.sin(np.pi/180.0 * x) norm_dev = quad(pdf, 0.0, 180.0)[0] # ensure that the integral over the distribution equals 1 norm_const /= norm_dev return pdf
Gaussian PDF for orientation averaging. Args: std: The standard deviation in degrees of the Gaussian PDF mean: The mean in degrees of the Gaussian PDF. This should be a number in the interval [0, 180) Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized Gaussian PDF with the given STD at x (degrees). It is normalized for the interval [0, 180].
entailment
def uniform_pdf(): """Uniform PDF for orientation averaging. Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized uniform PDF. It is normalized for the interval [0, 180]. """ norm_const = 1.0 def pdf(x): return norm_const * np.sin(np.pi/180.0 * x) norm_dev = quad(pdf, 0.0, 180.0)[0] # ensure that the integral over the distribution equals 1 norm_const /= norm_dev return pdf
Uniform PDF for orientation averaging. Returns: pdf(x), a function that returns the value of the spherical Jacobian- normalized uniform PDF. It is normalized for the interval [0, 180].
entailment
def orient_averaged_adaptive(tm): """Compute the T-matrix using variable orientation scatterers. This method uses a very slow adaptive routine and should mainly be used for reference purposes. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance Returns: The amplitude (S) and phase (Z) matrices. """ S = np.zeros((2,2), dtype=complex) Z = np.zeros((4,4)) def Sfunc(beta, alpha, i, j, real): (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta) s = S_ang[i,j].real if real else S_ang[i,j].imag return s * tm.or_pdf(beta) ind = range(2) for i in ind: for j in ind: S.real[i,j] = dblquad(Sfunc, 0.0, 360.0, lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0 S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0, lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0 def Zfunc(beta, alpha, i, j): (S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta) return Z_ang[i,j] * tm.or_pdf(beta) ind = range(4) for i in ind: for j in ind: Z[i,j] = dblquad(Zfunc, 0.0, 360.0, lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0 return (S, Z)
Compute the T-matrix using variable orientation scatterers. This method uses a very slow adaptive routine and should mainly be used for reference purposes. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance Returns: The amplitude (S) and phase (Z) matrices.
entailment
def orient_averaged_fixed(tm): """Compute the T-matrix using variable orientation scatterers. This method uses a fast Gaussian quadrature and is suitable for most use. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance. Returns: The amplitude (S) and phase (Z) matrices. """ S = np.zeros((2,2), dtype=complex) Z = np.zeros((4,4)) ap = np.linspace(0, 360, tm.n_alpha+1)[:-1] aw = 1.0/tm.n_alpha for alpha in ap: for (beta, w) in zip(tm.beta_p, tm.beta_w): (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta) S += w * S_ang Z += w * Z_ang sw = tm.beta_w.sum() #normalize to get a proper average S *= aw/sw Z *= aw/sw return (S, Z)
Compute the T-matrix using variable orientation scatterers. This method uses a fast Gaussian quadrature and is suitable for most use. Uses the set particle orientation PDF, ignoring the alpha and beta attributes. Args: tm: TMatrix (or descendant) instance. Returns: The amplitude (S) and phase (Z) matrices.
entailment
def set_geometry(self, geom): """A convenience function to set the geometry variables. Args: geom: A tuple containing (thet0, thet, phi0, phi, alpha, beta). See the Scatterer class documentation for a description of these angles. """ (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta) = geom
A convenience function to set the geometry variables. Args: geom: A tuple containing (thet0, thet, phi0, phi, alpha, beta). See the Scatterer class documentation for a description of these angles.
entailment
def get_geometry(self): """A convenience function to get the geometry variables. Returns: A tuple containing (thet0, thet, phi0, phi, alpha, beta). See the Scatterer class documentation for a description of these angles. """ return (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta)
A convenience function to get the geometry variables. Returns: A tuple containing (thet0, thet, phi0, phi, alpha, beta). See the Scatterer class documentation for a description of these angles.
entailment
def _init_tmatrix(self): """Initialize the T-matrix. """ if self.radius_type == Scatterer.RADIUS_MAXIMUM: # Maximum radius is not directly supported in the original # so we convert it to equal volume radius radius_type = Scatterer.RADIUS_EQUAL_VOLUME radius = self.equal_volume_from_maximum() else: radius_type = self.radius_type radius = self.radius self.nmax = pytmatrix.calctmat(radius, radius_type, self.wavelength, self.m.real, self.m.imag, self.axis_ratio, self.shape, self.ddelt, self.ndgs) self._tm_signature = (self.radius, self.radius_type, self.wavelength, self.m, self.axis_ratio, self.shape, self.ddelt, self.ndgs)
Initialize the T-matrix.
entailment
def _init_orient(self): """Retrieve the quadrature points and weights if needed. """ if self.orient == orientation.orient_averaged_fixed: (self.beta_p, self.beta_w) = quadrature.get_points_and_weights( self.or_pdf, 0, 180, self.n_beta) self._set_orient_signature()
Retrieve the quadrature points and weights if needed.
entailment
def _set_scatter_signature(self): """Mark the amplitude and scattering matrices as up to date. """ self._scatter_signature = (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta, self.orient)
Mark the amplitude and scattering matrices as up to date.
entailment
def get_SZ_single(self, alpha=None, beta=None): """Get the S and Z matrices for a single orientation. """ if alpha == None: alpha = self.alpha if beta == None: beta = self.beta tm_outdated = self._tm_signature != (self.radius, self.radius_type, self.wavelength, self.m, self.axis_ratio, self.shape, self.ddelt, self.ndgs) if tm_outdated: self._init_tmatrix() scatter_outdated = self._scatter_signature != (self.thet0, self.thet, self.phi0, self.phi, alpha, beta, self.orient) outdated = tm_outdated or scatter_outdated if outdated: (self._S_single, self._Z_single) = pytmatrix.calcampl(self.nmax, self.wavelength, self.thet0, self.thet, self.phi0, self.phi, alpha, beta) self._set_scatter_signature() return (self._S_single, self._Z_single)
Get the S and Z matrices for a single orientation.
entailment
def get_SZ_orient(self): """Get the S and Z matrices using the specified orientation averaging. """ tm_outdated = self._tm_signature != (self.radius, self.radius_type, self.wavelength, self.m, self.axis_ratio, self.shape, self.ddelt, self.ndgs) scatter_outdated = self._scatter_signature != (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta, self.orient) orient_outdated = self._orient_signature != \ (self.orient, self.or_pdf, self.n_alpha, self.n_beta) if orient_outdated: self._init_orient() outdated = tm_outdated or scatter_outdated or orient_outdated if outdated: (self._S_orient, self._Z_orient) = self.orient(self) self._set_scatter_signature() return (self._S_orient, self._Z_orient)
Get the S and Z matrices using the specified orientation averaging.
entailment
def get_SZ(self): """Get the S and Z matrices using the current parameters. """ if self.psd_integrator is None: (self._S, self._Z) = self.get_SZ_orient() else: scatter_outdated = self._scatter_signature != (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta, self.orient) psd_outdated = self._psd_signature != (self.psd,) outdated = scatter_outdated or psd_outdated if outdated: (self._S, self._Z) = self.psd_integrator(self.psd, self.get_geometry()) self._set_scatter_signature() self._set_psd_signature() return (self._S, self._Z)
Get the S and Z matrices using the current parameters.
entailment
def get_points_and_weights(w_func=lambda x : np.ones(x.shape), left=-1.0, right=1.0, num_points=5, n=4096): """Quadratude points and weights for a weighting function. Points and weights for approximating the integral I = \int_left^right f(x) w(x) dx given the weighting function w(x) using the approximation I ~ w_i f(x_i) Args: w_func: The weighting function w(x). Must be a function that takes one argument and is valid over the open interval (left, right). left: The left boundary of the interval right: The left boundary of the interval num_points: number of integration points to return n: the number of points to evaluate w_func at. Returns: A tuple (points, weights) where points is a sorted array of the points x_i and weights gives the corresponding weights w_i. """ dx = (float(right)-left)/n z = np.hstack(np.linspace(left+0.5*dx, right-0.5*dx, n)) w = dx*w_func(z) (a, b) = discrete_gautschi(z, w, num_points) alpha = a beta = np.sqrt(b) J = np.diag(alpha) J += np.diag(beta, k=-1) J += np.diag(beta, k=1) (points,v) = np.linalg.eigh(J) ind = points.argsort() points = points[ind] weights = v[0,:]**2 * w.sum() weights = weights[ind] return (points, weights)
Quadratude points and weights for a weighting function. Points and weights for approximating the integral I = \int_left^right f(x) w(x) dx given the weighting function w(x) using the approximation I ~ w_i f(x_i) Args: w_func: The weighting function w(x). Must be a function that takes one argument and is valid over the open interval (left, right). left: The left boundary of the interval right: The left boundary of the interval num_points: number of integration points to return n: the number of points to evaluate w_func at. Returns: A tuple (points, weights) where points is a sorted array of the points x_i and weights gives the corresponding weights w_i.
entailment
def sca_intensity(scatterer, h_pol=True): """Scattering intensity (phase function) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The differential scattering cross section. """ Z = scatterer.get_Z() return (Z[0,0] - Z[0,1]) if h_pol else (Z[0,0] + Z[0,1])
Scattering intensity (phase function) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The differential scattering cross section.
entailment
def ldr(scatterer, h_pol=True): """ Linear depolarizarion ratio (LDR) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), return LDR_h. If False, return LDR_v. Returns: The LDR. """ Z = scatterer.get_Z() if h_pol: return (Z[0,0] - Z[0,1] + Z[1,0] - Z[1,1]) / \ (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1]) else: return (Z[0,0] + Z[0,1] - Z[1,0] - Z[1,1]) / \ (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
Linear depolarizarion ratio (LDR) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), return LDR_h. If False, return LDR_v. Returns: The LDR.
entailment
def sca_xsect(scatterer, h_pol=True): """Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section. """ if scatterer.psd_integrator is not None: return scatterer.psd_integrator.get_angular_integrated( scatterer.psd, scatterer.get_geometry(), "sca_xsect") old_geom = scatterer.get_geometry() def d_xsect(thet, phi): (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg) Z = scatterer.get_Z() I = sca_intensity(scatterer, h_pol) return I * np.sin(thet) try: xsect = dblquad(d_xsect, 0.0, 2*np.pi, lambda x: 0.0, lambda x: np.pi)[0] finally: scatterer.set_geometry(old_geom) return xsect
Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section.
entailment
def ext_xsect(scatterer, h_pol=True): """Extinction cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The extinction cross section. """ if scatterer.psd_integrator is not None: try: return scatterer.psd_integrator.get_angular_integrated( scatterer.psd, scatterer.get_geometry(), "ext_xsect") except AttributeError: # Fall back to the usual method of computing this from S pass old_geom = scatterer.get_geometry() (thet0, thet, phi0, phi, alpha, beta) = old_geom try: scatterer.set_geometry((thet0, thet0, phi0, phi0, alpha, beta)) S = scatterer.get_S() finally: scatterer.set_geometry(old_geom) if h_pol: return 2 * scatterer.wavelength * S[1,1].imag else: return 2 * scatterer.wavelength * S[0,0].imag
Extinction cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The extinction cross section.
entailment
def ssa(scatterer, h_pol=True): """Single-scattering albedo for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The single-scattering albedo. """ ext_xs = ext_xsect(scatterer, h_pol=h_pol) return sca_xsect(scatterer, h_pol=h_pol)/ext_xs if ext_xs > 0.0 else 0.0
Single-scattering albedo for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The single-scattering albedo.
entailment
def asym(scatterer, h_pol=True): """Asymmetry parameter for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The asymmetry parameter. """ if scatterer.psd_integrator is not None: return scatterer.psd_integrator.get_angular_integrated( scatterer.psd, scatterer.get_geometry(), "asym") old_geom = scatterer.get_geometry() cos_t0 = np.cos(scatterer.thet0 * deg_to_rad) sin_t0 = np.sin(scatterer.thet0 * deg_to_rad) p0 = scatterer.phi0 * deg_to_rad def integrand(thet, phi): (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg) cos_T_sin_t = 0.5 * (np.sin(2*thet)*cos_t0 + \ (1-np.cos(2*thet))*sin_t0*np.cos(p0-phi)) I = sca_intensity(scatterer, h_pol) return I * cos_T_sin_t try: cos_int = dblquad(integrand, 0.0, 2*np.pi, lambda x: 0.0, lambda x: np.pi)[0] finally: scatterer.set_geometry(old_geom) return cos_int/sca_xsect(scatterer, h_pol)
Asymmetry parameter for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The asymmetry parameter.
entailment
def radar_xsect(scatterer, h_pol=True): """Radar cross section for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The radar cross section. """ Z = scatterer.get_Z() if h_pol: return 2 * np.pi * \ (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1]) else: return 2 * np.pi * \ (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
Radar cross section for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The radar cross section.
entailment
def refl(scatterer, h_pol=True): """Reflectivity (with number concentration N=1) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The reflectivity. NOTE: To compute reflectivity in dBZ, give the particle diameter and wavelength in [mm], then take 10*log10(Zi). """ return scatterer.wavelength**4/(np.pi**5*scatterer.Kw_sqr) * \ radar_xsect(scatterer, h_pol)
Reflectivity (with number concentration N=1) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The reflectivity. NOTE: To compute reflectivity in dBZ, give the particle diameter and wavelength in [mm], then take 10*log10(Zi).
entailment
def delta_hv(scatterer): """ Delta_hv for the current setup. Args: scatterer: a Scatterer instance. Returns: Delta_hv [rad]. """ Z = scatterer.get_Z() return np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])
Delta_hv for the current setup. Args: scatterer: a Scatterer instance. Returns: Delta_hv [rad].
entailment
def rho_hv(scatterer): """ Copolarized correlation (rho_hv) for the current setup. Args: scatterer: a Scatterer instance. Returns: rho_hv. """ Z = scatterer.get_Z() a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2 b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1]) c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1]) return np.sqrt(a / (b*c))
Copolarized correlation (rho_hv) for the current setup. Args: scatterer: a Scatterer instance. Returns: rho_hv.
entailment