repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
khancyr/ardupilot
libraries/AP_HAL_ChibiOS/hwdef/scripts/convert_uart_order.py
21
1096
#!/usr/bin/env python ''' convert UART_ORDER in a hwdef.dat into a SERIAL_ORDER ''' import sys, shlex def convert_file(fname): lines = open(fname, 'r').readlines() for i in range(len(lines)): if lines[i].startswith('SERIAL_ORDER'): print("Already has SERIAL_ORDER: %s" % fname) return for i in range(len(lines)): line = lines[i] if not line.startswith('UART_ORDER'): continue a = shlex.split(line, posix=False) if a[0] != 'UART_ORDER': continue uart_order = a[1:] if not fname.endswith('-bl.dat'): while len(uart_order) < 4: uart_order += ['EMPTY'] a += ['EMPTY'] map = [ 0, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12 ] for j in range(len(uart_order)): a[j+1] = uart_order[map[j]] a[0] = 'SERIAL_ORDER' print("%s new order " % fname, a) lines[i] = ' '.join(a) + '\n' open(fname, 'w').write(''.join(lines)) files=sys.argv[1:] for fname in files: convert_file(fname)
gpl-3.0
sourcepole/qgis
python/plugins/osm/OsmPlugin.py
2
13996
"""@package OsmPlugin This is the main module of the OSM Plugin. It shows/hides all tool buttons, widgets and dialogs. After closing dialogs it does all actions related with their return codes. /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtNetwork import * from qgis.core import * from OsmLoadDlg import OsmLoadDlg from OsmSaveDlg import OsmSaveDlg from OsmDownloadDlg import OsmDownloadDlg from OsmUploadDlg import OsmUploadDlg from OsmImportDlg import OsmImportDlg from OsmFeatureDW import * from OsmUndoRedoDW import * # initialize Qt resources from file resouces.py import resources_rc def versionNumber(): """Returns current version number of OpenStreetMap plugin. @return current version number of the plugin """ return "0.5" class OsmPlugin: """OsmPlugin is the main class OSM Plugin module. It shows/hides all tool buttons, widgets and dialogs and after closing dialogs it does all actions related with their return codes. """ def __init__(self, iface): """The constructor. @param iface QgisInterface object """ self.iface=iface self.canvas=self.iface.mapCanvas() self.http=QHttp() self.outFile=None self.httpGetId=0 self.httpRequestAborted=False self.fname="" def initGui(self): """Function initalizes GUI of the OSM Plugin. """ self.dockWidgetVisible = False # create action for loading OSM file self.actionLoad=QAction(QIcon(":/plugins/osm_plugin/images/osm_load.png") ,"Load OSM from file", self.iface.mainWindow()) self.actionLoad.setWhatsThis("Load OpenStreetMap from file") # create action for import of a layer into OSM self.actionImport=QAction(QIcon(":/plugins/osm_plugin/images/osm_import.png") ,"Import data from a layer", self.iface.mainWindow()) self.actionImport.setWhatsThis("Import data from a layer to OpenStreetMap") # create action for saving OSM file self.actionSave=QAction(QIcon(":/plugins/osm_plugin/images/osm_save.png") ,"Save OSM to file", self.iface.mainWindow()) self.actionSave.setWhatsThis("Save OpenStreetMap to file") # create action for OSM data downloading self.actionDownload=QAction(QIcon(":/plugins/osm_plugin/images/osm_download.png") ,"Download OSM data", self.iface.mainWindow()) self.actionDownload.setWhatsThis("Download OpenStreetMap data") # create action for OSM data downloading self.actionUpload=QAction(QIcon(":/plugins/osm_plugin/images/osm_upload.png") ,"Upload OSM data", self.iface.mainWindow()) self.actionUpload.setWhatsThis("Upload OpenStreetMap data") # create action for OSM dockable window self.actionDockWidget=QAction(QIcon(":/plugins/osm_plugin/images/osm_featureManager.png") ,"Show/Hide OSM Feature Manager",self.iface.mainWindow()) self.actionDockWidget.setWhatsThis("Show/Hide OpenStreetMap Feature Manager") self.actionDockWidget.setCheckable(True) # connect new action to plugin function - when action is triggered QObject.connect(self.actionLoad, SIGNAL("triggered()"), self.loadOsmFromFile) QObject.connect(self.actionSave, SIGNAL("triggered()"), self.saveOsmToFile) QObject.connect(self.actionDownload, SIGNAL("triggered()"), self.downloadOsmData) QObject.connect(self.actionUpload, SIGNAL("triggered()"), self.uploadOsmData) QObject.connect(self.actionDockWidget, SIGNAL("triggered()"), self.showHideDockWidget) QObject.connect(self.actionImport, SIGNAL("triggered()"), self.importData) # create a toolbar self.toolBar=self.iface.addToolBar("OpenStreetMap") self.toolBar.setObjectName("OpenStreetMap") self.toolBar.addAction(self.actionLoad) self.toolBar.addAction(self.actionDockWidget) self.toolBar.addAction(self.actionDownload) self.toolBar.addAction(self.actionUpload) self.toolBar.addAction(self.actionImport) self.toolBar.addAction(self.actionSave) # populate plugins menu self.iface.addPluginToMenu("&OpenStreetMap", self.actionLoad) self.iface.addPluginToMenu("&OpenStreetMap", self.actionDockWidget) self.iface.addPluginToMenu("&OpenStreetMap", self.actionDownload) self.iface.addPluginToMenu("&OpenStreetMap", self.actionUpload) self.iface.addPluginToMenu("&OpenStreetMap", self.actionImport) self.iface.addPluginToMenu("&OpenStreetMap", self.actionSave) # create manager of sqlite database(-s) self.dbm=OsmDatabaseManager(self) self.undoredo=None self.dockWidget=None # create widget for undo/redo actions self.undoredo=OsmUndoRedoDW(self) self.iface.addDockWidget(Qt.LeftDockWidgetArea,self.undoredo) self.undoredo.hide() QObject.connect(self.undoredo,SIGNAL("visibilityChanged(bool)"),self.__urVisibilityChanged) self.undoredo.setContentEnabled(False) # create widget for osm feature info self.dockWidget=OsmFeatureDW(self) self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockWidget) QObject.connect(self.dockWidget,SIGNAL("visibilityChanged(bool)"),self.__ofVisibilityChanged) self.dockWidget.setContentEnabled(False) def unload(self): """Function unloads the OSM Plugin. """ self.canvas.unsetMapTool(self.dockWidget.mapTool) del self.dockWidget.mapTool self.dockWidget.mapTool=None # remove the plugin menu items self.iface.removePluginMenu("&OpenStreetMap",self.actionLoad) self.iface.removePluginMenu("&OpenStreetMap",self.actionSave) self.iface.removePluginMenu("&OpenStreetMap",self.actionDownload) self.iface.removePluginMenu("&OpenStreetMap",self.actionUpload) self.iface.removePluginMenu("&OpenStreetMap",self.actionImport) self.iface.removePluginMenu("&OpenStreetMap",self.actionDockWidget) self.dockWidget.close() if self.dockWidget.rubBand: self.dockWidget.rubBand.reset(False) if self.dockWidget.rubBandPol: self.dockWidget.rubBandPol.reset(True) self.undoredo.clear() self.undoredo.close() self.iface.removeDockWidget(self.dockWidget) self.iface.removeDockWidget(self.undoredo) del self.dockWidget del self.undoredo self.dockWidget=None self.undoredo=None # remove toolbar del self.toolBar # w/o osm plugin we don't need osm layers self.dbm.removeAllOsmLayers() def loadOsmFromFile(self): """Function shows up the "Load OSM from file" dialog. After closing it, function calls the appropriate actions according to dialog's return code. """ # sanity check whether we're able to load osm data if 'osm' not in QgsProviderRegistry.instance().providerList(): QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!") return # show modal dialog with OSM file selection self.dlgLoad=OsmLoadDlg(self) # continue only if OK button was clicked if self.dlgLoad.exec_()==0: return self.fname=self.dlgLoad.OSMFileEdit.text() self.dbFileName=self.fname+".db" self.dbm.addDatabase(self.dbFileName,self.dlgLoad.pointLayer,self.dlgLoad.lineLayer,self.dlgLoad.polygonLayer) self.undoredo.clear() self.dockWidget.setContentEnabled(True) self.undoredo.setContentEnabled(True) self.dataLoaded=True def saveOsmToFile(self): """Function shows up the "Save OSM to file" dialog. After closing it, function calls the appropriate actions according to dialog's return code. """ if 'osm' not in QgsProviderRegistry.instance().providerList(): QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!") return if not self.dbm.currentKey: QMessageBox.information(QWidget(), QString("OSM Save to file") ,"No OSM data are loaded/downloaded or no OSM layer is selected in Layers panel. \ Please change this situation first, because OSM Plugin doesn't know what to save.") return # show modal dialog with OSM file selection self.dlgSave=OsmSaveDlg(self) # continue only if OK button was clicked if self.dlgSave.exec_()==0: return def downloadOsmData(self): """Function shows up the "Download OSM data" dialog. After closing it, function calls the appropriate actions according to dialog's return code. """ if 'osm' not in QgsProviderRegistry.instance().providerList(): QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!") return self.dlgDownload=OsmDownloadDlg(self) self.dlgDownload.exec_() if not self.dlgDownload.httpSuccess: return if not self.dlgDownload.autoLoadCheckBox.isChecked(): return # create loading dialog, submit it self.dlgLoad=OsmLoadDlg(self) self.dlgLoad.setModal(True) self.dlgLoad.show() self.dlgLoad.close() self.dlgLoad.OSMFileEdit.setText(self.dlgDownload.destdirLineEdit.text()) self.dlgLoad.styleCombo.setCurrentIndex(self.dlgDownload.styleCombo.currentIndex()) if self.dlgDownload.chkCustomRenderer.isChecked(): self.dlgLoad.chkCustomRenderer.setChecked(True) else: self.dlgLoad.chkCustomRenderer.setChecked(False) for row in xrange(self.dlgLoad.lstTags.count()): self.dlgLoad.lstTags.item(row).setCheckState(Qt.Checked) if self.dlgDownload.chkReplaceData.isChecked(): self.dlgLoad.chkReplaceData.setChecked(True) else: self.dlgLoad.chkReplaceData.setChecked(False) self.dlgLoad.onOK() self.fname=self.dlgLoad.OSMFileEdit.text() self.dbFileName=self.fname+".db" self.dbm.addDatabase(self.dbFileName,self.dlgLoad.pointLayer,self.dlgLoad.lineLayer,self.dlgLoad.polygonLayer) def uploadOsmData(self): """Function shows up the "Upload OSM data" dialog. After closing it, function calls the appropriate actions according to dialog's return code. """ if 'osm' not in QgsProviderRegistry.instance().providerList(): QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!") return # first check if there are some data; if not upload doesn't have sense if not self.dbm.currentKey: QMessageBox.information(QWidget(), QString("OSM Upload") ,"No OSM data are loaded/downloaded or no OSM layer is selected in Layers panel. \ Please change this situation first, because OSM Plugin doesn't know what to upload.") return self.dlgUpload=OsmUploadDlg(self) self.dlgUpload.exec_() def importData(self): """Function shows up the "Import OSM data" dialog. After closing it, function calls the appropriate actions according to dialog's return code. """ if 'osm' not in QgsProviderRegistry.instance().providerList(): QMessageBox.critical(None, "Sorry", "You don't have OSM provider installed!") return if self.dbm.currentKey is None: QMessageBox.information(self.iface.mainWindow(), "OSM Import" ,"No OSM data are loaded/downloaded or no OSM layer is selected in Layers panel. \ Please change this situation first, because OSM Plugin doesn't know what layer will be destination of the import.") return dlg=OsmImportDlg(self) if dlg.cboLayer.count()==0: QMessageBox.information(self.iface.mainWindow(), "OSM Import", "There are currently no available vector layers.") return dlg.exec_() def showHideDockWidget(self): """Function shows/hides main dockable widget of the plugin ("OSM Feature" widget) """ if self.dockWidget.isVisible(): self.dockWidget.hide() else: self.dockWidget.show() def __urVisibilityChanged(self): """Function is called after visibilityChanged(...) signal is emitted on OSM Edit History widget. Function changes state of related checkbox according to the fact if widget is currently visible of not. """ if self.undoredo.isVisible(): self.dockWidget.urDetailsButton.setChecked(True) else: self.dockWidget.urDetailsButton.setChecked(False) def __ofVisibilityChanged(self): """Function is called after visibilityChanged(...) signal is emitted on OSM Feature widget. Function changes state of appropriate tool button according to the fact if widget is currently visible of not. """ if self.dockWidget.isVisible(): self.actionDockWidget.setChecked(True) else: self.actionDockWidget.setChecked(False)
gpl-2.0
joonas-fi/sumatrapdf
ext/freetype2/src/tools/chktrcmp.py
192
3823
#!/usr/bin/env python # # Check trace components in FreeType 2 source. # Author: suzuki toshiya, 2009, 2013 # # This code is explicitly into the public domain. import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [ "src" ] TRACE_DEF_FILES = [ "include/internal/fttrace.h" ] # -------------------------------------------------------------- # Parse command line options # for i in range( 1, len( sys.argv ) ): if sys.argv[i].startswith( "--help" ): print "Usage: %s [option]" % sys.argv[0] print "Search used-but-defined and defined-but-not-used trace_XXX macros" print "" print " --help:" print " Show this help" print "" print " --src-dirs=dir1:dir2:..." print " Specify the directories of C source files to be checked" print " Default is %s" % ":".join( SRC_FILE_DIRS ) print "" print " --def-files=file1:file2:..." print " Specify the header files including FT_TRACE_DEF()" print " Default is %s" % ":".join( TRACE_DEF_FILES ) print "" exit(0) if sys.argv[i].startswith( "--src-dirs=" ): SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" ) elif sys.argv[i].startswith( "--def-files=" ): TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" ) # -------------------------------------------------------------- # Scan C source and header files using trace macros. # c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE ) trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+trace_' ) for d in SRC_FILE_DIRS: for ( p, dlst, flst ) in os.walk( d ): for f in flst: if c_pathname_pat.match( f ) != None: src_pathname = os.path.join( p, f ) line_num = 0 for src_line in open( src_pathname, 'r' ): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match( src_line ) != None: component_name = trace_use_pat.sub( '', src_line ) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) ) else: USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' ) trace_def_pat_cls = re.compile( '[ \t\)].*$' ) for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open( f, 'r' ): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match( hdr_line ) != None: component_name = trace_def_pat_opn.sub( '', hdr_line ) component_name = trace_def_pat_cls.sub( '', component_name ) if component_name in KNOWN_COMPONENT: print "trace component %s is defined twice, see %s and fttrace.h:%d" % \ ( component_name, KNOWN_COMPONENT[component_name], line_num ) else: KNOWN_COMPONENT[component_name] = "%s:%d" % \ ( os.path.basename( f ), line_num ) # -------------------------------------------------------------- # Compare the used and defined trace macros. # print "# Trace component used in the implementations but not defined in fttrace.h." cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) ) print "# Trace component is defined but not used in the implementations." cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if c != "any": print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
gpl-3.0
haripradhan/MissionPlanner
Lib/xml/etree/SimpleXMLTreeBuilder.py
42
4805
# # ElementTree # $Id: SimpleXMLTreeBuilder.py 3225 2007-08-27 21:32:08Z fredrik $ # # A simple XML tree builder, based on Python's xmllib # # Note that due to bugs in xmllib, this builder does not fully support # namespaces (unqualified attributes are put in the default namespace, # instead of being left as is). Run this module as a script to find # out if this affects your Python version. # # history: # 2001-10-20 fl created # 2002-05-01 fl added namespace support for xmllib # 2002-08-17 fl added xmllib sanity test # # Copyright (c) 1999-2004 by Fredrik Lundh. All rights reserved. # # fredrik@pythonware.com # http://www.pythonware.com # # -------------------------------------------------------------------- # The ElementTree toolkit is # # Copyright (c) 1999-2007 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- ## # Tools to build element trees from XML files, using <b>xmllib</b>. # This module can be used instead of the standard tree builder, for # Python versions where "expat" is not available (such as 1.5.2). # <p> # Note that due to bugs in <b>xmllib</b>, the namespace support is # not reliable (you can run the module as a script to find out exactly # how unreliable it is on your Python version). ## import xmllib, string import ElementTree ## # ElementTree builder for XML source data. # # @see elementtree.ElementTree class TreeBuilder(xmllib.XMLParser): def __init__(self, html=0, target=None, encoding=None): self.__builder = ElementTree.TreeBuilder() if html: import htmlentitydefs self.entitydefs.update(htmlentitydefs.entitydefs) xmllib.XMLParser.__init__(self) ## # Feeds data to the parser. # # @param data Encoded data. def feed(self, data): xmllib.XMLParser.feed(self, data) ## # Finishes feeding data to the parser. # # @return An element structure. # @defreturn Element def close(self): xmllib.XMLParser.close(self) return self.__builder.close() def handle_data(self, data): self.__builder.data(data) handle_cdata = handle_data def unknown_starttag(self, tag, attrs): attrib = {} for key, value in attrs.items(): attrib[fixname(key)] = value self.__builder.start(fixname(tag), attrib) def unknown_endtag(self, tag): self.__builder.end(fixname(tag)) def fixname(name, split=string.split): # xmllib in 2.0 and later provides limited (and slightly broken) # support for XML namespaces. if " " not in name: return name return "{%s}%s" % tuple(split(name, " ", 1)) if __name__ == "__main__": import sys # sanity check: look for known namespace bugs in xmllib p = TreeBuilder() text = """\ <root xmlns='default'> <tag attribute='value' /> </root> """ p.feed(text) tree = p.close() status = [] # check for bugs in the xmllib implementation tag = tree.find("{default}tag") if tag is None: status.append("namespaces not supported") if tag is not None and tag.get("{default}attribute"): status.append("default namespace applied to unqualified attribute") # report bugs if status: print "xmllib doesn't work properly in this Python version:" for bug in status: print "-", bug else: print "congratulations; no problems found in xmllib"
gpl-3.0
wonder-sk/QGIS
python/ext-libs/requests/packages/chardet/escsm.py
2930
7839
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .constants import eStart, eError, eItsMe HZ_cls = ( 1,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,0,0,0,0, # 20 - 27 0,0,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,0,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,4,0,5,2,0, # 78 - 7f 1,1,1,1,1,1,1,1, # 80 - 87 1,1,1,1,1,1,1,1, # 88 - 8f 1,1,1,1,1,1,1,1, # 90 - 97 1,1,1,1,1,1,1,1, # 98 - 9f 1,1,1,1,1,1,1,1, # a0 - a7 1,1,1,1,1,1,1,1, # a8 - af 1,1,1,1,1,1,1,1, # b0 - b7 1,1,1,1,1,1,1,1, # b8 - bf 1,1,1,1,1,1,1,1, # c0 - c7 1,1,1,1,1,1,1,1, # c8 - cf 1,1,1,1,1,1,1,1, # d0 - d7 1,1,1,1,1,1,1,1, # d8 - df 1,1,1,1,1,1,1,1, # e0 - e7 1,1,1,1,1,1,1,1, # e8 - ef 1,1,1,1,1,1,1,1, # f0 - f7 1,1,1,1,1,1,1,1, # f8 - ff ) HZ_st = ( eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17 5,eError, 6,eError, 5, 5, 4,eError,# 18-1f 4,eError, 4, 4, 4,eError, 4,eError,# 20-27 4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f ) HZCharLenTable = (0, 0, 0, 0, 0, 0) HZSMModel = {'classTable': HZ_cls, 'classFactor': 6, 'stateTable': HZ_st, 'charLenTable': HZCharLenTable, 'name': "HZ-GB-2312"} ISO2022CN_cls = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,0,0,0,0, # 20 - 27 0,3,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,4,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 2,2,2,2,2,2,2,2, # 80 - 87 2,2,2,2,2,2,2,2, # 88 - 8f 2,2,2,2,2,2,2,2, # 90 - 97 2,2,2,2,2,2,2,2, # 98 - 9f 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,2, # f8 - ff ) ISO2022CN_st = ( eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27 5, 6,eError,eError,eError,eError,eError,eError,# 28-2f eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37 eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f ) ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0) ISO2022CNSMModel = {'classTable': ISO2022CN_cls, 'classFactor': 9, 'stateTable': ISO2022CN_st, 'charLenTable': ISO2022CNCharLenTable, 'name': "ISO-2022-CN"} ISO2022JP_cls = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,2,2, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,7,0,0,0, # 20 - 27 3,0,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 6,0,4,0,8,0,0,0, # 40 - 47 0,9,5,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 2,2,2,2,2,2,2,2, # 80 - 87 2,2,2,2,2,2,2,2, # 88 - 8f 2,2,2,2,2,2,2,2, # 90 - 97 2,2,2,2,2,2,2,2, # 98 - 9f 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,2, # f8 - ff ) ISO2022JP_st = ( eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f eError, 5,eError,eError,eError, 4,eError,eError,# 20-27 eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37 eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47 ) ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) ISO2022JPSMModel = {'classTable': ISO2022JP_cls, 'classFactor': 10, 'stateTable': ISO2022JP_st, 'charLenTable': ISO2022JPCharLenTable, 'name': "ISO-2022-JP"} ISO2022KR_cls = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 0,0,0,1,0,0,0,0, # 18 - 1f 0,0,0,0,3,0,0,0, # 20 - 27 0,4,0,0,0,0,0,0, # 28 - 2f 0,0,0,0,0,0,0,0, # 30 - 37 0,0,0,0,0,0,0,0, # 38 - 3f 0,0,0,5,0,0,0,0, # 40 - 47 0,0,0,0,0,0,0,0, # 48 - 4f 0,0,0,0,0,0,0,0, # 50 - 57 0,0,0,0,0,0,0,0, # 58 - 5f 0,0,0,0,0,0,0,0, # 60 - 67 0,0,0,0,0,0,0,0, # 68 - 6f 0,0,0,0,0,0,0,0, # 70 - 77 0,0,0,0,0,0,0,0, # 78 - 7f 2,2,2,2,2,2,2,2, # 80 - 87 2,2,2,2,2,2,2,2, # 88 - 8f 2,2,2,2,2,2,2,2, # 90 - 97 2,2,2,2,2,2,2,2, # 98 - 9f 2,2,2,2,2,2,2,2, # a0 - a7 2,2,2,2,2,2,2,2, # a8 - af 2,2,2,2,2,2,2,2, # b0 - b7 2,2,2,2,2,2,2,2, # b8 - bf 2,2,2,2,2,2,2,2, # c0 - c7 2,2,2,2,2,2,2,2, # c8 - cf 2,2,2,2,2,2,2,2, # d0 - d7 2,2,2,2,2,2,2,2, # d8 - df 2,2,2,2,2,2,2,2, # e0 - e7 2,2,2,2,2,2,2,2, # e8 - ef 2,2,2,2,2,2,2,2, # f0 - f7 2,2,2,2,2,2,2,2, # f8 - ff ) ISO2022KR_st = ( eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07 eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17 eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27 ) ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0) ISO2022KRSMModel = {'classTable': ISO2022KR_cls, 'classFactor': 6, 'stateTable': ISO2022KR_st, 'charLenTable': ISO2022KRCharLenTable, 'name': "ISO-2022-KR"} # flake8: noqa
gpl-2.0
Metaswitch/horizon
openstack_dashboard/api/ceilometer.py
18
49104
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import threading from ceilometerclient import client as ceilometer_client from django.conf import settings from django.utils import datastructures from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon.utils.memoized import memoized # noqa from openstack_dashboard.api import base from openstack_dashboard.api import keystone from openstack_dashboard.api import nova LOG = logging.getLogger(__name__) def get_flavor_names(request): # TODO(lsmola) The flavors can be set per project, # so it should show only valid ones. try: flavors = nova.flavor_list(request, None) return [f.name for f in flavors] except Exception: return ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge'] def is_iterable(var): """Return True if the given is list or tuple.""" return (isinstance(var, (list, tuple)) or issubclass(var.__class__, (list, tuple))) def make_query(user_id=None, tenant_id=None, resource_id=None, user_ids=None, tenant_ids=None, resource_ids=None): """Returns query built from given parameters. This query can be then used for querying resources, meters and statistics. :Parameters: - `user_id`: user_id, has a priority over list of ids - `tenant_id`: tenant_id, has a priority over list of ids - `resource_id`: resource_id, has a priority over list of ids - `user_ids`: list of user_ids - `tenant_ids`: list of tenant_ids - `resource_ids`: list of resource_ids """ user_ids = user_ids or [] tenant_ids = tenant_ids or [] resource_ids = resource_ids or [] query = [] if user_id: user_ids = [user_id] for u_id in user_ids: query.append({"field": "user_id", "op": "eq", "value": u_id}) if tenant_id: tenant_ids = [tenant_id] for t_id in tenant_ids: query.append({"field": "project_id", "op": "eq", "value": t_id}) if resource_id: resource_ids = [resource_id] for r_id in resource_ids: query.append({"field": "resource_id", "op": "eq", "value": r_id}) return query class Meter(base.APIResourceWrapper): """Represents one Ceilometer meter.""" _attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id'] def __init__(self, apiresource): super(Meter, self).__init__(apiresource) self._label = self.name self._description = "" def augment(self, label=None, description=None): if label: self._label = label if description: self._description = description @property def description(self): return self._description @property def label(self): return self._label class Resource(base.APIResourceWrapper): """Represents one Ceilometer resource.""" _attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata', 'links'] def __init__(self, apiresource, ceilometer_usage=None): super(Resource, self).__init__(apiresource) # Save empty strings to IDs rather than None, so it gets # serialized correctly. We don't want 'None' strings. self.project_id = self.project_id or "" self.user_id = self.user_id or "" self.resource_id = self.resource_id or "" self._id = "%s__%s__%s" % (self.project_id, self.user_id, self.resource_id) # Meters with statistics data self._meters = {} # TODO(lsmola) make parallel obtaining of tenant and user # make the threading here, thread join into resource_list if ceilometer_usage and self.project_id: self._tenant = ceilometer_usage.get_tenant(self.project_id) else: self._tenant = None if ceilometer_usage and self.user_id: self._user = ceilometer_usage.get_user(self.user_id) else: self._user = None self._query = make_query(tenant_id=self.project_id, user_id=self.user_id, resource_id=self.resource_id) @property def name(self): name = self.metadata.get("name", None) display_name = self.metadata.get("display_name", None) return name or display_name or "" @property def id(self): return self._id @property def tenant(self): return self._tenant @property def user(self): return self._user @property def resource(self): return self.resource_id @property def query(self): return self._query @property def meters(self): return self._meters def get_meter(self, meter_name): return self._meters.get(meter_name, None) def set_meter(self, meter_name, value): self._meters[meter_name] = value class ResourceAggregate(Resource): """Represents aggregate of more resources together. Aggregate of resources can be obtained by specifying multiple ids in one parameter or by not specifying one parameter. It can also be specified by query directly. Example: We can obtain an aggregate of resources by specifying multiple resource_ids in resource_id parameter in init. Or we can specify only tenant_id, which will return all resources of that tenant. """ def __init__(self, tenant_id=None, user_id=None, resource_id=None, tenant_ids=None, user_ids=None, resource_ids=None, ceilometer_usage=None, query=None, identifier=None): self._id = identifier self.tenant_id = None self.user_id = None self.resource_id = None # Meters with statistics data self._meters = {} if query: self._query = query else: # TODO(lsmola) make parallel obtaining of tenant and user # make the threading here, thread join into resource_list if ceilometer_usage and tenant_id: self.tenant_id = tenant_id self._tenant = ceilometer_usage.get_tenant(tenant_id) else: self._tenant = None if ceilometer_usage and user_id: self.user_id = user_id self._user = ceilometer_usage.get_user(user_id) else: self._user = None if resource_id: self.resource_id = resource_id self._query = make_query(tenant_id=tenant_id, user_id=user_id, resource_id=resource_id, tenant_ids=tenant_ids, user_ids=user_ids, resource_ids=resource_ids) @property def id(self): return self._id class Sample(base.APIResourceWrapper): """Represents one Ceilometer sample.""" _attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp', 'resource_metadata', 'source', 'counter_unit', 'counter_volume', 'project_id', 'counter_type', 'resource_metadata'] @property def instance(self): display_name = self.resource_metadata.get('display_name', None) instance_id = self.resource_metadata.get('instance_id', None) return display_name or instance_id @property def name(self): name = self.resource_metadata.get("name", None) display_name = self.resource_metadata.get("display_name", None) return name or display_name or "" class Statistic(base.APIResourceWrapper): """Represents one Ceilometer statistic.""" _attrs = ['period', 'period_start', 'period_end', 'count', 'min', 'max', 'sum', 'avg', 'duration', 'duration_start', 'duration_end'] @memoized def ceilometerclient(request): """Initialization of Ceilometer client.""" endpoint = base.url_for(request, 'metering') insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False) cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None) return ceilometer_client.Client('2', endpoint, token=(lambda: request.user.token.id), insecure=insecure, cacert=cacert) def resource_list(request, query=None, ceilometer_usage_object=None): """List the resources.""" resources = ceilometerclient(request).resources.list(q=query) return [Resource(r, ceilometer_usage_object) for r in resources] def sample_list(request, meter_name, query=None, limit=None): """List the samples for this meters.""" samples = ceilometerclient(request).samples.list(meter_name=meter_name, q=query, limit=limit) return [Sample(s) for s in samples] def meter_list(request, query=None): """List the user's meters.""" meters = ceilometerclient(request).meters.list(query) return [Meter(m) for m in meters] def statistic_list(request, meter_name, query=None, period=None): """List of statistics.""" statistics = ceilometerclient(request).\ statistics.list(meter_name=meter_name, q=query, period=period) return [Statistic(s) for s in statistics] class ThreadedUpdateResourceWithStatistics(threading.Thread): """Multithread wrapper for update_with_statistics method of resource_usage. A join logic is placed in process_list class method. All resources will have its statistics attribute filled in separate threads. The resource_usage object is shared between threads. Each thread is updating one Resource. :Parameters: - `resource`: Resource or ResourceAggregate object, that will be filled by statistic data. - `resources`: List of Resource or ResourceAggregate object, that will be filled by statistic data. - `resource_usage`: Wrapping resource usage object, that holds all statistics data. - `meter_names`: List of meter names of the statistics we want. - `period`: In seconds. If no period is given, only one aggregate statistic is returned. If given, a faceted result will be returned, divided into given periods. Periods with no data are ignored. - `stats_attr`: String representing the attribute name of the stats. E.g. (avg, max, min...) If None is given, whole statistic object is returned, - `additional_query`: Additional query for the statistics. E.g. timespan, etc. """ # TODO(lsmola) Can be removed once Ceilometer supports sample-api # and group-by, so all of this optimization will not be necessary. # It is planned somewhere to I. def __init__(self, resource_usage, resource, meter_names=None, period=None, filter_func=None, stats_attr=None, additional_query=None): super(ThreadedUpdateResourceWithStatistics, self).__init__() self.resource_usage = resource_usage self.resource = resource self.meter_names = meter_names self.period = period self.stats_attr = stats_attr self.additional_query = additional_query def run(self): # Run the job self.resource_usage.update_with_statistics( self.resource, meter_names=self.meter_names, period=self.period, stats_attr=self.stats_attr, additional_query=self.additional_query) @classmethod def process_list(cls, resource_usage, resources, meter_names=None, period=None, filter_func=None, stats_attr=None, additional_query=None): threads = [] for resource in resources: # add statistics data into resource thread = cls(resource_usage, resource, meter_names=meter_names, period=period, stats_attr=stats_attr, additional_query=additional_query) thread.start() threads.append(thread) for thread in threads: thread.join() class CeilometerUsage(object): """Represents wrapper of any Ceilometer queries. One instance of this class should be shared between resources as this class provides a place where users and tenants are cached. So there are no duplicate queries to API. This class also wraps Ceilometer API calls and provides parallel HTTP calls to API. This class should also serve as reasonable abstraction, that will cover huge amount of optimization due to optimization of Ceilometer service, without changing of the interface. """ def __init__(self, request): self._request = request # Cached users and tenants. self._users = {} self._tenants = {} def get_user(self, user_id): """Returns user fetched from API. Caching the result, so it doesn't contact API twice with the same query. """ user = self._users.get(user_id, None) if not user: user = keystone.user_get(self._request, user_id) # caching the user, for later use self._users[user_id] = user return user def preload_all_users(self): """Preloads all users into dictionary. It's more effective to preload all users, rather than fetching many users by separate API get calls. """ users = keystone.user_list(self._request) # Cache all users on right indexes, this is more effective than to # obtain large number of users one by one by keystone.user_get for u in users: self._users[u.id] = u def get_tenant(self, tenant_id): """Returns tenant fetched from API. Caching the result, so it doesn't contact API twice with the same query. """ tenant = self._tenants.get(tenant_id, None) if not tenant: tenant = keystone.tenant_get(self._request, tenant_id) # caching the tenant for later use self._tenants[tenant_id] = tenant return tenant def preload_all_tenants(self): """Preloads all tenants into dictionary. It's more effective to preload all tenants, rather than fetching each tenant by separate API get calls. """ tenants, more = keystone.tenant_list(self._request) # Cache all tenants on right indexes, this is more effective than to # obtain large number of tenants one by one by keystone.tenant_get for t in tenants: self._tenants[t.id] = t def global_data_get(self, used_cls=None, query=None, with_statistics=False, additional_query=None, with_users_and_tenants=True): """Obtaining a resources for table view. It obtains resources with statistics data according to declaration in used_cls class. :Parameters: - `user_cls`: Class wrapper for usage data. It acts as wrapper for settings needed. See the call of this method for details. - `query`: Explicit query definition for fetching the resources. If no query is provided, it takes a default_query from used_cls. If no default query is provided, it fetches all the resources and filters them by meters defined in used_cls. - `with_statistic`: Define whether statistics data from the meters defined in used_cls should be fetched. Can be used to first obtain only the pure resources, then with the statistics data by AJAX. - `additional_query`: Additional query for the statistics. E.g. timespan, etc. - `with_users_and_tenants`: If true a user and a tenant object will be added to each resource object. """ default_query = used_cls.default_query query = query or default_query filter_func = None def filter_resources(resource): """Method for filtering resources by their links.rel attr. The links.rel attributes contain all meters the resource has. """ for link in resource.links: if link['rel'] in used_cls.meters: return True return False if not query: # Not all resource types can be obtained by query, if there is not # a query, we are filtering all resources by this function. filter_func = filter_resources if with_statistics: # Will add statistic data into resources. resources = self.resources_with_statistics( query, used_cls.meters, filter_func=filter_func, stats_attr=used_cls.stats_attr, additional_query=additional_query, with_users_and_tenants=with_users_and_tenants) else: # Will load only resources without statistical data. resources = self.resources( query, filter_func=filter_func, with_users_and_tenants=with_users_and_tenants) return [used_cls(resource) for resource in resources] def query_from_object_id(self, object_id): """Obtaining a query from resource id. Query can be then used to identify a resource in resources or meters API calls. ID is being built in the Resource initializer, or returned by Datatable into UpdateRow functionality. """ try: tenant_id, user_id, resource_id = object_id.split("__") except ValueError: return [] return make_query(tenant_id=tenant_id, user_id=user_id, resource_id=resource_id) def update_with_statistics(self, resource, meter_names=None, period=None, stats_attr=None, additional_query=None): """Adding statistical data into one Resource or ResourceAggregate. It adds each statistic of each meter_names into the resource attributes. Attribute name is the meter name with replaced '.' to '_'. :Parameters: - `resource`: Resource or ResourceAggregate object, that will be filled by statistic data. - `meter_names`: List of meter names of which we want the statistics. - `period`: In seconds. If no period is given, only one aggregate statistic is returned. If given a faceted result will be returned, dividend into given periods. Periods with no data are ignored. - `stats_attr`: String representing the specific name of the stats. E.g. (avg, max, min...) If defined, meter attribute will contain just the one value. If None is given, meter attribute will contain the whole Statistic object. - `additional_query`: Additional query for the statistics. E.g. timespan, etc. """ if not meter_names: raise ValueError("meter_names and resources must be defined to be " "able to obtain the statistics.") # query for identifying one resource in meters query = resource.query if additional_query: if not is_iterable(additional_query): raise ValueError("Additional query must be list of" " conditions. See the docs for format.") query = query + additional_query # TODO(lsmola) thread for each meter will be probably overkill # but I should test lets say thread pool with 100 of threads # and apply it only to this code. # Though I do expect Ceilometer will support bulk requests, # so all of this optimization will not be necessary. for meter in meter_names: statistics = statistic_list(self._request, meter, query=query, period=period) meter = meter.replace(".", "_") if statistics: if stats_attr: # I want to load only a specific attribute resource.set_meter( meter, getattr(statistics[0], stats_attr, None)) else: # I want a dictionary of all statistics resource.set_meter(meter, statistics) else: resource.set_meter(meter, None) return resource def resources(self, query=None, filter_func=None, with_users_and_tenants=False): """Obtaining resources with the query or filter_func. Obtains resources and also fetch tenants and users associated with those resources if with_users_and_tenants flag is true. :Parameters: - `query`: Query for fetching the Ceilometer Resources. - `filter_func`: Callable for filtering of the obtained resources. - `with_users_and_tenants`: If true a user and a tenant object will be added to each resource object. """ if with_users_and_tenants: ceilometer_usage_object = self else: ceilometer_usage_object = None resources = resource_list( self._request, query=query, ceilometer_usage_object=ceilometer_usage_object) if filter_func: resources = [resource for resource in resources if filter_func(resource)] return resources def resources_with_statistics(self, query=None, meter_names=None, period=None, filter_func=None, stats_attr=None, additional_query=None, with_users_and_tenants=False): """Obtaining resources with statistics data inside. :Parameters: - `query`: Query for fetching the Ceilometer Resources. - `filter_func`: Callable for filtering of the obtained resources. - `meter_names`: List of meter names of which we want the statistics. - `period`: In seconds. If no period is given, only one aggregate statistic is returned. If given, a faceted result will be returned, divided into given periods. Periods with no data are ignored. - `stats_attr`: String representing the specific name of the stats. E.g. (avg, max, min...) If defined, meter attribute will contain just the one value. If None is given, meter attribute will contain the whole Statistic object. - `additional_query`: Additional query for the statistics. E.g. timespan, etc. - `with_users_and_tenants`: If true a user and a tenant object will be added to each resource object. """ resources = self.resources( query, filter_func=filter_func, with_users_and_tenants=with_users_and_tenants) ThreadedUpdateResourceWithStatistics.process_list( self, resources, meter_names=meter_names, period=period, stats_attr=stats_attr, additional_query=additional_query) return resources def resource_aggregates(self, queries=None): """Obtaining resource aggregates with queries. Representing a resource aggregate by query is a most general way how to obtain a resource aggregates. :Parameters: - `queries`: Dictionary of named queries that defines a bulk of resource aggregates. """ resource_aggregates = [] for identifier, query in queries.items(): resource_aggregates.append(ResourceAggregate(query=query, ceilometer_usage=None, identifier=identifier)) return resource_aggregates def resource_aggregates_with_statistics(self, queries=None, meter_names=None, period=None, filter_func=None, stats_attr=None, additional_query=None): """Obtaining resource aggregates with statistics data inside. :Parameters: - `queries`: Dictionary of named queries that defines a bulk of resource aggregates. - `meter_names`: List of meter names of which we want the statistics. - `period`: In seconds. If no period is given, only one aggregate statistic is returned. If given, a faceted result will be returned, divided into given periods. Periods with no data are ignored. - `stats_attr`: String representing the specific name of the stats. E.g. (avg, max, min...) If defined, meter attribute will contain just the one value. If None is given, meter attribute will contain the whole Statistic object. - `additional_query`: Additional query for the statistics. E.g. timespan, etc. """ resource_aggregates = self.resource_aggregates(queries) ThreadedUpdateResourceWithStatistics.process_list( self, resource_aggregates, meter_names=meter_names, period=period, stats_attr=stats_attr, additional_query=additional_query) return resource_aggregates def diff_lists(a, b): if not a: return [] elif not b: return a else: return list(set(a) - set(b)) class Meters(object): """Class for listing of available meters. It is listing meters defined in this class that are available in Ceilometer meter_list. It is storing information that is not available in Ceilometer, i.e. label, description. """ def __init__(self, request=None, ceilometer_meter_list=None): # Storing the request. self._request = request # Storing the Ceilometer meter list if ceilometer_meter_list: self._ceilometer_meter_list = ceilometer_meter_list else: try: self._ceilometer_meter_list = meter_list(request) except Exception: self._ceilometer_meter_list = [] exceptions.handle(self._request, _('Unable to retrieve Ceilometer meter ' 'list.')) # Storing the meters info categorized by their services. self._nova_meters_info = self._get_nova_meters_info() self._neutron_meters_info = self._get_neutron_meters_info() self._glance_meters_info = self._get_glance_meters_info() self._cinder_meters_info = self._get_cinder_meters_info() self._swift_meters_info = self._get_swift_meters_info() self._kwapi_meters_info = self._get_kwapi_meters_info() self._ipmi_meters_info = self._get_ipmi_meters_info() # Storing the meters info of all services together. all_services_meters = (self._nova_meters_info, self._neutron_meters_info, self._glance_meters_info, self._cinder_meters_info, self._swift_meters_info, self._kwapi_meters_info, self._ipmi_meters_info) self._all_meters_info = {} for service_meters in all_services_meters: self._all_meters_info.update(dict([(meter_name, meter_info) for meter_name, meter_info in service_meters.items()])) # Here will be the cached Meter objects, that will be reused for # repeated listing. self._cached_meters = {} def list_all(self, only_meters=None, except_meters=None): """Returns a list of meters based on the meters names. :Parameters: - `only_meters`: The list of meter names we want to show. - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=only_meters, except_meters=except_meters) def list_nova(self, except_meters=None): """Returns a list of meters tied to nova. :Parameters: - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=self._nova_meters_info.keys(), except_meters=except_meters) def list_neutron(self, except_meters=None): """Returns a list of meters tied to neutron. :Parameters: - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=self._neutron_meters_info.keys(), except_meters=except_meters) def list_glance(self, except_meters=None): """Returns a list of meters tied to glance. :Parameters: - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=self._glance_meters_info.keys(), except_meters=except_meters) def list_cinder(self, except_meters=None): """Returns a list of meters tied to cinder. :Parameters: - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=self._cinder_meters_info.keys(), except_meters=except_meters) def list_swift(self, except_meters=None): """Returns a list of meters tied to swift. :Parameters: - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=self._swift_meters_info.keys(), except_meters=except_meters) def list_kwapi(self, except_meters=None): """Returns a list of meters tied to kwapi. :Parameters: - `except_meters`: The list of meter names we don't want to show. """ return self._list(only_meters=self._kwapi_meters_info.keys(), except_meters=except_meters) def list_ipmi(self, except_meters=None): """Returns a list of meters tied to ipmi :Parameters: - `except_meters`: The list of meter names we don't want to show """ return self._list(only_meters=self._ipmi_meters_info.keys(), except_meters=except_meters) def _list(self, only_meters=None, except_meters=None): """Returns a list of meters based on the meters names. :Parameters: - `only_meters`: The list of meter names we want to show. - `except_meters`: The list of meter names we don't want to show. """ # Get all wanted meter names. if only_meters: meter_names = only_meters else: meter_names = [meter_name for meter_name in self._all_meters_info.keys()] meter_names = diff_lists(meter_names, except_meters) # Collect meters for wanted meter names. return self._get_meters(meter_names) def _get_meters(self, meter_names): """Obtain meters based on meter_names. The meters that do not exist in Ceilometer meter list are left out. :Parameters: - `meter_names`: A list of meter names we want to fetch. """ meters = [] for meter_name in meter_names: meter = self._get_meter(meter_name) if meter: meters.append(meter) return meters def _get_meter(self, meter_name): """Obtains a meter. Obtains meter either from cache or from Ceilometer meter list joined with statically defined meter info like label and description. :Parameters: - `meter_name`: A meter name we want to fetch. """ meter = self._cached_meters.get(meter_name, None) if not meter: meter_candidates = [m for m in self._ceilometer_meter_list if m.name == meter_name] if meter_candidates: meter_info = self._all_meters_info.get(meter_name, None) if meter_info: label = meter_info["label"] description = meter_info["description"] else: label = "" description = "" meter = meter_candidates[0] meter.augment(label=label, description=description) self._cached_meters[meter_name] = meter return meter def _get_nova_meters_info(self): """Returns additional info for each meter. That will be used for augmenting the Ceilometer meter. """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. meters_info = datastructures.SortedDict([ ("instance", { 'label': '', 'description': _("Existence of instance"), }), ("instance:<type>", { 'label': '', 'description': _("Existence of instance <type> " "(openstack types)"), }), ("memory", { 'label': '', 'description': _("Volume of RAM"), }), ("memory.usage", { 'label': '', 'description': _("Volume of RAM used"), }), ("cpu", { 'label': '', 'description': _("CPU time used"), }), ("cpu_util", { 'label': '', 'description': _("Average CPU utilization"), }), ("vcpus", { 'label': '', 'description': _("Number of VCPUs"), }), ("disk.read.requests", { 'label': '', 'description': _("Number of read requests"), }), ("disk.write.requests", { 'label': '', 'description': _("Number of write requests"), }), ("disk.read.bytes", { 'label': '', 'description': _("Volume of reads"), }), ("disk.write.bytes", { 'label': '', 'description': _("Volume of writes"), }), ("disk.read.requests.rate", { 'label': '', 'description': _("Average rate of read requests"), }), ("disk.write.requests.rate", { 'label': '', 'description': _("Average rate of write requests"), }), ("disk.read.bytes.rate", { 'label': '', 'description': _("Average rate of reads"), }), ("disk.write.bytes.rate", { 'label': '', 'description': _("Average volume of writes"), }), ("disk.root.size", { 'label': '', 'description': _("Size of root disk"), }), ("disk.ephemeral.size", { 'label': '', 'description': _("Size of ephemeral disk"), }), ("network.incoming.bytes", { 'label': '', 'description': _("Number of incoming bytes " "on the network for a VM interface"), }), ("network.outgoing.bytes", { 'label': '', 'description': _("Number of outgoing bytes " "on the network for a VM interface"), }), ("network.incoming.packets", { 'label': '', 'description': _("Number of incoming " "packets for a VM interface"), }), ("network.outgoing.packets", { 'label': '', 'description': _("Number of outgoing " "packets for a VM interface"), }), ("network.incoming.bytes.rate", { 'label': '', 'description': _("Average rate per sec of incoming " "bytes on a VM network interface"), }), ("network.outgoing.bytes.rate", { 'label': '', 'description': _("Average rate per sec of outgoing " "bytes on a VM network interface"), }), ("network.incoming.packets.rate", { 'label': '', 'description': _("Average rate per sec of incoming " "packets on a VM network interface"), }), ("network.outgoing.packets.rate", { 'label': '', 'description': _("Average rate per sec of outgoing " "packets on a VM network interface"), }), ]) # Adding flavor based meters into meters_info dict # TODO(lsmola) this kind of meter will be probably deprecated # https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then. for flavor in get_flavor_names(self._request): name = 'instance:%s' % flavor meters_info[name] = dict(meters_info["instance:<type>"]) meters_info[name]['description'] = ( _('Duration of instance type %s (openstack flavor)') % flavor) # TODO(lsmola) allow to set specific in local_settings. For all meters # because users can have their own agents and meters. return meters_info def _get_neutron_meters_info(self): """Returns additional info for each meter. That will be used for augmenting the Ceilometer meter. """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. return datastructures.SortedDict([ ('network', { 'label': '', 'description': _("Existence of network"), }), ('network.create', { 'label': '', 'description': _("Creation requests for this network"), }), ('network.update', { 'label': '', 'description': _("Update requests for this network"), }), ('subnet', { 'label': '', 'description': _("Existence of subnet"), }), ('subnet.create', { 'label': '', 'description': _("Creation requests for this subnet"), }), ('subnet.update', { 'label': '', 'description': _("Update requests for this subnet"), }), ('port', { 'label': '', 'description': _("Existence of port"), }), ('port.create', { 'label': '', 'description': _("Creation requests for this port"), }), ('port.update', { 'label': '', 'description': _("Update requests for this port"), }), ('router', { 'label': '', 'description': _("Existence of router"), }), ('router.create', { 'label': '', 'description': _("Creation requests for this router"), }), ('router.update', { 'label': '', 'description': _("Update requests for this router"), }), ('ip.floating', { 'label': '', 'description': _("Existence of floating ip"), }), ('ip.floating.create', { 'label': '', 'description': _("Creation requests for this floating ip"), }), ('ip.floating.update', { 'label': '', 'description': _("Update requests for this floating ip"), }), ]) def _get_glance_meters_info(self): """Returns additional info for each meter. That will be used for augmenting the Ceilometer meter. """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. return datastructures.SortedDict([ ('image', { 'label': '', 'description': _("Image existence check"), }), ('image.size', { 'label': '', 'description': _("Uploaded image size"), }), ('image.update', { 'label': '', 'description': _("Number of image updates"), }), ('image.upload', { 'label': '', 'description': _("Number of image uploads"), }), ('image.delete', { 'label': '', 'description': _("Number of image deletions"), }), ('image.download', { 'label': '', 'description': _("Image is downloaded"), }), ('image.serve', { 'label': '', 'description': _("Image is served out"), }), ]) def _get_cinder_meters_info(self): """Returns additional info for each meter. That will be used for augmenting the Ceilometer meter. """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. return datastructures.SortedDict([ ('volume', { 'label': '', 'description': _("Existence of volume"), }), ('volume.size', { 'label': '', 'description': _("Size of volume"), }), ]) def _get_swift_meters_info(self): """Returns additional info for each meter. That will be used for augmenting the Ceilometer meter. """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. return datastructures.SortedDict([ ('storage.objects', { 'label': '', 'description': _("Number of objects"), }), ('storage.objects.size', { 'label': '', 'description': _("Total size of stored objects"), }), ('storage.objects.containers', { 'label': '', 'description': _("Number of containers"), }), ('storage.objects.incoming.bytes', { 'label': '', 'description': _("Number of incoming bytes"), }), ('storage.objects.outgoing.bytes', { 'label': '', 'description': _("Number of outgoing bytes"), }), ('storage.api.request', { 'label': '', 'description': _("Number of API requests against swift"), }), ]) def _get_kwapi_meters_info(self): """Returns additional info for each meter. That will be used for augmenting the Ceilometer meter. """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. return datastructures.SortedDict([ ('energy', { 'label': '', 'description': _("Amount of energy"), }), ('power', { 'label': '', 'description': _("Power consumption"), }), ]) def _get_ipmi_meters_info(self): """Returns additional info for each meter That will be used for augmenting the Ceilometer meter """ # TODO(lsmola) Unless the Ceilometer will provide the information # below, I need to define it as a static here. I will be joining this # to info that I am able to obtain from Ceilometer meters, hopefully # some day it will be supported all. return datastructures.SortedDict([ ('hardware.ipmi.node.power', { 'label': '', 'description': _("System Current Power"), }), ('hardware.ipmi.fan', { 'label': '', 'description': _("Fan RPM"), }), ('hardware.ipmi.temperature', { 'label': '', 'description': _("Sensor Temperature Reading"), }), ('hardware.ipmi.current', { 'label': '', 'description': _("Sensor Current Reading"), }), ('hardware.ipmi.voltage', { 'label': '', 'description': _("Sensor Voltage Reading"), }), ('hardware.ipmi.node.temperature', { 'label': '', 'description': _("System Temperature Reading"), }), ('hardware.ipmi.node.outlet_temperature', { 'label': '', 'description': _("System Outlet Temperature Reading"), }), ('hardware.ipmi.node.airflow', { 'label': '', 'description': _("System Airflow Reading"), }), ('hardware.ipmi.node.cups', { 'label': '', 'description': _("System CUPS Reading"), }), ('hardware.ipmi.node.cpu_util', { 'label': '', 'description': _("System CPU Utility Reading"), }), ('hardware.ipmi.node.mem_util', { 'label': '', 'description': _("System Memory Utility Reading"), }), ('hardware.ipmi.node.io_util', { 'label': '', 'description': _("System IO Utility Reading"), }), ])
apache-2.0
israelbenatar/boto
tests/integration/kinesis/test_kinesis.py
99
4404
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import time import boto from tests.compat import unittest from boto.kinesis.exceptions import ResourceNotFoundException class TimeoutError(Exception): pass class TestKinesis(unittest.TestCase): def setUp(self): self.kinesis = boto.connect_kinesis() def test_kinesis(self): kinesis = self.kinesis # Create a new stream kinesis.create_stream('test', 1) self.addCleanup(self.kinesis.delete_stream, 'test') # Wait for the stream to be ready tries = 0 while tries < 10: tries += 1 time.sleep(15) response = kinesis.describe_stream('test') if response['StreamDescription']['StreamStatus'] == 'ACTIVE': shard_id = response['StreamDescription']['Shards'][0]['ShardId'] break else: raise TimeoutError('Stream is still not active, aborting...') # Make a tag. kinesis.add_tags_to_stream(stream_name='test', tags={'foo': 'bar'}) # Check that the correct tag is there. response = kinesis.list_tags_for_stream(stream_name='test') self.assertEqual(len(response['Tags']), 1) self.assertEqual(response['Tags'][0], {'Key':'foo', 'Value': 'bar'}) # Remove the tag and ensure it is removed. kinesis.remove_tags_from_stream(stream_name='test', tag_keys=['foo']) response = kinesis.list_tags_for_stream(stream_name='test') self.assertEqual(len(response['Tags']), 0) # Get ready to process some data from the stream response = kinesis.get_shard_iterator('test', shard_id, 'TRIM_HORIZON') shard_iterator = response['ShardIterator'] # Write some data to the stream data = 'Some data ...' record = { 'Data': data, 'PartitionKey': data, } response = kinesis.put_record('test', data, data) response = kinesis.put_records([record, record.copy()], 'test') # Wait for the data to show up tries = 0 num_collected = 0 num_expected_records = 3 collected_records = [] while tries < 100: tries += 1 time.sleep(1) response = kinesis.get_records(shard_iterator) shard_iterator = response['NextShardIterator'] for record in response['Records']: if 'Data' in record: collected_records.append(record['Data']) num_collected += 1 if num_collected >= num_expected_records: self.assertEqual(num_expected_records, num_collected) break else: raise TimeoutError('No records found, aborting...') # Read the data, which should be the same as what we wrote for record in collected_records: self.assertEqual(data, record) def test_describe_non_existent_stream(self): with self.assertRaises(ResourceNotFoundException) as cm: self.kinesis.describe_stream('this-stream-shouldnt-exist') # Assert things about the data we passed along. self.assertEqual(cm.exception.error_code, None) self.assertTrue('not found' in cm.exception.message)
mit
blaggacao/OpenUpgrade
addons/hr_attendance/__init__.py
434
1122
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import hr_attendance import wizard import report import res_config # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
75651/kbengine_cloud
kbe/res/scripts/common/Lib/ssl.py
67
34420
# Wrapper module for _ssl, providing some additional facilities # implemented in Python. Written by Bill Janssen. """This module provides some more Pythonic support for SSL. Object types: SSLSocket -- subtype of socket.socket which does SSL over the socket Exceptions: SSLError -- exception raised for I/O errors Functions: cert_time_to_seconds -- convert time string used for certificate notBefore and notAfter functions to integer seconds past the Epoch (the time values returned from time.time()) fetch_server_certificate (HOST, PORT) -- fetch the certificate provided by the server running on HOST at port PORT. No validation of the certificate is performed. Integer constants: SSL_ERROR_ZERO_RETURN SSL_ERROR_WANT_READ SSL_ERROR_WANT_WRITE SSL_ERROR_WANT_X509_LOOKUP SSL_ERROR_SYSCALL SSL_ERROR_SSL SSL_ERROR_WANT_CONNECT SSL_ERROR_EOF SSL_ERROR_INVALID_ERROR_CODE The following group define certificate requirements that one side is allowing/requiring from the other side: CERT_NONE - no certificates from the other side are required (or will be looked at if provided) CERT_OPTIONAL - certificates are not required, but if provided will be validated, and if validation fails, the connection will also fail CERT_REQUIRED - certificates are required, and will be validated, and if validation fails, the connection will also fail The following constants identify various SSL protocol variants: PROTOCOL_SSLv2 PROTOCOL_SSLv3 PROTOCOL_SSLv23 PROTOCOL_TLSv1 PROTOCOL_TLSv1_1 PROTOCOL_TLSv1_2 The following constants identify various SSL alert message descriptions as per http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-6 ALERT_DESCRIPTION_CLOSE_NOTIFY ALERT_DESCRIPTION_UNEXPECTED_MESSAGE ALERT_DESCRIPTION_BAD_RECORD_MAC ALERT_DESCRIPTION_RECORD_OVERFLOW ALERT_DESCRIPTION_DECOMPRESSION_FAILURE ALERT_DESCRIPTION_HANDSHAKE_FAILURE ALERT_DESCRIPTION_BAD_CERTIFICATE ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE ALERT_DESCRIPTION_CERTIFICATE_REVOKED ALERT_DESCRIPTION_CERTIFICATE_EXPIRED ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN ALERT_DESCRIPTION_ILLEGAL_PARAMETER ALERT_DESCRIPTION_UNKNOWN_CA ALERT_DESCRIPTION_ACCESS_DENIED ALERT_DESCRIPTION_DECODE_ERROR ALERT_DESCRIPTION_DECRYPT_ERROR ALERT_DESCRIPTION_PROTOCOL_VERSION ALERT_DESCRIPTION_INSUFFICIENT_SECURITY ALERT_DESCRIPTION_INTERNAL_ERROR ALERT_DESCRIPTION_USER_CANCELLED ALERT_DESCRIPTION_NO_RENEGOTIATION ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE ALERT_DESCRIPTION_UNRECOGNIZED_NAME ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY """ import textwrap import re import sys import os from collections import namedtuple from enum import Enum as _Enum import _ssl # if we can't import it, let the error propagate from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION from _ssl import _SSLContext from _ssl import ( SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, SSLSyscallError, SSLEOFError, ) from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED from _ssl import (VERIFY_DEFAULT, VERIFY_CRL_CHECK_LEAF, VERIFY_CRL_CHECK_CHAIN, VERIFY_X509_STRICT) from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj from _ssl import RAND_status, RAND_egd, RAND_add, RAND_bytes, RAND_pseudo_bytes def _import_symbols(prefix): for n in dir(_ssl): if n.startswith(prefix): globals()[n] = getattr(_ssl, n) _import_symbols('OP_') _import_symbols('ALERT_DESCRIPTION_') _import_symbols('SSL_ERROR_') from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1 from _ssl import _OPENSSL_API_VERSION _PROTOCOL_NAMES = { PROTOCOL_TLSv1: "TLSv1", PROTOCOL_SSLv23: "SSLv23", PROTOCOL_SSLv3: "SSLv3", } try: from _ssl import PROTOCOL_SSLv2 _SSLv2_IF_EXISTS = PROTOCOL_SSLv2 except ImportError: _SSLv2_IF_EXISTS = None else: _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2" try: from _ssl import PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2 except ImportError: pass else: _PROTOCOL_NAMES[PROTOCOL_TLSv1_1] = "TLSv1.1" _PROTOCOL_NAMES[PROTOCOL_TLSv1_2] = "TLSv1.2" if sys.platform == "win32": from _ssl import enum_certificates, enum_crls from socket import socket, AF_INET, SOCK_STREAM, create_connection from socket import SOL_SOCKET, SO_TYPE import base64 # for DER-to-PEM translation import errno socket_error = OSError # keep that public name in module namespace if _ssl.HAS_TLS_UNIQUE: CHANNEL_BINDING_TYPES = ['tls-unique'] else: CHANNEL_BINDING_TYPES = [] # Disable weak or insecure ciphers by default # (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL') # Enable a better set of ciphers by default # This list has been explicitly chosen to: # * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE) # * Prefer ECDHE over DHE for better performance # * Prefer any AES-GCM over any AES-CBC for better performance and security # * Then Use HIGH cipher suites as a fallback # * Then Use 3DES as fallback which is secure but slow # * Finally use RC4 as a fallback which is problematic but needed for # compatibility some times. # * Disable NULL authentication, NULL encryption, and MD5 MACs for security # reasons _DEFAULT_CIPHERS = ( 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:' 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:' 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5' ) # Restricted and more secure ciphers for the server side # This list has been explicitly chosen to: # * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE) # * Prefer ECDHE over DHE for better performance # * Prefer any AES-GCM over any AES-CBC for better performance and security # * Then Use HIGH cipher suites as a fallback # * Then Use 3DES as fallback which is secure but slow # * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, and RC4 for # security reasons _RESTRICTED_SERVER_CIPHERS = ( 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:' 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:' '!eNULL:!MD5:!DSS:!RC4' ) class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False leftmost, *remainder = dn.split(r'.') wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survery of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate, match_hostname needs a " "SSL socket or SSL context with either " "CERT_OPTIONAL or CERT_REQUIRED") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") DefaultVerifyPaths = namedtuple("DefaultVerifyPaths", "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env " "openssl_capath") def get_default_verify_paths(): """Return paths to default cafile and capath. """ parts = _ssl.get_default_verify_paths() # environment vars shadow paths cafile = os.environ.get(parts[0], parts[1]) capath = os.environ.get(parts[2], parts[3]) return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None, capath if os.path.isdir(capath) else None, *parts) class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")): """ASN.1 object identifier lookup """ __slots__ = () def __new__(cls, oid): return super().__new__(cls, *_txt2obj(oid, name=False)) @classmethod def fromnid(cls, nid): """Create _ASN1Object from OpenSSL numeric ID """ return super().__new__(cls, *_nid2obj(nid)) @classmethod def fromname(cls, name): """Create _ASN1Object from short name, long name or OID """ return super().__new__(cls, *_txt2obj(name, name=True)) class Purpose(_ASN1Object, _Enum): """SSLContext purpose flags with X509v3 Extended Key Usage objects """ SERVER_AUTH = '1.3.6.1.5.5.7.3.1' CLIENT_AUTH = '1.3.6.1.5.5.7.3.2' class SSLContext(_SSLContext): """An SSLContext holds various SSL-related configuration options and data, such as certificates and possibly a private key.""" __slots__ = ('protocol', '__weakref__') _windows_cert_stores = ("CA", "ROOT") def __new__(cls, protocol, *args, **kwargs): self = _SSLContext.__new__(cls, protocol) if protocol != _SSLv2_IF_EXISTS: self.set_ciphers(_DEFAULT_CIPHERS) return self def __init__(self, protocol): self.protocol = protocol def wrap_socket(self, sock, server_side=False, do_handshake_on_connect=True, suppress_ragged_eofs=True, server_hostname=None): return SSLSocket(sock=sock, server_side=server_side, do_handshake_on_connect=do_handshake_on_connect, suppress_ragged_eofs=suppress_ragged_eofs, server_hostname=server_hostname, _context=self) def set_npn_protocols(self, npn_protocols): protos = bytearray() for protocol in npn_protocols: b = bytes(protocol, 'ascii') if len(b) == 0 or len(b) > 255: raise SSLError('NPN protocols must be 1 to 255 in length') protos.append(len(b)) protos.extend(b) self._set_npn_protocols(protos) def _load_windows_store_certs(self, storename, purpose): certs = bytearray() for cert, encoding, trust in enum_certificates(storename): # CA certs are never PKCS#7 encoded if encoding == "x509_asn": if trust is True or purpose.oid in trust: certs.extend(cert) self.load_verify_locations(cadata=certs) return certs def load_default_certs(self, purpose=Purpose.SERVER_AUTH): if not isinstance(purpose, _ASN1Object): raise TypeError(purpose) if sys.platform == "win32": for storename in self._windows_cert_stores: self._load_windows_store_certs(storename, purpose) else: self.set_default_verify_paths() def create_default_context(purpose=Purpose.SERVER_AUTH, *, cafile=None, capath=None, cadata=None): """Create a SSLContext object with default settings. NOTE: The protocol and settings may change anytime without prior deprecation. The values represent a fair balance between maximum compatibility and security. """ if not isinstance(purpose, _ASN1Object): raise TypeError(purpose) context = SSLContext(PROTOCOL_SSLv23) # SSLv2 considered harmful. context.options |= OP_NO_SSLv2 # SSLv3 has problematic security and is only required for really old # clients such as IE6 on Windows XP context.options |= OP_NO_SSLv3 # disable compression to prevent CRIME attacks (OpenSSL 1.0+) context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0) if purpose == Purpose.SERVER_AUTH: # verify certs and host name in client mode context.verify_mode = CERT_REQUIRED context.check_hostname = True elif purpose == Purpose.CLIENT_AUTH: # Prefer the server's ciphers by default so that we get stronger # encryption context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0) # Use single use keys in order to improve forward secrecy context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0) context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0) # disallow ciphers with known vulnerabilities context.set_ciphers(_RESTRICTED_SERVER_CIPHERS) if cafile or capath or cadata: context.load_verify_locations(cafile, capath, cadata) elif context.verify_mode != CERT_NONE: # no explicit cafile, capath or cadata but the verify mode is # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system # root CA certificates for the given purpose. This may fail silently. context.load_default_certs(purpose) return context def _create_stdlib_context(protocol=PROTOCOL_SSLv23, *, cert_reqs=None, check_hostname=False, purpose=Purpose.SERVER_AUTH, certfile=None, keyfile=None, cafile=None, capath=None, cadata=None): """Create a SSLContext object for Python stdlib modules All Python stdlib modules shall use this function to create SSLContext objects in order to keep common settings in one place. The configuration is less restrict than create_default_context()'s to increase backward compatibility. """ if not isinstance(purpose, _ASN1Object): raise TypeError(purpose) context = SSLContext(protocol) # SSLv2 considered harmful. context.options |= OP_NO_SSLv2 if cert_reqs is not None: context.verify_mode = cert_reqs context.check_hostname = check_hostname if keyfile and not certfile: raise ValueError("certfile must be specified") if certfile or keyfile: context.load_cert_chain(certfile, keyfile) # load CA root certs if cafile or capath or cadata: context.load_verify_locations(cafile, capath, cadata) elif context.verify_mode != CERT_NONE: # no explicit cafile, capath or cadata but the verify mode is # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system # root CA certificates for the given purpose. This may fail silently. context.load_default_certs(purpose) return context class SSLSocket(socket): """This class implements a subtype of socket.socket that wraps the underlying OS socket in an SSL context when necessary, and provides read and write methods over that channel.""" def __init__(self, sock=None, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, ssl_version=PROTOCOL_SSLv23, ca_certs=None, do_handshake_on_connect=True, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None, suppress_ragged_eofs=True, npn_protocols=None, ciphers=None, server_hostname=None, _context=None): if _context: self._context = _context else: if server_side and not certfile: raise ValueError("certfile must be specified for server-side " "operations") if keyfile and not certfile: raise ValueError("certfile must be specified") if certfile and not keyfile: keyfile = certfile self._context = SSLContext(ssl_version) self._context.verify_mode = cert_reqs if ca_certs: self._context.load_verify_locations(ca_certs) if certfile: self._context.load_cert_chain(certfile, keyfile) if npn_protocols: self._context.set_npn_protocols(npn_protocols) if ciphers: self._context.set_ciphers(ciphers) self.keyfile = keyfile self.certfile = certfile self.cert_reqs = cert_reqs self.ssl_version = ssl_version self.ca_certs = ca_certs self.ciphers = ciphers # Can't use sock.type as other flags (such as SOCK_NONBLOCK) get # mixed in. if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM: raise NotImplementedError("only stream sockets are supported") if server_side and server_hostname: raise ValueError("server_hostname can only be specified " "in client mode") if self._context.check_hostname and not server_hostname: if HAS_SNI: raise ValueError("check_hostname requires server_hostname") else: raise ValueError("check_hostname requires server_hostname, " "but it's not supported by your OpenSSL " "library") self.server_side = server_side self.server_hostname = server_hostname self.do_handshake_on_connect = do_handshake_on_connect self.suppress_ragged_eofs = suppress_ragged_eofs if sock is not None: socket.__init__(self, family=sock.family, type=sock.type, proto=sock.proto, fileno=sock.fileno()) self.settimeout(sock.gettimeout()) sock.detach() elif fileno is not None: socket.__init__(self, fileno=fileno) else: socket.__init__(self, family=family, type=type, proto=proto) # See if we are connected try: self.getpeername() except OSError as e: if e.errno != errno.ENOTCONN: raise connected = False else: connected = True self._closed = False self._sslobj = None self._connected = connected if connected: # create the SSL object try: self._sslobj = self._context._wrap_socket(self, server_side, server_hostname) if do_handshake_on_connect: timeout = self.gettimeout() if timeout == 0.0: # non-blocking raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets") self.do_handshake() except (OSError, ValueError): self.close() raise @property def context(self): return self._context @context.setter def context(self, ctx): self._context = ctx self._sslobj.context = ctx def dup(self): raise NotImplemented("Can't dup() %s instances" % self.__class__.__name__) def _checkClosed(self, msg=None): # raise an exception here if you wish to check for spurious closes pass def _check_connected(self): if not self._connected: # getpeername() will raise ENOTCONN if the socket is really # not connected; note that we can be connected even without # _connected being set, e.g. if connect() first returned # EAGAIN. self.getpeername() def read(self, len=0, buffer=None): """Read up to LEN bytes and return them. Return zero-length string on EOF.""" self._checkClosed() if not self._sslobj: raise ValueError("Read on closed or unwrapped SSL socket.") try: if buffer is not None: v = self._sslobj.read(len, buffer) else: v = self._sslobj.read(len or 1024) return v except SSLError as x: if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs: if buffer is not None: return 0 else: return b'' else: raise def write(self, data): """Write DATA to the underlying SSL channel. Returns number of bytes of DATA actually transmitted.""" self._checkClosed() if not self._sslobj: raise ValueError("Write on closed or unwrapped SSL socket.") return self._sslobj.write(data) def getpeercert(self, binary_form=False): """Returns a formatted version of the data in the certificate provided by the other end of the SSL channel. Return None if no certificate was provided, {} if a certificate was provided, but not validated.""" self._checkClosed() self._check_connected() return self._sslobj.peer_certificate(binary_form) def selected_npn_protocol(self): self._checkClosed() if not self._sslobj or not _ssl.HAS_NPN: return None else: return self._sslobj.selected_npn_protocol() def cipher(self): self._checkClosed() if not self._sslobj: return None else: return self._sslobj.cipher() def compression(self): self._checkClosed() if not self._sslobj: return None else: return self._sslobj.compression() def send(self, data, flags=0): self._checkClosed() if self._sslobj: if flags != 0: raise ValueError( "non-zero flags not allowed in calls to send() on %s" % self.__class__) try: v = self._sslobj.write(data) except SSLError as x: if x.args[0] == SSL_ERROR_WANT_READ: return 0 elif x.args[0] == SSL_ERROR_WANT_WRITE: return 0 else: raise else: return v else: return socket.send(self, data, flags) def sendto(self, data, flags_or_addr, addr=None): self._checkClosed() if self._sslobj: raise ValueError("sendto not allowed on instances of %s" % self.__class__) elif addr is None: return socket.sendto(self, data, flags_or_addr) else: return socket.sendto(self, data, flags_or_addr, addr) def sendmsg(self, *args, **kwargs): # Ensure programs don't send data unencrypted if they try to # use this method. raise NotImplementedError("sendmsg not allowed on instances of %s" % self.__class__) def sendall(self, data, flags=0): self._checkClosed() if self._sslobj: if flags != 0: raise ValueError( "non-zero flags not allowed in calls to sendall() on %s" % self.__class__) amount = len(data) count = 0 while (count < amount): v = self.send(data[count:]) count += v return amount else: return socket.sendall(self, data, flags) def recv(self, buflen=1024, flags=0): self._checkClosed() if self._sslobj: if flags != 0: raise ValueError( "non-zero flags not allowed in calls to recv() on %s" % self.__class__) return self.read(buflen) else: return socket.recv(self, buflen, flags) def recv_into(self, buffer, nbytes=None, flags=0): self._checkClosed() if buffer and (nbytes is None): nbytes = len(buffer) elif nbytes is None: nbytes = 1024 if self._sslobj: if flags != 0: raise ValueError( "non-zero flags not allowed in calls to recv_into() on %s" % self.__class__) return self.read(nbytes, buffer) else: return socket.recv_into(self, buffer, nbytes, flags) def recvfrom(self, buflen=1024, flags=0): self._checkClosed() if self._sslobj: raise ValueError("recvfrom not allowed on instances of %s" % self.__class__) else: return socket.recvfrom(self, buflen, flags) def recvfrom_into(self, buffer, nbytes=None, flags=0): self._checkClosed() if self._sslobj: raise ValueError("recvfrom_into not allowed on instances of %s" % self.__class__) else: return socket.recvfrom_into(self, buffer, nbytes, flags) def recvmsg(self, *args, **kwargs): raise NotImplementedError("recvmsg not allowed on instances of %s" % self.__class__) def recvmsg_into(self, *args, **kwargs): raise NotImplementedError("recvmsg_into not allowed on instances of " "%s" % self.__class__) def pending(self): self._checkClosed() if self._sslobj: return self._sslobj.pending() else: return 0 def shutdown(self, how): self._checkClosed() self._sslobj = None socket.shutdown(self, how) def unwrap(self): if self._sslobj: s = self._sslobj.shutdown() self._sslobj = None return s else: raise ValueError("No SSL wrapper around " + str(self)) def _real_close(self): self._sslobj = None socket._real_close(self) def do_handshake(self, block=False): """Perform a TLS/SSL handshake.""" self._check_connected() timeout = self.gettimeout() try: if timeout == 0.0 and block: self.settimeout(None) self._sslobj.do_handshake() finally: self.settimeout(timeout) if self.context.check_hostname: if not self.server_hostname: raise ValueError("check_hostname needs server_hostname " "argument") match_hostname(self.getpeercert(), self.server_hostname) def _real_connect(self, addr, connect_ex): if self.server_side: raise ValueError("can't connect in server-side mode") # Here we assume that the socket is client-side, and not # connected at the time of the call. We connect it, then wrap it. if self._connected: raise ValueError("attempt to connect already-connected SSLSocket!") self._sslobj = self.context._wrap_socket(self, False, self.server_hostname) try: if connect_ex: rc = socket.connect_ex(self, addr) else: rc = None socket.connect(self, addr) if not rc: self._connected = True if self.do_handshake_on_connect: self.do_handshake() return rc except (OSError, ValueError): self._sslobj = None raise def connect(self, addr): """Connects to remote ADDR, and then wraps the connection in an SSL channel.""" self._real_connect(addr, False) def connect_ex(self, addr): """Connects to remote ADDR, and then wraps the connection in an SSL channel.""" return self._real_connect(addr, True) def accept(self): """Accepts a new connection from a remote client, and returns a tuple containing that new connection wrapped with a server-side SSL channel, and the address of the remote client.""" newsock, addr = socket.accept(self) newsock = self.context.wrap_socket(newsock, do_handshake_on_connect=self.do_handshake_on_connect, suppress_ragged_eofs=self.suppress_ragged_eofs, server_side=True) return newsock, addr def get_channel_binding(self, cb_type="tls-unique"): """Get channel binding data for current connection. Raise ValueError if the requested `cb_type` is not supported. Return bytes of the data or None if the data is not available (e.g. before the handshake). """ if cb_type not in CHANNEL_BINDING_TYPES: raise ValueError("Unsupported channel binding type") if cb_type != "tls-unique": raise NotImplementedError( "{0} channel binding type not implemented" .format(cb_type)) if self._sslobj is None: return None return self._sslobj.tls_unique_cb() def wrap_socket(sock, keyfile=None, certfile=None, server_side=False, cert_reqs=CERT_NONE, ssl_version=PROTOCOL_SSLv23, ca_certs=None, do_handshake_on_connect=True, suppress_ragged_eofs=True, ciphers=None): return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile, server_side=server_side, cert_reqs=cert_reqs, ssl_version=ssl_version, ca_certs=ca_certs, do_handshake_on_connect=do_handshake_on_connect, suppress_ragged_eofs=suppress_ragged_eofs, ciphers=ciphers) # some utility functions def cert_time_to_seconds(cert_time): """Takes a date-time string in standard ASN1_print form ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return a Python time value in seconds past the epoch.""" import time return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")) PEM_HEADER = "-----BEGIN CERTIFICATE-----" PEM_FOOTER = "-----END CERTIFICATE-----" def DER_cert_to_PEM_cert(der_cert_bytes): """Takes a certificate in binary DER format and returns the PEM version of it as a string.""" f = str(base64.standard_b64encode(der_cert_bytes), 'ASCII', 'strict') return (PEM_HEADER + '\n' + textwrap.fill(f, 64) + '\n' + PEM_FOOTER + '\n') def PEM_cert_to_DER_cert(pem_cert_string): """Takes a certificate in ASCII PEM format and returns the DER-encoded version of it as a byte sequence""" if not pem_cert_string.startswith(PEM_HEADER): raise ValueError("Invalid PEM encoding; must start with %s" % PEM_HEADER) if not pem_cert_string.strip().endswith(PEM_FOOTER): raise ValueError("Invalid PEM encoding; must end with %s" % PEM_FOOTER) d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)] return base64.decodebytes(d.encode('ASCII', 'strict')) def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None): """Retrieve the certificate from the server at the specified address, and return it as a PEM-encoded string. If 'ca_certs' is specified, validate the server cert against it. If 'ssl_version' is specified, use it in the connection attempt.""" host, port = addr if ca_certs is not None: cert_reqs = CERT_REQUIRED else: cert_reqs = CERT_NONE context = _create_stdlib_context(ssl_version, cert_reqs=cert_reqs, cafile=ca_certs) with create_connection(addr) as sock: with context.wrap_socket(sock) as sslsock: dercert = sslsock.getpeercert(True) return DER_cert_to_PEM_cert(dercert) def get_protocol_name(protocol_code): return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
lgpl-3.0
gnarula/eden_deployment
modules/s3db/msg.py
1
88933
# -*- coding: utf-8 -*- """ Sahana Eden Messaging Model @copyright: 2009-2014 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ("S3ChannelModel", "S3MessageModel", "S3MessageAttachmentModel", "S3EmailModel", "S3FacebookModel", "S3MCommonsModel", "S3ParsingModel", "S3RSSModel", "S3SMSModel", "S3SMSOutboundModel", "S3TropoModel", "S3TwilioModel", "S3TwitterModel", "S3TwitterSearchModel", "S3XFormsModel", "S3BaseStationModel", ) from gluon import * from gluon.storage import Storage from ..s3 import * # Compact JSON encoding SEPARATORS = (",", ":") # ============================================================================= class S3ChannelModel(S3Model): """ Messaging Channels - all Inbound & Outbound channels for messages are instances of this super-entity """ names = ("msg_channel", "msg_channel_limit", "msg_channel_status", "msg_channel_id", "msg_channel_enable", "msg_channel_disable", "msg_channel_enable_interactive", "msg_channel_disable_interactive", "msg_channel_onaccept", ) def model(self): T = current.T db = current.db define_table = self.define_table #---------------------------------------------------------------------- # Super entity: msg_channel # channel_types = Storage(msg_email_channel = T("Email (Inbound)"), msg_facebook_channel = T("Facebook"), msg_mcommons_channel = T("Mobile Commons (Inbound)"), msg_rss_channel = T("RSS Feed"), msg_sms_modem_channel = T("SMS Modem"), msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"), msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"), msg_tropo_channel = T("Tropo"), msg_twilio_channel = T("Twilio (Inbound)"), msg_twitter_channel = T("Twitter"), ) tablename = "msg_channel" self.super_entity(tablename, "channel_id", channel_types, Field("name", #label = T("Name"), ), Field("description", #label = T("Description"), ), Field("enabled", "boolean", default = True, #label = T("Enabled?") #represent = s3_yes_no_represent, ), # @ToDo: Indicate whether channel can be used for Inbound or Outbound #Field("inbound", "boolean", # label = T("Inbound?")), #Field("outbound", "boolean", # label = T("Outbound?")), ) # @todo: make lazy_table table = db[tablename] table.instance_type.readable = True # Reusable Field channel_id = S3ReusableField("channel_id", "reference %s" % tablename, label = T("Channel"), ondelete = "SET NULL", represent = S3Represent(lookup=tablename), requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_channel.id")), ) self.add_components(tablename, msg_channel_status = "channel_id", ) # --------------------------------------------------------------------- # Channel Limit # Used to limit the number of emails sent from the system # - works by simply recording an entry for the timestamp to be checked against # # - currently just used by msg.send_email() # tablename = "msg_channel_limit" define_table(tablename, # @ToDo: Make it per-channel #channel_id(), *s3_timestamp()) # --------------------------------------------------------------------- # Channel Status # Used to record errors encountered in the Channel # tablename = "msg_channel_status" define_table(tablename, channel_id(), Field("status", #label = T("Status") #represent = s3_yes_no_represent, ), *s3_meta_fields()) # --------------------------------------------------------------------- return dict(msg_channel_id = channel_id, msg_channel_enable = self.channel_enable, msg_channel_disable = self.channel_disable, msg_channel_enable_interactive = self.channel_enable_interactive, msg_channel_disable_interactive = self.channel_disable_interactive, msg_channel_onaccept = self.channel_onaccept, msg_channel_poll = self.channel_poll, ) # ------------------------------------------------------------------------- @staticmethod def channel_enable(tablename, channel_id): """ Enable a Channel - Schedule a Poll for new messages - Enable all associated Parsers CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.table(tablename) record = db(table.channel_id == channel_id).select(table.id, # needed for update_record table.enabled, limitby=(0, 1), ).first() if not record.enabled: # Flag it as enabled # Update Instance record.update_record(enabled = True) # Update Super s3db.update_super(table, record) # Enable all Parser tasks on this channel ptable = s3db.msg_parser query = (ptable.channel_id == channel_id) & \ (ptable.deleted == False) parsers = db(query).select(ptable.id) for parser in parsers: s3db.msg_parser_enable(parser.id) # Do we have an existing Task? ttable = db.scheduler_task args = '["%s", %s]' % (tablename, channel_id) query = ((ttable.function_name == "msg_poll") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: return "Channel already enabled" else: current.s3task.schedule_task("msg_poll", args = [tablename, channel_id], period = 300, # seconds timeout = 300, # seconds repeats = 0 # unlimited ) return "Channel enabled" # ------------------------------------------------------------------------- @staticmethod def channel_enable_interactive(r, **attr): """ Enable a Channel - Schedule a Poll for new messages S3Method for interactive requests """ tablename = r.tablename result = current.s3db.msg_channel_enable(tablename, r.record.channel_id) current.session.confirmation = result fn = tablename.split("_", 1)[1] redirect(URL(f=fn)) # ------------------------------------------------------------------------- @staticmethod def channel_disable(tablename, channel_id): """ Disable a Channel - Remove schedule for Polling for new messages - Disable all associated Parsers CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.table(tablename) record = db(table.channel_id == channel_id).select(table.id, # needed for update_record table.enabled, limitby=(0, 1), ).first() if record.enabled: # Flag it as disabled # Update Instance record.update_record(enabled = False) # Update Super s3db.update_super(table, record) # Disable all Parser tasks on this channel ptable = s3db.msg_parser parsers = db(ptable.channel_id == channel_id).select(ptable.id) for parser in parsers: s3db.msg_parser_disable(parser.id) # Do we have an existing Task? ttable = db.scheduler_task args = '["%s", %s]' % (tablename, channel_id) query = ((ttable.function_name == "msg_poll") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: # Disable all db(query).update(status="STOPPED") return "Channel disabled" else: return "Channel already disabled" # -------------------------------------------------------------------------- @staticmethod def channel_disable_interactive(r, **attr): """ Disable a Channel - Remove schedule for Polling for new messages S3Method for interactive requests """ tablename = r.tablename result = current.s3db.msg_channel_disable(tablename, r.record.channel_id) current.session.confirmation = result fn = tablename.split("_", 1)[1] redirect(URL(f=fn)) # ------------------------------------------------------------------------- @staticmethod def channel_onaccept(form): """ Process the Enabled Flag """ if form.record: # Update form # process of changed if form.record.enabled and not form.vars.enabled: current.s3db.msg_channel_disable(form.table._tablename, form.vars.channel_id) elif form.vars.enabled and not form.record.enabled: current.s3db.msg_channel_enable(form.table._tablename, form.vars.channel_id) else: # Create form # Process only if enabled if form.vars.enabled: current.s3db.msg_channel_enable(form.table._tablename, form.vars.channel_id) # ------------------------------------------------------------------------- @staticmethod def channel_poll(r, **attr): """ Poll a Channel for new messages S3Method for interactive requests """ tablename = r.tablename current.s3task.async("msg_poll", args=[tablename, r.record.channel_id]) current.session.confirmation = \ current.T("The poll request has been submitted, so new messages should appear shortly - refresh to see them") if tablename == "msg_email_channel": fn = "email_inbox" elif tablename == "msg_mcommons_channel": fn = "sms_inbox" elif tablename == "msg_rss_channel": fn = "rss" elif tablename == "msg_twilio_channel": fn = "sms_inbox" elif tablename == "msg_twitter_channel": fn = "twitter_inbox" else: return "Unsupported channel: %s" % tablename redirect(URL(f=fn)) # ============================================================================= class S3MessageModel(S3Model): """ Messages """ names = ("msg_message", "msg_message_id", "msg_message_represent", "msg_outbox", ) def model(self): T = current.T db = current.db UNKNOWN_OPT = current.messages.UNKNOWN_OPT configure = self.configure define_table = self.define_table # Message priority msg_priority_opts = {3 : T("High"), 2 : T("Medium"), 1 : T("Low"), } # --------------------------------------------------------------------- # Message Super Entity - all Inbound & Outbound Messages # message_types = Storage(msg_email = T("Email"), msg_facebook = T("Facebook"), msg_rss = T("RSS"), msg_sms = T("SMS"), msg_twitter = T("Twitter"), msg_twitter_result = T("Twitter Search Results"), ) tablename = "msg_message" self.super_entity(tablename, "message_id", message_types, # Knowing which Channel Incoming Messages # came in on allows correlation to Outbound # messages (campaign_message, deployment_alert, etc) self.msg_channel_id(), s3_datetime(default="now"), Field("body", "text", label = T("Message"), ), Field("from_address", label = T("From"), ), Field("to_address", label = T("To"), ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or \ [T("Out")])[0], ), ) # @todo: make lazy_table table = db[tablename] table.instance_type.readable = True table.instance_type.writable = True configure(tablename, list_fields = ["instance_type", "from_address", "to_address", "body", "inbound", ], ) # Reusable Field message_represent = S3Represent(lookup=tablename, fields=["body"]) message_id = S3ReusableField("message_id", "reference %s" % tablename, ondelete = "RESTRICT", represent = message_represent, requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_message.id")), ) self.add_components(tablename, msg_attachment = "message_id", deploy_response = "message_id", ) # --------------------------------------------------------------------- # Outbound Messages # # Show only the supported messaging methods MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS # Maximum number of retries to send a message MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries() # Valid message outbox statuses MSG_STATUS_OPTS = {1 : T("Unsent"), 2 : T("Sent"), 3 : T("Draft"), 4 : T("Invalid"), 5 : T("Failed"), } opt_msg_status = S3ReusableField("status", "integer", notnull=True, requires = IS_IN_SET(MSG_STATUS_OPTS, zero=None), default = 1, label = T("Status"), represent = lambda opt: \ MSG_STATUS_OPTS.get(opt, UNKNOWN_OPT)) # Outbox - needs to be separate to Message since a single message # sent needs different outbox entries for each recipient tablename = "msg_outbox" define_table(tablename, # FK not instance message_id(), # Person/Group to send the message out to: self.super_link("pe_id", "pr_pentity"), # If set used instead of picking up from pe_id: Field("address"), Field("contact_method", length=32, default = "EMAIL", label = T("Contact Method"), represent = lambda opt: \ MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT), requires = IS_IN_SET(MSG_CONTACT_OPTS, zero=None), ), opt_msg_status(), # Used to loop through a PE to get it's members Field("system_generated", "boolean", default = False, ), # Give up if we can't send after MAX_RETRIES Field("retries", "integer", default = MAX_SEND_RETRIES, readable = False, writable = False, ), *s3_meta_fields()) configure(tablename, list_fields = ["id", "message_id", "pe_id", "status", ], orderby = "msg_outbox.created_on desc", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) return dict(msg_message_id = message_id, msg_message_represent = message_represent, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Return safe defaults in case the model has been deactivated. """ dummy = S3ReusableField("dummy_id", "integer", readable = False, writable = False) return dict(msg_message_id = lambda **attr: dummy("message_id"), ) # ============================================================================= class S3MessageAttachmentModel(S3Model): """ Message Attachments - link table between msg_message & doc_document """ names = ("msg_attachment",) def model(self): # --------------------------------------------------------------------- # tablename = "msg_attachment" self.define_table(tablename, # FK not instance self.msg_message_id(), self.doc_document_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) return dict() # ============================================================================= class S3EmailModel(S3ChannelModel): """ Email InBound Channels Outbound Email is currently handled via deployment_settings InBox/OutBox """ names = ("msg_email_channel", "msg_email", ) def model(self): T = current.T configure = self.configure define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # Email Inbound Channels # tablename = "msg_email_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("server"), Field("protocol", requires = IS_IN_SET(["imap", "pop3"], zero=None), ), Field("use_ssl", "boolean"), Field("port", "integer"), Field("username"), Field("password", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), # Set true to delete messages from the remote # inbox after fetching them. Field("delete_from_server", "boolean"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "email_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "email_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "email_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Email Messages: InBox & Outbox # sender = current.deployment_settings.get_mail_sender() tablename = "msg_email" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now"), Field("subject", length=78, # RFC 2822 label = T("Subject"), ), Field("body", "text", label = T("Message"), ), Field("from_address", #notnull=True, default = sender, label = T("Sender"), requires = IS_EMAIL(), ), Field("to_address", label = T("To"), requires = IS_EMAIL(), ), Field("raw", "text", label = T("Message Source"), readable = False, writable = False, ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or [T("Out")])[0], ), *s3_meta_fields()) configure(tablename, orderby = "msg_email.date desc", super_entity = "msg_message", ) # Components self.add_components(tablename, # Used to link to custom tab deploy_response_select_mission: deploy_mission = {"name": "select", "link": "deploy_response", "joinby": "message_id", "key": "mission_id", "autodelete": False, }, ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3FacebookModel(S3ChannelModel): """ Facebook Channels InBox/OutBox https://developers.facebook.com/docs/graph-api """ names = ("msg_facebook_channel", "msg_facebook", "msg_facebook_login", ) def model(self): T = current.T configure = self.configure define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # Facebook Channels # tablename = "msg_facebook_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("login", "boolean", default = False, label = T("Use for Login?"), represent = s3_yes_no_represent, ), Field("app_id", "bigint", requires = IS_INT_IN_RANGE(0, +1e16) ), Field("app_secret", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), # Optional Field("page_id", "bigint", requires = IS_INT_IN_RANGE(0, +1e16) ), Field("page_access_token"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_facebook_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "facebook_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "facebook_channel", method = "disable", action = self.msg_channel_disable_interactive) #set_method("msg", "facebook_channel", # method = "poll", # action = self.msg_channel_poll) # --------------------------------------------------------------------- # Facebook Messages: InBox & Outbox # tablename = "msg_facebook" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now"), Field("body", "text", label = T("Message"), ), # @ToDo: Are from_address / to_address relevant in Facebook? Field("from_address", #notnull=True, #default = sender, label = T("Sender"), ), Field("to_address", label = T("To"), ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or [T("Out")])[0], ), *s3_meta_fields()) configure(tablename, orderby = "msg_facebook.date desc", super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict(msg_facebook_login = self.msg_facebook_login, ) # ------------------------------------------------------------------------- @staticmethod def defaults(): """ Safe defaults for model-global names if module is disabled """ return dict(msg_facebook_login = lambda: False, ) # ------------------------------------------------------------------------- @staticmethod def msg_facebook_channel_onaccept(form): if form.vars.login: # Ensure only a single account used for Login current.db(current.s3db.msg_facebook_channel.id != form.vars.id).update(login = False) # Normal onaccept processing S3ChannelModel.channel_onaccept(form) # ------------------------------------------------------------------------- @staticmethod def msg_facebook_login(): table = current.s3db.msg_facebook_channel query = (table.login == True) & \ (table.deleted == False) c = current.db(query).select(table.app_id, table.app_secret, limitby=(0, 1) ).first() return c # ============================================================================= class S3MCommonsModel(S3ChannelModel): """ Mobile Commons Inbound SMS Settings - Outbound can use Web API """ names = ("msg_mcommons_channel",) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- tablename = "msg_mcommons_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("campaign_id", length=128, unique=True, requires = IS_NOT_EMPTY(), ), Field("url", default = \ "https://secure.mcommons.com/api/messages", requires = IS_URL() ), Field("username", requires = IS_NOT_EMPTY(), ), Field("password", "password", readable = False, requires = IS_NOT_EMPTY(), ), Field("query"), Field("timestmp", "datetime", writable = False, ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "mcommons_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "mcommons_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "mcommons_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3ParsingModel(S3Model): """ Message Parsing Model """ names = ("msg_parser", "msg_parsing_status", "msg_session", "msg_keyword", "msg_sender", "msg_parser_enabled", "msg_parser_enable", "msg_parser_disable", "msg_parser_enable_interactive", "msg_parser_disable_interactive", ) def model(self): T = current.T define_table = self.define_table set_method = self.set_method channel_id = self.msg_channel_id message_id = self.msg_message_id # --------------------------------------------------------------------- # Link between Message Channels and Parsers in parser.py # tablename = "msg_parser" define_table(tablename, # Source channel_id(ondelete = "CASCADE"), Field("function_name", label = T("Parser"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_parser_onaccept, ) set_method("msg", "parser", method = "enable", action = self.parser_enable_interactive) set_method("msg", "parser", method = "disable", action = self.parser_disable_interactive) set_method("msg", "parser", method = "parse", action = self.parser_parse) # --------------------------------------------------------------------- # Message parsing status # - component to core msg_message table # tablename = "msg_parsing_status" define_table(tablename, # Component, not Instance message_id(ondelete = "CASCADE"), # Source channel_id(ondelete = "CASCADE"), Field("is_parsed", "boolean", default = False, label = T("Parsing Status"), represent = lambda parsed: \ (parsed and [T("Parsed")] or \ [T("Not Parsed")])[0], ), message_id("reply_id", label = T("Reply"), ondelete = "CASCADE", ), *s3_meta_fields()) # --------------------------------------------------------------------- # Login sessions for Message Parsing # - links a from_address with a login until expiry # tablename = "msg_session" define_table(tablename, Field("from_address"), Field("email"), Field("created_datetime", "datetime", default = current.request.utcnow, ), Field("expiration_time", "integer"), Field("is_expired", "boolean", default = False, ), *s3_meta_fields()) # --------------------------------------------------------------------- # Keywords for Message Parsing # tablename = "msg_keyword" define_table(tablename, Field("keyword", label = T("Keyword"), ), # @ToDo: Move this to a link table self.event_incident_type_id(), *s3_meta_fields()) # --------------------------------------------------------------------- # Senders for Message Parsing # - whitelist / blacklist / prioritise # tablename = "msg_sender" define_table(tablename, Field("sender", label = T("Sender"), ), # @ToDo: Make pe_id work for this #self.super_link("pe_id", "pr_pentity"), Field("priority", "integer", label = T("Priority"), ), *s3_meta_fields()) # --------------------------------------------------------------------- return dict(msg_parser_enabled = self.parser_enabled, msg_parser_enable = self.parser_enable, msg_parser_disable = self.parser_disable, ) # ----------------------------------------------------------------------------- @staticmethod def parser_parse(r, **attr): """ Parse unparsed messages S3Method for interactive requests """ record = r.record current.s3task.async("msg_parse", args=[record.channel_id, record.function_name]) current.session.confirmation = \ current.T("The parse request has been submitted") redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def parser_enabled(channel_id): """ Helper function to see if there is a Parser connected to a Channel - used to determine whether to populate the msg_parsing_status table """ table = current.s3db.msg_parser record = current.db(table.channel_id == channel_id).select(table.enabled, limitby=(0, 1), ).first() if record and record.enabled: return True else: return False # ------------------------------------------------------------------------- @staticmethod def parser_enable(id): """ Enable a Parser - Connect a Parser to a Channel CLI API for shell scripts & to be called by S3Method @ToDo: Ensure only 1 Parser is connected to any Channel at a time """ db = current.db s3db = current.s3db table = s3db.msg_parser record = db(table.id == id).select(table.id, # needed for update_record table.enabled, table.channel_id, table.function_name, limitby=(0, 1), ).first() if not record.enabled: # Flag it as enabled record.update_record(enabled = True) channel_id = record.channel_id function_name = record.function_name # Do we have an existing Task? ttable = db.scheduler_task args = '[%s, "%s"]' % (channel_id, function_name) query = ((ttable.function_name == "msg_parse") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: return "Parser already enabled" else: current.s3task.schedule_task("msg_parse", args = [channel_id, function_name], period = 300, # seconds timeout = 300, # seconds repeats = 0 # unlimited ) return "Parser enabled" # ------------------------------------------------------------------------- @staticmethod def parser_enable_interactive(r, **attr): """ Enable a Parser - Connect a Parser to a Channel S3Method for interactive requests """ result = current.s3db.msg_parser_enable(r.id) current.session.confirmation = result redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def parser_disable(id): """ Disable a Parser - Disconnect a Parser from a Channel CLI API for shell scripts & to be called by S3Method """ db = current.db s3db = current.s3db table = s3db.msg_parser record = db(table.id == id).select(table.id, # needed for update_record table.enabled, table.channel_id, table.function_name, limitby=(0, 1), ).first() if record.enabled: # Flag it as disabled record.update_record(enabled = False) # Do we have an existing Task? ttable = db.scheduler_task args = '[%s, "%s"]' % (record.channel_id, record.function_name) query = ((ttable.function_name == "msg_parse") & \ (ttable.args == args) & \ (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"]))) exists = db(query).select(ttable.id, limitby=(0, 1)).first() if exists: # Disable all db(query).update(status="STOPPED") return "Parser disabled" else: return "Parser already disabled" # ------------------------------------------------------------------------- @staticmethod def parser_disable_interactive(r, **attr): """ Disable a Parser - Disconnect a Parser from a Channel S3Method for interactive requests """ result = current.s3db.msg_parser_disable(r.id) current.session.confirmation = result redirect(URL(f="parser")) # ------------------------------------------------------------------------- @staticmethod def msg_parser_onaccept(form): """ Process the Enabled Flag """ if form.record: # Update form # process of changed if form.record.enabled and not form.vars.enabled: current.s3db.msg_parser_disable(form.vars.id) elif form.vars.enabled and not form.record.enabled: current.s3db.msg_parser_enable(form.vars.id) else: # Create form # Process only if enabled if form.vars.enabled: current.s3db.msg_parser_enable(form.vars.id) # ============================================================================= class S3RSSModel(S3ChannelModel): """ RSS channel """ names = ("msg_rss_channel", "msg_rss", ) def model(self): T = current.T define_table = self.define_table set_method = self.set_method super_link = self.super_link # --------------------------------------------------------------------- # RSS Settings for an account # tablename = "msg_rss_channel" define_table(tablename, # Instance super_link("channel_id", "msg_channel"), Field("name", length=255, unique=True, label = T("Name"), ), Field("description", label = T("Description"), ), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("url", label = T("URL"), requires = IS_URL(), ), s3_datetime(label = T("Last Polled"), writable = False, ), Field("etag", label = T("ETag"), writable = False ), *s3_meta_fields()) self.configure(tablename, list_fields = ["name", "description", "enabled", "url", "date", "channel_status.status", ], onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "rss_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "rss_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "rss_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # RSS Feed Posts # tablename = "msg_rss" define_table(tablename, # Instance super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default="now", label = T("Published on"), ), Field("title", label = T("Title"), ), Field("body", "text", label = T("Content"), ), Field("from_address", label = T("Link"), ), # http://pythonhosted.org/feedparser/reference-feed-author_detail.html Field("author", label = T("Author"), ), # http://pythonhosted.org/feedparser/reference-entry-tags.html Field("tags", "list:string", label = T("Tags"), ), self.gis_location_id(), # Just present for Super Entity Field("inbound", "boolean", default = True, readable = False, writable = False, ), *s3_meta_fields()) self.configure(tablename, deduplicate = self.msg_rss_duplicate, list_fields = ["channel_id", "title", "from_address", "date", "body" ], super_entity = current.s3db.msg_message, ) # --------------------------------------------------------------------- return dict() # --------------------------------------------------------------------- @staticmethod def msg_rss_duplicate(item): """ Import item deduplication, match by link (from_address) @param item: the S3ImportItem instance """ if item.tablename == "msg_rss": table = item.table from_address = item.data.get("from_address") query = (table.from_address == from_address) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # ============================================================================= class S3SMSModel(S3Model): """ SMS: Short Message Service These can be received through a number of different gateways - MCommons - Modem (@ToDo: Restore this) - Tropo - Twilio """ names = ("msg_sms",) def model(self): #T = current.T user = current.auth.user if user and user.organisation_id: # SMS Messages need to be tagged to their org so that they can be sent through the correct gateway default = user.organisation_id else: default = None # --------------------------------------------------------------------- # SMS Messages: InBox & Outbox # tablename = "msg_sms" self.define_table(tablename, # Instance self.super_link("message_id", "msg_message"), self.msg_channel_id(), self.org_organisation_id(default = default), s3_datetime(default="now"), Field("body", "text", # Allow multi-part SMS #length = 160, #label = T("Message"), ), Field("from_address", #label = T("Sender"), ), Field("to_address", #label = T("To"), ), Field("inbound", "boolean", default = False, #represent = lambda direction: \ # (direction and [T("In")] or \ # [T("Out")])[0], #label = T("Direction")), ), # Used e.g. for Clickatell Field("remote_id", #label = T("Remote ID"), ), *s3_meta_fields()) self.configure(tablename, super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3SMSOutboundModel(S3Model): """ SMS: Short Message Service - Outbound Channels These can be sent through a number of different gateways - Modem - SMTP - Tropo - Web API (inc Clickatell, MCommons, mVaayoo) """ names = ("msg_sms_outbound_gateway", "msg_sms_modem_channel", "msg_sms_smtp_channel", "msg_sms_webapi_channel", ) def model(self): #T = current.T configure = self.configure define_table = self.define_table # --------------------------------------------------------------------- # SMS Outbound Gateway # - select which gateway is in active use for which Organisation/Branch # tablename = "msg_sms_outbound_gateway" define_table(tablename, self.msg_channel_id( requires = IS_ONE_OF(current.db, "msg_channel.channel_id", S3Represent(lookup="msg_channel"), instance_types = ("msg_sms_modem_channel", "msg_sms_webapi_channel", "msg_sms_smtp_channel", ), sort = True, ), ), #Field("outgoing_sms_handler", length=32, # requires = IS_IN_SET(current.msg.GATEWAY_OPTS, # zero = None), # ), # Allow selection of different gateways based on Organisation/Branch self.org_organisation_id(), # @ToDo: Allow selection of different gateways based on destination Location #self.gis_location_id(), # @ToDo: Allow addition of relevant country code (currently in deployment_settings) #Field("default_country_code", "integer", # default = 44), *s3_meta_fields()) # --------------------------------------------------------------------- # SMS Modem Channel # tablename = "msg_sms_modem_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("modem_port"), Field("modem_baud", "integer", default = 115200, ), Field("enabled", "boolean", default = True, ), Field("max_length", "integer", default = 160, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- # SMS via SMTP Channel # tablename = "msg_sms_smtp_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("address", length=64, requires = IS_NOT_EMPTY(), ), Field("subject", length=64), Field("enabled", "boolean", default = True, ), Field("max_length", "integer", default = 160, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- # Settings for Web API services # # @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options # + Advanced mode for raw access to real fields # tablename = "msg_sms_webapi_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("url", default = "https://api.clickatell.com/http/sendmsg", # Clickatell #default = "https://secure.mcommons.com/api/send_message", # Mobile Commons requires = IS_URL(), ), Field("parameters", default = "user=yourusername&password=yourpassword&api_id=yourapiid", # Clickatell #default = "campaign_id=yourid", # Mobile Commons ), Field("message_variable", "string", default = "text", # Clickatell #default = "body", # Mobile Commons requires = IS_NOT_EMPTY(), ), Field("to_variable", "string", default = "to", # Clickatell #default = "phone_number", # Mobile Commons requires = IS_NOT_EMPTY(), ), Field("max_length", "integer", default = 480, # Clickatell concat 3 ), # If using HTTP Auth (e.g. Mobile Commons) Field("username"), Field("password"), Field("enabled", "boolean", default = True, ), *s3_meta_fields()) configure(tablename, super_entity = "msg_channel", ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3TropoModel(S3Model): """ Tropo can be used to send & receive SMS, Twitter & XMPP https://www.tropo.com """ names = ("msg_tropo_channel", "msg_tropo_scratch", ) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Tropo Channels # tablename = "msg_tropo_channel" define_table(tablename, self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("token_messaging"), #Field("token_voice"), *s3_meta_fields()) self.configure(tablename, super_entity = "msg_channel", ) set_method("msg", "tropo_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "tropo_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "tropo_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Tropo Scratch pad for outbound messaging # tablename = "msg_tropo_scratch" define_table(tablename, Field("row_id", "integer"), Field("message_id", "integer"), Field("recipient"), Field("message"), Field("network"), ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3TwilioModel(S3ChannelModel): """ Twilio Inbound SMS channel """ names = ("msg_twilio_channel", "msg_twilio_sid", ) def model(self): #T = current.T define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twilio Channels # tablename = "msg_twilio_channel" define_table(tablename, # Instance self.super_link("channel_id", "msg_channel"), Field("name"), Field("description"), Field("enabled", "boolean", default = True, #label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("account_name", length=255, unique=True), Field("url", default = \ "https://api.twilio.com/2010-04-01/Accounts" ), Field("account_sid", length=64, requires = IS_NOT_EMPTY(), ), Field("auth_token", "password", length=64, readable = False, requires = IS_NOT_EMPTY(), ), *s3_meta_fields()) self.configure(tablename, onaccept = self.msg_channel_onaccept, super_entity = "msg_channel", ) set_method("msg", "twilio_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "twilio_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "twilio_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Twilio Message extensions # - store message sid to know which ones we've already downloaded # tablename = "msg_twilio_sid" define_table(tablename, # Component not Instance self.msg_message_id(ondelete = "CASCADE"), Field("sid"), *s3_meta_fields()) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3TwitterModel(S3Model): names = ("msg_twitter_channel", "msg_twitter", ) def model(self): T = current.T db = current.db configure = self.configure define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twitter Channel # tablename = "msg_twitter_channel" define_table(tablename, #Instance self.super_link("channel_id", "msg_channel"), # @ToDo: Allow different Twitter accounts for different PEs (Orgs / Teams) #self.pr_pe_id(), Field("name"), Field("description"), Field("enabled", "boolean", default = True, label = T("Enabled?"), represent = s3_yes_no_represent, ), Field("twitter_account"), Field("consumer_key", "password"), Field("consumer_secret", "password"), Field("access_token", "password"), Field("access_token_secret", "password"), *s3_meta_fields()) configure(tablename, onaccept = self.msg_channel_onaccept, #onvalidation = self.twitter_channel_onvalidation super_entity = "msg_channel", ) set_method("msg", "twitter_channel", method = "enable", action = self.msg_channel_enable_interactive) set_method("msg", "twitter_channel", method = "disable", action = self.msg_channel_disable_interactive) set_method("msg", "twitter_channel", method = "poll", action = self.msg_channel_poll) # --------------------------------------------------------------------- # Twitter Messages: InBox & Outbox # tablename = "msg_twitter" define_table(tablename, # Instance self.super_link("message_id", "msg_message"), self.msg_channel_id(), s3_datetime(default = "now", label = T("Posted on"), ), Field("body", length=140, label = T("Message"), ), Field("from_address", #notnull=True, label = T("From"), represent = self.twitter_represent, requires = IS_NOT_EMPTY(), ), Field("to_address", label = T("To"), represent = self.twitter_represent, ), Field("inbound", "boolean", default = False, label = T("Direction"), represent = lambda direction: \ (direction and [T("In")] or \ [T("Out")])[0], ), Field("msg_id", # Twitter Message ID readable = False, writable = False, ), *s3_meta_fields()) configure(tablename, list_fields = ["id", #"priority", #"category", "body", "from_address", "date", #"location_id", ], #orderby = ~table.priority, super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict() # ------------------------------------------------------------------------- @staticmethod def twitter_represent(nickname, show_link=True): """ Represent a Twitter account """ if not nickname: return current.messages["NONE"] db = current.db s3db = current.s3db table = s3db.pr_contact query = (table.contact_method == "TWITTER") & \ (table.value == nickname) row = db(query).select(table.pe_id, limitby=(0, 1)).first() if row: repr = s3db.pr_pentity_represent(row.pe_id) if show_link: # Assume person ptable = s3db.pr_person row = db(ptable.pe_id == row.pe_id).select(ptable.id, limitby=(0, 1)).first() if row: link = URL(c="pr", f="person", args=[row.id]) return A(repr, _href=link) return repr else: return nickname # ------------------------------------------------------------------------- @staticmethod def twitter_channel_onvalidation(form): """ Complete oauth: take tokens from session + pin from form, and do the 2nd API call to Twitter """ T = current.T session = current.session settings = current.deployment_settings.msg s3 = session.s3 vars = form.vars if vars.pin and s3.twitter_request_key and s3.twitter_request_secret: try: import tweepy except: raise HTTP(501, body=T("Can't import tweepy")) oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key, settings.twitter_oauth_consumer_secret) oauth.set_request_token(s3.twitter_request_key, s3.twitter_request_secret) try: oauth.get_access_token(vars.pin) vars.oauth_key = oauth.access_token.key vars.oauth_secret = oauth.access_token.secret twitter = tweepy.API(oauth) vars.twitter_account = twitter.me().screen_name vars.pin = "" # we won't need it anymore return except tweepy.TweepError: session.error = T("Settings were reset because authenticating with Twitter failed") # Either user asked to reset, or error - clear everything for k in ["oauth_key", "oauth_secret", "twitter_account"]: vars[k] = None for k in ["twitter_request_key", "twitter_request_secret"]: s3[k] = "" # ============================================================================= class S3TwitterSearchModel(S3ChannelModel): """ Twitter Searches - results can be fed to KeyGraph https://dev.twitter.com/docs/api/1.1/get/search/tweets """ names = ("msg_twitter_search", "msg_twitter_result", ) def model(self): T = current.T db = current.db configure = self.configure define_table = self.define_table set_method = self.set_method # --------------------------------------------------------------------- # Twitter Search Query # tablename = "msg_twitter_search" define_table(tablename, Field("keywords", "text", label = T("Keywords"), ), # @ToDo: Allow setting a Point & Radius for filtering by geocode #self.gis_location_id(), Field("lang", # Set in controller #default = current.response.s3.language, label = T("Language"), ), Field("count", "integer", default = 100, label = T("# Results per query"), ), Field("include_entities", "boolean", default = False, label = T("Include Entity Information?"), represent = s3_yes_no_represent, comment = DIV(_class="tooltip", _title="%s|%s" % (T("Entity Information"), T("This is required if analyzing with KeyGraph."))), ), # @ToDo: Rename or even move to Component Table Field("is_processed", "boolean", default = False, label = T("Processed with KeyGraph?"), represent = s3_yes_no_represent, ), Field("is_searched", "boolean", default = False, label = T("Searched?"), represent = s3_yes_no_represent, ), *s3_meta_fields()) configure(tablename, list_fields = ["keywords", "lang", "count", #"include_entities", ], ) # Reusable Query ID represent = S3Represent(lookup=tablename, fields=["keywords"]) search_id = S3ReusableField("search_id", "reference %s" % tablename, label = T("Search Query"), ondelete = "CASCADE", represent = represent, requires = IS_EMPTY_OR( IS_ONE_OF_EMPTY(db, "msg_twitter_search.id") ), ) set_method("msg", "twitter_search", method = "poll", action = self.twitter_search_poll) set_method("msg", "twitter_search", method = "keygraph", action = self.twitter_keygraph) set_method("msg", "twitter_result", method = "timeline", action = self.twitter_timeline) # --------------------------------------------------------------------- # Twitter Search Results # # @ToDo: Store the places mentioned in the Tweet as linked Locations # tablename = "msg_twitter_result" define_table(tablename, # Instance self.super_link("message_id", "msg_message"), # Just present for Super Entity #self.msg_channel_id(), search_id(), s3_datetime(default="now", label = T("Tweeted on"), ), Field("tweet_id", label = T("Tweet ID")), Field("lang", label = T("Language")), Field("from_address", label = T("Tweeted by")), Field("body", label = T("Tweet")), # @ToDo: Populate from Parser #Field("category", # writable = False, # label = T("Category"), # ), #Field("priority", "integer", # writable = False, # label = T("Priority"), # ), self.gis_location_id(), # Just present for Super Entity #Field("inbound", "boolean", # default = True, # readable = False, # writable = False, # ), *s3_meta_fields()) configure(tablename, list_fields = [#"category", #"priority", "body", "from_address", "date", "location_id", ], #orderby=~table.priority, super_entity = "msg_message", ) # --------------------------------------------------------------------- return dict() # ----------------------------------------------------------------------------- @staticmethod def twitter_search_poll(r, **attr): """ Perform a Search of Twitter S3Method for interactive requests """ id = r.id tablename = r.tablename current.s3task.async("msg_twitter_search", args=[id]) current.session.confirmation = \ current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them") # Filter results to this Search redirect(URL(f="twitter_result", vars={"~.search_id": id})) # ----------------------------------------------------------------------------- @staticmethod def twitter_keygraph(r, **attr): """ Prcoess Search Results with KeyGraph S3Method for interactive requests """ tablename = r.tablename current.s3task.async("msg_process_keygraph", args=[r.id]) current.session.confirmation = \ current.T("The search results are now being processed with KeyGraph") # @ToDo: Link to KeyGraph results redirect(URL(f="twitter_result")) # ============================================================================= @staticmethod def twitter_timeline(r, **attr): """ Display the Tweets on a Simile Timeline http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline """ if r.representation == "html" and r.name == "twitter_result": response = current.response s3 = response.s3 appname = r.application # Add core Simile Code s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname) # Add our control script if s3.debug: s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname) else: s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname) # Add our data # @ToDo: Make this the initial data & then collect extra via REST with a stylesheet # add in JS using S3.timeline.eventSource.addMany(events) where events is a [] if r.record: # Single record rows = [r.record] else: # Multiple records # @ToDo: Load all records & sort to closest in time # http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d rows = r.resource.select(["date", "body"], limit=2000, as_rows=True) data = {"dateTimeFormat": "iso8601", } now = r.utcnow tl_start = tl_end = now events = [] import re for row in rows: # Dates start = row.date or "" if start: if start < tl_start: tl_start = start if start > tl_end: tl_end = start start = start.isoformat() title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body)) if len(title) > 30: title = title[:30] events.append({"start": start, "title": title, "description": row.body, }) data["events"] = events data = json.dumps(data, separators=SEPARATORS) code = "".join(( '''S3.timeline.data=''', data, ''' S3.timeline.tl_start="''', tl_start.isoformat(), '''" S3.timeline.tl_end="''', tl_end.isoformat(), '''" S3.timeline.now="''', now.isoformat(), '''" ''')) # Control our code in static/scripts/S3/s3.timeline.js s3.js_global.append(code) # Create the DIV item = DIV(_id="s3timeline", _class="s3-timeline") output = dict(item=item) # Maintain RHeader for consistency if attr.get("rheader"): rheader = attr["rheader"](r) if rheader: output["rheader"] = rheader output["title"] = current.T("Twitter Timeline") response.view = "timeline.html" return output else: r.error(405, current.ERROR.BAD_METHOD) # ============================================================================= class S3XFormsModel(S3Model): """ XForms are used by the ODK Collect mobile client http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android """ names = ("msg_xforms_store",) def model(self): #T = current.T # --------------------------------------------------------------------- # SMS store for persistence and scratch pad for combining incoming xform chunks tablename = "msg_xforms_store" self.define_table(tablename, Field("sender", length=20), Field("fileno", "integer"), Field("totalno", "integer"), Field("partno", "integer"), Field("message", length=160) ) # --------------------------------------------------------------------- return dict() # ============================================================================= class S3BaseStationModel(S3Model): """ Base Stations (Cell Towers) are a type of Site @ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain - see RadioMobile """ names = ("msg_basestation",) def model(self): T = current.T define_table = self.define_table # --------------------------------------------------------------------- # Base Stations (Cell Towers) # tablename = "msg_basestation" define_table(tablename, self.super_link("site_id", "org_site"), Field("name", notnull=True, length=64, # Mayon Compatibility label = T("Name"), ), Field("code", length=10, # Mayon compatibility label = T("Code"), # Deployments that don't wants site codes can hide them #readable = False, #writable = False, # @ToDo: Deployment Setting to add validator to make these unique ), self.org_organisation_id( label = T("Operator"), #widget=S3OrganisationAutocompleteWidget(default_from_profile=True), requires = self.org_organisation_requires(required=True, updateable=True), ), self.gis_location_id(), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_BASE = T("Create Base Station") current.response.s3.crud_strings[tablename] = Storage( label_create=T("Create Base Station"), title_display=T("Base Station Details"), title_list=T("Base Stations"), title_update=T("Edit Base Station"), title_upload=T("Import Base Stations"), title_map=T("Map of Base Stations"), label_list_button=T("List Base Stations"), label_delete_button=T("Delete Base Station"), msg_record_created=T("Base Station added"), msg_record_modified=T("Base Station updated"), msg_record_deleted=T("Base Station deleted"), msg_list_empty=T("No Base Stations currently registered")) self.configure(tablename, deduplicate = self.msg_basestation_duplicate, super_entity = "org_site", ) # --------------------------------------------------------------------- # Pass names back to global scope (s3.*) # return dict() # --------------------------------------------------------------------- @staticmethod def msg_basestation_duplicate(item): """ Import item deduplication, match by name (Adding location_id doesn't seem to be a good idea) @param item: the S3ImportItem instance """ if item.tablename == "msg_basestation": table = item.table name = "name" in item.data and item.data.name query = (table.name.lower() == name.lower()) #location_id = None # if "location_id" in item.data: # location_id = item.data.location_id ## This doesn't find deleted records: # query = query & (table.location_id == location_id) duplicate = current.db(query).select(table.id, limitby=(0, 1)).first() # if duplicate is None and location_id: ## Search for deleted basestations with this name # query = (table.name.lower() == name.lower()) & \ # (table.deleted == True) # row = db(query).select(table.id, table.deleted_fk, # limitby=(0, 1)).first() # if row: # fkeys = json.loads(row.deleted_fk) # if "location_id" in fkeys and \ # str(fkeys["location_id"]) == str(location_id): # duplicate = row if duplicate: item.id = duplicate.id item.method = item.METHOD.UPDATE # END =========================================================================
mit
CodeDJ/qt5-hidpi
qt/qtwebkit/Tools/QueueStatusServer/handlers/statusbubble_unittest.py
128
2482
# Copyright (C) 2010 Google, Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Research in Motion Ltd. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest from handlers.statusbubble import StatusBubble from model.queues import Queue class MockAttachment(object): def __init__(self): self.id = 1 def status_for_queue(self, queue): return None def position_in_queue(self, queue): return 1 class StatusBubbleTest(unittest.TestCase): def test_build_bubble(self): bubble = StatusBubble() queue = Queue("mac-ews") attachment = MockAttachment() bubble_dict = bubble._build_bubble(queue, attachment, 1) # FIXME: assertDictEqual (in Python 2.7) would be better to use here. self.assertEqual(bubble_dict["name"], "mac") self.assertEqual(bubble_dict["attachment_id"], 1) self.assertEqual(bubble_dict["queue_position"], 1) self.assertEqual(bubble_dict["state"], "none") self.assertEqual(bubble_dict["status"], None) if __name__ == '__main__': unittest.main()
lgpl-2.1
LukeHoersten/ansible-modules-core
cloud/amazon/iam_cert.py
102
11642
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: iam_cert short_description: Manage server certificates for use on ELBs and CloudFront description: - Allows for the management of server certificates version_added: "2.0" options: name: description: - Name of certificate to add, update or remove. required: true aliases: [] new_name: description: - When present, this will update the name of the cert with the value passed here. required: false aliases: [] new_path: description: - When present, this will update the path of the cert with the value passed here. required: false aliases: [] state: description: - Whether to create, delete certificate. When present is specified it will attempt to make an update if new_path or new_name is specified. required: true default: null choices: [ "present", "absent" ] aliases: [] path: description: - When creating or updating, specify the desired path of the certificate required: false default: "/" aliases: [] cert_chain: description: - The path to the CA certificate chain in PEM encoded format. required: false default: null aliases: [] cert: description: - The path to the certificate body in PEM encoded format. required: false aliases: [] key: description: - The path to the private key of the certificate in PEM encoded format. dup_ok: description: - By default the module will not upload a certifcate that is already uploaded into AWS. If set to True, it will upload the certifcate as long as the name is unique. required: false default: False aliases: [] aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] requirements: [ "boto" ] author: Jonathan I. Davila extends_documentation_fragment: aws ''' EXAMPLES = ''' # Basic server certificate upload tasks: - name: Upload Certifcate iam_cert: name: very_ssl state: present cert: somecert.pem key: privcertkey cert_chain: myverytrustedchain ''' import json import sys try: import boto import boto.iam HAS_BOTO = True except ImportError: HAS_BOTO = False def boto_exception(err): '''generic error message handler''' if hasattr(err, 'error_message'): error = err.error_message elif hasattr(err, 'message'): error = err.message else: error = '%s: %s' % (Exception, err) return error def cert_meta(iam, name): opath = iam.get_server_certificate(name).get_server_certificate_result.\ server_certificate.\ server_certificate_metadata.\ path ocert = iam.get_server_certificate(name).get_server_certificate_result.\ server_certificate.\ certificate_body ocert_id = iam.get_server_certificate(name).get_server_certificate_result.\ server_certificate.\ server_certificate_metadata.\ server_certificate_id upload_date = iam.get_server_certificate(name).get_server_certificate_result.\ server_certificate.\ server_certificate_metadata.\ upload_date exp = iam.get_server_certificate(name).get_server_certificate_result.\ server_certificate.\ server_certificate_metadata.\ expiration return opath, ocert, ocert_id, upload_date, exp def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok): update=False if any(ct in orig_cert_names for ct in [name, new_name]): for i_name in [name, new_name]: if i_name is None: continue if cert is not None: try: c_index=orig_cert_names.index(i_name) except NameError: continue else: if orig_cert_bodies[c_index] == cert: update=True break elif orig_cert_bodies[c_index] != cert: module.fail_json(changed=False, msg='A cert with the name %s already exists and' ' has a different certificate body associated' ' with it. Certifcates cannot have the same name') else: update=True break elif cert in orig_cert_bodies and not dup_ok: for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies): if crt_body == cert: module.fail_json(changed=False, msg='This certificate already' ' exists under the name %s' % crt_name) return update def cert_action(module, iam, name, cpath, new_name, new_path, state, cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok): if state == 'present': update = dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok) if update: opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name) changed=True if new_name and new_path: iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path) module.exit_json(changed=changed, original_name=name, new_name=new_name, original_path=opath, new_path=new_path, cert_body=ocert, upload_date=upload_date, expiration_date=exp) elif new_name and not new_path: iam.update_server_cert(name, new_cert_name=new_name) module.exit_json(changed=changed, original_name=name, new_name=new_name, cert_path=opath, cert_body=ocert, upload_date=upload_date, expiration_date=exp) elif not new_name and new_path: iam.update_server_cert(name, new_path=new_path) module.exit_json(changed=changed, name=new_name, original_path=opath, new_path=new_path, cert_body=ocert, upload_date=upload_date, expiration_date=exp) else: changed=False module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, upload_date=upload_date, expiration_date=exp, msg='No new path or name specified. No changes made') else: changed=True iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath) opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name) module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, upload_date=upload_date, expiration_date=exp) elif state == 'absent': if name in orig_cert_names: changed=True iam.delete_server_cert(name) module.exit_json(changed=changed, deleted_cert=name) else: changed=False module.exit_json(changed=changed, msg='Certifcate with the name %s already absent' % name) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state=dict( default=None, required=True, choices=['present', 'absent']), name=dict(default=None, required=False), cert=dict(default=None, required=False), key=dict(default=None, required=False), cert_chain=dict(default=None, required=False), new_name=dict(default=None, required=False), path=dict(default='/', required=False), new_path=dict(default=None, required=False), dup_ok=dict(default=False, required=False, choices=[False, True]) ) ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[], ) if not HAS_BOTO: module.fail_json(msg="Boto is required for this module") ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) try: iam = boto.iam.connection.IAMConnection( aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, ) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) state = module.params.get('state') name = module.params.get('name') path = module.params.get('path') new_name = module.params.get('new_name') new_path = module.params.get('new_path') cert_chain = module.params.get('cert_chain') dup_ok = module.params.get('dup_ok') if state == 'present': cert = open(module.params.get('cert'), 'r').read().rstrip() key = open(module.params.get('key'), 'r').read().rstrip() if cert_chain is not None: cert_chain = open(module.params.get('cert_chain'), 'r').read() else: key=cert=chain=None orig_certs = [ctb['server_certificate_name'] for ctb in \ iam.get_all_server_certs().\ list_server_certificates_result.\ server_certificate_metadata_list] orig_bodies = [iam.get_server_certificate(thing).\ get_server_certificate_result.\ certificate_body \ for thing in orig_certs] if new_name == name: new_name = None if new_path == path: new_path = None changed = False try: cert_action(module, iam, name, path, new_name, new_path, state, cert, key, cert_chain, orig_certs, orig_bodies, dup_ok) except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err), debug=[cert,key]) from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
gpl-3.0
jkyeung/XlsxWriter
xlsxwriter/test/comparison/test_chart_combined03.py
1
1649
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'chart_combined03.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {'xl/charts/chart1.xml': ['<c:dispBlanksAs']} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart1 = workbook.add_chart({'type': 'column'}) chart2 = workbook.add_chart({'type': 'line'}) data = [ [2, 7, 3, 6, 2], [20, 25, 10, 10, 20], [4, 2, 5, 2, 1], ] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) worksheet.write_column('C1', data[2]) chart1.add_series({'values': '=Sheet1!$A$1:$A$5'}) chart1.add_series({'values': '=Sheet1!$B$1:$B$5'}) chart2.add_series({'values': '=Sheet1!$C$1:$C$5'}) chart1.combine(chart2) worksheet.insert_chart('E9', chart1) workbook.close() self.assertExcelEqual()
bsd-2-clause
Qinusty/rethinkdb
test/common/unit.py
7
2157
#!/user/bin/env python # Copyright 2014-2015 RethinkDB, all rights reserved. import collections, os, subprocess, sys import test_framework, utils class AllUnitTests(test_framework.Test): def __init__(self, filters=[]): super(AllUnitTests, self).__init__() self.filters = filters self.configured = False self.tests = None def filter(self, filter): return AllUnitTests(self.filters + [filter]) def configure(self, conf): unit_executable = os.path.join(conf['BUILD_DIR'], "rethinkdb-unittest") if not os.access(unit_executable, os.X_OK): sys.stderr.write('Warning: no useable rethinkdb-unittest executable at: %s\n' % unit_executable) return test_framework.TestTree() output = subprocess.check_output([unit_executable, "--gtest_list_tests"]) key = None dict = collections.defaultdict(list) for line in output.split("\n"): if not line: continue elif line[-1] == '.': key = line[:-1] else: dict[key].append(line.strip()) tests = test_framework.TestTree( (group, UnitTest(unit_executable, group, tests)) for group, tests in dict.items()) for filter in self.filters: tests = tests.filter(filter) return tests class UnitTest(test_framework.Test): def __init__(self, unit_executable, test, child_tests=[]): super(UnitTest, self).__init__() self.unit_executable = unit_executable self.test = test self.child_tests = child_tests def run(self): filter = self.test if self.child_tests: filter = filter + ".*" subprocess.check_call([self.unit_executable, "--gtest_filter=" + filter]) def filter(self, filter): if filter.all_same() or not self.child_tests: return self if filter.match() else None tests = test_framework.TestTree(( (child, UnitTest(self.unit_executable, self.test + "." + child)) for child in self.child_tests)) return tests.filter(filter)
agpl-3.0
coddingtonbear/d-rats
d_rats/mapdisplay.py
3
52155
#!/usr/bin/python import os import math import urllib import time import random import shutil import tempfile import threading import copy import gtk import gobject import mainapp import platform import miscwidgets import inputdialog import utils import geocode_ui import map_sources import map_source_editor import signals from ui.main_common import ask_for_confirmation from gps import GPSPosition, distance, value_with_units, DPRS_TO_APRS CROSSHAIR = "+" COLORS = ["red", "green", "cornflower blue", "pink", "orange", "grey"] BASE_DIR = None def set_base_dir(basedir): global BASE_DIR BASE_DIR = basedir CONFIG = None CONNECTED = True MAX_TILE_LIFE = 0 PROXY = None def set_connected(connected): global CONNECTED CONNECTED = connected def set_tile_lifetime(lifetime): global MAX_TILE_LIFE MAX_TILE_LIFE = lifetime def set_proxy(proxy): global PROXY PROXY = proxy def fetch_url(url, local): global CONNECTED global PROXY if not CONNECTED: raise Exception("Not connected") if PROXY: proxies = {"http" : PROXY} else: proxies = None data = urllib.urlopen(url, proxies=proxies) local_file = file(local, "wb") d = data.read() local_file.write(d) data.close() local_file.close() class MarkerEditDialog(inputdialog.FieldDialog): def __init__(self): inputdialog.FieldDialog.__init__(self, title=_("Add Marker")) self.icons = [] for sym in sorted(DPRS_TO_APRS.values()): icon = utils.get_icon(sym) if icon: self.icons.append((icon, sym)) self.add_field(_("Group"), miscwidgets.make_choice([], True)) self.add_field(_("Name"), gtk.Entry()) self.add_field(_("Latitude"), miscwidgets.LatLonEntry()) self.add_field(_("Longitude"), miscwidgets.LatLonEntry()) self.add_field(_("Lookup"), gtk.Button("By Address")) self.add_field(_("Comment"), gtk.Entry()) self.add_field(_("Icon"), miscwidgets.make_pixbuf_choice(self.icons)) self._point = None def set_groups(self, groups, group=None): grpsel = self.get_field(_("Group")) for grp in groups: grpsel.append_text(grp) if group is not None: grpsel.child.set_text(group) grpsel.set_sensitive(False) else: grpsel.child.set_text(_("Misc")) def get_group(self): return self.get_field(_("Group")).child.get_text() def set_point(self, point): self.get_field(_("Name")).set_text(point.get_name()) self.get_field(_("Latitude")).set_text("%.4f" % point.get_latitude()) self.get_field(_("Longitude")).set_text("%.4f" % point.get_longitude()) self.get_field(_("Comment")).set_text(point.get_comment()) iconsel = self.get_field(_("Icon")) if isinstance(point, map_sources.MapStation): symlist = [y for x,y in self.icons] try: iidx = symlist.index(point.get_aprs_symbol()) iconsel.set_active(iidx) except ValueError: print "No such symbol `%s'" % point.get_aprs_symbol() else: iconsel.set_sensitive(False) self._point = point def get_point(self): name = self.get_field(_("Name")).get_text() lat = self.get_field(_("Latitude")).value() lon = self.get_field(_("Longitude")).value() comment = self.get_field(_("Comment")).get_text() idx = self.get_field(_("Icon")).get_active() self._point.set_name(name) self._point.set_latitude(lat) self._point.set_longitude(lon) self._point.set_comment(comment) if isinstance(self._point, map_sources.MapStation): self._point.set_icon_from_aprs_sym(self.icons[idx][1]) return self._point # These functions taken from: # http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames def deg2num(lat_deg, lon_deg, zoom): lat_rad = lat_deg * math.pi / 180.0 n = 2.0 ** zoom xtile = int((lon_deg + 180.0) / 360.0 * n) ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n) return(xtile, ytile) def num2deg(xtile, ytile, zoom): n = 2.0 ** zoom lon_deg = xtile / n * 360.0 - 180.0 lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n))) lat_deg = lat_rad * 180.0 / math.pi return(lat_deg, lon_deg) class MapTile(object): def path_els(self): return deg2num(self.lat, self.lon, self.zoom) def tile_edges(self): n, w = num2deg(self.x, self.y, self.zoom) s, e = num2deg(self.x+1, self.y+1, self.zoom) return (s, w, n, e) def lat_range(self): s, w, n, e = self.tile_edges() return (n, s) def lon_range(self): s, w, n, e = self.tile_edges() return (w, e) def path(self): return "%d/%d/%d.png" % (self.zoom, self.x, self.y) def _local_path(self): path = os.path.join(self.dir, self.path()) if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) return path def is_local(self): if MAX_TILE_LIFE == 0 or not CONNECTED: return os.path.exists(self._local_path()) else: try: ts = os.stat(self._local_path()).st_mtime return (time.time() - ts) < MAX_TILE_LIFE except OSError: return False def fetch(self): if not self.is_local(): for i in range(10): url = self.remote_path() try: fetch_url(url, self._local_path()) return True except Exception, e: print "[%i] Failed to fetch `%s': %s" % (i, url, e) return False else: return True def _thread(self, cb, *args): if self.fetch(): fname = self._local_path() else: fname = None gobject.idle_add(cb, fname, *args) def threaded_fetch(self, cb, *args): _args = (cb,) + args t = threading.Thread(target=self._thread, args=_args) t.setDaemon(True) t.start() def local_path(self): path = self._local_path() self.fetch() return path def remote_path(self): return "http://tile.openstreetmap.org/%s" % (self.path()) def __add__(self, count): (x, y) = count return MapTile(self.x+x, self.y+y, self.zoom) def __sub__(self, tile): return (self.x - tile.x, self.y - tile.y) def __contains__(self, point): (lat, lon) = point # FIXME for non-western! (lat_max, lat_min) = self.lat_range() (lon_min, lon_max) = self.lon_range() lat_match = (lat < lat_max and lat > lat_min) lon_match = (lon < lon_max and lon > lon_min) return lat_match and lon_match def __init__(self, lat, lon, zoom): self.zoom = zoom if isinstance(lat, int) and isinstance(lon, int): self.x = lat self.y = lon self.lat, self.lon = num2deg(self.x, self.y, self.zoom) else: self.lat = lat self.lon = lon self.x, self.y = deg2num(self.lat, self.lon, self.zoom) if BASE_DIR: self.dir = BASE_DIR else: p = platform.get_platform() self.dir = os.path.join(p.config_dir(), "maps") if not os.path.isdir(self.dir): os.mkdir(self.dir) def __str__(self): return "%.4f,%.4f (%i,%i)" % (self.lat, self.lon, self.x, self.y) class LoadContext(object): pass class MapWidget(gtk.DrawingArea): __gsignals__ = { "redraw-markers" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()), "new-tiles-loaded" : (gobject.SIGNAL_ACTION, gobject.TYPE_NONE, ()), } def draw_text_marker_at(self, x, y, text, color="yellow"): gc = self.get_style().black_gc if self.zoom < 12: size = 'size="x-small"' elif self.zoom < 14: size = 'size="small"' else: size = '' text = utils.filter_to_ascii(text) pl = self.create_pango_layout("") markup = '<span %s background="%s">%s</span>' % (size, color, text) pl.set_markup(markup) self.window.draw_layout(gc, int(x), int(y), pl) def draw_image_at(self, x, y, pb): gc = self.get_style().black_gc self.window.draw_pixbuf(gc, pb, 0, 0, int(x), int(y)) return pb.get_height() def draw_cross_marker_at(self, x, y): width = 2 cm = self.window.get_colormap() color = cm.alloc_color("red") gc = self.window.new_gc(foreground=color, line_width=width) x = int(x) y = int(y) self.window.draw_lines(gc, [(x, y-5), (x, y+5)]) self.window.draw_lines(gc, [(x-5, y), (x+5, y)]) def latlon2xy(self, lat, lon): y = 1- ((lat - self.lat_min) / (self.lat_max - self.lat_min)) x = 1- ((lon - self.lon_min) / (self.lon_max - self.lon_min)) x *= (self.tilesize * self.width) y *= (self.tilesize * self.height) y += self.lat_fudge return (x, y) def xy2latlon(self, x, y): y -= self.lat_fudge lon = 1 - (float(x) / (self.tilesize * self.width)) lat = 1 - (float(y) / (self.tilesize * self.height)) lat = (lat * (self.lat_max - self.lat_min)) + self.lat_min lon = (lon * (self.lon_max - self.lon_min)) + self.lon_min return lat, lon def draw_marker(self, label, lat, lon, img=None): color = "red" try: x, y = self.latlon2xy(lat, lon) except ZeroDivisionError: return if label == CROSSHAIR: self.draw_cross_marker_at(x, y) else: if img: y += (4 + self.draw_image_at(x, y, img)) self.draw_text_marker_at(x, y, label, color) def expose(self, area, event): if len(self.map_tiles) == 0: self.load_tiles() gc = self.get_style().black_gc self.window.draw_drawable(gc, self.pixmap, 0, 0, 0, 0, -1, -1) self.emit("redraw-markers") def calculate_bounds(self): center = MapTile(self.lat, self.lon, self.zoom) topleft = center + (-2, -2) botright = center + (2, 2) (self.lat_min, _, _, self.lon_min) = botright.tile_edges() (_, self.lon_max, self.lat_max, _) = topleft.tile_edges() # I have no idea why, but for some reason we can calculate the # longitude (x) just fine, but not the latitude (y). The result # of either latlon2xy() or tile_edges() is bad, which causes the # y calculation of a given latitude to be off by some amount. # The amount grows large at small values of zoom (zoomed out) and # starts to seriously affect the correctness of marker placement. # Until I figure out why that is, we calculate a fudge factor below. # # To do this, we ask the center tile for its NW corner's # coordinates. We then ask latlon2xy() (with fudge of zero) what # the corresponding x,y is. Since we know what the correct value # should be, we record the offset and use that to shift the y in # further calculations for this zoom level. self.lat_fudge = 0 s, w, n, e = center.tile_edges() x, y = self.latlon2xy(n, w) self.lat_fudge = ((self.height / 2) * self.tilesize) - y if False: print "------ Bounds Calculation ------" print "Center tile should be at %i,%i" % (\ (self.height/2) * self.tilesize, (self.width/2) * self.tilesize) print "We calculate it based on Lat,Lon to be %i, %i" % (x, y) print "--------------------------------" print "Latitude Fudge Factor: %i (zoom %i)" % (self.lat_fudge, self.zoom) def broken_tile(self): if self.__broken_tile: return self.__broken_tile broken = [ "48 16 3 1", " c #FFFFFFFFFFFF", "x c #FFFF00000000", "X c #000000000000", "xx xx XX X XXX ", " xx xx X X X X X ", " xx xx X X X X X ", " xx xx X X X X X ", " xx xx X X X X X ", " xx xx X X X X X ", " xx xx X XX XXX ", " xxx ", " xxx ", " xx xx XXXX XX XXXXX XX ", " xx xx X X X X X X X ", " xx xx X X X X X X X ", " xx xx X X X X X X X ", " xx xx X X XXXXXX X XXXXXX ", " xx xx X X X X X X X ", "xx xx XXXX X X X X X " ] # return gtk.gdk.pixbuf_new_from_xpm_data(broken) pm = gtk.gdk.pixmap_create_from_xpm_d(self.window, None, broken)[0] pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, self.tilesize, self.tilesize) pb.fill(0xffffffff) x = y = (self.tilesize / 2) pb.get_from_drawable(pm, pm.get_colormap(), 0, 0, x, y, -1, -1) self.__broken_tile = pb return pb def draw_tile(self, path, x, y, ctx=None): if ctx and ctx.zoom != self.zoom: # Zoom level has chnaged, so don't do anything return gc = self.pixmap.new_gc() if path: try: pb = gtk.gdk.pixbuf_new_from_file(path) except Exception, e: utils.log_exception() pb = self.broken_tile() else: pb = self.broken_tile() if ctx: ctx.loaded_tiles += 1 frac = float(ctx.loaded_tiles) / float(ctx.total_tiles) if ctx.loaded_tiles == ctx.total_tiles: self.status(0.0, "") else: self.status(frac, _("Loaded") + " %.0f%%" % (frac * 100.0)) self.pixmap.draw_pixbuf(gc, pb, 0, 0, x, y, -1, -1) self.queue_draw() @utils.run_gtk_locked def draw_tile_locked(self, *args): self.draw_tile(*args) def load_tiles(self): self.map_tiles = [] ctx = LoadContext() ctx.loaded_tiles = 0 ctx.total_tiles = self.width * self.height ctx.zoom = self.zoom center = MapTile(self.lat, self.lon, self.zoom) delta_h = self.height / 2 delta_w = self.width / 2 count = 0 total = self.width * self.height if not self.window: # Window is not loaded, thus can't load tiles return try: self.pixmap = gtk.gdk.Pixmap(self.window, self.width * self.tilesize, self.height * self.tilesize) except Exception, e: # Window is not loaded, thus can't load tiles return gc = self.pixmap.new_gc() for i in range(0, self.width): for j in range(0, self.height): tile = center + (i - delta_w, j - delta_h) if not tile.is_local(): message = _("Retrieving") else: message = _("Loading") if tile.is_local(): path = tile._local_path() self.draw_tile(tile._local_path(), self.tilesize * i, self.tilesize * j, ctx) else: self.draw_tile(None, self.tilesize * i, self.tilesize * j) tile.threaded_fetch(self.draw_tile_locked, self.tilesize * i, self.tilesize * j, ctx) self.map_tiles.append(tile) count += 1 self.calculate_bounds() self.emit("new-tiles-loaded") def export_to(self, filename, bounds=None): if not bounds: x = 0 y = 0 bounds = (0,0,-1,-1) width = self.tilesize * self.width height = self.tilesize * self.height else: x = bounds[0] y = bounds[1] width = bounds[2] - bounds[0] height = bounds[3] - bounds[1] pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height) pb.get_from_drawable(self.pixmap, self.pixmap.get_colormap(), x, y, 0, 0, width, height) pb.save(filename, "png") def __init__(self, width, height, tilesize=256, status=None): gtk.DrawingArea.__init__(self) self.__broken_tile = None self.height = height self.width = width self.tilesize = tilesize self.status = status self.lat = 0 self.lon = 0 self.zoom = 1 self.lat_max = self.lat_min = 0 self.lon_max = self.lon_min = 0 self.map_tiles = [] self.set_size_request(self.tilesize * self.width, self.tilesize * self.height) self.connect("expose-event", self.expose) def set_center(self, lat, lon): self.lat = lat self.lon = lon self.map_tiles = [] self.queue_draw() def get_center(self): return (self.lat, self.lon) def set_zoom(self, zoom): if zoom > 17 or zoom == 1: return self.zoom = zoom self.map_tiles = [] self.queue_draw() def get_zoom(self): return self.zoom def scale(self, x, y, pixels=128): shift = 15 tick = 5 #rect = gtk.gdk.Rectangle(x-pixels,y-shift-tick,x,y) #self.window.invalidate_rect(rect, True) (lat_a, lon_a) = self.xy2latlon(self.tilesize, self.tilesize) (lat_b, lon_b) = self.xy2latlon(self.tilesize * 2, self.tilesize) # width of one tile d = distance(lat_a, lon_a, lat_b, lon_b) * (float(pixels) / self.tilesize) dist = value_with_units(d) color = self.window.get_colormap().alloc_color("black") gc = self.window.new_gc(line_width=1, foreground=color) self.window.draw_line(gc, x-pixels, y-shift, x, y-shift) self.window.draw_line(gc, x-pixels, y-shift, x-pixels, y-shift-tick) self.window.draw_line(gc, x, y-shift, x, y-shift-tick) self.window.draw_line(gc, x-(pixels/2), y-shift, x-(pixels/2), y-shift-tick) pl = self.create_pango_layout("") pl.set_markup("%s" % dist) self.window.draw_layout(gc, x-pixels, y-shift, pl) def point_is_visible(self, lat, lon): for i in self.map_tiles: if (lat, lon) in i: return True return False class MapWindow(gtk.Window): __gsignals__ = { "reload-sources" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()), "user-send-chat" : signals.USER_SEND_CHAT, "get-station-list" : signals.GET_STATION_LIST, } _signals = {"user-send-chat" : None, "get-station-list" : None, } def zoom(self, widget, frame): adj = widget.get_adjustment() self.map.set_zoom(int(adj.value)) frame.set_label(_("Zoom") + " (%i)" % int(adj.value)) def make_zoom_controls(self): box = gtk.HBox(False, 3) box.set_border_width(3) box.show() l = gtk.Label(_("Min")) l.show() box.pack_start(l, 0,0,0) adj = gtk.Adjustment(value=14, lower=2, upper=17, step_incr=1, page_incr=3) sb = gtk.HScrollbar(adj) sb.show() box.pack_start(sb, 1,1,1) l = gtk.Label(_("Max")) l.show() box.pack_start(l, 0,0,0) frame = gtk.Frame(_("Zoom")) frame.set_label_align(0.5, 0.5) frame.set_size_request(150, 50) frame.show() frame.add(box) sb.connect("value-changed", self.zoom, frame) return frame def toggle_show(self, group, *vals): if group: station = vals[1] else: group = vals[1] station = None for src in self.map_sources: if group != src.get_name(): continue if station: try: point = src.get_point_by_name(station) except KeyError: continue point.set_visible(vals[0]) self.add_point_visible(point) else: src.set_visible(vals[0]) for point in src.get_points(): point.set_visible(vals[0]) self.update_point(src, point) src.save() break self.map.queue_draw() def marker_mh(self, _action, id, group): action = _action.get_name() if action == "delete": print "Deleting %s/%s" % (group, id) for source in self.map_sources: if source.get_name() == group: if not source.get_mutable(): return point = source.get_point_by_name(id) source.del_point(point) source.save() elif action == "edit": for source in self.map_sources: if source.get_name() == group: break if not source.get_mutable(): return if not source: return for point in source.get_points(): if point.get_name() == id: break if not point: return _point = point.dup() upoint, foo = self.prompt_to_set_marker(point, source.get_name()) if upoint: self.del_point(source, _point) self.add_point(source, upoint) source.save() def _make_marker_menu(self, store, iter): menu_xml = """ <ui> <popup name="menu"> <menuitem action="edit"/> <menuitem action="delete"/> <menuitem action="center"/> </popup> </ui> """ ag = gtk.ActionGroup("menu") try: id, = store.get(iter, 1) group, = store.get(store.iter_parent(iter), 1) except TypeError: id = group = None edit = gtk.Action("edit", _("Edit"), None, None) edit.connect("activate", self.marker_mh, id, group) if not id: edit.set_sensitive(False) ag.add_action(edit) delete = gtk.Action("delete", _("Delete"), None, None) delete.connect("activate", self.marker_mh, id, group) ag.add_action(delete) center = gtk.Action("center", _("Center on this"), None, None) center.connect("activate", self.marker_mh, id, group) # This isn't implemented right now, because I'm lazy center.set_sensitive(False) ag.add_action(center) uim = gtk.UIManager() uim.insert_action_group(ag, 0) uim.add_ui_from_string(menu_xml) return uim.get_widget("/menu") def make_marker_popup(self, _, view, event): if event.button != 3: return if event.window == view.get_bin_window(): x, y = event.get_coords() pathinfo = view.get_path_at_pos(int(x), int(y)) if pathinfo is None: return else: view.set_cursor_on_cell(pathinfo[0]) (store, iter) = view.get_selection().get_selected() menu = self._make_marker_menu(store, iter) if menu: menu.popup(None, None, None, event.button, event.time) def make_marker_list(self): cols = [(gobject.TYPE_BOOLEAN, _("Show")), (gobject.TYPE_STRING, _("Station")), (gobject.TYPE_FLOAT, _("Latitude")), (gobject.TYPE_FLOAT, _("Longitude")), (gobject.TYPE_FLOAT, _("Distance")), (gobject.TYPE_FLOAT, _("Direction")), ] self.marker_list = miscwidgets.TreeWidget(cols, 1, parent=False) self.marker_list.toggle_cb.append(self.toggle_show) self.marker_list.connect("click-on-list", self.make_marker_popup) self.marker_list._view.connect("row-activated", self.recenter_cb) def render_station(col, rend, model, iter): parent = model.iter_parent(iter) if not parent: parent = iter group = model.get_value(parent, 1) if self.colors.has_key(group): rend.set_property("foreground", self.colors[group]) c = self.marker_list._view.get_column(1) c.set_expand(True) c.set_min_width(150) r = c.get_cell_renderers()[0] c.set_cell_data_func(r, render_station) def render_coord(col, rend, model, iter, cnum): if model.iter_parent(iter): rend.set_property('text', "%.4f" % model.get_value(iter, cnum)) else: rend.set_property('text', '') for col in [2, 3]: c = self.marker_list._view.get_column(col) r = c.get_cell_renderers()[0] c.set_cell_data_func(r, render_coord, col) def render_dist(col, rend, model, iter, cnum): if model.iter_parent(iter): rend.set_property('text', "%.2f" % model.get_value(iter, cnum)) else: rend.set_property('text', '') for col in [4, 5]: c = self.marker_list._view.get_column(col) r = c.get_cell_renderers()[0] c.set_cell_data_func(r, render_dist, col) sw = gtk.ScrolledWindow() sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) sw.add(self.marker_list.packable()) sw.set_size_request(-1, 150) sw.show() return sw def refresh_marker_list(self, group=None): (lat, lon) = self.map.get_center() center = GPSPosition(lat=lat, lon=lon) for item in self.marker_list.get_values(group): try: _parent, children = item except ValueError: # Empty group continue parent = _parent[1] for child in children: this = GPSPosition(lat=child[2], lon=child[3]) dist = center.distance_from(this) bear = center.bearing_to(this) self.marker_list.set_item(parent, child[0], child[1], child[2], child[3], dist, bear) def make_track(self): def toggle(cb, mw): mw.tracking_enabled = cb.get_active() cb = gtk.CheckButton(_("Track center")) cb.connect("toggled", toggle, self) cb.show() return cb def clear_map_cache(self): d = gtk.MessageDialog(buttons=gtk.BUTTONS_YES_NO) d.set_property("text", _("Are you sure you want to clear your map cache?")) r = d.run() d.destroy() if r == gtk.RESPONSE_YES: dir = os.path.join(platform.get_platform().config_dir(), "maps") shutil.rmtree(dir, True) self.map.queue_draw() def printable_map(self, bounds=None): p = platform.get_platform() f = tempfile.NamedTemporaryFile() fn = f.name f.close() mf = "%s.png" % fn hf = "%s.html" % fn ts = time.strftime("%H:%M:%S %d-%b-%Y") station_map = _("Station map") generated_at = _("Generated at") html = """ <html> <body> <h2>D-RATS %s</h2> <h5>%s %s</h5> <img src="file://%s"/> </body> </html> """ % (station_map, generated_at, ts, mf) self.map.export_to(mf, bounds) f = file(hf, "w") f.write(html) f.close() p.open_html_file(hf) def save_map(self, bounds=None): p = platform.get_platform() f = p.gui_save_file(default_name="map_%s.png" % \ time.strftime("%m%d%Y%_H%M%S")) if not f: return if not f.endswith(".png"): f += ".png" self.map.export_to(f, bounds) def get_visible_bounds(self): ha = self.sw.get_hadjustment() va = self.sw.get_vadjustment() return (int(ha.value), int(va.value), int(ha.value + ha.page_size), int(va.value + va.page_size)) def mh(self, _action): action = _action.get_name() if action == "refresh": self.map_tiles = [] self.map.queue_draw() elif action == "clearcache": self.clear_map_cache() elif action == "save": self.save_map() elif action == "savevis": self.save_map(self.get_visible_bounds()) elif action == "printable": self.printable_map() elif action == "printablevis": self.printable_map(self.get_visible_bounds()) elif action == "editsources": srced = map_source_editor.MapSourcesEditor(self.config) srced.run() srced.destroy() self.emit("reload-sources") def make_menu(self): menu_xml = """ <ui> <menubar name="MenuBar"> <menu action="map"> <menuitem action="refresh"/> <menuitem action="clearcache"/> <menuitem action="editsources"/> <menu action="export"> <menuitem action="printable"/> <menuitem action="printablevis"/> <menuitem action="save"/> <menuitem action="savevis"/> </menu> </menu> </menubar> </ui> """ actions = [('map', None, "_" + _("Map"), None, None, self.mh), ('refresh', None, "_" + _("Refresh"), None, None, self.mh), ('clearcache', None, "_" + _("Clear Cache"), None, None, self.mh), ('editsources', None, _("Edit Sources"), None, None, self.mh), ('export', None, "_" + _("Export"), None, None, self.mh), ('printable', None, "_" + _("Printable"), "<Control>p", None, self.mh), ('printablevis', None, _("Printable (visible area)"), "<Control><Alt>P", None, self.mh), ('save', None, "_" + _("Save Image"), "<Control>s", None, self.mh), ('savevis', None, _('Save Image (visible area)'), "<Control><Alt>S", None, self.mh), ] uim = gtk.UIManager() self.menu_ag = gtk.ActionGroup("MenuBar") self.menu_ag.add_actions(actions) uim.insert_action_group(self.menu_ag, 0) menuid = uim.add_ui_from_string(menu_xml) self._accel_group = uim.get_accel_group() return uim.get_widget("/MenuBar") def make_controls(self): vbox = gtk.VBox(False, 2) vbox.pack_start(self.make_zoom_controls(), 0,0,0) vbox.pack_start(self.make_track(), 0,0,0) vbox.show() return vbox def make_bottom_pane(self): box = gtk.HBox(False, 2) box.pack_start(self.make_marker_list(), 1,1,1) box.pack_start(self.make_controls(), 0,0,0) box.show() return box def scroll_to_center(self, widget): a = widget.get_vadjustment() a.set_value((a.upper - a.page_size) / 2) a = widget.get_hadjustment() a.set_value((a.upper - a.page_size) / 2) def center_on(self, lat, lon): ha = self.sw.get_hadjustment() va = self.sw.get_vadjustment() x, y = self.map.latlon2xy(lat, lon) ha.set_value(x - (ha.page_size / 2)) va.set_value(y - (va.page_size / 2)) def status(self, frac, message): self.sb_prog.set_text(message) self.sb_prog.set_fraction(frac) def recenter(self, lat, lon): self.map.set_center(lat, lon) self.map.load_tiles() self.refresh_marker_list() self.center_on(lat, lon) self.map.queue_draw() def refresh(self): self.map.load_tiles() def prompt_to_set_marker(self, point, group=None): def do_address(button, latw, lonw, namew): dlg = geocode_ui.AddressAssistant() r = dlg.run() if r == gtk.RESPONSE_OK: if not namew.get_text(): namew.set_text(dlg.place) latw.set_text("%.5f" % dlg.lat) lonw.set_text("%.5f" % dlg.lon) d = MarkerEditDialog() sources = [] for src in self.map_sources: if src.get_mutable(): sources.append(src.get_name()) d.set_groups(sources, group) d.set_point(point) r = d.run() if r == gtk.RESPONSE_OK: point = d.get_point() group = d.get_group() d.destroy() if r == gtk.RESPONSE_OK: return point, group else: return None, None def prompt_to_send_loc(self, _lat, _lon): d = inputdialog.FieldDialog(title=_("Broadcast Location")) d.add_field(_("Callsign"), gtk.Entry(8)) d.add_field(_("Description"), gtk.Entry(20)) d.add_field(_("Latitude"), miscwidgets.LatLonEntry()) d.add_field(_("Longitude"), miscwidgets.LatLonEntry()) d.get_field(_("Latitude")).set_text("%.4f" % _lat) d.get_field(_("Longitude")).set_text("%.4f" % _lon) while d.run() == gtk.RESPONSE_OK: try: call = d.get_field(_("Callsign")).get_text() desc = d.get_field(_("Description")).get_text() lat = d.get_field(_("Latitude")).get_text() lon = d.get_field(_("Longitude")).get_text() fix = GPSPosition(lat=lat, lon=lon, station=call) fix.comment = desc for port in self.emit("get-station-list").keys(): self.emit("user-send-chat", "CQCQCQ", port, fix.to_NMEA_GGA(), True) break except Exception, e: utils.log_exception() ed = gtk.MessageDialog(buttons=gtk.BUTTONS_OK, parent=d) ed.set_property("text", _("Invalid value") + ": %s" % e) ed.run() ed.destroy() d.destroy() def recenter_cb(self, view, path, column, data=None): model = view.get_model() if model.iter_parent(model.get_iter(path)) == None: return items = self.marker_list.get_selected() self.center_mark = items[1] self.recenter(items[2], items[3]) self.sb_center.pop(self.STATUS_CENTER) self.sb_center.push(self.STATUS_CENTER, _("Center") + ": %s" % self.center_mark) def make_popup(self, vals): def _an(cap): return cap.replace(" ", "_") xml = "" for action in [_an(x) for x in self._popup_items.keys()]: xml += "<menuitem action='%s'/>\n" % action xml = """ <ui> <popup name="menu"> <menuitem action='title'/> <separator/> %s </popup> </ui> """ % xml ag = gtk.ActionGroup("menu") t = gtk.Action("title", "%.4f,%.4f" % (vals["lat"], vals["lon"]), None, None) t.set_sensitive(False) ag.add_action(t) for name, handler in self._popup_items.items(): action = gtk.Action(_an(name), name, None, None) action.connect("activate", handler, vals) ag.add_action(action) uim = gtk.UIManager() uim.insert_action_group(ag, 0) uim.add_ui_from_string(xml) return uim.get_widget("/menu") def mouse_click_event(self, widget, event): x,y = event.get_coords() ha = widget.get_hadjustment() va = widget.get_vadjustment() mx = x + int(ha.get_value()) my = y + int(va.get_value()) lat, lon = self.map.xy2latlon(mx, my) print "Button %i at %i,%i" % (event.button, mx, my) if event.button == 3: vals = { "lat" : lat, "lon" : lon, "x" : mx, "y" : my } menu = self.make_popup(vals) if menu: menu.popup(None, None, None, event.button, event.time) elif event.type == gtk.gdk.BUTTON_PRESS: print "Clicked: %.4f,%.4f" % (lat, lon) # The crosshair marker has been missing since 0.3.0 #self.set_marker(GPSPosition(station=CROSSHAIR, # lat=lat, lon=lon)) elif event.type == gtk.gdk._2BUTTON_PRESS: print "Recenter on %.4f, %.4f" % (lat,lon) self.recenter(lat, lon) def mouse_move_event(self, widget, event): if not self.__last_motion: gobject.timeout_add(100, self._mouse_motion_handler) self.__last_motion = (time.time(), event.x, event.y) def _mouse_motion_handler(self): if self.__last_motion == None: return False t, x, y = self.__last_motion if (time.time() - t) < 0.5: self.info_window.hide() return True lat, lon = self.map.xy2latlon(x, y) ha = self.sw.get_hadjustment() va = self.sw.get_vadjustment() mx = x - int(ha.get_value()) my = y - int(va.get_value()) hit = False for source in self.map_sources: if not source.get_visible(): continue for point in source.get_points(): if not point.get_visible(): continue try: _x, _y = self.map.latlon2xy(point.get_latitude(), point.get_longitude()) except ZeroDivisionError: continue dx = abs(x - _x) dy = abs(y - _y) if dx < 20 and dy < 20: hit = True date = time.ctime(point.get_timestamp()) text = "<b>Station:</b> %s" % point.get_name() + \ "\n<b>Latitude:</b> %.5f" % point.get_latitude() + \ "\n<b>Longitude:</b> %.5f"% point.get_longitude() + \ "\n<b>Last update:</b> %s" % date text += "\n<b>Info</b>: %s" % point.get_comment() label = gtk.Label() label.set_markup(text) label.show() for child in self.info_window.get_children(): self.info_window.remove(child) self.info_window.add(label) posx, posy = self.get_position() posx += mx + 10 posy += my - 10 self.info_window.move(int(posx), int(posy)) self.info_window.show() break if not hit: self.info_window.hide() self.sb_coords.pop(self.STATUS_COORD) self.sb_coords.push(self.STATUS_COORD, "%.4f, %.4f" % (lat, lon)) self.__last_motion = None return False def ev_destroy(self, widget, data=None): self.hide() return True def ev_delete(self, widget, event, data=None): self.hide() return True def update_gps_status(self, string): self.sb_gps.pop(self.STATUS_GPS) self.sb_gps.push(self.STATUS_GPS, string) def add_point_visible(self, point): if point in self.points_visible: self.points_visible.remove(point) if self.map.point_is_visible(point.get_latitude(), point.get_longitude()): if point.get_visible(): self.points_visible.append(point) return True else: return False else: return False def update_point(self, source, point): (lat, lon) = self.map.get_center() center = GPSPosition(*self.map.get_center()) this = GPSPosition(point.get_latitude(), point.get_longitude()) try: self.marker_list.set_item(source.get_name(), point.get_visible(), point.get_name(), point.get_latitude(), point.get_longitude(), center.distance_from(this), center.bearing_to(this)) except Exception, e: if str(e) == "Item not found": # this is evil print "Adding point instead of updating" return self.add_point(source, point) self.add_point_visible(point) self.map.queue_draw() def add_point(self, source, point): (lat, lon) = self.map.get_center() center = GPSPosition(*self.map.get_center()) this = GPSPosition(point.get_latitude(), point.get_longitude()) self.marker_list.add_item(source.get_name(), point.get_visible(), point.get_name(), point.get_latitude(), point.get_longitude(), center.distance_from(this), center.bearing_to(this)) self.add_point_visible(point) self.map.queue_draw() def del_point(self, source, point): self.marker_list.del_item(source.get_name(), point.get_name()) if point in self.points_visible: self.points_visible.remove(point) self.map.queue_draw() def get_map_source(self, name): for source in self.get_map_sources(): if source.get_name() == name: return source return None def add_map_source(self, source): self.map_sources.append(source) self.marker_list.add_item(None, source.get_visible(), source.get_name(), 0, 0, 0, 0) for point in source.get_points(): self.add_point(source, point) #source.connect("point-updated", self.update_point) source.connect("point-added", self.add_point) source.connect("point-deleted", self.del_point) source.connect("point-updated", self.maybe_recenter_on_updated_point) def update_points_visible(self): for src in self.map_sources: for point in src.get_points(): self.update_point(src, point) self.map.queue_draw() def maybe_recenter_on_updated_point(self, source, point): if point.get_name() == self.center_mark and \ self.tracking_enabled: print "Center updated" self.recenter(point.get_latitude(), point.get_longitude()) self.update_point(source, point) def clear_map_sources(self): self.marker_list.clear() self.map_sources = [] self.points_visible = [] self.update_points_visible() def get_map_sources(self): return self.map_sources def redraw_markers(self, map): for point in self.points_visible: map.draw_marker(point.get_name(), point.get_latitude(), point.get_longitude(), point.get_icon()) def __init__(self, config, *args): gtk.Window.__init__(self, *args) self.config = config self.STATUS_COORD = 0 self.STATUS_CENTER = 1 self.STATUS_GPS = 2 self.center_mark = None self.tracking_enabled = False tiles = 5 self.points_visible = [] self.map_sources = [] self.map = MapWidget(tiles, tiles, status=self.status) self.map.show() self.map.connect("redraw-markers", self.redraw_markers) self.map.connect("new-tiles-loaded", lambda m: self.update_points_visible()) box = gtk.VBox(False, 2) self.menubar = self.make_menu() self.menubar.show() box.pack_start(self.menubar, 0,0,0) self.add_accel_group(self._accel_group) self.sw = gtk.ScrolledWindow() self.sw.add_with_viewport(self.map) self.sw.show() def pre_scale(sw, event, mw): ha = mw.sw.get_hadjustment() va = mw.sw.get_vadjustment() px = ha.get_value() + ha.page_size py = va.get_value() + va.page_size rect = gtk.gdk.Rectangle(int(ha.get_value()), int(va.get_value()), int(py), int(py)) mw.map.window.invalidate_rect(rect, True) @utils.run_gtk_locked def _scale(sw, event, mw): ha = mw.sw.get_hadjustment() va = mw.sw.get_vadjustment() px = ha.get_value() + ha.page_size py = va.get_value() + va.page_size pm = mw.map.scale(int(px) - 5, int(py)) def scale(sw, event, mw): gobject.idle_add(_scale, sw, event, mw) self.sw.connect("expose-event", pre_scale, self) self.sw.connect_after("expose-event", scale, self) self.__last_motion = None self.map.add_events(gtk.gdk.POINTER_MOTION_MASK) self.map.connect("motion-notify-event", self.mouse_move_event) self.sw.connect("button-press-event", self.mouse_click_event) self.sw.connect('realize', self.scroll_to_center) hbox = gtk.HBox(False, 2) self.sb_coords = gtk.Statusbar() self.sb_coords.show() self.sb_coords.set_has_resize_grip(False) self.sb_center = gtk.Statusbar() self.sb_center.show() self.sb_center.set_has_resize_grip(False) self.sb_gps = gtk.Statusbar() self.sb_gps.show() self.sb_prog = gtk.ProgressBar() self.sb_prog.set_size_request(150, -1) self.sb_prog.show() hbox.pack_start(self.sb_coords, 1,1,1) hbox.pack_start(self.sb_center, 1,1,1) hbox.pack_start(self.sb_prog, 0,0,0) hbox.pack_start(self.sb_gps, 1,1,1) hbox.show() box.pack_start(self.sw, 1,1,1) box.pack_start(self.make_bottom_pane(), 0,0,0) box.pack_start(hbox, 0,0,0) box.show() self.set_default_size(600,600) self.set_geometry_hints(max_width=tiles*256, max_height=tiles*256) self.markers = {} self.colors = {} self.color_index = 0 self.add(box) self.connect("destroy", self.ev_destroy) self.connect("delete_event", self.ev_delete) self._popup_items = {} self.add_popup_handler(_("Center here"), lambda a, vals: self.recenter(vals["lat"], vals["lon"])) def set_mark_at(a, vals): p = map_sources.MapStation("STATION", vals["lat"], vals["lon"]) p.set_icon_from_aprs_sym("\\<") point, group = self.prompt_to_set_marker(p) if not point: return for source in self.map_sources: print "%s,%s" % (source.get_name(), group) if source.get_name() == group: print "Adding new point %s to %s" % (point.get_name(), source.get_name()) source.add_point(point) source.save() return # No matching group q = "%s %s %s" % \ (_("Group"), group, _("does not exist. Do you want to create it?")) if not ask_for_confirmation(q): return s = map_sources.MapFileSource.open_source_by_name(self.config, group, True) s.add_point(point) s.save() self.add_map_source(s) self.add_popup_handler(_("New marker here"), set_mark_at) self.add_popup_handler(_("Broadcast this location"), lambda a, vals: self.prompt_to_send_loc(vals["lat"], vals["lon"])) self.info_window = gtk.Window(gtk.WINDOW_POPUP) self.info_window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU) self.info_window.set_decorated(False) self.info_window.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("yellow")) def add_popup_handler(self, name, handler): self._popup_items[name] = handler def set_zoom(self, zoom): self.map.set_zoom(zoom) def set_center(self, lat, lon): self.map.set_center(lat, lon) if __name__ == "__main__": import sys import gps if len(sys.argv) == 3: m = MapWindow() m.set_center(gps.parse_dms(sys.argv[1]), gps.parse_dms(sys.argv[2])) m.set_zoom(15) else: m = MapWindow() m.set_center(45.525012, -122.916434) m.set_zoom(14) m.set_marker(GPSPosition(station="KI4IFW_H", lat=45.520, lon=-122.916434)) m.set_marker(GPSPosition(station="KE7FTE", lat=45.5363, lon=-122.9105)) m.set_marker(GPSPosition(station="KA7VQH", lat=45.4846, lon=-122.8278)) m.set_marker(GPSPosition(station="N7QQU", lat=45.5625, lon=-122.8645)) m.del_marker("N7QQU") m.show() try: gtk.main() except: pass # area = gtk.DrawingArea() # area.set_size_request(768, 768) # # w = gtk.Window(gtk.WINDOW_TOPLEVEL) # w.add(area) # area.show() # w.show() # # def expose(area, event): # for i in range(1,4): # img = gtk.gdk.pixbuf_new_from_file("/tmp/tile%i.png" % i) # area.window.draw_pixbuf(area.get_style().black_gc, # img, # 0, 0, 256 * (i-1), 0, 256, 256) # # area.connect("expose-event", expose) #
gpl-3.0
MrNuggles/HeyBoet-Telegram-Bot
temboo/Library/Tumblr/Post/EditAudioPostWithURL.py
5
7243
# -*- coding: utf-8 -*- ############################################################################### # # EditAudioPostWithURL # Updates a specified audio post on a Tumblr blog using a specified external URL. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class EditAudioPostWithURL(Choreography): def __init__(self, temboo_session): """ Create a new instance of the EditAudioPostWithURL Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(EditAudioPostWithURL, self).__init__(temboo_session, '/Library/Tumblr/Post/EditAudioPostWithURL') def new_input_set(self): return EditAudioPostWithURLInputSet() def _make_result_set(self, result, path): return EditAudioPostWithURLResultSet(result, path) def _make_execution(self, session, exec_id, path): return EditAudioPostWithURLChoreographyExecution(session, exec_id, path) class EditAudioPostWithURLInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the EditAudioPostWithURL Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_ExternalURL(self, value): """ Set the value of the ExternalURL input for this Choreo. ((required, string) The URL of the site that hosts the audio file (not Tumblr).) """ super(EditAudioPostWithURLInputSet, self)._set_input('ExternalURL', value) def set_APIKey(self, value): """ Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).) """ super(EditAudioPostWithURLInputSet, self)._set_input('APIKey', value) def set_AccessTokenSecret(self, value): """ Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.) """ super(EditAudioPostWithURLInputSet, self)._set_input('AccessTokenSecret', value) def set_AccessToken(self, value): """ Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.) """ super(EditAudioPostWithURLInputSet, self)._set_input('AccessToken', value) def set_BaseHostname(self, value): """ Set the value of the BaseHostname input for this Choreo. ((required, string) The standard or custom blog hostname (i.e. temboo.tumblr.com).) """ super(EditAudioPostWithURLInputSet, self)._set_input('BaseHostname', value) def set_Caption(self, value): """ Set the value of the Caption input for this Choreo. ((optional, string) The user-supplied caption. HTML is allowed.) """ super(EditAudioPostWithURLInputSet, self)._set_input('Caption', value) def set_Date(self, value): """ Set the value of the Date input for this Choreo. ((optional, date) The GMT date and time of the post. Can be an epoch timestamp in milliseconds or formatted like: Dec 8th, 2011 4:03pm. Defaults to NOW().) """ super(EditAudioPostWithURLInputSet, self)._set_input('Date', value) def set_ID(self, value): """ Set the value of the ID input for this Choreo. ((required, integer) The ID of the post you want to edit.) """ super(EditAudioPostWithURLInputSet, self)._set_input('ID', value) def set_Markdown(self, value): """ Set the value of the Markdown input for this Choreo. ((optional, boolean) Indicates whether the post uses markdown syntax. Defaults to false. Set to 1 to indicate true.) """ super(EditAudioPostWithURLInputSet, self)._set_input('Markdown', value) def set_ResponseFormat(self, value): """ Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.) """ super(EditAudioPostWithURLInputSet, self)._set_input('ResponseFormat', value) def set_SecretKey(self, value): """ Set the value of the SecretKey input for this Choreo. ((required, string) The Secret Key provided by Tumblr (AKA the OAuth Consumer Secret).) """ super(EditAudioPostWithURLInputSet, self)._set_input('SecretKey', value) def set_Slug(self, value): """ Set the value of the Slug input for this Choreo. ((optional, string) Adds a short text summary to the end of the post URL.) """ super(EditAudioPostWithURLInputSet, self)._set_input('Slug', value) def set_State(self, value): """ Set the value of the State input for this Choreo. ((optional, string) The state of the post. Specify one of the following: published, draft, queue. Defaults to published.) """ super(EditAudioPostWithURLInputSet, self)._set_input('State', value) def set_Tags(self, value): """ Set the value of the Tags input for this Choreo. ((optional, string) Comma-separated tags for this post.) """ super(EditAudioPostWithURLInputSet, self)._set_input('Tags', value) def set_Tweet(self, value): """ Set the value of the Tweet input for this Choreo. ((optional, string) Manages the autotweet (if enabled) for this post. Set to "off" for no tweet. Enter text to override the default tweet.) """ super(EditAudioPostWithURLInputSet, self)._set_input('Tweet', value) class EditAudioPostWithURLResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the EditAudioPostWithURL Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. (The response from Tumblr. Default is JSON, can be set to XML by entering 'xml' in ResponseFormat.) """ return self._output.get('Response', None) class EditAudioPostWithURLChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return EditAudioPostWithURLResultSet(response, path)
gpl-3.0
vmindru/ansible
lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
17
13419
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['deprecated'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: ec2_ami_find version_added: '2.0' short_description: Searches for AMIs to obtain the AMI ID and other information deprecated: removed_in: "2.9" why: Various AWS modules have been combined and replaced with M(ec2_ami_facts). alternative: Use M(ec2_ami_facts) instead. description: - Returns list of matching AMIs with AMI ID, along with other useful information - Can search AMIs with different owners - Can search by matching tag(s), by AMI name and/or other criteria - Results can be sorted and sliced author: "Tom Bamford (@tombamford)" notes: - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. - See the example below for a suggestion of how to search by distro/release. options: region: description: - The AWS region to use. required: true aliases: [ 'aws_region', 'ec2_region' ] owner: description: - Search AMIs owned by the specified owner - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace' - If not specified, all EC2 AMIs in the specified region will be searched. - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\. ami_id: description: - An AMI ID to match. ami_tags: description: - A hash/dictionary of tags to match for the AMI. architecture: description: - An architecture type to match (e.g. x86_64). hypervisor: description: - A hypervisor type type to match (e.g. xen). is_public: description: - Whether or not the image(s) are public. type: bool name: description: - An AMI name to match. platform: description: - Platform type to match. product_code: description: - Marketplace product code to match. version_added: "2.3" sort: description: - Optional attribute which with to sort the results. - If specifying 'tag', the 'tag_name' parameter is required. - Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id, platform, root_device_name, root_device_type, state, and virtualization_type are supported. choices: - 'name' - 'description' - 'tag' - 'architecture' - 'block_device_mapping' - 'creationDate' - 'hypervisor' - 'is_public' - 'location' - 'owner_id' - 'platform' - 'root_device_name' - 'root_device_type' - 'state' - 'virtualization_type' sort_tag: description: - Tag name with which to sort results. - Required when specifying 'sort=tag'. sort_order: description: - Order in which to sort results. - Only used when the 'sort' parameter is specified. choices: ['ascending', 'descending'] default: 'ascending' sort_start: description: - Which result to start with (when sorting). - Corresponds to Python slice notation. sort_end: description: - Which result to end with (when sorting). - Corresponds to Python slice notation. state: description: - AMI state to match. default: 'available' virtualization_type: description: - Virtualization type to match (e.g. hvm). root_device_type: description: - Root device type to match (e.g. ebs, instance-store). version_added: "2.5" no_result_action: description: - What to do when no results are found. - "'success' reports success and returns an empty array" - "'fail' causes the module to report failure" choices: ['success', 'fail'] default: 'success' extends_documentation_fragment: - aws requirements: - "python >= 2.6" - boto ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. # Search for the AMI tagged "project:website" - ec2_ami_find: owner: self ami_tags: project: website no_result_action: fail register: ami_find # Search for the latest Ubuntu 14.04 AMI - ec2_ami_find: name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*" owner: 099720109477 sort: name sort_order: descending sort_end: 1 register: ami_find # Launch an EC2 instance - ec2: image: "{{ ami_find.results[0].ami_id }}" instance_type: m3.medium key_name: mykey wait: yes ''' RETURN = ''' ami_id: description: id of found amazon image returned: when AMI found type: str sample: "ami-e9095e8c" architecture: description: architecture of image returned: when AMI found type: str sample: "x86_64" block_device_mapping: description: block device mapping associated with image returned: when AMI found type: dict sample: "{ '/dev/xvda': { 'delete_on_termination': true, 'encrypted': false, 'size': 8, 'snapshot_id': 'snap-ca0330b8', 'volume_type': 'gp2' }" creationDate: description: creation date of image returned: when AMI found type: str sample: "2015-10-15T22:43:44.000Z" description: description: description of image returned: when AMI found type: str sample: "test-server01" hypervisor: description: type of hypervisor returned: when AMI found type: str sample: "xen" is_public: description: whether image is public returned: when AMI found type: bool sample: false location: description: location of image returned: when AMI found type: str sample: "435210894375/test-server01-20151015-234343" name: description: ami name of image returned: when AMI found type: str sample: "test-server01-20151015-234343" owner_id: description: owner of image returned: when AMI found type: str sample: "435210894375" platform: description: platform of image returned: when AMI found type: str sample: null root_device_name: description: root device name of image returned: when AMI found type: str sample: "/dev/xvda" root_device_type: description: root device type of image returned: when AMI found type: str sample: "ebs" state: description: state of image returned: when AMI found type: str sample: "available" tags: description: tags assigned to image returned: when AMI found type: dict sample: "{ 'Environment': 'devel', 'Name': 'test-server01', 'Role': 'web' }" virtualization_type: description: image virtualization type returned: when AMI found type: str sample: "hvm" ''' import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect def get_block_device_mapping(image): """ Retrieves block device mapping from AMI """ bdm_dict = dict() bdm = getattr(image, 'block_device_mapping') for device_name in bdm.keys(): bdm_dict[device_name] = { 'size': bdm[device_name].size, 'snapshot_id': bdm[device_name].snapshot_id, 'volume_type': bdm[device_name].volume_type, 'encrypted': bdm[device_name].encrypted, 'delete_on_termination': bdm[device_name].delete_on_termination } return bdm_dict def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( owner=dict(required=False, default=None), ami_id=dict(required=False), ami_tags=dict(required=False, type='dict', aliases=['search_tags', 'image_tags']), architecture=dict(required=False), hypervisor=dict(required=False), is_public=dict(required=False, type='bool'), name=dict(required=False), platform=dict(required=False), product_code=dict(required=False), sort=dict(required=False, default=None, choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']), sort_tag=dict(required=False), sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']), sort_start=dict(required=False), sort_end=dict(required=False), state=dict(required=False, default='available'), virtualization_type=dict(required=False), no_result_action=dict(required=False, default='success', choices=['success', 'fail']), ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) module.deprecate("The 'ec2_ami_find' module has been deprecated. Use 'ec2_ami_facts' instead.", version=2.9) if not HAS_BOTO: module.fail_json(msg='boto required for this module, install via pip or your package manager') ami_id = module.params.get('ami_id') ami_tags = module.params.get('ami_tags') architecture = module.params.get('architecture') hypervisor = module.params.get('hypervisor') is_public = module.params.get('is_public') name = module.params.get('name') owner = module.params.get('owner') platform = module.params.get('platform') product_code = module.params.get('product_code') root_device_type = module.params.get('root_device_type') sort = module.params.get('sort') sort_tag = module.params.get('sort_tag') sort_order = module.params.get('sort_order') sort_start = module.params.get('sort_start') sort_end = module.params.get('sort_end') state = module.params.get('state') virtualization_type = module.params.get('virtualization_type') no_result_action = module.params.get('no_result_action') filter = {'state': state} if ami_id: filter['image_id'] = ami_id if ami_tags: for tag in ami_tags: filter['tag:' + tag] = ami_tags[tag] if architecture: filter['architecture'] = architecture if hypervisor: filter['hypervisor'] = hypervisor if is_public: filter['is_public'] = 'true' if name: filter['name'] = name if platform: filter['platform'] = platform if product_code: filter['product-code'] = product_code if root_device_type: filter['root_device_type'] = root_device_type if virtualization_type: filter['virtualization_type'] = virtualization_type ec2 = ec2_connect(module) images_result = ec2.get_all_images(owners=owner, filters=filter) if no_result_action == 'fail' and len(images_result) == 0: module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter)) results = [] for image in images_result: data = { 'ami_id': image.id, 'architecture': image.architecture, 'block_device_mapping': get_block_device_mapping(image), 'creationDate': image.creationDate, 'description': image.description, 'hypervisor': image.hypervisor, 'is_public': image.is_public, 'location': image.location, 'name': image.name, 'owner_id': image.owner_id, 'platform': image.platform, 'root_device_name': image.root_device_name, 'root_device_type': image.root_device_type, 'state': image.state, 'tags': image.tags, 'virtualization_type': image.virtualization_type, } if image.kernel_id: data['kernel_id'] = image.kernel_id if image.ramdisk_id: data['ramdisk_id'] = image.ramdisk_id results.append(data) if sort == 'tag': if not sort_tag: module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'") results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order == 'descending')) elif sort: results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending')) try: if sort and sort_start and sort_end: results = results[int(sort_start):int(sort_end)] elif sort and sort_start: results = results[int(sort_start):] elif sort and sort_end: results = results[:int(sort_end)] except TypeError: module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end") module.exit_json(results=results) if __name__ == '__main__': main()
gpl-3.0
dnguyen0304/clare
clare/clare/common/messaging/consumer/interfaces.py
1
1596
# -*- coding: utf-8 -*- import abc class IConsumer(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def consume(self, interval, timeout): """ Parameters ---------- interval : float Rate of work. The units are in seconds. timeout : float Maximum duration to try fetching a new record. The units are in seconds. Returns ------- None """ pass @abc.abstractmethod def _consume_once(self, timeout): """ Parameters ---------- timeout : float Maximum duration to try fetching a new record. The units are in seconds. Returns ------- None Raises ------ clare.common.messaging.consumer.exceptions.FetchTimeout If the fetcher times out before fetching the minimum fetch size. """ pass class IFetcher(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def pop(self, timeout): """ Parameters ---------- timeout : float Returns ------- clare.common.messaging.records.Record Raises ------ clare.common.messaging.consumer.exceptions.FetchTimeout """ pass class IHandler(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def handle(self, record): """ Parameters ---------- record : clare.common.messaging.records.Record """ pass
mit
jrha/aquilon
tests/broker/test_manage_validate_branch.py
2
6137
#!/usr/bin/env python2.6 # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2012,2013 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for testing the manage command.""" import os import unittest if __name__ == "__main__": import utils utils.import_depends() from broker.brokertest import TestBrokerCommand class TestManageValidateBranch(TestBrokerCommand): def test_000_add_managetest1_sandbox(self): self.successtest(["add", "sandbox", "--sandbox", "managetest1"]) def test_000_add_managetest2_sandbox(self): self.successtest(["add", "sandbox", "--sandbox", "managetest2"]) def test_100_manage_for_uncommitted_change(self): # aquilon63.aqd-unittest.ms.com & aquilon64.aqd-unittest.ms.com are # sitting in "%s/utsandbox" we manage it to managetest1 to start clean. user = self.config.get("unittest", "user") self.noouttest(["manage", "--hostname=aquilon63.aqd-unittest.ms.com", "--sandbox=%s/managetest1" % user, "--force"]) self.noouttest(["manage", "--hostname=aquilon64.aqd-unittest.ms.com", "--sandbox=%s/managetest1" % user, "--force"]) def test_101_make_uncommitted_change(self): sandboxdir = os.path.join(self.sandboxdir, "managetest1") template = self.find_template("aquilon", "archetype", "base", sandbox="managetest1") f = open(template) try: contents = f.readlines() finally: f.close() contents.append("#Added by test_manage unittest %s \n" % sandboxdir) f = open(template, 'w') try: f.writelines(contents) finally: f.close() self.gitcommand(["add", template], cwd=sandboxdir) def test_102_fail_uncommitted_change(self): user = self.config.get("unittest", "user") command = ["manage", "--hostname", "aquilon63.aqd-unittest.ms.com", "--sandbox", "%s/managetest2" % user] out = self.badrequesttest(command) self.matchoutput(out, "The source sandbox managetest1 contains uncommitted" " files.", command) def test_110_commit_uncommitted_change(self): sandboxdir = os.path.join(self.sandboxdir, "managetest1") self.gitcommand(["commit", "-a", "-m", "added test_manage unittest comment"], cwd=sandboxdir) def test_112_fail_missing_committed_change_in_template_king(self): user = self.config.get("unittest", "user") command = ["manage", "--hostname", "aquilon63.aqd-unittest.ms.com", "--sandbox", "%s/managetest2" % user] out = self.badrequesttest(command) self.matchoutput(out, "The source sandbox managetest1 latest commit has " "not been published to template-king yet.", command) def test_114_publish_committed_change(self): sandboxdir = os.path.join(self.sandboxdir, "managetest1") self.successtest(["publish", "--branch", "managetest1"], env=self.gitenv(), cwd=sandboxdir) def test_115_fail_missing_committed_change_in_target(self): user = self.config.get("unittest", "user") command = ["manage", "--hostname", "aquilon63.aqd-unittest.ms.com", "--sandbox", "%s/managetest2" % user] out = self.badrequesttest(command) self.matchoutput(out, "The target sandbox managetest2 does not contain the " "latest commit from source sandbox managetest1.", command) def test_116_pull_committed_change(self): kingdir = self.config.get("broker", "kingdir") user = self.config.get("unittest", "user") managetest2dir = os.path.join(self.sandboxdir, "managetest2") self.gitcommand(["pull", "--no-ff", kingdir, "managetest1"], cwd=managetest2dir) def test_120_manage_committed(self): user = self.config.get("unittest", "user") self.noouttest(["manage", "--hostname=aquilon63.aqd-unittest.ms.com", "--sandbox=%s/managetest2" % user]) def test_121_verify_manage_committed(self): user = self.config.get("unittest", "user") command = "show host --hostname aquilon63.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Primary Name: aquilon63.aqd-unittest.ms.com", command) self.matchoutput(out, "Sandbox: %s/managetest2" % user, command) def test_130_force_manage_committed(self): user = self.config.get("unittest", "user") self.noouttest(["manage", "--hostname=aquilon64.aqd-unittest.ms.com", "--sandbox=%s/managetest2" % user, "--force"]) def test_131_verify_force_manage_committed(self): user = self.config.get("unittest", "user") command = "show host --hostname aquilon64.aqd-unittest.ms.com" out = self.commandtest(command.split(" ")) self.matchoutput(out, "Primary Name: aquilon64.aqd-unittest.ms.com", command) self.matchoutput(out, "Sandbox: %s/managetest2" % user, command) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestManageValidateBranch) unittest.TextTestRunner(verbosity=2).run(suite)
apache-2.0
fredmorcos/attic
projects/plantmaker/plantmaker-main/src/benchmark/benchmark.py
1
2905
from os import path from extra.printer import pprint, GREEN, BLUE, RED class Benchmark(object): useCairoPlot = False useGnuPlot = True def __init__(self, plant, orderList, testNumber): self.prefix = "generic" self.testName = "generic" self.testNumber = testNumber self.cairoPlotTimes = [] self.gnuPlotTimes = [] self.plant = plant self.orderList = orderList self.orderListSize = -1 self.machineListSize = -1 self.startValue = 0 def addGnuPlotTime(self, x, y): self.gnuPlotTimes.append((x, y)) pprint("PERF Time = " + str(y), GREEN) def addCairoPlotTime(self, t): self.cairoPlotTimes.append(t) pprint("PERF Time = " + str(t), GREEN) def prepare(self): pprint("PERF Starting " + self.prefix + " benchmark test " + str(self.testNumber) + " on " + self.testName, BLUE) if self.orderListSize != -1: self.orderList.orders = self.orderList.orders[:self.orderListSize] if self.machineListSize != -1: self.plant.machines = self.plant.machines[:self.machineListSize] self.times = [i * 0 for i in range(self.startValue)] def save(self): if Benchmark.useCairoPlot == True: self.plotCairoPlot() if Benchmark.useGnuPlot == True: self.plotGnuPlot() def plotGnuPlot(self): import os, subprocess p = subprocess.Popen(['/usr/bin/gnuplot'], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = os.getcwd()) output = "" hasFloat = False for i in self.gnuPlotTimes: if type(i[0]) == float: hasFloat = True output += str(i[0]) + " " + str(i[1]) + "\n" with open("plantmaker-tmp", "w") as f: f.write(output) f.close() of = "benchmarks/" + self.prefix + "-" + self.testName + "-" + \ str(self.testNumber) + "-gp.eps" commString = "set grid; set term postscript; set out '" + of + "'; " + \ "set format y \"%.4f\"; " + "set xlabel \"" + self.testName + "\"; " + \ "set ylabel \"Time (Seconds)\"; unset key; " if hasFloat == True: commString += "set format x \"%.1f\"; " commString += "plot 'plantmaker-tmp' with lines lw 3, 'plantmaker-tmp' with points pt 7 ps 1\n" p.communicate(commString) p.wait() os.remove("plantmaker-tmp") def plotCairoPlot(self): try: from thirdparty.CairoPlot import dot_line_plot except: pprint("PERF Will not output to graph. Install CairoPlot.", RED) return dot_line_plot(path.join("benchmarks", self.prefix + "-" + self.testName + "-" + str(self.testNumber)) + ".png", self.cairoPlotTimes, 800, 800, (255, 255, 255), 5, True, True, True, None, None, None, None) dot_line_plot(path.join("benchmarks", self.prefix + "-" + self.testName + "-" + str(self.testNumber)) + ".ps", self.cairoPlotTimes, 800, 800, (255, 255, 255), 5, True, True, True, None, None, None, None) def run(self): self.prepare() self.bench() self.save() def bench(self): pass
isc
olopez32/syncless
googlecode_upload.py
5
9044
#!/usr/local/bin/stackless2.6 # # Downloaded from http://support.googlecode.com/svn/trunk/scripts/googlecode_upload.py # at Thu Apr 29 17:24:59 CEST 2010 # # Copyright 2006, 2007 Google Inc. All Rights Reserved. # Author: danderson@google.com (David Anderson) # # Script for uploading files to a Google Code project. # # This is intended to be both a useful script for people who want to # streamline project uploads and a reference implementation for # uploading files to Google Code projects. # # To upload a file to Google Code, you need to provide a path to the # file on your local machine, a small summary of what the file is, a # project name, and a valid account that is a member or owner of that # project. You can optionally provide a list of labels that apply to # the file. The file will be uploaded under the same name that it has # in your local filesystem (that is, the "basename" or last path # component). Run the script with '--help' to get the exact syntax # and available options. # # Note that the upload script requests that you enter your # googlecode.com password. This is NOT your Gmail account password! # This is the password you use on googlecode.com for committing to # Subversion and uploading files. You can find your password by going # to http://code.google.com/hosting/settings when logged in with your # Gmail account. If you have already committed to your project's # Subversion repository, the script will automatically retrieve your # credentials from there (unless disabled, see the output of '--help' # for details). # # If you are looking at this script as a reference for implementing # your own Google Code file uploader, then you should take a look at # the upload() function, which is the meat of the uploader. You # basically need to build a multipart/form-data POST request with the # right fields and send it to https://PROJECT.googlecode.com/files . # Authenticate the request using HTTP Basic authentication, as is # shown below. # # Licensed under the terms of the Apache Software License 2.0: # http://www.apache.org/licenses/LICENSE-2.0 # # Questions, comments, feature requests and patches are most welcome. # Please direct all of these to the Google Code users group: # http://groups.google.com/group/google-code-hosting """Google Code file uploader script. """ __author__ = 'danderson@google.com (David Anderson)' import httplib import os.path import optparse import getpass import base64 import sys def upload(file, project_name, user_name, password, summary, labels=None): """Upload a file to a Google Code project's file server. Args: file: The local path to the file. project_name: The name of your project on Google Code. user_name: Your Google account name. password: The googlecode.com password for your account. Note that this is NOT your global Google Account password! summary: A small description for the file. labels: an optional list of label strings with which to tag the file. Returns: a tuple: http_status: 201 if the upload succeeded, something else if an error occured. http_reason: The human-readable string associated with http_status file_url: If the upload succeeded, the URL of the file on Google Code, None otherwise. """ # The login is the user part of user@gmail.com. If the login provided # is in the full user@domain form, strip it down. if user_name.endswith('@gmail.com'): user_name = user_name[:user_name.index('@gmail.com')] form_fields = [('summary', summary)] if labels is not None: form_fields.extend([('label', l.strip()) for l in labels]) content_type, body = encode_upload_request(form_fields, file) upload_host = '%s.googlecode.com' % project_name upload_uri = '/files' auth_token = base64.b64encode('%s:%s'% (user_name, password)) headers = { 'Authorization': 'Basic %s' % auth_token, 'User-Agent': 'Googlecode.com uploader v0.9.4', 'Content-Type': content_type, } server = httplib.HTTPSConnection(upload_host) server.request('POST', upload_uri, body, headers) resp = server.getresponse() server.close() if resp.status == 201: location = resp.getheader('Location', None) else: location = None return resp.status, resp.reason, location def encode_upload_request(fields, file_path): """Encode the given fields and file into a multipart form body. fields is a sequence of (name, value) pairs. file is the path of the file to upload. The file will be uploaded to Google Code with the same file name. Returns: (content_type, body) ready for httplib.HTTP instance """ BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla' CRLF = '\r\n' body = [] # Add the metadata about the upload first for key, value in fields: body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="%s"' % key, '', value, ]) # Now add the file itself file_name = os.path.basename(file_path) f = open(file_path, 'rb') file_content = f.read() f.close() body.extend( ['--' + BOUNDARY, 'Content-Disposition: form-data; name="filename"; filename="%s"' % file_name, # The upload server determines the mime-type, no need to set it. 'Content-Type: application/octet-stream', '', file_content, ]) # Finalize the form body body.extend(['--' + BOUNDARY + '--', '']) return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body) def upload_find_auth(file_path, project_name, summary, labels=None, user_name=None, password=None, tries=3): """Find credentials and upload a file to a Google Code project's file server. file_path, project_name, summary, and labels are passed as-is to upload. Args: file_path: The local path to the file. project_name: The name of your project on Google Code. summary: A small description for the file. labels: an optional list of label strings with which to tag the file. config_dir: Path to Subversion configuration directory, 'none', or None. user_name: Your Google account name. tries: How many attempts to make. """ while tries > 0: if user_name is None: # Read username if not specified or loaded from svn config, or on # subsequent tries. sys.stdout.write('Please enter your googlecode.com username: ') sys.stdout.flush() user_name = sys.stdin.readline().rstrip() if password is None: # Read password if not loaded from svn config, or on subsequent tries. print 'Please enter your googlecode.com password.' print '** Note that this is NOT your Gmail account password! **' print 'It is the password you use to access Subversion repositories,' print 'and can be found here: http://code.google.com/hosting/settings' password = getpass.getpass() status, reason, url = upload(file_path, project_name, user_name, password, summary, labels) # Returns 403 Forbidden instead of 401 Unauthorized for bad # credentials as of 2007-07-17. if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]: # Rest for another try. user_name = password = None tries = tries - 1 else: # We're done. break return status, reason, url def main(): parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY ' '-p PROJECT [options] FILE') parser.add_option('-s', '--summary', dest='summary', help='Short description of the file') parser.add_option('-p', '--project', dest='project', help='Google Code project name') parser.add_option('-u', '--user', dest='user', help='Your Google Code username') parser.add_option('-w', '--password', dest='password', help='Your Google Code password') parser.add_option('-l', '--labels', dest='labels', help='An optional list of comma-separated labels to attach ' 'to the file') options, args = parser.parse_args() if not options.summary: parser.error('File summary is missing.') elif not options.project: parser.error('Project name is missing.') elif len(args) < 1: parser.error('File to upload not provided.') elif len(args) > 1: parser.error('Only one file may be specified.') file_path = args[0] if options.labels: labels = options.labels.split(',') else: labels = None status, reason, url = upload_find_auth(file_path, options.project, options.summary, labels, options.user, options.password) if url: print 'The file was uploaded successfully.' print 'URL: %s' % url return 0 else: print 'An error occurred. Your file was not uploaded.' print 'Google Code upload server said: %s (%s)' % (reason, status) return 1 if __name__ == '__main__': sys.exit(main())
apache-2.0
netroby/WinObjC
deps/3rdparty/icu/icu/source/test/depstest/dependencies.py
198
7330
#! /usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2011-2014, International Business Machines # Corporation and others. All Rights Reserved. # # file name: dependencies.py # # created on: 2011may26 """Reader module for dependency data for the ICU dependency tester. Reads dependencies.txt and makes the data available. Attributes: files: Set of "library/filename.o" files mentioned in the dependencies file. items: Map from library or group names to item maps. Each item has a "type" ("library" or "group" or "system_symbols"). A library or group item can have an optional set of "files" (as in the files attribute). Each item can have an optional set of "deps" (libraries & groups). A group item also has a "library" name unless it is a group of system symbols. The one "system_symbols" item and its groups have sets of "system_symbols" with standard-library system symbol names. libraries: Set of library names mentioned in the dependencies file. file_to_item: Map from a symbol (ushoe.o) to library or group (shoesize) """ __author__ = "Markus W. Scherer" # TODO: Support binary items. # .txt syntax: binary: tools/genrb # item contents: {"type": "binary"} with optional files & deps # A binary must not be used as a dependency for anything else. import sys files = set() items = {} libraries = set() file_to_item = {} _line_number = 0 _groups_to_be_defined = set() def _CheckLibraryName(name): global _line_number if not name: sys.exit("Error:%d: \"library: \" without name" % _line_number) if name.endswith(".o"): sys.exit("Error:%d: invalid library name %s" % (_line_number, name)) def _CheckGroupName(name): global _line_number if not name: sys.exit("Error:%d: \"group: \" without name" % _line_number) if "/" in name or name.endswith(".o"): sys.exit("Error:%d: invalid group name %s" % (_line_number, name)) def _CheckFileName(name): global _line_number if "/" in name or not name.endswith(".o"): sys.exit("Error:%d: invalid file name %s" % (_line_number, name)) def _RemoveComment(line): global _line_number _line_number = _line_number + 1 index = line.find("#") # Remove trailing comment. if index >= 0: line = line[:index] return line.rstrip() # Remove trailing newlines etc. def _ReadLine(f): while True: line = _RemoveComment(f.next()) if line: return line def _ReadFiles(deps_file, item, library_name): global files item_files = item.get("files") while True: line = _ReadLine(deps_file) if not line: continue if not line.startswith(" "): return line if item_files == None: item_files = item["files"] = set() for file_name in line.split(): _CheckFileName(file_name) file_name = library_name + "/" + file_name if file_name in files: sys.exit("Error:%d: file %s listed in multiple groups" % (_line_number, file_name)) files.add(file_name) item_files.add(file_name) file_to_item[file_name] = item["name"] def _IsLibrary(item): return item and item["type"] == "library" def _IsLibraryGroup(item): return item and "library" in item def _ReadDeps(deps_file, item, library_name): global items, _line_number, _groups_to_be_defined item_deps = item.get("deps") while True: line = _ReadLine(deps_file) if not line: continue if not line.startswith(" "): return line if item_deps == None: item_deps = item["deps"] = set() for dep in line.split(): _CheckGroupName(dep) dep_item = items.get(dep) if item["type"] == "system_symbols" and (_IsLibraryGroup(dep_item) or _IsLibrary(dep_item)): sys.exit(("Error:%d: system_symbols depend on previously defined " + "library or library group %s") % (_line_number, dep)) if dep_item == None: # Add this dependency as a new group. items[dep] = {"type": "group"} if library_name: items[dep]["library"] = library_name _groups_to_be_defined.add(dep) item_deps.add(dep) def _AddSystemSymbol(item, symbol): exports = item.get("system_symbols") if exports == None: exports = item["system_symbols"] = set() exports.add(symbol) def _ReadSystemSymbols(deps_file, item): global _line_number while True: line = _ReadLine(deps_file) if not line: continue if not line.startswith(" "): return line line = line.lstrip() if '"' in line: # One double-quote-enclosed symbol on the line, allows spaces in a symbol name. symbol = line[1:-1] if line.startswith('"') and line.endswith('"') and '"' not in symbol: _AddSystemSymbol(item, symbol) else: sys.exit("Error:%d: invalid quoted symbol name %s" % (_line_number, line)) else: # One or more space-separate symbols. for symbol in line.split(): _AddSystemSymbol(item, symbol) def Load(): """Reads "dependencies.txt" and populates the module attributes.""" global items, libraries, _line_number, _groups_to_be_defined deps_file = open("dependencies.txt") try: line = None current_type = None while True: while not line: line = _RemoveComment(deps_file.next()) if line.startswith("library: "): current_type = "library" name = line[9:].lstrip() _CheckLibraryName(name) if name in items: sys.exit("Error:%d: library definition using duplicate name %s" % (_line_number, name)) libraries.add(name) item = items[name] = {"type": "library", "name": name} line = _ReadFiles(deps_file, item, name) elif line.startswith("group: "): current_type = "group" name = line[7:].lstrip() _CheckGroupName(name) if name not in items: sys.exit("Error:%d: group %s defined before mentioned as a dependency" % (_line_number, name)) if name not in _groups_to_be_defined: sys.exit("Error:%d: group definition using duplicate name %s" % (_line_number, name)) _groups_to_be_defined.remove(name) item = items[name] item["name"] = name library_name = item.get("library") if library_name: line = _ReadFiles(deps_file, item, library_name) else: line = _ReadSystemSymbols(deps_file, item) elif line == " deps": if current_type == "library": line = _ReadDeps(deps_file, items[name], name) elif current_type == "group": item = items[name] line = _ReadDeps(deps_file, item, item.get("library")) elif current_type == "system_symbols": item = items[current_type] line = _ReadDeps(deps_file, item, None) else: sys.exit("Error:%d: deps before any library or group" % _line_number) elif line == "system_symbols:": current_type = "system_symbols" if current_type in items: sys.exit("Error:%d: duplicate entry for system_symbols" % _line_number) item = items[current_type] = {"type": current_type, "name": current_type} line = _ReadSystemSymbols(deps_file, item) else: sys.exit("Syntax error:%d: %s" % (_line_number, line)) except StopIteration: pass if _groups_to_be_defined: sys.exit("Error: some groups mentioned in dependencies are undefined: %s" % _groups_to_be_defined)
mit
cdubz/babybuddy
reports/graphs/feeding_amounts.py
1
1422
# -*- coding: utf-8 -*- from django.utils import timezone from django.utils.translation import gettext as _ import plotly.offline as plotly import plotly.graph_objs as go from reports import utils def feeding_amounts(instances): """ Create a graph showing daily feeding amounts over time. :param instances: a QuerySet of Feeding instances. :returns: a tuple of the the graph's html and javascript. """ totals = {} for instance in instances: end = timezone.localtime(instance.end) date = end.date() if date not in totals.keys(): totals[date] = 0 totals[date] += instance.amount or 0 amounts = [round(amount, 2) for amount in totals.values()] trace = go.Bar( name=_('Total feeding amount'), x=list(totals.keys()), y=amounts, hoverinfo='text', textposition='outside', text=amounts ) layout_args = utils.default_graph_layout_options() layout_args['title'] = _('<b>Total Feeding Amounts</b>') layout_args['xaxis']['title'] = _('Date') layout_args['xaxis']['rangeselector'] = utils.rangeselector_date() layout_args['yaxis']['title'] = _('Feeding amount') fig = go.Figure({ 'data': [trace], 'layout': go.Layout(**layout_args) }) output = plotly.plot(fig, output_type='div', include_plotlyjs=False) return utils.split_graph_output(output)
bsd-2-clause
geraldinepascal/FROGS
tools/phyloseq_beta_diversity/phyloseq_beta_diversity.py
1
7336
#!/usr/bin/env python3 # # Copyright (C) 2018 INRA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __author__ = 'Ta Thi Ngan & Maria Bernard INRA - SIGENAE' __copyright__ = 'Copyright (C) 2017 INRA' __license__ = 'GNU General Public License' __version__ = '3.2.3' __email__ = 'frogs-support@inrae.fr' __status__ = 'prod' import os import sys import argparse CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) FROGS_DIR="" if CURRENT_DIR.endswith("phyloseq_beta_diversity"): FROGS_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR)) else: FROGS_DIR = os.path.dirname(CURRENT_DIR) # PATH BIN_DIR = os.path.abspath(os.path.join(FROGS_DIR, "libexec")) os.environ['PATH'] = BIN_DIR + os.pathsep + os.environ['PATH'] APP_DIR = os.path.abspath(os.path.join(FROGS_DIR, "app")) os.environ['PATH'] = APP_DIR + os.pathsep + os.environ['PATH'] # PYTHONPATH LIB_DIR = os.path.abspath(os.path.join(FROGS_DIR, "lib")) sys.path.append(LIB_DIR) if os.getenv('PYTHONPATH') is None: os.environ['PYTHONPATH'] = LIB_DIR else: os.environ['PYTHONPATH'] = LIB_DIR + os.pathsep + os.environ['PYTHONPATH'] # LIBR LIBR_DIR = os.path.join(LIB_DIR,"external-lib") from frogsUtils import * ################################################################################################################################################## # # COMMAND LINES # ################################################################################################################################################## class Rscript(Cmd): """ @summary: Launch Rmarkdown script to present the data beta diversity with phyloseq. @see: http://rmarkdown.rstudio.com/ https://joey711.github.io/phyloseq/ @return: html file containing the plots beta divesity distance matrix tsv file(s) """ def __init__(self, html, phyloseq, varExp, methods, outdir, rmd_stderr): """ @param html: [str] path to store resulting html file. @param phyloseq: [str] path to phyloseq object in RData file, the result of FROGS Phyloseq Import Data. @param varExp: [str] Experiment variable to split plot. @param methods: [str] one or more of beta diversity method. @param outdir: [str] The path to store resulting beta diversity distance matrix. @param rmd_stderr: [str] Path to temporary Rmarkdown stderr output file """ rmd = os.path.join(CURRENT_DIR, "phyloseq_beta_diversity.Rmd") Cmd.__init__( self, 'Rscript', 'Run 1 code Rmarkdown', '-e "rmarkdown::render(' + "'" + rmd + "',knit_root_dir='" + outdir + "',output_file='" + html + \ "', params=list(phyloseq='" + phyloseq + "', varExp='" + varExp + "', methods='" + methods + "', libdir ='" + LIBR_DIR + "'), intermediates_dir='" + os.path.dirname(html) + "')" + '" 2> ' + rmd_stderr, "-e '(sessionInfo()[[1]][13])[[1]][1]; paste(\"Rmarkdown version: \",packageVersion(\"rmarkdown\")) ; library(phyloseq); paste(\"Phyloseq version: \",packageVersion(\"phyloseq\"))'") def get_version(self): """ @summary: Returns the program version number. @return: [str] Version number if this is possible, otherwise this method return 'unknown'. """ return Cmd.get_version(self, 'stdout') ################################################################################################################################################## # # MAIN # ################################################################################################################################################## if __name__ == "__main__": # Manage parameters parser = argparse.ArgumentParser( description='To present the data beta diversity with phyloseq.') parser.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." ) parser.add_argument( '--version', action='version', version=__version__ ) parser.add_argument('-v', '--varExp', type=str, required=True, default=None, help='The experiment variable you want to analyse.') parser.add_argument('-m', '--distance-methods', required=True, type=str, default='bray,cc,unifrac,wunifrac', help='Comma separated values beta diversity methods available in Phyloseq (see https://www.bioconductor.org/packages/devel/bioc/manuals/phyloseq/man/phyloseq.pdf). [Default: %(default)s].') # Inputs group_input = parser.add_argument_group( 'Inputs' ) group_input.add_argument('-r','--rdata', required=True, default=None, help="The path of RData file containing a phyloseq object-the result of FROGS Phyloseq Import Data" ) # output group_output = parser.add_argument_group( 'Outputs' ) group_output.add_argument('--matrix-outdir', required=True, action="store", type=str, help="Path to output matrix file") group_output.add_argument('-o','--html', default='phyloseq_beta_diversity.nb.html', help="The HTML file containing the graphs. [Default: %(default)s]" ) group_output.add_argument( '-l', '--log-file', default=sys.stdout, help='This output file will contain several informations on executed commands.') args = parser.parse_args() prevent_shell_injections(args) Logger.static_write(args.log_file, "## Application\nSoftware :" + sys.argv[0] + " (version : " + str(__version__) + ")\nCommand : " + " ".join(sys.argv) + "\n\n") # check parameter list_distance=["unifrac","wunifrac","bray","cc","dpcoa","jsd","manhattan","euclidean","canberra","kulczynski","jaccard","gower","altGower","morisita","horn","mountford","raup","binomial","chao","cao","wt","-1","c","wb","rt","I","e","t","me","j","sor","m","-2","co","g","-3","l","19","hk","rlb","sim","gl","z","maximum","binary","minkowski","ANY"] methods = args.distance_methods.strip() if not args.distance_methods.strip()[-1]=="," else args.distance_methods.strip()[:-1] for method in methods.split(","): if method not in list_distance: raise_exception( Exception( '\n\n#ERROR : Your method "'+str(method)+'", name is not correct !!! Please make sure that it is in the list:'+str(list_distance)+"\n\n")) # Process outdir = os.path.abspath(args.matrix_outdir) if not os.path.exists(outdir): os.makedirs(outdir) phyloseq=os.path.abspath(args.rdata) html=os.path.abspath(args.html) try: tmpFiles = TmpFiles(os.path.dirname(html)) rmd_stderr = tmpFiles.add("rmarkdown.stderr") Rscript(html, phyloseq, args.varExp, methods, outdir, rmd_stderr).submit( args.log_file ) finally : if not args.debug: tmpFiles.deleteAll()
gpl-3.0
edmorley/django
django/db/models/deletion.py
38
13580
from collections import Counter, OrderedDict from operator import attrgetter from django.db import IntegrityError, connections, transaction from django.db.models import signals, sql class ProtectedError(IntegrityError): def __init__(self, msg, protected_objects): self.protected_objects = protected_objects super().__init__(msg, protected_objects) def CASCADE(collector, field, sub_objs, using): collector.collect(sub_objs, source=field.remote_field.model, source_attr=field.name, nullable=field.null) if field.null and not connections[using].features.can_defer_constraint_checks: collector.add_field_update(field, None, sub_objs) def PROTECT(collector, field, sub_objs, using): raise ProtectedError( "Cannot delete some instances of model '%s' because they are " "referenced through a protected foreign key: '%s.%s'" % ( field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name ), sub_objs ) def SET(value): if callable(value): def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value(), sub_objs) else: def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value, sub_objs) set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {}) return set_on_delete def SET_NULL(collector, field, sub_objs, using): collector.add_field_update(field, None, sub_objs) def SET_DEFAULT(collector, field, sub_objs, using): collector.add_field_update(field, field.get_default(), sub_objs) def DO_NOTHING(collector, field, sub_objs, using): pass def get_candidate_relations_to_delete(opts): # The candidate relations are the ones that come from N-1 and 1-1 relations. # N-N (i.e., many-to-many) relations aren't candidates for deletion. return ( f for f in opts.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) ) class Collector: def __init__(self, using): self.using = using # Initially, {model: {instances}}, later values become lists. self.data = OrderedDict() self.field_updates = {} # {model: {(field, value): {instances}}} # fast_deletes is a list of queryset-likes that can be deleted without # fetching the objects into memory. self.fast_deletes = [] # Tracks deletion-order dependency for databases without transactions # or ability to defer constraint checks. Only concrete model classes # should be included, as the dependencies exist only between actual # database tables; proxy models are represented here by their concrete # parent. self.dependencies = {} # {model: {models}} def add(self, objs, source=None, nullable=False, reverse_dependency=False): """ Add 'objs' to the collection of objects to be deleted. If the call is the result of a cascade, 'source' should be the model that caused it, and 'nullable' should be set to True if the relation can be null. Return a list of all objects that were not already collected. """ if not objs: return [] new_objs = [] model = objs[0].__class__ instances = self.data.setdefault(model, set()) for obj in objs: if obj not in instances: new_objs.append(obj) instances.update(new_objs) # Nullable relationships can be ignored -- they are nulled out before # deleting, and therefore do not affect the order in which objects have # to be deleted. if source is not None and not nullable: if reverse_dependency: source, model = model, source self.dependencies.setdefault( source._meta.concrete_model, set()).add(model._meta.concrete_model) return new_objs def add_field_update(self, field, value, objs): """ Schedule a field update. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). """ if not objs: return model = objs[0].__class__ self.field_updates.setdefault( model, {}).setdefault( (field, value), set()).update(objs) def can_fast_delete(self, objs, from_field=None): """ Determine if the objects in the given queryset-like can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The 'from_field' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. Allow also skipping parent -> child -> parent chain preventing fast delete of the child. """ if from_field and from_field.remote_field.on_delete is not CASCADE: return False if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')): return False model = objs.model if (signals.pre_delete.has_listeners(model) or signals.post_delete.has_listeners(model) or signals.m2m_changed.has_listeners(model)): return False # The use of from_field comes from the need to avoid cascade back to # parent when parent delete is cascading to child. opts = model._meta if any(link != from_field for link in opts.concrete_model._meta.parents.values()): return False # Foreign keys pointing to this model, both from m2m and other # models. for related in get_candidate_relations_to_delete(opts): if related.field.remote_field.on_delete is not DO_NOTHING: return False for field in model._meta.private_fields: if hasattr(field, 'bulk_related_objects'): # It's something like generic foreign key. return False return True def get_del_batches(self, objs, field): """ Return the objs in suitably sized batches for the used connection. """ conn_batch_size = max( connections[self.using].ops.bulk_batch_size([field.name], objs), 1) if len(objs) > conn_batch_size: return [objs[i:i + conn_batch_size] for i in range(0, len(objs), conn_batch_size)] else: return [objs] def collect(self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False, keep_parents=False): """ Add 'objs' to the collection of objects to be deleted as well as all parent instances. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). If 'collect_related' is True, related objects will be handled by their respective on_delete handler. If the call is the result of a cascade, 'source' should be the model that caused it and 'nullable' should be set to True, if the relation can be null. If 'reverse_dependency' is True, 'source' will be deleted before the current model, rather than after. (Needed for cascading to parent models, the one case in which the cascade follows the forwards direction of an FK rather than the reverse direction.) If 'keep_parents' is True, data of parent model's will be not deleted. """ if self.can_fast_delete(objs): self.fast_deletes.append(objs) return new_objs = self.add(objs, source, nullable, reverse_dependency=reverse_dependency) if not new_objs: return model = new_objs[0].__class__ if not keep_parents: # Recursively collect concrete model's parent models, but not their # related objects. These will be found by meta.get_fields() concrete_model = model._meta.concrete_model for ptr in concrete_model._meta.parents.values(): if ptr: parent_objs = [getattr(obj, ptr.name) for obj in new_objs] self.collect(parent_objs, source=model, source_attr=ptr.remote_field.related_name, collect_related=False, reverse_dependency=True) if collect_related: parents = model._meta.parents for related in get_candidate_relations_to_delete(model._meta): # Preserve parent reverse relationships if keep_parents=True. if keep_parents and related.model in parents: continue field = related.field if field.remote_field.on_delete == DO_NOTHING: continue batches = self.get_del_batches(new_objs, field) for batch in batches: sub_objs = self.related_objects(related, batch) if self.can_fast_delete(sub_objs, from_field=field): self.fast_deletes.append(sub_objs) elif sub_objs: field.remote_field.on_delete(self, field, sub_objs, self.using) for field in model._meta.private_fields: if hasattr(field, 'bulk_related_objects'): # It's something like generic foreign key. sub_objs = field.bulk_related_objects(new_objs, self.using) self.collect(sub_objs, source=model, nullable=True) def related_objects(self, related, objs): """ Get a QuerySet of objects related to `objs` via the relation `related`. """ return related.related_model._base_manager.using(self.using).filter( **{"%s__in" % related.field.name: objs} ) def instances_with_model(self): for model, instances in self.data.items(): for obj in instances: yield model, obj def sort(self): sorted_models = [] concrete_models = set() models = list(self.data) while len(sorted_models) < len(models): found = False for model in models: if model in sorted_models: continue dependencies = self.dependencies.get(model._meta.concrete_model) if not (dependencies and dependencies.difference(concrete_models)): sorted_models.append(model) concrete_models.add(model._meta.concrete_model) found = True if not found: return self.data = OrderedDict((model, self.data[model]) for model in sorted_models) def delete(self): # sort instance collections for model, instances in self.data.items(): self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. self.sort() # number of objects deleted for each model label deleted_counter = Counter() with transaction.atomic(using=self.using, savepoint=False): # send pre_delete signals for model, obj in self.instances_with_model(): if not model._meta.auto_created: signals.pre_delete.send( sender=model, instance=obj, using=self.using ) # fast deletes for qs in self.fast_deletes: count = qs._raw_delete(using=self.using) deleted_counter[qs.model._meta.label] += count # update fields for model, instances_for_fieldvalues in self.field_updates.items(): query = sql.UpdateQuery(model) for (field, value), instances in instances_for_fieldvalues.items(): query.update_batch([obj.pk for obj in instances], {field.name: value}, self.using) # reverse instance collections for instances in self.data.values(): instances.reverse() # delete instances for model, instances in self.data.items(): query = sql.DeleteQuery(model) pk_list = [obj.pk for obj in instances] count = query.delete_batch(pk_list, self.using) deleted_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: signals.post_delete.send( sender=model, instance=obj, using=self.using ) # update collected instances for model, instances_for_fieldvalues in self.field_updates.items(): for (field, value), instances in instances_for_fieldvalues.items(): for obj in instances: setattr(obj, field.attname, value) for model, instances in self.data.items(): for instance in instances: setattr(instance, model._meta.pk.attname, None) return sum(deleted_counter.values()), dict(deleted_counter)
bsd-3-clause
Jamlum/pytomo
pytomo/dns/renderer.py
2
12024
# Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """Help for building DNS wire format messages""" from __future__ import absolute_import import cStringIO import struct import random import time from . import exception as dns_exception from . import tsig as dns_tsig from . import rdataclass as dns_rdataclass from . import rdatatype as dns_rdatatype QUESTION = 0 ANSWER = 1 AUTHORITY = 2 ADDITIONAL = 3 class Renderer(object): """Helper class for building DNS wire-format messages. Most applications can use the higher-level L{dns_message.Message} class and its to_wire() method to generate wire-format messages. This class is for those applications which need finer control over the generation of messages. Typical use:: r = dns_renderer.Renderer(id=1, flags=0x80, max_size=512) r.add_question(qname, qtype, qclass) r.add_rrset(dns_renderer.ANSWER, rrset_1) r.add_rrset(dns_renderer.ANSWER, rrset_2) r.add_rrset(dns_renderer.AUTHORITY, ns_rrset) r.add_edns(0, 0, 4096) r.add_rrset(dns_renderer.ADDTIONAL, ad_rrset_1) r.add_rrset(dns_renderer.ADDTIONAL, ad_rrset_2) r.write_header() r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac) wire = r.get_wire() @ivar output: where rendering is written @type output: cStringIO.StringIO object @ivar id: the message id @type id: int @ivar flags: the message flags @type flags: int @ivar max_size: the maximum size of the message @type max_size: int @ivar origin: the origin to use when rendering relative names @type origin: dns_name.Name object @ivar compress: the compression table @type compress: dict @ivar section: the section currently being rendered @type section: int (dns_renderer.QUESTION, dns_renderer.ANSWER, dns_renderer.AUTHORITY, or dns_renderer.ADDITIONAL) @ivar counts: list of the number of RRs in each section @type counts: int list of length 4 @ivar mac: the MAC of the rendered message (if TSIG was used) @type mac: string """ def __init__(self, id=None, flags=0, max_size=65535, origin=None): """Initialize a new renderer. @param id: the message id @type id: int @param flags: the DNS message flags @type flags: int @param max_size: the maximum message size; the default is 65535. If rendering results in a message greater than I{max_size}, then L{dns_exception.TooBig} will be raised. @type max_size: int @param origin: the origin to use when rendering relative names @type origin: dns_name.Namem or None. """ self.output = cStringIO.StringIO() if id is None: self.id = random.randint(0, 65535) else: self.id = id self.flags = flags self.max_size = max_size self.origin = origin self.compress = {} self.section = QUESTION self.counts = [0, 0, 0, 0] self.output.write('\x00' * 12) self.mac = '' def _rollback(self, where): """Truncate the output buffer at offset I{where}, and remove any compression table entries that pointed beyond the truncation point. @param where: the offset @type where: int """ self.output.seek(where) self.output.truncate() keys_to_delete = [] for k, v in self.compress.iteritems(): if v >= where: keys_to_delete.append(k) for k in keys_to_delete: del self.compress[k] def _set_section(self, section): """Set the renderer's current section. Sections must be rendered order: QUESTION, ANSWER, AUTHORITY, ADDITIONAL. Sections may be empty. @param section: the section @type section: int @raises dns_exception.FormError: an attempt was made to set a section value less than the current section. """ if self.section != section: if self.section > section: raise dns_exception.FormError self.section = section def add_question(self, qname, rdtype, rdclass=dns_rdataclass.IN): """Add a question to the message. @param qname: the question name @type qname: dns_name.Name @param rdtype: the question rdata type @type rdtype: int @param rdclass: the question rdata class @type rdclass: int """ self._set_section(QUESTION) before = self.output.tell() qname.to_wire(self.output, self.compress, self.origin) self.output.write(struct.pack("!HH", rdtype, rdclass)) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns_exception.TooBig self.counts[QUESTION] += 1 def add_rrset(self, section, rrset, **kw): """Add the rrset to the specified section. Any keyword arguments are passed on to the rdataset's to_wire() routine. @param section: the section @type section: int @param rrset: the rrset @type rrset: dns_rrset.RRset object """ self._set_section(section) before = self.output.tell() n = rrset.to_wire(self.output, self.compress, self.origin, **kw) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns_exception.TooBig self.counts[section] += n def add_rdataset(self, section, name, rdataset, **kw): """Add the rdataset to the specified section, using the specified name as the owner name. Any keyword arguments are passed on to the rdataset's to_wire() routine. @param section: the section @type section: int @param name: the owner name @type name: dns_name.Name object @param rdataset: the rdataset @type rdataset: dns_rdataset.Rdataset object """ self._set_section(section) before = self.output.tell() n = rdataset.to_wire(name, self.output, self.compress, self.origin, **kw) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns_exception.TooBig self.counts[section] += n def add_edns(self, edns, ednsflags, payload, options=None): """Add an EDNS OPT record to the message. @param edns: The EDNS level to use. @type edns: int @param ednsflags: EDNS flag values. @type ednsflags: int @param payload: The EDNS sender's payload field, which is the maximum size of UDP datagram the sender can handle. @type payload: int @param options: The EDNS options list @type options: list of dns_edns_Option instances @see: RFC 2671 """ # make sure the EDNS version in ednsflags agrees with edns ednsflags &= 0xFF00FFFFL ednsflags |= (edns << 16) self._set_section(ADDITIONAL) before = self.output.tell() self.output.write(struct.pack('!BHHIH', 0, dns_rdatatype.OPT, payload, ednsflags, 0)) if not options is None: lstart = self.output.tell() for opt in options: stuff = struct.pack("!HH", opt.otype, 0) self.output.write(stuff) start = self.output.tell() opt.to_wire(self.output) end = self.output.tell() assert end - start < 65536 self.output.seek(start - 2) stuff = struct.pack("!H", end - start) self.output.write(stuff) self.output.seek(0, 2) lend = self.output.tell() assert lend - lstart < 65536 self.output.seek(lstart - 2) stuff = struct.pack("!H", lend - lstart) self.output.write(stuff) self.output.seek(0, 2) after = self.output.tell() if after >= self.max_size: self._rollback(before) raise dns_exception.TooBig self.counts[ADDITIONAL] += 1 def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data, request_mac, algorithm=dns_tsig.default_algorithm): """Add a TSIG signature to the message. @param keyname: the TSIG key name @type keyname: dns_name.Name object @param secret: the secret to use @type secret: string @param fudge: TSIG time fudge @type fudge: int @param id: the message id to encode in the tsig signature @type id: int @param tsig_error: TSIG error code; default is 0. @type tsig_error: int @param other_data: TSIG other data. @type other_data: string @param request_mac: This message is a response to the request which had the specified MAC. @param algorithm: the TSIG algorithm to use @type request_mac: string """ self._set_section(ADDITIONAL) before = self.output.tell() s = self.output.getvalue() (tsig_rdata, self.mac, ctx) = dns_tsig.sign(s, keyname, secret, int(time.time()), fudge, id, tsig_error, other_data, request_mac, algorithm=algorithm) keyname.to_wire(self.output, self.compress, self.origin) self.output.write(struct.pack('!HHIH', dns_rdatatype.TSIG, dns_rdataclass.ANY, 0, 0)) rdata_start = self.output.tell() self.output.write(tsig_rdata) after = self.output.tell() assert after - rdata_start < 65536 if after >= self.max_size: self._rollback(before) raise dns_exception.TooBig self.output.seek(rdata_start - 2) self.output.write(struct.pack('!H', after - rdata_start)) self.counts[ADDITIONAL] += 1 self.output.seek(10) self.output.write(struct.pack('!H', self.counts[ADDITIONAL])) self.output.seek(0, 2) def write_header(self): """Write the DNS message header. Writing the DNS message header is done asfter all sections have been rendered, but before the optional TSIG signature is added. """ self.output.seek(0) self.output.write(struct.pack('!HHHHHH', self.id, self.flags, self.counts[0], self.counts[1], self.counts[2], self.counts[3])) self.output.seek(0, 2) def get_wire(self): """Return the wire format message. @rtype: string """ return self.output.getvalue()
gpl-2.0
yanglr/book
a.py
2
2054
import os, pycurl, re, sys, shutil md=":" login=":" def dxDown(url, fullpath): c=pycurl.Curl() # 縮寫一下 c.setopt(c.FOLLOWLOCATION, True) # 允許重定向 c.setopt(pycurl.USERAGENT, b"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)") # 模擬瀏覽器 c.setopt(pycurl.URL, url) # 訪問指定網址 c.setopt(pycurl.COOKIEJAR, 'cookie.txt') # 把 cookie 存到文件 c.setopt(pycurl.COOKIEFILE, "cookie.txt") # 用文件掛 cookie f = open(fullpath, 'wb') # 定義一個文件 c.setopt(c.WRITEDATA, f) # 指定返回信息的寫入文件,或作 c.setopt(c.WRITEFUNCTION, f.write) c.perform() # 獲得服務器返回信息 f.close() if c.getinfo(pycurl.HTTP_CODE) != 200: os.remove(fullpath) print("Failed!") def getAtt(DXurl,md,login): g = open("temp.txt", 'wb') d=pycurl.Curl() d.setopt(d.FOLLOWLOCATION, True) d.setopt(pycurl.PROXY, md) # 掛代理 d.setopt(pycurl.PROXYUSERPWD, login) # 代理的用戶名密碼 d.setopt(pycurl.USERAGENT, b"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)") d.setopt(pycurl.URL, DXurl) d.setopt(pycurl.COOKIEJAR, 'dxcookie.txt') d.setopt(pycurl.COOKIEFILE, "dxcookie.txt") d.setopt(d.WRITEFUNCTION, g.write) d.perform() d.close() g.close() data=open("temp.txt","rU",encoding='UTF-8').read() jpgPath=re.compile(r'(?<=jpgPath:")[0-9a-zA-Z\/]+(?=")') strf=jpgPath.search(str(data)).group() title=re.compile(u'(?<=<title>)[^<]+(?=</title>)') bookname=title.search(str(data)).group() ss=re.compile(r'(?<=ssid:.)\d{8}') ssid=ss.search(str(data)).group() legurl="http://img.duxiu.com"+strf+"leg001?zoom=2" ssFolder="F:\\ss\\"+bookname+"_"+ssid+"\\" FolderExist=os.path.isdir(ssFolder) if FolderExist == False: os.mkdir(r'f:/ss/'+bookname+'_'+ssid+'/') legpath=ssFolder+"leg001.pdg" dxDown(legurl,legpath) shutil.move(ssFolder,"F:\\ss\\leg\\"+bookname+"_"+ssid+"\\") os.remove("temp.txt") os.remove("cookie.txt") os.remove("dxcookie.txt") def main(): getAtt(sys.argv[1],md,login) if __name__ == "__main__": main()
gpl-3.0
Detailscool/YHSpider
JiraStoryMaker/JiraStoryMaker2.py
1
5993
#!/usr/bin/python # -*- coding:utf-8 -*- # JiraStoryMaker.py # Created by Henry on 2018/4/9 # Description : from selenium import webdriver from selenium.webdriver.support.ui import Select from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By import os import json import time import sys reload(sys) sys.setdefaultencoding('utf-8') def create_story(**kwargs): summary_text = kwargs.get('summary_text', None) work_time_text = kwargs.get('work_time_text', None) REQ = kwargs.get('REQ', None) isFirst = kwargs.get('isFirst', False) time.sleep(1) new_button = driver.find_element_by_css_selector('#create_link') new_button.click() WebDriverWait(driver, 10000).until( EC.presence_of_element_located((By.CSS_SELECTOR, 'span.drop-menu')) ) drop_menus = driver.find_elements_by_css_selector('span.drop-menu') if isFirst: project = drop_menus[0] project.click() data_suggestions = driver.find_element_by_id('project-options').get_attribute('data-suggestions') items = json.loads(data_suggestions) # print items if isinstance(items, list) and items and isinstance(items[0], dict) and isinstance(items[0]['items'], list) and items[0]['items'] and isinstance(items[0]['items'][0], dict) and items[0]['items'][0]['label']: select_group = items[0]['items'][0]['label'] if u'IOSZHIBO' not in select_group: groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if 'IOSZHIBO' in a.text] # print '\ngroups:', groups if groups: groups[0].click() print 'click' time.sleep(0.5) else: project.click() story_type = driver.find_element_by_id('issuetype-single-select') story_type.click() story_type_groups = [a for a in driver.find_elements_by_css_selector('li a.aui-list-item-link') if u'故事'==a.text] if story_type_groups: story_type_groups[0].click() time.sleep(0.5) drop_menus = driver.find_elements_by_css_selector('span.drop-menu') if len(drop_menus) < 5: time.sleep(10) print '出错啦' sys.exit(1) test_type = Select(driver.find_element_by_id('customfield_10200')) test_type.select_by_value('10202') time.sleep(0.5) requirement = Select(driver.find_element_by_id('customfield_10101')) requirement.select_by_value('10101') time.sleep(0.5) summary = driver.find_element_by_id('summary') summary.send_keys(unicode(summary_text)) time.sleep(0.5) work_time = driver.find_element_by_id('customfield_10833') work_time.send_keys(work_time_text) time.sleep(0.5) sprint = drop_menus[5] sprint.click() sprint_groups = [] while not sprint_groups: time.sleep(0.5) sprint_groups = [a for a in driver.find_elements_by_css_selector('li a') if group in a.text and u'在用' in a.text] sprint_groups[0].click() time.sleep(0.5) # time.sleep(15) # code = driver.find_element_by_id('customfield_10503-3') # code.click() if REQ: question = driver.find_element_by_css_selector('#issuelinks-issues-multi-select textarea') question.send_keys(unicode(REQ)) time.sleep(0.5) items = driver.find_elements_by_css_selector('li.menu-item') if items and len(items) > 1: relationship_item = items[1] relationship_item.click() time.sleep(0.5) dev_person = driver.find_element_by_css_selector('#customfield_10300_container textarea') if dev_person and login_token.split('-'): dev_person.send_keys(login_token.split('-')[0]) time.sleep(0.5) tester_person = driver.find_element_by_css_selector('#customfield_10400_container textarea') if tester_person and tester: tester_person.send_keys(tester) time.sleep(0.5) submit = driver.find_element_by_id('create-issue-submit') submit.click() WebDriverWait(driver, 10000).until( EC.element_to_be_clickable((By.XPATH, '//*[@id="aui-flag-container"]/div/div/a')) ) story = driver.find_element_by_xpath('//*[@id="aui-flag-container"]/div/div/a') story_href = story.get_attribute('href') print summary_text, ': ', story_href # print '已建: ', summary_text, ', 时长, :', work_time_text, '天' driver.refresh() if __name__ == '__main__': login_token = sys.argv[1] file_path = sys.argv[2] tester = sys.argv[3] if not os.path.exists(file_path): print '出错啦' sys.exit(1) else: with open(file_path, 'r') as f: lines = f.readlines() f.close() if '-' not in login_token: print '出错啦' sys.exit(1) elif len(login_token.split('-')[-1]) != 32: print '出错啦' sys.exit(1) chrome_options = webdriver.ChromeOptions() # chrome_options.add_argument('--headless') driver = webdriver.Chrome(chrome_options=chrome_options) url = '' + login_token print url driver.get(url) # print driver.get_cookies() group = u'iOS直播服务组' for idx, line in enumerate(lines): if ',' in line and ',' not in line: words = line.encode('utf-8').strip().split(',') elif ',' in line and ',' not in line: words = line.encode('utf-8').strip().split(',') else: words = [] if len(words) == 2: create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), isFirst=(idx==0)) elif len(words) == 3: create_story(summary_text=words[0].strip(), work_time_text=words[1].strip(), REQ=words[2].strip(), isFirst=(idx==0)) driver.close()
mit
axbaretto/beam
sdks/python/.tox/py27gcp/lib/python2.7/site-packages/pbr/extra_files.py
145
1096
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from distutils import errors import os _extra_files = [] def get_extra_files(): global _extra_files return _extra_files def set_extra_files(extra_files): # Let's do a sanity check for filename in extra_files: if not os.path.exists(filename): raise errors.DistutilsFileError( '%s from the extra_files option in setup.cfg does not ' 'exist' % filename) global _extra_files _extra_files[:] = extra_files[:]
apache-2.0
sposh-science/pycode-browser
Code/Physics/spring2.py
6
1784
""" spring2.py The rk4_two() routine in this program does a two step integration using an array method. The current x and xprime values are kept in a global list named 'val'. val[0] = current position; val[1] = current velocity The results are compared with analytically calculated values. """ from pylab import * def accn(t, val): force = -spring_const * val[0] - damping * val[1] return force/mass def vel(t, val): return val[1] def rk4_two(t, h): # Time and Step value global xxp # x and xprime values in a 'xxp' k1 = [0,0] # initialize 5 empty lists. k2 = [0,0] k3 = [0,0] k4 = [0,0] tmp= [0,0] k1[0] = vel(t,xxp) k1[1] = accn(t,xxp) for i in range(2): # value of functions at t + h/2 tmp[i] = xxp[i] + k1[i] * h/2 k2[0] = vel(t + h/2, tmp) k2[1] = accn(t + h/2, tmp) for i in range(2): # value of functions at t + h/2 tmp[i] = xxp[i] + k2[i] * h/2 k3[0] = vel(t + h/2, tmp) k3[1] = accn(t + h/2, tmp) for i in range(2): # value of functions at t + h tmp[i] = xxp[i] + k3[i] * h k4[0] = vel(t+h, tmp) k4[1] = accn(t+h, tmp) for i in range(2): # value of functions at t + h xxp[i] = xxp[i] + ( k1[i] + \ 2.0*k2[i] + 2.0*k3[i] + k4[i]) * h/ 6.0 t = 0.0 # Stating time h = 0.01 # Runge-Kutta step size, time increment xxp = [2.0, 0.0] # initial position & velocity spring_const = 100.0 # spring constant mass = 2.0 # mass of the oscillating object damping = 0.0 tm = [0.0] # Lists to store time, position & velocity x = [xxp[0]] xp = [xxp[1]] xth = [xxp[0]] while t < 5: rk4_two(t,h) # Do one step RK integration t = t + h tm.append(t) xp.append(xxp[1]) x.append(xxp[0]) th = 2.0 * cos(sqrt(spring_const/mass)* (t)) xth.append(th) plot(tm,x) plot(tm,xth,'+') show()
gpl-3.0
ASCrookes/django
tests/gis_tests/distapp/tests.py
154
33547
from __future__ import unicode_literals from django.contrib.gis.db.models.functions import ( Area, Distance, Length, Perimeter, Transform, ) from django.contrib.gis.geos import GEOSGeometry, LineString, Point from django.contrib.gis.measure import D # alias for Distance from django.db import connection from django.db.models import Q from django.test import TestCase, ignore_warnings, skipUnlessDBFeature from django.utils.deprecation import RemovedInDjango20Warning from ..utils import no_oracle, oracle, postgis, spatialite from .models import ( AustraliaCity, CensusZipcode, Interstate, SouthTexasCity, SouthTexasCityFt, SouthTexasInterstate, SouthTexasZipcode, ) @skipUnlessDBFeature("gis_enabled") class DistanceTest(TestCase): fixtures = ['initial'] def setUp(self): # A point we are testing distances with -- using a WGS84 # coordinate that'll be implicitly transformed to that to # the coordinate system of the field, EPSG:32140 (Texas South Central # w/units in meters) self.stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326) # Another one for Australia self.au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326) def get_names(self, qs): cities = [c.name for c in qs] cities.sort() return cities def test_init(self): """ Test initialization of distance models. """ self.assertEqual(9, SouthTexasCity.objects.count()) self.assertEqual(9, SouthTexasCityFt.objects.count()) self.assertEqual(11, AustraliaCity.objects.count()) self.assertEqual(4, SouthTexasZipcode.objects.count()) self.assertEqual(4, CensusZipcode.objects.count()) self.assertEqual(1, Interstate.objects.count()) self.assertEqual(1, SouthTexasInterstate.objects.count()) @skipUnlessDBFeature("supports_dwithin_lookup") def test_dwithin(self): """ Test the `dwithin` lookup type. """ # Distances -- all should be equal (except for the # degree/meter pair in au_cities, that's somewhat # approximate). tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)] au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)] # Expected cities for Australia and Texas. tx_cities = ['Downtown Houston', 'Southside Place'] au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong'] # Performing distance queries on two projected coordinate systems one # with units in meters and the other in units of U.S. survey feet. for dist in tx_dists: if isinstance(dist, tuple): dist1, dist2 = dist else: dist1 = dist2 = dist qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1)) qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2)) for qs in qs1, qs2: self.assertEqual(tx_cities, self.get_names(qs)) # Now performing the `dwithin` queries on a geodetic coordinate system. for dist in au_dists: if isinstance(dist, D) and not oracle: type_error = True else: type_error = False if isinstance(dist, tuple): if oracle: dist = dist[1] else: dist = dist[0] # Creating the query set. qs = AustraliaCity.objects.order_by('name') if type_error: # A ValueError should be raised on PostGIS when trying to pass # Distance objects into a DWithin query using a geodetic field. self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count) else: self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist)))) @skipUnlessDBFeature("has_distance_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_distance_projected(self): """ Test the `distance` GeoQuerySet method on projected coordinate systems. """ # The point for La Grange, TX lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326) # Reference distances in feet and in meters. Got these values from # using the provided raw SQL statements. # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) # FROM distapp_southtexascity; m_distances = [147075.069813, 139630.198056, 140888.552826, 138809.684197, 158309.246259, 212183.594374, 70870.188967, 165337.758878, 139196.085105] # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) # FROM distapp_southtexascityft; # Oracle 11 thinks this is not a projected coordinate system, so it's # not tested. ft_distances = [482528.79154625, 458103.408123001, 462231.860397575, 455411.438904354, 519386.252102563, 696139.009211594, 232513.278304279, 542445.630586414, 456679.155883207] # Testing using different variations of parameters and using models # with different projected coordinate systems. dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point').order_by('id') dist2 = SouthTexasCity.objects.distance(lagrange).order_by('id') # Using GEOSGeometry parameter if spatialite or oracle: dist_qs = [dist1, dist2] else: dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt).order_by('id') # Using EWKT string parameter. dist4 = SouthTexasCityFt.objects.distance(lagrange).order_by('id') dist_qs = [dist1, dist2, dist3, dist4] # Original query done on PostGIS, have to adjust AlmostEqual tolerance # for Oracle. tol = 2 if oracle else 5 # Ensuring expected distances are returned for each distance queryset. for qs in dist_qs: for i, c in enumerate(qs): self.assertAlmostEqual(m_distances[i], c.distance.m, tol) self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol) @skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic") @ignore_warnings(category=RemovedInDjango20Warning) def test_distance_geodetic(self): """ Test the `distance` GeoQuerySet method on geodetic coordinate systems. """ tol = 2 if oracle else 5 # Testing geodetic distance calculation with a non-point geometry # (a LineString of Wollongong and Shellharbour coords). ls = LineString(((150.902, -34.4245), (150.87, -34.5789))) # Reference query: # SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) # FROM distapp_australiacity ORDER BY name; distances = [1120954.92533513, 140575.720018241, 640396.662906304, 60580.9693849269, 972807.955955075, 568451.8357838, 40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0] qs = AustraliaCity.objects.distance(ls).order_by('name') for city, distance in zip(qs, distances): # Testing equivalence to within a meter. self.assertAlmostEqual(distance, city.distance.m, 0) # Got the reference distances using the raw SQL statements: # SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), # 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11)); # SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) # FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0): # PROJ.4 versions 4.7+ have updated datums, and thus different # distance values. spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404, 90847.4358768573, 217402.811919332, 709599.234564757, 640011.483550888, 7772.00667991925, 1047861.78619339, 1165126.55236034] sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719, 90804.7533823494, 217713.384600405, 709134.127242793, 639828.157159169, 7786.82949717788, 1049204.06569028, 1162623.7238134] else: spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115, 90847.435881812, 217402.811862568, 709599.234619957, 640011.483583758, 7772.00667666425, 1047861.7859506, 1165126.55237647] sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184, 90804.4414289463, 217712.63666124, 709131.691061906, 639825.959074112, 7786.80274606706, 1049200.46122281, 1162619.7297006] # Testing with spheroid distances first. hillsdale = AustraliaCity.objects.get(name='Hillsdale') qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True).order_by('id') for i, c in enumerate(qs): self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol) if postgis: # PostGIS uses sphere-only distances by default, testing these as well. qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point).order_by('id') for i, c in enumerate(qs): self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol) @no_oracle # Oracle already handles geographic distance calculation. @skipUnlessDBFeature("has_distance_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_distance_transform(self): """ Test the `distance` GeoQuerySet method used with `transform` on a geographic field. """ # We'll be using a Polygon (created by buffering the centroid # of 77005 to 100m) -- which aren't allowed in geographic distance # queries normally, however our field has been transformed to # a non-geographic system. z = SouthTexasZipcode.objects.get(name='77005') # Reference query: # SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), # ST_GeomFromText('<buffer_wkt>', 32140)) # FROM "distapp_censuszipcode"; dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242] # Having our buffer in the SRID of the transformation and of the field # -- should get the same results. The first buffer has no need for # transformation SQL because it is the same SRID as what was given # to `transform()`. The second buffer will need to be transformed, # however. buf1 = z.poly.centroid.buffer(100) buf2 = buf1.transform(4269, clone=True) ref_zips = ['77002', '77025', '77401'] for buf in [buf1, buf2]: qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf).order_by('name') self.assertListEqual(ref_zips, self.get_names(qs)) for i, z in enumerate(qs): self.assertAlmostEqual(z.distance.m, dists_m[i], 5) @skipUnlessDBFeature("supports_distances_lookups") def test_distance_lookups(self): """ Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types. """ # Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole' # (thus, Houston and Southside place will be excluded as tested in # the `test02_dwithin` above). qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter( point__distance_lte=(self.stx_pnt, D(km=20)), ) # Can't determine the units on SpatiaLite from PROJ.4 string, and # Oracle 11 incorrectly thinks it is not projected. if spatialite or oracle: dist_qs = (qs1,) else: qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter( point__distance_lte=(self.stx_pnt, D(km=20)), ) dist_qs = (qs1, qs2) for qs in dist_qs: cities = self.get_names(qs) self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place']) # Doing a distance query using Polygons instead of a Point. z = SouthTexasZipcode.objects.get(name='77005') qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275))) self.assertEqual(['77025', '77401'], self.get_names(qs)) # If we add a little more distance 77002 should be included. qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300))) self.assertEqual(['77002', '77025', '77401'], self.get_names(qs)) @skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic") def test_geodetic_distance_lookups(self): """ Test distance lookups on geodetic coordinate systems. """ # Line is from Canberra to Sydney. Query is for all other cities within # a 100km of that line (which should exclude only Hobart & Adelaide). line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326) dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100))) self.assertEqual(9, dist_qs.count()) self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale', 'Melbourne', 'Mittagong', 'Shellharbour', 'Sydney', 'Thirroul', 'Wollongong'], self.get_names(dist_qs)) # Too many params (4 in this case) should raise a ValueError. queryset = AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')) self.assertRaises(ValueError, len, queryset) # Not enough params should raise a ValueError. self.assertRaises(ValueError, len, AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',))) # Getting all cities w/in 550 miles of Hobart. hobart = AustraliaCity.objects.get(name='Hobart') qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550))) cities = self.get_names(qs) self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne']) # Cities that are either really close or really far from Wollongong -- # and using different units of distance. wollongong = AustraliaCity.objects.get(name='Wollongong') d1, d2 = D(yd=19500), D(nm=400) # Yards (~17km) & Nautical miles. # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS. gq1 = Q(point__distance_lte=(wollongong.point, d1)) gq2 = Q(point__distance_gte=(wollongong.point, d2)) qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2) # Geodetic distance lookup but telling GeoDjango to use `distance_spheroid` # instead (we should get the same results b/c accuracy variance won't matter # in this test case). querysets = [qs1] if connection.features.has_distance_spheroid_method: gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid')) gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid')) qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4) querysets.append(qs2) for qs in querysets: cities = self.get_names(qs) self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul']) @skipUnlessDBFeature("has_area_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_area(self): """ Test the `area` GeoQuerySet method. """ # Reference queries: # SELECT ST_Area(poly) FROM distapp_southtexaszipcode; area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461] # Tolerance has to be lower for Oracle tol = 2 for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').area()): self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol) @skipUnlessDBFeature("has_length_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_length(self): """ Test the `length` GeoQuerySet method. """ # Reference query (should use `length_spheroid`). # SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, # AUTHORITY["EPSG","7030"]]'); len_m1 = 473504.769553813 len_m2 = 4617.668 if connection.features.supports_distance_geodetic: qs = Interstate.objects.length() tol = 2 if oracle else 3 self.assertAlmostEqual(len_m1, qs[0].length.m, tol) else: # Does not support geodetic coordinate systems. self.assertRaises(ValueError, Interstate.objects.length) # Now doing length on a projected coordinate system. i10 = SouthTexasInterstate.objects.length().get(name='I-10') self.assertAlmostEqual(len_m2, i10.length.m, 2) @skipUnlessDBFeature("has_perimeter_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_perimeter(self): """ Test the `perimeter` GeoQuerySet method. """ # Reference query: # SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode; perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697] tol = 2 if oracle else 7 for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').perimeter()): self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol) # Running on points; should return 0. for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')): self.assertEqual(0, c.perim.m) @skipUnlessDBFeature("has_area_method", "has_distance_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_measurement_null_fields(self): """ Test the measurement GeoQuerySet methods on fields with NULL values. """ # Creating SouthTexasZipcode w/NULL value. SouthTexasZipcode.objects.create(name='78212') # Performing distance/area queries against the NULL PolygonField, # and ensuring the result of the operations is None. htown = SouthTexasCity.objects.get(name='Downtown Houston') z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212') self.assertIsNone(z.distance) self.assertIsNone(z.area) @skipUnlessDBFeature("has_distance_method") @ignore_warnings(category=RemovedInDjango20Warning) def test_distance_order_by(self): qs = SouthTexasCity.objects.distance(Point(3, 3)).order_by( 'distance' ).values_list('name', flat=True).filter(name__in=('San Antonio', 'Pearland')) self.assertQuerysetEqual(qs, ['San Antonio', 'Pearland'], lambda x: x) ''' ============================= Distance functions on PostGIS ============================= | Projected Geometry | Lon/lat Geometry | Geography (4326) ST_Distance(geom1, geom2) | OK (meters) | :-( (degrees) | OK (meters) ST_Distance(geom1, geom2, use_spheroid=False) | N/A | N/A | OK (meters), less accurate, quick Distance_Sphere(geom1, geom2) | N/A | OK (meters) | N/A Distance_Spheroid(geom1, geom2, spheroid) | N/A | OK (meters) | N/A ================================ Distance functions on Spatialite ================================ | Projected Geometry | Lon/lat Geometry ST_Distance(geom1, geom2) | OK (meters) | N/A ST_Distance(geom1, geom2, use_ellipsoid=True) | N/A | OK (meters) ST_Distance(geom1, geom2, use_ellipsoid=False) | N/A | OK (meters), less accurate, quick ''' @skipUnlessDBFeature("gis_enabled") class DistanceFunctionsTests(TestCase): fixtures = ['initial'] @skipUnlessDBFeature("has_Area_function") def test_area(self): # Reference queries: # SELECT ST_Area(poly) FROM distapp_southtexaszipcode; area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461] # Tolerance has to be lower for Oracle tol = 2 for i, z in enumerate(SouthTexasZipcode.objects.annotate(area=Area('poly')).order_by('name')): # MySQL is returning a raw float value self.assertAlmostEqual(area_sq_m[i], z.area.sq_m if hasattr(z.area, 'sq_m') else z.area, tol) @skipUnlessDBFeature("has_Distance_function") def test_distance_simple(self): """ Test a simple distance query, with projected coordinates and without transformation. """ lagrange = GEOSGeometry('POINT(805066.295722839 4231496.29461335)', 32140) houston = SouthTexasCity.objects.annotate(dist=Distance('point', lagrange)).order_by('id').first() tol = 2 if oracle else 5 self.assertAlmostEqual( houston.dist.m if hasattr(houston.dist, 'm') else houston.dist, 147075.069813, tol ) @skipUnlessDBFeature("has_Distance_function", "has_Transform_function") def test_distance_projected(self): """ Test the `Distance` function on projected coordinate systems. """ # The point for La Grange, TX lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326) # Reference distances in feet and in meters. Got these values from # using the provided raw SQL statements. # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) # FROM distapp_southtexascity; m_distances = [147075.069813, 139630.198056, 140888.552826, 138809.684197, 158309.246259, 212183.594374, 70870.188967, 165337.758878, 139196.085105] # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) # FROM distapp_southtexascityft; # Oracle 11 thinks this is not a projected coordinate system, so it's # not tested. ft_distances = [482528.79154625, 458103.408123001, 462231.860397575, 455411.438904354, 519386.252102563, 696139.009211594, 232513.278304279, 542445.630586414, 456679.155883207] # Testing using different variations of parameters and using models # with different projected coordinate systems. dist1 = SouthTexasCity.objects.annotate(distance=Distance('point', lagrange)).order_by('id') if spatialite or oracle: dist_qs = [dist1] else: dist2 = SouthTexasCityFt.objects.annotate(distance=Distance('point', lagrange)).order_by('id') # Using EWKT string parameter. dist3 = SouthTexasCityFt.objects.annotate(distance=Distance('point', lagrange.ewkt)).order_by('id') dist_qs = [dist1, dist2, dist3] # Original query done on PostGIS, have to adjust AlmostEqual tolerance # for Oracle. tol = 2 if oracle else 5 # Ensuring expected distances are returned for each distance queryset. for qs in dist_qs: for i, c in enumerate(qs): self.assertAlmostEqual(m_distances[i], c.distance.m, tol) self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol) @skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic") def test_distance_geodetic(self): """ Test the `Distance` function on geodetic coordinate systems. """ # Testing geodetic distance calculation with a non-point geometry # (a LineString of Wollongong and Shellharbour coords). ls = LineString(((150.902, -34.4245), (150.87, -34.5789)), srid=4326) # Reference query: # SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) # FROM distapp_australiacity ORDER BY name; distances = [1120954.92533513, 140575.720018241, 640396.662906304, 60580.9693849269, 972807.955955075, 568451.8357838, 40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0] qs = AustraliaCity.objects.annotate(distance=Distance('point', ls)).order_by('name') for city, distance in zip(qs, distances): # Testing equivalence to within a meter. self.assertAlmostEqual(distance, city.distance.m, 0) @skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic") def test_distance_geodetic_spheroid(self): tol = 2 if oracle else 5 # Got the reference distances using the raw SQL statements: # SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), # 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11)); # SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) # FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0): # PROJ.4 versions 4.7+ have updated datums, and thus different # distance values. spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404, 90847.4358768573, 217402.811919332, 709599.234564757, 640011.483550888, 7772.00667991925, 1047861.78619339, 1165126.55236034] sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719, 90804.7533823494, 217713.384600405, 709134.127242793, 639828.157159169, 7786.82949717788, 1049204.06569028, 1162623.7238134] else: spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115, 90847.435881812, 217402.811862568, 709599.234619957, 640011.483583758, 7772.00667666425, 1047861.7859506, 1165126.55237647] sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184, 90804.4414289463, 217712.63666124, 709131.691061906, 639825.959074112, 7786.80274606706, 1049200.46122281, 1162619.7297006] # Testing with spheroid distances first. hillsdale = AustraliaCity.objects.get(name='Hillsdale') qs = AustraliaCity.objects.exclude(id=hillsdale.id).annotate( distance=Distance('point', hillsdale.point, spheroid=True) ).order_by('id') for i, c in enumerate(qs): self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol) if postgis: # PostGIS uses sphere-only distances by default, testing these as well. qs = AustraliaCity.objects.exclude(id=hillsdale.id).annotate( distance=Distance('point', hillsdale.point) ).order_by('id') for i, c in enumerate(qs): self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol) @no_oracle # Oracle already handles geographic distance calculation. @skipUnlessDBFeature("has_Distance_function", 'has_Transform_function') def test_distance_transform(self): """ Test the `Distance` function used with `Transform` on a geographic field. """ # We'll be using a Polygon (created by buffering the centroid # of 77005 to 100m) -- which aren't allowed in geographic distance # queries normally, however our field has been transformed to # a non-geographic system. z = SouthTexasZipcode.objects.get(name='77005') # Reference query: # SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), # ST_GeomFromText('<buffer_wkt>', 32140)) # FROM "distapp_censuszipcode"; dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242] # Having our buffer in the SRID of the transformation and of the field # -- should get the same results. The first buffer has no need for # transformation SQL because it is the same SRID as what was given # to `transform()`. The second buffer will need to be transformed, # however. buf1 = z.poly.centroid.buffer(100) buf2 = buf1.transform(4269, clone=True) ref_zips = ['77002', '77025', '77401'] for buf in [buf1, buf2]: qs = CensusZipcode.objects.exclude(name='77005').annotate( distance=Distance(Transform('poly', 32140), buf) ).order_by('name') self.assertEqual(ref_zips, sorted([c.name for c in qs])) for i, z in enumerate(qs): self.assertAlmostEqual(z.distance.m, dists_m[i], 5) @skipUnlessDBFeature("has_Distance_function") def test_distance_order_by(self): qs = SouthTexasCity.objects.annotate(distance=Distance('point', Point(3, 3, srid=32140))).order_by( 'distance' ).values_list('name', flat=True).filter(name__in=('San Antonio', 'Pearland')) self.assertQuerysetEqual(qs, ['San Antonio', 'Pearland'], lambda x: x) @skipUnlessDBFeature("has_Length_function") def test_length(self): """ Test the `Length` function. """ # Reference query (should use `length_spheroid`). # SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, # AUTHORITY["EPSG","7030"]]'); len_m1 = 473504.769553813 len_m2 = 4617.668 if connection.features.supports_length_geodetic: qs = Interstate.objects.annotate(length=Length('path')) tol = 2 if oracle else 3 self.assertAlmostEqual(len_m1, qs[0].length.m, tol) # TODO: test with spheroid argument (True and False) else: # Does not support geodetic coordinate systems. with self.assertRaises(NotImplementedError): list(Interstate.objects.annotate(length=Length('path'))) # Now doing length on a projected coordinate system. i10 = SouthTexasInterstate.objects.annotate(length=Length('path')).get(name='I-10') self.assertAlmostEqual(len_m2, i10.length.m if isinstance(i10.length, D) else i10.length, 2) self.assertTrue( SouthTexasInterstate.objects.annotate(length=Length('path')).filter(length__gt=4000).exists() ) @skipUnlessDBFeature("has_Perimeter_function") def test_perimeter(self): """ Test the `Perimeter` function. """ # Reference query: # SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode; perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697] tol = 2 if oracle else 7 qs = SouthTexasZipcode.objects.annotate(perimeter=Perimeter('poly')).order_by('name') for i, z in enumerate(qs): self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol) # Running on points; should return 0. qs = SouthTexasCity.objects.annotate(perim=Perimeter('point')) for city in qs: self.assertEqual(0, city.perim.m) @skipUnlessDBFeature("supports_null_geometries", "has_Area_function", "has_Distance_function") def test_measurement_null_fields(self): """ Test the measurement functions on fields with NULL values. """ # Creating SouthTexasZipcode w/NULL value. SouthTexasZipcode.objects.create(name='78212') # Performing distance/area queries against the NULL PolygonField, # and ensuring the result of the operations is None. htown = SouthTexasCity.objects.get(name='Downtown Houston') z = SouthTexasZipcode.objects.annotate( distance=Distance('poly', htown.point), area=Area('poly') ).get(name='78212') self.assertIsNone(z.distance) self.assertIsNone(z.area)
bsd-3-clause
citrix-openstack-build/ceilometer
tests/storage/test_impl_hbase.py
3
1963
# -*- encoding: utf-8 -*- # # Copyright © 2012, 2013 Dell Inc. # # Author: Stas Maksimov <Stanislav_M@dell.com> # Author: Shengjie Min <Shengjie_Min@dell.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/impl_hbase.py .. note:: In order to run the tests against real HBase server set the environment variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before running the tests. Make sure the Thrift server is running on that server. """ from oslo.config import cfg from ceilometer.storage.impl_hbase import Connection from ceilometer.storage.impl_hbase import MConnection from ceilometer.tests import db as tests_db class HBaseEngineTestBase(tests_db.TestBase): database_connection = 'hbase://__test__' class ConnectionTest(HBaseEngineTestBase): def test_hbase_connection(self): cfg.CONF.database.connection = self.database_connection conn = Connection(cfg.CONF) self.assertIsInstance(conn.conn, MConnection) class TestConn(object): def __init__(self, host, port): self.netloc = '%s:%s' % (host, port) def open(self): pass cfg.CONF.database.connection = 'hbase://test_hbase:9090' self.stubs.Set(Connection, '_get_connection', lambda self, x: TestConn(x['host'], x['port'])) conn = Connection(cfg.CONF) self.assertIsInstance(conn.conn, TestConn)
apache-2.0
srluge/SickRage
lib/sqlalchemy/dialects/mysql/cymysql.py
78
2352
# mysql/cymysql.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+cymysql :name: CyMySQL :dbapi: cymysql :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>] :url: https://github.com/nakagami/CyMySQL """ import re from .mysqldb import MySQLDialect_mysqldb from .base import (BIT, MySQLDialect) from ... import util class _cymysqlBIT(BIT): def result_processor(self, dialect, coltype): """Convert a MySQL's 64 bit, variable length binary string to a long. """ def process(value): if value is not None: v = 0 for i in util.iterbytes(value): v = v << 8 | i return v return value return process class MySQLDialect_cymysql(MySQLDialect_mysqldb): driver = 'cymysql' description_encoding = None supports_sane_rowcount = True supports_sane_multi_rowcount = False supports_unicode_statements = True colspecs = util.update_copy( MySQLDialect.colspecs, { BIT: _cymysqlBIT, } ) @classmethod def dbapi(cls): return __import__('cymysql') def _get_server_version_info(self, connection): dbapi_con = connection.connection version = [] r = re.compile('[.\-]') for n in r.split(dbapi_con.server_version): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) def _detect_charset(self, connection): return connection.connection.charset def _extract_error_code(self, exception): return exception.errno def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.OperationalError): return self._extract_error_code(e) in \ (2006, 2013, 2014, 2045, 2055) elif isinstance(e, self.dbapi.InterfaceError): # if underlying connection is closed, # this is the error you get return True else: return False dialect = MySQLDialect_cymysql
gpl-3.0
NCBI-Hackathons/Pharmacogenomics_Prediction_Pipeline_P3
doc/source/conf.py
4
9581
# -*- coding: utf-8 -*- # # Pharmacogenomics Prediction Pipeline (P3) documentation build configuration file, created by # sphinx-quickstart on Thu Aug 13 09:37:04 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Pharmacogenomics Prediction Pipeline (P3)' copyright = u'2015, various' author = u'various' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'PharmacogenomicsPredictionPipelineP3doc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'PharmacogenomicsPredictionPipelineP3.tex', u'Pharmacogenomics Prediction Pipeline (P3) Documentation', u'various', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pharmacogenomicspredictionpipelinep3', u'Pharmacogenomics Prediction Pipeline (P3) Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'PharmacogenomicsPredictionPipelineP3', u'Pharmacogenomics Prediction Pipeline (P3) Documentation', author, 'PharmacogenomicsPredictionPipelineP3', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
cc0-1.0
xu6148152/Binea_Python_Project
automate the boring stuff with python/automate_online-materials/phoneAndEmail.py
4
1235
#! python3 # phoneAndEmail.py - Finds phone numbers and email addresses on the clipboard. import pyperclip, re phoneRegex = re.compile(r'''( (\d{3}|\(\d{3}\))? # area code (\s|-|\.)? # separator (\d{3}) # first 3 digits (\s|-|\.) # separator (\d{4}) # last 4 digits (\s*(ext|x|ext.)\s*(\d{2,5}))? # extension )''', re.VERBOSE) # Create email regex. emailRegex = re.compile(r'''( [a-zA-Z0-9._%+-]+ # username @ # @ symbol [a-zA-Z0-9.-]+ # domain name (\.[a-zA-Z]{2,4}){1,2} # dot-something )''', re.VERBOSE) # Find matches in clipboard text. text = str(pyperclip.paste()) matches = [] for groups in phoneRegex.findall(text): phoneNum = '-'.join([groups[1], groups[3], groups[5]]) if groups[8] != '': phoneNum += ' x' + groups[8] matches.append(phoneNum) for groups in emailRegex.findall(text): matches.append(groups[0]) # Copy results to the clipboard. if len(matches) > 0: pyperclip.copy('\n'.join(matches)) print('Copied to clipboard:') print('\n'.join(matches)) else: print('No phone numbers or email addresses found.')
mit
sdgathman/pymilter
testsample.py
1
5060
import unittest import Milter import sample import template import mime import zipfile from Milter.test import TestBase from Milter.testctx import TestCtx class TestMilter(TestBase,sample.sampleMilter): def __init__(self): TestBase.__init__(self) sample.sampleMilter.__init__(self) class BMSMilterTestCase(unittest.TestCase): def setUp(self): self.zf = zipfile.ZipFile('test/virus.zip','r') self.zf.setpassword(b'denatured') def tearDown(self): self.zf.close() self.zf = None def testTemplate(self,fname='test2'): ctx = TestCtx() Milter.factory = template.myMilter ctx._setsymval('{auth_authen}','batman') ctx._setsymval('{auth_type}','batcomputer') ctx._setsymval('j','mailhost') count = 10 while count > 0: rc = ctx._connect(helo='milter-template.example.org') self.assertEquals(rc,Milter.CONTINUE) with open('test/'+fname,'rb') as fp: rc = ctx._feedFile(fp) milter = ctx.getpriv() self.assertFalse(ctx._bodyreplaced,"Message body replaced") ctx._close() count -= 1 def testHeader(self,fname='utf8'): ctx = TestCtx() Milter.factory = sample.sampleMilter ctx._setsymval('{auth_authen}','batman') ctx._setsymval('{auth_type}','batcomputer') ctx._setsymval('j','mailhost') rc = ctx._connect() self.assertEquals(rc,Milter.CONTINUE) with open('test/'+fname,'rb') as fp: rc = ctx._feedFile(fp) milter = ctx.getpriv() self.assertFalse(ctx._bodyreplaced,"Message body replaced") fp = ctx._body with open('test/'+fname+".tstout","wb") as ofp: ofp.write(fp.getvalue()) ctx._close() def testCtx(self,fname='virus1'): ctx = TestCtx() Milter.factory = sample.sampleMilter ctx._setsymval('{auth_authen}','batman') ctx._setsymval('{auth_type}','batcomputer') ctx._setsymval('j','mailhost') rc = ctx._connect() self.assertTrue(rc == Milter.CONTINUE) with self.zf.open(fname) as fp: rc = ctx._feedFile(fp) milter = ctx.getpriv() # self.assertTrue(milter.user == 'batman',"getsymval failed: "+ # "%s != %s"%(milter.user,'batman')) self.assertEquals(milter.user,'batman') self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed") self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(ctx._bodyreplaced,"Message body not replaced") fp = ctx._body with open('test/'+fname+".tstout","wb") as f: f.write(fp.getvalue()) #self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read()) fp.seek(0) msg = mime.message_from_file(fp) s = msg.get_payload(1).get_payload() milter.log(s) ctx._close() def testDefang(self,fname='virus1'): milter = TestMilter() milter.setsymval('{auth_authen}','batman') milter.setsymval('{auth_type}','batcomputer') milter.setsymval('j','mailhost') rc = milter.connect() self.assertTrue(rc == Milter.CONTINUE) with self.zf.open(fname) as fp: rc = milter.feedFile(fp) self.assertTrue(milter.user == 'batman',"getsymval failed") # setsymlist not working in TestBase #self.assertTrue(milter.auth_type != 'batcomputer',"setsymlist failed") self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(milter._bodyreplaced,"Message body not replaced") fp = milter._body with open('test/'+fname+".tstout","wb") as f: f.write(fp.getvalue()) #self.assertTrue(fp.getvalue() == open("test/virus1.out","r").read()) fp.seek(0) msg = mime.message_from_file(fp) s = msg.get_payload(1).get_payload() milter.log(s) milter.close() def testParse(self,fname='spam7'): milter = TestMilter() milter.connect('somehost') rc = milter.feedMsg(fname) self.assertTrue(rc == Milter.ACCEPT) self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.") fp = milter._body with open('test/'+fname+".tstout","wb") as f: f.write(fp.getvalue()) milter.close() def testDefang2(self): milter = TestMilter() milter.connect('somehost') rc = milter.feedMsg('samp1') self.assertTrue(rc == Milter.ACCEPT) self.assertFalse(milter._bodyreplaced,"Milter needlessly replaced body.") with self.zf.open("virus3") as fp: rc = milter.feedFile(fp) self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(milter._bodyreplaced,"Message body not replaced") fp = milter._body with open("test/virus3.tstout","wb") as f: f.write(fp.getvalue()) #self.assertTrue(fp.getvalue() == open("test/virus3.out","r").read()) with self.zf.open("virus6") as fp: rc = milter.feedFile(fp) self.assertTrue(rc == Milter.ACCEPT) self.assertTrue(milter._bodyreplaced,"Message body not replaced") self.assertTrue(milter._headerschanged,"Message headers not adjusted") fp = milter._body with open("test/virus6.tstout","wb") as f: f.write(fp.getvalue()) milter.close() def suite(): return unittest.makeSuite(BMSMilterTestCase,'test') if __name__ == '__main__': unittest.main()
gpl-2.0
Gagnavarslan/djangosaml2
tests/testprofiles/tests.py
9
2048
# Copyright (C) 2012 Sam Bull (lsb@pocketuniverse.ca) # Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es) # Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.contrib.auth.models import User from django.test import TestCase from djangosaml2.backends import Saml2Backend from testprofiles.models import TestProfile class Saml2BackendTests(TestCase): def test_update_user(self): # we need a user user = User.objects.create(username='john') backend = Saml2Backend() attribute_mapping = { 'uid': ('username', ), 'mail': ('email', ), 'cn': ('first_name', ), 'sn': ('last_name', ), } attributes = { 'uid': ('john', ), 'mail': ('john@example.com', ), 'cn': ('John', ), 'sn': ('Doe', ), } backend.update_user(user, attributes, attribute_mapping) self.assertEquals(user.email, 'john@example.com') self.assertEquals(user.first_name, 'John') self.assertEquals(user.last_name, 'Doe') # now we create a user profile and link it to the user profile = TestProfile.objects.create(user=user) self.assertNotEquals(profile, None) attribute_mapping['saml_age'] = ('age', ) attributes['saml_age'] = ('22', ) backend.update_user(user, attributes, attribute_mapping) self.assertEquals(user.get_profile().age, '22')
apache-2.0
bobobox/ansible
lib/ansible/plugins/action/vyos.py
5
4465
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import sys import copy from ansible.plugins.action.normal import ActionModule as _ActionModule from ansible.utils.path import unfrackpath from ansible.plugins import connection_loader from ansible.compat.six import iteritems from ansible.module_utils.vyos import vyos_argument_spec from ansible.module_utils.basic import AnsibleFallbackNotFound from ansible.module_utils._text import to_bytes try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): if self._play_context.connection != 'local': return dict( failed=True, msg='invalid connection specified, expected connection=local, ' 'got %s' % self._play_context.connection ) provider = self.load_provider() pc = copy.deepcopy(self._play_context) pc.connection = 'network_cli' pc.network_os = 'vyos' pc.port = provider['port'] or self._play_context.port or 22 pc.remote_user = provider['username'] or self._play_context.connection_user pc.password = provider['password'] or self._play_context.password pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file pc.timeout = provider['timeout'] or self._play_context.timeout connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) socket_path = self._get_socket_path(pc) display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) if not os.path.exists(socket_path): # start the connection if it isn't started rc, out, err = connection.exec_command('open_shell()') if rc != 0: return {'failed': True, 'msg': 'unable to connect to control socket'} else: # make sure we are in the right cli context which should be # enable mode and not config module rc, out, err = connection.exec_command('prompt()') while str(out).strip().endswith('#'): display.debug('wrong context, sending exit to device', self._play_context.remote_addr) connection.exec_command('exit') rc, out, err = connection.exec_command('prompt()') task_vars['ansible_socket'] = socket_path return super(ActionModule, self).run(tmp, task_vars) def _get_socket_path(self, play_context): ssh = connection_loader.get('ssh', class_only=True) cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user) path = unfrackpath("$HOME/.ansible/pc") return cp % dict(directory=path) def load_provider(self): provider = self._task.args.get('provider', {}) for key, value in iteritems(vyos_argument_spec): if key != 'provider' and key not in provider: if key in self._task.args: provider[key] = self._task.args[key] elif 'fallback' in value: provider[key] = self._fallback(value['fallback']) elif key not in provider: provider[key] = None return provider def _fallback(self, fallback): strategy = fallback[0] args = [] kwargs = {} for item in fallback[1:]: if isinstance(item, dict): kwargs = item else: args = item try: return strategy(*args, **kwargs) except AnsibleFallbackNotFound: pass
gpl-3.0
damonkohler/sl4a
python/src/Lib/lib-tk/Tkconstants.py
375
1493
# Symbolic constants for Tk # Booleans NO=FALSE=OFF=0 YES=TRUE=ON=1 # -anchor and -sticky N='n' S='s' W='w' E='e' NW='nw' SW='sw' NE='ne' SE='se' NS='ns' EW='ew' NSEW='nsew' CENTER='center' # -fill NONE='none' X='x' Y='y' BOTH='both' # -side LEFT='left' TOP='top' RIGHT='right' BOTTOM='bottom' # -relief RAISED='raised' SUNKEN='sunken' FLAT='flat' RIDGE='ridge' GROOVE='groove' SOLID = 'solid' # -orient HORIZONTAL='horizontal' VERTICAL='vertical' # -tabs NUMERIC='numeric' # -wrap CHAR='char' WORD='word' # -align BASELINE='baseline' # -bordermode INSIDE='inside' OUTSIDE='outside' # Special tags, marks and insert positions SEL='sel' SEL_FIRST='sel.first' SEL_LAST='sel.last' END='end' INSERT='insert' CURRENT='current' ANCHOR='anchor' ALL='all' # e.g. Canvas.delete(ALL) # Text widget and button states NORMAL='normal' DISABLED='disabled' ACTIVE='active' # Canvas state HIDDEN='hidden' # Menu item types CASCADE='cascade' CHECKBUTTON='checkbutton' COMMAND='command' RADIOBUTTON='radiobutton' SEPARATOR='separator' # Selection modes for list boxes SINGLE='single' BROWSE='browse' MULTIPLE='multiple' EXTENDED='extended' # Activestyle for list boxes # NONE='none' is also valid DOTBOX='dotbox' UNDERLINE='underline' # Various canvas styles PIESLICE='pieslice' CHORD='chord' ARC='arc' FIRST='first' LAST='last' BUTT='butt' PROJECTING='projecting' ROUND='round' BEVEL='bevel' MITER='miter' # Arguments to xview/yview MOVETO='moveto' SCROLL='scroll' UNITS='units' PAGES='pages'
apache-2.0
UManPychron/pychron
pychron/hardware/arduino/arduino_valve_actuator.py
2
1597
# =============================================================================== # Copyright 2011 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= # from traits.api import HasTraits, on_trait_change,Str,Int,Float,Button # from traitsui.api import View,Item,Group,HGroup,VGroup # ============= standard library imports ======================== # ============= local library imports ========================== from __future__ import absolute_import from .arduino_gp_actuator import ArduinoGPActuator class ArduinoValveActuator(ArduinoGPActuator): def get_open_indicator_state(self, obj): ''' ''' pass def get_closed_indicator_state(self, obj): ''' ''' pass # def get_hard_lock_indicator_state(self, obj): # ''' # ''' # cmd = 'A{}'.format(obj.name) # return self.ask(cmd, verbose=False) == '1' # ============= EOF ====================================
apache-2.0
ryanahall/django
django/db/migrations/executor.py
12
8846
from __future__ import unicode_literals from django.apps.registry import apps as global_apps from django.db import migrations from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor(object): """ End-to-end migration execution - loads migrations, and runs them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, returns a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = set() else: applied = set(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied.add(migration) return plan def migrate(self, targets, plan=None, fake=False, fake_initial=False): """ Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ if plan is None: plan = self.migration_plan(targets) migrations_to_run = {m[0] for m in plan} # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) # Holds all states right before and right after a migration is applied # if the migration is being run. states = {} state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) if self.progress_callback: self.progress_callback("render_start") state.apps # Render all real_apps -- performance critical if self.progress_callback: self.progress_callback("render_success") # Phase 1 -- Store all required states for migration, _ in full_plan: if migration in migrations_to_run: states[migration] = state.clone() state = migration.mutate_state(state) # state is cloned inside # Phase 2 -- Run the migrations for migration, backwards in plan: if not backwards: self.apply_migration(states[migration], migration, fake=fake, fake_initial=fake_initial) else: self.unapply_migration(states[migration], migration, fake=fake) def collect_sql(self, plan): """ Takes a migration plan and returns a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True) as schema_editor: if state is None: state = self.loader.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements def apply_migration(self, state, migration, fake=False, fake_initial=False): """ Runs a migration forwards. """ if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor() as schema_editor: state = migration.apply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def unapply_migration(self, state, migration, fake=False): """ Runs a migration backwards. """ if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor() as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) else: self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def detect_soft_applied(self, project_state, migration): """ Tests whether a migration has been implicitly applied - that the tables it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel). """ # Bail if the migration isn't the first one in its app if [name for app, name in migration.dependencies if app == migration.app_label]: return False, project_state if project_state is None: after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_migration = False # Make sure all create model are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()): return False, project_state found_create_migration = True # If we get this far and we found at least one CreateModel migration, # the migration is considered implicitly applied. return found_create_migration, after_state
bsd-3-clause
Inspq/ansible
lib/ansible/modules/storage/netapp/sf_volume_manager.py
49
11480
#!/usr/bin/python # (c) 2017, NetApp, Inc # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: sf_volume_manager short_description: Manage SolidFire volumes extends_documentation_fragment: - netapp.solidfire version_added: '2.3' author: Sumit Kumar (sumit4@netapp.com) description: - Create, destroy, or update volumes on SolidFire options: state: description: - Whether the specified volume should exist or not. required: true choices: ['present', 'absent'] name: description: - The name of the volume to manage. required: true account_id: description: - Account ID for the owner of this volume. required: true 512emulation: description: - Should the volume provide 512-byte sector emulation? - Required when C(state=present) required: false qos: description: Initial quality of service settings for this volume. required: false default: None attributes: description: A YAML dictionary of attributes that you would like to apply on this volume. required: false default: None volume_id: description: - The ID of the volume to manage or update. - In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id) parameter with an arbitary value. However, the specified volume_id will not be assigned to the newly created volume (since it's an auto-generated property). required: false default: None size: description: - The size of the volume in (size_unit). - Required when C(state = present). required: false size_unit: description: - The unit used to interpret the size parameter. required: false choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] default: 'gb' access: required: false choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget'] description: - "Access allowed for the volume." - "readOnly: Only read operations are allowed." - "readWrite: Reads and writes are allowed." - "locked: No reads or writes are allowed." - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked." - "If unspecified, the access settings of the clone will be the same as the source." default: None ''' EXAMPLES = """ - name: Create Volume sf_volume_manager: hostname: "{{ solidfire_hostname }}" username: "{{ solidfire_username }}" password: "{{ solidfire_password }}" state: present name: AnsibleVol account_id: 3 enable512e: False size: 1 size_unit: gb - name: Update Volume sf_volume_manager: hostname: "{{ solidfire_hostname }}" username: "{{ solidfire_username }}" password: "{{ solidfire_password }}" state: present name: AnsibleVol account_id: 3 access: readWrite - name: Delete Volume sf_volume_manager: hostname: "{{ solidfire_hostname }}" username: "{{ solidfire_username }}" password: "{{ solidfire_password }}" state: absent name: AnsibleVol account_id: 2 """ RETURN = """ msg: description: Success message returned: success type: string """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception import ansible.module_utils.netapp as netapp_utils HAS_SF_SDK = netapp_utils.has_sf_sdk() class SolidFireVolume(object): def __init__(self): self._size_unit_map = netapp_utils.SF_BYTE_MAP self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() self.argument_spec.update(dict( state=dict(required=True, choices=['present', 'absent']), name=dict(required=True, type='str'), account_id=dict(required=True, type='int'), enable512e=dict(type='bool', aliases=['512emulation']), qos=dict(required=False, type='str', default=None), attributes=dict(required=False, type='dict', default=None), volume_id=dict(type='int', default=None), size=dict(type='int'), size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], type='str'), access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite', 'locked', 'replicationTarget']), )) self.module = AnsibleModule( argument_spec=self.argument_spec, required_if=[ ('state', 'present', ['size', 'enable512e']) ], supports_check_mode=True ) p = self.module.params # set up state variables self.state = p['state'] self.name = p['name'] self.account_id = p['account_id'] self.enable512e = p['enable512e'] self.qos = p['qos'] self.attributes = p['attributes'] self.volume_id = p['volume_id'] self.size_unit = p['size_unit'] if p['size'] is not None: self.size = p['size'] * self._size_unit_map[self.size_unit] else: self.size = None self.access = p['access'] if HAS_SF_SDK is False: self.module.fail_json(msg="Unable to import the SolidFire Python SDK") else: self.sfe = netapp_utils.create_sf_connection(module=self.module) def get_volume(self): """ Return volume object if found :return: Details about the volume. None if not found. :rtype: dict """ volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id) for volume in volume_list.volumes: if volume.name == self.name: # Update self.volume_id if self.volume_id is not None: if volume.volume_id == self.volume_id and str(volume.delete_time) == "": return volume else: if str(volume.delete_time) == "": self.volume_id = volume.volume_id return volume return None def create_volume(self): try: self.sfe.create_volume(name=self.name, account_id=self.account_id, total_size=self.size, enable512e=self.enable512e, qos=self.qos, attributes=self.attributes) except: err = get_exception() self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size), exception=str(err)) def delete_volume(self): try: self.sfe.delete_volume(volume_id=self.volume_id) except: err = get_exception() self.module.fail_json(msg="Error deleting volume %s" % self.volume_id, exception=str(err)) def update_volume(self): try: self.sfe.modify_volume(self.volume_id, account_id=self.account_id, access=self.access, qos=self.qos, total_size=self.size, attributes=self.attributes) except: err = get_exception() self.module.fail_json(msg="Error updating volume %s" % self.name, exception=str(err)) def apply(self): changed = False volume_exists = False update_volume = False volume_detail = self.get_volume() if volume_detail: volume_exists = True if self.state == 'absent': # Checking for state change(s) here, and applying it later in the code allows us to support # check_mode changed = True elif self.state == 'present': if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access: update_volume = True changed = True elif volume_detail.account_id is not None and self.account_id is not None \ and volume_detail.account_id != self.account_id: update_volume = True changed = True elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos: update_volume = True changed = True elif volume_detail.total_size is not None and volume_detail.total_size != self.size: size_difference = abs(float(volume_detail.total_size - self.size)) # Change size only if difference is bigger than 0.001 if size_difference/self.size > 0.001: update_volume = True changed = True elif volume_detail.attributes is not None and self.attributes is not None and \ volume_detail.attributes != self.attributes: update_volume = True changed = True else: if self.state == 'present': changed = True result_message = "" if changed: if self.module.check_mode: result_message = "Check mode, skipping changes" pass else: if self.state == 'present': if not volume_exists: self.create_volume() result_message = "Volume created" elif update_volume: self.update_volume() result_message = "Volume updated" elif self.state == 'absent': self.delete_volume() result_message = "Volume deleted" self.module.exit_json(changed=changed, msg=result_message) def main(): v = SolidFireVolume() v.apply() if __name__ == '__main__': main()
gpl-3.0
gauribhoite/personfinder
env/site-packages/babel/support.py
137
21812
# -*- coding: utf-8 -*- """ babel.support ~~~~~~~~~~~~~ Several classes and functions that help with integrating and using Babel in applications. .. note: the code in this module is not used by Babel itself :copyright: (c) 2013 by the Babel Team. :license: BSD, see LICENSE for more details. """ import gettext import locale from babel.core import Locale from babel.dates import format_date, format_datetime, format_time, \ format_timedelta from babel.numbers import format_number, format_decimal, format_currency, \ format_percent, format_scientific from babel._compat import PY2, text_type, text_to_native class Format(object): """Wrapper class providing the various date and number formatting functions bound to a specific locale and time-zone. >>> from babel.util import UTC >>> from datetime import date >>> fmt = Format('en_US', UTC) >>> fmt.date(date(2007, 4, 1)) u'Apr 1, 2007' >>> fmt.decimal(1.2345) u'1.234' """ def __init__(self, locale, tzinfo=None): """Initialize the formatter. :param locale: the locale identifier or `Locale` instance :param tzinfo: the time-zone info (a `tzinfo` instance or `None`) """ self.locale = Locale.parse(locale) self.tzinfo = tzinfo def date(self, date=None, format='medium'): """Return a date formatted according to the given pattern. >>> from datetime import date >>> fmt = Format('en_US') >>> fmt.date(date(2007, 4, 1)) u'Apr 1, 2007' """ return format_date(date, format, locale=self.locale) def datetime(self, datetime=None, format='medium'): """Return a date and time formatted according to the given pattern. >>> from datetime import datetime >>> from pytz import timezone >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern')) >>> fmt.datetime(datetime(2007, 4, 1, 15, 30)) u'Apr 1, 2007, 11:30:00 AM' """ return format_datetime(datetime, format, tzinfo=self.tzinfo, locale=self.locale) def time(self, time=None, format='medium'): """Return a time formatted according to the given pattern. >>> from datetime import datetime >>> from pytz import timezone >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern')) >>> fmt.time(datetime(2007, 4, 1, 15, 30)) u'11:30:00 AM' """ return format_time(time, format, tzinfo=self.tzinfo, locale=self.locale) def timedelta(self, delta, granularity='second', threshold=.85, format='medium', add_direction=False): """Return a time delta according to the rules of the given locale. >>> from datetime import timedelta >>> fmt = Format('en_US') >>> fmt.timedelta(timedelta(weeks=11)) u'3 months' """ return format_timedelta(delta, granularity=granularity, threshold=threshold, format=format, add_direction=add_direction, locale=self.locale) def number(self, number): """Return an integer number formatted for the locale. >>> fmt = Format('en_US') >>> fmt.number(1099) u'1,099' """ return format_number(number, locale=self.locale) def decimal(self, number, format=None): """Return a decimal number formatted for the locale. >>> fmt = Format('en_US') >>> fmt.decimal(1.2345) u'1.234' """ return format_decimal(number, format, locale=self.locale) def currency(self, number, currency): """Return a number in the given currency formatted for the locale. """ return format_currency(number, currency, locale=self.locale) def percent(self, number, format=None): """Return a number formatted as percentage for the locale. >>> fmt = Format('en_US') >>> fmt.percent(0.34) u'34%' """ return format_percent(number, format, locale=self.locale) def scientific(self, number): """Return a number formatted using scientific notation for the locale. """ return format_scientific(number, locale=self.locale) class LazyProxy(object): """Class for proxy objects that delegate to a specified function to evaluate the actual object. >>> def greeting(name='world'): ... return 'Hello, %s!' % name >>> lazy_greeting = LazyProxy(greeting, name='Joe') >>> print lazy_greeting Hello, Joe! >>> u' ' + lazy_greeting u' Hello, Joe!' >>> u'(%s)' % lazy_greeting u'(Hello, Joe!)' This can be used, for example, to implement lazy translation functions that delay the actual translation until the string is actually used. The rationale for such behavior is that the locale of the user may not always be available. In web applications, you only know the locale when processing a request. The proxy implementation attempts to be as complete as possible, so that the lazy objects should mostly work as expected, for example for sorting: >>> greetings = [ ... LazyProxy(greeting, 'world'), ... LazyProxy(greeting, 'Joe'), ... LazyProxy(greeting, 'universe'), ... ] >>> greetings.sort() >>> for greeting in greetings: ... print greeting Hello, Joe! Hello, universe! Hello, world! """ __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled'] def __init__(self, func, *args, **kwargs): is_cache_enabled = kwargs.pop('enable_cache', True) # Avoid triggering our own __setattr__ implementation object.__setattr__(self, '_func', func) object.__setattr__(self, '_args', args) object.__setattr__(self, '_kwargs', kwargs) object.__setattr__(self, '_is_cache_enabled', is_cache_enabled) object.__setattr__(self, '_value', None) @property def value(self): if self._value is None: value = self._func(*self._args, **self._kwargs) if not self._is_cache_enabled: return value object.__setattr__(self, '_value', value) return self._value def __contains__(self, key): return key in self.value def __nonzero__(self): return bool(self.value) def __dir__(self): return dir(self.value) def __iter__(self): return iter(self.value) def __len__(self): return len(self.value) def __str__(self): return str(self.value) def __unicode__(self): return unicode(self.value) def __add__(self, other): return self.value + other def __radd__(self, other): return other + self.value def __mod__(self, other): return self.value % other def __rmod__(self, other): return other % self.value def __mul__(self, other): return self.value * other def __rmul__(self, other): return other * self.value def __call__(self, *args, **kwargs): return self.value(*args, **kwargs) def __lt__(self, other): return self.value < other def __le__(self, other): return self.value <= other def __eq__(self, other): return self.value == other def __ne__(self, other): return self.value != other def __gt__(self, other): return self.value > other def __ge__(self, other): return self.value >= other def __delattr__(self, name): delattr(self.value, name) def __getattr__(self, name): return getattr(self.value, name) def __setattr__(self, name, value): setattr(self.value, name, value) def __delitem__(self, key): del self.value[key] def __getitem__(self, key): return self.value[key] def __setitem__(self, key, value): self.value[key] = value class NullTranslations(gettext.NullTranslations, object): DEFAULT_DOMAIN = None def __init__(self, fp=None): """Initialize a simple translations class which is not backed by a real catalog. Behaves similar to gettext.NullTranslations but also offers Babel's on *gettext methods (e.g. 'dgettext()'). :param fp: a file-like object (ignored in this class) """ # These attributes are set by gettext.NullTranslations when a catalog # is parsed (fp != None). Ensure that they are always present because # some *gettext methods (including '.gettext()') rely on the attributes. self._catalog = {} self.plural = lambda n: int(n != 1) super(NullTranslations, self).__init__(fp=fp) self.files = filter(None, [getattr(fp, 'name', None)]) self.domain = self.DEFAULT_DOMAIN self._domains = {} def dgettext(self, domain, message): """Like ``gettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).gettext(message) def ldgettext(self, domain, message): """Like ``lgettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).lgettext(message) def udgettext(self, domain, message): """Like ``ugettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).ugettext(message) # backward compatibility with 0.9 dugettext = udgettext def dngettext(self, domain, singular, plural, num): """Like ``ngettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).ngettext(singular, plural, num) def ldngettext(self, domain, singular, plural, num): """Like ``lngettext()``, but look the message up in the specified domain. """ return self._domains.get(domain, self).lngettext(singular, plural, num) def udngettext(self, domain, singular, plural, num): """Like ``ungettext()`` but look the message up in the specified domain. """ return self._domains.get(domain, self).ungettext(singular, plural, num) # backward compatibility with 0.9 dungettext = udngettext # Most of the downwards code, until it get's included in stdlib, from: # http://bugs.python.org/file10036/gettext-pgettext.patch # # The encoding of a msgctxt and a msgid in a .mo file is # msgctxt + "\x04" + msgid (gettext version >= 0.15) CONTEXT_ENCODING = '%s\x04%s' def pgettext(self, context, message): """Look up the `context` and `message` id in the catalog and return the corresponding message string, as an 8-bit string encoded with the catalog's charset encoding, if known. If there is no entry in the catalog for the `message` id and `context` , and a fallback has been set, the look up is forwarded to the fallback's ``pgettext()`` method. Otherwise, the `message` id is returned. """ ctxt_msg_id = self.CONTEXT_ENCODING % (context, message) missing = object() tmsg = self._catalog.get(ctxt_msg_id, missing) if tmsg is missing: if self._fallback: return self._fallback.pgettext(context, message) return message # Encode the Unicode tmsg back to an 8-bit string, if possible if self._output_charset: return text_to_native(tmsg, self._output_charset) elif self._charset: return text_to_native(tmsg, self._charset) return tmsg def lpgettext(self, context, message): """Equivalent to ``pgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ ctxt_msg_id = self.CONTEXT_ENCODING % (context, message) missing = object() tmsg = self._catalog.get(ctxt_msg_id, missing) if tmsg is missing: if self._fallback: return self._fallback.lpgettext(context, message) return message if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) def npgettext(self, context, singular, plural, num): """Do a plural-forms lookup of a message id. `singular` is used as the message id for purposes of lookup in the catalog, while `num` is used to determine which plural form to use. The returned message string is an 8-bit string encoded with the catalog's charset encoding, if known. If the message id for `context` is not found in the catalog, and a fallback is specified, the request is forwarded to the fallback's ``npgettext()`` method. Otherwise, when ``num`` is 1 ``singular`` is returned, and ``plural`` is returned in all other cases. """ ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular) try: tmsg = self._catalog[(ctxt_msg_id, self.plural(num))] if self._output_charset: return text_to_native(tmsg, self._output_charset) elif self._charset: return text_to_native(tmsg, self._charset) return tmsg except KeyError: if self._fallback: return self._fallback.npgettext(context, singular, plural, num) if num == 1: return singular else: return plural def lnpgettext(self, context, singular, plural, num): """Equivalent to ``npgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular) try: tmsg = self._catalog[(ctxt_msg_id, self.plural(num))] if self._output_charset: return tmsg.encode(self._output_charset) return tmsg.encode(locale.getpreferredencoding()) except KeyError: if self._fallback: return self._fallback.lnpgettext(context, singular, plural, num) if num == 1: return singular else: return plural def upgettext(self, context, message): """Look up the `context` and `message` id in the catalog and return the corresponding message string, as a Unicode string. If there is no entry in the catalog for the `message` id and `context`, and a fallback has been set, the look up is forwarded to the fallback's ``upgettext()`` method. Otherwise, the `message` id is returned. """ ctxt_message_id = self.CONTEXT_ENCODING % (context, message) missing = object() tmsg = self._catalog.get(ctxt_message_id, missing) if tmsg is missing: if self._fallback: return self._fallback.upgettext(context, message) return text_type(message) return tmsg def unpgettext(self, context, singular, plural, num): """Do a plural-forms lookup of a message id. `singular` is used as the message id for purposes of lookup in the catalog, while `num` is used to determine which plural form to use. The returned message string is a Unicode string. If the message id for `context` is not found in the catalog, and a fallback is specified, the request is forwarded to the fallback's ``unpgettext()`` method. Otherwise, when `num` is 1 `singular` is returned, and `plural` is returned in all other cases. """ ctxt_message_id = self.CONTEXT_ENCODING % (context, singular) try: tmsg = self._catalog[(ctxt_message_id, self.plural(num))] except KeyError: if self._fallback: return self._fallback.unpgettext(context, singular, plural, num) if num == 1: tmsg = text_type(singular) else: tmsg = text_type(plural) return tmsg def dpgettext(self, domain, context, message): """Like `pgettext()`, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).pgettext(context, message) def udpgettext(self, domain, context, message): """Like `upgettext()`, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).upgettext(context, message) # backward compatibility with 0.9 dupgettext = udpgettext def ldpgettext(self, domain, context, message): """Equivalent to ``dpgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ return self._domains.get(domain, self).lpgettext(context, message) def dnpgettext(self, domain, context, singular, plural, num): """Like ``npgettext``, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).npgettext(context, singular, plural, num) def udnpgettext(self, domain, context, singular, plural, num): """Like ``unpgettext``, but look the message up in the specified `domain`. """ return self._domains.get(domain, self).unpgettext(context, singular, plural, num) # backward compatibility with 0.9 dunpgettext = udnpgettext def ldnpgettext(self, domain, context, singular, plural, num): """Equivalent to ``dnpgettext()``, but the translation is returned in the preferred system encoding, if no other encoding was explicitly set with ``bind_textdomain_codeset()``. """ return self._domains.get(domain, self).lnpgettext(context, singular, plural, num) if not PY2: ugettext = gettext.NullTranslations.gettext ungettext = gettext.NullTranslations.ngettext class Translations(NullTranslations, gettext.GNUTranslations): """An extended translation catalog class.""" DEFAULT_DOMAIN = 'messages' def __init__(self, fp=None, domain=None): """Initialize the translations catalog. :param fp: the file-like object the translation should be read from :param domain: the message domain (default: 'messages') """ super(Translations, self).__init__(fp=fp) self.domain = domain or self.DEFAULT_DOMAIN if not PY2: ugettext = gettext.GNUTranslations.gettext ungettext = gettext.GNUTranslations.ngettext @classmethod def load(cls, dirname=None, locales=None, domain=None): """Load translations from the given directory. :param dirname: the directory containing the ``MO`` files :param locales: the list of locales in order of preference (items in this list can be either `Locale` objects or locale strings) :param domain: the message domain (default: 'messages') """ if locales is not None: if not isinstance(locales, (list, tuple)): locales = [locales] locales = [str(locale) for locale in locales] if not domain: domain = cls.DEFAULT_DOMAIN filename = gettext.find(domain, dirname, locales) if not filename: return NullTranslations() with open(filename, 'rb') as fp: return cls(fp=fp, domain=domain) def __repr__(self): return '<%s: "%s">' % (type(self).__name__, self._info.get('project-id-version')) def add(self, translations, merge=True): """Add the given translations to the catalog. If the domain of the translations is different than that of the current catalog, they are added as a catalog that is only accessible by the various ``d*gettext`` functions. :param translations: the `Translations` instance with the messages to add :param merge: whether translations for message domains that have already been added should be merged with the existing translations """ domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN) if merge and domain == self.domain: return self.merge(translations) existing = self._domains.get(domain) if merge and existing is not None: existing.merge(translations) else: translations.add_fallback(self) self._domains[domain] = translations return self def merge(self, translations): """Merge the given translations into the catalog. Message translations in the specified catalog override any messages with the same identifier in the existing catalog. :param translations: the `Translations` instance with the messages to merge """ if isinstance(translations, gettext.GNUTranslations): self._catalog.update(translations._catalog) if isinstance(translations, Translations): self.files.extend(translations.files) return self
apache-2.0
arvinquilao/android_kernel_cyanogen_msm8916
tools/perf/scripts/python/net_dropmonitor.py
2669
1738
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") except: return for line in f: loc = int(line.split()[0], 16) name = line.split()[2] kallsyms.append((loc, name)) kallsyms.sort() def get_sym(sloc): loc = int(sloc) # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start # kallsyms[i][0] > loc for all end <= i < len(kallsyms) start, end = -1, len(kallsyms) while end != start + 1: pivot = (start + end) // 2 if loc < kallsyms[pivot][0]: end = pivot else: start = pivot # Now (start == -1 or kallsyms[start][0] <= loc) # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0]) if start >= 0: symloc, name = kallsyms[start] return (name, loc - symloc) else: return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, location, protocol): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
gpl-2.0
fengzhyuan/scikit-learn
sklearn/tests/test_metaestimators.py
226
4954
"""Common tests for metaestimators""" import functools import numpy as np from sklearn.base import BaseEstimator from sklearn.externals.six import iterkeys from sklearn.datasets import make_classification from sklearn.utils.testing import assert_true, assert_false, assert_raises from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV, RandomizedSearchCV from sklearn.feature_selection import RFE, RFECV from sklearn.ensemble import BaggingClassifier class DelegatorData(object): def __init__(self, name, construct, skip_methods=(), fit_args=make_classification()): self.name = name self.construct = construct self.fit_args = fit_args self.skip_methods = skip_methods DELEGATING_METAESTIMATORS = [ DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])), DelegatorData('GridSearchCV', lambda est: GridSearchCV( est, param_grid={'param': [5]}, cv=2), skip_methods=['score']), DelegatorData('RandomizedSearchCV', lambda est: RandomizedSearchCV( est, param_distributions={'param': [5]}, cv=2, n_iter=1), skip_methods=['score']), DelegatorData('RFE', RFE, skip_methods=['transform', 'inverse_transform', 'score']), DelegatorData('RFECV', RFECV, skip_methods=['transform', 'inverse_transform', 'score']), DelegatorData('BaggingClassifier', BaggingClassifier, skip_methods=['transform', 'inverse_transform', 'score', 'predict_proba', 'predict_log_proba', 'predict']) ] def test_metaestimator_delegation(): # Ensures specified metaestimators have methods iff subestimator does def hides(method): @property def wrapper(obj): if obj.hidden_method == method.__name__: raise AttributeError('%r is hidden' % obj.hidden_method) return functools.partial(method, obj) return wrapper class SubEstimator(BaseEstimator): def __init__(self, param=1, hidden_method=None): self.param = param self.hidden_method = hidden_method def fit(self, X, y=None, *args, **kwargs): self.coef_ = np.arange(X.shape[1]) return True def _check_fit(self): if not hasattr(self, 'coef_'): raise RuntimeError('Estimator is not fit') @hides def inverse_transform(self, X, *args, **kwargs): self._check_fit() return X @hides def transform(self, X, *args, **kwargs): self._check_fit() return X @hides def predict(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def predict_proba(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def predict_log_proba(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def decision_function(self, X, *args, **kwargs): self._check_fit() return np.ones(X.shape[0]) @hides def score(self, X, *args, **kwargs): self._check_fit() return 1.0 methods = [k for k in iterkeys(SubEstimator.__dict__) if not k.startswith('_') and not k.startswith('fit')] methods.sort() for delegator_data in DELEGATING_METAESTIMATORS: delegate = SubEstimator() delegator = delegator_data.construct(delegate) for method in methods: if method in delegator_data.skip_methods: continue assert_true(hasattr(delegate, method)) assert_true(hasattr(delegator, method), msg="%s does not have method %r when its delegate does" % (delegator_data.name, method)) # delegation before fit raises an exception assert_raises(Exception, getattr(delegator, method), delegator_data.fit_args[0]) delegator.fit(*delegator_data.fit_args) for method in methods: if method in delegator_data.skip_methods: continue # smoke test delegation getattr(delegator, method)(delegator_data.fit_args[0]) for method in methods: if method in delegator_data.skip_methods: continue delegate = SubEstimator(hidden_method=method) delegator = delegator_data.construct(delegate) assert_false(hasattr(delegate, method)) assert_false(hasattr(delegator, method), msg="%s has method %r when its delegate does not" % (delegator_data.name, method))
bsd-3-clause
pombredanne/Rusthon
regtests/bench/fannkuch.py
2
1085
# The Computer Language Benchmarks Game # http://shootout.alioth.debian.org/ # # contributed by Sokolov Yura # modified by Tupteq # modified by hartsantler 2014 from time import clock from runtime import * DEFAULT_ARG = 9 def main(): times = [] for i in range(4): t0 = clock() res = fannkuch(DEFAULT_ARG) tk = clock() times.append(tk - t0) avg = sum(times) / len(times) print(avg) def fannkuch(n): count = list(range(1, n+1)) perm1 = list(range(n)) perm = list(range(n)) max_flips = 0 m = n-1 r = n check = 0 while True: if check < 30: check += 1 while r != 1: count[r-1] = r r -= 1 if perm1[0] != 0 and perm1[m] != m: perm = perm1[:] flips_count = 0 k = perm[0] #while k: ## TODO fix for dart while k != 0: perm[:k+1] = perm[k::-1] flips_count += 1 k = perm[0] if flips_count > max_flips: max_flips = flips_count do_return = True while r != n: perm1.insert(r, perm1.pop(0)) count[r] -= 1 if count[r] > 0: do_return = False break r += 1 if do_return: return max_flips main()
bsd-3-clause
Mixser/django
tests/template_tests/filter_tests/test_wordwrap.py
324
1666
from django.template.defaultfilters import wordwrap from django.test import SimpleTestCase from django.utils.safestring import mark_safe from ..utils import setup class WordwrapTests(SimpleTestCase): @setup({'wordwrap01': '{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'}) def test_wordwrap01(self): output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')}) self.assertEqual(output, 'a &\nb a &\nb') @setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'}) def test_wordwrap02(self): output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')}) self.assertEqual(output, 'a &amp;\nb a &\nb') class FunctionTests(SimpleTestCase): def test_wrap(self): self.assertEqual( wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14), 'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid', ) def test_indent(self): self.assertEqual( wordwrap('this is a short paragraph of text.\n But this line should be indented', 14), 'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented', ) def test_indent2(self): self.assertEqual( wordwrap('this is a short paragraph of text.\n But this line should be indented', 15), 'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented', ) def test_non_string_input(self): self.assertEqual(wordwrap(123, 2), '123')
bsd-3-clause
StuartGordonReid/Comp-Finance
Optimizers/Solution.py
1
1512
__author__ = 'Stuart Gordon Reid' __email__ = 'stuartgordonreid@gmail.com' __website__ = 'http://www.stuartreid.co.za' """ File description """ class Solution(object): solution = [] def __init__(self, solution, problem): """ Abstract initialization method for a solution to some optimization function :param solution: a numpy array (much faster than lists) """ self.solution = solution self.problem = problem return def __len__(self): """ Overload of the len operator for the Solution class :rtype : Sized? """ return len(self.solution) def update(self, solution): """ This method is used for updating a solution """ self.solution = solution def get(self): """ This method is used to retrieve the numpy array for direct manipulation """ return self.solution def evaluate(self): return self.problem.evaluate(self.solution) def __gt__(self, other): assert isinstance(other, Solution) if self.problem.optimization is "min": return self.evaluate() < other.evaluate() elif self.problem.optimization is "max": return self.evaluate() > other.evaluate() def deep_copy(self): copy = Solution(None, self.problem) copy.solution = [] for i in range(len(self.solution)): copy.solution.append(self.solution[i]) return copy
lgpl-3.0
defaultnamehere/grr
lib/flows/general/endtoend_test.py
3
8099
#!/usr/bin/env python """Tests for grr.lib.flows.general.endtoend.""" # pylint: disable=unused-import, g-bad-import-order from grr.lib import server_plugins # pylint: enable=unused-import, g-bad-import-order from grr.endtoend_tests import base from grr.lib import action_mocks from grr.lib import aff4 from grr.lib import flags from grr.lib import flow from grr.lib import rdfvalue from grr.lib import test_lib from grr.lib import utils class MockEndToEndTest(base.AutomatedTest): platforms = ["Linux", "Darwin"] flow = "ListDirectory" args = {"pathspec": rdfvalue.PathSpec( path="/bin", pathtype=rdfvalue.PathSpec.PathType.OS)} output_path = "/fs/os/bin" file_to_find = "ls" def setUp(self): pass def CheckFlow(self): pass def tearDown(self): pass class MockEndToEndTestBadFlow(MockEndToEndTest): flow = "RaiseOnStart" args = {} class TestBadSetUp(MockEndToEndTest): def setUp(self): raise RuntimeError class TestBadTearDown(MockEndToEndTest): def tearDown(self): raise RuntimeError class TestFailure(MockEndToEndTest): def CheckFlow(self): raise RuntimeError("This should be logged") class TestEndToEndTestFlow(test_lib.FlowTestsBaseclass): def setUp(self): super(TestEndToEndTestFlow, self).setUp() install_time = rdfvalue.RDFDatetime().Now() user = "testuser" userobj = rdfvalue.User(username=user) interface = rdfvalue.Interface(ifname="eth0") self.client = aff4.FACTORY.Create(self.client_id, "VFSGRRClient", mode="rw", token=self.token, age=aff4.ALL_TIMES) self.client.Set(self.client.Schema.HOSTNAME("hostname")) self.client.Set(self.client.Schema.SYSTEM("Linux")) self.client.Set(self.client.Schema.OS_RELEASE("debian")) self.client.Set(self.client.Schema.OS_VERSION("14.04")) self.client.Set(self.client.Schema.KERNEL("3.15-rc2")) self.client.Set(self.client.Schema.FQDN("hostname.example.com")) self.client.Set(self.client.Schema.ARCH("x86_64")) self.client.Set(self.client.Schema.INSTALL_DATE(install_time)) self.client.Set(self.client.Schema.USER([userobj])) self.client.Set(self.client.Schema.USERNAMES([user])) self.client.Set(self.client.Schema.LAST_INTERFACES([interface])) self.client.Flush() self.client_mock = action_mocks.ActionMock("ListDirectory", "StatFile") def testRunSuccess(self): args = rdfvalue.EndToEndTestFlowArgs( test_names=["TestListDirectoryOSLinuxDarwin", "MockEndToEndTest", "TestListDirectoryOSLinuxDarwin"]) with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply: for _ in test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args): pass results = [] for _, reply in send_reply.args: if isinstance(reply, rdfvalue.EndToEndTestResult): results.append(reply) self.assertTrue(reply.success) self.assertTrue(reply.test_class_name in [ "TestListDirectoryOSLinuxDarwin", "MockEndToEndTest"]) self.assertFalse(reply.log) # We only expect 2 results because we dedup test names self.assertEqual(len(results), 2) def testNoApplicableTests(self): """Try to run linux tests on windows.""" install_time = rdfvalue.RDFDatetime().Now() user = "testuser" userobj = rdfvalue.User(username=user) interface = rdfvalue.Interface(ifname="eth0") self.client = aff4.FACTORY.Create(self.client_id, "VFSGRRClient", mode="rw", token=self.token, age=aff4.ALL_TIMES) self.client.Set(self.client.Schema.HOSTNAME("hostname")) self.client.Set(self.client.Schema.SYSTEM("Windows")) self.client.Set(self.client.Schema.OS_RELEASE("7")) self.client.Set(self.client.Schema.OS_VERSION("6.1.7601SP1")) self.client.Set(self.client.Schema.KERNEL("6.1.7601")) self.client.Set(self.client.Schema.FQDN("hostname.example.com")) self.client.Set(self.client.Schema.ARCH("AMD64")) self.client.Set(self.client.Schema.INSTALL_DATE(install_time)) self.client.Set(self.client.Schema.USER([userobj])) self.client.Set(self.client.Schema.USERNAMES([user])) self.client.Set(self.client.Schema.LAST_INTERFACES([interface])) self.client.Flush() args = rdfvalue.EndToEndTestFlowArgs( test_names=["TestListDirectoryOSLinuxDarwin", "MockEndToEndTest", "TestListDirectoryOSLinuxDarwin"]) self.assertRaises(flow.FlowError, list, test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args)) def testRunSuccessAndFail(self): args = rdfvalue.EndToEndTestFlowArgs() with utils.Stubber(base.AutomatedTest, "classes", {"MockEndToEndTest": MockEndToEndTest, "TestFailure": TestFailure}): with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply: for _ in test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args): pass results = [] for _, reply in send_reply.args: if isinstance(reply, rdfvalue.EndToEndTestResult): results.append(reply) if reply.test_class_name == "MockEndToEndTest": self.assertTrue(reply.success) self.assertFalse(reply.log) elif reply.test_class_name == "TestFailure": self.assertFalse(reply.success) self.assertTrue("This should be logged" in reply.log) self.assertItemsEqual([x.test_class_name for x in results], ["MockEndToEndTest", "TestFailure"]) self.assertEqual(len(results), 2) def testRunBadSetUp(self): args = rdfvalue.EndToEndTestFlowArgs( test_names=["TestBadSetUp"]) self.assertRaises(RuntimeError, list, test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args)) def testRunBadTearDown(self): args = rdfvalue.EndToEndTestFlowArgs( test_names=["TestBadTearDown"]) self.assertRaises(RuntimeError, list, test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args)) def testRunBadFlow(self): """Test behaviour when test flow raises in Start. A flow that raises in its Start method will kill the EndToEndTest run. Protecting and reporting on this significantly complicates this code, and a flow raising in Start is really broken, so we allow this behaviour. """ args = rdfvalue.EndToEndTestFlowArgs( test_names=["MockEndToEndTestBadFlow", "MockEndToEndTest"]) self.assertRaises(RuntimeError, list, test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args)) def testEndToEndTestFailure(self): args = rdfvalue.EndToEndTestFlowArgs( test_names=["TestFailure"]) with test_lib.Instrument(flow.GRRFlow, "SendReply") as send_reply: for _ in test_lib.TestFlowHelper( "EndToEndTestFlow", self.client_mock, client_id=self.client_id, token=self.token, args=args): pass results = [] for _, reply in send_reply.args: if isinstance(reply, rdfvalue.EndToEndTestResult): results.append(reply) self.assertFalse(reply.success) self.assertEqual(reply.test_class_name, "TestFailure") self.assertTrue("This should be logged" in reply.log) self.assertEqual(len(results), 1) class FlowTestLoader(test_lib.GRRTestLoader): base_class = TestEndToEndTestFlow def main(argv): # Run the full test suite test_lib.GrrTestProgram(argv=argv, testLoader=FlowTestLoader()) if __name__ == "__main__": flags.StartMain(main)
apache-2.0
kytvi2p/tahoe-lafs
src/allmydata/scripts/tahoe_backup.py
2
12614
import os.path import time import urllib import simplejson import datetime from allmydata.scripts.common import get_alias, escape_path, DEFAULT_ALIAS, \ UnknownAliasError from allmydata.scripts.common_http import do_http, HTTPError, format_http_error from allmydata.util import time_format from allmydata.scripts import backupdb from allmydata.util.encodingutil import listdir_unicode, quote_output, \ to_str, FilenameEncodingError, unicode_to_url from allmydata.util.assertutil import precondition from allmydata.util.fileutil import abspath_expanduser_unicode def get_local_metadata(path): metadata = {} # posix stat(2) metadata, depends on the platform os.stat_float_times(True) s = os.stat(path) metadata["ctime"] = s.st_ctime metadata["mtime"] = s.st_mtime misc_fields = ("st_mode", "st_ino", "st_dev", "st_uid", "st_gid") macos_misc_fields = ("st_rsize", "st_creator", "st_type") for field in misc_fields + macos_misc_fields: if hasattr(s, field): metadata[field] = getattr(s, field) # TODO: extended attributes, like on OS-X's HFS+ return metadata def mkdir(contents, options): kids = dict([ (childname, (contents[childname][0], {"ro_uri": contents[childname][1], "metadata": contents[childname][2], })) for childname in contents ]) body = simplejson.dumps(kids).encode("utf-8") url = options['node-url'] + "uri?t=mkdir-immutable" resp = do_http("POST", url, body) if resp.status < 200 or resp.status >= 300: raise HTTPError("Error during mkdir", resp) dircap = to_str(resp.read().strip()) return dircap def put_child(dirurl, childname, childcap): assert dirurl[-1] == "/" url = dirurl + urllib.quote(unicode_to_url(childname)) + "?t=uri" resp = do_http("PUT", url, childcap) if resp.status not in (200, 201): raise HTTPError("Error during put_child", resp) class BackupProcessingError(Exception): pass class BackerUpper: def __init__(self, options): self.options = options self.files_uploaded = 0 self.files_reused = 0 self.files_checked = 0 self.files_skipped = 0 self.directories_created = 0 self.directories_reused = 0 self.directories_checked = 0 self.directories_skipped = 0 def run(self): options = self.options nodeurl = options['node-url'] self.verbosity = 1 if options['quiet']: self.verbosity = 0 if options['verbose']: self.verbosity = 2 stdout = options.stdout stderr = options.stderr start_timestamp = datetime.datetime.now() self.backupdb = None bdbfile = os.path.join(options["node-directory"], "private", "backupdb.sqlite") bdbfile = abspath_expanduser_unicode(bdbfile) self.backupdb = backupdb.get_backupdb(bdbfile, stderr) if not self.backupdb: print >>stderr, "ERROR: Unable to load backup db." return 1 try: rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS) except UnknownAliasError, e: e.display(stderr) return 1 to_url = nodeurl + "uri/%s/" % urllib.quote(rootcap) if path: to_url += escape_path(path) if not to_url.endswith("/"): to_url += "/" archives_url = to_url + "Archives/" # first step: make sure the target directory exists, as well as the # Archives/ subdirectory. resp = do_http("GET", archives_url + "?t=json") if resp.status == 404: resp = do_http("POST", archives_url + "?t=mkdir") if resp.status != 200: print >>stderr, format_http_error("Unable to create target directory", resp) return 1 # second step: process the tree new_backup_dircap = self.process(options.from_dir) # third: attach the new backup to the list now = time_format.iso_utc(int(time.time()), sep="_") + "Z" put_child(archives_url, now, new_backup_dircap) put_child(to_url, "Latest", new_backup_dircap) end_timestamp = datetime.datetime.now() # calc elapsed time, omitting microseconds elapsed_time = str(end_timestamp - start_timestamp).split('.')[0] if self.verbosity >= 1: print >>stdout, (" %d files uploaded (%d reused), " "%d files skipped, " "%d directories created (%d reused), " "%d directories skipped" % (self.files_uploaded, self.files_reused, self.files_skipped, self.directories_created, self.directories_reused, self.directories_skipped)) if self.verbosity >= 2: print >>stdout, (" %d files checked, %d directories checked" % (self.files_checked, self.directories_checked)) print >>stdout, " backup done, elapsed time: %s" % elapsed_time # The command exits with code 2 if files or directories were skipped if self.files_skipped or self.directories_skipped: return 2 # done! return 0 def verboseprint(self, msg): precondition(isinstance(msg, str), msg) if self.verbosity >= 2: print >>self.options.stdout, msg def warn(self, msg): precondition(isinstance(msg, str), msg) print >>self.options.stderr, msg def process(self, localpath): precondition(isinstance(localpath, unicode), localpath) # returns newdircap self.verboseprint("processing %s" % quote_output(localpath)) create_contents = {} # childname -> (type, rocap, metadata) compare_contents = {} # childname -> rocap try: children = listdir_unicode(localpath) except EnvironmentError: self.directories_skipped += 1 self.warn("WARNING: permission denied on directory %s" % quote_output(localpath)) children = [] except FilenameEncodingError: self.directories_skipped += 1 self.warn("WARNING: could not list directory %s due to a filename encoding error" % quote_output(localpath)) children = [] for child in self.options.filter_listdir(children): assert isinstance(child, unicode), child childpath = os.path.join(localpath, child) # note: symlinks to directories are both islink() and isdir() if os.path.isdir(childpath) and not os.path.islink(childpath): metadata = get_local_metadata(childpath) # recurse on the child directory childcap = self.process(childpath) assert isinstance(childcap, str) create_contents[child] = ("dirnode", childcap, metadata) compare_contents[child] = childcap elif os.path.isfile(childpath) and not os.path.islink(childpath): try: childcap, metadata = self.upload(childpath) assert isinstance(childcap, str) create_contents[child] = ("filenode", childcap, metadata) compare_contents[child] = childcap except EnvironmentError: self.files_skipped += 1 self.warn("WARNING: permission denied on file %s" % quote_output(childpath)) else: self.files_skipped += 1 if os.path.islink(childpath): self.warn("WARNING: cannot backup symlink %s" % quote_output(childpath)) else: self.warn("WARNING: cannot backup special file %s" % quote_output(childpath)) must_create, r = self.check_backupdb_directory(compare_contents) if must_create: self.verboseprint(" creating directory for %s" % quote_output(localpath)) newdircap = mkdir(create_contents, self.options) assert isinstance(newdircap, str) if r: r.did_create(newdircap) self.directories_created += 1 return newdircap else: self.verboseprint(" re-using old directory for %s" % quote_output(localpath)) self.directories_reused += 1 return r.was_created() def check_backupdb_file(self, childpath): if not self.backupdb: return True, None use_timestamps = not self.options["ignore-timestamps"] r = self.backupdb.check_file(childpath, use_timestamps) if not r.was_uploaded(): return True, r if not r.should_check(): # the file was uploaded or checked recently, so we can just use # it return False, r # we must check the file before using the results filecap = r.was_uploaded() self.verboseprint("checking %s" % quote_output(filecap)) nodeurl = self.options['node-url'] checkurl = nodeurl + "uri/%s?t=check&output=JSON" % urllib.quote(filecap) self.files_checked += 1 resp = do_http("POST", checkurl) if resp.status != 200: # can't check, so we must assume it's bad return True, r cr = simplejson.loads(resp.read()) healthy = cr["results"]["healthy"] if not healthy: # must upload return True, r # file is healthy, no need to upload r.did_check_healthy(cr) return False, r def check_backupdb_directory(self, compare_contents): if not self.backupdb: return True, None r = self.backupdb.check_directory(compare_contents) if not r.was_created(): return True, r if not r.should_check(): # the file was uploaded or checked recently, so we can just use # it return False, r # we must check the directory before re-using it dircap = r.was_created() self.verboseprint("checking %s" % quote_output(dircap)) nodeurl = self.options['node-url'] checkurl = nodeurl + "uri/%s?t=check&output=JSON" % urllib.quote(dircap) self.directories_checked += 1 resp = do_http("POST", checkurl) if resp.status != 200: # can't check, so we must assume it's bad return True, r cr = simplejson.loads(resp.read()) healthy = cr["results"]["healthy"] if not healthy: # must create return True, r # directory is healthy, no need to upload r.did_check_healthy(cr) return False, r # This function will raise an IOError exception when called on an unreadable file def upload(self, childpath): precondition(isinstance(childpath, unicode), childpath) #self.verboseprint("uploading %s.." % quote_output(childpath)) metadata = get_local_metadata(childpath) # we can use the backupdb here must_upload, bdb_results = self.check_backupdb_file(childpath) if must_upload: self.verboseprint("uploading %s.." % quote_output(childpath)) infileobj = open(childpath, "rb") url = self.options['node-url'] + "uri" resp = do_http("PUT", url, infileobj) if resp.status not in (200, 201): raise HTTPError("Error during file PUT", resp) filecap = resp.read().strip() self.verboseprint(" %s -> %s" % (quote_output(childpath, quotemarks=False), quote_output(filecap, quotemarks=False))) #self.verboseprint(" metadata: %s" % (quote_output(metadata, quotemarks=False),)) if bdb_results: bdb_results.did_upload(filecap) self.files_uploaded += 1 return filecap, metadata else: self.verboseprint("skipping %s.." % quote_output(childpath)) self.files_reused += 1 return bdb_results.was_uploaded(), metadata def backup(options): bu = BackerUpper(options) return bu.run()
gpl-2.0
mozilla/lumbergh
careers/careers/feeds.py
2
1413
# -*- coding: utf-8 -*- from datetime import date from django.contrib.syndication.views import Feed from django.urls import reverse from django.utils import feedgenerator from careers.careers.models import Position class LatestPositionsFeed(Feed): feed_type = feedgenerator.Rss201rev2Feed title = 'Current Mozilla job openings' description = ('The current list of job openings, available internships ' 'and contract opportunities at Mozilla.') feed_copyright = ('Portions of this content are ©1998–%s by individual ' 'mozilla.org contributors. Content available under a ' 'Creative Commons license.' % date.today().year) def link(self): return reverse('careers.listings') def feed_url(self): return reverse('careers.feed') def categories(self): return Position.categories() def items(self): return Position.objects.all() def item_title(self, item): return item.title def item_description(self, item): return item.description def item_pubdate(self, item): return item.updated_at def item_categories(self, item): categories = [] categories.append(item.department) categories += item.location_list if 'Remote' in item.location_list: categories.append('Worldwide') return categories
mpl-2.0
Fl0rianFischer/sme_odoo
addons/website_sale_digital/controllers/main.py
20
4262
# -*- coding: utf-8 -*- import base64 from openerp.addons.web import http from openerp.addons.web.http import request from openerp.addons.website_portal.controllers.main import website_account from openerp.addons.website_sale.controllers.main import website_sale from cStringIO import StringIO from werkzeug.utils import redirect class website_sale_digital_confirmation(website_sale): @http.route([ '/shop/confirmation', ], type='http', auth="public", website=True) def payment_confirmation(self, **post): response = super(website_sale_digital_confirmation, self).payment_confirmation(**post) order_lines = response.qcontext['order'].order_line digital_content = map(lambda x: x.product_id.type == 'digital', order_lines) response.qcontext.update(digital=any(digital_content)) return response class website_sale_digital(website_account): orders_page = '/my/orders' @http.route([ '/my/orders/<int:order>', ], type='http', auth='user', website=True) def orders_followup(self, order=None, **post): response = super(website_sale_digital, self).orders_followup(order=order, **post) order_products_attachments = {} order = response.qcontext['order'] invoiced_lines = request.env['account.invoice.line'].sudo().search([('invoice_id', 'in', order.invoice_ids.ids), ('invoice_id.state', '=', 'paid')]) purchased_products_attachments = {} for il in invoiced_lines: p_obj = il.product_id # Ignore products that do not have digital content if not p_obj.product_tmpl_id.type == 'digital': continue # Search for product attachments A = request.env['ir.attachment'] p_id = p_obj.id template = p_obj.product_tmpl_id att = A.search_read( domain=['|', '&', ('res_model', '=', p_obj._name), ('res_id', '=', p_id), '&', ('res_model', '=', template._name), ('res_id', '=', template.id)], fields=['name', 'write_date'], order='write_date desc', ) # Ignore products with no attachments if not att: continue purchased_products_attachments[p_id] = att response.qcontext.update({ 'digital_attachments': purchased_products_attachments, }) return response @http.route([ '/my/download', ], type='http', auth='public') def download_attachment(self, attachment_id): # Check if this is a valid attachment id attachment = request.env['ir.attachment'].sudo().search_read( [('id', '=', int(attachment_id))], ["name", "datas", "file_type", "res_model", "res_id", "type", "url"] ) if attachment: attachment = attachment[0] else: return redirect(self.orders_page) # Check if the user has bought the associated product res_model = attachment['res_model'] res_id = attachment['res_id'] purchased_products = request.env['account.invoice.line'].get_digital_purchases(request.uid) if res_model == 'product.product': if res_id not in purchased_products: return redirect(self.orders_page) # Also check for attachments in the product templates elif res_model == 'product.template': P = request.env['product.product'] template_ids = map(lambda x: P.browse(x).product_tmpl_id.id, purchased_products) if res_id not in template_ids: return redirect(self.orders_page) else: return redirect(self.orders_page) # The client has bought the product, otherwise it would have been blocked by now if attachment["type"] == "url": if attachment["url"]: return redirect(attachment["url"]) else: return request.not_found() elif attachment["datas"]: data = StringIO(base64.standard_b64decode(attachment["datas"])) return http.send_file(data, filename=attachment['name'], as_attachment=True) else: return request.not_found()
gpl-3.0
yesudeep/cmc
app/jinja2/docs/jinjaext.py
9
6833
# -*- coding: utf-8 -*- """ Jinja Documentation Extensions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for automatically documenting filters and tests. :copyright: Copyright 2008 by Armin Ronacher. :license: BSD. """ import os import re import inspect import jinja2 from itertools import islice from types import BuiltinFunctionType from docutils import nodes from docutils.statemachine import ViewList from sphinx.ext.autodoc import prepare_docstring from sphinx.application import TemplateBridge from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic from jinja2 import Environment, FileSystemLoader def parse_rst(state, content_offset, doc): node = nodes.section() # hack around title style bookkeeping surrounding_title_styles = state.memo.title_styles surrounding_section_level = state.memo.section_level state.memo.title_styles = [] state.memo.section_level = 0 state.nested_parse(doc, content_offset, node, match_titles=1) state.memo.title_styles = surrounding_title_styles state.memo.section_level = surrounding_section_level return node.children class JinjaStyle(Style): title = 'Jinja Style' default_style = "" styles = { Comment: 'italic #aaaaaa', Comment.Preproc: 'noitalic #B11414', Comment.Special: 'italic #505050', Keyword: 'bold #B80000', Keyword.Type: '#808080', Operator.Word: 'bold #B80000', Name.Builtin: '#333333', Name.Function: '#333333', Name.Class: 'bold #333333', Name.Namespace: 'bold #333333', Name.Entity: 'bold #363636', Name.Attribute: '#686868', Name.Tag: 'bold #686868', Name.Decorator: '#686868', String: '#AA891C', Number: '#444444', Generic.Heading: 'bold #000080', Generic.Subheading: 'bold #800080', Generic.Deleted: '#aa0000', Generic.Inserted: '#00aa00', Generic.Error: '#aa0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#555555', Generic.Output: '#888888', Generic.Traceback: '#aa0000', Error: '#F00 bg:#FAA' } _sig_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*(\(.*?\))') def format_function(name, aliases, func): lines = inspect.getdoc(func).splitlines() signature = '()' if isinstance(func, BuiltinFunctionType): match = _sig_re.match(lines[0]) if match is not None: del lines[:1 + bool(lines and not lines[0])] signature = match.group(1) else: try: argspec = inspect.getargspec(func) if getattr(func, 'environmentfilter', False) or \ getattr(func, 'contextfilter', False): del argspec[0][0] signature = inspect.formatargspec(*argspec) except: pass result = ['.. function:: %s%s' % (name, signature), ''] result.extend(' ' + line for line in lines) if aliases: result.extend(('', ' :aliases: %s' % ', '.join( '``%s``' % x for x in sorted(aliases)))) return result def dump_functions(mapping): def directive(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine): reverse_mapping = {} for name, func in mapping.iteritems(): reverse_mapping.setdefault(func, []).append(name) filters = [] for func, names in reverse_mapping.iteritems(): aliases = sorted(names, key=lambda x: len(x)) name = aliases.pop() filters.append((name, aliases, func)) filters.sort() result = ViewList() for name, aliases, func in filters: for item in format_function(name, aliases, func): result.append(item, '<jinjaext>') node = nodes.paragraph() state.nested_parse(result, content_offset, node) return node.children return directive from jinja2.defaults import DEFAULT_FILTERS, DEFAULT_TESTS jinja_filters = dump_functions(DEFAULT_FILTERS) jinja_tests = dump_functions(DEFAULT_TESTS) def jinja_nodes(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine): from jinja2.nodes import Node doc = ViewList() def walk(node, indent): p = ' ' * indent sig = ', '.join(node.fields) doc.append(p + '.. autoclass:: %s(%s)' % (node.__name__, sig), '') if node.abstract: members = [] for key, name in node.__dict__.iteritems(): if not key.startswith('_') and \ not hasattr(node.__base__, key) and callable(name): members.append(key) if members: members.sort() doc.append('%s :members: %s' % (p, ', '.join(members)), '') if node.__base__ != object: doc.append('', '') doc.append('%s :Node type: :class:`%s`' % (p, node.__base__.__name__), '') doc.append('', '') children = node.__subclasses__() children.sort(key=lambda x: x.__name__.lower()) for child in children: walk(child, indent) walk(Node, 0) return parse_rst(state, content_offset, doc) def inject_toc(app, doctree, docname): titleiter = iter(doctree.traverse(nodes.title)) try: # skip first title, we are not interested in that one titleiter.next() title = titleiter.next() # and check if there is at least another title titleiter.next() except StopIteration: return tocnode = nodes.section('') tocnode['classes'].append('toc') toctitle = nodes.section('') toctitle['classes'].append('toctitle') toctitle.append(nodes.title(text='Table Of Contents')) tocnode.append(toctitle) tocnode += doctree.document.settings.env.get_toc_for(docname)[0][1] title.parent.insert(title.parent.children.index(title), tocnode) def setup(app): app.add_directive('jinjafilters', jinja_filters, 0, (0, 0, 0)) app.add_directive('jinjatests', jinja_tests, 0, (0, 0, 0)) app.add_directive('jinjanodes', jinja_nodes, 0, (0, 0, 0)) # uncomment for inline toc. links are broken unfortunately ##app.connect('doctree-resolved', inject_toc)
mit
fhaoquan/kbengine
kbe/src/lib/python/Lib/test/test_parser.py
113
26114
import parser import unittest import sys import operator import struct from test import support from test.script_helper import assert_python_failure # # First, we test that we can generate trees from valid source fragments, # and that these valid trees are indeed allowed by the tree-loading side # of the parser module. # class RoundtripLegalSyntaxTestCase(unittest.TestCase): def roundtrip(self, f, s): st1 = f(s) t = st1.totuple() try: st2 = parser.sequence2st(t) except parser.ParserError as why: self.fail("could not roundtrip %r: %s" % (s, why)) self.assertEqual(t, st2.totuple(), "could not re-generate syntax tree") def check_expr(self, s): self.roundtrip(parser.expr, s) def test_flags_passed(self): # The unicode literals flags has to be passed from the paser to AST # generation. suite = parser.suite("from __future__ import unicode_literals; x = ''") code = suite.compile() scope = {} exec(code, {}, scope) self.assertIsInstance(scope["x"], str) def check_suite(self, s): self.roundtrip(parser.suite, s) def test_yield_statement(self): self.check_suite("def f(): yield 1") self.check_suite("def f(): yield") self.check_suite("def f(): x += yield") self.check_suite("def f(): x = yield 1") self.check_suite("def f(): x = y = yield 1") self.check_suite("def f(): x = yield") self.check_suite("def f(): x = y = yield") self.check_suite("def f(): 1 + (yield)*2") self.check_suite("def f(): (yield 1)*2") self.check_suite("def f(): return; yield 1") self.check_suite("def f(): yield 1; return") self.check_suite("def f(): yield from 1") self.check_suite("def f(): x = yield from 1") self.check_suite("def f(): f((yield from 1))") self.check_suite("def f(): yield 1; return 1") self.check_suite("def f():\n" " for x in range(30):\n" " yield x\n") self.check_suite("def f():\n" " if (yield):\n" " yield x\n") def test_nonlocal_statement(self): self.check_suite("def f():\n" " x = 0\n" " def g():\n" " nonlocal x\n") self.check_suite("def f():\n" " x = y = 0\n" " def g():\n" " nonlocal x, y\n") def test_expressions(self): self.check_expr("foo(1)") self.check_expr("[1, 2, 3]") self.check_expr("[x**3 for x in range(20)]") self.check_expr("[x**3 for x in range(20) if x % 3]") self.check_expr("[x**3 for x in range(20) if x % 2 if x % 3]") self.check_expr("list(x**3 for x in range(20))") self.check_expr("list(x**3 for x in range(20) if x % 3)") self.check_expr("list(x**3 for x in range(20) if x % 2 if x % 3)") self.check_expr("foo(*args)") self.check_expr("foo(*args, **kw)") self.check_expr("foo(**kw)") self.check_expr("foo(key=value)") self.check_expr("foo(key=value, *args)") self.check_expr("foo(key=value, *args, **kw)") self.check_expr("foo(key=value, **kw)") self.check_expr("foo(a, b, c, *args)") self.check_expr("foo(a, b, c, *args, **kw)") self.check_expr("foo(a, b, c, **kw)") self.check_expr("foo(a, *args, keyword=23)") self.check_expr("foo + bar") self.check_expr("foo - bar") self.check_expr("foo * bar") self.check_expr("foo / bar") self.check_expr("foo // bar") self.check_expr("lambda: 0") self.check_expr("lambda x: 0") self.check_expr("lambda *y: 0") self.check_expr("lambda *y, **z: 0") self.check_expr("lambda **z: 0") self.check_expr("lambda x, y: 0") self.check_expr("lambda foo=bar: 0") self.check_expr("lambda foo=bar, spaz=nifty+spit: 0") self.check_expr("lambda foo=bar, **z: 0") self.check_expr("lambda foo=bar, blaz=blat+2, **z: 0") self.check_expr("lambda foo=bar, blaz=blat+2, *y, **z: 0") self.check_expr("lambda x, *y, **z: 0") self.check_expr("(x for x in range(10))") self.check_expr("foo(x for x in range(10))") self.check_expr("...") self.check_expr("a[...]") def test_simple_expression(self): # expr_stmt self.check_suite("a") def test_simple_assignments(self): self.check_suite("a = b") self.check_suite("a = b = c = d = e") def test_simple_augmented_assignments(self): self.check_suite("a += b") self.check_suite("a -= b") self.check_suite("a *= b") self.check_suite("a /= b") self.check_suite("a //= b") self.check_suite("a %= b") self.check_suite("a &= b") self.check_suite("a |= b") self.check_suite("a ^= b") self.check_suite("a <<= b") self.check_suite("a >>= b") self.check_suite("a **= b") def test_function_defs(self): self.check_suite("def f(): pass") self.check_suite("def f(*args): pass") self.check_suite("def f(*args, **kw): pass") self.check_suite("def f(**kw): pass") self.check_suite("def f(foo=bar): pass") self.check_suite("def f(foo=bar, *args): pass") self.check_suite("def f(foo=bar, *args, **kw): pass") self.check_suite("def f(foo=bar, **kw): pass") self.check_suite("def f(a, b): pass") self.check_suite("def f(a, b, *args): pass") self.check_suite("def f(a, b, *args, **kw): pass") self.check_suite("def f(a, b, **kw): pass") self.check_suite("def f(a, b, foo=bar): pass") self.check_suite("def f(a, b, foo=bar, *args): pass") self.check_suite("def f(a, b, foo=bar, *args, **kw): pass") self.check_suite("def f(a, b, foo=bar, **kw): pass") self.check_suite("@staticmethod\n" "def f(): pass") self.check_suite("@staticmethod\n" "@funcattrs(x, y)\n" "def f(): pass") self.check_suite("@funcattrs()\n" "def f(): pass") # keyword-only arguments self.check_suite("def f(*, a): pass") self.check_suite("def f(*, a = 5): pass") self.check_suite("def f(*, a = 5, b): pass") self.check_suite("def f(*, a, b = 5): pass") self.check_suite("def f(*, a, b = 5, **kwds): pass") self.check_suite("def f(*args, a): pass") self.check_suite("def f(*args, a = 5): pass") self.check_suite("def f(*args, a = 5, b): pass") self.check_suite("def f(*args, a, b = 5): pass") self.check_suite("def f(*args, a, b = 5, **kwds): pass") # function annotations self.check_suite("def f(a: int): pass") self.check_suite("def f(a: int = 5): pass") self.check_suite("def f(*args: list): pass") self.check_suite("def f(**kwds: dict): pass") self.check_suite("def f(*, a: int): pass") self.check_suite("def f(*, a: int = 5): pass") self.check_suite("def f() -> int: pass") def test_class_defs(self): self.check_suite("class foo():pass") self.check_suite("class foo(object):pass") self.check_suite("@class_decorator\n" "class foo():pass") self.check_suite("@class_decorator(arg)\n" "class foo():pass") self.check_suite("@decorator1\n" "@decorator2\n" "class foo():pass") def test_import_from_statement(self): self.check_suite("from sys.path import *") self.check_suite("from sys.path import dirname") self.check_suite("from sys.path import (dirname)") self.check_suite("from sys.path import (dirname,)") self.check_suite("from sys.path import dirname as my_dirname") self.check_suite("from sys.path import (dirname as my_dirname)") self.check_suite("from sys.path import (dirname as my_dirname,)") self.check_suite("from sys.path import dirname, basename") self.check_suite("from sys.path import (dirname, basename)") self.check_suite("from sys.path import (dirname, basename,)") self.check_suite( "from sys.path import dirname as my_dirname, basename") self.check_suite( "from sys.path import (dirname as my_dirname, basename)") self.check_suite( "from sys.path import (dirname as my_dirname, basename,)") self.check_suite( "from sys.path import dirname, basename as my_basename") self.check_suite( "from sys.path import (dirname, basename as my_basename)") self.check_suite( "from sys.path import (dirname, basename as my_basename,)") self.check_suite("from .bogus import x") def test_basic_import_statement(self): self.check_suite("import sys") self.check_suite("import sys as system") self.check_suite("import sys, math") self.check_suite("import sys as system, math") self.check_suite("import sys, math as my_math") def test_relative_imports(self): self.check_suite("from . import name") self.check_suite("from .. import name") # check all the way up to '....', since '...' is tokenized # differently from '.' (it's an ellipsis token). self.check_suite("from ... import name") self.check_suite("from .... import name") self.check_suite("from .pkg import name") self.check_suite("from ..pkg import name") self.check_suite("from ...pkg import name") self.check_suite("from ....pkg import name") def test_pep263(self): self.check_suite("# -*- coding: iso-8859-1 -*-\n" "pass\n") def test_assert(self): self.check_suite("assert alo < ahi and blo < bhi\n") def test_with(self): self.check_suite("with open('x'): pass\n") self.check_suite("with open('x') as f: pass\n") self.check_suite("with open('x') as f, open('y') as g: pass\n") def test_try_stmt(self): self.check_suite("try: pass\nexcept: pass\n") self.check_suite("try: pass\nfinally: pass\n") self.check_suite("try: pass\nexcept A: pass\nfinally: pass\n") self.check_suite("try: pass\nexcept A: pass\nexcept: pass\n" "finally: pass\n") self.check_suite("try: pass\nexcept: pass\nelse: pass\n") self.check_suite("try: pass\nexcept: pass\nelse: pass\n" "finally: pass\n") def test_position(self): # An absolutely minimal test of position information. Better # tests would be a big project. code = "def f(x):\n return x + 1" st1 = parser.suite(code) st2 = st1.totuple(line_info=1, col_info=1) def walk(tree): node_type = tree[0] next = tree[1] if isinstance(next, tuple): for elt in tree[1:]: for x in walk(elt): yield x else: yield tree terminals = list(walk(st2)) self.assertEqual([ (1, 'def', 1, 0), (1, 'f', 1, 4), (7, '(', 1, 5), (1, 'x', 1, 6), (8, ')', 1, 7), (11, ':', 1, 8), (4, '', 1, 9), (5, '', 2, -1), (1, 'return', 2, 4), (1, 'x', 2, 11), (14, '+', 2, 13), (2, '1', 2, 15), (4, '', 2, 16), (6, '', 2, -1), (4, '', 2, -1), (0, '', 2, -1)], terminals) def test_extended_unpacking(self): self.check_suite("*a = y") self.check_suite("x, *b, = m") self.check_suite("[*a, *b] = y") self.check_suite("for [*x, b] in x: pass") def test_raise_statement(self): self.check_suite("raise\n") self.check_suite("raise e\n") self.check_suite("try:\n" " suite\n" "except Exception as e:\n" " raise ValueError from e\n") def test_set_displays(self): self.check_expr('{2}') self.check_expr('{2,}') self.check_expr('{2, 3}') self.check_expr('{2, 3,}') def test_dict_displays(self): self.check_expr('{}') self.check_expr('{a:b}') self.check_expr('{a:b,}') self.check_expr('{a:b, c:d}') self.check_expr('{a:b, c:d,}') def test_set_comprehensions(self): self.check_expr('{x for x in seq}') self.check_expr('{f(x) for x in seq}') self.check_expr('{f(x) for x in seq if condition(x)}') def test_dict_comprehensions(self): self.check_expr('{x:x for x in seq}') self.check_expr('{x**2:x[3] for x in seq if condition(x)}') self.check_expr('{x:x for x in seq1 for y in seq2 if condition(x, y)}') # # Second, we take *invalid* trees and make sure we get ParserError # rejections for them. # class IllegalSyntaxTestCase(unittest.TestCase): def check_bad_tree(self, tree, label): try: parser.sequence2st(tree) except parser.ParserError: pass else: self.fail("did not detect invalid tree for %r" % label) def test_junk(self): # not even remotely valid: self.check_bad_tree((1, 2, 3), "<junk>") def test_illegal_yield_1(self): # Illegal yield statement: def f(): return 1; yield 1 tree = \ (257, (264, (285, (259, (1, 'def'), (1, 'f'), (260, (7, '('), (8, ')')), (11, ':'), (291, (4, ''), (5, ''), (264, (265, (266, (272, (275, (1, 'return'), (313, (292, (293, (294, (295, (297, (298, (299, (300, (301, (302, (303, (304, (305, (2, '1')))))))))))))))))), (264, (265, (266, (272, (276, (1, 'yield'), (313, (292, (293, (294, (295, (297, (298, (299, (300, (301, (302, (303, (304, (305, (2, '1')))))))))))))))))), (4, ''))), (6, ''))))), (4, ''), (0, '')))) self.check_bad_tree(tree, "def f():\n return 1\n yield 1") def test_illegal_yield_2(self): # Illegal return in generator: def f(): return 1; yield 1 tree = \ (257, (264, (265, (266, (278, (1, 'from'), (281, (1, '__future__')), (1, 'import'), (279, (1, 'generators')))), (4, ''))), (264, (285, (259, (1, 'def'), (1, 'f'), (260, (7, '('), (8, ')')), (11, ':'), (291, (4, ''), (5, ''), (264, (265, (266, (272, (275, (1, 'return'), (313, (292, (293, (294, (295, (297, (298, (299, (300, (301, (302, (303, (304, (305, (2, '1')))))))))))))))))), (264, (265, (266, (272, (276, (1, 'yield'), (313, (292, (293, (294, (295, (297, (298, (299, (300, (301, (302, (303, (304, (305, (2, '1')))))))))))))))))), (4, ''))), (6, ''))))), (4, ''), (0, '')))) self.check_bad_tree(tree, "def f():\n return 1\n yield 1") def test_a_comma_comma_c(self): # Illegal input: a,,c tree = \ (258, (311, (290, (291, (292, (293, (295, (296, (297, (298, (299, (300, (301, (302, (303, (1, 'a')))))))))))))), (12, ','), (12, ','), (290, (291, (292, (293, (295, (296, (297, (298, (299, (300, (301, (302, (303, (1, 'c'))))))))))))))), (4, ''), (0, '')) self.check_bad_tree(tree, "a,,c") def test_illegal_operator(self): # Illegal input: a $= b tree = \ (257, (264, (265, (266, (267, (312, (291, (292, (293, (294, (296, (297, (298, (299, (300, (301, (302, (303, (304, (1, 'a'))))))))))))))), (268, (37, '$=')), (312, (291, (292, (293, (294, (296, (297, (298, (299, (300, (301, (302, (303, (304, (1, 'b'))))))))))))))))), (4, ''))), (0, '')) self.check_bad_tree(tree, "a $= b") def test_malformed_global(self): #doesn't have global keyword in ast tree = (257, (264, (265, (266, (282, (1, 'foo'))), (4, ''))), (4, ''), (0, '')) self.check_bad_tree(tree, "malformed global ast") def test_missing_import_source(self): # from import fred tree = \ (257, (268, (269, (270, (282, (284, (1, 'from'), (1, 'import'), (287, (285, (1, 'fred')))))), (4, ''))), (4, ''), (0, '')) self.check_bad_tree(tree, "from import fred") class CompileTestCase(unittest.TestCase): # These tests are very minimal. :-( def test_compile_expr(self): st = parser.expr('2 + 3') code = parser.compilest(st) self.assertEqual(eval(code), 5) def test_compile_suite(self): st = parser.suite('x = 2; y = x + 3') code = parser.compilest(st) globs = {} exec(code, globs) self.assertEqual(globs['y'], 5) def test_compile_error(self): st = parser.suite('1 = 3 + 4') self.assertRaises(SyntaxError, parser.compilest, st) def test_compile_badunicode(self): st = parser.suite('a = "\\U12345678"') self.assertRaises(SyntaxError, parser.compilest, st) st = parser.suite('a = "\\u1"') self.assertRaises(SyntaxError, parser.compilest, st) def test_issue_9011(self): # Issue 9011: compilation of an unary minus expression changed # the meaning of the ST, so that a second compilation produced # incorrect results. st = parser.expr('-3') code1 = parser.compilest(st) self.assertEqual(eval(code1), -3) code2 = parser.compilest(st) self.assertEqual(eval(code2), -3) class ParserStackLimitTestCase(unittest.TestCase): """try to push the parser to/over its limits. see http://bugs.python.org/issue1881 for a discussion """ def _nested_expression(self, level): return "["*level+"]"*level def test_deeply_nested_list(self): # XXX used to be 99 levels in 2.x e = self._nested_expression(93) st = parser.expr(e) st.compile() def test_trigger_memory_error(self): e = self._nested_expression(100) rc, out, err = assert_python_failure('-c', e) # parsing the expression will result in an error message # followed by a MemoryError (see #11963) self.assertIn(b's_push: parser stack overflow', err) self.assertIn(b'MemoryError', err) class STObjectTestCase(unittest.TestCase): """Test operations on ST objects themselves""" def test_comparisons(self): # ST objects should support order and equality comparisons st1 = parser.expr('2 + 3') st2 = parser.suite('x = 2; y = x + 3') st3 = parser.expr('list(x**3 for x in range(20))') st1_copy = parser.expr('2 + 3') st2_copy = parser.suite('x = 2; y = x + 3') st3_copy = parser.expr('list(x**3 for x in range(20))') # exercise fast path for object identity self.assertEqual(st1 == st1, True) self.assertEqual(st2 == st2, True) self.assertEqual(st3 == st3, True) # slow path equality self.assertEqual(st1, st1_copy) self.assertEqual(st2, st2_copy) self.assertEqual(st3, st3_copy) self.assertEqual(st1 == st2, False) self.assertEqual(st1 == st3, False) self.assertEqual(st2 == st3, False) self.assertEqual(st1 != st1, False) self.assertEqual(st2 != st2, False) self.assertEqual(st3 != st3, False) self.assertEqual(st1 != st1_copy, False) self.assertEqual(st2 != st2_copy, False) self.assertEqual(st3 != st3_copy, False) self.assertEqual(st2 != st1, True) self.assertEqual(st1 != st3, True) self.assertEqual(st3 != st2, True) # we don't particularly care what the ordering is; just that # it's usable and self-consistent self.assertEqual(st1 < st2, not (st2 <= st1)) self.assertEqual(st1 < st3, not (st3 <= st1)) self.assertEqual(st2 < st3, not (st3 <= st2)) self.assertEqual(st1 < st2, st2 > st1) self.assertEqual(st1 < st3, st3 > st1) self.assertEqual(st2 < st3, st3 > st2) self.assertEqual(st1 <= st2, st2 >= st1) self.assertEqual(st3 <= st1, st1 >= st3) self.assertEqual(st2 <= st3, st3 >= st2) # transitivity bottom = min(st1, st2, st3) top = max(st1, st2, st3) mid = sorted([st1, st2, st3])[1] self.assertTrue(bottom < mid) self.assertTrue(bottom < top) self.assertTrue(mid < top) self.assertTrue(bottom <= mid) self.assertTrue(bottom <= top) self.assertTrue(mid <= top) self.assertTrue(bottom <= bottom) self.assertTrue(mid <= mid) self.assertTrue(top <= top) # interaction with other types self.assertEqual(st1 == 1588.602459, False) self.assertEqual('spanish armada' != st2, True) self.assertRaises(TypeError, operator.ge, st3, None) self.assertRaises(TypeError, operator.le, False, st1) self.assertRaises(TypeError, operator.lt, st1, 1815) self.assertRaises(TypeError, operator.gt, b'waterloo', st2) check_sizeof = support.check_sizeof @support.cpython_only def test_sizeof(self): def XXXROUNDUP(n): if n <= 1: return n if n <= 128: return (n + 3) & ~3 return 1 << (n - 1).bit_length() basesize = support.calcobjsize('Pii') nodesize = struct.calcsize('hP3iP0h') def sizeofchildren(node): if node is None: return 0 res = 0 hasstr = len(node) > 1 and isinstance(node[-1], str) if hasstr: res += len(node[-1]) + 1 children = node[1:-1] if hasstr else node[1:] if children: res += XXXROUNDUP(len(children)) * nodesize for child in children: res += sizeofchildren(child) return res def check_st_sizeof(st): self.check_sizeof(st, basesize + nodesize + sizeofchildren(st.totuple())) check_st_sizeof(parser.expr('2 + 3')) check_st_sizeof(parser.expr('2 + 3 + 4')) check_st_sizeof(parser.suite('x = 2 + 3')) check_st_sizeof(parser.suite('')) check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-')) check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']')) # XXX tests for pickling and unpickling of ST objects should go here class OtherParserCase(unittest.TestCase): def test_two_args_to_expr(self): # See bug #12264 with self.assertRaises(TypeError): parser.expr("a", "b") def test_main(): support.run_unittest( RoundtripLegalSyntaxTestCase, IllegalSyntaxTestCase, CompileTestCase, ParserStackLimitTestCase, STObjectTestCase, OtherParserCase, ) if __name__ == "__main__": test_main()
lgpl-3.0
manhong2112/CodeColle
Python/Pygame/pygame~/compat.py
5
3445
# coding: ascii """Python 2.x/3.x compatibility tools""" import sys __all__ = ['geterror', 'long_', 'xrange_', 'ord_', 'unichr_', 'unicode_', 'raw_input_', 'as_bytes', 'as_unicode', 'bytes_', 'next_', 'imap_', 'PY_MAJOR_VERSION', 'PY_MINOR_VERSION'] PY_MAJOR_VERSION, PY_MINOR_VERSION = sys.version_info[0:2] def geterror(): return sys.exc_info()[1] # Python 3 if PY_MAJOR_VERSION >= 3: long_ = int xrange_ = range from io import StringIO from io import BytesIO unichr_ = chr unicode_ = str bytes_ = bytes raw_input_ = input imap_ = map # Represent escaped bytes and strings in a portable way. # # as_bytes: Allow a Python 3.x string to represent a bytes object. # e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x # as_bytes("a\x01\b") == "a\x01b" # Python 2.x # as_unicode: Allow a Python "r" string to represent a unicode string. # e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x # as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x def as_bytes(string): """ '<binary literal>' => b'<binary literal>' """ return string.encode('latin-1', 'strict') def as_unicode(rstring): """ r'<Unicode literal>' => '<Unicode literal>' """ return rstring.encode('ascii', 'strict').decode('unicode_escape', 'strict') # Python 2 else: long_ = long xrange_ = xrange from cStringIO import StringIO BytesIO = StringIO unichr_ = unichr unicode_ = unicode bytes_ = str raw_input_ = raw_input from itertools import imap as imap_ # Represent escaped bytes and strings in a portable way. # # as_bytes: Allow a Python 3.x string to represent a bytes object. # e.g.: as_bytes("a\x01\b") == b"a\x01b" # Python 3.x # as_bytes("a\x01\b") == "a\x01b" # Python 2.x # as_unicode: Allow a Python "r" string to represent a unicode string. # e.g.: as_unicode(r"Bo\u00F6tes") == u"Bo\u00F6tes" # Python 2.x # as_unicode(r"Bo\u00F6tes") == "Bo\u00F6tes" # Python 3.x def as_bytes(string): """ '<binary literal>' => '<binary literal>' """ return string def as_unicode(rstring): """ r'<Unicode literal>' => u'<Unicode literal>' """ return rstring.decode('unicode_escape', 'strict') def get_BytesIO(): return BytesIO def get_StringIO(): return StringIO def ord_(o): try: return ord(o) except TypeError: return o if sys.platform == 'win32': filesystem_errors = "replace" elif PY_MAJOR_VERSION >= 3: filesystem_errors = "surrogateescape" else: filesystem_errors = "strict" def filesystem_encode(u): fsencoding = sys.getfilesystemencoding() if fsencoding.lower() == 'ascii' and sys.platform.startswith('linux'): # Don't believe Linux systems claiming ASCII-only filesystems. In # practice, arbitrary bytes are allowed, and most things expect UTF-8. fsencoding = 'utf-8' return u.encode(fsencoding, filesystem_errors) # Include a next compatible function for Python versions < 2.6 if (PY_MAJOR_VERSION, PY_MINOR_VERSION) >= (2, 6): next_ = next else: def next_(i, *args): try: return i.next() except StopIteration: if args: return args[0] raise
mit
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/view_tests/tests/test_i18n.py
13
11317
# -*- coding:utf-8 -*- import gettext import json import os import unittest from os import path from django.conf import settings from django.core.urlresolvers import reverse from django.test import ( LiveServerTestCase, TestCase, modify_settings, override_settings, ) from django.utils import six from django.utils._os import upath from django.utils.module_loading import import_string from django.utils.translation import LANGUAGE_SESSION_KEY, override from ..urls import locale_dir @override_settings(ROOT_URLCONF='view_tests.urls') class I18NTests(TestCase): """ Tests django views in django/views/i18n.py """ def test_setlang(self): """ The set_language view can be used to change the session language. The user is redirected to the 'next' argument if provided. """ for lang_code, lang_name in settings.LANGUAGES: post_data = dict(language=lang_code, next='/') response = self.client.post('/i18n/setlang/', data=post_data) self.assertRedirects(response, 'http://testserver/') self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) def test_setlang_unsafe_next(self): """ The set_language view only redirects to the 'next' argument if it is "safe". """ lang_code, lang_name = settings.LANGUAGES[0] post_data = dict(language=lang_code, next='//unsafe/redirection/') response = self.client.post('/i18n/setlang/', data=post_data) self.assertEqual(response.url, 'http://testserver/') self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code) def test_setlang_reversal(self): self.assertEqual(reverse('set_language'), '/i18n/setlang/') def test_setlang_cookie(self): # we force saving language to a cookie rather than a session # by excluding session middleware and those which do require it test_settings = dict( MIDDLEWARE_CLASSES=('django.middleware.common.CommonMiddleware',), LANGUAGE_COOKIE_NAME='mylanguage', LANGUAGE_COOKIE_AGE=3600 * 7 * 2, LANGUAGE_COOKIE_DOMAIN='.example.com', LANGUAGE_COOKIE_PATH='/test/', ) with self.settings(**test_settings): post_data = dict(language='pl', next='/views/') response = self.client.post('/i18n/setlang/', data=post_data) language_cookie = response.cookies.get('mylanguage') self.assertEqual(language_cookie.value, 'pl') self.assertEqual(language_cookie['domain'], '.example.com') self.assertEqual(language_cookie['path'], '/test/') self.assertEqual(language_cookie['max-age'], 3600 * 7 * 2) def test_jsi18n(self): """The javascript_catalog can be deployed with language settings""" for lang_code in ['es', 'fr', 'ru']: with override(lang_code): catalog = gettext.translation('djangojs', locale_dir, [lang_code]) if six.PY3: trans_txt = catalog.gettext('this is to be translated') else: trans_txt = catalog.ugettext('this is to be translated') response = self.client.get('/jsi18n/') # response content must include a line like: # "this is to be translated": <value of trans_txt Python variable> # json.dumps() is used to be able to check unicode strings self.assertContains(response, json.dumps(trans_txt), 1) if lang_code == 'fr': # Message with context (msgctxt) self.assertContains(response, r'"month name\u0004May": "mai"', 1) @override_settings(ROOT_URLCONF='view_tests.urls') class JsI18NTests(TestCase): """ Tests django views in django/views/i18n.py that need to change settings.LANGUAGE_CODE. """ def test_jsi18n_with_missing_en_files(self): """ The javascript_catalog shouldn't load the fallback language in the case that the current selected language is actually the one translated from, and hence missing translation files completely. This happens easily when you're translating from English to other languages and you've set settings.LANGUAGE_CODE to some other language than English. """ with self.settings(LANGUAGE_CODE='es'), override('en-us'): response = self.client.get('/jsi18n/') self.assertNotContains(response, 'esto tiene que ser traducido') def test_jsi18n_fallback_language(self): """ Let's make sure that the fallback language is still working properly in cases where the selected language cannot be found. """ with self.settings(LANGUAGE_CODE='fr'), override('fi'): response = self.client.get('/jsi18n/') self.assertContains(response, 'il faut le traduire') def test_i18n_language_non_english_default(self): """ Check if the Javascript i18n view returns an empty language catalog if the default language is non-English, the selected language is English and there is not 'en' translation available. See #13388, #3594 and #13726 for more details. """ with self.settings(LANGUAGE_CODE='fr'), override('en-us'): response = self.client.get('/jsi18n/') self.assertNotContains(response, 'Choisir une heure') @modify_settings(INSTALLED_APPS={'append': 'view_tests.app0'}) def test_non_english_default_english_userpref(self): """ Same as above with the difference that there IS an 'en' translation available. The Javascript i18n view must return a NON empty language catalog with the proper English translations. See #13726 for more details. """ with self.settings(LANGUAGE_CODE='fr'), override('en-us'): response = self.client.get('/jsi18n_english_translation/') self.assertContains(response, 'this app0 string is to be translated') def test_i18n_language_non_english_fallback(self): """ Makes sure that the fallback language is still working properly in cases where the selected language cannot be found. """ with self.settings(LANGUAGE_CODE='fr'), override('none'): response = self.client.get('/jsi18n/') self.assertContains(response, 'Choisir une heure') def test_escaping(self): # Force a language via GET otherwise the gettext functions are a noop! response = self.client.get('/jsi18n_admin/?language=de') self.assertContains(response, '\\x04') @modify_settings(INSTALLED_APPS={'append': ['view_tests.app5']}) def test_non_BMP_char(self): """ Non-BMP characters should not break the javascript_catalog (#21725). """ with self.settings(LANGUAGE_CODE='en-us'), override('fr'): response = self.client.get('/jsi18n/app5/') self.assertEqual(response.status_code, 200) self.assertContains(response, 'emoji') self.assertContains(response, '\\ud83d\\udca9') @override_settings(ROOT_URLCONF='view_tests.urls') class JsI18NTestsMultiPackage(TestCase): """ Tests for django views in django/views/i18n.py that need to change settings.LANGUAGE_CODE and merge JS translation from several packages. """ @modify_settings(INSTALLED_APPS={'append': ['view_tests.app1', 'view_tests.app2']}) def test_i18n_language_english_default(self): """ Check if the JavaScript i18n view returns a complete language catalog if the default language is en-us, the selected language has a translation available and a catalog composed by djangojs domain translations of multiple Python packages is requested. See #13388, #3594 and #13514 for more details. """ with self.settings(LANGUAGE_CODE='en-us'), override('fr'): response = self.client.get('/jsi18n_multi_packages1/') self.assertContains(response, 'il faut traduire cette cha\\u00eene de caract\\u00e8res de app1') @modify_settings(INSTALLED_APPS={'append': ['view_tests.app3', 'view_tests.app4']}) def test_i18n_different_non_english_languages(self): """ Similar to above but with neither default or requested language being English. """ with self.settings(LANGUAGE_CODE='fr'), override('es-ar'): response = self.client.get('/jsi18n_multi_packages2/') self.assertContains(response, 'este texto de app3 debe ser traducido') def test_i18n_with_locale_paths(self): extended_locale_paths = settings.LOCALE_PATHS + ( path.join(path.dirname( path.dirname(path.abspath(upath(__file__)))), 'app3', 'locale'),) with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths): with override('es-ar'): response = self.client.get('/jsi18n/') self.assertContains(response, 'este texto de app3 debe ser traducido') skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False) @unittest.skipIf(skip_selenium, 'Selenium tests not requested') @override_settings(ROOT_URLCONF='view_tests.urls') class JavascriptI18nTests(LiveServerTestCase): # The test cases use fixtures & translations from these apps. available_apps = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'view_tests', ] webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver' @classmethod def setUpClass(cls): try: cls.selenium = import_string(cls.webdriver_class)() except Exception as e: raise unittest.SkipTest('Selenium webdriver "%s" not installed or ' 'not operational: %s' % (cls.webdriver_class, str(e))) super(JavascriptI18nTests, cls).setUpClass() @classmethod def tearDownClass(cls): cls.selenium.quit() super(JavascriptI18nTests, cls).tearDownClass() @override_settings(LANGUAGE_CODE='de') def test_javascript_gettext(self): self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/')) elem = self.selenium.find_element_by_id("gettext") self.assertEqual(elem.text, "Entfernen") elem = self.selenium.find_element_by_id("ngettext_sing") self.assertEqual(elem.text, "1 Element") elem = self.selenium.find_element_by_id("ngettext_plur") self.assertEqual(elem.text, "455 Elemente") elem = self.selenium.find_element_by_id("pgettext") self.assertEqual(elem.text, "Kann") elem = self.selenium.find_element_by_id("npgettext_sing") self.assertEqual(elem.text, "1 Resultat") elem = self.selenium.find_element_by_id("npgettext_plur") self.assertEqual(elem.text, "455 Resultate") class JavascriptI18nChromeTests(JavascriptI18nTests): webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver' class JavascriptI18nIETests(JavascriptI18nTests): webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
mit
ModoUnreal/PyWeather
setup.py
1
99122
''' _______ | \ \ / @@@; | \ \ / `#....@ | | \ / ,;@.....;,; | | \ / @..@........@` PyWeather Setup | | \ / .............@ version 0.6.3 beta | / \ / .............@ (c) 2017-2018 - o355 |_______/ | @...........#` | | .+@@++++@#; | | @ ; , | | : ' . | | @ # .` | | @ # .` ''' # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys if sys.version_info < (3, 0, 0): print("You'll need Python 3 to run PyWeather.", "Press enter to exit.") input() sys.exit() elif (sys.version_info > (3, 0, 0) and sys.version_info < (3, 5, 0)): print("You have a Python version between 3.0 and 3.4.", "While PyWeather will work, you may experience a few quirks.", "Try updating to Python 3.6, as it works more reliably.", "Please take note of this in PyWeather.","", sep="\n") elif sys.version_info >= (3, 7, 0): print("You have a Python version of 3.7 and greater.", "Please note that PyWeather 0.6.2 beta is NOT certified to work with", "Python 3.7. Python 3.6 and below should work just fine.", sep="\n") import configparser import traceback import subprocess import logging import os import urllib # Now force the writing of the versioninfo file during setup, this should prevent issues # in the event I forget to gitignore the file. try: open('updater//versioninfo.txt', 'w').close() with open("updater//versioninfo.txt", 'a') as out: out.write("0.6.3 beta") out.close() except: print("Couldn't write the versioninfo file. This may cause issues with PyWeather down the road.") config = configparser.ConfigParser() config.read('storage//config.ini') def configprovision(): try: config.add_section("GEOCODER API") except configparser.DuplicateSectionError: print("Failed to add the Geocoder API section.") try: config.add_section("FAVORITE LOCATIONS") except configparser.DuplicateSectionError: print("Failed to add the favorite locations section.") try: config.add_section("PREVIOUS LOCATIONS") except configparser.DuplicateSectionError: print("Failed to add the previous locations section") try: config.add_section("HURRICANE") except configparser.DuplicateSectionError: print("Failed to add the hurricane section.") try: config.add_section("FIRSTINPUT") except configparser.DuplicateSectionError: print("Failed to add the firstinput section.") try: config.add_section('SUMMARY') except configparser.DuplicateSectionError: print("Failed to add the summary section.") try: config.add_section('VERBOSITY') except configparser.DuplicateSectionError: print("Failed to add the verbosity section.") try: config.add_section('TRACEBACK') except configparser.DuplicateSectionError: print("Failed to add the traceback section.") try: config.add_section('UI') except configparser.DuplicateSectionError: print("Failed to add the UI section.") try: config.add_section('PREFETCH') except configparser.DuplicateSectionError: print("Failed to add the prefetch section.") try: config.add_section('UPDATER') except configparser.DuplicateSectionError: print("Failed to add the updater section.") try: config.add_section('KEYBACKUP') except configparser.DuplicateSectionError: print("Failed to add the keybackup section.") try: config.add_section('PYWEATHER BOOT') except configparser.DuplicateSectionError: print("Failed to add the PyWeather Boot section.") try: config.add_section('USER') except configparser.DuplicateSectionError: print("Failed to add the user section.") try: config.add_section('CACHE') except configparser.DuplicateSectionError: print("Failed to add the cache section.") try: config.add_section('RADAR GUI') except configparser.DuplicateSectionError: print("Failed to add the Radar GUI section.") try: config.add_section('GEOCODER') except configparser.DuplicateSectionError: print("Failed to add the Geocoder section.") config['SUMMARY']['sundata_summary'] = 'False' config['SUMMARY']['almanac_summary'] = 'False' config['SUMMARY']['showalertsonsummary'] = 'True' config['SUMMARY']['showtideonsummary'] = 'False' config['SUMMARY']['showyesterdayonsummary'] = 'False' config['VERBOSITY']['verbosity'] = 'False' config['VERBOSITY']['json_verbosity'] = 'False' config['VERBOSITY']['setup_verbosity'] = 'False' config['VERBOSITY']['setup_jsonverbosity'] = 'False' config['VERBOSITY']['updater_verbosity'] = 'False' config['VERBOSITY']['updater_jsonverbosity'] = 'False' config['VERBOSITY']['keybackup_verbosity'] = 'False' config['VERBOSITY']['configdefault_verbosity'] = 'False' config['TRACEBACK']['tracebacks'] = 'False' config['TRACEBACK']['setup_tracebacks'] = 'False' config['TRACEBACK']['updater_tracebacks'] = 'False' config['TRACEBACK']['configdefault_tracebacks'] = 'False' config['UI']['show_entertocontinue'] = 'True' config['UI']['detailedinfoloops'] = '6' config['UI']['forecast_detailedinfoloops'] = '5' config['UI']['show_completediterations'] = 'False' config['UI']['alerts_usiterations'] = '1' config['UI']['alerts_euiterations'] = '2' config['UI']['extratools_enabled'] = 'False' config['PREFETCH']['10dayfetch_atboot'] = 'False' config['PREFETCH']['yesterdaydata_atboot'] = 'False' config['UPDATER']['autocheckforupdates'] = 'False' config['UPDATER']['show_updaterreleasetag'] = 'False' config['KEYBACKUP']['savedirectory'] = 'backup//' config['PYWEATHER BOOT']['validateapikey'] = 'True' config['UPDATER']['showReleaseNotes'] = 'True' config['UPDATER']['showReleaseNotes_uptodate'] = 'False' config['UPDATER']['showNewVersionReleaseDate'] = 'True' config['USER']['configprovisioned'] = 'True' config['CACHE']['enabled'] = 'True' config['CACHE']['alerts_cachedtime'] = '5' config['CACHE']['current_cachedtime'] = '10' config['CACHE']['threedayhourly_cachedtime'] = '60' config['CACHE']['tendayhourly_cachedtime'] = '60' config['CACHE']['forecast_cachedtime'] = '60' config['CACHE']['almanac_cachedtime'] = '240' config['CACHE']['sundata_cachedtime'] = '480' config['CACHE']['tide_cachedtime'] = '480' config['CACHE']['hurricane_cachedtime'] = '180' config['CACHE']['yesterday_cachedtime'] = '720' config['RADAR GUI']['radar_imagesize'] = 'normal' config['RADAR GUI']['bypassconfirmation'] = 'False' config['GEOCODER']['scheme'] = 'https' config['GEOCODER API']['customkey_enabled'] = 'False' config['GEOCODER API']['customkey'] = 'None' config['PREFETCH']['hurricanedata_atboot'] = 'False' config['FIRSTINPUT']['geoipservice_enabled'] = 'False' config['FIRSTINPUT']['allow_pwsqueries'] = 'True' config['HURRICANE']['enablenearestcity'] = 'False' config['HURRICANE']['enablenearestcity_forecast'] = 'False' config['HURRICANE']['api_username'] = 'pyweather_proj' config['HURRICANE']['nearestcitysize'] = 'medium' config['FAVORITE LOCATIONS']['enabled'] = 'True' config['FAVORITE LOCATIONS']['favloc1'] = 'None' config['FAVORITE LOCATIONS']['favloc2'] = 'None' config['FAVORITE LOCATIONS']['favloc3'] = 'None' config['FAVORITE LOCATIONS']['favloc4'] = 'None' config['FAVORITE LOCATIONS']['favloc5'] = 'None' config['FAVORITE LOCATIONS']['favloc1_data'] = 'None' config['FAVORITE LOCATIONS']['favloc2_data'] = 'None' config['FAVORITE LOCATIONS']['favloc3_data'] = 'None' config['FAVORITE LOCATIONS']['favloc4_data'] = 'None' config['FAVORITE LOCATIONS']['favloc5_data'] = 'None' config['PREVIOUS LOCATIONS']['enabled'] = 'True' config['PREVIOUS LOCATIONS']['prevloc1'] = 'None' config['PREVIOUS LOCATIONS']['prevloc2'] = 'None' config['PREVIOUS LOCATIONS']['prevloc3'] = 'None' config['PREVIOUS LOCATIONS']['prevloc4'] = 'None' config['PREVIOUS LOCATIONS']['prevloc5'] = 'None' config['PREVIOUS LOCATIONS']['prevloc1_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc2_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc3_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc4_data'] = 'None' config['PREVIOUS LOCATIONS']['prevloc5_data'] = 'None' try: with open('storage//config.ini', 'w') as configfile: config.write(configfile) except: print("Hmmf...an odd error occurred. A full traceback will be", "printed below. Please report this issue on GitHub", "(github.com/o355/pyweather), as that would be greatly appreciated", "for trying to fix the bug that you just encountered!", sep="\n") traceback.print_exc() # Giving users choice, unlike Microsoft. print("Would you like to continue using PyWeather with an unprovisioned config?", "It's highly recommended you don't continue, as you may encounter", "unexpected errors and issues with using PyWeather. Yes or No.", sep="\n") provisionfailed_continue = input("Input here: ").lower() if provisionfailed_continue == "yes": print("Continuing with PyWeather Setup. Please remember, you may encounter", "unexpected errors and issues. You can always retry provisioning your config", "by using the configsetup.py script in the storage folder.", sep="\n") elif provisionfailed_continue == "no": print("Stopping PyWeather Setup. You can retry to provision your config by using", "the configsetup.py script in the storage folder.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Couldn't understand your input. By default, PyWeather Setup is stopping.", "You can retry to provision your config by using the configsetup.py script", "in the storage folder. Press enter to exit.", sep="\n") input() sys.exit() # See if the config is "provisioned". If it isn't, a KeyError will occur, # because it's not created. Here, we set up the config to defaults if it's not # provisioned. try: configprovisioned = config.getboolean('USER', 'configprovisioned') except: print("Your config likely isn't provisioned. Would you like to provision your config?", "It's highly recommended you provision your config. If you decide not to,", "you may run into issues using PyWeather.", "Yes or No.", sep="\n") provisionconfig = input("Input here: ").lower() if provisionconfig == "yes": print("Provisioning your config.") configprovision() print("Config file provisioned successfully! Moving on with PyWeather setup...") elif provisionconfig == "no": print("Not provisioning your config. You may encounter unexpected errors", "and issues when using PyWeather, however.", sep="\n") else: print("Couldn't understand your input. By default, I'm going to provision", "your config. Beginning now...", sep="\n") configprovision() print("Config file provisioned successfully! Moving on with PyWeather setup...") try: verbosity = config.getboolean('VERBOSITY', 'setup_verbosity') jsonVerbosity = config.getboolean('VERBOSITY', 'setup_jsonverbosity') tracebacksEnabled = config.getboolean('TRACEBACK', 'setup_tracebacks') except: print("Couldn't load your config file. Make sure there aren't any typos", "in the config, and that the config file is accessible.", "Setting config variables to their defaults.", "Here's the full traceback, in case you need it.", sep="\n") traceback.print_exc() verbosity = False jsonVerbosity = False tracebacksEnabled = False def printException(): if tracebacksEnabled == True: print("Here's the full traceback (for error reporting):") traceback.print_exc() def printException_loggerwarn(): if verbosity == True: logger.warning("Oh snap! We ran into a non-critical error. Here's the traceback.") traceback.print_exc() logger = logging.getLogger(name='pyweather_setup_0.6.2beta') logger.setLevel(logging.DEBUG) logformat = '%(asctime)s | %(levelname)s | %(message)s' logging.basicConfig(format=logformat) if verbosity == True: logger.setLevel(logging.DEBUG) elif tracebacksEnabled == True: logger.setLevel(logging.ERROR) else: logger.setLevel(logging.CRITICAL) logger.debug("Listing configuration options:") logger.debug("verbosity: %s ; jsonVerbosity: %s" % (verbosity, jsonVerbosity)) logger.debug("tracebacksEnabled: %s" % tracebacksEnabled) print("Hi! Welcome to PyWeather 0.6.3 beta! Glad that you're here.", "I'm here to help set up PyWeather, and let you configure it to your liking.", "Let's begin!", sep="\n") import shutil import time import json import codecs buildnumber = 63 buildversion = "0.6.3 beta" logger.debug("buildnumber: %s ; buildversion: %s" % (buildnumber, buildversion)) print("","Before we get started, I want to confirm some permissions from you.", "Is it okay if I use 1-5 MB of data (downloading libraries), save a small", "text file called apikey.txt (under 2 KB), and automatically install Python", "libraries?", "Please input yes or no below:", sep="\n") confirmPermissions = input("Input here: ").lower() logger.debug("confirmPermissions: %s" % confirmPermissions) if confirmPermissions == "no": logger.debug("User denied permissions. Closing...") print("Okay! Closing now.", "Press enter to exit.", sep="\n") input() sys.exit() elif confirmPermissions != "yes": logger.debug("Couldn't understand. Closing...") print("I couldn't understand what you said.", "As a precaution, I won't proceed any further.", "Press enter to exit.", sep="\n") input() sys.exit() print("","Cool! Let's start.", "I'm going to start by checking for necessary libraries (to run PyWeather).", "This can take a moment, so please hold tight while I check!", sep="\n") try: import pip except ImportError: logger.warn("pip is NOT installed! Asking user for automated install...") printException_loggerwarn() print("","Shucks! PIP couldn't be imported, and I need PIP to install", "libraries for you. Would you like me to install PIP for you?", "Yes or No.", sep="\n") pipConfirm = input("Input here: ").lower() logger.debug("pipConfirm: %s" % pipConfirm) if pipConfirm == "no": logger.info("User denied PIP install, closing...") print("","Okay! I'm closing setup, as I need PIP to continue.", "Press enter to continue.", sep="\n") input() sys.exit() elif pipConfirm == "yes": logger.info("User allowed PIP install. Starting...") print("","Okay!", "I'll download PIP's installer, and run it.", "Doing such uses about 2-4 MB of data, and will quit PW setup.", "When the setup script finishes, you'll need to run the setup script again." "I'll start in a few seconds.", sep="\n") time.sleep(3) print("Downloading the installer...") # We use the built-in urllib library, as some Python installs don't include requests. try: with urllib.request.urlopen('https://bootstrap.pypa.io/get-pip.py') as update_response, open('get-pip.py', 'wb') as update_out_file: logger.debug("update_response: %s ; update_out_file: %s" % (update_response, update_out_file)) shutil.copyfileobj(update_response, update_out_file) except: print("Couldn't download the PIP installer, either due to no internet connection, or the library that fetches", "files has failed. As an alternative, you can download the installer yourself.", "Please download this file: 'https://bootstrap.pypa.io/get-pip.py', and place it in PyWeather's base directory.", "Afterwards, press enter to execute the installer. Press Control + C to exit.", sep="\n") printException() input() print("Running the installer...") logger.debug("Executing get-pip.py. If this script exits, please restart the setup script.") exec(open("get-pip.py").read()) else: logger.warn("Couldn't understand the input. Closing...") print("","I didn't understand what you said.", "As a precaution, I'm closing setup, as I need PIP to continue.", "Press enter to exit.", sep="\n") input() sys.exit() except PermissionError: traceback.print_exc() print("PIP has incorrect permissions on your machine. Please attempt to fix", "permissions on the folder that is listed in the traceback.", "Linux users: Use sudo chown -R <yourusername> <folder>, this should fix the issue.", "Press enter to exit.", sep="\n") input() sys.exit() print("Deleting the PIP installer file (if it exists)") try: os.remove("get-pip.py") except: printException_loggerwarn() print("The file get-pip.py didn't exist, or we had wrong permissions.") neededLibraries = 0 try: import colorama coloramaInstalled = True logger.info("Colorama is installed.") logger.debug("coloramaInstalled: %s" % coloramaInstalled) except ImportError: coloramaInstalled = False neededLibraries = neededLibraries + 1 logger.warn("Colorama is not installed.") printException_loggerwarn() logger.debug("coloramaInstalled: %s ; neededLibraries: %s" % (coloramaInstalled, neededLibraries)) try: import geopy geopyInstalled = True logger.info("geopy is installed.") logger.debug("geopyInstalled: %s" % geopyInstalled) except ImportError: geopyInstalled = False neededLibraries = neededLibraries + 1 logger.info("geopy is NOT installed.") printException_loggerwarn() logger.debug("geopyInstalled: %s ; neededLibraries: %s" % (geopyInstalled, neededLibraries)) try: from appJar import gui appjarInstalled = True logger.info("appjar is installed.") logger.debug("appjarInstalled: %s" % appjarInstalled) except ImportError as e: if e == "No module named '_tkinter', please install the python3-tk package": print("appJar cannot run on this platform. Skipping installation...") appjarInstalled = True logger.debug("appjarInstalled: %s" % appjarInstalled) else: appjarInstalled = False neededLibraries = neededLibraries + 1 logger.debug("appJar is NOT installed.") printException_loggerwarn() logger.debug("appjarInstalled: %s ; neededLibraries: %s" % (appjarInstalled, neededLibraries)) try: import requests requestsInstalled = True logger.debug("requests is installed.") logger.debug("requestsInstalled: %s" % requestsInstalled) except: requestsInstalled = False neededLibraries = neededLibraries + 1 logger.debug("requests is NOT installed.") printException_loggerwarn() logger.debug("requestsInstalled: %s ; neededLibraries: %s" % (requestsInstalled, neededLibraries)) try: import halo haloInstalled = True logger.debug("halo is installed.") logger.debug("haloInstalled: %s" % haloInstalled) except: haloInstalled = False neededLibraries += 1 logger.debug("halo is NOT installed.") printException_loggerwarn() logger.debug("haloInstalled: %s ; neededLibraries: %s" % (haloInstalled, neededLibraries)) print("All done!") if neededLibraries == 0: logger.debug("All libraries are installed.") print("All necessary libraries have been installed!") else: logger.debug("Libraries need to be installed.") print("Shucks. Not all necessary libraries are installed. Here's what needs to be installed:") if coloramaInstalled is False: print("- Colorama") if geopyInstalled is False: print("- Geopy") if appjarInstalled is False: print("- appJar") if requestsInstalled is False: print("- Requests") if haloInstalled is False: print("- Halo") print("If you want me to, I can automatically install these libraries.", "Would you like me to do such? Yes or No.", sep="\n") neededLibrariesConfirm = input("Input here: ").lower() logger.debug("neededLibrariesConfirm: %s" % neededLibrariesConfirm) if neededLibrariesConfirm == "no": logger.warning("Not installing necessary libraries. Now exiting...") print("Okay. I needed to install necessary libraries to continue.", "Now quitting...", "Press enter to exit.", sep="\n") input() sys.exit() elif neededLibrariesConfirm == "yes": print("Now installing necessary libraries...") if coloramaInstalled is False: print("Installing Colorama...") pip.main(['install', 'colorama']) if geopyInstalled is False: print("Installing geopy...") pip.main(['install', 'geopy']) if appjarInstalled is False: print("Installing appJar...") pip.main(['install', 'appJar']) if requestsInstalled is False: print("Installing requests...") pip.main(['install', 'requests']) if haloInstalled is False: print("Installing halo...") pip.main(['install', 'halo']) logger.info("Running the double check on libraries...") print("Sweet! All libraries should be installed.", "Just to confirm, I'm double checking if needed libraries are installed.", sep="\n") try: import colorama logger.info("Colorama installed successfully.") except ImportError: logger.warn("colorama was not installed successfully.") print("Hmm...Colorama didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install colorama?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") colorama_lastresort = input("Input here: ").lower() logger.debug("colorama_lastresort: %s" % colorama_lastresort) if colorama_lastresort == "yes": try: print("Now executing `sudo -H pip3 install colorama`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install colorama"], shell=True) try: print("Attempting to reimport colorama.") import colorama print("Colorama is FINALLY installed!") except: print("Colorama still wasn't successfully installed.", "Cannot continue without Colorama.", "Try doing a manual install of Colorama with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of Colorama with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without Colorama.", "Try and do a manual install of Colorama with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif colorama_lastresort == "no": print("Not installing Colorama with a shell command.", "Cannot continue without Colorama.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without Colorama.", "Try installing Colorama with PIP.", "Press enter to exit.") input() sys.exit() try: import geopy logger.info("geopy installed successfully.") except ImportError: logger.warn("geopy was not installed successfully.") print("Hmm...geopy didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install geopy?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") geopy_lastresort = input("Input here: ").lower() logger.debug("geopy_lastresort: %s" % geopy_lastresort) if geopy_lastresort == "yes": try: print("Now executing `sudo -H pip3 install geopy`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install geopy"], shell=True) try: print("Attempting to reimport geopy.") import geopy print("Geopy is FINALLY installed!") except: print("Geopy still wasn't successfully installed.", "Cannot continue without geopy.", "Try doing a manual install of geopy with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of geopy with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without geopy.", "Try and do a manual install of geopy with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif geopy_lastresort == "no": print("Not installing geopy with a shell command.", "Cannot continue without geopy.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without geopy.", "Try installing geopy with PIP.", "Press enter to exit.") input() sys.exit() # Why is appJar not here? When appJar is straight up imported in a non-GUI environment, it'll throw an error # even when it's installed. I don't check for an install because of this reason. try: import requests logger.info("requests installed successfully.") except ImportError: logger.warning("Requests was not installed successfully.") print("Hmm...requests didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install requests?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") requests_lastresort = input("Input here: ").lower() logger.debug("requests_lastresort: %s" % requests_lastresort) if requests_lastresort == "yes": try: print("Now executing `sudo -H pip3 install requests`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install requests"], shell=True) try: # Fun fact: This is inside THREE try/except things. print("Attempting to reimport requests.") import requests print("requests is FINALLY installed!") except: print("requests still wasn't successfully installed.", "Cannot continue without requests.", "Try doing a manual install of requests with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of requests with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without appJar.", "Try and do a manual install of requests with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif requests_lastresort == "no": print("Not installing appJar with a shell command.", "Cannot continue without requests.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without requests.", "Try installing requests with PIP.", "Press enter to exit.") input() sys.exit() try: import halo logger.info("Halo installed successfully.") except ImportError: logger.warn("halo was not installed successfully.") print("Hmm...Halo didn't install properly.") printException() print("As a last resort, we can use sudo -H to install packages.", "Do you want to use the shell option to install halo?", "WARNING: Using the last-resort method may screw up PIP, and", "may require you to reinstall PIP on your machine." "Yes or No.", sep="\n") halo_lastresort = input("Input here: ").lower() logger.debug("halo_lastresort: %s" % halo_lastresort) if halo_lastresort == "yes": try: print("Now executing `sudo -H pip3 install halo`.", "Please enter the password for sudo when the prompt", "comes up. Press Control + C to cancel.", "Starting in 5 seconds...", sep="\n") time.sleep(5) try: subprocess.call(["sudo -H pip3 install halo"], shell=True) try: print("Attempting to reimport halo.") import colorama print("Halo is now installed!") except: print("Halo still wasn't successfully installed.", "Cannot continue without Halo.", "Try doing a manual install of Halo with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except: print("When running the command, an error occurred", "Try doing a manual install of Halo with PIP.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() except KeyboardInterrupt: print("Command execution aborted.", "Cannot continue without Halo.", "Try and do a manual install of Halo with PIP", "in a command line.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() elif halo_lastresort == "no": print("Not installing Halo with a shell command.", "Cannot continue without Halo.", "Press enter to exit.", sep="\n") input() sys.exit() else: print("Did not understand your input. Defaulting to not installing", "via the shell. Cannot continue without Halo.", "Try installing Halo with PIP.", "Press enter to exit.") input() sys.exit() print("","All libraries are installed!", sep="\n") else: logger.warn("Input was not understood. Closing...") print("Your input wasn't understood for if you wanted to automatically import libraries.", "As a precaution PyWeather Setup needs to now close. Press enter to exit.", sep="\n") input() sys.exit() # Previously this updated all your pip packages. I then did this on my NAS (on FreeNAS 11). # It broke my NAS! Woo hoo! print("", "Would you like PyWeather to automatically update it's required packages?", "Doing this is generally recommended, and will have benefits down the line when", "some libraries fix known issues that occur in PyWeather. Yes or No.", sep="\n") confirm_updatepip = input("Input here: ").lower() logger.debug("confirm_updatepip: %s" % confirm_updatepip) if confirm_updatepip == "yes": print("") print("Updating PIP packages.") totalpackages = 5 updatecount = 1 pip_requiredlibraries = ['requests', 'halo', 'appjar', 'colorama', 'geopy'] for pkgname in pip_requiredlibraries: print("Now updating package: %s (Update %s/%s)" % (pkgname, updatecount, totalpackages)) pip.main(['install', '--upgrade', '%s' % pkgname]) updatecount = updatecount + 1 elif confirm_updatepip == "no": print("Not updating PIP packages. You may run into issues with non-updated", "packages in future versions of PyWeather.") else: print("Input not understood, not updating PIP packages. You may run into", "issues with non-updated packages in future versions of PyWeather.") # Verbosity is not needed here. print("I'm now going to guide you through obtaining an API key.", "Please carefully read my detailed instructions, so you don't mess anything up.", sep="\n") print("","If you know how to acquire a Wunderground API key, or are resetting PyWeather,", "hit enter 14 times to get to the API key entry.", sep="\n") print("Let's begin.", "Start by opening a web browser, and going to https://www.wunderground.com/weather/api/.", "Press any key when you are done.", sep="\n") input() print("Next, click the 'Explore my options' button.", "Press any key when you are done.", sep="\n") input() print("Next, click the small button next to 'ANVIL PLAN'.", "After that, confirm that the total underneath the 'Purchase Key' button says", "'$0 USD per month'.", "If the total underneath the 'Purchase Key' button doesn't", "say '$0 USD per month, please ensure that the small button next to 'Developer'", "on the table in the middle of the screen is selected, and the total", "says '$0 USD per month'", "Press any key when you are done.", sep="\n") input() print("Next, click the 'Purchase Key' button.", "Press any key when you are done.", sep="\n") input() print("Next, input your email, and a password to sign up for a Weather", "Underground account.", "Be sure to select the checkbox next to 'I agree to the Terms of Service'", "It's best if you leave the checkbox next to 'I would like to receive WU", "updates via email' unchecked.", "Press any key when you are done and ready.", sep="\n") input() print("Next, press the 'Sign up for free' button.", "When the welcome window pops up, be sure to click the X button at the top right of the popup.", "When clicking the X, you should be redirected to wunderground.com.", "Press any key when you are done and ready.", sep="\n") input() print("Next, click 'My Profile' at the top right corner of the homepage.", "In the dropdown, click 'My Email & Text Alerts'", "Press any key when you are done and ready.", sep="\n") input() print("Next, next to your email listed on the page, click the 'Edit / Verify' button.", "After you click the button, click the 'Verify Email' button.", "Press any key when you are done and ready.", sep="\n") input() print("Next, check your email in which you signed up with.", "If you got a letter from Weather Underground, titled 'Daily Forecast", "Email Verification', open that letter, and click the link.", "If you didn't get the letter, wait a few minutes, and be sure to check your spam folder.", "Hint: If you followed this guide exactly, WU will not be sending you daily forecasts to your email.", "Press any key when you are done and ready.", sep="\n") input() print("Your email should be verified.", "Next, in your web browser, head back to https://www.wunderground.com/weather/api/.", "Then, click the 'Explore my Options' button, again.", "Press any key when you are done and ready.", sep="\n") input() print("Next, at the top of the page, make sure the button next to 'ANVIL PLAN'", "is selected.", "After that, confirm that the total underneath the 'Purchase Key' button says", "'$0 USD per month'", "If the total doesn't say that, in the pricing table, make sure the button", "next to 'Developer' is selected.", "Press any key when you are done and ready.", sep="\n") input() print("Next, click the 'Purchase Key' button, on top of your total (which", "should be $0 USD per month)", "Next, fill out the form, considering these tips:", "For the contact name/email, it's recommended you use your real name", "(first name last initial is fine).", "It's also recommended that you use your real email.", "For the project name, put in something generic, like 'to use a script that", "uses WU's API', or 'WU API test'. It's up to you.", "For the project website, put in something generic, like 'google.com', or", "some other site you feel like having as the project site.", "For the question 'Where will the API be used', answer Other.", "For the question 'Will the API be used for commercial use?', answer No.", "For the question 'Will the API be used for manufacturing mobile chip", "processing?', answer No.", "Answer yes if you somehow are manufacturing mobile chip processing. I doubt", "you are, however.", "For the country that you are based in, put your location.", "Before we move on, fill out these forms, and press any key when you are done " "and ready.", sep="\n") input() print("Next, for the brief description, put something like 'using an API key", "to use a script using Wunderground'.", "After that, check both boxes at the bottom of the page. Read the ToS if you", "feel like it.", "Finally, click 'Purchase Key'.", "You should land on a page that says 'Edit API Key'.", "Press any key when you are done and ready.", sep="\n") input() print("In the table to the left of the page, copy the text that's under Key ID.", "(Ctrl+C, right click)", "I'm now going to ask you to input the API key into the text entry below.", "The API key will be saved to storage/apikey.txt, so PyWeather can easily", "pull it up.", "Press any key when you are done and ready.", sep="\n") input() print("Please input your API key below.") apikey_input = input("Input here: ") logger.debug("apikey_input: %s" % apikey_input) print("", "Just to confirm, the API key you gave me was: " + apikey_input + ".", sep="\n") print("Please double check your input, and confirm in the dialogue below.") apikey_confirm = input("Is the API key right? Yes or no: ").lower() logger.debug("apikey_confirm: %s" % apikey_confirm) if apikey_confirm == "no": while True: logger.debug("User now re-entering key...") print("","Please input your API key below.", sep="\n") apikey_input = input("Input here: ") logger.debug("apikey_input: %s" % apikey_input) print("Just to confirm, the API key you gave me was: " + apikey_input + ".") apikey_confirm = input("Is the API key right? Yes or no: ").lower() if apikey_confirm == "yes": break elif apikey_confirm == "no": continue else: print("Couldn't understand your input.", "I'll assume the API key is correct, moving on.", sep="\n") print("Now saving your API key...") open('storage//apikey.txt', 'w').close() with open("storage//apikey.txt", 'a') as out: logger.debug("out: %s" % out) out.write(apikey_input) out.close() logger.debug("Performed ops: overwrite apikey.txt, out.write(apikey_input), out.close()") print("", "I can also back up your API key, in case you do something wrong.", sep="\n") # A future release should bring customization as to the storage location. print("Would you like me to save a backup? Yes or no.") backup_APIkey = input("Input here: ").lower() if backup_APIkey == "yes": print("","Where would you want me to backup the key to?", "This is a directory. If I wanted my key at directory/backkey.txt,", "You would enter 'directory'. The default directory is 'backup'.", sep="\n") # Doing a .lower() here to prevent case insensitiveness. backup_APIkeydirectory = input("Input here: ").lower() folder_argument = backup_APIkeydirectory + "//backkey.txt" backup_APIkeydirectory2 = backup_APIkeydirectory + "//" logger.debug("backup_APIkeydirectory: %s ; backup_APIkeydirectory2: %s" % (backup_APIkeydirectory, backup_APIkeydirectory2)) logger.debug("folder_argument: %s" % folder_argument) # These two variables will get reset if the directory is backup, or empty. if backup_APIkeydirectory == "backup" or backup_APIkeydirectory == "": print("Using the default directory of //backup.") folder_argument = "backup//backkey.txt" backup_APIkeydirectory2 = "backup//" logger.debug("folder_argument: %s ; backup_APIkeydirectory2: %s" % (folder_argument, backup_APIkeydirectory2)) elif backup_APIkeydirectory != "backup": try: os.mkdir(backup_APIkeydirectory2) except: printException_loggerwarn() print("Couldn't make the directory, does it exist?") # Overwrite the file, if it exists. open(folder_argument, 'w').close() open(folder_argument, 'a').write(apikey_input) open(folder_argument).close() config['KEYBACKUP']['savedirectory'] = backup_APIkeydirectory2 print("The API key was backed up successfully!") logger.debug("Performed 3 ops. Overwrite "+ folder_argument + "backkey.txt, write to backkey.txt" + ", and close backkey.txt.") print("", "Before we configure PyWeather, I'll now validate your API key.", sep="\n") # Do an infinite loop of validation of the API key, so the user can reenter the API key # if it was wrong. while True: apitest_URL = 'http://api.wunderground.com/api/' + apikey_input + '/conditions/q/NY/New_York.json' testreader = codecs.getreader("utf-8") logger.debug("apitest_URL: %s ; testreader: %s" % (apitest_URL, testreader)) try: testJSON = requests.get(apitest_URL) logger.debug("testJSON: %s" % testJSON) except: logger.warn("Couldn't connect to Wunderground's API! No internet?") print("When PyWeather Setup attempted to fetch the .json to validate your API key,", "it ran into an error. If you're on a network with a filter, make sure that", "'api.wunderground.com' is unblocked. Otherwise, make sure you have an internet", "connection.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() test_json = json.loads(testJSON.text) if jsonVerbosity == True: logger.debug("test_json: %s" % test_json) try: test_conditions = str(test_json['current_observation']['temp_f']) logger.debug("test_conditions: %s" % test_conditions) print("Hurray! Your API key is valid and works.") break except: logger.warn("Error! Is the API key invalid?") print("When attempting to validate the API key that you entered/confirmed,", "PyWeather ran into an error. Would you like to reenter your API key to revalidate it?", "Please note, that this error might be caused by WU's API being down, or another cause.", "However, 90% of the time, this is due to a bad API key.", "Yes or No.", sep='\n') revalidateAPIkey = input("Input here: ").lower() if revalidateAPIkey == "yes": print("Enter in your API key below.") apikey_input = input("Input here: ") logger.debug("apikey_input: %s") print("Revalidating your API key...") continue elif revalidateAPIkey == "no": print("Not revalidating your API key. You'll need a valid API key to continue.", "Press enter to exit.", sep="\n") input() sys.exit() printException() print("Press enter to exit.") input() sys.exit() print("Let's configure PyWeather to your liking.") logger.debug("config: %s" % config) print("", "(1/42)","On the summary screen, would you like to show sunrise/sunset times?", "By default, this is disabled.", "Yes or No.", sep="\n") sundata_Summary = input("Input here: ").lower() logger.debug("sundata_Summary: %s" % sundata_Summary) if sundata_Summary == "yes": config['SUMMARY']['sundata_summary'] = 'True' print("Changes saved.") logger.debug("Sundata on the summary is now ENABLED.") elif sundata_Summary == "no": config['SUMMARY']['sundata_summary'] = 'False' print("Changes saved.") logger.debug("Sundata on the summary is now DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'", sep="\n") config['SUMMARY']['sundata_summary'] = 'False' print("Changes saved.") logger.debug("Could not recognize input. Defaulting to DISABLED.") print("", "(2/42)","On the summary screen, would you like to show almanac data?", "By default, this is disabled.", "Yes or no:", sep="\n") almanacdata_Summary = input("Input here: ").lower() logger.debug("almanacdata_Summary: %s" % almanacdata_Summary) if almanacdata_Summary == "yes": config['SUMMARY']['almanac_summary'] = 'True' print("Changes saved.") logger.debug("Almanac on the summary is now ENABLED.") elif almanacdata_Summary == "no": config['SUMMARY']['almanac_summary'] = 'False' print("Changes saved.") logger.debug("Almanac on the summary is now DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'", sep="\n") config['SUMMARY']['almanac_summary'] = 'False' print("Changes saved.") logger.debug("Could not recognize input. Defaulting to DISABLED.") print("", "(3/42)", "On the summary screen, would you like to show alerts data?", "By default, this is enabled. Please note, Wunderground", "only supports alert data in the US and EU at this time.", "Yes or No.", sep="\n") alertsdata_Summary = input("Input here: ").lower() logger.debug("alertsdata_Summary: %s" % alertsdata_Summary) if alertsdata_Summary == "yes": config['SUMMARY']['showalertsonsummary'] = 'True' print("Changes saved.") logger.debug("Alerts on the summary is now ENABLED.") elif alertsdata_Summary == "no": config['SUMMARY']['showalertsonsummary'] = 'False' print("Changes saved.") logger.debug("Alerts on the summary is now DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'True'", sep="\n") config['SUMMARY']['showAlertsOnSummary'] = 'True' print("", "(4/42)","On boot, would you like PyWeather to check for updates?", "By default, this is disabled, due to a load time increase of ~2-5 seconds.", "Yes or No.", sep="\n") checkForUpdates = input("Input here: ").lower() logger.debug("checkForUpdates: %s" % checkForUpdates) if checkForUpdates == "yes": config['UPDATER']['autoCheckForUpdates'] = 'True' print("Changes saved.") logger.debug("Checking for updates on startup is ENABLED.") elif checkForUpdates == "no": config['UPDATER']['autoCheckForUpdates'] = 'False' print("Changes saved.") logger.debug("Checking for updates on startup is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'", sep="\n") config['UPDATER']['autoCheckForUpdates'] = 'False' print("Changes saved.") logger.debug("Could not recognize input. Defaulting to DISABLED.") print("", "(5/42)","When an error occurs, would you like PyWeather to show the full error?", "When enabled, you'll have easier access to the full error for reporting", "the bug on GitHub.", "By default, this is disabled, as errors look less pretty when enabled.", "Yes or no.", sep="\n") displayTracebacks = input("Input here: ").lower() logger.debug("displayTracebacks: %s" % displayTracebacks) if displayTracebacks == "yes": config['TRACEBACK']['tracebacks'] = 'True' config['TRACEBACK']['setup_tracebacks'] = 'True' config['TRACEBACK']['updater_tracebacks'] = 'True' config['TRACEBACK']['keybackup_tracebacks'] = 'True' config['TRACEBACK']['configdefault_tracebacks'] = 'True' print("Changes saved.") logger.debug("Printing tracebacks is ENABLED.") elif displayTracebacks == "no": config['TRACEBACK']['tracebacks'] = 'False' config['TRACEBACK']['setup_tracebacks'] = 'False' config['TRACEBACK']['updater_tracebacks'] = 'False' config['TRACEBACK']['keybackup_tracebacks'] = 'False' config['TRACEBACK']['configdefault_tracebacks'] = 'False' print("Changes saved.") logger.debug("Printing tracebacks is DISABLED.") else: print("Couldn't understand what you inputted.", "Defaulting to 'False'", sep="\n") config['TRACEBACK']['tracebacks'] = 'False' config['TRACEBACK']['setup_tracebacks'] = 'False' config['TRACEBACK']['updater_tracebacks'] = 'False' config['TRACEBACK']['keybackup_tracebacks'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(6/42)", "When booting PyWeather up initially, would you like PyWeather to", "fetch the 10-day hourly forecast, instead of the 3-day forecast?", "This is disabled by default. When enabled, initial loading times are", "increased. However, when you view the 10-day hourly forecast, you won't", "have to wait for it to load, and use another API call.", "Yes or No.", sep="\n") tenday_onboot = input("Input here: ").lower() if tenday_onboot == "yes": config['PREFETCH']['10dayfetch_atboot'] = 'True' print("Changes saved.") logger.debug("Fetching 10 day JSON at boot is ENABLED.") elif tenday_onboot == "no": config['PREFETCH']['10dayfetch_atboot'] = 'False' print("Changes saved.") logger.debug("Fetching 10 day JSON at boot is DISABLED.") else: print("Couldn't understand what you inputted.", "Defaulting to the default value 'False'", sep="\n") config['PREFETCH']['10dayfetch_atboot'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(7/42)", "When viewing detailed hourly, 10-day hourly, and historical hourly,", "detailed information, how many iterations should PyWeather go through", "before asking you to continue?", "By default, this is 6. An input above 10", "is not recommended.", sep="\n") detailedloops = input("Input here: ") try: detailedloops = int(detailedloops) detailedloops = str(detailedloops) config['UI']['detailedinfoloops'] = detailedloops print("Changes saved.") logger.debug("Detailed info iterations now %s." % detailedloops) except: print("Couldn't convert input into a number. Defaulting to '6'.") printException_loggerwarn() config['UI']['detailedinfoloops'] = '6' print("Changes saved.") logger.debug("Detailed info loops now 6.") print("", "(8/42)", "When viewing detailed 10-day forecast information, how many", "iterations should PyWeather go through, before asking you to", "continue?", "By default, this is 5. An input above 10 will not prompt", "the enter to continue prompt", sep="\n") detailedForecastLoops = input("Input here: ") try: detailedForecastLoops = int(detailedForecastLoops) detailedForecastLoops = str(detailedForecastLoops) config['UI']['forecast_detailedinfoloops'] = detailedForecastLoops print("Changes saved.") logger.debug("Detailed forecast info iterations now %s" % detailedForecastLoops) except: print("Couldn't convert input into a number. Defaulting to '5'.") printException_loggerwarn() config['UI']['forecast_detailedinfoloops'] = '5' print("Changes saved.") logger.debug("Detailed forecast info loops now 5.") print("", "(9/42)", "PyWeather has a caching system, in which if you're gone for some time", "data will automatically refresh. Would you like to turn this on?", "This is enabled by default. Yes or No.", sep="\n") enablecache = input("Input here: ").lower() if enablecache == "no": print("Cache will be disabled.") config['CACHE']['enabled'] = 'False' print("Changes saved.") else: config['CACHE']['enabled'] = 'True' print("You entered yes, or your input wasn't understood (yes is the default.)", "In the next few inputs, enter the time in minutes that PyWeather should keep", "certain types of data, before a data refresh is automatically requested.", "If you want to leave cache values to their defaults, press enter at any prompt.", sep="\n") print("", "(10/42)", "Please enter the cache time for alerts data in minutes (default = 5)", sep="\n") alertscachetime = input("Input here: ").lower() try: alertscachetime = float(alertscachetime) alertscachetime = str(alertscachetime) config['CACHE']['alerts_cachedtime'] = alertscachetime print("Changes saved.") logger.debug("Alerts cache time now %s minutes." % alertscachetime) except: print("", "Your input couldn't be converted into a number. Setting alerts", "cache time to it's default value of '5'.", sep="\n") config['CACHE']['alerts_cachedtime'] = '5' logger.debug("Alerts cache time now 5 minutes.") print("", "(11/42)", "Please enter the cache time for current data in minutes (default = 10)", sep="\n") currentcachetime = input("Input here: ").lower() try: currentcachetime = float(currentcachetime) currentcachetime = str(currentcachetime) config['CACHE']['current_cachedtime'] = currentcachetime print("Changes saved.") logger.debug("Current cache time now %s minutes." % alertscachetime) except: print("", "Your input couldn't be converted into a number. Setting current", "cache time to it's default value of '10'.", sep="\n") config['CACHE']['current_cachedtime'] = '10' logger.debug("Current cache time now 10 minutes.") print("", "(12/42)", "Please enter the cache time for forecast data in minutes (default = 60)", sep="\n") forecastcachetime = input("Input here: ").lower() try: forecastcachetime = float(forecastcachetime) forecastcachetime = str(forecastcachetime) config['CACHE']['forecast_cachedtime'] = forecastcachetime print("Changes saved.") logger.debug("Forecast cache time now %s minutes." % forecastcachetime) except: print("", "Your input couldn't be converted into a number. Setting forecast", "cache time to it's default value of '60'.", sep="\n") config['CACHE']['forecast_cachedtime'] = '60' logger.debug("Forecast cache time now 60 minutes.") print("", "(13/42)", "Please enter the cache time for almanac data in minutes (default = 240)", sep="\n") almanaccachetime = input("Input here: ").lower() try: almanaccachetime = float(almanaccachetime) almanaccachetime = str(almanaccachetime) config['CACHE']['almanac_cachedtime'] = almanaccachetime print("Changes saved.") logger.debug("Almanac cache time now %s minutes." % almanaccachetime) except: print("", "Your input couldn't be converted into a number. Setting almanac", "cache time to it's default value of '240'.", sep="\n") config['CACHE']['almanac_cachedtime'] = '240' logger.debug("Almanac cache time now 240 minutes.") print("", "(14/42)", "Please enter the cache time for 1.5 day hourly data in minutes (default = 60)", sep="\n") threedayhourly_cachedtime = input("Input here: ").lower() try: threedayhourly = float(threedayhourly_cachedtime) threedayhourly = str(threedayhourly_cachedtime) config['CACHE']['threedayhourly_cachedtime'] = threedayhourly_cachedtime print("Changes saved.") logger.debug("3 day hourly cache time now %s minutes." % threedayhourly_cachedtime) except: print("", "Your input couldn't be converted into a number. Setting three day hourly", "cache time to it's default value of '60'.", sep="\n") config['CACHE']['threedayhourly_cachedtime'] = "60" logger.debug("3 day hourly cache time now 60 minutes") print("", "(15/42)", "Please enter the cache time for the ten day hourly data in minutes (default = 60)", sep="\n") tendayhourly_cachedtime = input("Input here: ").lower() try: tendayhourly = float(tendayhourly_cachedtime) tendayhourly = str(tendayhourly_cachedtime) config['CACHE']['tendayhourly_cachedtime'] = tendayhourly_cachedtime print("Changes saved.") logger.debug("10 day hourly cache time now %s minutes." % tendayhourly_cachedtime) except: print("", "Your input couldn't be converted into a number. Setting ten day hourly", "cache time to it's default value of '60'.", sep="\n") config['CACHE']['tendayhourly_cachedtime'] = "60" logger.debug("10 day hourly cache time now 60 minutes") print("", "(16/42)", "Please enter the cache time for sun data in minutes (default = 480)", sep="\n") sundatacachetime = input("Input here: ").lower() try: sundatacachetime = float(sundatacachetime) sundatacachetime = str(sundatacachetime) config['CACHE']['sundata_cachedtime'] = forecastcachetime print("Changes saved.") logger.debug("Sun data cache time now %s minutes." % sundatacachetime) except: print("", "Your input couldn't be converted into a number. Setting sun data", "cache time to it's default value of '480'.", sep="\n") config['CACHE']['sundata_cachedtime'] = '480' logger.debug("Sun data cache time now 480 minutes.") print("", "(17/42)", "Please enter the cache time for tide data in minutes (default = 480)", sep="\n") tidecachetime = input("Input here: ").lower() try: tidecachetime = float(tidecachetime) tidecachetime = str(tidecachetime) config['CACHE']['tide_cachedtime'] = tidecachetime print("Changes saved.") logger.debug("Tide cache time now %s minutes." % tidecachetime) except: print("", "Your input couldn't be converted into a number. Setting tide data", "cache time to it's default value of '480'.", sep="\n") config['CACHE']['tide_cachedtime'] = '480' logger.debug("Tide data cache time now 480 minutes.") print("", "(18/42)", "Please enter the cache time for hurricane data in minutes (default = 480)", sep="\n") hurricanecachetime = input("Input here: ").lower() try: hurricanecachetime = float(hurricanecachetime) hurricanecachetime = str(hurricanecachetime) config['CACHE']['hurricane_cachedtime'] = hurricanecachetime print("Changes saved.") logger.debug("Hurricane cache time now %s minutes" % hurricanecachetime) except: print("", "Your input couldn't be converted into a number. Setting hurricane data", "cache time to it's default value of '180'.", sep="\n") config['CACHE']['hurricane_cachedtime'] = '180' logger.debug("Hurricane data cache time now 180 minutes.") print("", "(19/42)", "Please enter the cache time for yesterday's weather data in minutes (default = 720)", sep="\n") yesterdaycachedtime = input("Input here: ").lower() try: yesterdaycachedtime = float(yesterdaycachedtime) yesterdaycachedtime = str(yesterdaycachedtime) config['CACHE']['yesterday_cachedtime'] = yesterdaycachedtime print("Changes saved.") logger.debug("Yesterday cache time now %s minutess" % yesterdaycachedtime) except: print("", "Your input couldn't be converted into a number. Setting yesterday's weather data", "cache time to it's default value of 720.", sep="\n") config['CACHE']['yesterday_cachedtime'] = '720' logger.debug("Yesterday data cache time now 720 minutes.") print("", "(20/42)", "When viewing detailed EU alerts information, how many", "iterations should PyWeather go through, before asking you to", "continue?", "By default, this is 2.", sep="\n") EUalertsloops = input("Input here: ") try: EUalertsloops = int(EUalertsloops) EUalertsloops = str(EUalertsloops) config['UI']['alerts_EUiterations'] = EUalertsloops print("Changes saved.") logger.debug("Detailed EU alert iterations now %s" % EUalertsloops) except: print("Couldn't convert input into a number. Defaulting to '2'.") printException_loggerwarn() config['UI']['alerts_EUiterations'] = '2' print("Changes saved.") logger.debug("Detailed EU alert iterations now 2.") print("", "(21/42)", "When viewing detailed US alerts information, how many", "iterations should PyWeather go through, before asking you to", "continue?", "By default, this is 1.", sep="\n") USalertsloops = input("Input here: ") try: USalertsloops = int(USalertsloops) USalertsloops = str(USalertsloops) config['UI']['alerts_USiterations'] = USalertsloops print("Changes saved.") logger.debug("Detailed US alert iterations now %s" % USalertsloops) except: print("Couldn't convert input to a number. Defaulting to '1'.") printException_loggerwarn() config['UI']['alerts_USiterations'] = '1' print("Changes saved.") logger.debug("Detailed US alert iterations now 1.") print("", "(22/42)","When PyWeather is going through detailed information, it can show", "how many iterations are completed.", "By default, this is disabled.", "Yes or No.", sep="\n") showIterations = input("Input here: ").lower() if showIterations == "yes": config['UI']['show_completediterations'] = 'True' print("Changes saved.") logger.debug("Showing completed iterations is ENABLED.") elif showIterations == "no": config['UI']['show_completediterations'] = 'False' print("Changes saved.") logger.debug("Showing completed iterations is DISABLED.") else: print("Couldn't understand what you inputted.", "Defaulting to 'FALSE'.", sep="\n") config['UI']['show_completediterations'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(23/42)", "When PyWeather is going through detailed information, would", "you like the 'Enter to Continue' prompts to pop up?", "By default, this is enabled.", "Yes or No.", sep="\n") showEnterToContinue = input("Input here: ").lower() if showEnterToContinue == "yes": config['UI']['show_entertocontinue'] = 'True' print("Changes saved.") logger.debug("Showing enter to continue prompts is ENABLED.") elif showEnterToContinue == "no": config['UI']['show_entertocontinue'] = 'False' print("Changes saved.") logger.debug("Showing enter to continue prompts is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'True'.", sep="\n") config['UI']['show_entertocontinue'] = 'True' print("Changes saved.") logger.debug("Could not understand input. Defaulting to ENABLED.") print("", "(24/42)", "In the PyWeather Updater, the updater can show the release tag", "associated with the latest release. Helpful for those using Git to", "update PyWeather. By default, this is disabled.", "Yes or No.", sep="\n") showReleaseTag = input("Input here: ").lower() if showReleaseTag == "yes": config['UPDATER']['show_updaterreleasetag'] = 'True' print("Changes saved.") logger.debug("Showing release tag in updater is ENABLED.") elif showReleaseTag == "no": config['UPDATER']['show_updaterreleasetag'] = 'False' print("Changes saved.") logger.debug("Showing release tag in updater is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'False'.", sep="\n") config['UPDATER']['show_updaterreleasetag'] = 'False' print("Changes saved.") logger.debug("Could not understand input. Defaulting to DISABLED.") print("", "(25/42)", "When PyWeather boots, it can validate your API key. If PyWeather", "finds your primary API key is invalid, it'll attempt to validate your", "backup key, and load that if it's validated successfully.", "By default, this is enabled, as it's well worth the 1 API call to make", "sure your key is valid. However, if you said 'Yes' to almanac/sun data", "on the summary screen, you might not want to enable this.", "Yes or No.", sep="\n") validateKeyOnBoot = input("Input here: ").lower() if validateKeyOnBoot == "yes": config['PYWEATHER BOOT']['validateAPIKey'] = 'True' print("Changes saved.") logger.debug("Validating API key on boot is ENABLED.") elif validateKeyOnBoot == "no": config['PYWEATHER BOOT']['validateAPIKey'] = 'False' print("Changes saved.") logger.debug("Validating API key on boot is DISABLED.") else: print("Could not understand what you inputted.", "Defaulting to 'True'.", sep="\n") config['PYWEATHER BOOT']['validateAPIKey'] = 'False' logger.debug("Could not understand input. Defaulting to ENABLED.") print("", "(26/42)", "PyWeather now has a radar feature, which opens up a GUI on supported", "platforms. Depending on your screen resolution, you'll have to set how large", "the radar picture is when rendered. In the prompt below, enter one of five sizes.", "extrasmall - 320x240 window", "small - 480x320 window", "normal - 640x480 window", "large - 960x720 window", "extralarge - 1280x960 window", "By default, the resolution is normal. Adapt your choice to the screen resolution", "of the machine you're using.", sep="\n") radar_resolutions = ["extrasmall", "small", "normal", "large", "extralarge"] logger.debug("radar_resolutions: %s" % radar_resolutions) radar_resolutioninput = input("Input here: ").lower() for x in range(0, 5): if radar_resolutioninput == radar_resolutions[x]: logger.debug("Resolution input matched, end result: %s" % radar_resolutions[x]) config['RADAR GUI']['radar_imagesize'] = radar_resolutions[x] print("Changes saved.") break # This works by design. If x = 4 (extralarge), the if would catch first. elif x == 5: print("Could not understand what you inputted. Defaulting to 'normal'.") config['RADAR GUI']['radar_imagesize'] = 'normal' print("Changes saved.") print("", "(27/42)", "PyWeather's radar feature is unfortunately experimental as of PyWeather 0.6.3 beta.", "By default, a confirmation message will always appear when attempting to launch the radar.", "However, this can be turned off, if you plan to use the experimental radar on a regular basis.", "By default, bypassing the confirmation message is disabled. Yes or No.", sep="\n") radar_bypassconfinput = input("Input here: ").lower() logger.debug("radar_bypassconfinput: %s" % radar_bypassconfinput) if radar_bypassconfinput == "yes": config['RADAR GUI']['bypassconfirmation'] = 'True' logger.debug("RADAR GUI/bypassconfirmation is now TRUE") print("Changes saved.") elif radar_bypassconfinput == "no": config['RADAR GUI']['bypassconfirmation'] = 'False' logger.debug("RADAR GUI/bypassconfirmation is now FALSE") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['RADAR GUI']['bypassconfirmation'] = 'False' logger.debug("RADAR GUI/bypassconfirmation is now FALSE") print("Changes saved.") print("", "(28/42)", "On the summary screen, would you like tide data to be shown?", "This uses an extra API call when enabled. By default, this is disabled.", "Yes or No.", sep="\n") tideonsummary = input("Input here: ").lower() logger.debug("tideonsummary: %s" % tideonsummary) if tideonsummary == "yes": config['SUMMARY']['showtideonsummary'] = "True" logger.debug("SUMMARY/showtideonsummary is now TRUE") print("Changes saved.") elif tideonsummary == "no": config['SUMMARY']['showtideonsummary'] = "False" logger.debug("SUMMARY/showtideonsummary is now FALSE") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['SUMMARY']['showtideonsummary'] = "False" logger.debug("SUMMARY/showtideonsummary is now FALSE") print("Changes saved.") print("", "(29/42)", "When PyWeather boots, would you like hurricane data to be fetched?", "Initial loading times will increase when this is on, but hurricane data will load faster.", "This can use an extra API call, especially when you fetch hurricane data but don't check it", "in PyWeather. By default, this is disabled.", "Yes or No.", sep="\n") hurricaneprefetch = input("Input here: ").lower() logger.debug("hurricaneprefetch: %s" % hurricaneprefetch) if hurricaneprefetch == "yes": config['PREFETCH']['hurricanedata_atboot'] = 'True' logger.debug("PREFETCH/hurricanedata_atbooot is now TRUE.") print("Changes saved.") elif hurricaneprefetch == "no": config['PREFETCH']['hurricanedata_atboot'] = 'False' logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['PREFETCH']['hurricanedata_atboot'] = 'False' logger.debug("PREFETCH/hurricanedata_atboot is now FALSE.") print("Changes saved.") print("", "(30/42)", "PyWeather has a new feature where you can now easily call your current location at boot.", "The current location feature allows you to enter 'currentlocation' at boot, and view the weather for your", "approximate location. However, GeoIP lookups might be inaccurate, especially for mobile users. The GeoIP service", "uses freegeoip.net. Would you like to enable this service? By default, this is disabled. Yes or No.", sep="\n") allowgeoipservice = input("Input here: ").lower() logger.debug("allowgeoipservice: %s" % allowgeoipservice) if allowgeoipservice == "yes": config['FIRSTINPUT']['geoipservice_enabled'] = 'True' logger.debug("FIRSTINPUT/geoipservice_enabled is now TRUE.") print("Changes saved.") elif allowgeoipservice == "no": config['FIRSTINPUT']['geoipservice_enabled'] = 'False' logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.") else: print("Could not understand what you inputted. Defaulting to 'False'.") config['FIRSTINPUT']['geoipservice_enabled'] = 'False' logger.debug("FIRSTINPUT/geoipservice_enabled is now FALSE.") print("Changes saved.") print("", "(31/42)", "PyWeather has a new feature where you can query indivdiual Wunderground PWS stations.", "You can query any PWS globally by entering pws:<pws ID> when enabled, and where <pws ID> is the ID of the", "PWS you want to query. However, this can be turned off if you don't want to have extra lines of text at boot,", "or don't want the ability to query PWSes. By default, this is enabled. Yes or No.", sep="\n") allowpwsqueries = input("Input here: ").lower() logger.debug("allowpwsqueries: %s" % allowpwsqueries) if allowpwsqueries == "yes": config['FIRSTINPUT']['allow_pwsqueries'] = 'True' logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.") print("Changes saved.") elif allowpwsqueries == "no": config['FIRSTINPUT']['allow_pwsqueries'] = 'False' logger.debug("FIRSTINPUT/allow_pwsqueries is now FALSE.") print("Changes saved.") else: print("Could not understand what you inputted. Defaulting to 'True'.") config['FIRSTINPUT']['allow_pwsqueries'] = 'True' logger.debug("FIRSTINPUT/allow_pwsqueries is now TRUE.") print("Changes saved.") print("", "(32/42)", "PyWeather has a new feature where in hurricane data, you can see the nearest city that a hurricane is to.", "However, this feature uses a separate API (geonames.org), can only work when the hurricane is within 300km of a city,", "and will drastically increase loading times. You may also run into issues with the default API key hitting rate limits.", "Despite all of this, would you like to enable the nearest city features for non-forecast hurricane data?", "Yes or No. By default, this is disabled.", sep="\n") allownearestcities = input("Input here: ").lower() logger.debug("allownearestcities: %s" % allownearestcities) if allownearestcities == "yes": additional_ncoptions = True logger.debug("additional_ncoptions: %s" % additional_ncoptions) config['HURRICANE']['enablenearestcity'] = 'True' logger.debug("HURRICANE/enablenearestcity is now TRUE.") print("Changes saved.") elif allownearestcities == "no": additional_ncoptions = False logger.debug("additional_ncoptions: %s" % additional_ncoptions) config['HURRICANE']['enablenearestcity'] = 'False' logger.debug("HURRICANE/enablenearestcity is now FALSE.") print("Changes saved.") else: additional_ncoptions = False logger.debug("additional_ncoptions: %s" % additional_ncoptions) print("Could not understand what you inputted. Defaulting to 'False'.") config['HURRICANE']['enablenearestcity'] = 'False' logger.debug("HURRICANE/enablenearestcity is now FALSE.") print("Changes saved.") # <--- Additional options for nearest city feature ---> if additional_ncoptions is True: print("", "(33/42)", "By default, the nearest city feature is only enabled on the current data screen of hurricane data.", "You can enable the nearest city feature to be enabled on forecast data. However, loading hurricane data becomes much", "slower. By default, this is disabled. Yes or No.", sep="\n") enable_ncforecast = input("Input here: ").lower() if enable_ncforecast == "yes": config['HURRICANE']['enablenearestcity_forecast'] = 'True' logger.debug("HURRICANE/enablenearestcity_forecast is now TRUE.") print("Changes saved.") elif enable_ncforecast == "no": config['HURRICANE']['enablenearestcity_forecast'] = 'False' logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'False'.") config['HURRICANE']['enablenearestcity_forecast'] = 'False' logger.debug("HURRICANE/enablenearestcity_forecast is now FALSE.") print("Changes saved.") print("", "(34/42)", "By default, PyWeather uses it's own API username for the nearest city features, which should be able to", "handle PyWeather's user demands just fine. However, if you'd like to use your own account for the API, you may.", "You can sign up at geonames.org, and follow all the steps. The confirmation letter may take some time to hit your inbox.", "Would you like to define your own API username? Yes or No. By default, this is no.", sep="\n") definegeonamesusername = input("Input here: ").lower() logger.debug("definegeonamesusername: %s" % definegeonamesusername) if definegeonamesusername == "yes": # Enter into confirmation loop while True: print("Please enter the username that you'll use to access the geonames API.") geonamesusername = input("Input here: ").lower() logger.debug("geonamesusername: %s" % geonamesusername) print("The API username you gave me was: %s" % geonamesusername, "Is this the username that you'd like to use? Yes or No.", "Please note that your username will not be validated.", sep="\n") geonamesconfirmation = input("Input here: ").lower() confirmurl = 'http://api.geonames.org/findNearbyPlaceNameJSON?lat=19.3&lng=102.2&username= ' + geonamesusername + '&radius=300&maxRows=1&cities=cities5000' logger.debug("geonamesconfirmation: %s ; confirmurl: %s" % (geonamesconfirmation, confirmurl)) if geonamesconfirmation == "yes": config['HURRICANE']['api_username'] = geonamesusername logger.debug("HURRICANE/api_username is now %s" % geonamesusername) print("Changes saved.") elif geonamesconfirmation == "no": continue else: print("Input not understood. Will not validate username. If the username is", "invalid, please change the HURRICANE/api_username option in the config.", sep="\n") config['HURRICANE']['api_username'] = geonamesusername logger.debug("HURRICANE/api_username is now %s" % geonamesusername) print("Changes saved.") elif definegeonamesusername == "no": print("Defaulting to the default username for the geonames API.") else: print("Input not understood.", "Defaulting to the default username for the geonames API.", sep="\n") print("", "(35/42)", "For the nearest city feature, you can define how large a city has to be to show up as a nearest city.", "You have three options for this. 'small' will set the threshold to cities with a 1,000 population and greater, but this", "tends to include cities with very few or no people. 'medium' will set the threshold to cities with a 5,000 population", "and greater, and 'large' for cities that have a population of 10,000 or greater. Please enter either 'small', 'medium'", "or 'large' below. Default is 'medium'.", sep="\n") nearestcitysize = input("Input here: ").lower() logger.debug("nearestcitysize: %s" % nearestcitysize) if nearestcitysize == "small": config['HURRICANE']['nearestcitysize'] = 'small' logger.debug("HURRICANE/nearestcitysize is now 'small'.") print("Changes saved.") elif nearestcitysize == "medium": config['HURRICANE']['nearestcitysize'] = 'medium' logger.debug("HURRICANE/nearestcitysize is now 'medium'") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'medium'.") config['HURRICANE']['nearestcitysize'] = 'medium' logger.debug("HURRICANE/nearestcitysize is now 'medium'.") print("Changes saved.") print("", "(36/42)", "PyWeather will now let you enable a favorite locations feature, which allows", "you to quickly call up to 5 locations in PyWeather. You have the ability to configure your", "favorite locations in a menu option in PyWeather. By default, this feature is enabled.", "Yes or No.", sep="\n") enable_favoritelocations = input("Input here: ").lower() logger.debug("enable_favoritelocations: %s" % enable_favoritelocations) if enable_favoritelocations == "yes": config['FAVORITE LOCATIONS']['enabled'] = 'True' logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.") print("Changes saved!") elif enable_favoritelocations == "no": config['FAVORITE LOCATIONS']['enabled'] = 'False' logger.debug("FAVORITE LOCATIONS/enabled is now 'False'.") print("Changes saved!") else: print("Could not understand your input. Defaulting to 'True'.") config['FAVORITE LOCATIONS']['enabled'] = 'True' logger.debug("FAVORITE LOCATIONS/enabled is now 'True'.") print("Changes saved!") print("", "(37/43)", "PyWeather can now store your previously searched locations.", "You have the ability to configure your previous locations in a menu option", "in PyWeather. By default this feature is enabled.", "Yes or No.", sep="\n") enable_previouslocations = input("Input here: ").lower() logger.debug("enable_previouslocations: %s" % enable_previouslocations) if enable_previouslocations == "yes": config['PREVIOUS LOCATIONS']['enabled'] = 'True' logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.") print("Changes saved!") elif enable_previouslocations == "no": config['PREVIOUS LOCATIONS']['enabled'] = 'False' logger.debug("PREVIOUS LOCATIONS/enabled is now 'False'.") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'True'.") config['PREVIOUS LOCATIONS']['enabled'] = 'True' logger.debug("PREVIOUS LOCATIONS/enabled is now 'True'.") print("", "(37/42)", "PyWeather by default uses Google's geocoder, which can occasionally have rate limiting issues.", "To get around this, you can manually use your own API key that you sign up for with Google. This is completely", "optional, and you can continue past this step and not impede PyWeather's functionality. However, would you like", "to enable the use of a custom API key for the geocoder? Yes or No.", sep="\n") enablecustomgeocoderkey = input("Input here: ").lower() logger.debug("enablecustomgeocoderkey: %s" % enablecustomgeocoderkey) if enablecustomgeocoderkey == "yes": print("", "(38/42)", "To sign up for a Google Maps API key, please visit this link: ", "https://developers.google.com/maps/documentation/javascript/get-api-key", "Press the button 'Get Key', and wait a minute. Copy and paste the key into the input", "below. Your API key will NOT be validated. Enter 'exit' to exit this process, and to disable", "a custom API key.", sep="\n") customgeocoderkey = input("Input here: ") logger.debug("customgeocoderkey: %s" % customgeocoderkey) while True: print("", "The API key you entered is: %s" % customgeocoderkey, "Is this the API key you want to use? Yes or No.", sep="\n") confirmcustomgeocoderkey = input("Input here: ").lower() logger.debug("confirmcustomgeocoderkey: %s" % confirmcustomgeocoderkey) if confirmcustomgeocoderkey == "yes": break else: if confirmcustomgeocoderkey != "no": print("Couldn't understand your input. Please input your API key again.") print("Please enter the API key you want to use below.") customgeocoderkey = input("Input here: ") logger.debug("customgeocoderkey: %s" % customgeocoderkey) if customgeocoderkey == "exit": print("Exiting the custom geocoder key process, and disabling a custom geocoder key.") config['GEOCODER API']['customkey_enabled'] = 'False' logger.debug("GEOCODER API/customkey_enabled is now FALSE.") print("Changes saved.") else: config['GEOCODER API']['customkey_enabled'] = 'True' config['GEOCODER API']['customkey'] = str(customgeocoderkey) logger.debug("GEOCODER API/customkey_enabled is now TRUE.") print("Changes saved.") elif enablecustomgeocoderkey == "no": config['GEOCODER API']['customkey_enabled'] = 'False' logger.debug("GEOCODER API/customkey_enabled is now FALSE.") print("Changes saved.") else: print("Your input could not be understood. Defaulting to 'False'.") config['GEOCODER API']['customkey_enabled'] = 'False' logger.debug("GEOCODER API/customkey_enabled is now FALSE.") print("Changes saved.") print("", "(39/42)", "On the summary screen, you can now view a summary of the weather that occurred yesterday.", "Enabling this will also enable the option to prefetch yesterday's weather at boot in the config file.", "Please note that enabling this uses 1 extra API call at boot, and will increase PyWeather's loading time.", "Would you like to turn on showing yesterday's weather on the summary screen? Yes or No. By default, this is", "disabled.", sep="\n") showyesterdayonsummary = input("Input here: ").lower() logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) if showyesterdayonsummary == "yes": config['SUMMARY']['showyesterdayonsummary'] = 'True' logger.info("SUMMARY/showyesterdayonsummary is now 'True'.") config['PREFETCH']['yesterdaydata_atboot'] = 'True' logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.") showyesterdayonsummary = True logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) print("Changes saved.") elif showyesterdayonsummary == "no": config['SUMMARY']['showyesterdayonsummary'] = 'False' logger.info("SUMMARY/showyesterdayonsummary is now 'False'.") showyesterdayonsummary = False logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) print("Changes saved.") else: print("Your input could not be understood. Defaulting to 'False'.") config['SUMMARY']['showyesterdayonsummary'] = 'False' logger.info("SUMMARY/showyesterdayonsumary is now 'False'.") showyesterdayonsummary = False logger.debug("showyesterdayonsummary: %s" % showyesterdayonsummary) print("Changes saved.") if showyesterdayonsummary is False: print("", "(40/42)", "When PyWeather boots up, you can have the option to have yesterday's weather data", "prefetched during bootup. Enabling this will use 1 extra API call at boot, and will increase PyWeather's", "loading time. Would you like to enable prefetching yesterday's weather data on boot? Yes or No.", "By default, this is disabled.", sep="\n") prefetchyesterdayatboot = input("Input here: ").lower() logger.debug("prefetchyesterdayatboot: %s" % prefetchyesterdayatboot) if prefetchyesterdayatboot == "yes": config['PREFETCH']['yesterdaydata_atboot'] = 'True' logger.info("PREFETCH/yesterdaydata_atboot is now 'True'.") print("Changes saved.") elif prefetchyesterdayatboot == "no": config['PREFETCH']['yesterdaydata_atboot'] = 'False' logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.") print("Changes saved.") else: print("Your input could not be understood. Defaulting to 'False'.") config['PREFETCH']['yesterdaydata_atboot'] = 'False' logger.info("PREFETCH/yesterdaydata_atboot is now 'False'.") print("Changes saved.") print("", "(41/42)", "In 0.6.3 beta and newer, you have the option to enable extra tools for PyWeather.", "Extra tools are diagnostic tools, and so far you can see cache timings in PyWeather, and more extra tools", "will be added as time goes on. Would you like to enable the ability to use extra tools? Yes or No. By default", "this is disabled.", sep="\n") enableextratools = input("Input here: ").lower() logger.debug("enableextratools: %s" % enableextratools) if enableextratools == "yes": config['UI']['extratools_enabled'] = 'True' logger.info("UI/extratools_enabled is now 'True'.") print("Changes saved.") elif enableextratools == "no": config['UI']['extratools_enabled'] = 'False' logger.info("UI/extratools_enabled is now 'False'.") print("Changes saved.") else: print("Could not understand your input. Defaulting to 'False'.") config['UI']['extratools_enabled'] = 'False' logger.info("UI/extratools_enabled is now 'False'.") print("Changes saved.") print("", "(42/42)", "PyWeather's geocoder usually uses https, but issues have been discovered", "on some platforms, where the geocoder cannot operate in the https mode. If you press enter", "PyWeather will automatically detect which scheme to use. If you are an advanced user, and want", "to configure the scheme yourself, enter advancedconfig at the prompt below.", sep="\n") configuregeopyscheme = input("Input here: ").lower() logger.debug("configuregeopyscheme: %s" % configuregeopyscheme) if configuregeopyscheme == "advancedconfig": print("Which geopy scheme would you like to use? 'https' works on most platforms", "but 'http' is needed on some platforms (OS X, as an example). Please input", "'https' or 'http' below.") geopyschemetype = input("Input here: ").lower() logger.debug("geopyschemetype: %s" % geopyschemetype) if geopyschemetype == "https": config['GEOCDER']['scheme'] = 'https' logger.debug("GEOCODER/scheme is now 'https'") print("Changes saved. Geocoder settings will not be validated.") elif geopyschemetype == "http": config['GEOCODER']['scheme'] = 'http' logger.debug("GEOCODER/scheme is now 'http'") print("Changes saved. Geocoder settings will not be validated.") else: print("Your input could not be understood. Defaulting to 'https'.") logger.debug("GEOCODER/scheme is now 'https'") print("Changes saved. Geocoder settings will not be validated.") else: print("Now automatically configuring your geopy scheme.") # HTTPS validation from geopy import GoogleV3 geocoder = GoogleV3(scheme='https') # I've found that one "warm up request", and then waiting ~15 seconds somehow helps determine if a platform is HTTP/HTTPS compatible. try: geocoder.geocode("123 5th Avenue, New York, NY") except: logger.debug("Warm up geocode failed.") print("I've just completed a warm-up geocode. However, sometimes a rate limit will", "occur after this geocode. I've paused the setup process for 10 seconds. This", "should help with figuring out what scheme works on your OS.", sep="\n") time.sleep(10) try: geocoder.geocode("123 5th Avenue, New York, NY") print("The geocoder can operate with HTTPS enabled on your OS. Saving these changes...") config['GEOCODER']['scheme'] = 'https' logger.debug("GEOCODER/scheme is now 'https'") print("Changes saved.") except geopy.exc.GeocoderServiceError: print("Geopy probably can't run without HTTPS (or your internet went down). Trying HTTP as the scheme...") geocoder = GoogleV3(scheme='http') print("Waiting 10 seconds to avoid rate limiting after the previous geocode...") time.sleep(10) try: geocoder.geocode("123 5th Avenue, New York, NY") print("The geocoder can operate, but without HTTPS enabled on your OS. Saving these changes...") config['GEOCODER']['scheme'] = 'http' logger.debug("GEOCODER/scheme is now 'http'") print("Changes saved.") except geopy.exc.GeocoderServiceError: print("You probably don't have an internet connection, as HTTPS and HTTP validation both failed.", "Defaulting to HTTP as the geopy scheme...", sep="\n") config['GEOCODER']['scheme'] = 'http' logger.debug("GEOCODER/scheme is now 'http'") print("Changes saved.") # if showing yesterday is disabled show prefetch yesterday # if show yest. on sum. is enabled enable prefetch too basically the same code print("","That's it! Now commiting config changes...", sep="\n") try: with open('storage//config.ini', 'w') as configfile: logger.debug("configfile: %s" % configfile) config.write(configfile) print("Changes committed!") logger.info("Performed operation: config.write(configfile)") except: print("The config file couldn't be written to.", "Make sure the config file can be written to.", sep="\n") printException() print("Press enter to exit.") input() sys.exit() print("","Everything is set up and ready to rumble!", "Enjoy using PyWeather! If you have any issues, please report them on GitHub!", "Press enter to continue.", sep="\n") input() sys.exit()
gpl-3.0
jkarnows/scikit-learn
sklearn/neighbors/tests/test_dist_metrics.py
230
5234
import itertools import pickle import numpy as np from numpy.testing import assert_array_almost_equal import scipy from scipy.spatial.distance import cdist from sklearn.neighbors.dist_metrics import DistanceMetric from nose import SkipTest def dist_func(x1, x2, p): return np.sum((x1 - x2) ** p) ** (1. / p) def cmp_version(version1, version2): version1 = tuple(map(int, version1.split('.')[:2])) version2 = tuple(map(int, version2.split('.')[:2])) if version1 < version2: return -1 elif version1 > version2: return 1 else: return 0 class TestMetrics: def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5, rseed=0, dtype=np.float64): np.random.seed(rseed) self.X1 = np.random.random((n1, d)).astype(dtype) self.X2 = np.random.random((n2, d)).astype(dtype) # make boolean arrays: ones and zeros self.X1_bool = self.X1.round(0) self.X2_bool = self.X2.round(0) V = np.random.random((d, d)) VI = np.dot(V, V.T) self.metrics = {'euclidean': {}, 'cityblock': {}, 'minkowski': dict(p=(1, 1.5, 2, 3)), 'chebyshev': {}, 'seuclidean': dict(V=(np.random.random(d),)), 'wminkowski': dict(p=(1, 1.5, 3), w=(np.random.random(d),)), 'mahalanobis': dict(VI=(VI,)), 'hamming': {}, 'canberra': {}, 'braycurtis': {}} self.bool_metrics = ['matching', 'jaccard', 'dice', 'kulsinski', 'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath'] def test_cdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X2, metric, **kwargs) yield self.check_cdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X2_bool, metric) yield self.check_cdist_bool, metric, D_true def check_cdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1, self.X2) assert_array_almost_equal(D12, D_true) def check_cdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool, self.X2_bool) assert_array_almost_equal(D12, D_true) def test_pdist(self): for metric, argdict in self.metrics.items(): keys = argdict.keys() for vals in itertools.product(*argdict.values()): kwargs = dict(zip(keys, vals)) D_true = cdist(self.X1, self.X1, metric, **kwargs) yield self.check_pdist, metric, kwargs, D_true for metric in self.bool_metrics: D_true = cdist(self.X1_bool, self.X1_bool, metric) yield self.check_pdist_bool, metric, D_true def check_pdist(self, metric, kwargs, D_true): if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0: raise SkipTest("Canberra distance incorrect in scipy < 0.9") dm = DistanceMetric.get_metric(metric, **kwargs) D12 = dm.pairwise(self.X1) assert_array_almost_equal(D12, D_true) def check_pdist_bool(self, metric, D_true): dm = DistanceMetric.get_metric(metric) D12 = dm.pairwise(self.X1_bool) assert_array_almost_equal(D12, D_true) def test_haversine_metric(): def haversine_slow(x1, x2): return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2 + np.cos(x1[0]) * np.cos(x2[0]) * np.sin(0.5 * (x1[1] - x2[1])) ** 2)) X = np.random.random((10, 2)) haversine = DistanceMetric.get_metric("haversine") D1 = haversine.pairwise(X) D2 = np.zeros_like(D1) for i, x1 in enumerate(X): for j, x2 in enumerate(X): D2[i, j] = haversine_slow(x1, x2) assert_array_almost_equal(D1, D2) assert_array_almost_equal(haversine.dist_to_rdist(D1), np.sin(0.5 * D2) ** 2) def test_pyfunc_metric(): X = np.random.random((10, 3)) euclidean = DistanceMetric.get_metric("euclidean") pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2) # Check if both callable metric and predefined metric initialized # DistanceMetric object is picklable euclidean_pkl = pickle.loads(pickle.dumps(euclidean)) pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc)) D1 = euclidean.pairwise(X) D2 = pyfunc.pairwise(X) D1_pkl = euclidean_pkl.pairwise(X) D2_pkl = pyfunc_pkl.pairwise(X) assert_array_almost_equal(D1, D2) assert_array_almost_equal(D1_pkl, D2_pkl)
bsd-3-clause
hsharsha/depot_tools
third_party/boto/pyami/helloworld.py
120
1247
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.pyami.scriptbase import ScriptBase class HelloWorld(ScriptBase): def main(self): self.log('Hello World!!!')
bsd-3-clause
Infusion-OS/android_external_skia
tools/skpdiff/skpdiff_server.py
161
24230
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function import argparse import BaseHTTPServer import json import os import os.path import re import subprocess import sys import tempfile import urllib2 # Grab the script path because that is where all the static assets are SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) # Find the tools directory for python imports TOOLS_DIR = os.path.dirname(SCRIPT_DIR) # Find the root of the skia trunk for finding skpdiff binary SKIA_ROOT_DIR = os.path.dirname(TOOLS_DIR) # Find the default location of gm expectations DEFAULT_GM_EXPECTATIONS_DIR = os.path.join(SKIA_ROOT_DIR, 'expectations', 'gm') # Imports from within Skia if TOOLS_DIR not in sys.path: sys.path.append(TOOLS_DIR) GM_DIR = os.path.join(SKIA_ROOT_DIR, 'gm') if GM_DIR not in sys.path: sys.path.append(GM_DIR) import gm_json import jsondiff # A simple dictionary of file name extensions to MIME types. The empty string # entry is used as the default when no extension was given or if the extension # has no entry in this dictionary. MIME_TYPE_MAP = {'': 'application/octet-stream', 'html': 'text/html', 'css': 'text/css', 'png': 'image/png', 'js': 'application/javascript', 'json': 'application/json' } IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN) SKPDIFF_INVOKE_FORMAT = '{} --jsonp=false -o {} -f {} {}' def get_skpdiff_path(user_path=None): """Find the skpdiff binary. @param user_path If none, searches in Release and Debug out directories of the skia root. If set, checks that the path is a real file and returns it. """ skpdiff_path = None possible_paths = [] # Use the user given path, or try out some good default paths. if user_path: possible_paths.append(user_path) else: possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out', 'Release', 'skpdiff')) possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out', 'Release', 'skpdiff.exe')) possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out', 'Debug', 'skpdiff')) possible_paths.append(os.path.join(SKIA_ROOT_DIR, 'out', 'Debug', 'skpdiff.exe')) # Use the first path that actually points to the binary for possible_path in possible_paths: if os.path.isfile(possible_path): skpdiff_path = possible_path break # If skpdiff was not found, print out diagnostic info for the user. if skpdiff_path is None: print('Could not find skpdiff binary. Either build it into the ' + 'default directory, or specify the path on the command line.') print('skpdiff paths tried:') for possible_path in possible_paths: print(' ', possible_path) return skpdiff_path def download_file(url, output_path): """Download the file at url and place it in output_path""" reader = urllib2.urlopen(url) with open(output_path, 'wb') as writer: writer.write(reader.read()) def download_gm_image(image_name, image_path, hash_val): """Download the gm result into the given path. @param image_name The GM file name, for example imageblur_gpu.png. @param image_path Path to place the image. @param hash_val The hash value of the image. """ if hash_val is None: return # Separate the test name from a image name image_match = IMAGE_FILENAME_RE.match(image_name) test_name = image_match.group(1) # Calculate the URL of the requested image image_url = gm_json.CreateGmActualUrl( test_name, gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_val) # Download the image as requested download_file(image_url, image_path) def get_image_set_from_skpdiff(skpdiff_records): """Get the set of all images references in the given records. @param skpdiff_records An array of records, which are dictionary objects. """ expected_set = frozenset([r['baselinePath'] for r in skpdiff_records]) actual_set = frozenset([r['testPath'] for r in skpdiff_records]) return expected_set | actual_set def set_expected_hash_in_json(expected_results_json, image_name, hash_value): """Set the expected hash for the object extracted from expected-results.json. Note that this only work with bitmap-64bitMD5 hash types. @param expected_results_json The Python dictionary with the results to modify. @param image_name The name of the image to set the hash of. @param hash_value The hash to set for the image. """ expected_results = expected_results_json[gm_json.JSONKEY_EXPECTEDRESULTS] if image_name in expected_results: expected_results[image_name][gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS][0][1] = hash_value else: expected_results[image_name] = { gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS: [ [ gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5, hash_value ] ] } def get_head_version(path): """Get the version of the file at the given path stored inside the HEAD of the git repository. It is returned as a string. @param path The path of the file whose HEAD is returned. It is assumed the path is inside a git repo rooted at SKIA_ROOT_DIR. """ # git-show will not work with absolute paths. This ensures we give it a path # relative to the skia root. This path also has to use forward slashes, even # on windows. git_path = os.path.relpath(path, SKIA_ROOT_DIR).replace('\\', '/') git_show_proc = subprocess.Popen(['git', 'show', 'HEAD:' + git_path], stdout=subprocess.PIPE) # When invoked outside a shell, git will output the last committed version # of the file directly to stdout. git_version_content, _ = git_show_proc.communicate() return git_version_content class GMInstance: """Information about a GM test result on a specific device: - device_name = the name of the device that rendered it - image_name = the GM test name and config - expected_hash = the current expected hash value - actual_hash = the actual hash value - is_rebaselined = True if actual_hash is what is currently in the expected results file, False otherwise. """ def __init__(self, device_name, image_name, expected_hash, actual_hash, is_rebaselined): self.device_name = device_name self.image_name = image_name self.expected_hash = expected_hash self.actual_hash = actual_hash self.is_rebaselined = is_rebaselined class ExpectationsManager: def __init__(self, expectations_dir, expected_name, updated_name, skpdiff_path): """ @param expectations_dir The directory to traverse for results files. This should resemble expectations/gm in the Skia trunk. @param expected_name The name of the expected result files. These are in the format of expected-results.json. @param updated_name The name of the updated expected result files. Normally this matches --expectations-filename-output for the rebaseline.py tool. @param skpdiff_path The path used to execute the skpdiff command. """ self._expectations_dir = expectations_dir self._expected_name = expected_name self._updated_name = updated_name self._skpdiff_path = skpdiff_path self._generate_gm_comparison() def _generate_gm_comparison(self): """Generate all the data needed to compare GMs: - determine which GMs changed - download the changed images - compare them with skpdiff """ # Get the expectations and compare them with actual hashes self._get_expectations() # Create a temporary file tree that makes sense for skpdiff to operate # on. We take the realpath of the new temp directory because some OSs # (*cough* osx) put the temp directory behind a symlink that gets # resolved later down the pipeline and breaks the image map. image_output_dir = os.path.realpath(tempfile.mkdtemp('skpdiff')) expected_image_dir = os.path.join(image_output_dir, 'expected') actual_image_dir = os.path.join(image_output_dir, 'actual') os.mkdir(expected_image_dir) os.mkdir(actual_image_dir) # Download expected and actual images that differed into the temporary # file tree. self._download_expectation_images(expected_image_dir, actual_image_dir) # Invoke skpdiff with our downloaded images and place its results in the # temporary directory. self._skpdiff_output_path = os.path.join(image_output_dir, 'skpdiff_output.json') skpdiff_cmd = SKPDIFF_INVOKE_FORMAT.format(self._skpdiff_path, self._skpdiff_output_path, expected_image_dir, actual_image_dir) os.system(skpdiff_cmd) self._load_skpdiff_output() def _get_expectations(self): """Fills self._expectations with GMInstance objects for each test whose expectation is different between the following two files: - the local filesystem's updated results file - git's head version of the expected results file """ differ = jsondiff.GMDiffer() self._expectations = [] for root, dirs, files in os.walk(self._expectations_dir): for expectation_file in files: # There are many files in the expectations directory. We only # care about expected results. if expectation_file != self._expected_name: continue # Get the name of the results file, and be sure there is an # updated result to compare against. If there is not, there is # no point in diffing this device. expected_file_path = os.path.join(root, self._expected_name) updated_file_path = os.path.join(root, self._updated_name) if not os.path.isfile(updated_file_path): continue # Always get the expected results from git because we may have # changed them in a previous instance of the server. expected_contents = get_head_version(expected_file_path) updated_contents = None with open(updated_file_path, 'rb') as updated_file: updated_contents = updated_file.read() # Read the expected results on disk to determine what we've # already rebaselined. commited_contents = None with open(expected_file_path, 'rb') as expected_file: commited_contents = expected_file.read() # Find all expectations that did not match. expected_diff = differ.GenerateDiffDictFromStrings( expected_contents, updated_contents) # Generate a set of images that have already been rebaselined # onto disk. rebaselined_diff = differ.GenerateDiffDictFromStrings( expected_contents, commited_contents) rebaselined_set = set(rebaselined_diff.keys()) # The name of the device corresponds to the name of the folder # we are in. device_name = os.path.basename(root) # Store old and new versions of the expectation for each GM for image_name, hashes in expected_diff.iteritems(): self._expectations.append( GMInstance(device_name, image_name, hashes['old'], hashes['new'], image_name in rebaselined_set)) def _load_skpdiff_output(self): """Loads the results of skpdiff and annotates them with whether they have already been rebaselined or not. The resulting data is store in self.skpdiff_records.""" self.skpdiff_records = None with open(self._skpdiff_output_path, 'rb') as skpdiff_output_file: self.skpdiff_records = json.load(skpdiff_output_file)['records'] for record in self.skpdiff_records: record['isRebaselined'] = self.image_map[record['baselinePath']][1].is_rebaselined def _download_expectation_images(self, expected_image_dir, actual_image_dir): """Download the expected and actual images for the _expectations array. @param expected_image_dir The directory to download expected images into. @param actual_image_dir The directory to download actual images into. """ image_map = {} # Look through expectations and download their images. for expectation in self._expectations: # Build appropriate paths to download the images into. expected_image_path = os.path.join(expected_image_dir, expectation.device_name + '-' + expectation.image_name) actual_image_path = os.path.join(actual_image_dir, expectation.device_name + '-' + expectation.image_name) print('Downloading %s for device %s' % ( expectation.image_name, expectation.device_name)) # Download images download_gm_image(expectation.image_name, expected_image_path, expectation.expected_hash) download_gm_image(expectation.image_name, actual_image_path, expectation.actual_hash) # Annotate the expectations with where the images were downloaded # to. expectation.expected_image_path = expected_image_path expectation.actual_image_path = actual_image_path # Map the image paths back to the expectations. image_map[expected_image_path] = (False, expectation) image_map[actual_image_path] = (True, expectation) self.image_map = image_map def _set_expected_hash(self, device_name, image_name, hash_value): """Set the expected hash for the image of the given device. This always writes directly to the expected results file of the given device @param device_name The name of the device to write the hash to. @param image_name The name of the image whose hash to set. @param hash_value The value of the hash to set. """ # Retrieve the expected results file as it is in the working tree json_path = os.path.join(self._expectations_dir, device_name, self._expected_name) expectations = gm_json.LoadFromFile(json_path) # Set the specified hash. set_expected_hash_in_json(expectations, image_name, hash_value) # Write it out to disk using gm_json to keep the formatting consistent. gm_json.WriteToFile(expectations, json_path) def commit_rebaselines(self, rebaselines): """Sets the expected results file to use the hashes of the images in the rebaselines list. If a expected result image is not in rebaselines at all, the old hash will be used. @param rebaselines A list of image paths to use the hash of. """ # Reset all expectations to their old hashes because some of them may # have been set to the new hash by a previous call to this function. for expectation in self._expectations: expectation.is_rebaselined = False self._set_expected_hash(expectation.device_name, expectation.image_name, expectation.expected_hash) # Take all the images to rebaseline for image_path in rebaselines: # Get the metadata about the image at the path. is_actual, expectation = self.image_map[image_path] expectation.is_rebaselined = is_actual expectation_hash = expectation.actual_hash if is_actual else\ expectation.expected_hash # Write out that image's hash directly to the expected results file. self._set_expected_hash(expectation.device_name, expectation.image_name, expectation_hash) self._load_skpdiff_output() class SkPDiffHandler(BaseHTTPServer.BaseHTTPRequestHandler): def send_file(self, file_path): # Grab the extension if there is one extension = os.path.splitext(file_path)[1] if len(extension) >= 1: extension = extension[1:] # Determine the MIME type of the file from its extension mime_type = MIME_TYPE_MAP.get(extension, MIME_TYPE_MAP['']) # Open the file and send it over HTTP if os.path.isfile(file_path): with open(file_path, 'rb') as sending_file: self.send_response(200) self.send_header('Content-type', mime_type) self.end_headers() self.wfile.write(sending_file.read()) else: self.send_error(404) def serve_if_in_dir(self, dir_path, file_path): # Determine if the file exists relative to the given dir_path AND exists # under the dir_path. This is to prevent accidentally serving files # outside the directory intended using symlinks, or '../'. real_path = os.path.normpath(os.path.join(dir_path, file_path)) if os.path.commonprefix([real_path, dir_path]) == dir_path: if os.path.isfile(real_path): self.send_file(real_path) return True return False def do_GET(self): # Simple rewrite rule of the root path to 'viewer.html' if self.path == '' or self.path == '/': self.path = '/viewer.html' # The [1:] chops off the leading '/' file_path = self.path[1:] # Handle skpdiff_output.json manually because it is was processed by the # server when it was started and does not exist as a file. if file_path == 'skpdiff_output.json': self.send_response(200) self.send_header('Content-type', MIME_TYPE_MAP['json']) self.end_headers() # Add JSONP padding to the JSON because the web page expects it. It # expects it because it was designed to run with or without a web # server. Without a web server, the only way to load JSON is with # JSONP. skpdiff_records = self.server.expectations_manager.skpdiff_records self.wfile.write('var SkPDiffRecords = ') json.dump({'records': skpdiff_records}, self.wfile) self.wfile.write(';') return # Attempt to send static asset files first. if self.serve_if_in_dir(SCRIPT_DIR, file_path): return # WARNING: Serving any file the user wants is incredibly insecure. Its # redeeming quality is that we only serve gm files on a white list. if self.path in self.server.image_set: self.send_file(self.path) return # If no file to send was found, just give the standard 404 self.send_error(404) def do_POST(self): if self.path == '/commit_rebaselines': content_length = int(self.headers['Content-length']) request_data = json.loads(self.rfile.read(content_length)) rebaselines = request_data['rebaselines'] self.server.expectations_manager.commit_rebaselines(rebaselines) self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() self.wfile.write('{"success":true}') return # If the we have no handler for this path, give em' the 404 self.send_error(404) def run_server(expectations_manager, port=8080): # It's important to parse the results file so that we can make a set of # images that the web page might request. skpdiff_records = expectations_manager.skpdiff_records image_set = get_image_set_from_skpdiff(skpdiff_records) # Do not bind to interfaces other than localhost because the server will # attempt to serve files relative to the root directory as a last resort # before 404ing. This means all of your files can be accessed from this # server, so DO NOT let this server listen to anything but localhost. server_address = ('127.0.0.1', port) http_server = BaseHTTPServer.HTTPServer(server_address, SkPDiffHandler) http_server.image_set = image_set http_server.expectations_manager = expectations_manager print('Navigate thine browser to: http://{}:{}/'.format(*server_address)) http_server.serve_forever() def main(): parser = argparse.ArgumentParser() parser.add_argument('--port', '-p', metavar='PORT', type=int, default=8080, help='port to bind the server to; ' + 'defaults to %(default)s', ) parser.add_argument('--expectations-dir', metavar='EXPECTATIONS_DIR', default=DEFAULT_GM_EXPECTATIONS_DIR, help='path to the gm expectations; ' + 'defaults to %(default)s' ) parser.add_argument('--expected', metavar='EXPECTATIONS_FILE_NAME', default='expected-results.json', help='the file name of the expectations JSON; ' + 'defaults to %(default)s' ) parser.add_argument('--updated', metavar='UPDATED_FILE_NAME', default='updated-results.json', help='the file name of the updated expectations JSON;' + ' defaults to %(default)s' ) parser.add_argument('--skpdiff-path', metavar='SKPDIFF_PATH', default=None, help='the path to the skpdiff binary to use; ' + 'defaults to out/Release/skpdiff or out/Default/skpdiff' ) args = vars(parser.parse_args()) # Convert args into a python dict # Make sure we have access to an skpdiff binary skpdiff_path = get_skpdiff_path(args['skpdiff_path']) if skpdiff_path is None: sys.exit(1) # Print out the paths of things for easier debugging print('script dir :', SCRIPT_DIR) print('tools dir :', TOOLS_DIR) print('root dir :', SKIA_ROOT_DIR) print('expectations dir :', args['expectations_dir']) print('skpdiff path :', skpdiff_path) expectations_manager = ExpectationsManager(args['expectations_dir'], args['expected'], args['updated'], skpdiff_path) run_server(expectations_manager, port=args['port']) if __name__ == '__main__': main()
bsd-3-clause
vabs22/zulip
zerver/migrations/0070_userhotspot.py
9
1032
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-03-28 00:22 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('zerver', '0069_realmauditlog_extra_data'), ] operations = [ migrations.CreateModel( name='UserHotspot', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hotspot', models.CharField(max_length=30)), ('timestamp', models.DateTimeField(default=django.utils.timezone.now)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.AlterUniqueTogether( name='userhotspot', unique_together=set([('user', 'hotspot')]), ), ]
apache-2.0
mehtapgundogan/Tellal
env/lib/python2.7/site-packages/pip/vcs/bazaar.py
35
4455
from __future__ import absolute_import import logging import os import tempfile import re # TODO: Get this into six.moves.urllib.parse try: from urllib import parse as urllib_parse except ImportError: import urlparse as urllib_parse from pip.utils import rmtree, display_path from pip.vcs import vcs, VersionControl from pip.download import path_to_url logger = logging.getLogger(__name__) class Bazaar(VersionControl): name = 'bzr' dirname = '.bzr' repo_name = 'branch' schemes = ( 'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp', ) def __init__(self, url=None, *args, **kwargs): super(Bazaar, self).__init__(url, *args, **kwargs) # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical # Register lp but do not expose as a scheme to support bzr+lp. if getattr(urllib_parse, 'uses_fragment', None): urllib_parse.uses_fragment.extend(['lp']) urllib_parse.non_hierarchical.extend(['lp']) def export(self, location): """ Export the Bazaar repository at the url to the destination location """ temp_dir = tempfile.mkdtemp('-export', 'pip-') self.unpack(temp_dir) if os.path.exists(location): # Remove the location to make sure Bazaar can export it correctly rmtree(location) try: self.run_command(['export', location], cwd=temp_dir, filter_stdout=self._filter, show_stdout=False) finally: rmtree(temp_dir) def switch(self, dest, url, rev_options): self.run_command(['switch', url], cwd=dest) def update(self, dest, rev_options): self.run_command(['pull', '-q'] + rev_options, cwd=dest) def obtain(self, dest): url, rev = self.get_url_rev() if rev: rev_options = ['-r', rev] rev_display = ' (to revision %s)' % rev else: rev_options = [] rev_display = '' if self.check_destination(dest, url, rev_options, rev_display): logger.info( 'Checking out %s%s to %s', url, rev_display, display_path(dest), ) self.run_command(['branch', '-q'] + rev_options + [url, dest]) def get_url_rev(self): # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it url, rev = super(Bazaar, self).get_url_rev() if url.startswith('ssh://'): url = 'bzr+' + url return url, rev def get_url(self, location): urls = self.run_command(['info'], show_stdout=False, cwd=location) for line in urls.splitlines(): line = line.strip() for x in ('checkout of branch: ', 'parent branch: '): if line.startswith(x): repo = line.split(x)[1] if self._is_local_repository(repo): return path_to_url(repo) return repo return None def get_revision(self, location): revision = self.run_command( ['revno'], show_stdout=False, cwd=location) return revision.splitlines()[-1] def get_tag_revs(self, location): tags = self.run_command( ['tags'], show_stdout=False, cwd=location) tag_revs = [] for line in tags.splitlines(): tags_match = re.search(r'([.\w-]+)\s*(.*)$', line) if tags_match: tag = tags_match.group(1) rev = tags_match.group(2) tag_revs.append((rev.strip(), tag.strip())) return dict(tag_revs) def get_src_requirement(self, dist, location, find_tags): repo = self.get_url(location) if not repo: return None if not repo.lower().startswith('bzr:'): repo = 'bzr+' + repo egg_project_name = dist.egg_name().split('-', 1)[0] current_rev = self.get_revision(location) tag_revs = self.get_tag_revs(location) if current_rev in tag_revs: # It's a tag full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev]) else: full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev) return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name) vcs.register(Bazaar)
gpl-2.0
lambdaq/pytr
core.py
1
7912
#!/usr/bin/env python # coding: utf8 # from gevent import monkey # monkey.patch_all() import socket import os, sys import random, struct import logging from collections import deque, Counter, defaultdict logger = logging.getLogger(__file__) logger.addHandler(logging.StreamHandler(sys.stderr)) logger.setLevel(logging.ERROR) class UdpIpParser(object): """parse IP+UDP""" def __init__(self, data): self.data = data self.ip_hdrl = ip_hdrl = ((data[0]) & 0x0F) * 4 self.udp_payload_len = struct.unpack( '!H', data[ip_hdrl + 4:ip_hdrl + 6])[0] @property def payload(self): udp_hdrl = 8 return self.data[self.ip_hdrl + udp_hdrl:self.ip_hdrl + self.udp_payload_len] class IpPacket(object): def __init__(self, data): self.data = data self.hdrl = (0x0F & (data[0])) * 4 self.payload = self.data[self.hdrl:] self.ttl = self.data[8] @property def src_ip(self): return socket.inet_ntoa(str(self.data[12:16])) @property def dst_ip(self): return socket.inet_ntoa(str(self.data[16:20])) class IcmpParser(object): hdrl = 8 def __init__(self, data): self.data = data @property def type(self): return self.data[0] @property def payload(self): return self.data[8:14] @property def id(self): return struct.unpack('>H', self.data[4:6])[0] def checksum(msg): # simplest rfc1071. msg is bytearray s = 0 for i in range(0, len(msg), 2): w = msg[i] + (msg[i + 1] << 8) c = s + w s = (c & 0xffff) + (c >> 16) return ~s & 0xffff def create_ping(id=None): id = id or random.randint(30000, 65500) icmp_type = 8 icmp_code = 0 icmp_checksum = 0 icmp_seq = 1 icmp_timestamp = 0 data = '%06d' % id s = struct.Struct('!bbHHhQ%ss' % len(data)) msg = bytearray(s.size) s.pack_into( msg, 0, icmp_type, icmp_code, icmp_checksum, id, icmp_seq, icmp_timestamp, data) # calculate ICMP checksum, which can not be offloaded cs = checksum(msg) struct.pack_into('<H', msg, 2, cs) return msg def guess_hop(ttl): if not ttl: return if ttl >= 128: return 256 - ttl elif 64 < ttl < 128: return 128 - ttl else: return 64 - ttl MAX_RETRY = 5 class Tracer(object): MAX_TTL = 32 def __init__(self): """ packet send rate = self.batch_size/self.timeout - hosts is iterable target IPs """ self.batch_size = 100 self.max_retry = 10 self.timeout = 1 self.running = self.timeout * self.max_retry self.max_ttl = defaultdict(lambda: self.MAX_TTL) self.echo_map = {} self.in_flight = deque(maxlen=self.batch_size) # a list of ip-ttl tuples self.retries = Counter() # remaining retries self.result = defaultdict(dict) # {ip: [hop1, hop2, ...]} self.sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) self.sock.bind(('', 0)) self.sock.settimeout(self.timeout) def _iter_ip_and_ttl(self, hosts): """generate all IPs and their hops need to ping Need consider retries. """ for ip in hosts: for ttl in xrange(1, self.MAX_TTL + 1): if ttl >= self.max_ttl[ip]: break resp = (ip.strip(), ttl) self.in_flight.append(resp) yield resp def run(self, hosts): """would block""" self.ip_and_ttl = self._iter_ip_and_ttl(hosts) self.tick() while self.running > 0: data = bytearray(1024) try: nbytes, addr = self.sock.recvfrom_into(data) self.on_data(data, addr[0]) except socket.timeout: self.tick() return self.result def _iter_retry(self): i = 0 while self.in_flight and self.retries: if not i < len(self.in_flight): return key = self.in_flight[i] if self.retries[key] > 0: self.retries[key] -= 1 yield key i += 1 if self.retries[key] <= 0: self.on_retry_fail(*key) i -= 1 def on_retry_fail(self, ip, ttl): self.retries.pop((ip, ttl), None) self.in_flight.remove((ip, ttl)) if ttl <= self.max_ttl[ip]: self.result[ip][ttl] = '?' @property def on_tick(self): return getattr(self, '_on_tick', None) or (lambda *args: None) @on_tick.setter def on_tick(self, func): self._on_tick = func @property def on_pong(self): return getattr(self, '_on_pong', None) or (lambda *args: None) @on_pong.setter def on_pong(self, func): self._on_pong = func def tick(self): logger.debug('in_flight=%s, retries=%s', len(self.in_flight), self.retries.most_common(4)) self.on_tick(self) sent = 0 for ip, ttl in self._iter_retry(): self.ping(ip, ttl) sent += 1 if sent >= self.batch_size: break while sent < self.batch_size: try: ip, ttl = self.ip_and_ttl.next() except StopIteration: self.running -= self.timeout return self.ping(ip, ttl) self.retries[(ip, ttl)] = self.max_retry sent += 1 def ping(self, ip, ttl): logger.debug("Ping %s, ttl=%s", ip, ttl) key = (ip, ttl) sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) sock.bind(('', 0)) sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl) icmp_id = random.randint(30000, 60000) self.echo_map[icmp_id] = (ip, ttl) packet = create_ping(icmp_id) sock.sendto(packet, (ip, 0)) sock.close() return icmp_id def pong(self, ping_ip, pong_ip, ttl): # @ToDo: handle multi-path trace-route if ping_ip == pong_ip: ttl = min(ttl, self.max_ttl[ping_ip]) self.max_ttl[ping_ip] = ttl for k in xrange(1, self.MAX_TTL): ip = self.result[ping_ip].get(k) if k > ttl or ip == ping_ip: self.result[ping_ip].pop(k, None) key = ping_ip, ttl try: self.in_flight.remove(key) except ValueError: pass self.retries.pop(key, None) else: key = ping_ip, ttl try: self.in_flight.remove(key) except ValueError: pass self.retries.pop(key, None) self.result[ping_ip][ttl] = pong_ip self.on_pong(self, ping_ip, pong_ip, ttl) def on_data(self, data, addr): # get IP packet inside returned IP outer_ip = IpPacket(data) inner_ip = IpPacket(outer_ip.payload[IcmpParser.hdrl:]) # the raw structure is: IP(ICMP(IP(ICMP))) icmp = IcmpParser(inner_ip.payload) icmp_id = None if icmp.payload.isdigit(): icmp_id = int(icmp.payload) if not icmp_id: icmp_id = icmp.id if icmp_id in self.echo_map: ip, ttl = self.echo_map[icmp_id] logger.debug('Pong %s, ip=%s, hop=%s', ip, addr, ttl) # f.write('%s\t%s\t%s\n' % (ip, ttl, addr)) self.pong(ip, addr, ttl) else: logger.debug('Pong unknown %s -> %s type %s' % ( inner_ip.src_ip, inner_ip.dst_ip, icmp.type)) def get_hops(res): return [res.get(i) or '?' for i in xrange(max(res.keys()), 0, -1)]
bsd-2-clause
SteveHNH/ansible
lib/ansible/modules/windows/win_group_membership.py
47
3143
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2017, Andrew Saraceni <andrew.saraceni@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_group_membership version_added: "2.4" short_description: Manage Windows local group membership description: - Allows the addition and removal of local, service and domain users, and domain groups from a local group. options: name: description: - Name of the local group to manage membership on. required: true members: description: - A list of members to ensure are present/absent from the group. - Accepts local users as username, .\username, and SERVERNAME\username. - Accepts domain users and groups as DOMAIN\username and username@DOMAIN. - Accepts service users as NT AUTHORITY\username. required: true state: description: - Desired state of the members in the group. choices: - present - absent default: present author: - Andrew Saraceni (@andrewsaraceni) ''' EXAMPLES = r''' - name: Add a local and domain user to a local group win_group_membership: name: Remote Desktop Users members: - NewLocalAdmin - DOMAIN\TestUser state: present - name: Remove a domain group and service user from a local group win_group_membership: name: Backup Operators members: - DOMAIN\TestGroup - NT AUTHORITY\SYSTEM state: absent ''' RETURN = r''' name: description: The name of the target local group. returned: always type: string sample: Administrators added: description: A list of members added when C(state) is C(present); this is empty if no members are added. returned: success and C(state) is C(present) type: list sample: ["NewLocalAdmin", "DOMAIN\\TestUser"] removed: description: A list of members removed when C(state) is C(absent); this is empty if no members are removed. returned: success and C(state) is C(absent) type: list sample: ["DOMAIN\\TestGroup", "NT AUTHORITY\\SYSTEM"] members: description: A list of all local group members at completion; this is empty if the group contains no members. returned: success type: list sample: ["DOMAIN\\TestUser", "NewLocalAdmin"] '''
gpl-3.0
nwalters512/the-blue-alliance
tests/test_validation_helper.py
3
1494
import unittest2 from helpers.validation_helper import ValidationHelper class TestValidationHelper(unittest2.TestCase): def testTeamValidation(self): errors = ValidationHelper.validate([("team_id_validator", "frc01")]) self.assertEqual(errors, {"Errors": [{"team_id": "frc01 is not a valid team id"}]}) def testEventValidation(self): errors = ValidationHelper.validate([("event_id_validator", "1cmp")]) self.assertEqual(errors, {"Errors": [{"event_id": "1cmp is not a valid event id"}]}) def testMatchValidation(self): errors = ValidationHelper.validate([("match_id_validator", "0010c1_0m2")]) self.assertEqual(errors, {"Errors": [{"match_id": "0010c1_0m2 is not a valid match id"}]}) def testMichiganEigthFinalsValidValidation(self): errors = ValidationHelper.validate([("match_id_validator", "2015micmp_ef3m1")]) self.assertEqual(None, errors) def testComboValidation(self): errors = ValidationHelper.validate([("match_id_validator", "0010c1_0m2"), ("team_id_validator", "frc01"), ("event_id_validator", "1cmp")]) self.assertEqual(errors, {"Errors": [{"match_id": "0010c1_0m2 is not a valid match id"}, {"team_id": "frc01 is not a valid team id"},{"event_id": "1cmp is not a valid event id"}]}) def testValidValidation(self): errors = ValidationHelper.validate([("team_id_validator", "frc101")]) self.assertEqual(None, errors)
mit
gangadharkadam/office_erp
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
16
2966
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cstr, cint from frappe import msgprint, _ def execute(filters=None): if not filters: filters = {} conditions, filters = get_conditions(filters) columns = get_columns(filters) att_map = get_attendance_list(conditions, filters) emp_map = get_employee_details() data = [] for emp in sorted(att_map): emp_det = emp_map.get(emp) if not emp_det: continue row = [emp, emp_det.employee_name, emp_det.branch, emp_det.department, emp_det.designation, emp_det.company] total_p = total_a = 0.0 for day in range(filters["total_days_in_month"]): status = att_map.get(emp).get(day + 1, "Absent") status_map = {"Present": "P", "Absent": "A", "Half Day": "HD"} row.append(status_map[status]) if status == "Present": total_p += 1 elif status == "Absent": total_a += 1 elif status == "Half Day": total_p += 0.5 total_a += 0.5 row += [total_p, total_a] data.append(row) return columns, data def get_columns(filters): columns = [ "Employee:Link/Employee:120", "Employee Name::140", "Branch:Link/Branch:120", "Department:Link/Department:120", "Designation:Link/Designation:120", "Company:Link/Company:120" ] for day in range(filters["total_days_in_month"]): columns.append(cstr(day+1) +"::20") columns += ["Total Present:Float:80", "Total Absent:Float:80"] return columns def get_attendance_list(conditions, filters): attendance_list = frappe.db.sql("""select employee, day(att_date) as day_of_month, status from tabAttendance where docstatus = 1 %s order by employee, att_date""" % conditions, filters, as_dict=1) att_map = {} for d in attendance_list: att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "") att_map[d.employee][d.day_of_month] = d.status return att_map def get_conditions(filters): if not (filters.get("month") and filters.get("fiscal_year")): msgprint(_("Please select month and year"), raise_exception=1) filters["month"] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"].index(filters["month"]) + 1 from calendar import monthrange filters["total_days_in_month"] = monthrange(cint(filters["fiscal_year"].split("-")[-1]), filters["month"])[1] conditions = " and month(att_date) = %(month)s and fiscal_year = %(fiscal_year)s" if filters.get("company"): conditions += " and company = %(company)s" if filters.get("employee"): conditions += " and employee = %(employee)s" return conditions, filters def get_employee_details(): emp_map = frappe._dict() for d in frappe.db.sql("""select name, employee_name, designation, department, branch, company from tabEmployee where docstatus < 2 and status = 'Active'""", as_dict=1): emp_map.setdefault(d.name, d) return emp_map
agpl-3.0
lifeofguenter/google-appengine-wx-launcher
launcher/text_frame_unittest.py
28
2392
#!/usr/bin/env python # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unittests for text_frame.py""" import unittest import wx import launcher class TextFrameTest(unittest.TestCase): def setUp(self): # Must always create a wx.App first self.app = wx.PySimpleApp() def CreateConsole(self): """Create and return a generic console.""" lc = launcher.TextFrame('title') return lc def testBasicTile(self): """Test to make sure new windows don't overlap.""" pos = (0,0) launcher.TextFrame._ResetTiling() for i in range(3): lc = launcher.TextFrame('big bad window title') newpos = lc.GetPositionTuple() self.assertTrue(newpos[0] > pos[0]) self.assertTrue(newpos[1] > pos[1]) pos = newpos def testMuchTiling(self): """Make sure the top/left of our tile is always on-screen.""" launcher.TextFrame._ResetTiling() area = wx.Display().GetClientArea() lc = launcher.TextFrame('super dooper tytle 4 roolerz and doodz') # Needs to be real big in case you have a large monitor. ~1000 # iterations needed for a (1440,874) laptop before a full reset # happens. for i in range(3000): lc._ShiftTilePosition() self.assertTrue(launcher.TextFrame._tile_position[0] > area[0]) self.assertTrue(launcher.TextFrame._tile_position[1] > area[1]) self.assertTrue(launcher.TextFrame._tile_position[0] < area[2]) self.assertTrue(launcher.TextFrame._tile_position[1] < area[3]) def testText(self): """Test adding text to the console.""" lc = self.CreateConsole() contents = "" self.assertEqual(contents, lc.GetText()) for str in ('a', 'foo', '\n\n\n', 'bar\nbaz\n choke choke zapf'): contents += str lc.AppendText(str) self.assertEqual(contents, lc.GetText()) if __name__ == '__main__': unittest.main()
apache-2.0
iamjakob/oauth2lib
oauth2lib/provider.py
8
21633
import json import logging from requests import Response from cStringIO import StringIO try: from werkzeug.exceptions import Unauthorized except ImportError: Unauthorized = Exception from . import utils class Provider(object): """Base provider class for different types of OAuth 2.0 providers.""" def _handle_exception(self, exc): """Handle an internal exception that was caught and suppressed. :param exc: Exception to process. :type exc: Exception """ logger = logging.getLogger(__name__) logger.exception(exc) def _make_response(self, body='', headers=None, status_code=200): """Return a response object from the given parameters. :param body: Buffer/string containing the response body. :type body: str :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ res = Response() res.status_code = status_code if headers is not None: res.headers.update(headers) res.raw = StringIO(body) return res def _make_redirect_error_response(self, redirect_uri, err): """Return a HTTP 302 redirect response object containing the error. :param redirect_uri: Client redirect URI. :type redirect_uri: str :param err: OAuth error message. :type err: str :rtype: requests.Response """ params = { 'error': err, 'response_type': None, 'client_id': None, 'redirect_uri': None } redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={'Location': redirect}, status_code=302) def _make_json_response(self, data, headers=None, status_code=200): """Return a response object from the given JSON data. :param data: Data to JSON-encode. :type data: mixed :param headers: Dict of headers to include in the requests. :type headers: dict :param status_code: HTTP status code. :type status_code: int :rtype: requests.Response """ response_headers = {} if headers is not None: response_headers.update(headers) response_headers['Content-Type'] = 'application/json;charset=UTF-8' response_headers['Cache-Control'] = 'no-store' response_headers['Pragma'] = 'no-cache' return self._make_response(json.dumps(data), response_headers, status_code) def _make_json_error_response(self, err): """Return a JSON-encoded response object representing the error. :param err: OAuth error message. :type err: str :rtype: requests.Response """ return self._make_json_response({'error': err}, status_code=400) def _invalid_redirect_uri_response(self): """What to return when the redirect_uri parameter is missing. :rtype: requests.Response """ return self._make_json_error_response('invalid_request') class AuthorizationProvider(Provider): """OAuth 2.0 authorization provider. This class manages authorization codes and access tokens. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a provider. These are the methods that must be implemented in a subclass: validate_client_id(self, client_id) # Return True or False validate_client_secret(self, client_id, client_secret) # Return True or False validate_scope(self, client_id, scope) # Return True or False validate_redirect_uri(self, client_id, redirect_uri) # Return True or False validate_access(self) # Use this to validate your app session user # Return True or False from_authorization_code(self, client_id, code, scope) # Return mixed data or None on invalid from_refresh_token(self, client_id, refresh_token, scope) # Return mixed data or None on invalid persist_authorization_code(self, client_id, code, scope) # Return value ignored persist_token_information(self, client_id, scope, access_token, token_type, expires_in, refresh_token, data) # Return value ignored discard_authorization_code(self, client_id, code) # Return value ignored discard_refresh_token(self, client_id, refresh_token) # Return value ignored Optionally, the following may be overridden to acheive desired behavior: @property token_length(self) @property token_type(self) @property token_expires_in(self) generate_authorization_code(self) generate_access_token(self) generate_refresh_token(self) """ @property def token_length(self): """Property method to get the length used to generate tokens. :rtype: int """ return 40 @property def token_type(self): """Property method to get the access token type. :rtype: str """ return 'Bearer' @property def token_expires_in(self): """Property method to get the token expiration time in seconds. :rtype: int """ return 3600 def generate_authorization_code(self): """Generate a random authorization code. :rtype: str """ return utils.random_ascii_string(self.token_length) def generate_access_token(self): """Generate a random access token. :rtype: str """ return utils.random_ascii_string(self.token_length) def generate_refresh_token(self): """Generate a random refresh token. :rtype: str """ return utils.random_ascii_string(self.token_length) def get_authorization_code(self, response_type, client_id, redirect_uri, **params): """Generate authorization code HTTP response. :param response_type: Desired response type. Must be exactly "code". :type response_type: str :param client_id: Client ID. :type client_id: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :rtype: requests.Response """ # Ensure proper response_type if response_type != 'code': err = 'unsupported_response_type' return self._make_redirect_error_response(redirect_uri, err) # Check redirect URI is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) if not is_valid_redirect_uri: return self._invalid_redirect_uri_response() # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_access = self.validate_access() scope = params.get('scope', '') is_valid_scope = self.validate_scope(client_id, scope) # Return proper error responses on invalid conditions if not is_valid_client_id: err = 'unauthorized_client' return self._make_redirect_error_response(redirect_uri, err) if not is_valid_access: err = 'access_denied' return self._make_redirect_error_response(redirect_uri, err) if not is_valid_scope: err = 'invalid_scope' return self._make_redirect_error_response(redirect_uri, err) # Generate authorization code code = self.generate_authorization_code() # Save information to be used to validate later requests self.persist_authorization_code(client_id=client_id, code=code, scope=scope) # Return redirection response params.update({ 'code': code, 'response_type': None, 'client_id': None, 'redirect_uri': None }) redirect = utils.build_url(redirect_uri, params) return self._make_response(headers={'Location': redirect}, status_code=302) def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params): """Generate access token HTTP response from a refresh token. :param grant_type: Desired grant type. Must be "refresh_token". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param refresh_token: Refresh token. :type refresh_token: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != 'refresh_token': return self._make_json_error_response('unsupported_grant_type') # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) scope = params.get('scope', '') is_valid_scope = self.validate_scope(client_id, scope) data = self.from_refresh_token(client_id, refresh_token, scope) is_valid_refresh_token = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response('invalid_client') if not is_valid_scope: return self._make_json_error_response('invalid_scope') if not is_valid_refresh_token: return self._make_json_error_response('invalid_grant') # Discard original refresh token self.discard_refresh_token(client_id, refresh_token) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information(client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data) # Return json response return self._make_json_response({ 'access_token': access_token, 'token_type': token_type, 'expires_in': expires_in, 'refresh_token': refresh_token }) def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params): """Generate access token HTTP response. :param grant_type: Desired grant type. Must be "authorization_code". :type grant_type: str :param client_id: Client ID. :type client_id: str :param client_secret: Client secret. :type client_secret: str :param redirect_uri: Client redirect URI. :type redirect_uri: str :param code: Authorization code. :type code: str :rtype: requests.Response """ # Ensure proper grant_type if grant_type != 'authorization_code': return self._make_json_error_response('unsupported_grant_type') # Check conditions is_valid_client_id = self.validate_client_id(client_id) is_valid_client_secret = self.validate_client_secret(client_id, client_secret) is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri) scope = params.get('scope', '') is_valid_scope = self.validate_scope(client_id, scope) data = self.from_authorization_code(client_id, code, scope) is_valid_grant = data is not None # Return proper error responses on invalid conditions if not (is_valid_client_id and is_valid_client_secret): return self._make_json_error_response('invalid_client') if not is_valid_grant or not is_valid_redirect_uri: return self._make_json_error_response('invalid_grant') if not is_valid_scope: return self._make_json_error_response('invalid_scope') # Discard original authorization code self.discard_authorization_code(client_id, code) # Generate access tokens once all conditions have been met access_token = self.generate_access_token() token_type = self.token_type expires_in = self.token_expires_in refresh_token = self.generate_refresh_token() # Save information to be used to validate later requests self.persist_token_information(client_id=client_id, scope=scope, access_token=access_token, token_type=token_type, expires_in=expires_in, refresh_token=refresh_token, data=data) # Return json response return self._make_json_response({ 'access_token': access_token, 'token_type': token_type, 'expires_in': expires_in, 'refresh_token': refresh_token }) def get_authorization_code_from_uri(self, uri): """Get authorization code response from a URI. This method will ignore the domain and path of the request, instead automatically parsing the query string parameters. :param uri: URI to parse for authorization information. :type uri: str :rtype: requests.Response """ params = utils.url_query_params(uri) try: if 'response_type' not in params: raise TypeError('Missing parameter response_type in URL query') if 'client_id' not in params: raise TypeError('Missing parameter client_id in URL query') if 'redirect_uri' not in params: raise TypeError('Missing parameter redirect_uri in URL query') return self.get_authorization_code(**params) except TypeError as exc: self._handle_exception(exc) # Catch missing parameters in request err = 'invalid_request' if 'redirect_uri' in params: u = params['redirect_uri'] return self._make_redirect_error_response(u, err) else: return self._invalid_redirect_uri_response() except StandardError as exc: self._handle_exception(exc) # Catch all other server errors err = 'server_error' u = params['redirect_uri'] return self._make_redirect_error_response(u, err) def get_token_from_post_data(self, data): """Get a token response from POST data. :param data: POST data containing authorization information. :type data: dict :rtype: requests.Response """ try: # Verify OAuth 2.0 Parameters for x in ['grant_type', 'client_id', 'client_secret']: if not data.get(x): raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x)) # Handle get token from refresh_token if 'refresh_token' in data: return self.refresh_token(**data) # Handle get token from authorization code for x in ['redirect_uri', 'code']: if not data.get(x): raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x)) return self.get_token(**data) except TypeError as exc: self._handle_exception(exc) # Catch missing parameters in request return self._make_json_error_response('invalid_request') except StandardError as exc: self._handle_exception(exc) # Catch all other server errors return self._make_json_error_response('server_error') def validate_client_id(self, client_id): raise NotImplementedError('Subclasses must implement ' \ 'validate_client_id.') def validate_client_secret(self, client_id, client_secret): raise NotImplementedError('Subclasses must implement ' \ 'validate_client_secret.') def validate_redirect_uri(self, client_id, redirect_uri): raise NotImplementedError('Subclasses must implement ' \ 'validate_redirect_uri.') def validate_scope(self, client_id, scope): raise NotImplementedError('Subclasses must implement ' \ 'validate_scope.') def validate_access(self): raise NotImplementedError('Subclasses must implement ' \ 'validate_access.') def from_authorization_code(self, client_id, code, scope): raise NotImplementedError('Subclasses must implement ' \ 'from_authorization_code.') def from_refresh_token(self, client_id, refresh_token, scope): raise NotImplementedError('Subclasses must implement ' \ 'from_refresh_token.') def persist_authorization_code(self, client_id, code, scope): raise NotImplementedError('Subclasses must implement ' \ 'persist_authorization_code.') def persist_token_information(self, client_id, scope, access_token, token_type, expires_in, refresh_token, data): raise NotImplementedError('Subclasses must implement ' \ 'persist_token_information.') def discard_authorization_code(self, client_id, code): raise NotImplementedError('Subclasses must implement ' \ 'discard_authorization_code.') def discard_refresh_token(self, client_id, refresh_token): raise NotImplementedError('Subclasses must implement ' \ 'discard_refresh_token.') class OAuthError(Unauthorized): """OAuth error, including the OAuth error reason.""" def __init__(self, reason, *args, **kwargs): self.reason = reason super(OAuthError, self).__init__(*args, **kwargs) class ResourceAuthorization(object): """A class containing an OAuth 2.0 authorization.""" is_oauth = False is_valid = None token = None client_id = None expires_in = None error = None def raise_error_if_invalid(self): if not self.is_valid: raise OAuthError(self.error, 'OAuth authorization error') class ResourceProvider(Provider): """OAuth 2.0 resource provider. This class provides an interface to validate an incoming request and authenticate resource access. Certain methods MUST be overridden in a subclass, thus this class cannot be directly used as a resource provider. These are the methods that must be implemented in a subclass: get_authorization_header(self) # Return header string for key "Authorization" or None validate_access_token(self, access_token, authorization) # Set is_valid=True, client_id, and expires_in attributes # on authorization if authorization was successful. # Return value is ignored """ @property def authorization_class(self): return ResourceAuthorization def get_authorization(self): """Get authorization object representing status of authentication.""" auth = self.authorization_class() header = self.get_authorization_header() if not header or not header.split: return auth header = header.split() if len(header) > 1 and header[0] == 'Bearer': auth.is_oauth = True access_token = header[1] self.validate_access_token(access_token, auth) if not auth.is_valid: auth.error = 'access_denied' return auth def get_authorization_header(self): raise NotImplementedError('Subclasses must implement ' \ 'get_authorization_header.') def validate_access_token(self, access_token, authorization): raise NotImplementedError('Subclasses must implement ' \ 'validate_token.')
mit
jagg81/translate-toolkit
translate/storage/statsdb.py
2
27596
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2007-2010 Zuza Software Foundation # # This file is part of the Translate Toolkit. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """Module to provide a cache of statistics in a database. @organization: Zuza Software Foundation @copyright: 2007 Zuza Software Foundation @license: U{GPL <http://www.fsf.org/licensing/licenses/gpl.html>} """ try: from sqlite3 import dbapi2 except ImportError: from pysqlite2 import dbapi2 import os.path import re import sys import stat import thread from UserDict import UserDict from translate import __version__ as toolkitversion from translate.lang.common import Common from translate.misc.multistring import multistring from translate.storage import factory from translate.storage.workflow import StateEnum #kdepluralre = re.compile("^_n: ") #Restore this if you really need support for old kdeplurals brtagre = re.compile("<br\s*?/?>") xmltagre = re.compile("<[^>]+>") numberre = re.compile("\\D\\.\\D") extended_state_strings = { StateEnum.EMPTY: "empty", StateEnum.NEEDS_WORK: "needs-work", StateEnum.REJECTED: "rejected", StateEnum.NEEDS_REVIEW: "needs-review", StateEnum.UNREVIEWED: "unreviewed", StateEnum.FINAL: "final", } UNTRANSLATED = StateEnum.EMPTY FUZZY = StateEnum.NEEDS_WORK TRANSLATED = StateEnum.UNREVIEWED state_strings = { UNTRANSLATED: "untranslated", FUZZY: "fuzzy", TRANSLATED: "translated", } def wordcount(string): # TODO: po class should understand KDE style plurals ## #string = kdepluralre.sub("", string) #Restore this if you really need support for old kdeplurals string = brtagre.sub("\n", string) string = xmltagre.sub("", string) string = numberre.sub(" ", string) #TODO: This should still use the correct language to count in the target #language return len(Common.words(string)) def wordsinunit(unit): """Counts the words in the unit's source and target, taking plurals into account. The target words are only counted if the unit is translated.""" (sourcewords, targetwords) = (0, 0) if isinstance(unit.source, multistring): sourcestrings = unit.source.strings else: sourcestrings = [unit.source or ""] for s in sourcestrings: sourcewords += wordcount(s) if not unit.istranslated(): return sourcewords, targetwords if isinstance(unit.target, multistring): targetstrings = unit.target.strings else: targetstrings = [unit.target or ""] for s in targetstrings: targetwords += wordcount(s) return sourcewords, targetwords class Record(UserDict): def __init__(self, record_keys, record_values=None, compute_derived_values=lambda x: x): if record_values == None: record_values = (0 for _i in record_keys) self.record_keys = record_keys self.data = dict(zip(record_keys, record_values)) self._compute_derived_values = compute_derived_values self._compute_derived_values(self) def to_tuple(self): return tuple(self[key] for key in self.record_keys) def __add__(self, other): result = Record(self.record_keys) for key in self.keys(): result[key] = self[key] + other[key] self._compute_derived_values(self) return result def __sub__(self, other): result = Record(self.record_keys) for key in self.keys(): result[key] = self[key] - other[key] self._compute_derived_values(self) return result def as_string_for_db(self): return ",".join([repr(x) for x in self.to_tuple()]) def transaction(f): """Modifies f to commit database changes if it executes without exceptions. Otherwise it rolls back the database. ALL publicly accessible methods in StatsCache MUST be decorated with this decorator. """ def decorated_f(self, *args, **kwargs): try: result = f(self, *args, **kwargs) self.con.commit() return result except: # If ANY exception is raised, we're left in an # uncertain state and we MUST roll back any changes to avoid getting # stuck in an inconsistent state. if self.con: self.con.rollback() raise return decorated_f def statefordb(unit): """Returns the numeric database state for the unit.""" if unit.istranslated(): return TRANSLATED if unit.isfuzzy() and unit.target: return FUZZY return UNTRANSLATED class FileTotals(object): keys = ['translatedsourcewords', 'fuzzysourcewords', 'untranslatedsourcewords', 'translated', 'fuzzy', 'untranslated', 'translatedtargetwords'] def db_keys(self): return ",".join(self.keys) def __init__(self, cur): self.cur = cur self.cur.execute(""" CREATE TABLE IF NOT EXISTS filetotals( fileid INTEGER PRIMARY KEY AUTOINCREMENT, translatedsourcewords INTEGER NOT NULL, fuzzysourcewords INTEGER NOT NULL, untranslatedsourcewords INTEGER NOT NULL, translated INTEGER NOT NULL, fuzzy INTEGER NOT NULL, untranslated INTEGER NOT NULL, translatedtargetwords INTEGER NOT NULL);""") def new_record(cls, state_for_db=None, sourcewords=None, targetwords=None): record = Record(cls.keys, compute_derived_values=cls._compute_derived_values) if state_for_db is not None: if state_for_db is UNTRANSLATED: record['untranslated'] = 1 record['untranslatedsourcewords'] = sourcewords if state_for_db is TRANSLATED: record['translated'] = 1 record['translatedsourcewords'] = sourcewords record['translatedtargetwords'] = targetwords elif state_for_db is FUZZY: record['fuzzy'] = 1 record['fuzzysourcewords'] = sourcewords return record new_record = classmethod(new_record) def _compute_derived_values(cls, record): record["total"] = record["untranslated"] + \ record["translated"] + \ record["fuzzy"] record["totalsourcewords"] = record["untranslatedsourcewords"] + \ record["translatedsourcewords"] + \ record["fuzzysourcewords"] record["review"] = 0 _compute_derived_values = classmethod(_compute_derived_values) def __getitem__(self, fileid): result = self.cur.execute(""" SELECT %(keys)s FROM filetotals WHERE fileid=?;""" % {'keys': self.db_keys()}, (fileid,)) return Record(FileTotals.keys, result.fetchone(), self._compute_derived_values) def __setitem__(self, fileid, record): self.cur.execute(""" INSERT OR REPLACE into filetotals VALUES (%(fileid)d, %(vals)s); """ % {'fileid': fileid, 'vals': record.as_string_for_db()}) def __delitem__(self, fileid): self.cur.execute(""" DELETE FROM filetotals WHERE fileid=?; """, (fileid,)) def emptyfiletotals(): """Returns a dictionary with all statistics initalised to 0.""" return FileTotals.new_record() def emptyfilechecks(): return {} def emptyfilestats(): return {"total": [], "translated": [], "fuzzy": [], "untranslated": []} def emptyunitstats(): return {"sourcewordcount": [], "targetwordcount": []} # We allow the caller to specify which value to return when errors_return_empty # is True. We do this, since Poolte wants None to be returned when it calls # get_mod_info directly, whereas we want an integer to be returned for # uses of get_mod_info within this module. # TODO: Get rid of empty_return when Pootle code is improved to not require # this. def get_mod_info(file_path): file_stat = os.stat(file_path) assert not stat.S_ISDIR(file_stat.st_mode) return file_stat.st_mtime, file_stat.st_size def suggestion_extension(): return os.path.extsep + 'pending' def suggestion_filename(filename): return filename + suggestion_extension() # ALL PUBLICLY ACCESSIBLE METHODS MUST BE DECORATED WITH THE transaction DECORATOR. class StatsCache(object): """An object instantiated as a singleton for each statsfile that provides access to the database cache from a pool of StatsCache objects.""" _caches = {} defaultfile = None con = None """This cache's connection""" cur = None """The current cursor""" def __new__(cls, statsfile=None): current_thread = thread.get_ident() def make_database(statsfile): def connect(cache): cache.con = dbapi2.connect(statsfile) cache.cur = cache.con.cursor() def clear_old_data(cache): try: cache.cur.execute("""SELECT min(toolkitbuild) FROM files""") val = cache.cur.fetchone() # If the database is empty, we have no idea whether its layout # is correct, so we might as well delete it. if val is None or val[0] < toolkitversion.build: cache.con.close() del cache os.unlink(statsfile) return True return False except dbapi2.OperationalError: return False cache = cls._caches.setdefault(current_thread, {})[statsfile] = object.__new__(cls) connect(cache) if clear_old_data(cache): connect(cache) cache.create() return cache if not statsfile: if not cls.defaultfile: userdir = os.path.expanduser("~") cachedir = None if os.name == "nt": cachedir = os.path.join(userdir, "Translate Toolkit") else: cachedir = os.path.join(userdir, ".translate_toolkit") if not os.path.exists(cachedir): os.mkdir(cachedir) cls.defaultfile = os.path.realpath(os.path.join(cachedir, "stats.db")) statsfile = cls.defaultfile else: statsfile = os.path.realpath(statsfile) # First see if a cache for this file already exists: if current_thread in cls._caches and statsfile in cls._caches[current_thread]: return cls._caches[current_thread][statsfile] # No existing cache. Let's build a new one and keep a copy return make_database(statsfile) @transaction def create(self): """Create all tables and indexes.""" self.file_totals = FileTotals(self.cur) self.cur.execute("""CREATE TABLE IF NOT EXISTS files( fileid INTEGER PRIMARY KEY AUTOINCREMENT, path VARCHAR NOT NULL UNIQUE, st_mtime INTEGER NOT NULL, st_size INTEGER NOT NULL, toolkitbuild INTEGER NOT NULL);""") self.cur.execute("""CREATE UNIQUE INDEX IF NOT EXISTS filepathindex ON files (path);""") self.cur.execute("""CREATE TABLE IF NOT EXISTS units( id INTEGER PRIMARY KEY AUTOINCREMENT, unitid VARCHAR NOT NULL, fileid INTEGER NOT NULL, unitindex INTEGER NOT NULL, source VARCHAR NOT NULL, target VARCHAR, state INTEGER, e_state INTEGER, sourcewords INTEGER, targetwords INTEGER);""") self.cur.execute("""CREATE INDEX IF NOT EXISTS fileidindex ON units(fileid);""") self.cur.execute("""CREATE TABLE IF NOT EXISTS checkerconfigs( configid INTEGER PRIMARY KEY AUTOINCREMENT, config VARCHAR);""") self.cur.execute("""CREATE INDEX IF NOT EXISTS configindex ON checkerconfigs(config);""") self.cur.execute("""CREATE TABLE IF NOT EXISTS uniterrors( errorid INTEGER PRIMARY KEY AUTOINCREMENT, unitindex INTEGER NOT NULL, fileid INTEGER NOT NULL, configid INTEGER NOT NULL, name VARCHAR NOT NULL, message VARCHAR);""") self.cur.execute("""CREATE INDEX IF NOT EXISTS uniterrorindex ON uniterrors(fileid, configid);""") @transaction def _getfileid(self, filename, check_mod_info=True, store=None): """return fileid representing the given file in the statscache. if file not in cache or has been updated since last record update, recalculate stats. optional argument store can be used to avoid unnessecary reparsing of already loaded translation files. store can be a TranslationFile object or a callback that returns one. """ if isinstance(filename, str): filename = unicode(filename, sys.getfilesystemencoding()) realpath = os.path.realpath(filename) self.cur.execute("""SELECT fileid, st_mtime, st_size FROM files WHERE path=?;""", (realpath,)) filerow = self.cur.fetchone() mod_info = get_mod_info(realpath) if filerow: fileid = filerow[0] if not check_mod_info: # Update the mod_info of the file self.cur.execute("""UPDATE files SET st_mtime=?, st_size=? WHERE fileid=?;""", (mod_info[0], mod_info[1], fileid)) return fileid if (filerow[1], filerow[2]) == mod_info: return fileid # file wasn't in db at all, lets recache it if callable(store): store = store() else: store = store or factory.getobject(realpath) return self._cachestore(store, realpath, mod_info) def _getstoredcheckerconfig(self, checker): """See if this checker configuration has been used before.""" config = str(checker.config.__dict__) self.cur.execute("""SELECT configid, config FROM checkerconfigs WHERE config=?;""", (config,)) configrow = self.cur.fetchone() if not configrow or configrow[1] != config: return None else: return configrow[0] @transaction def _cacheunitstats(self, units, fileid, unitindex=None, file_totals_record=FileTotals.new_record()): """Cache the statistics for the supplied unit(s).""" unitvalues = [] for index, unit in enumerate(units): if unit.istranslatable(): sourcewords, targetwords = wordsinunit(unit) if unitindex: index = unitindex # what about plurals in .source and .target? unit_state_for_db = statefordb(unit) unitvalues.append((unit.getid(), fileid, index, \ unit.source, unit.target, \ sourcewords, targetwords, \ unit_state_for_db, unit.get_state_id())) file_totals_record = file_totals_record + FileTotals.new_record(unit_state_for_db, sourcewords, targetwords) # XXX: executemany is non-standard self.cur.executemany("""INSERT INTO units (unitid, fileid, unitindex, source, target, sourcewords, targetwords, state, e_state) values (?, ?, ?, ?, ?, ?, ?, ?, ?);""", unitvalues) self.file_totals[fileid] = file_totals_record if unitindex: return state_strings[statefordb(units[0])] return "" @transaction def _cachestore(self, store, realpath, mod_info): """Calculates and caches the statistics of the given store unconditionally.""" self.cur.execute("""DELETE FROM files WHERE path=?;""", (realpath,)) self.cur.execute("""INSERT INTO files (fileid, path, st_mtime, st_size, toolkitbuild) values (NULL, ?, ?, ?, ?);""", (realpath, mod_info[0], mod_info[1], toolkitversion.build)) fileid = self.cur.lastrowid self.cur.execute("""DELETE FROM units WHERE fileid=?""", (fileid,)) self._cacheunitstats(store.units, fileid) return fileid def file_extended_totals(self, filename, store=None): stats = {} fileid = self._getfileid(filename, store=store) self.cur.execute("""SELECT e_state, COUNT(id), SUM(sourcewords), SUM(targetwords) FROM units WHERE fileid=? GROUP BY e_state""", (fileid,)) values = self.cur.fetchall() for value in values: stats[extended_state_strings[value[0]]] = { "units": value[1], "sourcewords": value[2], "targetwords": value[3], } return stats def filetotals(self, filename, store=None, extended=False): """Retrieves the statistics for the given file if possible, otherwise delegates to cachestore().""" stats = self.file_totals[self._getfileid(filename, store=store)] if extended: stats["extended"] = self.file_extended_totals(filename, store=store) return stats @transaction def _cacheunitschecks(self, units, fileid, configid, checker, unitindex=None): """Helper method for cachestorechecks() and recacheunit()""" # We always want to store one dummy error to know that we have actually # run the checks on this file with the current checker configuration dummy = (-1, fileid, configid, "noerror", "") unitvalues = [dummy] # if we are doing a single unit, we want to return the checknames errornames = [] for index, unit in enumerate(units): if unit.istranslatable(): # Correctly assign the unitindex if unitindex: index = unitindex failures = checker.run_filters(unit) for checkname, checkmessage in failures.iteritems(): unitvalues.append((index, fileid, configid, checkname, checkmessage)) errornames.append("check-" + checkname) checker.setsuggestionstore(None) if unitindex: # We are only updating a single unit, so we don't want to add an # extra noerror-entry unitvalues.remove(dummy) errornames.append("total") # XXX: executemany is non-standard self.cur.executemany("""INSERT INTO uniterrors (unitindex, fileid, configid, name, message) values (?, ?, ?, ?, ?);""", unitvalues) return errornames @transaction def _cachestorechecks(self, fileid, store, checker, configid): """Calculates and caches the error statistics of the given store unconditionally.""" # Let's purge all previous failures because they will probably just # fill up the database without much use. self.cur.execute("""DELETE FROM uniterrors WHERE fileid=?;""", (fileid,)) self._cacheunitschecks(store.units, fileid, configid, checker) return fileid def get_unit_stats(self, fileid, unitid): values = self.cur.execute(""" SELECT state, sourcewords, targetwords FROM units WHERE fileid=? AND unitid=? """, (fileid, unitid)) result = values.fetchone() if result is not None: return result else: print >> sys.stderr, """WARNING: Database in inconsistent state. fileid %d and unitid %s have no entries in the table units.""" % (fileid, unitid) # If values.fetchone() is None, then we return an empty list, # to make FileTotals.new_record(*self.get_unit_stats(fileid, unitid)) # do the right thing. return [] @transaction def recacheunit(self, filename, checker, unit): """Recalculate all information for a specific unit. This is necessary for updating all statistics when a translation of a unit took place, for example. This method assumes that everything was up to date before (file totals, checks, checker config, etc.""" fileid = self._getfileid(filename, check_mod_info=False) configid = self._get_config_id(fileid, checker) unitid = unit.getid() # get the unit index totals_without_unit = self.file_totals[fileid] - \ FileTotals.new_record(*self.get_unit_stats(fileid, unitid)) self.cur.execute("""SELECT unitindex FROM units WHERE fileid=? AND unitid=?;""", (fileid, unitid)) unitindex = self.cur.fetchone()[0] self.cur.execute("""DELETE FROM units WHERE fileid=? AND unitid=?;""", (fileid, unitid)) state = [self._cacheunitstats([unit], fileid, unitindex, totals_without_unit)] # remove the current errors self.cur.execute("""DELETE FROM uniterrors WHERE fileid=? AND unitindex=?;""", (fileid, unitindex)) if os.path.exists(suggestion_filename(filename)): checker.setsuggestionstore(factory.getobject(suggestion_filename(filename), ignore=suggestion_extension())) state.extend(self._cacheunitschecks([unit], fileid, configid, checker, unitindex)) return state def _checkerrors(self, filename, fileid, configid, checker, store): def geterrors(): self.cur.execute("""SELECT name, unitindex FROM uniterrors WHERE fileid=? and configid=? ORDER BY unitindex;""", (fileid, configid)) return self.cur.fetchone(), self.cur first, cur = geterrors() if first is not None: return first, cur # This could happen if we haven't done the checks before, or the # file changed, or we are using a different configuration if callable(store): store = store() else: store = store or factory.getobject(filename) if os.path.exists(suggestion_filename(filename)): checker.setsuggestionstore(factory.getobject(suggestion_filename(filename), ignore=suggestion_extension())) self._cachestorechecks(fileid, store, checker, configid) return geterrors() def _geterrors(self, filename, fileid, configid, checker, store): result = [] first, cur = self._checkerrors(filename, fileid, configid, checker, store) result.append(first) result.extend(cur.fetchall()) return result @transaction def _get_config_id(self, fileid, checker): configid = self._getstoredcheckerconfig(checker) if configid: return configid self.cur.execute("""INSERT INTO checkerconfigs (configid, config) values (NULL, ?);""", (str(checker.config.__dict__),)) return self.cur.lastrowid def filechecks(self, filename, checker, store=None): """Retrieves the error statistics for the given file if possible, otherwise delegates to cachestorechecks().""" fileid = self._getfileid(filename, store=store) configid = self._get_config_id(fileid, checker) values = self._geterrors(filename, fileid, configid, checker, store) errors = emptyfilechecks() for value in values: if value[1] == -1: continue checkkey = 'check-' + value[0] #value[0] is the error name if not checkkey in errors: errors[checkkey] = [] errors[checkkey].append(value[1]) #value[1] is the unitindex return errors def file_fails_test(self, filename, checker, name): fileid = self._getfileid(filename) configid = self._get_config_id(fileid, checker) self._checkerrors(filename, fileid, configid, checker, None) self.cur.execute("""SELECT name, unitindex FROM uniterrors WHERE fileid=? and configid=? and name=?;""", (fileid, configid, name)) return self.cur.fetchone() is not None def filestatestats(self, filename, store=None, extended=False): """Return a dictionary of unit stats mapping sets of unit indices with those states""" stats = emptyfilestats() if extended: stats["extended"] = {} fileid = self._getfileid(filename, store=store) self.cur.execute("""SELECT state, e_state, unitindex FROM units WHERE fileid=? ORDER BY unitindex;""", (fileid,)) values = self.cur.fetchall() for value in values: stats[state_strings[value[0]]].append(value[2]) if extended: if value[1] not in stats["extended"]: stats["extended"][value[1]] = [] stats["extended"][value[1]].append(value[2]) stats["total"].append(value[2]) return stats def filestats(self, filename, checker, store=None, extended=False): """Return a dictionary of property names mapping sets of unit indices with those properties.""" stats = emptyfilestats() stats.update(self.filechecks(filename, checker, store)) stats.update(self.filestatestats(filename, store, extended=extended)) return stats def unitstats(self, filename, _lang=None, store=None): # For now, lang and store are unused. lang will allow the user to # base stats information on the given language. See the commented # line containing stats.update below. """Return a dictionary of property names mapping to arrays which map unit indices to property values. Please note that this is different from filestats, since filestats supplies sets of unit indices with a given property, whereas this method supplies arrays which map unit indices to given values.""" stats = emptyunitstats() #stats.update(self.unitchecks(filename, lang, store)) fileid = self._getfileid(filename, store=store) self.cur.execute("""SELECT sourcewords, targetwords FROM units WHERE fileid=? ORDER BY unitindex;""", (fileid,)) for sourcecount, targetcount in self.cur.fetchall(): stats["sourcewordcount"].append(sourcecount) stats["targetwordcount"].append(targetcount) return stats
gpl-2.0
agry/NGECore2
scripts/mobiles/rori/shallow_torton.py
2
1728
import sys from services.spawn import MobileTemplate from services.spawn import WeaponTemplate from resources.datatables import WeaponType from resources.datatables import Difficulty from resources.datatables import Options from java.util import Vector def addTemplate(core): mobileTemplate = MobileTemplate() mobileTemplate.setCreatureName('shallow_torton') mobileTemplate.setLevel(47) mobileTemplate.setDifficulty(Difficulty.NORMAL) mobileTemplate.setMinSpawnDistance(4) mobileTemplate.setMaxSpawnDistance(8) mobileTemplate.setDeathblow(False) mobileTemplate.setScale(.5) mobileTemplate.setMeatType("Carnivore Meat") mobileTemplate.setMeatAmount(650) mobileTemplate.setHideType("Wooly Hide") mobileTemplate.setHideAmount(575) mobileTemplate.setBoneType("Animal Bones") mobileTemplate.setBoneAmount(650) mobileTemplate.setSocialGroup("torton") mobileTemplate.setAssistRange(4) mobileTemplate.setStalker(False) mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE) templates = Vector() templates.add('object/mobile/shared_shallow_torton.iff') mobileTemplate.setTemplates(templates) weaponTemplates = Vector() weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic') weaponTemplates.add(weapontemplate) mobileTemplate.setWeaponTemplateVector(weaponTemplates) attacks = Vector() attacks.add('bm_dampen_pain_3') attacks.add('bm_deflective_hide') attacks.add('bm_puncture_1') attacks.add('shaken_3') attacks.add('bm_stomp_3') mobileTemplate.setDefaultAttack('creatureMeleeAttack') mobileTemplate.setAttacks(attacks) core.spawnService.addMobileTemplate('shallow_torton', mobileTemplate) return
lgpl-3.0
brownharryb/erpnext
erpnext/patches/v5_1/fix_against_account.py
107
1271
from __future__ import unicode_literals import frappe from erpnext.accounts.doctype.gl_entry.gl_entry import update_against_account def execute(): from_date = "2015-05-01" for doc in frappe.get_all("Journal Entry", filters={"creation": (">", from_date), "docstatus": "1"}): # update in gl_entry update_against_account("Journal Entry", doc.name) # update in jv doc = frappe.get_doc("Journal Entry", doc.name) doc.set_against_account() doc.db_update() for doc in frappe.get_all("Sales Invoice", filters={"creation": (">", from_date), "docstatus": "1"}, fields=["name", "customer"]): frappe.db.sql("""update `tabGL Entry` set against=%s where voucher_type='Sales Invoice' and voucher_no=%s and credit > 0 and ifnull(party, '')=''""", (doc.customer, doc.name)) for doc in frappe.get_all("Purchase Invoice", filters={"creation": (">", from_date), "docstatus": "1"}, fields=["name", "supplier"]): frappe.db.sql("""update `tabGL Entry` set against=%s where voucher_type='Purchase Invoice' and voucher_no=%s and debit > 0 and ifnull(party, '')=''""", (doc.supplier, doc.name))
gpl-3.0
zaventh/android_kernel_lge_hammerhead
tools/perf/scripts/python/net_dropmonitor.py
4235
1554
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms: if (i['loc'] >= loc): return (i['name'], i['loc']-loc) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
gpl-2.0
baylee/django
tests/forms_tests/widget_tests/test_selectdatewidget.py
35
20646
from datetime import date from django.forms import DateField, Form, SelectDateWidget from django.test import override_settings from django.utils import translation from django.utils.dates import MONTHS_AP from .base import WidgetTest class SelectDateWidgetTest(WidgetTest): maxDiff = None widget = SelectDateWidget( years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'), ) def test_render_empty(self): self.check_html(self.widget, 'mydate', '', html=( """ <select name="mydate_month" id="id_mydate_month"> <option value="0">---</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="0">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="0">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010">2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """ )) def test_render_none(self): """ Rendering the None or '' values should yield the same output. """ self.assertHTMLEqual( self.widget.render('mydate', None), self.widget.render('mydate', ''), ) def test_render_string(self): self.check_html(self.widget, 'mydate', '2010-04-15', html=( """ <select name="mydate_month" id="id_mydate_month"> <option value="0">---</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4" selected="selected">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="0">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15" selected="selected">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="0">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010" selected="selected">2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """ )) def test_render_datetime(self): self.assertHTMLEqual( self.widget.render('mydate', date(2010, 4, 15)), self.widget.render('mydate', '2010-04-15'), ) def test_render_invalid_date(self): """ Invalid dates should still render the failed date. """ self.check_html(self.widget, 'mydate', '2010-02-31', html=( """ <select name="mydate_month" id="id_mydate_month"> <option value="0">---</option> <option value="1">January</option> <option value="2" selected="selected">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="0">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31" selected="selected">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="0">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010" selected="selected">2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """ )) def test_custom_months(self): widget = SelectDateWidget(months=MONTHS_AP, years=('2013',)) self.check_html(widget, 'mydate', '', html=( """ <select name="mydate_month" id="id_mydate_month"> <option value="0">---</option> <option value="1">Jan.</option> <option value="2">Feb.</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">Aug.</option> <option value="9">Sept.</option> <option value="10">Oct.</option> <option value="11">Nov.</option> <option value="12">Dec.</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="0">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="0">---</option> <option value="2013">2013</option> </select> """ )) def test_selectdate_required(self): class GetNotRequiredDate(Form): mydate = DateField(widget=SelectDateWidget, required=False) class GetRequiredDate(Form): mydate = DateField(widget=SelectDateWidget, required=True) self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required) self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required) def test_selectdate_empty_label(self): w = SelectDateWidget(years=('2014',), empty_label='empty_label') # Rendering the default state with empty_label setted as string. self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3) w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day')) # Rendering the default state with empty_label tuple. self.assertHTMLEqual( w.render('mydate', ''), """ <select name="mydate_month" id="id_mydate_month"> <option value="0">empty_month</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="0">empty_day</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="0">empty_year</option> <option value="2014">2014</option> </select> """, ) with self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.'): SelectDateWidget(years=('2014',), empty_label=('not enough', 'values')) @override_settings(USE_L10N=True) @translation.override('nl') def test_l10n(self): w = SelectDateWidget( years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016') ) self.assertEqual( w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010', ) self.assertHTMLEqual( w.render('date', '13-08-2010'), """ <select name="date_day" id="id_date_day"> <option value="0">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13" selected="selected">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="date_month" id="id_date_month"> <option value="0">---</option> <option value="1">januari</option> <option value="2">februari</option> <option value="3">maart</option> <option value="4">april</option> <option value="5">mei</option> <option value="6">juni</option> <option value="7">juli</option> <option value="8" selected="selected">augustus</option> <option value="9">september</option> <option value="10">oktober</option> <option value="11">november</option> <option value="12">december</option> </select> <select name="date_year" id="id_date_year"> <option value="0">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010" selected="selected">2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """, ) # Even with an invalid date, the widget should reflect the entered value (#17401). self.assertEqual(w.render('mydate', '2010-02-30').count('selected="selected"'), 3) # Years before 1900 should work. w = SelectDateWidget(years=('1899',)) self.assertEqual( w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899', )
bsd-3-clause
drexly/tonginBlobStore
lib/django/contrib/admin/helpers.py
79
14890
from __future__ import unicode_literals import warnings from django import forms from django.conf import settings from django.contrib.admin.templatetags.admin_static import static from django.contrib.admin.utils import ( display_for_field, flatten_fieldsets, help_text_for_field, label_for_field, lookup_field, ) from django.core.exceptions import ObjectDoesNotExist from django.db.models.fields.related import ManyToManyRel from django.forms.utils import flatatt from django.template.defaultfilters import capfirst, linebreaksbr from django.utils import six from django.utils.deprecation import ( RemovedInDjango20Warning, RemovedInDjango110Warning, ) from django.utils.encoding import force_text, smart_text from django.utils.functional import cached_property from django.utils.html import conditional_escape, format_html from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ ACTION_CHECKBOX_NAME = '_selected_action' class ActionForm(forms.Form): action = forms.ChoiceField(label=_('Action:')) select_across = forms.BooleanField(label='', required=False, initial=0, widget=forms.HiddenInput({'class': 'select-across'})) checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False) class AdminForm(object): def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None): self.form, self.fieldsets = form, fieldsets self.prepopulated_fields = [{ 'field': form[field_name], 'dependencies': [form[f] for f in dependencies] } for field_name, dependencies in prepopulated_fields.items()] self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for name, options in self.fieldsets: yield Fieldset( self.form, name, readonly_fields=self.readonly_fields, model_admin=self.model_admin, **options ) def _media(self): media = self.form.media for fs in self: media = media + fs.media return media media = property(_media) class Fieldset(object): def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(), description=None, model_admin=None): self.form = form self.name, self.fields = name, fields self.classes = ' '.join(classes) self.description = description self.model_admin = model_admin self.readonly_fields = readonly_fields def _media(self): if 'collapse' in self.classes: extra = '' if settings.DEBUG else '.min' js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js', 'collapse%s.js' % extra] return forms.Media(js=[static('admin/js/%s' % url) for url in js]) return forms.Media() media = property(_media) def __iter__(self): for field in self.fields: yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class Fieldline(object): def __init__(self, form, field, readonly_fields=None, model_admin=None): self.form = form # A django.forms.Form instance if not hasattr(field, "__iter__") or isinstance(field, six.text_type): self.fields = [field] else: self.fields = field self.has_visible_field = not all(field in self.form.fields and self.form.fields[field].widget.is_hidden for field in self.fields) self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for i, field in enumerate(self.fields): if field in self.readonly_fields: yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin) else: yield AdminField(self.form, field, is_first=(i == 0)) def errors(self): return mark_safe( '\n'.join(self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields).strip('\n') ) class AdminField(object): def __init__(self, form, field, is_first): self.field = form[field] # A django.forms.BoundField instance self.is_first = is_first # Whether this field is first on the line self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput) self.is_readonly = False def label_tag(self): classes = [] contents = conditional_escape(force_text(self.field.label)) if self.is_checkbox: classes.append('vCheckboxLabel') if self.field.field.required: classes.append('required') if not self.is_first: classes.append('inline') attrs = {'class': ' '.join(classes)} if classes else {} # checkboxes should not have a label suffix as the checkbox appears # to the left of the label. return self.field.label_tag(contents=mark_safe(contents), attrs=attrs, label_suffix='' if self.is_checkbox else None) def errors(self): return mark_safe(self.field.errors.as_ul()) class AdminReadonlyField(object): def __init__(self, form, field, is_first, model_admin=None): # Make self.field look a little bit like a field. This means that # {{ field.name }} must be a useful class name to identify the field. # For convenience, store other field-related data here too. if callable(field): class_name = field.__name__ if field.__name__ != '<lambda>' else '' else: class_name = field if form._meta.labels and class_name in form._meta.labels: label = form._meta.labels[class_name] else: label = label_for_field(field, form._meta.model, model_admin) if form._meta.help_texts and class_name in form._meta.help_texts: help_text = form._meta.help_texts[class_name] else: help_text = help_text_for_field(class_name, form._meta.model) self.field = { 'name': class_name, 'label': label, 'help_text': help_text, 'field': field, } self.form = form self.model_admin = model_admin self.is_first = is_first self.is_checkbox = False self.is_readonly = True self.empty_value_display = model_admin.get_empty_value_display() def label_tag(self): attrs = {} if not self.is_first: attrs["class"] = "inline" label = self.field['label'] return format_html('<label{}>{}:</label>', flatatt(attrs), capfirst(force_text(label))) def contents(self): from django.contrib.admin.templatetags.admin_list import _boolean_icon field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin try: f, attr, value = lookup_field(field, obj, model_admin) except (AttributeError, ValueError, ObjectDoesNotExist): result_repr = self.empty_value_display else: if f is None: boolean = getattr(attr, "boolean", False) if boolean: result_repr = _boolean_icon(value) else: if hasattr(value, "__html__"): result_repr = value else: result_repr = smart_text(value) if getattr(attr, "allow_tags", False): warnings.warn( "Deprecated allow_tags attribute used on %s. " "Use django.utils.safestring.format_html(), " "format_html_join(), or mark_safe() instead." % attr, RemovedInDjango20Warning ) result_repr = mark_safe(value) else: result_repr = linebreaksbr(result_repr) else: if isinstance(f.remote_field, ManyToManyRel) and value is not None: result_repr = ", ".join(map(six.text_type, value.all())) else: result_repr = display_for_field(value, f, self.empty_value_display) return conditional_escape(result_repr) class InlineAdminFormSet(object): """ A wrapper around an inline formset for use in the admin system. """ def __init__(self, inline, formset, fieldsets, prepopulated_fields=None, readonly_fields=None, model_admin=None): self.opts = inline self.formset = formset self.fieldsets = fieldsets self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields if prepopulated_fields is None: prepopulated_fields = {} self.prepopulated_fields = prepopulated_fields def __iter__(self): for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()): view_on_site_url = self.opts.get_view_on_site_url(original) yield InlineAdminForm(self.formset, form, self.fieldsets, self.prepopulated_fields, original, self.readonly_fields, model_admin=self.opts, view_on_site_url=view_on_site_url) for form in self.formset.extra_forms: yield InlineAdminForm(self.formset, form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts) yield InlineAdminForm(self.formset, self.formset.empty_form, self.fieldsets, self.prepopulated_fields, None, self.readonly_fields, model_admin=self.opts) def fields(self): fk = getattr(self.formset, "fk", None) for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)): if fk and fk.name == field_name: continue if field_name in self.readonly_fields: yield { 'label': label_for_field(field_name, self.opts.model, self.opts), 'widget': { 'is_hidden': False }, 'required': False, 'help_text': help_text_for_field(field_name, self.opts.model), } else: yield self.formset.form.base_fields[field_name] def _media(self): media = self.opts.media + self.formset.media for fs in self: media = media + fs.media return media media = property(_media) class InlineAdminForm(AdminForm): """ A wrapper around an inline form for use in the admin system. """ def __init__(self, formset, form, fieldsets, prepopulated_fields, original, readonly_fields=None, model_admin=None, view_on_site_url=None): self.formset = formset self.model_admin = model_admin self.original = original self.show_url = original and view_on_site_url is not None self.absolute_url = view_on_site_url super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin) @cached_property def original_content_type_id(self): warnings.warn( 'InlineAdminForm.original_content_type_id is deprecated and will be ' 'removed in Django 1.10. If you were using this attribute to construct ' 'the "view on site" URL, use the `absolute_url` attribute instead.', RemovedInDjango110Warning, stacklevel=2 ) if self.original is not None: # Since this module gets imported in the application's root package, # it cannot import models from other applications at the module level. from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(self.original).pk raise AttributeError def __iter__(self): for name, options in self.fieldsets: yield InlineFieldset(self.formset, self.form, name, self.readonly_fields, model_admin=self.model_admin, **options) def needs_explicit_pk_field(self): # Auto fields are editable (oddly), so need to check for auto or non-editable pk if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable: return True # Also search any parents for an auto field. (The pk info is propagated to child # models so that does not need to be checked in parents.) for parent in self.form._meta.model._meta.get_parent_list(): if parent._meta.has_auto_field: return True return False def pk_field(self): return AdminField(self.form, self.formset._pk_field.name, False) def fk_field(self): fk = getattr(self.formset, "fk", None) if fk: return AdminField(self.form, fk.name, False) else: return "" def deletion_field(self): from django.forms.formsets import DELETION_FIELD_NAME return AdminField(self.form, DELETION_FIELD_NAME, False) def ordering_field(self): from django.forms.formsets import ORDERING_FIELD_NAME return AdminField(self.form, ORDERING_FIELD_NAME, False) class InlineFieldset(Fieldset): def __init__(self, formset, *args, **kwargs): self.formset = formset super(InlineFieldset, self).__init__(*args, **kwargs) def __iter__(self): fk = getattr(self.formset, "fk", None) for field in self.fields: if fk and fk.name == field: continue yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin) class AdminErrorList(forms.utils.ErrorList): """ Stores all errors for the form/formsets in an add/change stage view. """ def __init__(self, form, inline_formsets): super(AdminErrorList, self).__init__() if form.is_bound: self.extend(form.errors.values()) for inline_formset in inline_formsets: self.extend(inline_formset.non_form_errors()) for errors_in_inline_form in inline_formset.errors: self.extend(errors_in_inline_form.values())
bsd-3-clause
adit-chandra/tensorflow
tensorflow/tools/docs/parser.py
3
58781
# Lint as: python2, python3 # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Turn Python docstrings into Markdown for TensorFlow documentation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import ast import collections import functools import itertools import json import os import re import astor import six from six.moves import zip from google.protobuf.message import Message as ProtoMessage from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import tf_inspect from tensorflow.tools.docs import doc_controls def is_free_function(py_object, full_name, index): """Check if input is a free function (and not a class- or static method). Args: py_object: The the object in question. full_name: The full name of the object, like `tf.module.symbol`. index: The {full_name:py_object} dictionary for the public API. Returns: True if the obeject is a stand-alone function, and not part of a class definition. """ if not tf_inspect.isfunction(py_object): return False parent_name = six.ensure_str(full_name).rsplit('.', 1)[0] if tf_inspect.isclass(index[parent_name]): return False return True # A regular expression capturing a python identifier. IDENTIFIER_RE = r'[a-zA-Z_]\w*' class TFDocsError(Exception): pass class _Errors(object): """A collection of errors.""" def __init__(self): self._errors = [] def log_all(self): """Log all the collected errors to the standard error.""" template = 'ERROR:\n output file name: %s\n %s\n\n' for full_name, message in self._errors: logging.warn(template, full_name, message) def append(self, full_name, message): """Add an error to the collection. Args: full_name: The path to the file in which the error occurred. message: The message to display with the error. """ self._errors.append((full_name, message)) def __len__(self): return len(self._errors) def __eq__(self, other): if not isinstance(other, _Errors): return False return self._errors == other._errors # pylint: disable=protected-access def documentation_path(full_name, is_fragment=False): """Returns the file path for the documentation for the given API symbol. Given the fully qualified name of a library symbol, compute the path to which to write the documentation for that symbol (relative to a base directory). Documentation files are organized into directories that mirror the python module/class structure. Args: full_name: Fully qualified name of a library symbol. is_fragment: If `False` produce a direct markdown link (`tf.a.b.c` --> `tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` --> `tf/a/b.md#c` Returns: The file path to which to write the documentation for `full_name`. """ parts = six.ensure_str(full_name).split('.') if is_fragment: parts, fragment = parts[:-1], parts[-1] result = six.ensure_str(os.path.join(*parts)) + '.md' if is_fragment: result = six.ensure_str(result) + '#' + six.ensure_str(fragment) return result def _get_raw_docstring(py_object): """Get the docs for a given python object. Args: py_object: A python object to retrieve the docs for (class, function/method, or module). Returns: The docstring, or the empty string if no docstring was found. """ # For object instances, tf_inspect.getdoc does give us the docstring of their # type, which is not what we want. Only return the docstring if it is useful. if (tf_inspect.isclass(py_object) or tf_inspect.ismethod(py_object) or tf_inspect.isfunction(py_object) or tf_inspect.ismodule(py_object) or isinstance(py_object, property)): return tf_inspect.getdoc(py_object) or '' else: return '' # A regular expression for capturing a @{symbol} reference. SYMBOL_REFERENCE_RE = re.compile( r""" # Start with a literal "@{". @\{ # Group at least 1 symbol, not "}". ([^}]+) # Followed by a closing "}" \} """, flags=re.VERBOSE) AUTO_REFERENCE_RE = re.compile(r'`([a-zA-Z0-9_.]+?)`') class ReferenceResolver(object): """Class for replacing @{...} references with Markdown links. Attributes: current_doc_full_name: A string (or None) indicating the name of the document currently being processed, so errors can reference the broken doc. """ def __init__(self, duplicate_of, doc_index, is_fragment, py_module_names): """Initializes a Reference Resolver. Args: duplicate_of: A map from duplicate names to preferred names of API symbols. doc_index: A `dict` mapping symbol name strings to objects with `url` and `title` fields. Used to resolve @{$doc} references in docstrings. is_fragment: A map from full names to bool for each symbol. If True the object lives at a page fragment `tf.a.b.c` --> `tf/a/b#c`. If False object has a page to itself: `tf.a.b.c` --> `tf/a/b/c`. py_module_names: A list of string names of Python modules. """ self._duplicate_of = duplicate_of self._doc_index = doc_index self._is_fragment = is_fragment self._all_names = set(is_fragment.keys()) self._py_module_names = py_module_names self.current_doc_full_name = None self._errors = _Errors() def add_error(self, message): self._errors.append(self.current_doc_full_name, message) def log_errors(self): self._errors.log_all() def num_errors(self): return len(self._errors) @classmethod def from_visitor(cls, visitor, doc_index, **kwargs): """A factory function for building a ReferenceResolver from a visitor. Args: visitor: an instance of `DocGeneratorVisitor` doc_index: a dictionary mapping document names to references objects with "title" and "url" fields **kwargs: all remaining args are passed to the constructor Returns: an instance of `ReferenceResolver` () """ is_fragment = {} for name, obj in visitor.index.items(): has_page = ( tf_inspect.isclass(obj) or tf_inspect.ismodule(obj) or is_free_function(obj, name, visitor.index)) is_fragment[name] = not has_page return cls( duplicate_of=visitor.duplicate_of, doc_index=doc_index, is_fragment=is_fragment, **kwargs) @classmethod def from_json_file(cls, filepath, doc_index): with open(filepath) as f: json_dict = json.load(f) return cls(doc_index=doc_index, **json_dict) def to_json_file(self, filepath): """Converts the RefenceResolver to json and writes it to the specified file. Args: filepath: The file path to write the json to. """ try: os.makedirs(os.path.dirname(filepath)) except OSError: pass json_dict = {} for key, value in self.__dict__.items(): # Drop these two fields. `_doc_index` is not serializable. `_all_names` is # generated by the constructor. if key in ('_doc_index', '_all_names', '_errors', 'current_doc_full_name'): continue # Strip off any leading underscores on field names as these are not # recognized by the constructor. json_dict[key.lstrip('_')] = value with open(filepath, 'w') as f: json.dump(json_dict, f, indent=2, sort_keys=True) def replace_references(self, string, relative_path_to_root): """Replace "@{symbol}" references with links to symbol's documentation page. This functions finds all occurrences of "@{symbol}" in `string` and replaces them with markdown links to the documentation page for "symbol". `relative_path_to_root` is the relative path from the document that contains the "@{symbol}" reference to the root of the API documentation that is linked to. If the containing page is part of the same API docset, `relative_path_to_root` can be set to `os.path.dirname(documentation_path(name))`, where `name` is the python name of the object whose documentation page the reference lives on. Args: string: A string in which "@{symbol}" references should be replaced. relative_path_to_root: The relative path from the containing document to the root of the API documentation that is being linked to. Returns: `string`, with "@{symbol}" references replaced by Markdown links. """ def strict_one_ref(match): try: return self._one_ref(match, relative_path_to_root) except TFDocsError as e: self.add_error(e.message) return 'BAD_LINK' string = re.sub(SYMBOL_REFERENCE_RE, strict_one_ref, six.ensure_str(string)) def sloppy_one_ref(match): try: return self._one_ref(match, relative_path_to_root) except TFDocsError: return match.group(0) string = re.sub(AUTO_REFERENCE_RE, sloppy_one_ref, string) return string def python_link(self, link_text, ref_full_name, relative_path_to_root, code_ref=True): """Resolve a "@{python symbol}" reference to a Markdown link. This will pick the canonical location for duplicate symbols. The input to this function should already be stripped of the '@' and '{}'. This function returns a Markdown link. If `code_ref` is true, it is assumed that this is a code reference, so the link text will be rendered as code (using backticks). `link_text` should refer to a library symbol, starting with 'tf.'. Args: link_text: The text of the Markdown link. ref_full_name: The fully qualified name of the symbol to link to. relative_path_to_root: The relative path from the location of the current document to the root of the API documentation. code_ref: If true (the default), put `link_text` in `...`. Returns: A markdown link to the documentation page of `ref_full_name`. """ url = self.reference_to_url(ref_full_name, relative_path_to_root) if code_ref: link_text = link_text.join(['<code>', '</code>']) else: link_text = self._link_text_to_html(link_text) return '<a href="{}">{}</a>'.format(url, link_text) @staticmethod def _link_text_to_html(link_text): code_re = '`(.*?)`' return re.sub(code_re, r'<code>\1</code>', six.ensure_str(link_text)) def py_master_name(self, full_name): """Return the master name for a Python symbol name.""" return self._duplicate_of.get(full_name, full_name) def reference_to_url(self, ref_full_name, relative_path_to_root): """Resolve a "@{python symbol}" reference to a relative path. The input to this function should already be stripped of the '@' and '{}', and its output is only the link, not the full Markdown. If `ref_full_name` is the name of a class member, method, or property, the link will point to the page of the containing class, and it will include the method name as an anchor. For example, `tf.module.MyClass.my_method` will be translated into a link to `os.join.path(relative_path_to_root, 'tf/module/MyClass.md#my_method')`. Args: ref_full_name: The fully qualified name of the symbol to link to. relative_path_to_root: The relative path from the location of the current document to the root of the API documentation. Returns: A relative path that links from the documentation page of `from_full_name` to the documentation page of `ref_full_name`. Raises: RuntimeError: If `ref_full_name` is not documented. TFDocsError: If the @{} syntax cannot be decoded. """ master_name = self._duplicate_of.get(ref_full_name, ref_full_name) # Check whether this link exists if master_name not in self._all_names: raise TFDocsError( 'Cannot make link to "%s": Not in index.' % master_name) ref_path = documentation_path(master_name, self._is_fragment[master_name]) return os.path.join(relative_path_to_root, ref_path) def _one_ref(self, match, relative_path_to_root): """Return a link for a single "@{symbol}" reference.""" string = match.group(1) # Look for link text after $. dollar = string.rfind('$') if dollar > 0: # Ignore $ in first character link_text = string[dollar + 1:] string = string[:dollar] manual_link_text = True else: link_text = string manual_link_text = False # Handle different types of references. if six.ensure_str(string).startswith('$'): # Doc reference return self._doc_link(string, link_text, manual_link_text, relative_path_to_root) elif six.ensure_str(string).startswith('tensorflow::'): # C++ symbol return self._cc_link(string, link_text, manual_link_text, relative_path_to_root) else: is_python = False for py_module_name in self._py_module_names: if string == py_module_name or string.startswith( six.ensure_str(py_module_name) + '.'): is_python = True break if is_python: # Python symbol return self.python_link( link_text, string, relative_path_to_root, code_ref=not manual_link_text) # Error! raise TFDocsError('Did not understand "%s"' % match.group(0), 'BROKEN_LINK') def _doc_link(self, string, link_text, manual_link_text, relative_path_to_root): """Generate a link for a @{$...} reference.""" string = string[1:] # remove leading $ # If string has a #, split that part into `hash_tag` hash_pos = six.ensure_str(string).find('#') if hash_pos > -1: hash_tag = string[hash_pos:] string = string[:hash_pos] else: hash_tag = '' if string in self._doc_index: if not manual_link_text: link_text = self._doc_index[string].title url = os.path.normpath(os.path.join( relative_path_to_root, '../..', self._doc_index[string].url)) link_text = self._link_text_to_html(link_text) return '<a href="{}{}">{}</a>'.format(url, hash_tag, link_text) return self._doc_missing(string, hash_tag, link_text, manual_link_text, relative_path_to_root) def _doc_missing(self, string, unused_hash_tag, unused_link_text, unused_manual_link_text, unused_relative_path_to_root): """Generate an error for unrecognized @{$...} references.""" raise TFDocsError('Unknown Document "%s"' % string) def _cc_link(self, string, link_text, unused_manual_link_text, relative_path_to_root): """Generate a link for a @{tensorflow::...} reference.""" # TODO(josh11b): Fix this hard-coding of paths. if string == 'tensorflow::ClientSession': ret = 'class/tensorflow/client-session.md' elif string == 'tensorflow::Scope': ret = 'class/tensorflow/scope.md' elif string == 'tensorflow::Status': ret = 'class/tensorflow/status.md' elif string == 'tensorflow::Tensor': ret = 'class/tensorflow/tensor.md' elif string == 'tensorflow::ops::Const': ret = 'namespace/tensorflow/ops.md#const' else: raise TFDocsError('C++ reference not understood: "%s"' % string) # relative_path_to_root gets you to api_docs/python, we go from there # to api_docs/cc, and then add ret. cc_relative_path = os.path.normpath(os.path.join( relative_path_to_root, '../cc', ret)) return '<a href="{}"><code>{}</code></a>'.format(cc_relative_path, link_text) # TODO(aselle): Collect these into a big list for all modules and functions # and make a rosetta stone page. def _handle_compatibility(doc): """Parse and remove compatibility blocks from the main docstring. Args: doc: The docstring that contains compatibility notes" Returns: a tuple of the modified doc string and a hash that maps from compatibility note type to the text of the note. """ compatibility_notes = {} match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n' r'((?:[^@\n]*\n)+)' r'\s*@end_compatibility') for f in match_compatibility.finditer(doc): compatibility_notes[f.group(1)] = f.group(2) return match_compatibility.subn(r'', doc)[0], compatibility_notes def _gen_pairs(items): """Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...]. Args: items: A list of items (length must be even) Yields: The original items, in pairs """ assert len(items) % 2 == 0 items = iter(items) while True: try: yield next(items), next(items) except StopIteration: return class _FunctionDetail( collections.namedtuple('_FunctionDetail', ['keyword', 'header', 'items'])): """A simple class to contain function details. Composed of a "keyword", a possibly empty "header" string, and a possibly empty list of key-value pair "items". """ __slots__ = [] def __str__(self): """Return the original string that represents the function detail.""" parts = [six.ensure_str(self.keyword) + ':\n'] parts.append(self.header) for key, value in self.items: parts.append(' ' + six.ensure_str(key) + ': ') parts.append(value) return ''.join(parts) def _parse_function_details(docstring): r"""Given a docstring, split off the header and parse the function details. For example the docstring of tf.nn.relu: '''Computes rectified linear: `max(features, 0)`. Args: features: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `features`. ''' This is parsed, and returned as: ``` ('Computes rectified linear: `max(features, 0)`.\n\n', [ _FunctionDetail( keyword='Args', header='', items=[ ('features', ' A `Tensor`. Must be ...'), ('name', ' A name for the operation (optional).\n\n')]), _FunctionDetail( keyword='Returns', header=' A `Tensor`. Has the same type as `features`.', items=[]) ]) ``` Args: docstring: The docstring to parse Returns: A (header, function_details) pair, where header is a string and function_details is a (possibly empty) list of `_FunctionDetail` objects. """ detail_keywords = '|'.join([ 'Args', 'Arguments', 'Fields', 'Returns', 'Yields', 'Raises', 'Attributes' ]) tag_re = re.compile('(?<=\n)(' + detail_keywords + '):\n', re.MULTILINE) parts = tag_re.split(docstring) # The first part is the main docstring docstring = parts[0] # Everything else alternates keyword-content pairs = list(_gen_pairs(parts[1:])) function_details = [] item_re = re.compile(r'^ ? ?(\*?\*?\w[\w.]*?\s*):\s', re.MULTILINE) for keyword, content in pairs: content = item_re.split(six.ensure_str(content)) header = content[0] items = list(_gen_pairs(content[1:])) function_details.append(_FunctionDetail(keyword, header, items)) return docstring, function_details _DocstringInfo = collections.namedtuple('_DocstringInfo', [ 'brief', 'docstring', 'function_details', 'compatibility' ]) def _parse_md_docstring(py_object, relative_path_to_root, reference_resolver): """Parse the object's docstring and return a `_DocstringInfo`. This function clears @@'s from the docstring, and replaces @{} references with markdown links. For links within the same set of docs, the `relative_path_to_root` for a docstring on the page for `full_name` can be set to: ```python relative_path_to_root = os.path.relpath( path='.', start=os.path.dirname(documentation_path(full_name)) or '.') ``` Args: py_object: A python object to retrieve the docs for (class, function/method, or module). relative_path_to_root: The relative path from the location of the current document to the root of the Python API documentation. This is used to compute links for "@{symbol}" references. reference_resolver: An instance of ReferenceResolver. Returns: A _DocstringInfo object, all fields will be empty if no docstring was found. """ # TODO(wicke): If this is a partial, use the .func docstring and add a note. raw_docstring = _get_raw_docstring(py_object) raw_docstring = reference_resolver.replace_references( raw_docstring, relative_path_to_root) atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$') raw_docstring = '\n'.join( line for line in six.ensure_str(raw_docstring).split('\n') if not atat_re.match(six.ensure_str(line))) docstring, compatibility = _handle_compatibility(raw_docstring) docstring, function_details = _parse_function_details(docstring) if 'Generated by: tensorflow/tools/api/generator' in docstring: docstring = '' return _DocstringInfo( docstring.split('\n')[0], docstring, function_details, compatibility) def _get_arg_spec(func): """Extracts signature information from a function or functools.partial object. For functions, uses `tf_inspect.getfullargspec`. For `functools.partial` objects, corrects the signature of the underlying function to take into account the removed arguments. Args: func: A function whose signature to extract. Returns: An `FullArgSpec` namedtuple `(args, varargs, varkw, defaults, etc.)`, as returned by `tf_inspect.getfullargspec`. """ # getfullargspec does not work for functools.partial objects directly. if isinstance(func, functools.partial): argspec = tf_inspect.getfullargspec(func.func) # Remove the args from the original function that have been used up. first_default_arg = ( len(argspec.args or []) - len(argspec.defaults or [])) partial_args = len(func.args) argspec_args = [] if argspec.args: argspec_args = list(argspec.args[partial_args:]) argspec_defaults = list(argspec.defaults or ()) if argspec.defaults and partial_args > first_default_arg: argspec_defaults = list(argspec.defaults[partial_args-first_default_arg:]) first_default_arg = max(0, first_default_arg - partial_args) for kwarg in (func.keywords or []): if kwarg in (argspec.args or []): i = argspec_args.index(kwarg) argspec_args.pop(i) if i >= first_default_arg: argspec_defaults.pop(i-first_default_arg) else: first_default_arg -= 1 return tf_inspect.FullArgSpec( args=argspec_args, varargs=argspec.varargs, varkw=argspec.varkw, defaults=tuple(argspec_defaults), kwonlyargs=[], kwonlydefaults=None, annotations={}) else: # Regular function or method, getargspec will work fine. return tf_inspect.getfullargspec(func) def _remove_first_line_indent(string): indent = len(re.match(r'^\s*', six.ensure_str(string)).group(0)) return '\n'.join( [line[indent:] for line in six.ensure_str(string).split('\n')]) PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)') def _generate_signature(func, reverse_index): """Given a function, returns a list of strings representing its args. This function produces a list of strings representing the arguments to a python function. It uses tf_inspect.getfullargspec, which does not generalize well to Python 3.x, which is more flexible in how *args and **kwargs are handled. This is not a problem in TF, since we have to remain compatible to Python 2.7 anyway. This function uses `__name__` for callables if it is available. This can lead to poor results for functools.partial and other callable objects. The returned string is Python code, so if it is included in a Markdown document, it should be typeset as code (using backticks), or escaped. Args: func: A function, method, or functools.partial to extract the signature for. reverse_index: A map from object ids to canonical full names to use. Returns: A list of strings representing the argument signature of `func` as python code. """ args_list = [] argspec = _get_arg_spec(func) first_arg_with_default = ( len(argspec.args or []) - len(argspec.defaults or [])) # Python documentation skips `self` when printing method signatures. # Note we cannot test for ismethod here since unbound methods do not register # as methods (in Python 3). first_arg = 1 if 'self' in argspec.args[:1] else 0 # Add all args without defaults. for arg in argspec.args[first_arg:first_arg_with_default]: args_list.append(arg) # Add all args with defaults. if argspec.defaults: try: source = _remove_first_line_indent(tf_inspect.getsource(func)) func_ast = ast.parse(source) ast_defaults = func_ast.body[0].args.defaults except IOError: # If this is a builtin, getsource fails with IOError # If we cannot get the source, assume the AST would be equal to the repr # of the defaults. ast_defaults = [None] * len(argspec.defaults) for arg, default, ast_default in zip( argspec.args[first_arg_with_default:], argspec.defaults, ast_defaults): if id(default) in reverse_index: default_text = reverse_index[id(default)] elif ast_default is not None: default_text = ( six.ensure_str(astor.to_source(ast_default)).rstrip('\n').replace( '\t', '\\t').replace('\n', '\\n').replace('"""', "'")) default_text = PAREN_NUMBER_RE.sub('\\1', six.ensure_str(default_text)) if default_text != repr(default): # This may be an internal name. If so, handle the ones we know about. # TODO(wicke): This should be replaced with a lookup in the index. # TODO(wicke): (replace first ident with tf., check if in index) internal_names = { 'ops.GraphKeys': 'tf.GraphKeys', '_ops.GraphKeys': 'tf.GraphKeys', 'init_ops.zeros_initializer': 'tf.zeros_initializer', 'init_ops.ones_initializer': 'tf.ones_initializer', 'saver_pb2.SaverDef': 'tf.train.SaverDef', } full_name_re = '^%s(.%s)+' % (IDENTIFIER_RE, IDENTIFIER_RE) match = re.match(full_name_re, default_text) if match: lookup_text = default_text for internal_name, public_name in six.iteritems(internal_names): if match.group(0).startswith(internal_name): lookup_text = public_name + default_text[len(internal_name):] break if default_text is lookup_text: logging.warn( 'WARNING: Using default arg, failed lookup: %s, repr: %r', default_text, default) else: default_text = lookup_text else: default_text = repr(default) args_list.append('%s=%s' % (arg, default_text)) # Add *args and *kwargs. if argspec.varargs: args_list.append('*' + six.ensure_str(argspec.varargs)) if argspec.varkw: args_list.append('**' + six.ensure_str(argspec.varkw)) return args_list def _get_guides_markdown(duplicate_names, guide_index, relative_path): all_guides = [] for name in duplicate_names: all_guides.extend(guide_index.get(name, [])) if not all_guides: return '' prefix = '../' * (relative_path.count('/') + 3) links = sorted(set([guide_ref.make_md_link(prefix) for guide_ref in all_guides])) return 'See the guide%s: %s\n\n' % ( 's' if len(links) > 1 else '', ', '.join(links)) def _get_defining_class(py_class, name): for cls in tf_inspect.getmro(py_class): if name in cls.__dict__: return cls return None class _LinkInfo( collections.namedtuple( '_LinkInfo', ['short_name', 'full_name', 'obj', 'doc', 'url'])): __slots__ = [] def is_link(self): return True class _OtherMemberInfo( collections.namedtuple('_OtherMemberInfo', ['short_name', 'full_name', 'obj', 'doc'])): __slots__ = [] def is_link(self): return False _PropertyInfo = collections.namedtuple( '_PropertyInfo', ['short_name', 'full_name', 'obj', 'doc']) _MethodInfo = collections.namedtuple('_MethodInfo', [ 'short_name', 'full_name', 'obj', 'doc', 'signature', 'decorators' ]) class _FunctionPageInfo(object): """Collects docs For a function Page.""" def __init__(self, full_name): self._full_name = full_name self._defined_in = None self._aliases = None self._doc = None self._guides = None self._signature = None self._decorators = [] def for_function(self): return True def for_class(self): return False def for_module(self): return False @property def full_name(self): return self._full_name @property def short_name(self): return six.ensure_str(self._full_name).split('.')[-1] @property def defined_in(self): return self._defined_in def set_defined_in(self, defined_in): assert self.defined_in is None self._defined_in = defined_in @property def aliases(self): return self._aliases def set_aliases(self, aliases): assert self.aliases is None self._aliases = aliases @property def doc(self): return self._doc def set_doc(self, doc): assert self.doc is None self._doc = doc @property def guides(self): return self._guides def set_guides(self, guides): assert self.guides is None self._guides = guides @property def signature(self): return self._signature def set_signature(self, function, reverse_index): """Attach the function's signature. Args: function: The python function being documented. reverse_index: A map from object ids in the index to full names. """ assert self.signature is None self._signature = _generate_signature(function, reverse_index) @property def decorators(self): return list(self._decorators) def add_decorator(self, dec): self._decorators.append(dec) def get_metadata_html(self): return _Metadata(self.full_name).build_html() class _ClassPageInfo(object): """Collects docs for a class page. Attributes: full_name: The fully qualified name of the object at the master location. Aka `master_name`. For example: `tf.nn.sigmoid`. short_name: The last component of the `full_name`. For example: `sigmoid`. defined_in: The path to the file where this object is defined. aliases: The list of all fully qualified names for the locations where the object is visible in the public api. This includes the master location. doc: A `_DocstringInfo` object representing the object's docstring (can be created with `_parse_md_docstring`). guides: A markdown string, of back links pointing to the api_guides that reference this object. bases: A list of `_LinkInfo` objects pointing to the docs for the parent classes. properties: A list of `_PropertyInfo` objects documenting the class' properties (attributes that use `@property`). methods: A list of `_MethodInfo` objects documenting the class' methods. classes: A list of `_LinkInfo` objects pointing to docs for any nested classes. other_members: A list of `_OtherMemberInfo` objects documenting any other object's defined inside the class object (mostly enum style fields). """ def __init__(self, full_name): self._full_name = full_name self._defined_in = None self._aliases = None self._doc = None self._guides = None self._namedtuplefields = None self._bases = None self._properties = [] self._methods = [] self._classes = [] self._other_members = [] def for_function(self): """Returns true if this object documents a function.""" return False def for_class(self): """Returns true if this object documents a class.""" return True def for_module(self): """Returns true if this object documents a module.""" return False @property def full_name(self): """Returns the documented object's fully qualified name.""" return self._full_name @property def short_name(self): """Returns the documented object's short name.""" return six.ensure_str(self._full_name).split('.')[-1] @property def defined_in(self): """Returns the path to the file where the documented object is defined.""" return self._defined_in def set_defined_in(self, defined_in): """Sets the `defined_in` path.""" assert self.defined_in is None self._defined_in = defined_in @property def aliases(self): """Returns a list of all full names for the documented object.""" return self._aliases def set_aliases(self, aliases): """Sets the `aliases` list. Args: aliases: A list of strings. Containing all the object's full names. """ assert self.aliases is None self._aliases = aliases @property def doc(self): """Returns a `_DocstringInfo` created from the object's docstring.""" return self._doc def set_doc(self, doc): """Sets the `doc` field. Args: doc: An instance of `_DocstringInfo`. """ assert self.doc is None self._doc = doc @property def guides(self): """Returns a markdown string containing backlinks to relevant api_guides.""" return self._guides def set_guides(self, guides): """Sets the `guides` field. Args: guides: A markdown string containing backlinks to all the api_guides that link to the documented object. """ assert self.guides is None self._guides = guides @property def namedtuplefields(self): return self._namedtuplefields def set_namedtuplefields(self, py_class): if issubclass(py_class, tuple): if all( hasattr(py_class, attr) for attr in ('_asdict', '_fields', '_make', '_replace')): self._namedtuplefields = py_class._fields @property def bases(self): """Returns a list of `_LinkInfo` objects pointing to the class' parents.""" return self._bases def _set_bases(self, relative_path, parser_config): """Builds the `bases` attribute, to document this class' parent-classes. This method sets the `bases` to a list of `_LinkInfo` objects point to the doc pages for the class' parents. Args: relative_path: The relative path from the doc this object describes to the documentation root. parser_config: An instance of `ParserConfig`. """ bases = [] obj = parser_config.py_name_to_object(self.full_name) for base in obj.__bases__: base_full_name = parser_config.reverse_index.get(id(base), None) if base_full_name is None: continue base_doc = _parse_md_docstring(base, relative_path, parser_config.reference_resolver) base_url = parser_config.reference_resolver.reference_to_url( base_full_name, relative_path) link_info = _LinkInfo( short_name=six.ensure_str(base_full_name).split('.')[-1], full_name=base_full_name, obj=base, doc=base_doc, url=base_url) bases.append(link_info) self._bases = bases @property def properties(self): """Returns a list of `_PropertyInfo` describing the class' properties.""" props_dict = {prop.short_name: prop for prop in self._properties} props = [] if self.namedtuplefields: for field in self.namedtuplefields: props.append(props_dict.pop(field)) props.extend(sorted(props_dict.values())) return props def _add_property(self, short_name, full_name, obj, doc): """Adds a `_PropertyInfo` entry to the `properties` list. Args: short_name: The property's short name. full_name: The property's fully qualified name. obj: The property object itself doc: The property's parsed docstring, a `_DocstringInfo`. """ # Hide useless namedtuple docs-trings if re.match('Alias for field number [0-9]+', six.ensure_str(doc.docstring)): doc = doc._replace(docstring='', brief='') property_info = _PropertyInfo(short_name, full_name, obj, doc) self._properties.append(property_info) @property def methods(self): """Returns a list of `_MethodInfo` describing the class' methods.""" return self._methods def _add_method(self, short_name, full_name, obj, doc, signature, decorators): """Adds a `_MethodInfo` entry to the `methods` list. Args: short_name: The method's short name. full_name: The method's fully qualified name. obj: The method object itself doc: The method's parsed docstring, a `_DocstringInfo` signature: The method's parsed signature (see: `_generate_signature`) decorators: A list of strings describing the decorators that should be mentioned on the object's docs page. """ method_info = _MethodInfo(short_name, full_name, obj, doc, signature, decorators) self._methods.append(method_info) @property def classes(self): """Returns a list of `_LinkInfo` pointing to any nested classes.""" return self._classes def get_metadata_html(self): meta_data = _Metadata(self.full_name) for item in itertools.chain(self.classes, self.properties, self.methods, self.other_members): meta_data.append(item) return meta_data.build_html() def _add_class(self, short_name, full_name, obj, doc, url): """Adds a `_LinkInfo` for a nested class to `classes` list. Args: short_name: The class' short name. full_name: The class' fully qualified name. obj: The class object itself doc: The class' parsed docstring, a `_DocstringInfo` url: A url pointing to where the nested class is documented. """ page_info = _LinkInfo(short_name, full_name, obj, doc, url) self._classes.append(page_info) @property def other_members(self): """Returns a list of `_OtherMemberInfo` describing any other contents.""" return self._other_members def _add_other_member(self, short_name, full_name, obj, doc): """Adds an `_OtherMemberInfo` entry to the `other_members` list. Args: short_name: The class' short name. full_name: The class' fully qualified name. obj: The class object itself doc: The class' parsed docstring, a `_DocstringInfo` """ other_member_info = _OtherMemberInfo(short_name, full_name, obj, doc) self._other_members.append(other_member_info) def collect_docs_for_class(self, py_class, parser_config): """Collects information necessary specifically for a class's doc page. Mainly, this is details about the class's members. Args: py_class: The class object being documented parser_config: An instance of ParserConfig. """ self.set_namedtuplefields(py_class) doc_path = documentation_path(self.full_name) relative_path = os.path.relpath( path='.', start=os.path.dirname(doc_path) or '.') self._set_bases(relative_path, parser_config) for short_name in parser_config.tree[self.full_name]: # Remove builtin members that we never want to document. if short_name in [ '__class__', '__base__', '__weakref__', '__doc__', '__module__', '__dict__', '__abstractmethods__', '__slots__', '__getnewargs__', '__str__', '__repr__', '__hash__', '__reduce__' ]: continue child_name = '.'.join([self.full_name, short_name]) child = parser_config.py_name_to_object(child_name) # Don't document anything that is defined in object or by protobuf. defining_class = _get_defining_class(py_class, short_name) if defining_class in [object, type, tuple, BaseException, Exception]: continue # The following condition excludes most protobuf-defined symbols. if (defining_class and defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']): continue # TODO(markdaoust): Add a note in child docs showing the defining class. if doc_controls.should_skip_class_attr(py_class, short_name): continue child_doc = _parse_md_docstring(child, relative_path, parser_config.reference_resolver) if isinstance(child, property): self._add_property(short_name, child_name, child, child_doc) elif tf_inspect.isclass(child): if defining_class is None: continue url = parser_config.reference_resolver.reference_to_url( child_name, relative_path) self._add_class(short_name, child_name, child, child_doc, url) elif (tf_inspect.ismethod(child) or tf_inspect.isfunction(child) or tf_inspect.isroutine(child)): if defining_class is None: continue # Omit methods defined by namedtuple. original_method = defining_class.__dict__[short_name] if (hasattr(original_method, '__module__') and six.ensure_str( (original_method.__module__ or '')).startswith('namedtuple')): continue # Some methods are often overridden without documentation. Because it's # obvious what they do, don't include them in the docs if there's no # docstring. if not child_doc.brief.strip() and short_name in [ '__del__', '__copy__' ]: continue try: child_signature = _generate_signature(child, parser_config.reverse_index) except TypeError: # If this is a (dynamically created) slot wrapper, tf_inspect will # raise typeerror when trying to get to the code. Ignore such # functions. continue child_decorators = [] try: if isinstance(py_class.__dict__[short_name], classmethod): child_decorators.append('classmethod') except KeyError: pass try: if isinstance(py_class.__dict__[short_name], staticmethod): child_decorators.append('staticmethod') except KeyError: pass self._add_method(short_name, child_name, child, child_doc, child_signature, child_decorators) else: # Exclude members defined by protobuf that are useless if issubclass(py_class, ProtoMessage): if (six.ensure_str(short_name).endswith('_FIELD_NUMBER') or short_name in ['__slots__', 'DESCRIPTOR']): continue # TODO(wicke): We may want to also remember the object itself. self._add_other_member(short_name, child_name, child, child_doc) class _ModulePageInfo(object): """Collects docs for a module page.""" def __init__(self, full_name): self._full_name = full_name self._defined_in = None self._aliases = None self._doc = None self._guides = None self._modules = [] self._classes = [] self._functions = [] self._other_members = [] def for_function(self): return False def for_class(self): return False def for_module(self): return True @property def full_name(self): return self._full_name @property def short_name(self): return six.ensure_str(self._full_name).split('.')[-1] @property def defined_in(self): return self._defined_in def set_defined_in(self, defined_in): assert self.defined_in is None self._defined_in = defined_in @property def aliases(self): return self._aliases def set_aliases(self, aliases): assert self.aliases is None self._aliases = aliases @property def doc(self): return self._doc def set_doc(self, doc): assert self.doc is None self._doc = doc @property def guides(self): return self._guides def set_guides(self, guides): assert self.guides is None self._guides = guides @property def modules(self): return self._modules def _add_module(self, short_name, full_name, obj, doc, url): self._modules.append(_LinkInfo(short_name, full_name, obj, doc, url)) @property def classes(self): return self._classes def _add_class(self, short_name, full_name, obj, doc, url): self._classes.append(_LinkInfo(short_name, full_name, obj, doc, url)) @property def functions(self): return self._functions def _add_function(self, short_name, full_name, obj, doc, url): self._functions.append(_LinkInfo(short_name, full_name, obj, doc, url)) @property def other_members(self): return self._other_members def _add_other_member(self, short_name, full_name, obj, doc): self._other_members.append( _OtherMemberInfo(short_name, full_name, obj, doc)) def get_metadata_html(self): meta_data = _Metadata(self.full_name) # Objects with their own pages are not added to the matadata list for the # module, the module only has a link to the object page. No docs. for item in self.other_members: meta_data.append(item) return meta_data.build_html() def collect_docs_for_module(self, parser_config): """Collect information necessary specifically for a module's doc page. Mainly this is information about the members of the module. Args: parser_config: An instance of ParserConfig. """ relative_path = os.path.relpath( path='.', start=os.path.dirname(documentation_path(self.full_name)) or '.') member_names = parser_config.tree.get(self.full_name, []) for name in member_names: if name in ['__builtins__', '__doc__', '__file__', '__name__', '__path__', '__package__', '__cached__', '__loader__', '__spec__']: continue member_full_name = six.ensure_str(self.full_name) + '.' + six.ensure_str( name) if self.full_name else name member = parser_config.py_name_to_object(member_full_name) member_doc = _parse_md_docstring(member, relative_path, parser_config.reference_resolver) url = parser_config.reference_resolver.reference_to_url( member_full_name, relative_path) if tf_inspect.ismodule(member): self._add_module(name, member_full_name, member, member_doc, url) elif tf_inspect.isclass(member): self._add_class(name, member_full_name, member, member_doc, url) elif tf_inspect.isfunction(member): self._add_function(name, member_full_name, member, member_doc, url) else: self._add_other_member(name, member_full_name, member, member_doc) class ParserConfig(object): """Stores all indexes required to parse the docs.""" def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index, reverse_index, guide_index, base_dir): """Object with the common config for docs_for_object() calls. Args: reference_resolver: An instance of ReferenceResolver. duplicates: A `dict` mapping fully qualified names to a set of all aliases of this name. This is used to automatically generate a list of all aliases for each name. duplicate_of: A map from duplicate names to preferred names of API symbols. tree: A `dict` mapping a fully qualified name to the names of all its members. Used to populate the members section of a class or module page. index: A `dict` mapping full names to objects. reverse_index: A `dict` mapping object ids to full names. guide_index: A `dict` mapping symbol name strings to objects with a `make_md_link()` method. base_dir: A base path that is stripped from file locations written to the docs. """ self.reference_resolver = reference_resolver self.duplicates = duplicates self.duplicate_of = duplicate_of self.tree = tree self.reverse_index = reverse_index self.index = index self.guide_index = guide_index self.base_dir = base_dir self.defined_in_prefix = 'tensorflow/' self.code_url_prefix = ( '/code/stable/tensorflow/') # pylint: disable=line-too-long def py_name_to_object(self, full_name): """Return the Python object for a Python symbol name.""" return self.index[full_name] def docs_for_object(full_name, py_object, parser_config): """Return a PageInfo object describing a given object from the TF API. This function uses _parse_md_docstring to parse the docs pertaining to `object`. This function resolves '@{symbol}' references in the docstrings into links to the appropriate location. It also adds a list of alternative names for the symbol automatically. It assumes that the docs for each object live in a file given by `documentation_path`, and that relative links to files within the documentation are resolvable. Args: full_name: The fully qualified name of the symbol to be documented. py_object: The Python object to be documented. Its documentation is sourced from `py_object`'s docstring. parser_config: A ParserConfig object. Returns: Either a `_FunctionPageInfo`, `_ClassPageInfo`, or a `_ModulePageInfo` depending on the type of the python object being documented. Raises: RuntimeError: If an object is encountered for which we don't know how to make docs. """ # Which other aliases exist for the object referenced by full_name? master_name = parser_config.reference_resolver.py_master_name(full_name) duplicate_names = parser_config.duplicates.get(master_name, [full_name]) # TODO(wicke): Once other pieces are ready, enable this also for partials. if (tf_inspect.ismethod(py_object) or tf_inspect.isfunction(py_object) or # Some methods in classes from extensions come in as routines. tf_inspect.isroutine(py_object)): page_info = _FunctionPageInfo(master_name) page_info.set_signature(py_object, parser_config.reverse_index) elif tf_inspect.isclass(py_object): page_info = _ClassPageInfo(master_name) page_info.collect_docs_for_class(py_object, parser_config) elif tf_inspect.ismodule(py_object): page_info = _ModulePageInfo(master_name) page_info.collect_docs_for_module(parser_config) else: raise RuntimeError('Cannot make docs for object %s: %r' % (full_name, py_object)) relative_path = os.path.relpath( path='.', start=os.path.dirname(documentation_path(full_name)) or '.') page_info.set_doc(_parse_md_docstring( py_object, relative_path, parser_config.reference_resolver)) page_info.set_aliases(duplicate_names) page_info.set_guides(_get_guides_markdown( duplicate_names, parser_config.guide_index, relative_path)) page_info.set_defined_in(_get_defined_in(py_object, parser_config)) return page_info class _PythonBuiltin(object): """This class indicated that the object in question is a python builtin. This can be used for the `defined_in` slot of the `PageInfo` objects. """ def is_builtin(self): return True def is_python_file(self): return False def is_generated_file(self): return False def __str__(self): return 'This is an alias for a Python built-in.\n\n' class _PythonFile(object): """This class indicates that the object is defined in a regular python file. This can be used for the `defined_in` slot of the `PageInfo` objects. """ def __init__(self, path, parser_config): self.path = path self.path_prefix = parser_config.defined_in_prefix self.code_url_prefix = parser_config.code_url_prefix def is_builtin(self): return False def is_python_file(self): return True def is_generated_file(self): return False def __str__(self): return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format( path=self.path, prefix=self.path_prefix, code_prefix=self.code_url_prefix) class _ProtoFile(object): """This class indicates that the object is defined in a .proto file. This can be used for the `defined_in` slot of the `PageInfo` objects. """ def __init__(self, path, parser_config): self.path = path self.path_prefix = parser_config.defined_in_prefix self.code_url_prefix = parser_config.code_url_prefix def is_builtin(self): return False def is_python_file(self): return False def is_generated_file(self): return False def __str__(self): return 'Defined in [`{prefix}{path}`]({code_prefix}{path}).\n\n'.format( path=self.path, prefix=self.path_prefix, code_prefix=self.code_url_prefix) class _GeneratedFile(object): """This class indicates that the object is defined in a generated python file. Generated files should not be linked to directly. This can be used for the `defined_in` slot of the `PageInfo` objects. """ def __init__(self, path, parser_config): self.path = path self.path_prefix = parser_config.defined_in_prefix def is_builtin(self): return False def is_python_file(self): return False def is_generated_file(self): return True def __str__(self): return 'Defined in generated file: `%s%s`.\n\n' % (self.path_prefix, self.path) def _get_defined_in(py_object, parser_config): """Returns a description of where the passed in python object was defined. Args: py_object: The Python object. parser_config: A ParserConfig object. Returns: Either a `_PythonBuiltin`, `_PythonFile`, or a `_GeneratedFile` """ # Every page gets a note about where this object is defined # TODO(wicke): If py_object is decorated, get the decorated object instead. # TODO(wicke): Only use decorators that support this in TF. try: path = os.path.relpath(path=tf_inspect.getfile(py_object), start=parser_config.base_dir) except TypeError: # getfile throws TypeError if py_object is a builtin. return _PythonBuiltin() # TODO(wicke): If this is a generated file, link to the source instead. # TODO(wicke): Move all generated files to a generated/ directory. # TODO(wicke): And make their source file predictable from the file name. # In case this is compiled, point to the original if six.ensure_str(path).endswith('.pyc'): path = path[:-1] # Never include links outside this code base. if six.ensure_str(path).startswith('..') or re.search(r'\b_api\b', six.ensure_str(path)): return None if re.match(r'.*/gen_[^/]*\.py$', six.ensure_str(path)): return _GeneratedFile(path, parser_config) if 'genfiles' in path or 'tools/api/generator' in path: return _GeneratedFile(path, parser_config) elif re.match(r'.*_pb2\.py$', six.ensure_str(path)): # The _pb2.py files all appear right next to their defining .proto file. return _ProtoFile(six.ensure_str(path[:-7]) + '.proto', parser_config) else: return _PythonFile(path, parser_config) # TODO(markdaoust): This should just parse, pretty_docs should generate the md. def generate_global_index(library_name, index, reference_resolver): """Given a dict of full names to python objects, generate an index page. The index page generated contains a list of links for all symbols in `index` that have their own documentation page. Args: library_name: The name for the documented library to use in the title. index: A dict mapping full names to python objects. reference_resolver: An instance of ReferenceResolver. Returns: A string containing an index page as Markdown. """ symbol_links = [] for full_name, py_object in six.iteritems(index): if (tf_inspect.ismodule(py_object) or tf_inspect.isfunction(py_object) or tf_inspect.isclass(py_object)): # In Python 3, unbound methods are functions, so eliminate those. if tf_inspect.isfunction(py_object): if full_name.count('.') == 0: parent_name = '' else: parent_name = full_name[:full_name.rfind('.')] if parent_name in index and tf_inspect.isclass(index[parent_name]): # Skip methods (=functions with class parents). continue symbol_links.append(( full_name, reference_resolver.python_link(full_name, full_name, '.'))) lines = ['# All symbols in %s' % library_name, ''] for _, link in sorted(symbol_links, key=lambda x: x[0]): lines.append('* %s' % link) # TODO(markdaoust): use a _ModulePageInfo -> prety_docs.build_md_page() return '\n'.join(lines) class _Metadata(object): """A class for building a page's Metadata block. Attributes: name: The name of the page being described by the Metadata block. version: The source version. """ def __init__(self, name, version='Stable'): """Creates a Metadata builder. Args: name: The name of the page being described by the Metadata block. version: The source version. """ self.name = name self.version = version self._content = [] def append(self, item): """Adds an item from the page to the Metadata block. Args: item: The parsed page section to add. """ self._content.append(item.short_name) def build_html(self): """Returns the Metadata block as an Html string.""" schema = 'http://developers.google.com/ReferenceObject' parts = ['<div itemscope itemtype="%s">' % schema] parts.append('<meta itemprop="name" content="%s" />' % self.name) parts.append('<meta itemprop="path" content="%s" />' % self.version) for item in self._content: parts.append('<meta itemprop="property" content="%s"/>' % item) parts.extend(['</div>', '']) return '\n'.join(parts)
apache-2.0
franosincic/edx-platform
common/djangoapps/student/admin.py
22
6073
""" Django admin pages for student app """ from django import forms from django.contrib.auth.models import User from ratelimitbackend import admin from xmodule.modulestore.django import modulestore from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from config_models.admin import ConfigurationModelAdmin from student.models import ( UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration, CourseEnrollment, Registration, PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration ) from student.roles import REGISTERED_ACCESS_ROLES class CourseAccessRoleForm(forms.ModelForm): """Form for adding new Course Access Roles view the Django Admin Panel.""" class Meta(object): model = CourseAccessRole fields = '__all__' email = forms.EmailField(required=True) COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()] role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES) def clean_course_id(self): """ Checking course-id format and course exists in module store. This field can be null. """ if self.cleaned_data['course_id']: course_id = self.cleaned_data['course_id'] try: course_key = CourseKey.from_string(course_id) except InvalidKeyError: raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.") if not modulestore().has_course(course_key): raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id)) return course_key return None def clean_org(self): """If org and course-id exists then Check organization name against the given course. """ if self.cleaned_data.get('course_id') and self.cleaned_data['org']: org = self.cleaned_data['org'] org_name = self.cleaned_data.get('course_id').org if org.lower() != org_name.lower(): raise forms.ValidationError( u"Org name {} is not valid. Valid name is {}.".format( org, org_name ) ) return self.cleaned_data['org'] def clean_email(self): """ Checking user object against given email id. """ email = self.cleaned_data['email'] try: user = User.objects.get(email=email) except Exception: raise forms.ValidationError( u"Email does not exist. Could not find {email}. Please re-enter email address".format( email=email ) ) return user def clean(self): """ Checking the course already exists in db. """ cleaned_data = super(CourseAccessRoleForm, self).clean() if not self.errors: if CourseAccessRole.objects.filter( user=cleaned_data.get("email"), org=cleaned_data.get("org"), course_id=cleaned_data.get("course_id"), role=cleaned_data.get("role") ).exists(): raise forms.ValidationError("Duplicate Record.") return cleaned_data def __init__(self, *args, **kwargs): super(CourseAccessRoleForm, self).__init__(*args, **kwargs) if self.instance.user_id: self.fields['email'].initial = self.instance.user.email class CourseAccessRoleAdmin(admin.ModelAdmin): """Admin panel for the Course Access Role. """ form = CourseAccessRoleForm raw_id_fields = ("user",) exclude = ("user",) fieldsets = ( (None, { 'fields': ('email', 'course_id', 'org', 'role',) }), ) list_display = ( 'id', 'user', 'org', 'course_id', 'role', ) search_fields = ( 'id', 'user__username', 'user__email', 'org', 'course_id', 'role', ) def save_model(self, request, obj, form, change): obj.user = form.cleaned_data['email'] super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change) class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin): """Admin interface for the LinkedIn Add to Profile configuration. """ class Meta(object): model = LinkedInAddToProfileConfiguration # Exclude deprecated fields exclude = ('dashboard_tracking_code',) class CourseEnrollmentAdmin(admin.ModelAdmin): """ Admin interface for the CourseEnrollment model. """ list_display = ('id', 'course_id', 'mode', 'user', 'is_active',) list_filter = ('mode', 'is_active',) raw_id_fields = ('user',) search_fields = ('course_id', 'mode', 'user__username',) def queryset(self, request): return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user') class Meta(object): model = CourseEnrollment class UserProfileAdmin(admin.ModelAdmin): """ Admin interface for UserProfile model. """ list_display = ('user', 'name',) raw_id_fields = ('user',) search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email', 'name',) def get_readonly_fields(self, request, obj=None): # The user field should not be editable for an existing user profile. if obj: return self.readonly_fields + ('user',) return self.readonly_fields class Meta(object): model = UserProfile admin.site.register(UserTestGroup) admin.site.register(CourseEnrollmentAllowed) admin.site.register(Registration) admin.site.register(PendingNameChange) admin.site.register(CourseAccessRole, CourseAccessRoleAdmin) admin.site.register(DashboardConfiguration, ConfigurationModelAdmin) admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin) admin.site.register(CourseEnrollment, CourseEnrollmentAdmin) admin.site.register(UserProfile, UserProfileAdmin)
agpl-3.0
waxkinetic/fabcloudkit
fabcloudkit/build_tools/python_build.py
1
7221
""" fabcloudkit :copyright: (c) 2013 by Rick Bohrer. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import # pypi from fabric.context_managers import cd, prefix, settings from fabric.operations import run, sudo from fabric.state import env # package from fabcloudkit import ctx from ..build import build_repo, BuildInfo from ..internal import * from ..toolbase import Tool from ..tool.virtualenv import VirtualEnvTool from ..util import copy_file_from class PythonBuildTool(Tool): def build(self, repos, reference_repo=None, post_build=None, interpreter=None, tarball=False, unittest=None): """Performs a 'python' build. Performs a python build by running setup.py in each identified repo. If desired, repos can be refreshed first (e.g., via git pull). :param repos: specifies the list of repos in which to run setup.py. :param reference_repo: optional; the reference repo from which to retrieve the head commit id. this id used as a component of the build name. if not specified, the first repo in the context is used. :param post_build: a list of post-build commands. a list of dictionaries. each dict must contain the key "command" that specifies the command to execute. optionally, it may include a "sudo" value of [True|False], and an "ignore_fail" value of [True|False]. :param interpreter: specifies the Python interpreter to use in the build's virtualenv. if not specified, the operating system default interpreter is used. note that the interpreter must already exist on the system. :param tarball: True to create a tarball of the build; this is required if any other instance will use "copy_from". :param unittest: TBD :return: the new build name """ start_msg('Executing build for instance in role "{0}":'.format(env.role_name)) # increment the build name and create a new virtualenv for the build. build_name = self._increment_name(reference_repo) build_env_dir = ctx().build_path(build_name) VirtualEnvTool().ensure(build_env_dir, interpreter) # run "setup.py install" in each repo. for repo_name in ([repos] if isinstance(repos, basestring) else repos): build_repo(build_env_dir, ctx().get_repo(repo_name)) # run tests. self._unittest(unittest, build_name) # save the last known good build-name. BuildInfo.set_last_good(build_name) if tarball: self._tarball(build_name) # execute any post-build commands. if post_build: self._execute_post_build(post_build, build_name) # make the build_name available to the caller; it'll be set as an instance-tag. succeed_msg('Build completed successfully for role "{0}".'.format(env.role_name)) env.role.set_env(build_result=build_name) return self def copy_from(self, role_name, post_build=None, delete_tar=True): """Copies an existing build from an instance in the specified role. Instead of building itself, a build is copied from another instance to the current instance. :param role_name: the role of the instance to copy the build tarball from. :param post_build: list of post-build commands to execute. :param delete_tar: True to delete the tarball, False otherwise. :return: the name of the copied build. """ # get the last known good build from the source machine. # note: we could alternatively get this from an instance tag. message('Copying build from instance in role: "{0}"'.format(role_name)) inst, role = ctx().get_host_in_role(role_name) with settings(host_string=inst.public_dns_name, user=role.user): message('Getting last good build-name from: "{0}"'.format(role_name)) src_build_name = BuildInfo().get_last_good() # copy it from the source machine. note that all machines must have been provisioned # properly to allow the current machine access to the source machine. tarball = self._tarball_name(src_build_name) path = ctx().build_path(tarball) copy_file_from(role.user, inst.private_dns_name, path, path) with cd(ctx().builds_root()): # untar it. command = 'tar -x --file={tarball}'.format(**locals()) result = run(command) if result.failed: raise HaltError('Failed to untar: "{0}"'.format(path)) # delete the tar. if delete_tar: run('rm {tarball}'.format(**locals())) # update the build information. BuildInfo().set_last_good(src_build_name) # execute any post-build commands. if post_build: self._execute_post_build(post_build, src_build_name) succeed_msg('Successfully copied build: "{0}"'.format(src_build_name)) return src_build_name def _execute_post_build(self, cmd_lst, build_name): message('Running post-build commands:') with prefix(VirtualEnvTool.activate_prefix(ctx().build_path(build_name))): for desc in cmd_lst: f = sudo if desc.get('sudo', False) else run result = f(desc['command']) if result.failed and not desc.get('ignore_fail', False): raise HaltError('Post-build command failed: "{0}"'.format(desc['command'])) message('Completed post-build commands.') return self def _increment_name(self, ref_repo_name): # some projects have more than one repo. in this case one is designated as the "reference". # the reference repo gives it's most recent commit ID that's used in the new build name. # if no reference is given, just use the first (hopefully, the only) repo in the Context. if ref_repo_name: ref_repo = ctx().get_repo(ref_repo_name) else: ref_repo = ctx().repos()[0] name = BuildInfo.next(ref_repo.dir) succeed_msg('Created new build name: "{0}"'.format(name)) return name def _tarball(self, build_name): tarball = self._tarball_name(build_name) dir_to_tar = ctx().build_path(build_name) with cd(ctx().builds_root()): options = '--create --gzip --format=ustar --owner=0 --group=0' command = 'tar {options} --file={tarball} {build_name}'.format(**locals()) result = run(command) if result.failed: raise HaltError('Failed to create tarball for: "{0}"'.format(dir_to_tar)) succeed_msg('Created build tarball: "{0}"'.format(tarball)) return self def _tarball_name(self, build_name): return '{build_name}.tar.gz'.format(**locals()) def _unittest(self, plan, build_name): failed_msg('The action "unittest" is not implemented (yet).') return self # register. Tool.__tools__['python_build'] = PythonBuildTool
bsd-3-clause
huchoi/edx-platform
common/djangoapps/django_future/csrf.py
104
2882
# Taken from Django 1.4 import warnings from django.middleware.csrf import CsrfViewMiddleware, get_token from django.utils.decorators import decorator_from_middleware, available_attrs from functools import wraps csrf_protect = decorator_from_middleware(CsrfViewMiddleware) csrf_protect.__name__ = "csrf_protect" csrf_protect.__doc__ = """ This decorator adds CSRF protection in exactly the same way as CsrfViewMiddleware, but it can be used on a per view basis. Using both, or using the decorator multiple times, is harmless and efficient. """ class _EnsureCsrfToken(CsrfViewMiddleware): # We need this to behave just like the CsrfViewMiddleware, but not reject # requests. def _reject(self, request, reason): return None requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken) requires_csrf_token.__name__ = 'requires_csrf_token' requires_csrf_token.__doc__ = """ Use this decorator on views that need a correct csrf_token available to RequestContext, but without the CSRF protection that csrf_protect enforces. """ class _EnsureCsrfCookie(CsrfViewMiddleware): def _reject(self, request, reason): return None def process_view(self, request, callback, callback_args, callback_kwargs): retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs) # Forces process_response to send the cookie get_token(request) return retval ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie) ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie' ensure_csrf_cookie.__doc__ = """ Use this decorator to ensure that a view sets a CSRF cookie, whether or not it uses the csrf_token template tag, or the CsrfViewMiddleware is used. """ def csrf_response_exempt(view_func): """ Modifies a view function so that its response is exempt from the post-processing of the CSRF middleware. """ warnings.warn("csrf_response_exempt is deprecated. It no longer performs a " "function, and calls to it can be removed.", PendingDeprecationWarning) return view_func def csrf_view_exempt(view_func): """ Marks a view function as being exempt from CSRF view protection. """ warnings.warn("csrf_view_exempt is deprecated. Use csrf_exempt instead.", PendingDeprecationWarning) return csrf_exempt(view_func) def csrf_exempt(view_func): """ Marks a view function as being exempt from the CSRF view protection. """ # We could just do view_func.csrf_exempt = True, but decorators # are nicer if they don't have side-effects, so we return a new # function. def wrapped_view(*args, **kwargs): return view_func(*args, **kwargs) wrapped_view.csrf_exempt = True return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
agpl-3.0
cloudera/hue
desktop/core/ext-py/ply-3.11/ply/ctokens.py
17
3155
# ---------------------------------------------------------------------- # ctokens.py # # Token specifications for symbols in ANSI C and C++. This file is # meant to be used as a library in other tokenizers. # ---------------------------------------------------------------------- # Reserved words tokens = [ # Literals (identifier, integer constant, float constant, string constant, char const) 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER', # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=) 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO', 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', 'LOR', 'LAND', 'LNOT', 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=) 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', # Increment/decrement (++,--) 'INCREMENT', 'DECREMENT', # Structure dereference (->) 'ARROW', # Ternary operator (?) 'TERNARY', # Delimeters ( ) [ ] { } , . ; : 'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET', 'LBRACE', 'RBRACE', 'COMMA', 'PERIOD', 'SEMI', 'COLON', # Ellipsis (...) 'ELLIPSIS', ] # Operators t_PLUS = r'\+' t_MINUS = r'-' t_TIMES = r'\*' t_DIVIDE = r'/' t_MODULO = r'%' t_OR = r'\|' t_AND = r'&' t_NOT = r'~' t_XOR = r'\^' t_LSHIFT = r'<<' t_RSHIFT = r'>>' t_LOR = r'\|\|' t_LAND = r'&&' t_LNOT = r'!' t_LT = r'<' t_GT = r'>' t_LE = r'<=' t_GE = r'>=' t_EQ = r'==' t_NE = r'!=' # Assignment operators t_EQUALS = r'=' t_TIMESEQUAL = r'\*=' t_DIVEQUAL = r'/=' t_MODEQUAL = r'%=' t_PLUSEQUAL = r'\+=' t_MINUSEQUAL = r'-=' t_LSHIFTEQUAL = r'<<=' t_RSHIFTEQUAL = r'>>=' t_ANDEQUAL = r'&=' t_OREQUAL = r'\|=' t_XOREQUAL = r'\^=' # Increment/decrement t_INCREMENT = r'\+\+' t_DECREMENT = r'--' # -> t_ARROW = r'->' # ? t_TERNARY = r'\?' # Delimeters t_LPAREN = r'\(' t_RPAREN = r'\)' t_LBRACKET = r'\[' t_RBRACKET = r'\]' t_LBRACE = r'\{' t_RBRACE = r'\}' t_COMMA = r',' t_PERIOD = r'\.' t_SEMI = r';' t_COLON = r':' t_ELLIPSIS = r'\.\.\.' # Identifiers t_ID = r'[A-Za-z_][A-Za-z0-9_]*' # Integer literal t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?' # Floating literal t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' # String literal t_STRING = r'\"([^\\\n]|(\\.))*?\"' # Character constant 'c' or L'c' t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\'' # Comment (C-Style) def t_COMMENT(t): r'/\*(.|\n)*?\*/' t.lexer.lineno += t.value.count('\n') return t # Comment (C++-Style) def t_CPPCOMMENT(t): r'//.*\n' t.lexer.lineno += 1 return t
apache-2.0
t0mm0/youtube-dl
youtube_dl/extractor/kaltura.py
63
4867
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_urllib_parse from ..utils import ( ExtractorError, int_or_none, ) class KalturaIE(InfoExtractor): _VALID_URL = r'''(?x) (?:kaltura:| https?://(:?(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_ )(?P<partner_id>\d+) (?::| /(?:[^/]+/)*?entry_id/ )(?P<id>[0-9a-z_]+)''' _API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?' _TESTS = [ { 'url': 'kaltura:269692:1_1jc2y3e4', 'md5': '3adcbdb3dcc02d647539e53f284ba171', 'info_dict': { 'id': '1_1jc2y3e4', 'ext': 'mp4', 'title': 'Track 4', 'upload_date': '20131219', 'uploader_id': 'mlundberg@wolfgangsvault.com', 'description': 'The Allman Brothers Band, 12/16/1981', 'thumbnail': 're:^https?://.*/thumbnail/.*', 'timestamp': int, }, }, { 'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4', 'only_matching': True, }, { 'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3', 'only_matching': True, }, ] def _kaltura_api_call(self, video_id, actions, *args, **kwargs): params = actions[0] if len(actions) > 1: for i, a in enumerate(actions[1:], start=1): for k, v in a.items(): params['%d:%s' % (i, k)] = v query = compat_urllib_parse.urlencode(params) url = self._API_BASE + query data = self._download_json(url, video_id, *args, **kwargs) status = data if len(actions) == 1 else data[0] if status.get('objectType') == 'KalturaAPIException': raise ExtractorError( '%s said: %s' % (self.IE_NAME, status['message'])) return data def _get_kaltura_signature(self, video_id, partner_id): actions = [{ 'apiVersion': '3.1', 'expiry': 86400, 'format': 1, 'service': 'session', 'action': 'startWidgetSession', 'widgetId': '_%s' % partner_id, }] return self._kaltura_api_call( video_id, actions, note='Downloading Kaltura signature')['ks'] def _get_video_info(self, video_id, partner_id): signature = self._get_kaltura_signature(video_id, partner_id) actions = [ { 'action': 'null', 'apiVersion': '3.1.5', 'clientTag': 'kdp:v3.8.5', 'format': 1, # JSON, 2 = XML, 3 = PHP 'service': 'multirequest', 'ks': signature, }, { 'action': 'get', 'entryId': video_id, 'service': 'baseentry', 'version': '-1', }, { 'action': 'getContextData', 'contextDataParams:objectType': 'KalturaEntryContextDataParams', 'contextDataParams:referrer': 'http://www.kaltura.com/', 'contextDataParams:streamerType': 'http', 'entryId': video_id, 'service': 'baseentry', }, ] return self._kaltura_api_call( video_id, actions, note='Downloading video info JSON') def _real_extract(self, url): video_id = self._match_id(url) mobj = re.match(self._VALID_URL, url) partner_id, entry_id = mobj.group('partner_id'), mobj.group('id') info, source_data = self._get_video_info(entry_id, partner_id) formats = [{ 'format_id': '%(fileExt)s-%(bitrate)s' % f, 'ext': f['fileExt'], 'tbr': f['bitrate'], 'fps': f.get('frameRate'), 'filesize_approx': int_or_none(f.get('size'), invscale=1024), 'container': f.get('containerFormat'), 'vcodec': f.get('videoCodecId'), 'height': f.get('height'), 'width': f.get('width'), 'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']), } for f in source_data['flavorAssets']] self._sort_formats(formats) return { 'id': video_id, 'title': info['name'], 'formats': formats, 'description': info.get('description'), 'thumbnail': info.get('thumbnailUrl'), 'duration': info.get('duration'), 'timestamp': info.get('createdAt'), 'uploader_id': info.get('userId'), 'view_count': info.get('plays'), }
unlicense
koniiiik/django
tests/string_lookup/models.py
281
1533
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Foo(models.Model): name = models.CharField(max_length=50) friend = models.CharField(max_length=50, blank=True) def __str__(self): return "Foo %s" % self.name @python_2_unicode_compatible class Bar(models.Model): name = models.CharField(max_length=50) normal = models.ForeignKey(Foo, models.CASCADE, related_name='normal_foo') fwd = models.ForeignKey("Whiz", models.CASCADE) back = models.ForeignKey("Foo", models.CASCADE) def __str__(self): return "Bar %s" % self.place.name @python_2_unicode_compatible class Whiz(models.Model): name = models.CharField(max_length=50) def __str__(self): return "Whiz %s" % self.name @python_2_unicode_compatible class Child(models.Model): parent = models.OneToOneField('Base', models.CASCADE) name = models.CharField(max_length=50) def __str__(self): return "Child %s" % self.name @python_2_unicode_compatible class Base(models.Model): name = models.CharField(max_length=50) def __str__(self): return "Base %s" % self.name @python_2_unicode_compatible class Article(models.Model): name = models.CharField(max_length=50) text = models.TextField() submitted_from = models.GenericIPAddressField(blank=True, null=True) def __str__(self): return "Article %s" % self.name
bsd-3-clause
autosub-team/autosub
src/plugins/vels_ob/swagger_client/models/task.py
2
6371
# coding: utf-8 """ HDL Testing Platform REST API for HDL TP # noqa: E501 OpenAPI spec version: 1.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class Task(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'user_id': 'str', 'hdl_file': 'str', 'design': 'str', 'pblock': 'int', 'peripherals': 'list[str]', 'pins': 'list[Pin]' } attribute_map = { 'user_id': 'user_id', 'hdl_file': 'hdl_file', 'design': 'design', 'pblock': 'pblock', 'peripherals': 'peripherals', 'pins': 'pins' } def __init__(self, user_id=None, hdl_file=None, design=None, pblock=None, peripherals=None, pins=None): # noqa: E501 """Task - a model defined in Swagger""" # noqa: E501 self._user_id = None self._hdl_file = None self._design = None self._pblock = None self._peripherals = None self._pins = None self.discriminator = None self.user_id = user_id if hdl_file is not None: self.hdl_file = hdl_file if design is not None: self.design = design if pblock is not None: self.pblock = pblock if peripherals is not None: self.peripherals = peripherals if pins is not None: self.pins = pins @property def user_id(self): """Gets the user_id of this Task. # noqa: E501 user Identifier # noqa: E501 :return: The user_id of this Task. # noqa: E501 :rtype: str """ return self._user_id @user_id.setter def user_id(self, user_id): """Sets the user_id of this Task. user Identifier # noqa: E501 :param user_id: The user_id of this Task. # noqa: E501 :type: str """ if user_id is None: raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501 self._user_id = user_id @property def hdl_file(self): """Gets the hdl_file of this Task. # noqa: E501 uploaded hdl file name # noqa: E501 :return: The hdl_file of this Task. # noqa: E501 :rtype: str """ return self._hdl_file @hdl_file.setter def hdl_file(self, hdl_file): """Sets the hdl_file of this Task. uploaded hdl file name # noqa: E501 :param hdl_file: The hdl_file of this Task. # noqa: E501 :type: str """ self._hdl_file = hdl_file @property def design(self): """Gets the design of this Task. # noqa: E501 design # noqa: E501 :return: The design of this Task. # noqa: E501 :rtype: str """ return self._design @design.setter def design(self, design): """Sets the design of this Task. design # noqa: E501 :param design: The design of this Task. # noqa: E501 :type: str """ self._design = design @property def pblock(self): """Gets the pblock of this Task. # noqa: E501 pblock # noqa: E501 :return: The pblock of this Task. # noqa: E501 :rtype: int """ return self._pblock @pblock.setter def pblock(self, pblock): """Sets the pblock of this Task. pblock # noqa: E501 :param pblock: The pblock of this Task. # noqa: E501 :type: int """ self._pblock = pblock @property def peripherals(self): """Gets the peripherals of this Task. # noqa: E501 :return: The peripherals of this Task. # noqa: E501 :rtype: list[str] """ return self._peripherals @peripherals.setter def peripherals(self, peripherals): """Sets the peripherals of this Task. :param peripherals: The peripherals of this Task. # noqa: E501 :type: list[str] """ self._peripherals = peripherals @property def pins(self): """Gets the pins of this Task. # noqa: E501 :return: The pins of this Task. # noqa: E501 :rtype: list[Pin] """ return self._pins @pins.setter def pins(self, pins): """Sets the pins of this Task. :param pins: The pins of this Task. # noqa: E501 :type: list[Pin] """ self._pins = pins def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Task, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Task): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
gpl-2.0
ajstarna/RicochetRobots
Brobot/model.py
1
9336
import itertools import random # Directions NORTH = 'N' EAST = 'E' SOUTH = 'S' WEST = 'W' DIRECTIONS = [NORTH, EAST, SOUTH, WEST] REVERSE = { NORTH: SOUTH, EAST: WEST, SOUTH: NORTH, WEST: EAST, } OFFSET = { NORTH: -16, EAST: 1, SOUTH: 16, WEST: -1, } # Masks M_NORTH = 0x01 M_EAST = 0x02 M_SOUTH = 0x04 M_WEST = 0x08 M_ROBOT = 0x10 M_LOOKUP = { NORTH: M_NORTH, EAST: M_EAST, SOUTH: M_SOUTH, WEST: M_WEST, } # Colors RED = 'R' GREEN = 'G' BLUE = 'B' YELLOW = 'Y' COLORS = [RED, GREEN, BLUE, YELLOW] # Shapes CIRCLE = 'C' TRIANGLE = 'T' SQUARE = 'Q' HEXAGON = 'H' SHAPES = [CIRCLE, TRIANGLE, SQUARE, HEXAGON] # Tokens TOKENS = [''.join(token) for token in itertools.product(COLORS, SHAPES)] # Quadrants QUAD_1A = ( 'NW,N,N,N,NE,NW,N,N,' 'W,S,X,X,X,X,SEYH,W,' 'WE,NWGT,X,X,X,X,N,X,' 'W,X,X,X,X,X,X,X,' 'W,X,X,X,X,X,S,X,' 'SW,X,X,X,X,X,NEBQ,W,' 'NW,X,E,SWRC,X,X,X,S,' 'W,X,X,N,X,X,E,NW' ) QUAD_1B = ( 'NW,NE,NW,N,NS,N,N,N,' 'W,S,X,E,NWRC,X,X,X,' 'W,NEGT,W,X,X,X,X,X,' 'W,X,X,X,X,X,SEYH,W,' 'W,X,X,X,X,X,N,X,' 'SW,X,X,X,X,X,X,X,' 'NW,X,E,SWBQ,X,X,X,S,' 'W,X,X,N,X,X,E,NW' ) QUAD_2A = ( 'NW,N,N,NE,NW,N,N,N,' 'W,X,X,X,X,E,SWBC,X,' 'W,S,X,X,X,X,N,X,' 'W,NEYT,W,X,X,S,X,X,' 'W,X,X,X,E,NWGQ,X,X,' 'W,X,SERH,W,X,X,X,X,' 'SW,X,N,X,X,X,X,S,' 'NW,X,X,X,X,X,E,NW' ) QUAD_2B = ( 'NW,N,N,N,NE,NW,N,N,' 'W,X,SERH,W,X,X,X,X,' 'W,X,N,X,X,X,X,X,' 'WE,SWGQ,X,X,X,X,S,X,' 'SW,N,X,X,X,E,NWYT,X,' 'NW,X,X,X,X,S,X,X,' 'W,X,X,X,X,NEBC,W,S,' 'W,X,X,X,X,X,E,NW' ) QUAD_3A = ( 'NW,N,N,NE,NW,N,N,N,' 'W,X,X,X,X,SEGH,W,X,' 'WE,SWRQ,X,X,X,N,X,X,' 'SW,N,X,X,X,X,S,X,' 'NW,X,X,X,X,E,NWYC,X,' 'W,X,S,X,X,X,X,X,' 'W,X,NEBT,W,X,X,X,S,' 'W,X,X,X,X,X,E,NW' ) QUAD_3B = ( 'NW,N,NS,N,NE,NW,N,N,' 'W,E,NWYC,X,X,X,X,X,' 'W,X,X,X,X,X,X,X,' 'W,X,X,X,X,E,SWBT,X,' 'SW,X,X,X,S,X,N,X,' 'NW,X,X,X,NERQ,W,X,X,' 'W,SEGH,W,X,X,X,X,S,' 'W,N,X,X,X,X,E,NW' ) QUAD_4A = ( 'NW,N,N,NE,NW,N,N,N,' 'W,X,X,X,X,X,X,X,' 'W,X,X,X,X,SEBH,W,X,' 'W,X,S,X,X,N,X,X,' 'SW,X,NEGC,W,X,X,X,X,' 'NW,S,X,X,X,X,E,SWRT,' 'WE,NWYQ,X,X,X,X,X,NS,' 'W,X,X,X,X,X,E,NW' ) QUAD_4B = ( 'NW,N,N,NE,NW,N,N,N,' 'WE,SWRT,X,X,X,X,S,X,' 'W,N,X,X,X,X,NEGC,W,' 'W,X,X,X,X,X,X,X,' 'W,X,SEBH,W,X,X,X,S,' 'SW,X,N,X,X,X,E,NWYQ,' 'NW,X,X,X,X,X,X,S,' 'W,X,X,X,X,X,E,NW' ) QUADS = [ (QUAD_1A, QUAD_1B), (QUAD_2A, QUAD_2B), (QUAD_3A, QUAD_3B), (QUAD_4A, QUAD_4B), ] # Rotation ROTATE_QUAD = [ 56, 48, 40, 32, 24, 16, 8, 0, 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 28, 20, 12, 4, 61, 53, 45, 37, 29, 21, 13, 5, 62, 54, 46, 38, 30, 22, 14, 6, 63, 55, 47, 39, 31, 23, 15, 7, ] ROTATE_WALL = { NORTH: EAST, EAST: SOUTH, SOUTH: WEST, WEST: NORTH, } # Helper Functions def idx(x, y, size=16): return y * size + x def xy(index, size=16): x = index % size y = index / size return (x, y) def rotate_quad(data, times=1): for i in range(times): result = [data[index] for index in ROTATE_QUAD] result = [''.join(ROTATE_WALL.get(c, c) for c in x) for x in result] data = result return data def create_grid(quads=None): if quads is None: quads = [random.choice(pair) for pair in QUADS] random.shuffle(quads) quads = [quad.split(',') for quad in quads] quads = [rotate_quad(quads[i], i) for i in [0, 1, 3, 2]] result = [None for i in range(16 * 16)] for i, quad in enumerate(quads): dx, dy = xy(i, 2) for j, data in enumerate(quad): x, y = xy(j, 8) x += dx * 8 y += dy * 8 index = idx(x, y) result[index] = data return result def to_mask(cell): result = 0 for letter, mask in M_LOOKUP.items(): if letter in cell: result |= mask return result # Game class Game(object): @staticmethod def hardest(): quads = [QUAD_2B, QUAD_4B, QUAD_3B, QUAD_1B] robots = [226, 48, 43, 18] token = 'BT' return Game(quads=quads, robots=robots, token=token) def __init__(self, seed=None, quads=None, robots=None, token=None): if seed: random.seed(seed) self.grid = create_grid(quads) if robots is None: self.robots = self.place_robots() else: self.robots = dict(zip(COLORS, robots)) self.token = token or random.choice(TOKENS) self.moves = 0 self.last = None def place_robots(self): result = {} used = set() for color in COLORS: while True: index = random.randint(0, 255) if index in (119, 120, 135, 136): continue if self.grid[index][-2:] in TOKENS: continue if index in used: continue result[color] = index used.add(index) break return result def get_robot(self, index): for color, position in self.robots.iteritems(): if position == index: return color return None def can_move(self, color, direction): if self.last == (color, REVERSE[direction]): return False index = self.robots[color] if direction in self.grid[index]: return False new_index = index + OFFSET[direction] if new_index in self.robots.itervalues(): return False return True def compute_move(self, color, direction): index = self.robots[color] robots = self.robots.values() while True: if direction in self.grid[index]: break new_index = index + OFFSET[direction] if new_index in robots: break index = new_index return index def do_move(self, color, direction): start = self.robots[color] last = self.last if last == (color, REVERSE[direction]): print 'reverse' #raise Exception end = self.compute_move(color, direction) if start == end: print 'wall move' #raise Exception self.moves += 1 self.robots[color] = end self.last = (color, direction) return (color, start, last) def undo_move(self, data): color, start, last = data self.moves -= 1 self.robots[color] = start self.last = last def get_moves(self, colors=None): result = [] colors = colors or COLORS for color in colors: for direction in DIRECTIONS: if self.can_move(color, direction): result.append((color, direction)) return result def over(self): color = self.token[0] return self.token in self.grid[self.robots[color]] def key(self): return tuple(self.robots.itervalues()) def search(self): max_depth = 1 while True: #print 'Searching to depth:', max_depth result = self._search([], set(), 0, max_depth) if result is not None: return result max_depth += 1 def _search(self, path, memo, depth, max_depth): if self.over(): return list(path) if depth == max_depth: return None key = (depth, self.key()) if key in memo: return None memo.add(key) if depth == max_depth - 1: colors = [self.token[0]] else: colors = None moves = self.get_moves(colors) for move in moves: data = self.do_move(*move) path.append(move) result = self._search(path, memo, depth + 1, max_depth) path.pop(-1) self.undo_move(data) if result: return result return None def export(self): grid = [] token = None robots = [self.robots[color] for color in COLORS] for index, cell in enumerate(self.grid): mask = to_mask(cell) if index in robots: mask |= M_ROBOT grid.append(mask) if self.token in cell: token = index robot = COLORS.index(self.token[0]) return { 'grid': grid, 'robot': robot, 'token': token, 'robots': robots, } def export2(self): grid = [] token = None robots = [self.robots[color] for color in COLORS] for index, cell in enumerate(self.grid): mask = to_mask(cell) grid.append(mask) if self.token in cell: token = index robot = COLORS.index(self.token[0]) return { 'grid': grid, 'robot': robot, 'token': token, 'robots': robots, }
bsd-2-clause
tongxindao/Flask-micblog
PyFlk-Framework/pyflk/session/__init__.py
2
5139
import os import json import time import base64 def create_session_id(): ''' create Session ID ''' # first of all, get current time stamp and convert byte stream, in Base64 code, decode to string and put off Base64 code "=" symbol, get second to last bit, final reverse order array return base64.encodebytes(str(time.time()).encode()).decode().replace("=", "")[:-2][::-1] def get_session_id(request): ''' from request gets Session ID ''' return request.cookies.get('session_id', '') class Session: ''' Session ''' # instance object __instance = None # init method def __init__(self): # session mapping table self.__session_map__ = {} # session local store folder self.__storage_path__ = None def set_storage_path(self, path): ''' set session storage path ''' self.__storage_path__ = path def storage(self, session_id): ''' save session record to local ''' # constructor Session local file path, file name is Session ID session_path = os.path.join(self.__storage_path__, session_id) # if already set Session storage path, then start cache to local if self.__storage_path__ is not None: with open(session_path, 'wb') as f: # convert session record to string content = json.dumps(self.__session_map__[session_id]) # base64 code and write to file, prevent sone specified binary data cannot right write in f.write(base64.encodebytes(content.encode())) def __new__(cls, *args, **kwargs): ''' singleton, realization global public a Session instance object ''' if cls.__instance is None: cls.__instance = super(Session, cls).__new__(cls, *args, **kwargs) return cls.__instance def push(self, request, item, value): ''' update or add record ''' # from request gets client's Session ID session_id = get_session_id(request) # if this Session ID exist in mapping table, then add new data key: value, if not, then first init a null dict, and add data key: value if session in self.__session_map__: # from current session add data self.__session_map__[get_session_id(request)][item] = value else: # init current session self.__session_map__[session_id] = {} # from current session add data self.__session_map__[session_id][item] = value # Session happen change, update cache to local self.storage(session_id) def pop(self, request, item, value=True): ''' delete current session's some item ''' # gets current session session_id = get_session_id(request) current_session = self.__session_map__.get(session_id, {}) # decide data item's key whether or not in current session, if yes then delete it if item in current_session: current_session.pop(item, value) # Session happen change, update cache to local self.storage(session_id) def load_local_session(self): ''' load local session ''' # if already set Session storage path, then start load cache from local if self.__storage_path__ is not None: # from local storage folder get all of the Session record file list, file name is Session ID session_path_list = os.listdir(self.__storage_path__) # ergodic Session record file list for session_id in session_path_list: # constructor Session record file folder path = os.path.join(self.__storage_path__, session_id) # read file content with open(path, 'rb') as f: content = f.read() # file content decode to base64 content = base64.decodebytes(content) # Session ID content bind and add to Session mapping table self.__session_map__[session_id] = json.loads(content.decode()) def map(self, request): ''' gets current Session record ''' return self.__session_map__.get(get_session_id(request), {}) def get(self, request, item): ''' gets current Session some item ''' return self.__session_map__.get(get_session_id(request), {}).get(item, None) class AuthSession: ''' Session verification decorator ''' @classmethod def auth_session(cls, f, *args, **options): def decorator(obj, request): return f(obj, request) if cls.auth_logic(request, *args, **options) else cls.auth_fail_callback(request, *args, **options) return decorator @staticmethod def auth_logic(request, *args, **options): ''' verification logic interface, return a boolean ''' raise NotImplementedError @staticmethod def auth_fail_callback(request, *args, **options): ''' verification fail callback interface ''' raise NotImplementedError # singleton global object session = Session()
apache-2.0
dhanzhang/shadowsocks-1
shadowsocks/crypto/util.py
1032
4287
#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, \ with_statement import os import logging def find_library_nt(name): # modified from ctypes.util # ctypes.util.find_library just returns first result he found # but we want to try them all # because on Windows, users may have both 32bit and 64bit version installed results = [] for directory in os.environ['PATH'].split(os.pathsep): fname = os.path.join(directory, name) if os.path.isfile(fname): results.append(fname) if fname.lower().endswith(".dll"): continue fname = fname + ".dll" if os.path.isfile(fname): results.append(fname) return results def find_library(possible_lib_names, search_symbol, library_name): import ctypes.util from ctypes import CDLL paths = [] if type(possible_lib_names) not in (list, tuple): possible_lib_names = [possible_lib_names] lib_names = [] for lib_name in possible_lib_names: lib_names.append(lib_name) lib_names.append('lib' + lib_name) for name in lib_names: if os.name == "nt": paths.extend(find_library_nt(name)) else: path = ctypes.util.find_library(name) if path: paths.append(path) if not paths: # We may get here when find_library fails because, for example, # the user does not have sufficient privileges to access those # tools underlying find_library on linux. import glob for name in lib_names: patterns = [ '/usr/local/lib*/lib%s.*' % name, '/usr/lib*/lib%s.*' % name, 'lib%s.*' % name, '%s.dll' % name] for pat in patterns: files = glob.glob(pat) if files: paths.extend(files) for path in paths: try: lib = CDLL(path) if hasattr(lib, search_symbol): logging.info('loading %s from %s', library_name, path) return lib else: logging.warn('can\'t find symbol %s in %s', search_symbol, path) except Exception: pass return None def run_cipher(cipher, decipher): from os import urandom import random import time BLOCK_SIZE = 16384 rounds = 1 * 1024 plain = urandom(BLOCK_SIZE * rounds) results = [] pos = 0 print('test start') start = time.time() while pos < len(plain): l = random.randint(100, 32768) c = cipher.update(plain[pos:pos + l]) results.append(c) pos += l pos = 0 c = b''.join(results) results = [] while pos < len(plain): l = random.randint(100, 32768) results.append(decipher.update(c[pos:pos + l])) pos += l end = time.time() print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start))) assert b''.join(results) == plain def test_find_library(): assert find_library('c', 'strcpy', 'libc') is not None assert find_library(['c'], 'strcpy', 'libc') is not None assert find_library(('c',), 'strcpy', 'libc') is not None assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate', 'libcrypto') is not None assert find_library('notexist', 'strcpy', 'libnotexist') is None assert find_library('c', 'symbol_not_exist', 'c') is None assert find_library(('notexist', 'c', 'crypto', 'eay32'), 'EVP_CipherUpdate', 'libc') is not None if __name__ == '__main__': test_find_library()
apache-2.0
hkchenhongyi/django
tests/generic_views/test_detail.py
284
8387
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.test import TestCase, override_settings from django.test.client import RequestFactory from django.views.generic.base import View from django.views.generic.detail import SingleObjectTemplateResponseMixin from django.views.generic.edit import ModelFormMixin from .models import Artist, Author, Book, Page @override_settings(ROOT_URLCONF='generic_views.urls') class DetailViewTest(TestCase): @classmethod def setUpTestData(cls): cls.artist1 = Artist.objects.create(name='Rene Magritte') cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano') cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg') cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1)) cls.book1.authors.add(cls.author1) cls.book2 = Book.objects.create( name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1) ) cls.page1 = Page.objects.create( content='I was once bitten by a moose.', template='generic_views/page_template.html' ) def test_simple_object(self): res = self.client.get('/detail/obj/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], {'foo': 'bar'}) self.assertIsInstance(res.context['view'], View) self.assertTemplateUsed(res, 'generic_views/detail.html') def test_detail_by_pk(self): res = self.client.get('/detail/author/%s/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_missing_object(self): res = self.client.get('/detail/author/500/') self.assertEqual(res.status_code, 404) def test_detail_object_does_not_exist(self): self.assertRaises(ObjectDoesNotExist, self.client.get, '/detail/doesnotexist/1/') def test_detail_by_custom_pk(self): res = self.client.get('/detail/author/bycustompk/%s/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_slug(self): res = self.client.get('/detail/author/byslug/scott-rosenberg/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg')) self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg')) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_custom_slug(self): res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg')) self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg')) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_ignore_slug(self): res = self.client.get('/detail/author/bypkignoreslug/%s-roberto-bolano/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_ignore_slug_mismatch(self): res = self.client.get('/detail/author/bypkignoreslug/%s-scott-rosenberg/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_and_slug(self): res = self.client.get('/detail/author/bypkandslug/%s-roberto-bolano/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_and_slug_mismatch_404(self): res = self.client.get('/detail/author/bypkandslug/%s-scott-rosenberg/' % self.author1.pk) self.assertEqual(res.status_code, 404) def test_verbose_name(self): res = self.client.get('/detail/artist/%s/' % self.artist1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.artist1) self.assertEqual(res.context['artist'], self.artist1) self.assertTemplateUsed(res, 'generic_views/artist_detail.html') def test_template_name(self): res = self.client.get('/detail/author/%s/template_name/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/about.html') def test_template_name_suffix(self): res = self.client.get('/detail/author/%s/template_name_suffix/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_view.html') def test_template_name_field(self): res = self.client.get('/detail/page/%s/field/' % self.page1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.page1) self.assertEqual(res.context['page'], self.page1) self.assertTemplateUsed(res, 'generic_views/page_template.html') def test_context_object_name(self): res = self.client.get('/detail/author/%s/context_object_name/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['thingy'], self.author1) self.assertNotIn('author', res.context) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_duplicated_context_object_name(self): res = self.client.get('/detail/author/%s/dupe_context_object_name/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertNotIn('author', res.context) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_deferred_queryset_template_name(self): class FormContext(SingleObjectTemplateResponseMixin): request = RequestFactory().get('/') model = Author object = Author.objects.defer('name').get(pk=self.author1.pk) self.assertEqual(FormContext().get_template_names()[0], 'generic_views/author_detail.html') def test_deferred_queryset_context_object_name(self): class FormContext(ModelFormMixin): request = RequestFactory().get('/') model = Author object = Author.objects.defer('name').get(pk=self.author1.pk) fields = ('name',) form_context_data = FormContext().get_context_data() self.assertEqual(form_context_data['object'], self.author1) self.assertEqual(form_context_data['author'], self.author1) def test_invalid_url(self): self.assertRaises(AttributeError, self.client.get, '/detail/author/invalid/url/') def test_invalid_queryset(self): self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/') def test_non_model_object_with_meta(self): res = self.client.get('/detail/nonmodel/1/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'].id, "non_model_1")
bsd-3-clause
jonashagstedt/django-jsx
demo/demo/settings.py
2
3162
""" Django settings for demo project. Generated by 'django-admin startproject' using Django 1.8.4. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '86mvm#x9^9dakhk^cu#laf$-_cr-9k$cv3@&mmqbfzf#=0($rn' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_jsx', 'demo', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'demo.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, { 'BACKEND': 'django_jsx.template.backend.JsTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'static/js') ] } ] # DJANGO_ISOMORPHIC_RENDERER = os.path.join(BASE_DIR, 'custom_renderer/custom-transpiled.js') WSGI_APPLICATION = 'demo.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static_root') MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Additional locations of static files STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), ]
bsd-3-clause
zzzeek/sqlalchemy
lib/sqlalchemy/event/api.py
3
6794
# event/api.py # Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php """Public API functions for the event system. """ from __future__ import absolute_import from .base import _registrars from .registry import _EventKey from .. import exc from .. import util CANCEL = util.symbol("CANCEL") NO_RETVAL = util.symbol("NO_RETVAL") def _event_key(target, identifier, fn): for evt_cls in _registrars[identifier]: tgt = evt_cls._accept_with(target) if tgt is not None: return _EventKey(target, identifier, fn, tgt) else: raise exc.InvalidRequestError( "No such event '%s' for target '%s'" % (identifier, target) ) def listen(target, identifier, fn, *args, **kw): """Register a listener function for the given target. The :func:`.listen` function is part of the primary interface for the SQLAlchemy event system, documented at :ref:`event_toplevel`. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) event.listen( UniqueConstraint, "after_parent_attach", unique_constraint_name) A given function can also be invoked for only the first invocation of the event using the ``once`` argument:: def on_config(): do_config() event.listen(Mapper, "before_configure", on_config, once=True) .. warning:: The ``once`` argument does not imply automatic de-registration of the listener function after it has been invoked a first time; a listener entry will remain associated with the target object. Associating an arbitrarily high number of listeners without explicitly removing them will cause memory to grow unbounded even if ``once=True`` is specified. .. note:: The :func:`.listen` function cannot be called at the same time that the target event is being run. This has implications for thread safety, and also means an event cannot be added from inside the listener function for itself. The list of events to be run are present inside of a mutable collection that can't be changed during iteration. Event registration and removal is not intended to be a "high velocity" operation; it is a configurational operation. For systems that need to quickly associate and deassociate with events at high scale, use a mutable structure that is handled from inside of a single listener. .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now used as the container for the list of events, which explicitly disallows collection mutation while the collection is being iterated. .. seealso:: :func:`.listens_for` :func:`.remove` """ _event_key(target, identifier, fn).listen(*args, **kw) def listens_for(target, identifier, *args, **kw): """Decorate a function as a listener for the given target + identifier. The :func:`.listens_for` decorator is part of the primary interface for the SQLAlchemy event system, documented at :ref:`event_toplevel`. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint @event.listens_for(UniqueConstraint, "after_parent_attach") def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) A given function can also be invoked for only the first invocation of the event using the ``once`` argument:: @event.listens_for(Mapper, "before_configure", once=True) def on_config(): do_config() .. warning:: The ``once`` argument does not imply automatic de-registration of the listener function after it has been invoked a first time; a listener entry will remain associated with the target object. Associating an arbitrarily high number of listeners without explicitly removing them will cause memory to grow unbounded even if ``once=True`` is specified. .. seealso:: :func:`.listen` - general description of event listening """ def decorate(fn): listen(target, identifier, fn, *args, **kw) return fn return decorate def remove(target, identifier, fn): """Remove an event listener. The arguments here should match exactly those which were sent to :func:`.listen`; all the event registration which proceeded as a result of this call will be reverted by calling :func:`.remove` with the same arguments. e.g.:: # if a function was registered like this... @event.listens_for(SomeMappedClass, "before_insert", propagate=True) def my_listener_function(*arg): pass # ... it's removed like this event.remove(SomeMappedClass, "before_insert", my_listener_function) Above, the listener function associated with ``SomeMappedClass`` was also propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` function will revert all of these operations. .. note:: The :func:`.remove` function cannot be called at the same time that the target event is being run. This has implications for thread safety, and also means an event cannot be removed from inside the listener function for itself. The list of events to be run are present inside of a mutable collection that can't be changed during iteration. Event registration and removal is not intended to be a "high velocity" operation; it is a configurational operation. For systems that need to quickly associate and deassociate with events at high scale, use a mutable structure that is handled from inside of a single listener. .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now used as the container for the list of events, which explicitly disallows collection mutation while the collection is being iterated. .. seealso:: :func:`.listen` """ _event_key(target, identifier, fn).remove() def contains(target, identifier, fn): """Return True if the given target/ident/fn is set up to listen.""" return _event_key(target, identifier, fn).contains()
mit
wolfgangmauerer/prosoda
prosoda/interactive.py
1
1232
# Commands that are useful after adist.yp has been # run in ipython # This file is part of prosoda. prosoda is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Copyright 2010, 2011, 2012 by Wolfgang Mauerer <wm@linux-kernel.net> # All Rights Reserved. initialiseR() git = shelve.open("/home/wolfgang/linux-14-33")["git"] res = createSeries(git, "__main__", ["v2.6.24", "v2.6.25"]) writeToFile(res, "/home/wolfgang/raw.dat") runR('raw = as.xts(read.zoo(file="/home/wolfgang/raw.dat", FUN=tstamp_to_date))') runR('reg = to.regts(raw[,1], 250)') reg = RtoPython(runR('reg')) raw = RtoPython(runR('raw')) # ... and then commence with the analysis as desired
gpl-2.0
c-o-m-m-a-n-d-e-r/CouchPotatoServer
libs/pyasn1/type/constraint.py
382
7279
# # ASN.1 subtype constraints classes. # # Constraints are relatively rare, but every ASN1 object # is doing checks all the time for whether they have any # constraints and whether they are applicable to the object. # # What we're going to do is define objects/functions that # can be called unconditionally if they are present, and that # are simply not present if there are no constraints. # # Original concept and code by Mike C. Fletcher. # import sys from pyasn1.type import error class AbstractConstraint: """Abstract base-class for constraint objects Constraints should be stored in a simple sequence in the namespace of their client Asn1Item sub-classes. """ def __init__(self, *values): self._valueMap = {} self._setValues(values) self.__hashedValues = None def __call__(self, value, idx=None): try: self._testValue(value, idx) except error.ValueConstraintError: raise error.ValueConstraintError( '%s failed at: \"%s\"' % (self, sys.exc_info()[1]) ) def __repr__(self): return '%s(%s)' % ( self.__class__.__name__, ', '.join([repr(x) for x in self._values]) ) def __eq__(self, other): return self is other and True or self._values == other def __ne__(self, other): return self._values != other def __lt__(self, other): return self._values < other def __le__(self, other): return self._values <= other def __gt__(self, other): return self._values > other def __ge__(self, other): return self._values >= other if sys.version_info[0] <= 2: def __nonzero__(self): return bool(self._values) else: def __bool__(self): return bool(self._values) def __hash__(self): if self.__hashedValues is None: self.__hashedValues = hash((self.__class__.__name__, self._values)) return self.__hashedValues def _setValues(self, values): self._values = values def _testValue(self, value, idx): raise error.ValueConstraintError(value) # Constraints derivation logic def getValueMap(self): return self._valueMap def isSuperTypeOf(self, otherConstraint): return self in otherConstraint.getValueMap() or \ otherConstraint is self or otherConstraint == self def isSubTypeOf(self, otherConstraint): return otherConstraint in self._valueMap or \ otherConstraint is self or otherConstraint == self class SingleValueConstraint(AbstractConstraint): """Value must be part of defined values constraint""" def _testValue(self, value, idx): # XXX index vals for performance? if value not in self._values: raise error.ValueConstraintError(value) class ContainedSubtypeConstraint(AbstractConstraint): """Value must satisfy all of defined set of constraints""" def _testValue(self, value, idx): for c in self._values: c(value, idx) class ValueRangeConstraint(AbstractConstraint): """Value must be within start and stop values (inclusive)""" def _testValue(self, value, idx): if value < self.start or value > self.stop: raise error.ValueConstraintError(value) def _setValues(self, values): if len(values) != 2: raise error.PyAsn1Error( '%s: bad constraint values' % (self.__class__.__name__,) ) self.start, self.stop = values if self.start > self.stop: raise error.PyAsn1Error( '%s: screwed constraint values (start > stop): %s > %s' % ( self.__class__.__name__, self.start, self.stop ) ) AbstractConstraint._setValues(self, values) class ValueSizeConstraint(ValueRangeConstraint): """len(value) must be within start and stop values (inclusive)""" def _testValue(self, value, idx): l = len(value) if l < self.start or l > self.stop: raise error.ValueConstraintError(value) class PermittedAlphabetConstraint(SingleValueConstraint): def _setValues(self, values): self._values = () for v in values: self._values = self._values + tuple(v) def _testValue(self, value, idx): for v in value: if v not in self._values: raise error.ValueConstraintError(value) # This is a bit kludgy, meaning two op modes within a single constraing class InnerTypeConstraint(AbstractConstraint): """Value must satisfy type and presense constraints""" def _testValue(self, value, idx): if self.__singleTypeConstraint: self.__singleTypeConstraint(value) elif self.__multipleTypeConstraint: if idx not in self.__multipleTypeConstraint: raise error.ValueConstraintError(value) constraint, status = self.__multipleTypeConstraint[idx] if status == 'ABSENT': # XXX presense is not checked! raise error.ValueConstraintError(value) constraint(value) def _setValues(self, values): self.__multipleTypeConstraint = {} self.__singleTypeConstraint = None for v in values: if isinstance(v, tuple): self.__multipleTypeConstraint[v[0]] = v[1], v[2] else: self.__singleTypeConstraint = v AbstractConstraint._setValues(self, values) # Boolean ops on constraints class ConstraintsExclusion(AbstractConstraint): """Value must not fit the single constraint""" def _testValue(self, value, idx): try: self._values[0](value, idx) except error.ValueConstraintError: return else: raise error.ValueConstraintError(value) def _setValues(self, values): if len(values) != 1: raise error.PyAsn1Error('Single constraint expected') AbstractConstraint._setValues(self, values) class AbstractConstraintSet(AbstractConstraint): """Value must not satisfy the single constraint""" def __getitem__(self, idx): return self._values[idx] def __add__(self, value): return self.__class__(self, value) def __radd__(self, value): return self.__class__(self, value) def __len__(self): return len(self._values) # Constraints inclusion in sets def _setValues(self, values): self._values = values for v in values: self._valueMap[v] = 1 self._valueMap.update(v.getValueMap()) class ConstraintsIntersection(AbstractConstraintSet): """Value must satisfy all constraints""" def _testValue(self, value, idx): for v in self._values: v(value, idx) class ConstraintsUnion(AbstractConstraintSet): """Value must satisfy at least one constraint""" def _testValue(self, value, idx): for v in self._values: try: v(value, idx) except error.ValueConstraintError: pass else: return raise error.ValueConstraintError( 'all of %s failed for \"%s\"' % (self._values, value) ) # XXX # add tests for type check
gpl-3.0
kdebrab/pandas
pandas/core/indexes/category.py
1
30548
import operator import numpy as np from pandas._libs import index as libindex from pandas import compat from pandas.compat.numpy import function as nv from pandas.core.dtypes.generic import ABCCategorical, ABCSeries from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( is_categorical_dtype, ensure_platform_int, is_list_like, is_interval_dtype, is_scalar) from pandas.core.dtypes.missing import array_equivalent, isna from pandas.core.algorithms import take_1d from pandas.util._decorators import Appender, cache_readonly from pandas.core.config import get_option from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core import accessor import pandas.core.common as com import pandas.core.missing as missing import pandas.core.indexes.base as ibase from pandas.core.arrays.categorical import Categorical, contains _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update(dict(target_klass='CategoricalIndex')) class CategoricalIndex(Index, accessor.PandasDelegate): """ Immutable Index implementing an ordered, sliceable set. CategoricalIndex represents a sparsely populated Index with an underlying Categorical. Parameters ---------- data : array-like or Categorical, (1-dimensional) categories : optional, array-like categories for the CategoricalIndex ordered : boolean, designating if the categories are ordered copy : bool Make a copy of input ndarray name : object Name to be stored in the index Attributes ---------- codes categories ordered Methods ------- rename_categories reorder_categories add_categories remove_categories remove_unused_categories set_categories as_ordered as_unordered map See Also -------- Categorical, Index """ _typ = 'categoricalindex' _engine_type = libindex.Int64Engine _attributes = ['name'] def __new__(cls, data=None, categories=None, ordered=None, dtype=None, copy=False, name=None, fastpath=False): if fastpath: return cls._simple_new(data, name=name, dtype=dtype) if name is None and hasattr(data, 'name'): name = data.name if isinstance(data, ABCCategorical): data = cls._create_categorical(data, categories, ordered, dtype) elif isinstance(data, CategoricalIndex): data = data._data data = cls._create_categorical(data, categories, ordered, dtype) else: # don't allow scalars # if data is None, then categories must be provided if is_scalar(data): if data is not None or categories is None: cls._scalar_data_error(data) data = [] data = cls._create_categorical(data, categories, ordered, dtype) if copy: data = data.copy() return cls._simple_new(data, name=name) def _create_from_codes(self, codes, categories=None, ordered=None, name=None): """ *this is an internal non-public method* create the correct categorical from codes Parameters ---------- codes : new codes categories : optional categories, defaults to existing ordered : optional ordered attribute, defaults to existing name : optional name attribute, defaults to existing Returns ------- CategoricalIndex """ if categories is None: categories = self.categories if ordered is None: ordered = self.ordered if name is None: name = self.name cat = Categorical.from_codes(codes, categories=categories, ordered=self.ordered) return CategoricalIndex(cat, name=name) @classmethod def _create_categorical(cls, data, categories=None, ordered=None, dtype=None): """ *this is an internal non-public method* create the correct categorical from data and the properties Parameters ---------- data : data for new Categorical categories : optional categories, defaults to existing ordered : optional ordered attribute, defaults to existing dtype : CategoricalDtype, defaults to existing Returns ------- Categorical """ if (isinstance(data, (cls, ABCSeries)) and is_categorical_dtype(data)): data = data.values if not isinstance(data, ABCCategorical): if ordered is None and dtype is None: ordered = False data = Categorical(data, categories=categories, ordered=ordered, dtype=dtype) else: if categories is not None: data = data.set_categories(categories, ordered=ordered) elif ordered is not None and ordered != data.ordered: data = data.set_ordered(ordered) if isinstance(dtype, CategoricalDtype) and dtype != data.dtype: # we want to silently ignore dtype='category' data = data._set_dtype(dtype) return data @classmethod def _simple_new(cls, values, name=None, categories=None, ordered=None, dtype=None, **kwargs): result = object.__new__(cls) values = cls._create_categorical(values, categories, ordered, dtype=dtype) result._data = values result.name = name for k, v in compat.iteritems(kwargs): setattr(result, k, v) result._reset_identity() return result @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, categories=None, ordered=None, dtype=None, **kwargs): # categories and ordered can't be part of attributes, # as these are properties # we want to reuse self.dtype if possible, i.e. neither are # overridden. if dtype is not None and (categories is not None or ordered is not None): raise TypeError("Cannot specify both `dtype` and `categories` " "or `ordered`") if categories is None and ordered is None: dtype = self.dtype if dtype is None else dtype return super(CategoricalIndex, self)._shallow_copy( values=values, dtype=dtype, **kwargs) if categories is None: categories = self.categories if ordered is None: ordered = self.ordered return super(CategoricalIndex, self)._shallow_copy( values=values, categories=categories, ordered=ordered, **kwargs) def _is_dtype_compat(self, other): """ *this is an internal non-public method* provide a comparison between the dtype of self and other (coercing if needed) Raises ------ TypeError if the dtypes are not compatible """ if is_categorical_dtype(other): if isinstance(other, CategoricalIndex): other = other._values if not other.is_dtype_equal(self): raise TypeError("categories must match existing categories " "when appending") else: values = other if not is_list_like(values): values = [values] other = CategoricalIndex(self._create_categorical( other, dtype=self.dtype)) if not other.isin(values).all(): raise TypeError("cannot append a non-category item to a " "CategoricalIndex") return other def equals(self, other): """ Determines if two CategorialIndex objects contain the same elements. """ if self.is_(other): return True if not isinstance(other, Index): return False try: other = self._is_dtype_compat(other) return array_equivalent(self._data, other) except (TypeError, ValueError): pass return False @property def _formatter_func(self): return self.categories._formatter_func def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) attrs = [ ('categories', ibase.default_pprint(self.categories, max_seq_items=max_categories)), ('ordered', self.ordered)] if self.name is not None: attrs.append(('name', ibase.default_pprint(self.name))) attrs.append(('dtype', "'%s'" % self.dtype.name)) max_seq_items = get_option('display.max_seq_items') or len(self) if len(self) > max_seq_items: attrs.append(('length', len(self))) return attrs @property def inferred_type(self): return 'categorical' @property def values(self): """ return the underlying data, which is a Categorical """ return self._data @property def itemsize(self): # Size of the items in categories, not codes. return self.values.itemsize def get_values(self): """ return the underlying data as an ndarray """ return self._data.get_values() def tolist(self): return self._data.tolist() @property def codes(self): return self._data.codes @property def categories(self): return self._data.categories @property def ordered(self): return self._data.ordered def _reverse_indexer(self): return self._data._reverse_indexer() @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): # if key is a NaN, check if any NaN is in self. if isna(key): return self.hasnans return contains(self, key, container=self._engine) @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) def contains(self, key): return key in self def __array__(self, dtype=None): """ the array interface, return my values """ return np.array(self._data, dtype=dtype) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): if is_interval_dtype(dtype): from pandas import IntervalIndex return IntervalIndex(np.array(self)) elif is_categorical_dtype(dtype): # GH 18630 dtype = self.dtype.update_dtype(dtype) if dtype == self.dtype: return self.copy() if copy else self return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy) @cache_readonly def _isnan(self): """ return if each value is nan""" return self._data.codes == -1 @Appender(ibase._index_shared_docs['fillna']) def fillna(self, value, downcast=None): self._assert_can_do_op(value) return CategoricalIndex(self._data.fillna(value), name=self.name) def argsort(self, *args, **kwargs): return self.values.argsort(*args, **kwargs) @cache_readonly def _engine(self): # we are going to look things up with the codes themselves return self._engine_type(lambda: self.codes.astype('i8'), len(self)) # introspection @cache_readonly def is_unique(self): return self._engine.is_unique @property def is_monotonic_increasing(self): return self._engine.is_monotonic_increasing @property def is_monotonic_decreasing(self): return self._engine.is_monotonic_decreasing @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs) def unique(self, level=None): if level is not None: self._validate_index_level(level) result = self.values.unique() # CategoricalIndex._shallow_copy keeps original categories # and ordered if not otherwise specified return self._shallow_copy(result, categories=result.categories, ordered=result.ordered) @Appender(Index.duplicated.__doc__) def duplicated(self, keep='first'): from pandas._libs.hashtable import duplicated_int64 codes = self.codes.astype('i8') return duplicated_int64(codes, keep) def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.astype('object') def get_loc(self, key, method=None): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label method : {None} * default: exact matches only. Returns ------- loc : int if unique index, slice if monotonic index, else mask Examples --------- >>> unique_index = pd.CategoricalIndex(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.CategoricalIndex(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.CategoricalIndex(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True], dtype=bool) """ codes = self.categories.get_loc(key) if (codes == -1): raise KeyError(key) return self._engine.get_loc(codes) def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ try: k = com._values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] except (KeyError, TypeError): pass # we might be a positional inexer return super(CategoricalIndex, self).get_value(series, key) def _can_reindex(self, indexer): """ always allow reindexing """ pass @Appender(_index_shared_docs['where']) def where(self, cond, other=None): if other is None: other = self._na_value values = np.where(cond, self.values, other) cat = Categorical(values, categories=self.categories, ordered=self.ordered) return self._shallow_copy(cat, **self._get_attributes_dict()) def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.Index Resulting index indexer : np.ndarray or None Indices of output values in original index """ if method is not None: raise NotImplementedError("argument method is not implemented for " "CategoricalIndex.reindex") if level is not None: raise NotImplementedError("argument level is not implemented for " "CategoricalIndex.reindex") if limit is not None: raise NotImplementedError("argument limit is not implemented for " "CategoricalIndex.reindex") target = ibase.ensure_index(target) if not is_categorical_dtype(target) and not target.is_unique: raise ValueError("cannot reindex with a non-unique indexer") indexer, missing = self.get_indexer_non_unique(np.array(target)) if len(self.codes): new_target = self.take(indexer) else: new_target = target # filling in missing if needed if len(missing): cats = self.categories.get_indexer(target) if (cats == -1).any(): # coerce to a regular index here! result = Index(np.array(self), name=self.name) new_target, indexer, _ = result._reindex_non_unique( np.array(target)) else: codes = new_target.codes.copy() codes[indexer == -1] = cats[missing] new_target = self._create_from_codes(codes) # we always want to return an Index type here # to be consistent with .reindex for other index types (e.g. they don't # coerce based on the actual values, only on the dtype) # unless we had an initial Categorical to begin with # in which case we are going to conform to the passed Categorical new_target = np.asarray(new_target) if is_categorical_dtype(target): new_target = target._shallow_copy(new_target, name=self.name) else: new_target = Index(new_target, name=self.name) return new_target, indexer def _reindex_non_unique(self, target): """ reindex from a non-unique; which CategoricalIndex's are almost always """ new_target, indexer = self.reindex(target) new_indexer = None check = indexer == -1 if check.any(): new_indexer = np.arange(len(self.take(indexer))) new_indexer[check] = -1 cats = self.categories.get_indexer(target) if not (cats == -1).any(): # .reindex returns normal Index. Revert to CategoricalIndex if # all targets are included in my categories new_target = self._shallow_copy(new_target) return new_target, indexer, new_indexer @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): from pandas.core.arrays.categorical import _recode_for_categories method = missing.clean_reindex_fill_method(method) target = ibase.ensure_index(target) if self.is_unique and self.equals(target): return np.arange(len(self), dtype='intp') if method == 'pad' or method == 'backfill': raise NotImplementedError("method='pad' and method='backfill' not " "implemented yet for CategoricalIndex") elif method == 'nearest': raise NotImplementedError("method='nearest' not implemented yet " 'for CategoricalIndex') if (isinstance(target, CategoricalIndex) and self.values.is_dtype_equal(target)): if self.values.equals(target.values): # we have the same codes codes = target.codes else: codes = _recode_for_categories(target.codes, target.categories, self.values.categories) else: if isinstance(target, CategoricalIndex): code_indexer = self.categories.get_indexer(target.categories) codes = take_1d(code_indexer, target.codes, fill_value=-1) else: codes = self.categories.get_indexer(target) indexer, _ = self._engine.get_indexer_non_unique(codes) return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): target = ibase.ensure_index(target) if isinstance(target, CategoricalIndex): # Indexing on codes is more efficient if categories are the same: if target.categories is self.categories: target = target.codes indexer, missing = self._engine.get_indexer_non_unique(target) return ensure_platform_int(indexer), missing target = target.values codes = self.categories.get_indexer(target) indexer, missing = self._engine.get_indexer_non_unique(codes) return ensure_platform_int(indexer), missing @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): if self.categories._defer_to_indexing: return self.categories._convert_scalar_indexer(key, kind=kind) return super(CategoricalIndex, self)._convert_scalar_indexer( key, kind=kind) @Appender(_index_shared_docs['_convert_list_indexer']) def _convert_list_indexer(self, keyarr, kind=None): # Return our indexer or raise if all of the values are not included in # the categories if self.categories._defer_to_indexing: indexer = self.categories._convert_list_indexer(keyarr, kind=kind) return Index(self.codes).get_indexer_for(indexer) indexer = self.categories.get_indexer(np.asarray(keyarr)) if (indexer == -1).any(): raise KeyError( "a list-indexer must only " "include values that are " "in the categories") return self.get_indexer(keyarr) @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): keyarr = com._asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr return self._shallow_copy(keyarr) @Appender(_index_shared_docs['_convert_index_indexer']) def _convert_index_indexer(self, keyarr): return self._shallow_copy(keyarr) @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) indices = ensure_platform_int(indices) taken = self._assert_take_fillable(self.codes, indices, allow_fill=allow_fill, fill_value=fill_value, na_value=-1) return self._create_from_codes(taken) def is_dtype_equal(self, other): return self._data.is_dtype_equal(other) take_nd = take def map(self, mapper): """ Map values using input correspondence (a dict, Series, or function). Maps the values (their categories, not the codes) of the index to new categories. If the mapping correspondence is one-to-one the result is a :class:`~pandas.CategoricalIndex` which has the same order property as the original, otherwise an :class:`~pandas.Index` is returned. If a `dict` or :class:`~pandas.Series` is used any unmapped category is mapped to `NaN`. Note that if this happens an :class:`~pandas.Index` will be returned. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. Returns ------- pandas.CategoricalIndex or pandas.Index Mapped index. See Also -------- Index.map : Apply a mapping correspondence on an :class:`~pandas.Index`. Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Series.apply : Apply more complex functions on a :class:`~pandas.Series`. Examples -------- >>> idx = pd.CategoricalIndex(['a', 'b', 'c']) >>> idx CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') >>> idx.map(lambda x: x.upper()) CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'], ordered=False, dtype='category') >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'}) CategoricalIndex(['first', 'second', 'third'], categories=['first', 'second', 'third'], ordered=False, dtype='category') If the mapping is one-to-one the ordering of the categories is preserved: >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True) >>> idx CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=True, dtype='category') >>> idx.map({'a': 3, 'b': 2, 'c': 1}) CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True, dtype='category') If the mapping is not one-to-one an :class:`~pandas.Index` is returned: >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'}) Index(['first', 'second', 'first'], dtype='object') If a `dict` is used, all unmapped categories are mapped to `NaN` and the result is an :class:`~pandas.Index`: >>> idx.map({'a': 'first', 'b': 'second'}) Index(['first', 'second', nan], dtype='object') """ return self._shallow_copy_with_infer(self.values.map(mapper)) def delete(self, loc): """ Make new Index with passed location(-s) deleted Returns ------- new_index : Index """ return self._create_from_codes(np.delete(self.codes, loc)) def insert(self, loc, item): """ Make new Index inserting new item at location. Follows Python list.append semantics for negative values Parameters ---------- loc : int item : object Returns ------- new_index : Index Raises ------ ValueError if the item is not in the categories """ code = self.categories.get_indexer([item]) if (code == -1) and not (is_scalar(item) and isna(item)): raise TypeError("cannot insert an item into a CategoricalIndex " "that is not already an existing category") codes = self.codes codes = np.concatenate((codes[:loc], code, codes[loc:])) return self._create_from_codes(codes) def _concat(self, to_concat, name): # if calling index is category, don't check dtype of others return CategoricalIndex._concat_same_dtype(self, to_concat, name) def _concat_same_dtype(self, to_concat, name): """ Concatenate to_concat which has the same class ValueError if other is not in the categories """ to_concat = [self._is_dtype_compat(c) for c in to_concat] codes = np.concatenate([c.codes for c in to_concat]) result = self._create_from_codes(codes, name=name) # if name is None, _create_from_codes sets self.name result.name = name return result def _codes_for_groupby(self, sort, observed): """ Return a Categorical adjusted for groupby """ return self.values._codes_for_groupby(sort, observed) @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ def _make_compare(op): opname = '__{op}__'.format(op=op.__name__) def _evaluate_compare(self, other): # if we have a Categorical type, then must have the same # categories if isinstance(other, CategoricalIndex): other = other._values elif isinstance(other, Index): other = self._create_categorical( other._values, dtype=self.dtype) if isinstance(other, (ABCCategorical, np.ndarray, ABCSeries)): if len(self.values) != len(other): raise ValueError("Lengths must match to compare") if isinstance(other, ABCCategorical): if not self.values.is_dtype_equal(other): raise TypeError("categorical index comparisons must " "have the same categories and ordered " "attributes") result = op(self.values, other) if isinstance(result, ABCSeries): # Dispatch to pd.Categorical returned NotImplemented # and we got a Series back; down-cast to ndarray result = result.values return result return compat.set_function_name(_evaluate_compare, opname, cls) cls.__eq__ = _make_compare(operator.eq) cls.__ne__ = _make_compare(operator.ne) cls.__lt__ = _make_compare(operator.lt) cls.__gt__ = _make_compare(operator.gt) cls.__le__ = _make_compare(operator.le) cls.__ge__ = _make_compare(operator.ge) def _delegate_method(self, name, *args, **kwargs): """ method delegation to the ._values """ method = getattr(self._values, name) if 'inplace' in kwargs: raise ValueError("cannot use inplace with CategoricalIndex") res = method(*args, **kwargs) if is_scalar(res): return res return CategoricalIndex(res, name=self.name) @classmethod def _add_accessors(cls): """ add in Categorical accessor methods """ CategoricalIndex._add_delegate_accessors( delegate=Categorical, accessors=["rename_categories", "reorder_categories", "add_categories", "remove_categories", "remove_unused_categories", "set_categories", "as_ordered", "as_unordered", "min", "max"], typ='method', overwrite=True) CategoricalIndex._add_numeric_methods_add_sub_disabled() CategoricalIndex._add_numeric_methods_disabled() CategoricalIndex._add_logical_methods_disabled() CategoricalIndex._add_comparison_methods() CategoricalIndex._add_accessors()
bsd-3-clause
floresconlimon/qutebrowser
qutebrowser/mainwindow/statusbar/prompter.py
5
14437
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Manager for questions to be shown in the statusbar.""" import sip import collections from PyQt5.QtCore import pyqtSlot, pyqtSignal, QTimer, QObject from PyQt5.QtWidgets import QLineEdit from qutebrowser.keyinput import modeman from qutebrowser.commands import cmdutils from qutebrowser.utils import usertypes, log, qtutils, objreg, utils PromptContext = collections.namedtuple('PromptContext', ['question', 'text', 'input_text', 'echo_mode', 'input_visible']) AuthTuple = collections.namedtuple('AuthTuple', ['user', 'password']) class Prompter(QObject): """Manager for questions to be shown in the statusbar. The way in which multiple questions are handled deserves some explanation. If a question is blocking, we *need* to ask it immediately, and can't wait for previous questions to finish. We could theoretically ask a blocking question inside of another blocking one, so in ask_question we simply save the current prompt state on the stack, let the user answer the *most recent* question, and then restore the previous state. With a non-blocking question, things are a bit easier. We simply add it to self._queue if we're still busy handling another question, since it can be answered at any time. In either case, as soon as we finished handling a question, we call _pop_later() which schedules a _pop to ask the next question in _queue. We schedule it rather than doing it immediately because then the order of how things happen is clear, e.g. on_mode_left can't happen after we already set up the *new* question. Class Attributes: KEY_MODES: A mapping of PromptModes to KeyModes. Attributes: _shutting_down: Whether we're currently shutting down the prompter and should ignore future questions to avoid segfaults. _question: A Question object with the question to be asked to the user. _loops: A list of local EventLoops to spin in when blocking. _queue: A deque of waiting questions. _busy: If we're currently busy with asking a question. _win_id: The window ID this object is associated with. Signals: show_prompt: Emitted when the prompt widget should be shown. hide_prompt: Emitted when the prompt widget should be hidden. """ KEY_MODES = { usertypes.PromptMode.yesno: usertypes.KeyMode.yesno, usertypes.PromptMode.text: usertypes.KeyMode.prompt, usertypes.PromptMode.user_pwd: usertypes.KeyMode.prompt, usertypes.PromptMode.alert: usertypes.KeyMode.prompt, } show_prompt = pyqtSignal() hide_prompt = pyqtSignal() def __init__(self, win_id, parent=None): super().__init__(parent) self._shutting_down = False self._question = None self._loops = [] self._queue = collections.deque() self._busy = False self._win_id = win_id def __repr__(self): return utils.get_repr(self, loops=len(self._loops), question=self._question, queue=len(self._queue), busy=self._busy) def _pop_later(self): """Helper to call self._pop as soon as everything else is done.""" QTimer.singleShot(0, self._pop) def _pop(self): """Pop a question from the queue and ask it, if there are any.""" log.statusbar.debug("Popping from queue {}".format(self._queue)) if self._queue: question = self._queue.popleft() if not sip.isdeleted(question): # the question could already be deleted, e.g. by a cancelled # download. See # https://github.com/The-Compiler/qutebrowser/issues/415 self.ask_question(question, blocking=False) def _get_ctx(self): """Get a PromptContext based on the current state.""" if not self._busy: return None prompt = objreg.get('prompt', scope='window', window=self._win_id) ctx = PromptContext(question=self._question, text=prompt.txt.text(), input_text=prompt.lineedit.text(), echo_mode=prompt.lineedit.echoMode(), input_visible=prompt.lineedit.isVisible()) return ctx def _restore_ctx(self, ctx): """Restore state from a PromptContext. Args: ctx: A PromptContext previously saved by _get_ctx, or None. Return: True if a context was restored, False otherwise. """ log.statusbar.debug("Restoring context {}".format(ctx)) if ctx is None: self.hide_prompt.emit() self._busy = False return False self._question = ctx.question prompt = objreg.get('prompt', scope='window', window=self._win_id) prompt.txt.setText(ctx.text) prompt.lineedit.setText(ctx.input_text) prompt.lineedit.setEchoMode(ctx.echo_mode) prompt.lineedit.setVisible(ctx.input_visible) self.show_prompt.emit() mode = self.KEY_MODES[ctx.question.mode] ctx.question.aborted.connect( lambda: modeman.maybe_leave(self._win_id, mode, 'aborted')) modeman.enter(self._win_id, mode, 'question asked') return True def _display_question(self): """Display the question saved in self._question.""" prompt = objreg.get('prompt', scope='window', window=self._win_id) if self._question.mode == usertypes.PromptMode.yesno: if self._question.default is None: suffix = "" elif self._question.default: suffix = " (yes)" else: suffix = " (no)" prompt.txt.setText(self._question.text + suffix) prompt.lineedit.hide() elif self._question.mode == usertypes.PromptMode.text: prompt.txt.setText(self._question.text) if self._question.default: prompt.lineedit.setText(self._question.default) prompt.lineedit.show() elif self._question.mode == usertypes.PromptMode.user_pwd: prompt.txt.setText(self._question.text) if self._question.default: prompt.lineedit.setText(self._question.default) prompt.lineedit.show() elif self._question.mode == usertypes.PromptMode.alert: prompt.txt.setText(self._question.text + ' (ok)') prompt.lineedit.hide() else: raise ValueError("Invalid prompt mode!") log.modes.debug("Question asked, focusing {!r}".format( prompt.lineedit)) prompt.lineedit.setFocus() self.show_prompt.emit() self._busy = True def shutdown(self): """Cancel all blocking questions. Quits and removes all running event loops. Return: True if loops needed to be aborted, False otherwise. """ self._shutting_down = True if self._loops: for loop in self._loops: loop.quit() loop.deleteLater() return True else: return False @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Clear and reset input when the mode was left.""" prompt = objreg.get('prompt', scope='window', window=self._win_id) if mode in (usertypes.KeyMode.prompt, usertypes.KeyMode.yesno): prompt.txt.setText('') prompt.lineedit.clear() prompt.lineedit.setEchoMode(QLineEdit.Normal) self.hide_prompt.emit() self._busy = False if self._question.answer is None and not self._question.is_aborted: self._question.cancel() @cmdutils.register(instance='prompter', hide=True, scope='window', modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]) def prompt_accept(self): """Accept the current prompt. // This executes the next action depending on the question mode, e.g. asks for the password or leaves the mode. """ prompt = objreg.get('prompt', scope='window', window=self._win_id) if (self._question.mode == usertypes.PromptMode.user_pwd and self._question.user is None): # User just entered a username self._question.user = prompt.lineedit.text() prompt.txt.setText("Password:") prompt.lineedit.clear() prompt.lineedit.setEchoMode(QLineEdit.Password) elif self._question.mode == usertypes.PromptMode.user_pwd: # User just entered a password password = prompt.lineedit.text() self._question.answer = AuthTuple(self._question.user, password) modeman.maybe_leave(self._win_id, usertypes.KeyMode.prompt, 'prompt accept') self._question.done() elif self._question.mode == usertypes.PromptMode.text: # User just entered text. self._question.answer = prompt.lineedit.text() modeman.maybe_leave(self._win_id, usertypes.KeyMode.prompt, 'prompt accept') self._question.done() elif self._question.mode == usertypes.PromptMode.yesno: # User wants to accept the default of a yes/no question. self._question.answer = self._question.default modeman.maybe_leave(self._win_id, usertypes.KeyMode.yesno, 'yesno accept') self._question.done() elif self._question.mode == usertypes.PromptMode.alert: # User acknowledged an alert self._question.answer = None modeman.maybe_leave(self._win_id, usertypes.KeyMode.prompt, 'alert accept') self._question.done() else: raise ValueError("Invalid question mode!") @cmdutils.register(instance='prompter', hide=True, scope='window', modes=[usertypes.KeyMode.yesno]) def prompt_yes(self): """Answer yes to a yes/no prompt.""" if self._question.mode != usertypes.PromptMode.yesno: # We just ignore this if we don't have a yes/no question. return self._question.answer = True modeman.maybe_leave(self._win_id, usertypes.KeyMode.yesno, 'yesno accept') self._question.done() @cmdutils.register(instance='prompter', hide=True, scope='window', modes=[usertypes.KeyMode.yesno]) def prompt_no(self): """Answer no to a yes/no prompt.""" if self._question.mode != usertypes.PromptMode.yesno: # We just ignore this if we don't have a yes/no question. return self._question.answer = False modeman.maybe_leave(self._win_id, usertypes.KeyMode.yesno, 'prompt accept') self._question.done() @pyqtSlot(usertypes.Question, bool) def ask_question(self, question, blocking): """Dispkay a question in the statusbar. Args: question: The Question object to ask. blocking: If True, this function blocks and returns the result. Return: The answer of the user when blocking=True. None if blocking=False. """ log.statusbar.debug("Asking question {}, blocking {}, loops {}, queue " "{}".format(question, blocking, self._loops, self._queue)) if self._shutting_down: # If we're currently shutting down we have to ignore this question # to avoid segfaults - see # https://github.com/The-Compiler/qutebrowser/issues/95 log.statusbar.debug("Ignoring question because we're shutting " "down.") question.abort() return None if self._busy and not blocking: # We got an async question, but we're already busy with one, so we # just queue it up for later. log.statusbar.debug("Adding {} to queue.".format(question)) self._queue.append(question) return if blocking: # If we're blocking we save the old state on the stack, so we can # restore it after exec, if exec gets called multiple times. context = self._get_ctx() self._question = question self._display_question() mode = self.KEY_MODES[self._question.mode] question.aborted.connect( lambda: modeman.maybe_leave(self._win_id, mode, 'aborted')) modeman.enter(self._win_id, mode, 'question asked') if blocking: loop = qtutils.EventLoop() self._loops.append(loop) loop.destroyed.connect(lambda: self._loops.remove(loop)) question.completed.connect(loop.quit) question.completed.connect(loop.deleteLater) loop.exec_() if not self._restore_ctx(context): # Nothing left to restore, so we can go back to popping async # questions. if self._queue: self._pop_later() return self._question.answer else: question.completed.connect(self._pop_later)
gpl-3.0
csgrad/ns-3-9-ngwmn
bindings/python/apidefs/gcc-ILP32/ns3_module_udp_echo.py
10
7949
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers def register_types(module): root_module = module.get_root() ## udp-echo-client.h: ns3::UdpEchoClient [class] module.add_class('UdpEchoClient', parent=root_module['ns3::Application']) ## udp-echo-server.h: ns3::UdpEchoServer [class] module.add_class('UdpEchoServer', parent=root_module['ns3::Application']) ## Register a nested module for the namespace Config nested_module = module.add_cpp_namespace('Config') register_types_ns3_Config(nested_module) ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace addressUtils nested_module = module.add_cpp_namespace('addressUtils') register_types_ns3_addressUtils(nested_module) ## Register a nested module for the namespace aodv nested_module = module.add_cpp_namespace('aodv') register_types_ns3_aodv(nested_module) ## Register a nested module for the namespace dot11s nested_module = module.add_cpp_namespace('dot11s') register_types_ns3_dot11s(nested_module) ## Register a nested module for the namespace flame nested_module = module.add_cpp_namespace('flame') register_types_ns3_flame(nested_module) ## Register a nested module for the namespace internal nested_module = module.add_cpp_namespace('internal') register_types_ns3_internal(nested_module) ## Register a nested module for the namespace olsr nested_module = module.add_cpp_namespace('olsr') register_types_ns3_olsr(nested_module) def register_types_ns3_Config(module): root_module = module.get_root() def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_addressUtils(module): root_module = module.get_root() def register_types_ns3_aodv(module): root_module = module.get_root() def register_types_ns3_dot11s(module): root_module = module.get_root() def register_types_ns3_flame(module): root_module = module.get_root() def register_types_ns3_internal(module): root_module = module.get_root() def register_types_ns3_olsr(module): root_module = module.get_root() def register_methods(root_module): register_Ns3UdpEchoClient_methods(root_module, root_module['ns3::UdpEchoClient']) register_Ns3UdpEchoServer_methods(root_module, root_module['ns3::UdpEchoServer']) return def register_Ns3UdpEchoClient_methods(root_module, cls): ## udp-echo-client.h: ns3::UdpEchoClient::UdpEchoClient(ns3::UdpEchoClient const & arg0) [copy constructor] cls.add_constructor([param('ns3::UdpEchoClient const &', 'arg0')]) ## udp-echo-client.h: ns3::UdpEchoClient::UdpEchoClient() [constructor] cls.add_constructor([]) ## udp-echo-client.h: uint32_t ns3::UdpEchoClient::GetDataSize() const [member function] cls.add_method('GetDataSize', 'uint32_t', [], is_const=True) ## udp-echo-client.h: static ns3::TypeId ns3::UdpEchoClient::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## udp-echo-client.h: void ns3::UdpEchoClient::SetDataSize(uint32_t dataSize) [member function] cls.add_method('SetDataSize', 'void', [param('uint32_t', 'dataSize')]) ## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(std::string fill) [member function] cls.add_method('SetFill', 'void', [param('std::string', 'fill')]) ## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(uint8_t fill, uint32_t dataSize) [member function] cls.add_method('SetFill', 'void', [param('uint8_t', 'fill'), param('uint32_t', 'dataSize')]) ## udp-echo-client.h: void ns3::UdpEchoClient::SetFill(uint8_t * fill, uint32_t fillSize, uint32_t dataSize) [member function] cls.add_method('SetFill', 'void', [param('uint8_t *', 'fill'), param('uint32_t', 'fillSize'), param('uint32_t', 'dataSize')]) ## udp-echo-client.h: void ns3::UdpEchoClient::SetRemote(ns3::Ipv4Address ip, uint16_t port) [member function] cls.add_method('SetRemote', 'void', [param('ns3::Ipv4Address', 'ip'), param('uint16_t', 'port')]) ## udp-echo-client.h: void ns3::UdpEchoClient::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## udp-echo-client.h: void ns3::UdpEchoClient::StartApplication() [member function] cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True) ## udp-echo-client.h: void ns3::UdpEchoClient::StopApplication() [member function] cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True) return def register_Ns3UdpEchoServer_methods(root_module, cls): ## udp-echo-server.h: ns3::UdpEchoServer::UdpEchoServer(ns3::UdpEchoServer const & arg0) [copy constructor] cls.add_constructor([param('ns3::UdpEchoServer const &', 'arg0')]) ## udp-echo-server.h: ns3::UdpEchoServer::UdpEchoServer() [constructor] cls.add_constructor([]) ## udp-echo-server.h: static ns3::TypeId ns3::UdpEchoServer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## udp-echo-server.h: void ns3::UdpEchoServer::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## udp-echo-server.h: void ns3::UdpEchoServer::StartApplication() [member function] cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True) ## udp-echo-server.h: void ns3::UdpEchoServer::StopApplication() [member function] cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_Config(module.get_submodule('Config'), root_module) register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module) register_functions_ns3_aodv(module.get_submodule('aodv'), root_module) register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module) register_functions_ns3_flame(module.get_submodule('flame'), root_module) register_functions_ns3_internal(module.get_submodule('internal'), root_module) register_functions_ns3_olsr(module.get_submodule('olsr'), root_module) return def register_functions_ns3_Config(module, root_module): return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_addressUtils(module, root_module): return def register_functions_ns3_aodv(module, root_module): return def register_functions_ns3_dot11s(module, root_module): return def register_functions_ns3_flame(module, root_module): return def register_functions_ns3_internal(module, root_module): return def register_functions_ns3_olsr(module, root_module): return
gpl-2.0
jicruz/heroku-bot
lib/youtube_dl/extractor/movingimage.py
64
1774
from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( unescapeHTML, parse_duration, ) class MovingImageIE(InfoExtractor): _VALID_URL = r'https?://movingimage\.nls\.uk/film/(?P<id>\d+)' _TEST = { 'url': 'http://movingimage.nls.uk/film/3561', 'md5': '4caa05c2b38453e6f862197571a7be2f', 'info_dict': { 'id': '3561', 'ext': 'mp4', 'title': 'SHETLAND WOOL', 'description': 'md5:c5afca6871ad59b4271e7704fe50ab04', 'duration': 900, 'thumbnail': r're:^https?://.*\.jpg$', }, } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) formats = self._extract_m3u8_formats( self._html_search_regex(r'file\s*:\s*"([^"]+)"', webpage, 'm3u8 manifest URL'), video_id, ext='mp4', entry_protocol='m3u8_native') def search_field(field_name, fatal=False): return self._search_regex( r'<span\s+class="field_title">%s:</span>\s*<span\s+class="field_content">([^<]+)</span>' % field_name, webpage, 'title', fatal=fatal) title = unescapeHTML(search_field('Title', fatal=True)).strip('()[]') description = unescapeHTML(search_field('Description')) duration = parse_duration(search_field('Running time')) thumbnail = self._search_regex( r"image\s*:\s*'([^']+)'", webpage, 'thumbnail', fatal=False) return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'duration': duration, 'thumbnail': thumbnail, }
gpl-3.0
joakim-hove/django
tests/fixtures/tests.py
113
35712
from __future__ import unicode_literals import os import sys import tempfile import unittest import warnings from django.apps import apps from django.contrib.sites.models import Site from django.core import management from django.core.serializers.base import ProgressBar from django.db import IntegrityError, connection from django.test import ( TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature, ) from django.utils import six from django.utils.encoding import force_text from .models import Article, Spy, Tag, Visa class TestCaseFixtureLoadingTests(TestCase): fixtures = ['fixture1.json', 'fixture2.json'] def testClassFixtures(self): "Check that test case has installed 3 fixture objects" self.assertEqual(Article.objects.count(), 3) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Django conquers world!>', '<Article: Copyright is fine the way it is>', '<Article: Poker has no place on ESPN>', ]) class SubclassTestCaseFixtureLoadingTests(TestCaseFixtureLoadingTests): """ Make sure that subclasses can remove fixtures from parent class (#21089). """ fixtures = [] def testClassFixtures(self): "Check that there were no fixture objects installed" self.assertEqual(Article.objects.count(), 0) class DumpDataAssertMixin(object): def _dumpdata_assert(self, args, output, format='json', filename=None, natural_foreign_keys=False, natural_primary_keys=False, use_base_manager=False, exclude_list=[], primary_keys=''): new_io = six.StringIO() if filename: filename = os.path.join(tempfile.gettempdir(), filename) management.call_command('dumpdata', *args, **{'format': format, 'stdout': new_io, 'stderr': new_io, 'output': filename, 'use_natural_foreign_keys': natural_foreign_keys, 'use_natural_primary_keys': natural_primary_keys, 'use_base_manager': use_base_manager, 'exclude': exclude_list, 'primary_keys': primary_keys}) if filename: with open(filename, "r") as f: command_output = f.read() os.remove(filename) else: command_output = new_io.getvalue().strip() if format == "json": self.assertJSONEqual(command_output, output) elif format == "xml": self.assertXMLEqual(command_output, output) else: self.assertEqual(command_output, output) class FixtureLoadingTests(DumpDataAssertMixin, TestCase): def test_loading_and_dumping(self): apps.clear_cache() Site.objects.all().delete() # Load fixture 1. Single JSON file, with two objects. management.call_command('loaddata', 'fixture1.json', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Time to reform copyright>', '<Article: Poker has no place on ESPN>', ]) # Dump the current contents of the database as a JSON fixture self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Try just dumping the contents of fixtures.Category self._dumpdata_assert(['fixtures.Category'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]') # ...and just fixtures.Article self._dumpdata_assert(['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # ...and both self._dumpdata_assert(['fixtures.Category', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Specify a specific model twice self._dumpdata_assert(['fixtures.Article', 'fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Specify a dump that specifies Article both explicitly and implicitly self._dumpdata_assert(['fixtures.Article', 'fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Specify a dump that specifies Article both explicitly and implicitly, # but lists the app first (#22025). self._dumpdata_assert(['fixtures', 'fixtures.Article'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Same again, but specify in the reverse order self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Specify one model from one application, and an entire other application. self._dumpdata_assert(['fixtures.Category', 'sites'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]') # Load fixture 2. JSON file imported by default. Overwrites some existing objects management.call_command('loaddata', 'fixture2.json', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Django conquers world!>', '<Article: Copyright is fine the way it is>', '<Article: Poker has no place on ESPN>', ]) # Load fixture 3, XML format. management.call_command('loaddata', 'fixture3.xml', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: XML identified as leading cause of cancer>', '<Article: Django conquers world!>', '<Article: Copyright is fine the way it is>', '<Article: Poker on TV is great!>', ]) # Load fixture 6, JSON file with dynamic ContentType fields. Testing ManyToOne. management.call_command('loaddata', 'fixture6.json', verbosity=0) self.assertQuerysetEqual(Tag.objects.all(), [ '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">', '<Tag: <Article: Copyright is fine the way it is> tagged "law">', ], ordered=False) # Load fixture 7, XML file with dynamic ContentType fields. Testing ManyToOne. management.call_command('loaddata', 'fixture7.xml', verbosity=0) self.assertQuerysetEqual(Tag.objects.all(), [ '<Tag: <Article: Copyright is fine the way it is> tagged "copyright">', '<Tag: <Article: Copyright is fine the way it is> tagged "legal">', '<Tag: <Article: Django conquers world!> tagged "django">', '<Tag: <Article: Django conquers world!> tagged "world domination">', ], ordered=False) # Load fixture 8, JSON file with dynamic Permission fields. Testing ManyToMany. management.call_command('loaddata', 'fixture8.json', verbosity=0) self.assertQuerysetEqual(Visa.objects.all(), [ '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>', '<Visa: Stephane Grappelli Can add user>', '<Visa: Prince >' ], ordered=False) # Load fixture 9, XML file with dynamic Permission fields. Testing ManyToMany. management.call_command('loaddata', 'fixture9.xml', verbosity=0) self.assertQuerysetEqual(Visa.objects.all(), [ '<Visa: Django Reinhardt Can add user, Can change user, Can delete user>', '<Visa: Stephane Grappelli Can add user, Can delete user>', '<Visa: Artist formerly known as "Prince" Can change user>' ], ordered=False) # object list is unaffected self.assertQuerysetEqual(Article.objects.all(), [ '<Article: XML identified as leading cause of cancer>', '<Article: Django conquers world!>', '<Article: Copyright is fine the way it is>', '<Article: Poker on TV is great!>', ]) # By default, you get raw keys on dumpdata self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [3, 1]}}]') # But you can get natural keys if you ask for them and they are available self._dumpdata_assert(['fixtures.book'], '[{"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]', natural_foreign_keys=True) # You can also omit the primary keys for models that we can get later with natural keys. self._dumpdata_assert(['fixtures.person'], '[{"fields": {"name": "Django Reinhardt"}, "model": "fixtures.person"}, {"fields": {"name": "Stephane Grappelli"}, "model": "fixtures.person"}, {"fields": {"name": "Artist formerly known as \\"Prince\\""}, "model": "fixtures.person"}]', natural_primary_keys=True) # Dump the current contents of the database as a JSON fixture self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker on TV is great!", "pub_date": "2006-06-16T11:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}, {"pk": 4, "model": "fixtures.article", "fields": {"headline": "Django conquers world!", "pub_date": "2006-06-16T15:00:00"}}, {"pk": 5, "model": "fixtures.article", "fields": {"headline": "XML identified as leading cause of cancer", "pub_date": "2006-06-16T16:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "legal", "tagged_id": 3}}, {"pk": 3, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "django", "tagged_id": 4}}, {"pk": 4, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "world domination", "tagged_id": 4}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Artist formerly known as \\"Prince\\""}}, {"pk": 1, "model": "fixtures.visa", "fields": {"person": ["Django Reinhardt"], "permissions": [["add_user", "auth", "user"], ["change_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 2, "model": "fixtures.visa", "fields": {"person": ["Stephane Grappelli"], "permissions": [["add_user", "auth", "user"], ["delete_user", "auth", "user"]]}}, {"pk": 3, "model": "fixtures.visa", "fields": {"person": ["Artist formerly known as \\"Prince\\""], "permissions": [["change_user", "auth", "user"]]}}, {"pk": 1, "model": "fixtures.book", "fields": {"name": "Music for all ages", "authors": [["Artist formerly known as \\"Prince\\""], ["Django Reinhardt"]]}}]', natural_foreign_keys=True) # Dump the current contents of the database as an XML fixture self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?> <django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker on TV is great!</field><field type="DateTimeField" name="pub_date">2006-06-16T11:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Copyright is fine the way it is</field><field type="DateTimeField" name="pub_date">2006-06-16T14:00:00</field></object><object pk="4" model="fixtures.article"><field type="CharField" name="headline">Django conquers world!</field><field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field></object><object pk="5" model="fixtures.article"><field type="CharField" name="headline">XML identified as leading cause of cancer</field><field type="DateTimeField" name="pub_date">2006-06-16T16:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">legal</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="3" model="fixtures.tag"><field type="CharField" name="name">django</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="4" model="fixtures.tag"><field type="CharField" name="name">world domination</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">4</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Artist formerly known as "Prince"</field></object><object pk="1" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Django Reinhardt</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="2" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Stephane Grappelli</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>add_user</natural><natural>auth</natural><natural>user</natural></object><object><natural>delete_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="3" model="fixtures.visa"><field to="fixtures.person" name="person" rel="ManyToOneRel"><natural>Artist formerly known as "Prince"</natural></field><field to="auth.permission" name="permissions" rel="ManyToManyRel"><object><natural>change_user</natural><natural>auth</natural><natural>user</natural></object></field></object><object pk="1" model="fixtures.book"><field type="CharField" name="name">Music for all ages</field><field to="fixtures.person" name="authors" rel="ManyToManyRel"><object><natural>Artist formerly known as "Prince"</natural></object><object><natural>Django Reinhardt</natural></object></field></object></django-objects>""", format='xml', natural_foreign_keys=True) def test_dumpdata_with_excludes(self): # Load fixture1 which has a site, two articles, and a category Site.objects.all().delete() management.call_command('loaddata', 'fixture1.json', verbosity=0) # Excluding fixtures app should only leave sites self._dumpdata_assert( ['sites', 'fixtures'], '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}]', exclude_list=['fixtures']) # Excluding fixtures.Article/Book should leave fixtures.Category self._dumpdata_assert( ['sites', 'fixtures'], '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]', exclude_list=['fixtures.Article', 'fixtures.Book']) # Excluding fixtures and fixtures.Article/Book should be a no-op self._dumpdata_assert( ['sites', 'fixtures'], '[{"pk": 1, "model": "sites.site", "fields": {"domain": "example.com", "name": "example.com"}}, {"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]', exclude_list=['fixtures.Article', 'fixtures.Book']) # Excluding sites and fixtures.Article/Book should only leave fixtures.Category self._dumpdata_assert( ['sites', 'fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}]', exclude_list=['fixtures.Article', 'fixtures.Book', 'sites']) # Excluding a bogus app should throw an error with six.assertRaisesRegex(self, management.CommandError, "No installed app with label 'foo_app'."): self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['foo_app']) # Excluding a bogus model should throw an error with six.assertRaisesRegex(self, management.CommandError, "Unknown model in excludes: fixtures.FooModel"): self._dumpdata_assert(['fixtures', 'sites'], '', exclude_list=['fixtures.FooModel']) @unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support '?' in filenames.") def test_load_fixture_with_special_characters(self): management.call_command('loaddata', 'fixture_with[special]chars', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), ['<Article: How To Deal With Special Characters>']) def test_dumpdata_with_filtering_manager(self): spy1 = Spy.objects.create(name='Paul') spy2 = Spy.objects.create(name='Alex', cover_blown=True) self.assertQuerysetEqual(Spy.objects.all(), ['<Spy: Paul>']) # Use the default manager self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % spy1.pk) # Dump using Django's base manager. Should return all objects, # even those normally filtered by the manager self._dumpdata_assert(['fixtures.Spy'], '[{"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": true}}, {"pk": %d, "model": "fixtures.spy", "fields": {"cover_blown": false}}]' % (spy2.pk, spy1.pk), use_base_manager=True) def test_dumpdata_with_pks(self): management.call_command('loaddata', 'fixture1.json', verbosity=0) management.call_command('loaddata', 'fixture2.json', verbosity=0) self._dumpdata_assert( ['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]', primary_keys='2,3' ) self._dumpdata_assert( ['fixtures.Article'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}]', primary_keys='2' ) with six.assertRaisesRegex(self, management.CommandError, "You can only use --pks option with one model"): self._dumpdata_assert( ['fixtures'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]', primary_keys='2,3' ) with six.assertRaisesRegex(self, management.CommandError, "You can only use --pks option with one model"): self._dumpdata_assert( '', '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]', primary_keys='2,3' ) with six.assertRaisesRegex(self, management.CommandError, "You can only use --pks option with one model"): self._dumpdata_assert( ['fixtures.Article', 'fixtures.category'], '[{"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Copyright is fine the way it is", "pub_date": "2006-06-16T14:00:00"}}]', primary_keys='2,3' ) def test_dumpdata_with_file_output(self): management.call_command('loaddata', 'fixture1.json', verbosity=0) self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]', filename='dumpdata.json') def test_dumpdata_progressbar(self): """ Dumpdata shows a progress bar on the command line when --output is set, stdout is a tty, and verbosity > 0. """ management.call_command('loaddata', 'fixture1.json', verbosity=0) new_io = six.StringIO() new_io.isatty = lambda: True _, filename = tempfile.mkstemp() options = { 'format': 'json', 'stdout': new_io, 'stderr': new_io, 'output': filename, } management.call_command('dumpdata', 'fixtures', **options) self.assertTrue(new_io.getvalue().endswith('[' + '.' * ProgressBar.progress_width + ']\n')) # Test no progress bar when verbosity = 0 options['verbosity'] = 0 new_io = six.StringIO() new_io.isatty = lambda: True management.call_command('dumpdata', 'fixtures', **options) self.assertEqual(new_io.getvalue(), '') def test_compress_format_loading(self): # Load fixture 4 (compressed), using format specification management.call_command('loaddata', 'fixture4.json', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Django pets kitten>', ]) def test_compressed_specified_loading(self): # Load fixture 5 (compressed), using format *and* compression specification management.call_command('loaddata', 'fixture5.json.zip', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: WoW subscribers now outnumber readers>', ]) def test_compressed_loading(self): # Load fixture 5 (compressed), only compression specification management.call_command('loaddata', 'fixture5.zip', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: WoW subscribers now outnumber readers>', ]) def test_ambiguous_compressed_fixture(self): # The name "fixture5" is ambiguous, so loading it will raise an error with self.assertRaises(management.CommandError) as cm: management.call_command('loaddata', 'fixture5', verbosity=0) self.assertIn("Multiple fixtures named 'fixture5'", cm.exception.args[0]) def test_db_loading(self): # Load db fixtures 1 and 2. These will load using the 'default' database identifier implicitly management.call_command('loaddata', 'db_fixture_1', verbosity=0) management.call_command('loaddata', 'db_fixture_2', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Who needs more than one database?>', '<Article: Who needs to use compressed data?>', ]) def test_loaddata_error_message(self): """ Verifies that loading a fixture which contains an invalid object outputs an error message which contains the pk of the object that triggered the error. """ # MySQL needs a little prodding to reject invalid data. # This won't affect other tests because the database connection # is closed at the end of each test. if connection.vendor == 'mysql': connection.cursor().execute("SET sql_mode = 'TRADITIONAL'") with self.assertRaises(IntegrityError) as cm: management.call_command('loaddata', 'invalid.json', verbosity=0) self.assertIn("Could not load fixtures.Article(pk=1):", cm.exception.args[0]) @ignore_warnings(category=UserWarning, message="No fixture named") def test_loaddata_app_option(self): """ Verifies that the --app option works. """ management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="someotherapp") self.assertQuerysetEqual(Article.objects.all(), []) management.call_command('loaddata', 'db_fixture_1', verbosity=0, app_label="fixtures") self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Who needs more than one database?>', ]) def test_loaddata_verbosity_three(self): output = six.StringIO() management.call_command('loaddata', 'fixture1.json', verbosity=3, stdout=output, stderr=output) command_output = force_text(output.getvalue()) self.assertIn( "\rProcessed 1 object(s).\rProcessed 2 object(s)." "\rProcessed 3 object(s).\rProcessed 4 object(s).\n", command_output ) def test_loading_using(self): # Load db fixtures 1 and 2. These will load using the 'default' database identifier explicitly management.call_command('loaddata', 'db_fixture_1', verbosity=0, using='default') management.call_command('loaddata', 'db_fixture_2', verbosity=0, using='default') self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Who needs more than one database?>', '<Article: Who needs to use compressed data?>', ]) @ignore_warnings(category=UserWarning, message="No fixture named") def test_unmatched_identifier_loading(self): # Try to load db fixture 3. This won't load because the database identifier doesn't match management.call_command('loaddata', 'db_fixture_3', verbosity=0) management.call_command('loaddata', 'db_fixture_3', verbosity=0, using='default') self.assertQuerysetEqual(Article.objects.all(), []) def test_output_formats(self): # Load back in fixture 1, we need the articles from it management.call_command('loaddata', 'fixture1', verbosity=0) # Try to load fixture 6 using format discovery management.call_command('loaddata', 'fixture6', verbosity=0) self.assertQuerysetEqual(Tag.objects.all(), [ '<Tag: <Article: Time to reform copyright> tagged "copyright">', '<Tag: <Article: Time to reform copyright> tagged "law">' ], ordered=False) # Dump the current contents of the database as a JSON fixture self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}, {"pk": 1, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "copyright", "tagged_id": 3}}, {"pk": 2, "model": "fixtures.tag", "fields": {"tagged_type": ["fixtures", "article"], "name": "law", "tagged_id": 3}}, {"pk": 1, "model": "fixtures.person", "fields": {"name": "Django Reinhardt"}}, {"pk": 2, "model": "fixtures.person", "fields": {"name": "Stephane Grappelli"}}, {"pk": 3, "model": "fixtures.person", "fields": {"name": "Prince"}}]', natural_foreign_keys=True) # Dump the current contents of the database as an XML fixture self._dumpdata_assert(['fixtures'], """<?xml version="1.0" encoding="utf-8"?> <django-objects version="1.0"><object pk="1" model="fixtures.category"><field type="CharField" name="title">News Stories</field><field type="TextField" name="description">Latest news stories</field></object><object pk="2" model="fixtures.article"><field type="CharField" name="headline">Poker has no place on ESPN</field><field type="DateTimeField" name="pub_date">2006-06-16T12:00:00</field></object><object pk="3" model="fixtures.article"><field type="CharField" name="headline">Time to reform copyright</field><field type="DateTimeField" name="pub_date">2006-06-16T13:00:00</field></object><object pk="1" model="fixtures.tag"><field type="CharField" name="name">copyright</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="2" model="fixtures.tag"><field type="CharField" name="name">law</field><field to="contenttypes.contenttype" name="tagged_type" rel="ManyToOneRel"><natural>fixtures</natural><natural>article</natural></field><field type="PositiveIntegerField" name="tagged_id">3</field></object><object pk="1" model="fixtures.person"><field type="CharField" name="name">Django Reinhardt</field></object><object pk="2" model="fixtures.person"><field type="CharField" name="name">Stephane Grappelli</field></object><object pk="3" model="fixtures.person"><field type="CharField" name="name">Prince</field></object></django-objects>""", format='xml', natural_foreign_keys=True) class NonExistentFixtureTests(TestCase): """ Custom class to limit fixture dirs. """ available_apps = ['django.contrib.auth', 'django.contrib.contenttypes'] def test_loaddata_not_existent_fixture_file(self): stdout_output = six.StringIO() with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # With verbosity=2, we get both stdout output and a warning management.call_command( 'loaddata', 'this_fixture_doesnt_exist', verbosity=2, stdout=stdout_output, ) self.assertIn("No fixture 'this_fixture_doesnt_exist' in", force_text(stdout_output.getvalue())) self.assertEqual(len(w), 1) self.assertEqual(force_text(w[0].message), "No fixture named 'this_fixture_doesnt_exist' found.") class FixtureTransactionTests(DumpDataAssertMixin, TransactionTestCase): available_apps = [ 'fixtures', 'django.contrib.contenttypes', 'django.contrib.auth', 'django.contrib.sites', ] @skipUnlessDBFeature('supports_forward_references') def test_format_discovery(self): # Load fixture 1 again, using format discovery management.call_command('loaddata', 'fixture1', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Time to reform copyright>', '<Article: Poker has no place on ESPN>', ]) # Try to load fixture 2 using format discovery; this will fail # because there are two fixture2's in the fixtures directory with self.assertRaises(management.CommandError) as cm: management.call_command('loaddata', 'fixture2', verbosity=0) self.assertIn("Multiple fixtures named 'fixture2'", cm.exception.args[0]) # object list is unaffected self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Time to reform copyright>', '<Article: Poker has no place on ESPN>', ]) # Dump the current contents of the database as a JSON fixture self._dumpdata_assert(['fixtures'], '[{"pk": 1, "model": "fixtures.category", "fields": {"description": "Latest news stories", "title": "News Stories"}}, {"pk": 2, "model": "fixtures.article", "fields": {"headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T12:00:00"}}, {"pk": 3, "model": "fixtures.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16T13:00:00"}}]') # Load fixture 4 (compressed), using format discovery management.call_command('loaddata', 'fixture4', verbosity=0) self.assertQuerysetEqual(Article.objects.all(), [ '<Article: Django pets kitten>', '<Article: Time to reform copyright>', '<Article: Poker has no place on ESPN>', ])
bsd-3-clause
chop-dbhi/varify-data-warehouse
vdw/genes/models.py
1
4984
from django.db import models from django.contrib.auth.models import User from objectset.models import ObjectSet, SetObject from vdw.literature.models import PubMed from vdw.genome.models import Chromosome from vdw.phenotypes.models import Phenotype, PhenotypeThrough from .managers import GeneManager class GeneFamily(models.Model): "Gene family tags and descriptions." tag = models.CharField(max_length=30, null=True) description = models.CharField(max_length=200, null=True) class Meta(object): db_table = 'gene_family' class Synonym(models.Model): """Model which contains known alternate gene names and symbols for the canonical genes. This can be used as an index for search-related queries. """ # Call it a label since this may be a symbol, a name or something else label = models.CharField(max_length=255, db_index=True) class Meta(object): db_table = 'synonym' class Gene(models.Model): """Unified gene model. This includes data from multiple sources with the appropriate `id` defined to which references the source. If multiple sources contain have overlap, the respective `id`s will be filled in. The canonical source is HGNC, which approves gene names and symbols, the `approved` flag should be set if this is the approved gene name and symbol by HGNC. """ chr = models.ForeignKey(Chromosome) symbol = models.CharField(max_length=255, db_index=True) name = models.TextField('full name', blank=True) hgnc_id = models.IntegerField('HGNC ID', null=True, blank=True) # Via the HGNC documentation: "Families/groups may be either structural or # functional, therefore a gene may belong to more than one family/group" families = models.ManyToManyField(GeneFamily, blank=True) # Literature articles = models.ManyToManyField(PubMed, db_table='gene_pubmed') # Synonyms synonyms = models.ManyToManyField(Synonym, db_table='gene_synonym') # Phenotypes phenotypes = models.ManyToManyField(Phenotype, through='GenePhenotype') objects = GeneManager() class Meta(object): db_table = 'gene' def __unicode__(self): return self.symbol def approved(self): return self.hgnc_id is not None def hgnc_url(self): if self.hgnc_id: return 'http://www.genenames.org/data/hgnc_data.php?hgnc_id=' + \ str(self.hgnc_id) class GenePhenotype(PhenotypeThrough): gene = models.ForeignKey(Gene) class Meta(object): db_table = 'gene_phenotype' class Exon(models.Model): "Gene-specific exon region" gene = models.ForeignKey(Gene) index = models.IntegerField('exon index') start = models.IntegerField('exon start position') end = models.IntegerField('exon end position') class Meta(object): db_table = 'exon' class Transcript(models.Model): "Gene transcripts" refseq_id = models.CharField(max_length=100, unique=True) strand = models.CharField(max_length=1, null=True, blank=True, help_text='+ or - for strand') start = models.IntegerField('transcript start position', null=True, blank=True) end = models.IntegerField('transcript end position', null=True, blank=True) coding_start = models.IntegerField('coding region start position', null=True, blank=True) coding_end = models.IntegerField('coding region end position', null=True, blank=True) coding_start_status = models.CharField('coding region start status', max_length=20, null=True, blank=True) coding_end_status = models.CharField('coding region end status', max_length=20, null=True, blank=True) exon_count = models.IntegerField('number of exons', null=True, blank=True) gene = models.ForeignKey(Gene, null=True, blank=True) exons = models.ManyToManyField(Exon, db_table='transcript_exon') class Meta(object): db_table = 'transcript' def ncbi_url(self): return 'http://www.ncbi.nlm.nih.gov/nuccore/' + self.refseq_id class GeneSet(ObjectSet): user = models.ForeignKey(User, null=True, blank=True) name = models.CharField(max_length=100, null=True, blank=True) genes = models.ManyToManyField(Gene, through='GeneSetObject') published = models.BooleanField(default=True) set_object_rel = 'genes' label_field = 'name' def __unicode__(self): return unicode(self.name) class Meta(object): db_table = 'geneset' ordering = ('user', 'name',) class GeneSetObject(SetObject): object_set = models.ForeignKey(GeneSet, db_column='set_id') set_object = models.ForeignKey(Gene, db_column='object_id') class Meta(object): db_table = 'geneset_setobject'
bsd-2-clause
mattjmorrison/ReportLab
src/reportlab/lib/colors.py
10
35800
#Copyright ReportLab Europe Ltd. 2000-2010 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/colors.py __version__=''' $Id: colors.py 3780 2010-09-17 13:40:59Z rgbecker $ ''' __doc__='''Defines standard colour-handling classes and colour names. We define standard classes to hold colours in two models: RGB and CMYK. These can be constructed from several popular formats. We also include - pre-built colour objects for the HTML standard colours - pre-built colours used in ReportLab's branding - various conversion and construction functions ''' import math from reportlab.lib.utils import fp_str class Color: """This class is used to represent color. Components red, green, blue are in the range 0 (dark) to 1 (full intensity).""" def __init__(self, red=0, green=0, blue=0, alpha=1): "Initialize with red, green, blue in range [0-1]." self.red = red self.green = green self.blue = blue self.alpha = alpha def __repr__(self): return "Color(%s)" % fp_str(*(self.red, self.green, self.blue,self.alpha)).replace(' ',',') def __hash__(self): return hash((self.red, self.green, self.blue, self.alpha)) def __cmp__(self,other): '''simple comparison by component; cmyk != color ever >>> cmp(Color(0,0,0),None) -1 >>> cmp(Color(0,0,0),black) 0 >>> cmp(Color(0,0,0),CMYKColor(0,0,0,1)),Color(0,0,0).rgba()==CMYKColor(0,0,0,1).rgba() (-1, True) ''' if isinstance(other,CMYKColor) or not isinstance(other,Color): return -1 try: return cmp((self.red, self.green, self.blue, self.alpha), (other.red, other.green, other.blue, other.alpha)) except: return -1 return 0 def rgb(self): "Returns a three-tuple of components" return (self.red, self.green, self.blue) def rgba(self): "Returns a four-tuple of components" return (self.red, self.green, self.blue, self.alpha) def bitmap_rgb(self): return tuple(map(lambda x: int(x*255)&255, self.rgb())) def bitmap_rgba(self): return tuple(map(lambda x: int(x*255)&255, self.rgba())) def hexval(self): return '0x%02x%02x%02x' % self.bitmap_rgb() def hexvala(self): return '0x%02x%02x%02x%02x' % self.bitmap_rgba() _cKwds='red green blue alpha'.split() def cKwds(self): for k in self._cKwds: yield k,getattr(self,k) cKwds=property(cKwds) def clone(self,**kwds): '''copy then change values in kwds''' D = dict([kv for kv in self.cKwds]) D.update(kwds) return self.__class__(**D) def _lookupName(self,D={}): if not D: for n,v in getAllNamedColors().iteritems(): if not isinstance(v,CMYKColor): t = v.red,v.green,v.blue if t in D: n = n+'/'+D[t] D[t] = n t = self.red,self.green,self.blue return t in D and D[t] or None class CMYKColor(Color): """This represents colors using the CMYK (cyan, magenta, yellow, black) model commonly used in professional printing. This is implemented as a derived class so that renderers which only know about RGB "see it" as an RGB color through its 'red','green' and 'blue' attributes, according to an approximate function. The RGB approximation is worked out when the object in constructed, so the color attributes should not be changed afterwards. Extra attributes may be attached to the class to support specific ink models, and renderers may look for these.""" _scale = 1.0 def __init__(self, cyan=0, magenta=0, yellow=0, black=0, spotName=None, density=1, knockout=None, alpha=1): """ Initialize with four colors in range [0-1]. the optional spotName, density & knockout may be of use to specific renderers. spotName is intended for use as an identifier to the renderer not client programs. density is used to modify the overall amount of ink. knockout is a renderer dependent option that determines whether the applied colour knocksout (removes) existing colour; None means use the global default. """ self.cyan = cyan self.magenta = magenta self.yellow = yellow self.black = black self.spotName = spotName self.density = max(min(density,1),0) # force into right range self.knockout = knockout self.alpha = alpha # now work out the RGB approximation. override self.red, self.green, self.blue = cmyk2rgb( (cyan, magenta, yellow, black) ) if density<1: #density adjustment of rgb approximants, effectively mix with white r, g, b = self.red, self.green, self.blue r = density*(r-1)+1 g = density*(g-1)+1 b = density*(b-1)+1 self.red, self.green, self.blue = (r,g,b) def __repr__(self): return "%s(%s%s%s%s%s)" % (self.__class__.__name__, fp_str(self.cyan, self.magenta, self.yellow, self.black).replace(' ',','), (self.spotName and (',spotName='+repr(self.spotName)) or ''), (self.density!=1 and (',density='+fp_str(self.density)) or ''), (self.knockout is not None and (',knockout=%d' % self.knockout) or ''), (self.alpha is not None and (',alpha=%s' % self.alpha) or ''), ) def fader(self, n, reverse=False): '''return n colors based on density fade *NB* note this dosen't reach density zero''' scale = self._scale dd = scale/float(n) L = [self.clone(density=scale - i*dd) for i in xrange(n)] if reverse: L.reverse() return L def __hash__(self): return hash( (self.cyan, self.magenta, self.yellow, self.black, self.density, self.spotName, self.alpha) ) def __cmp__(self,other): """obvious way to compare colours Comparing across the two color models is of limited use. >>> cmp(CMYKColor(0,0,0,1),None) -1 >>> cmp(CMYKColor(0,0,0,1),_CMYK_black) 0 >>> cmp(PCMYKColor(0,0,0,100),_CMYK_black) 0 >>> cmp(CMYKColor(0,0,0,1),Color(0,0,1)),Color(0,0,0).rgba()==CMYKColor(0,0,0,1).rgba() (-1, True) """ if not isinstance(other, CMYKColor): return -1 try: return cmp( (self.cyan, self.magenta, self.yellow, self.black, self.density, self.alpha, self.spotName), (other.cyan, other.magenta, other.yellow, other.black, other.density, other.alpha, other.spotName)) except: # or just return 'not equal' if not a color return -1 return 0 def cmyk(self): "Returns a tuple of four color components - syntactic sugar" return (self.cyan, self.magenta, self.yellow, self.black) def cmyka(self): "Returns a tuple of five color components - syntactic sugar" return (self.cyan, self.magenta, self.yellow, self.black, self.alpha) def _density_str(self): return fp_str(self.density) _cKwds='cyan magenta yellow black density alpha spotName knockout'.split() def _lookupName(self,D={}): if not D: for n,v in getAllNamedColors().iteritems(): if isinstance(v,CMYKColor): t = v.cyan,v.magenta,v.yellow,v.black if t in D: n = n+'/'+D[t] D[t] = n t = self.cyan,self.magenta,self.yellow,self.black return t in D and D[t] or None class PCMYKColor(CMYKColor): '''100 based CMYKColor with density and a spotName; just like Rimas uses''' _scale = 100. def __init__(self,cyan,magenta,yellow,black,density=100,spotName=None,knockout=None,alpha=100): CMYKColor.__init__(self,cyan/100.,magenta/100.,yellow/100.,black/100.,spotName,density/100.,knockout=knockout,alpha=alpha/100.) def __repr__(self): return "%s(%s%s%s%s%s)" % (self.__class__.__name__, fp_str(self.cyan*100, self.magenta*100, self.yellow*100, self.black*100).replace(' ',','), (self.spotName and (',spotName='+repr(self.spotName)) or ''), (self.density!=1 and (',density='+fp_str(self.density*100)) or ''), (self.knockout is not None and (',knockout=%d' % self.knockout) or ''), (self.alpha is not None and (',alpha=%s' % (fp_str(self.alpha*100))) or ''), ) def cKwds(self): K=self._cKwds S=K[:6] for k in self._cKwds: v=getattr(self,k) if k in S: v*=100 yield k,v cKwds=property(cKwds) class CMYKColorSep(CMYKColor): '''special case color for making separating pdfs''' _scale = 1. def __init__(self, cyan=0, magenta=0, yellow=0, black=0, spotName=None, density=1,alpha=1): CMYKColor.__init__(self,cyan,magenta,yellow,black,spotName,density,knockout=None,alpha=alpha) _cKwds='cyan magenta yellow black density alpha spotName'.split() class PCMYKColorSep(PCMYKColor,CMYKColorSep): '''special case color for making separating pdfs''' _scale = 100. def __init__(self, cyan=0, magenta=0, yellow=0, black=0, spotName=None, density=100, alpha=100): PCMYKColor.__init__(self,cyan,magenta,yellow,black,density,spotName,knockout=None,alpha=alpha) _cKwds='cyan magenta yellow black density alpha spotName'.split() def cmyk2rgb(cmyk,density=1): "Convert from a CMYK color tuple to an RGB color tuple" c,m,y,k = cmyk # From the Adobe Postscript Ref. Manual 2nd ed. r = 1.0 - min(1.0, c + k) g = 1.0 - min(1.0, m + k) b = 1.0 - min(1.0, y + k) return (r,g,b) def rgb2cmyk(r,g,b): '''one way to get cmyk from rgb''' c = 1 - r m = 1 - g y = 1 - b k = min(c,m,y) c = min(1,max(0,c-k)) m = min(1,max(0,m-k)) y = min(1,max(0,y-k)) k = min(1,max(0,k)) return (c,m,y,k) def color2bw(colorRGB): "Transform an RGB color to a black and white equivalent." col = colorRGB r, g, b, a = col.red, col.green, col.blue, col.alpha n = (r + g + b) / 3.0 bwColorRGB = Color(n, n, n, a) return bwColorRGB def HexColor(val, htmlOnly=False, alpha=False): """This function converts a hex string, or an actual integer number, into the corresponding color. E.g., in "#AABBCC" or 0xAABBCC, AA is the red, BB is the green, and CC is the blue (00-FF). An alpha value can also be given in the form #AABBCCDD or 0xAABBCCDD where DD is the alpha value. For completeness I assume that #aabbcc or 0xaabbcc are hex numbers otherwise a pure integer is converted as decimal rgb. If htmlOnly is true, only the #aabbcc form is allowed. >>> HexColor('#ffffff') Color(1,1,1,1) >>> HexColor('#FFFFFF') Color(1,1,1,1) >>> HexColor('0xffffff') Color(1,1,1,1) >>> HexColor('16777215') Color(1,1,1,1) An '0x' or '#' prefix is required for hex (as opposed to decimal): >>> HexColor('ffffff') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'ffffff' >>> HexColor('#FFFFFF', htmlOnly=True) Color(1,1,1,1) >>> HexColor('0xffffff', htmlOnly=True) Traceback (most recent call last): ValueError: not a hex string >>> HexColor('16777215', htmlOnly=True) Traceback (most recent call last): ValueError: not a hex string """ #" for emacs if isinstance(val,basestring): b = 10 if val[:1] == '#': val = val[1:] b = 16 if len(val) == 8: alpha = True else: if htmlOnly: raise ValueError('not a hex string') if val[:2].lower() == '0x': b = 16 val = val[2:] if len(val) == 8: alpha = True val = int(val,b) if alpha: return Color((val>>24)&0xFF/255.0,((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0) return Color(((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0) def linearlyInterpolatedColor(c0, c1, x0, x1, x): """ Linearly interpolates colors. Can handle RGB, CMYK and PCMYK colors - give ValueError if colours aren't the same. Doesn't currently handle 'Spot Color Interpolation'. """ if c0.__class__ != c1.__class__: raise ValueError("Color classes must be the same for interpolation!\nGot %r and %r'"%(c0,c1)) if x1<x0: x0,x1,c0,c1 = x1,x0,c1,c0 # normalized so x1>x0 if x<x0-1e-8 or x>x1+1e-8: # fudge factor for numerical problems raise ValueError, "Can't interpolate: x=%f is not between %f and %f!" % (x,x0,x1) if x<=x0: return c0 elif x>=x1: return c1 cname = c0.__class__.__name__ dx = float(x1-x0) x = x-x0 if cname == 'Color': # RGB r = c0.red+x*(c1.red - c0.red)/dx g = c0.green+x*(c1.green- c0.green)/dx b = c0.blue+x*(c1.blue - c0.blue)/dx a = c0.alpha+x*(c1.alpha - c0.alpha)/dx return Color(r,g,b,alpha=a) elif cname == 'CMYKColor': if cmykDistance(c0,c1)<1e-8: #colors same do density and preserve spotName if any assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName" c = c0.cyan m = c0.magenta y = c0.yellow k = c0.black d = c0.density+x*(c1.density - c0.density)/dx a = c0.alpha+x*(c1.alpha - c0.alpha)/dx return CMYKColor(c,m,y,k, density=d, spotName=c0.spotName, alpha=a) elif cmykDistance(c0,_CMYK_white)<1e-8: #special c0 is white c = c1.cyan m = c1.magenta y = c1.yellow k = c1.black d = x*c1.density/dx a = x*c1.alpha/dx return CMYKColor(c,m,y,k, density=d, spotName=c1.spotName, alpha=a) elif cmykDistance(c1,_CMYK_white)<1e-8: #special c1 is white c = c0.cyan m = c0.magenta y = c0.yellow k = c0.black d = x*c0.density/dx d = c0.density*(1-x/dx) a = c0.alpha*(1-x/dx) return PCMYKColor(c,m,y,k, density=d, spotName=c0.spotName, alpha=a) else: c = c0.cyan+x*(c1.cyan - c0.cyan)/dx m = c0.magenta+x*(c1.magenta - c0.magenta)/dx y = c0.yellow+x*(c1.yellow - c0.yellow)/dx k = c0.black+x*(c1.black - c0.black)/dx d = c0.density+x*(c1.density - c0.density)/dx a = c0.alpha+x*(c1.alpha - c0.alpha)/dx return CMYKColor(c,m,y,k, density=d, alpha=a) elif cname == 'PCMYKColor': if cmykDistance(c0,c1)<1e-8: #colors same do density and preserve spotName if any assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName" c = c0.cyan m = c0.magenta y = c0.yellow k = c0.black d = c0.density+x*(c1.density - c0.density)/dx a = c0.alpha+x*(c1.alpha - c0.alpha)/dx return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c0.spotName, alpha=100*a) elif cmykDistance(c0,_CMYK_white)<1e-8: #special c0 is white c = c1.cyan m = c1.magenta y = c1.yellow k = c1.black d = x*c1.density/dx a = x*c1.alpha/dx return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c1.spotName, alpha=a*100) elif cmykDistance(c1,_CMYK_white)<1e-8: #special c1 is white c = c0.cyan m = c0.magenta y = c0.yellow k = c0.black d = x*c0.density/dx d = c0.density*(1-x/dx) a = c0.alpha*(1-x/dx) return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c0.spotName, alpha=a*100) else: c = c0.cyan+x*(c1.cyan - c0.cyan)/dx m = c0.magenta+x*(c1.magenta - c0.magenta)/dx y = c0.yellow+x*(c1.yellow - c0.yellow)/dx k = c0.black+x*(c1.black - c0.black)/dx d = c0.density+x*(c1.density - c0.density)/dx a = c0.alpha+x*(c1.alpha - c0.alpha)/dx return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, alpha=a*100) else: raise ValueError, "Can't interpolate: Unknown color class %s!" % cname def obj_R_G_B(c): '''attempt to convert an object to (red,green,blue)''' if isinstance(c,Color): return c.red,c.green,c.blue elif isinstance(c,(tuple,list)): if len(c)==3: return tuple(c) elif len(c)==4: return toColor(c).rgb() else: raise ValueError('obj_R_G_B(%r) bad argument' % (c)) # special case -- indicates no drawing should be done # this is a hangover from PIDDLE - suggest we ditch it since it is not used anywhere transparent = Color(0,0,0,alpha=0) _CMYK_white=CMYKColor(0,0,0,0) _PCMYK_white=PCMYKColor(0,0,0,0) _CMYK_black=CMYKColor(0,0,0,1) _PCMYK_black=PCMYKColor(0,0,0,100) # Special colors ReportLabBlueOLD = HexColor(0x4e5688) ReportLabBlue = HexColor(0x00337f) ReportLabBluePCMYK = PCMYKColor(100,65,0,30,spotName='Pantone 288U') ReportLabLightBlue = HexColor(0xb7b9d3) ReportLabFidBlue=HexColor(0x3366cc) ReportLabFidRed=HexColor(0xcc0033) ReportLabGreen = HexColor(0x336600) ReportLabLightGreen = HexColor(0x339933) # color constants -- mostly from HTML standard aliceblue = HexColor(0xF0F8FF) antiquewhite = HexColor(0xFAEBD7) aqua = HexColor(0x00FFFF) aquamarine = HexColor(0x7FFFD4) azure = HexColor(0xF0FFFF) beige = HexColor(0xF5F5DC) bisque = HexColor(0xFFE4C4) black = HexColor(0x000000) blanchedalmond = HexColor(0xFFEBCD) blue = HexColor(0x0000FF) blueviolet = HexColor(0x8A2BE2) brown = HexColor(0xA52A2A) burlywood = HexColor(0xDEB887) cadetblue = HexColor(0x5F9EA0) chartreuse = HexColor(0x7FFF00) chocolate = HexColor(0xD2691E) coral = HexColor(0xFF7F50) cornflowerblue = cornflower = HexColor(0x6495ED) cornsilk = HexColor(0xFFF8DC) crimson = HexColor(0xDC143C) cyan = HexColor(0x00FFFF) darkblue = HexColor(0x00008B) darkcyan = HexColor(0x008B8B) darkgoldenrod = HexColor(0xB8860B) darkgray = HexColor(0xA9A9A9) darkgrey = darkgray darkgreen = HexColor(0x006400) darkkhaki = HexColor(0xBDB76B) darkmagenta = HexColor(0x8B008B) darkolivegreen = HexColor(0x556B2F) darkorange = HexColor(0xFF8C00) darkorchid = HexColor(0x9932CC) darkred = HexColor(0x8B0000) darksalmon = HexColor(0xE9967A) darkseagreen = HexColor(0x8FBC8B) darkslateblue = HexColor(0x483D8B) darkslategray = HexColor(0x2F4F4F) darkslategrey = darkslategray darkturquoise = HexColor(0x00CED1) darkviolet = HexColor(0x9400D3) deeppink = HexColor(0xFF1493) deepskyblue = HexColor(0x00BFFF) dimgray = HexColor(0x696969) dimgrey = dimgray dodgerblue = HexColor(0x1E90FF) firebrick = HexColor(0xB22222) floralwhite = HexColor(0xFFFAF0) forestgreen = HexColor(0x228B22) fuchsia = HexColor(0xFF00FF) gainsboro = HexColor(0xDCDCDC) ghostwhite = HexColor(0xF8F8FF) gold = HexColor(0xFFD700) goldenrod = HexColor(0xDAA520) gray = HexColor(0x808080) grey = gray green = HexColor(0x008000) greenyellow = HexColor(0xADFF2F) honeydew = HexColor(0xF0FFF0) hotpink = HexColor(0xFF69B4) indianred = HexColor(0xCD5C5C) indigo = HexColor(0x4B0082) ivory = HexColor(0xFFFFF0) khaki = HexColor(0xF0E68C) lavender = HexColor(0xE6E6FA) lavenderblush = HexColor(0xFFF0F5) lawngreen = HexColor(0x7CFC00) lemonchiffon = HexColor(0xFFFACD) lightblue = HexColor(0xADD8E6) lightcoral = HexColor(0xF08080) lightcyan = HexColor(0xE0FFFF) lightgoldenrodyellow = HexColor(0xFAFAD2) lightgreen = HexColor(0x90EE90) lightgrey = HexColor(0xD3D3D3) lightpink = HexColor(0xFFB6C1) lightsalmon = HexColor(0xFFA07A) lightseagreen = HexColor(0x20B2AA) lightskyblue = HexColor(0x87CEFA) lightslategray = HexColor(0x778899) lightslategrey = lightslategray lightsteelblue = HexColor(0xB0C4DE) lightyellow = HexColor(0xFFFFE0) lime = HexColor(0x00FF00) limegreen = HexColor(0x32CD32) linen = HexColor(0xFAF0E6) magenta = HexColor(0xFF00FF) maroon = HexColor(0x800000) mediumaquamarine = HexColor(0x66CDAA) mediumblue = HexColor(0x0000CD) mediumorchid = HexColor(0xBA55D3) mediumpurple = HexColor(0x9370DB) mediumseagreen = HexColor(0x3CB371) mediumslateblue = HexColor(0x7B68EE) mediumspringgreen = HexColor(0x00FA9A) mediumturquoise = HexColor(0x48D1CC) mediumvioletred = HexColor(0xC71585) midnightblue = HexColor(0x191970) mintcream = HexColor(0xF5FFFA) mistyrose = HexColor(0xFFE4E1) moccasin = HexColor(0xFFE4B5) navajowhite = HexColor(0xFFDEAD) navy = HexColor(0x000080) oldlace = HexColor(0xFDF5E6) olive = HexColor(0x808000) olivedrab = HexColor(0x6B8E23) orange = HexColor(0xFFA500) orangered = HexColor(0xFF4500) orchid = HexColor(0xDA70D6) palegoldenrod = HexColor(0xEEE8AA) palegreen = HexColor(0x98FB98) paleturquoise = HexColor(0xAFEEEE) palevioletred = HexColor(0xDB7093) papayawhip = HexColor(0xFFEFD5) peachpuff = HexColor(0xFFDAB9) peru = HexColor(0xCD853F) pink = HexColor(0xFFC0CB) plum = HexColor(0xDDA0DD) powderblue = HexColor(0xB0E0E6) purple = HexColor(0x800080) red = HexColor(0xFF0000) rosybrown = HexColor(0xBC8F8F) royalblue = HexColor(0x4169E1) saddlebrown = HexColor(0x8B4513) salmon = HexColor(0xFA8072) sandybrown = HexColor(0xF4A460) seagreen = HexColor(0x2E8B57) seashell = HexColor(0xFFF5EE) sienna = HexColor(0xA0522D) silver = HexColor(0xC0C0C0) skyblue = HexColor(0x87CEEB) slateblue = HexColor(0x6A5ACD) slategray = HexColor(0x708090) slategrey = slategray snow = HexColor(0xFFFAFA) springgreen = HexColor(0x00FF7F) steelblue = HexColor(0x4682B4) tan = HexColor(0xD2B48C) teal = HexColor(0x008080) thistle = HexColor(0xD8BFD8) tomato = HexColor(0xFF6347) turquoise = HexColor(0x40E0D0) violet = HexColor(0xEE82EE) wheat = HexColor(0xF5DEB3) white = HexColor(0xFFFFFF) whitesmoke = HexColor(0xF5F5F5) yellow = HexColor(0xFFFF00) yellowgreen = HexColor(0x9ACD32) fidblue=HexColor(0x3366cc) fidred=HexColor(0xcc0033) fidlightblue=HexColor("#d6e0f5") ColorType=type(black) ################################################################ # # Helper functions for dealing with colors. These tell you # which are predefined, so you can print color charts; # and can give the nearest match to an arbitrary color object # ################################################################# def colorDistance(col1, col2): """Returns a number between 0 and root(3) stating how similar two colours are - distance in r,g,b, space. Only used to find names for things.""" return math.sqrt( (col1.red - col2.red)**2 + (col1.green - col2.green)**2 + (col1.blue - col2.blue)**2 ) def cmykDistance(col1, col2): """Returns a number between 0 and root(4) stating how similar two colours are - distance in r,g,b, space. Only used to find names for things.""" return math.sqrt( (col1.cyan - col2.cyan)**2 + (col1.magenta - col2.magenta)**2 + (col1.yellow - col2.yellow)**2 + (col1.black - col2.black)**2 ) _namedColors = None def getAllNamedColors(): #returns a dictionary of all the named ones in the module # uses a singleton for efficiency global _namedColors if _namedColors is not None: return _namedColors import colors _namedColors = {} for (name, value) in colors.__dict__.items(): if isinstance(value, Color): _namedColors[name] = value return _namedColors def describe(aColor,mode=0): '''finds nearest colour match to aColor. mode=0 print a string desription mode=1 return a string description mode=2 return (distance, colorName) ''' namedColors = getAllNamedColors() closest = (10, None, None) #big number, name, color for (name, color) in namedColors.items(): distance = colorDistance(aColor, color) if distance < closest[0]: closest = (distance, name, color) if mode<=1: s = 'best match is %s, distance %0.4f' % (closest[1], closest[0]) if mode==0: print s else: return s elif mode==2: return (closest[1], closest[0]) else: raise ValueError, "Illegal value for mode "+str(mode) def hue2rgb(m1, m2, h): if h<0: h += 1 if h>1: h -= 1 if h*6<1: return m1+(m2-m1)*h*6 if h*2<1: return m2 if h*3<2: return m1+(m2-m1)*(4-6*h) return m1 def hsl2rgb(h, s, l): if l<=0.5: m2 = l*(s+1) else: m2 = l+s-l*s m1 = l*2-m2 return hue2rgb(m1, m2, h+1./3),hue2rgb(m1, m2, h),hue2rgb(m1, m2, h-1./3) class cssParse: def pcVal(self,v): v = v.strip() try: c=eval(v[:-1]) if not isinstance(c,(float,int)): raise ValueError c=min(100,max(0,c))/100. except: raise ValueError('bad percentage argument value %r in css color %r' % (v,self.s)) return c def rgbPcVal(self,v): return int(self.pcVal(v)*255+0.5)/255. def rgbVal(self,v): v = v.strip() try: c=eval(v[:]) if not isinstance(c,int): raise ValueError return int(min(255,max(0,c)))/255. except: raise ValueError('bad argument value %r in css color %r' % (v,self.s)) def hueVal(self,v): v = v.strip() try: c=eval(v[:]) if not isinstance(c,(int,float)): raise ValueError return ((c%360+360)%360)/360. except: raise ValueError('bad hue argument value %r in css color %r' % (v,self.s)) def alphaVal(self,v,c=1,n='alpha'): try: a = eval(v.strip()) if not isinstance(a,(int,float)): raise ValueError return min(c,max(0,a)) except: raise ValueError('bad %s argument value %r in css color %r' % (n,v,self.s)) def __call__(self,s): s = s.strip() hsl = s.startswith('hsl') rgb = s.startswith('rgb') cmyk = s.startswith('cmyk') c = 1 if hsl: n = 3 if rgb: n = 3 if cmyk: n = 4 else: cmyk = s.startswith('pcmyk') if cmyk: n = 5 c = 100 if not (rgb or hsl or cmyk): return None self.s = s n = s[n:] ha = n.startswith('a') n = n[(ha and 1 or 0):].strip() if not n.startswith('(') or not n.endswith(')'): raise ValueError('improperly formatted css style color %r' % s) n = n[1:-1].split(',') #strip parens and split on comma a = len(n) b = cmyk and 4 or 3 if ha and a!=(b+1) or not ha and a!=b: raise ValueError('css color %r has wrong number of components' % s) if ha: n,a = n[:b],self.alphaVal(n[b],c) else: a = c if cmyk: C = self.alphaVal(n[0],c,'cyan') M = self.alphaVal(n[1],c,'magenta') Y = self.alphaVal(n[2],c,'yellow') K = self.alphaVal(n[3],c,'black') return (c>1 and PCMYKColor or CMYKColor)(C,M,Y,K,alpha=a) else: if hsl: R,G,B= hsl2rgb(self.hueVal(n[0]),self.pcVal(n[1]),self.pcVal(n[2])) else: R,G,B = map('%' in n[0] and self.rgbPcVal or self.rgbVal,n) return Color(R,G,B,a) cssParse=cssParse() class toColor: def __init__(self): self.extraColorsNS = {} #used for overriding/adding to existing color names #make case insensitive if that's your wish def setExtraColorsNameSpace(self,NS): self.extraColorsNS = NS def __call__(self,arg,default=None): '''try to map an arbitrary arg to a color instance >>> toColor('rgb(128,0,0)')==toColor('rgb(50%,0%,0%)') True >>> toColor('rgb(50%,0%,0%)')!=Color(0.5,0,0,1) True >>> toColor('hsl(0,100%,50%)')==toColor('rgb(255,0,0)') True >>> toColor('hsl(-120,100%,50%)')==toColor('rgb(0,0,255)') True >>> toColor('hsl(120,100%,50%)')==toColor('rgb(0,255,0)') True >>> toColor('rgba(255,0,0,0.5)')==Color(1,0,0,0.5) True >>> toColor('cmyk(1,0,0,0)')==CMYKColor(1,0,0,0) True >>> toColor('pcmyk(100,0,0,0)')==PCMYKColor(100,0,0,0) True >>> toColor('cmyka(1,0,0,0,0.5)')==CMYKColor(1,0,0,0,alpha=0.5) True >>> toColor('pcmyka(100,0,0,0,0.5)')==PCMYKColor(100,0,0,0,alpha=0.5) True ''' if isinstance(arg,Color): return arg if isinstance(arg,(tuple,list)): assert 3<=len(arg)<=4, 'Can only convert 3 and 4 sequences to color' assert 0<=min(arg) and max(arg)<=1 return len(arg)==3 and Color(arg[0],arg[1],arg[2]) or CMYKColor(arg[0],arg[1],arg[2],arg[3]) elif isinstance(arg,basestring): C = cssParse(arg) if C: return C if arg in self.extraColorsNS: return self.extraColorsNS[arg] C = getAllNamedColors() s = arg.lower() if s in C: return C[s] try: return toColor(eval(arg)) except: pass try: return HexColor(arg) except: if default is None: raise ValueError('Invalid color value %r' % arg) return default toColor = toColor() def toColorOrNone(arg,default=None): '''as above but allows None as a legal value''' if arg is None: return None else: return toColor(arg, default) def setColors(**kw): UNDEF = [] progress = 1 assigned = {} while kw and progress: progress = 0 for k, v in kw.items(): if isinstance(v,(tuple,list)): c = map(lambda x,UNDEF=UNDEF: toColor(x,UNDEF),v) if isinstance(v,tuple): c = tuple(c) ok = UNDEF not in c else: c = toColor(v,UNDEF) ok = c is not UNDEF if ok: assigned[k] = c del kw[k] progress = 1 if kw: raise ValueError("Can't convert\n%s" % str(kw)) getAllNamedColors() for k, c in assigned.items(): globals()[k] = c if isinstance(c,Color): _namedColors[k] = c def Whiter(c,f): '''given a color combine with white as c*f w*(1-f) 0<=f<=1''' c = toColor(c) if isinstance(c,CMYKColorSep): c = c.clone() if isinstance(c,PCMYKColorSep): c.__class__ = PCMYKColor else: c.__class__ = CMYKColor if isinstance(c,PCMYKColor): w = _PCMYK_white elif isinstance(c,CMYKColor): w = _CMYK_white else: w = white return linearlyInterpolatedColor(w, c, 0, 1, f) def Blacker(c,f): '''given a color combine with black as c*f+b*(1-f) 0<=f<=1''' c = toColor(c) if isinstance(c,CMYKColorSep): c = c.clone() if isinstance(c,PCMYKColorSep): c.__class__ = PCMYKColor else: c.__class__ = CMYKColor if isinstance(c,PCMYKColor): b = _PCMYK_black elif isinstance(c,CMYKColor): b = _CMYK_black else: b = black return linearlyInterpolatedColor(b, c, 0, 1, f) def fade(aSpotColor, percentages): """Waters down spot colors and returns a list of new ones e.g fade(myColor, [100,80,60,40,20]) returns a list of five colors """ out = [] for percent in percentages: frac = percent * 0.01 #assume they give us numbers from 0 to 100 newCyan = frac * aSpotColor.cyan newMagenta = frac * aSpotColor.magenta newYellow = frac * aSpotColor.yellow newBlack = frac * aSpotColor.black newDensity = frac * aSpotColor.density newSpot = CMYKColor( newCyan, newMagenta, newYellow, newBlack, spotName = aSpotColor.spotName, density = newDensity) out.append(newSpot) return out def _enforceError(kind,c,tc): if isinstance(tc,Color): xtra = tc._lookupName() xtra = xtra and '(%s)'%xtra or '' else: xtra = '' raise ValueError('Non %s color %r%s' % (kind,c,xtra)) def _enforceSEP(c): '''pure separating colors only, this makes black a problem''' tc = toColor(c) if not isinstance(tc,CMYKColorSep): _enforceError('separating',c,tc) return tc def _enforceSEP_BLACK(c): '''separating + blacks only''' tc = toColor(c) if not isinstance(tc,CMYKColorSep): if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey tc = _CMYK_black.clone(density=1-tc.red) elif not (isinstance(tc,CMYKColor) and tc.cyan==tc.magenta==tc.yellow==0): #ie some shade of grey _enforceError('separating or black',c,tc) return tc def _enforceSEP_CMYK(c): '''separating or cmyk only''' tc = toColor(c) if not isinstance(tc,CMYKColorSep): if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey tc = _CMYK_black.clone(density=1-tc.red) elif not isinstance(tc,CMYKColor): _enforceError('separating or CMYK',c,tc) return tc def _enforceCMYK(c): '''cmyk outputs only (rgb greys converted)''' tc = toColor(c) if not isinstance(tc,CMYKColor): if isinstance(tc,Color) and tc.red==tc.blue==tc.green: #ahahahah it's a grey tc = _CMYK_black.clone(black=1-tc.red,alpha=tc.alpha) else: _enforceError('CMYK',c,tc) elif isinstance(tc,CMYKColorSep): tc = tc.clone() tc.__class__ = CMYKColor return tc def _enforceRGB(c): tc = toColor(c) if isinstance(tc,CMYKColor): if tc.cyan==tc.magenta==tc.yellow==0: #ahahahah it's grey v = 1-tc.black*tc.density tc = Color(v,v,v,alpha=tc.alpha) else: _enforceError('RGB',c,tc) return tc def _chooseEnforceColorSpace(enforceColorSpace): if enforceColorSpace is not None and not callable(enforceColorSpace): if isinstance(enforceColorSpace,basestring): enforceColorSpace=enforceColorSpace.upper() if enforceColorSpace=='CMYK': enforceColorSpace = _enforceCMYK elif enforceColorSpace=='RGB': enforceColorSpace = _enforceRGB elif enforceColorSpace=='SEP': enforceColorSpace = _enforceSEP elif enforceColorSpace=='SEP_BLACK': enforceColorSpace = _enforceSEP_BLACK elif enforceColorSpace=='SEP_CMYK': enforceColorSpace = _enforceSEP_CMYK else: raise ValueError('Invalid value for Canvas argument enforceColorSpace=%r' % enforceColorSpace) return enforceColorSpace if __name__ == "__main__": import doctest doctest.testmod()
bsd-3-clause
adaur/SickRage
lib/guessit/transfo/guess_movie_title_from_position.py
28
8579
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2013 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function, unicode_literals from guessit.plugins.transformers import Transformer from guessit.matcher import found_property from guessit import u from guessit.patterns.list import all_separators from guessit.language import all_lang_prefixes_suffixes class GuessMovieTitleFromPosition(Transformer): def __init__(self): Transformer.__init__(self, -200) def supported_properties(self): return ['title'] def should_process(self, mtree, options=None): options = options or {} return not options.get('skip_title') and not mtree.guess.get('type', '').startswith('episode') @staticmethod def excluded_word(*values): for value in values: if value.clean_value.lower() in all_separators + all_lang_prefixes_suffixes: return True return False def process(self, mtree, options=None): """ try to identify the remaining unknown groups by looking at their position relative to other known elements """ if 'title' in mtree.info: return path_nodes = list(filter(lambda x: x.category == 'path', mtree.nodes())) basename = path_nodes[-2] all_valid = lambda leaf: len(leaf.clean_value) > 0 basename_leftover = list(basename.unidentified_leaves(valid=all_valid)) try: folder = path_nodes[-3] folder_leftover = list(folder.unidentified_leaves()) except IndexError: folder = None folder_leftover = [] self.log.debug('folder: %s' % u(folder_leftover)) self.log.debug('basename: %s' % u(basename_leftover)) # specific cases: # if we find the same group both in the folder name and the filename, # it's a good candidate for title if (folder_leftover and basename_leftover and folder_leftover[0].clean_value == basename_leftover[0].clean_value and not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0])): found_property(folder_leftover[0], 'title', confidence=0.8) return # specific cases: # if the basename contains a number first followed by an unidentified # group, and the folder only contains 1 unidentified one, then we have # a series # ex: Millenium Trilogy (2009)/(1)The Girl With The Dragon Tattoo(2009).mkv if len(folder_leftover) > 0 and len(basename_leftover) > 1: series = folder_leftover[0] film_number = basename_leftover[0] title = basename_leftover[1] basename_leaves = list(basename.leaves()) num = None try: num = int(film_number.clean_value) except ValueError: pass if num: self.log.debug('series: %s' % series.clean_value) self.log.debug('title: %s' % title.clean_value) if (series.clean_value != title.clean_value and series.clean_value != film_number.clean_value and basename_leaves.index(film_number) == 0 and basename_leaves.index(title) == 1 and not GuessMovieTitleFromPosition.excluded_word(title, series)): found_property(title, 'title', confidence=0.6) found_property(series, 'filmSeries', confidence=0.6) found_property(film_number, 'filmNumber', num, confidence=0.6) return if folder: year_group = folder.first_leaf_containing('year') if year_group: groups_before = folder.previous_unidentified_leaves(year_group) if groups_before: try: node = next(groups_before) if not GuessMovieTitleFromPosition.excluded_word(node): found_property(node, 'title', confidence=0.8) return except StopIteration: pass # if we have either format or videoCodec in the folder containing the # file or one of its parents, then we should probably look for the title # in there rather than in the basename try: props = list(mtree.previous_leaves_containing(mtree.children[-2], ['videoCodec', 'format', 'language'])) except IndexError: props = [] if props: group_idx = props[0].node_idx[0] if all(g.node_idx[0] == group_idx for g in props): # if they're all in the same group, take leftover info from there leftover = mtree.node_at((group_idx,)).unidentified_leaves() try: node = next(leftover) if not GuessMovieTitleFromPosition.excluded_word(node): found_property(node, 'title', confidence=0.7) return except StopIteration: pass # look for title in basename if there are some remaining unidentified # groups there if basename_leftover: # if basename is only one word and the containing folder has at least # 3 words in it, we should take the title from the folder name # ex: Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi # ex: Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi <-- TODO: gets caught here? if (basename_leftover[0].clean_value.count(' ') == 0 and folder_leftover and folder_leftover[0].clean_value.count(' ') >= 2 and not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0])): found_property(folder_leftover[0], 'title', confidence=0.7) return # if there are only many unidentified groups, take the first of which is # not inside brackets or parentheses. # ex: Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi if basename_leftover[0].is_explicit(): for basename_leftover_elt in basename_leftover: if not basename_leftover_elt.is_explicit() and not GuessMovieTitleFromPosition.excluded_word(basename_leftover_elt): found_property(basename_leftover_elt, 'title', confidence=0.8) return # if all else fails, take the first remaining unidentified group in the # basename as title if not GuessMovieTitleFromPosition.excluded_word(basename_leftover[0]): found_property(basename_leftover[0], 'title', confidence=0.6) return # if there are no leftover groups in the basename, look in the folder name if folder_leftover and not GuessMovieTitleFromPosition.excluded_word(folder_leftover[0]): found_property(folder_leftover[0], 'title', confidence=0.5) return # if nothing worked, look if we have a very small group at the beginning # of the basename basename_leftover = basename.unidentified_leaves(valid=lambda leaf: True) try: node = next(basename_leftover) if not GuessMovieTitleFromPosition.excluded_word(node): found_property(node, 'title', confidence=0.4) return except StopIteration: pass
gpl-3.0
numericube/twistranet
twistranet/twistapp/forms/fields.py
1
7275
""" The twistranet Fields """ import os import urlparse from django import forms from django.core.validators import URL_VALIDATOR_USER_AGENT from django.db import models from django.core.validators import EMPTY_VALUES from django.utils.encoding import smart_unicode from django.utils.translation import ugettext as _ from twistranet.twistapp.lib.log import log import widgets from validators import URLValidator, ViewPathValidator class PermissionFormField(forms.ChoiceField): """ This overrides the regular ChoiceField to add additional rendering. """ widget = widgets.PermissionsWidget def __init__( self, choices = (), required=True, widget=None, max_length = None, label=None, initial=None, help_text=None, to_field_name=None, *args, **kwargs ): super(PermissionFormField, self).__init__(choices, required, widget, label, initial, help_text, *args, **kwargs) # We put this here to avoid import errors self.default_error_messages = { 'invalid_choice': _(u'Select a valid choice. That choice is not one of' u' the available choices.'), } class PermissionsFormField(forms.ChoiceField): """ This overrides the regular ChoiceField to add additional rendering. """ def valid_value(self, value): "Check to see if the provided value is a valid choice" for id, name, description in self.choices: if value == smart_unicode(id): return True return False class ModelInputField(forms.Field): """ This is a field used to enter a foreign key value inside a classic Input widget. This is used when there are a lot of values to check against (and ModelChoiceField is not efficient anymore), plus the value is checked against the QuerySet very late in the process. """ def __init__( self, model, filter = None, required=True, widget=None, label=None, initial=None, help_text=None, to_field_name=None, *args, **kwargs ): super(ModelInputField, self).__init__(required, widget, label, initial, help_text, *args, **kwargs) self.model = model self.filter = filter self.to_field_name = to_field_name # We put this here to avoid import errors self.default_error_messages = { 'invalid_choice': _(u'Select a valid choice. That choice is not one of' u' the available choices.'), } def to_python(self, value): """ 'Resolve' the query set at validation time. This way, we're sure to have the freshest version of the QS. """ if value in EMPTY_VALUES: return None try: key = self.to_field_name or 'pk' qs = self.model.objects.get_query_set() if self.filter: qs = qs.filter(self.filter) value = qs.get(**{key: value}) except self.queryset.model.DoesNotExist: raise ValidationError(self.error_messages['invalid_choice']) return value class ResourceFormField(forms.MultiValueField): """ The ResourceFormField is a resource browser. You can pass it a few parameters: - model which is the subclass you want to read your resources from (default: twistranet.Resource). Useful if you want to display only images for example. - filter which will be passed to model.objects.filter() call before rendering the widget. These model / filter params are the only solution to handle choices WITH the security model. - allow_upload (upload is ok) - allow_select (can select an existing resource from the given filter) """ widget = widgets.ResourceWidget field = ModelInputField model = None filter = None def __init__(self, *args, **kwargs): # Initial values from twistranet.twistapp.models import Resource self.model = kwargs.pop("model", Resource) self.filter = kwargs.pop("filter", None) self.allow_upload = kwargs.pop("allow_upload", True) self.allow_select = kwargs.pop("allow_select", True) self.display_renderer = kwargs.pop("display_renderer", True) self.media_type = kwargs.pop("media_type", 'file') self.widget = kwargs.pop("widget", self.widget( model = self.model, filter = self.filter, allow_upload = self.allow_upload, allow_select = self.allow_select, display_renderer = self.display_renderer, media_type = self.media_type )) self.required = kwargs.pop("required", True) # The fields we'll use: # - A ModelInputField used to handle the ForeignKey. # - A FileField used to handle data upload. fields = [] field0 = self.field(model = self.model, filter = self.filter, required = self.required) # no more used # field1 = forms.FileField(required = False) dummy = forms.CharField(required = False) if self.allow_select or self.allow_upload: fields.append(field0) else: fields.append(dummy) # # Compatibility with form_for_instance # if kwargs.get('initial'): # initial = kwargs['initial'] # else: # initial = None # self.widget = self.widget(initial=initial) super(ResourceFormField, self).__init__(fields, label = kwargs.pop('label'), required = False) #self.required) def prepare_value(self, value): """ Pass the query_set to the underlying widget, so that it's computed as late as possible. """ qs = self.model.objects.get_query_set() if self.filter: qs = qs.filter(self.filter) self.widget.query_set = qs return super(ResourceFormField, self).prepare_value(value) def compress(self, data_list): return data_list # URLField which also accept relative urls class LargeURLField(forms.CharField): """ A URL field which accepts internal link and intranet links (without a standard domain) """ def __init__(self, max_length=None, min_length=None, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT, *args, **kwargs): super(LargeURLField, self).__init__(max_length, min_length, *args, **kwargs) self.validators.append(URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent)) def to_python(self, value): if value: value = urlparse.urlunparse(urlparse.urlparse(value)) return super(LargeURLField, self).to_python(value) class ViewPathField(forms.CharField): """ View Path field (could be improved) """ def __init__(self, max_length=None, min_length=None, *args, **kwargs): super(ViewPathField, self).__init__(max_length, min_length, *args, **kwargs) self.validators.append(ViewPathValidator()) self.default_error_messages = { 'invalid': _(u'Enter a valid Path.'),}
agpl-3.0
SravanthiSinha/edx-platform
pavelib/utils/test/suites/bokchoy_suite.py
57
6408
""" Class used for defining and running Bok Choy acceptance test suite """ from time import sleep from paver.easy import sh from pavelib.utils.test.suites.suite import TestSuite from pavelib.utils.envs import Env from pavelib.utils.test import bokchoy_utils from pavelib.utils.test import utils as test_utils try: from pygments.console import colorize except ImportError: colorize = lambda color, text: text # pylint: disable=invalid-name __test__ = False # do not collect class BokChoyTestSuite(TestSuite): """ TestSuite for running Bok Choy tests Properties (below is a subset): test_dir - parent directory for tests log_dir - directory for test output report_dir - directory for reports (e.g., coverage) related to test execution xunit_report - directory for xunit-style output (xml) fasttest - when set, skip various set-up tasks (e.g., collectstatic) serversonly - prepare and run the necessary servers, only stopping when interrupted with Ctrl-C testsonly - assume servers are running (as per above) and run tests with no setup or cleaning of environment test_spec - when set, specifies test files, classes, cases, etc. See platform doc. default_store - modulestore to use when running tests (split or draft) """ def __init__(self, *args, **kwargs): super(BokChoyTestSuite, self).__init__(*args, **kwargs) self.test_dir = Env.BOK_CHOY_DIR / kwargs.get('test_dir', 'tests') self.log_dir = Env.BOK_CHOY_LOG_DIR self.report_dir = Env.BOK_CHOY_REPORT_DIR self.xunit_report = self.report_dir / "xunit.xml" self.cache = Env.BOK_CHOY_CACHE self.fasttest = kwargs.get('fasttest', False) self.serversonly = kwargs.get('serversonly', False) self.testsonly = kwargs.get('testsonly', False) self.test_spec = kwargs.get('test_spec', None) self.default_store = kwargs.get('default_store', None) self.verbosity = kwargs.get('verbosity', 2) self.extra_args = kwargs.get('extra_args', '') self.har_dir = self.log_dir / 'hars' self.imports_dir = kwargs.get('imports_dir', None) def __enter__(self): super(BokChoyTestSuite, self).__enter__() # Ensure that we have a directory to put logs and reports self.log_dir.makedirs_p() self.har_dir.makedirs_p() self.report_dir.makedirs_p() test_utils.clean_reports_dir() if not (self.fasttest or self.skip_clean): test_utils.clean_test_files() msg = colorize('green', "Checking for mongo, memchache, and mysql...") print msg bokchoy_utils.check_services() if not self.testsonly: self.prepare_bokchoy_run() msg = colorize('green', "Confirming servers have started...") print msg bokchoy_utils.wait_for_test_servers() if self.serversonly: self.run_servers_continuously() def __exit__(self, exc_type, exc_value, traceback): super(BokChoyTestSuite, self).__exit__(exc_type, exc_value, traceback) msg = colorize('green', "Cleaning up databases...") print msg # Clean up data we created in the databases sh("./manage.py lms --settings bok_choy flush --traceback --noinput") bokchoy_utils.clear_mongo() def prepare_bokchoy_run(self): """ Sets up and starts servers for a Bok Choy run. If --fasttest is not specified then static assets are collected """ sh("{}/scripts/reset-test-db.sh".format(Env.REPO_ROOT)) if not self.fasttest: self.generate_optimized_static_assets() # Clear any test data already in Mongo or MySQLand invalidate # the cache bokchoy_utils.clear_mongo() self.cache.flush_all() sh( "DEFAULT_STORE={default_store}" " ./manage.py lms --settings bok_choy loaddata --traceback" " common/test/db_fixtures/*.json".format( default_store=self.default_store, ) ) if self.imports_dir: sh( "DEFAULT_STORE={default_store}" " ./manage.py cms --settings=bok_choy import {import_dir}".format( default_store=self.default_store, import_dir=self.imports_dir ) ) # Ensure the test servers are available msg = colorize('green', "Confirming servers are running...") print msg bokchoy_utils.start_servers(self.default_store) def run_servers_continuously(self): """ Infinite loop. Servers will continue to run in the current session unless interrupted. """ print 'Bok-choy servers running. Press Ctrl-C to exit...\n' print 'Note: pressing Ctrl-C multiple times can corrupt noseid files and system state. Just press it once.\n' while True: try: sleep(10000) except KeyboardInterrupt: print "Stopping bok-choy servers.\n" break @property def cmd(self): """ This method composes the nosetests command to send to the terminal. If nosetests aren't being run, the command returns an empty string. """ # Default to running all tests if no specific test is specified if not self.test_spec: test_spec = self.test_dir else: test_spec = self.test_dir / self.test_spec # Skip any additional commands (such as nosetests) if running in # servers only mode if self.serversonly: return "" # Construct the nosetests command, specifying where to save # screenshots and XUnit XML reports cmd = [ "DEFAULT_STORE={}".format(self.default_store), "SCREENSHOT_DIR='{}'".format(self.log_dir), "BOK_CHOY_HAR_DIR='{}'".format(self.har_dir), "SELENIUM_DRIVER_LOG_DIR='{}'".format(self.log_dir), "nosetests", test_spec, "--with-xunit", "--xunit-file={}".format(self.xunit_report), "--verbosity={}".format(self.verbosity), ] if self.pdb: cmd.append("--pdb") cmd.append(self.extra_args) cmd = (" ").join(cmd) return cmd
agpl-3.0