"""
Filters the original item model.
"""
from PyQt4 import QtCore, QtGui
from datafinder.gui.user.models.repository.filter.base_filter import BaseRepositoryFilter
__version__ = "$Revision-Id:$"
class LeafFilter(BaseRepositoryFilter, QtGui.QSortFilterProxyModel):
"""
This Model wraps the L{RepositoryModel<datafinder.gui.user.models.repository.repository.RepositoryModel>}.
It filters files and links.
"""
def __init__(self, repositoryModel):
"""
Constructor.
@param repositoryModel: Repository model.
@type repositoryModel: L{RepositoryModel<datafinder.gui.user.models.repository.repository.RepositoryModel>}
"""
BaseRepositoryFilter.__init__(self, repositoryModel)
QtGui.QSortFilterProxyModel.__init__(self, None)
self._columnCount = 1
self.setSourceModel(self._repositoryModel)
self.connect(self._repositoryModel, QtCore.SIGNAL("updateSignal"), self._emitActiveIndexChangedSignal)
def columnCount(self, _=QtCore.QModelIndex()):
"""
@see: L{columnCount<PyQt4.QtGui.QSortFilterProxyModel.columnCount>}
"""
return self._columnCount
def mapFromSource(self, index):
"""
@see: L{mapFromSource<datafinder.gui.user.models.filter.BaseRepositoryFilter.mapFromSource>}
Re-implemented to resolve name clash.
"""
return QtGui.QSortFilterProxyModel.mapFromSource(self, index)
def mapToSource(self, index):
"""
@see: L{mapToSource<datafinder.gui.user.models.filter.BaseRepositoryFilter.mapToSource>}
Re-implemented to resolve name clash.
"""
return QtGui.QSortFilterProxyModel.mapToSource(self, index)
def filterAcceptsRow(self, row, parent):
"""
@see: L{filterAcceptsRow<PyQt4.QtGui.QSortFilterProxyModel.filterAcceptsRow>}
"""
index = self._repositoryModel.index(row, 0, parent)
item = self._repositoryModel.nodeFromIndex(index)
acceptsRow = False
        if item is not None:
acceptsRow = item.isCollection
return acceptsRow
def _emitActiveIndexChangedSignal(self, index):
"""
Signals change of the active index.
"""
index = self.mapFromSource(index)
self.emit(QtCore.SIGNAL("updateSignal"), index)
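
# Illustrative wiring sketch (not part of the original module). The names
# `repositoryModel` and `treeView` are assumptions standing in for an existing
# RepositoryModel instance and a QTreeView:
#
#     leafFilter = LeafFilter(repositoryModel)
#     treeView.setModel(leafFilter)  # the view now only shows collections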
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/models/repository/filter/leaf_filter.py",
"copies": "1",
"size": "4159",
"license": "bsd-3-clause",
"hash": -1782452074379508000,
"line_mean": 34.4824561404,
"line_max": 115,
"alpha_frac": 0.6864630921,
"autogenerated": false,
"ratio": 4.192540322580645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014424776123572527,
"num_lines": 114
} |
"""
Implements aggregated type-specific validation functionalities.
"""
from datetime import datetime
from decimal import Decimal
from datafinder.core.configuration.properties.validators import base_validators
__version__ = "$Revision-Id:$"
class StringValidator(base_validators.AndValidator):
""" Aggregates useful checks for checking string values. """
def __init__(self, minimum=None, maximum=None, pattern=None,
options=None, optionsMandatory=True):
"""
@param minimum: Minimum length of the string.
@type minimum: C{int}
@param maximum: Maximum length of the string.
@type maximum: C{int}
@param pattern: Regular expression pattern.
@type pattern: C{str}
@param options: List of options the value has to be taken from.
@type options: C{list} of C{unicode}
        @param optionsMandatory: Indicates whether the value must
                                 be one of C{options}. Default: C{True}
@type optionsMandatory: C{bool}
"""
base_validators.AndValidator.__init__(self, list())
self.validators.append(base_validators.AreTypesMatched([str, unicode]))
self.validators.append(base_validators.IsBinaryStringDecodable())
self.validators.append(base_validators.IsLengthInRange(minimum, maximum))
        if pattern is not None:
            self.validators.append(base_validators.IsPatternMatched(pattern))
        if options is not None:
            self.validators.append(base_validators.AreOptionsMatched(options, optionsMandatory))
class NumberValidator(base_validators.AndValidator):
""" Aggregates useful checks for checking numeric values. """
def __init__(self, minimum=None, maximum=None, minDecimalPlaces=None,
maxDecimalPlaces=None, options=None, optionsMandatory=True):
"""
        @param minimum: Minimum value.
        @type minimum: C{decimal.Decimal}
        @param maximum: Maximum value.
        @type maximum: C{decimal.Decimal}
        @param minDecimalPlaces: Minimum number of decimal places.
        @type minDecimalPlaces: C{int}
        @param maxDecimalPlaces: Maximum number of decimal places.
        @type maxDecimalPlaces: C{int}
        @param options: List of options the value has to be taken from.
        @type options: C{list} of C{decimal.Decimal}
        @param optionsMandatory: Indicates whether the value must
                                 be one of C{options}. Default: C{True}
"""
base_validators.AndValidator.__init__(self, list())
self.validators.append(base_validators.AreTypesMatched([int, float, Decimal]))
self.validators.append(base_validators.IsInRange(minimum, maximum))
self.validators.append(base_validators.IsNumberOfDecimalPlacesInRange(minDecimalPlaces, maxDecimalPlaces))
        if options is not None:
self.validators.append(base_validators.AreOptionsMatched(options, optionsMandatory))
class BooleanValidator(base_validators.AndValidator):
""" Aggregates useful checks for boolean values. """
def __init__(self):
base_validators.AndValidator.__init__(self, list())
self.validators.append(base_validators.AreTypesMatched([bool]))
class DatetimeValidator(base_validators.AndValidator):
""" The class aggregates all checks that are useful for validation of date times. """
def __init__(self, minimum=None, maximum=None, options=None, optionsMandatory=True):
"""
        @param minimum: Minimum date and time value.
        @type minimum: C{datetime}
        @param maximum: Maximum date and time value.
        @type maximum: C{datetime}
        @param options: List of options the value has to be taken from.
        @type options: C{list} of C{datetime}
        @param optionsMandatory: Indicates whether the value must
                                 be one of C{options}. Default: C{True}
@type optionsMandatory: C{bool}
"""
base_validators.AndValidator.__init__(self, list())
self.validators.append(base_validators.AreTypesMatched([datetime]))
self.validators.append(base_validators.IsInRange(minimum, maximum))
        if options is not None:
self.validators.append(base_validators.AreOptionsMatched(options, optionsMandatory))
class ListValidator(base_validators.AndValidator):
""" The class aggregates all checks that are useful for validation of lists. """
def __init__(self, minimum=None, maximum=None, itemValidators=None):
"""
@param minimum: Minimum length of the list.
@type minimum: C{int}
@param maximum: Maximum length of the list.
@type maximum: C{int}
@param itemValidators: List of checks for single items.
All checks are tried until at least one succeeds.
@type itemValidators: C{list}
"""
base_validators.AndValidator.__init__(self, list())
self.validators.append(base_validators.AreTypesMatched([list]))
self.validators.append(base_validators.IsInRange(minimum, maximum))
        if itemValidators is not None:
self.validators.append(base_validators.ForEach(base_validators.OrValidator(itemValidators)))
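
# Minimal self-test sketch (not part of the original module; assumes Python 2,
# as the validators above rely on `unicode`). Demonstrates how an aggregated
# validator raises ValueError for non-conforming values.
if __name__ == "__main__":
    nameValidator = StringValidator(minimum=3, maximum=10, pattern=r"^[a-z]+$")
    nameValidator(u"hello")  # conforms: passes silently
    try:
        nameValidator(u"Hi")  # too short and violates the lower-case pattern
    except ValueError, error:
        print "Rejected as expected: %s" % str(error)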
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/properties/validators/type_validators.py",
"copies": "1",
"size": "6957",
"license": "bsd-3-clause",
"hash": 2909858175148151000,
"line_mean": 42.8838709677,
"line_max": 114,
"alpha_frac": 0.6760097743,
"autogenerated": false,
"ratio": 4.299752781211372,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017042346630988435,
"num_lines": 155
} |
"""
Implements factory methods for objects that can be used to
access a WebDAV file system.
"""
__version__ = "$Revision-Id:$"
import logging
from webdav.Connection import WebdavError
from datafinder.persistence.adapters.webdav_.configuration import Configuration
from datafinder.persistence.adapters.webdav_.connection_pool import WebdavConnectionPool
from datafinder.persistence.adapters.webdav_ import constants
from datafinder.persistence.adapters.webdav_.util import ItemIdentifierMapper, createCollectionStorer
from datafinder.persistence.adapters.webdav_.data.adapter import DataWebdavAdapter
from datafinder.persistence.adapters.webdav_.metadata.adapter import MetadataWebdavAdapter
from datafinder.persistence.adapters.webdav_.principal_search.adapter import PrincipalSearchWebdavAdapter
from datafinder.persistence.adapters.webdav_.privileges.adapter import PrivilegeWebdavAdapter, SimplePrivilegeWebdavAdapter
from datafinder.persistence.adapters.webdav_.privileges.privileges_mapping import PrivilegeMapper
from datafinder.persistence.adapters.webdav_.search.adapter import SearchWebdavAdapter
from datafinder.persistence.common.base_factory import BaseFileSystem
from datafinder.persistence.common.connection.manager import ConnectionPoolManager
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.principal_search.principalsearcher import NullPrincipalSearcher
_logger = logging.getLogger()
class FileSystem(BaseFileSystem):
"""
Implements factory methods of the different aspects of file system items.
Moreover, information of specific feature are available.
"""
_connectionManager = ConnectionPoolManager(constants.MAX_POOL_NUMBER)
def __init__(self, baseConfiguration):
"""
Constructor.
@param baseConfiguration: Object specifying configuration parameters.
@type baseConfiguration: L{BaseConfiguration<datafinder.persistence.common.configuration.BaseConfiguration>}
"""
BaseFileSystem.__init__(self)
self._configuration = Configuration(baseConfiguration)
self._hasMetadataSearchSupport = None
self._hasPrivilegeSupport = None
self._resourceTypeCache = dict()
self._connectionPool = self._getConnectionPool()
def _getConnectionPool(self):
""" Creates / retrieves a usable connection pool for the given configuration. """
connectionPool = self._connectionManager.get(self._configuration.baseUrl)
if connectionPool is None:
connectionPool = WebdavConnectionPool(self._configuration)
self._connectionManager.add(self._configuration.baseUrl, connectionPool)
return connectionPool
def updateCredentials(self, credentials):
""" @see: L{updateCredentials<datafinder.persistence.factory.FileSystem.updateCredentials>} """
try:
self._configuration.username = credentials["username"]
self._configuration.password = credentials["password"]
except KeyError:
raise PersistenceError("Invalid credentials provided.")
else:
self._connectionPool.reload()
def createDataStorer(self, identifier):
"""
Factory Method providing a WebDAV-specific data storer.
@return: WebDAV-specific implementation of the data interface.
        @rtype: L{DataWebdavAdapter<datafinder.persistence.adapters.webdav_.
                data.adapter.DataWebdavAdapter>}
"""
return DataWebdavAdapter(identifier, self._connectionPool,
ItemIdentifierMapper(self._configuration.baseUrl), resourceTypeCache=self._resourceTypeCache)
def createMetadataStorer(self, identifier):
"""
Factory Method providing a WebDAV-specific meta data storer.
@return: WebDAV-specific implementation of the meta data interface.
        @rtype: L{MetadataWebdavAdapter<datafinder.persistence.adapters.webdav_.
                metadata.adapter.MetadataWebdavAdapter>}
"""
return MetadataWebdavAdapter(
identifier, self._connectionPool, ItemIdentifierMapper(self._configuration.baseUrl),
hasMetadataSearchSupport=self.hasMetadataSearchSupport)
def createPrivilegeStorer(self, identifier):
"""
Factory Method providing a WebDAV-specific privilege storer.
@return: WebDAV-specific implementation of the privilege interface.
        @rtype: L{PrivilegeWebdavAdapter<datafinder.persistence.adapters.webdav_.
                privileges.adapter.PrivilegeWebdavAdapter>}
"""
if self.hasPrivilegeSupport:
return PrivilegeWebdavAdapter(identifier, self._connectionPool, ItemIdentifierMapper(self._configuration.baseUrl),
PrivilegeMapper(self._configuration.userCollectionUrl, self._configuration.groupCollectionUrl))
else:
return SimplePrivilegeWebdavAdapter(
identifier, self._connectionPool, ItemIdentifierMapper(self._configuration.baseUrl), PrivilegeMapper(None, None))
def createPrincipalSearcher(self):
"""
Factory method for the WebDAV-specific principal search object.
@return: WebDAV-specific implementation of the principal search interface.
        @rtype: L{PrincipalSearchWebdavAdapter<datafinder.persistence.adapters.webdav_.
                principal_search.adapter.PrincipalSearchWebdavAdapter>}
"""
if not self._configuration.userCollectionUrl or not self._configuration.groupCollectionUrl:
_logger.debug("Missing user or group URL to use the principal searcher. Thus, using a null value object.")
return NullPrincipalSearcher()
else:
return PrincipalSearchWebdavAdapter(
self._configuration.userCollectionUrl, self._configuration.groupCollectionUrl, self._connectionPool)
def createSearcher(self):
"""
Factory method for the WebDAV-specific search object.
@return: WebDAV-specific implementation of the search interface.
        @rtype: L{SearchWebdavAdapter<datafinder.persistence.adapters.webdav_.search.adapter.SearchWebdavAdapter>}
"""
return SearchWebdavAdapter(self._connectionPool, ItemIdentifierMapper(self._configuration.baseUrl))
def release(self):
""" Releases the acquired connection pool. """
self._connectionManager.remove(self._configuration.baseUrl)
@property
def hasCustomMetadataSupport(self):
"""
This is the WebDAV-specific implementation.
@note: Always returns C{True} because custom meta data support is a built-in WebDAV feature.
@see: L{FileSystem.hasCustomMetadataSupport<datafinder.persistence.factory.FileSystem.hasCustomMetadataSupport>}
"""
return True
@property
def hasMetadataSearchSupport(self):
"""
This is the WebDAV-specific implementation.
@see: L{FileSystem.hasMetadataSearchSupport<datafinder.persistence.factory.FileSystem.hasMetadataSearchSupport>}
"""
if self._hasMetadataSearchSupport is None:
connection = self._connectionPool.acquire()
try:
try:
collectionStorer = createCollectionStorer(self._configuration.baseUrl, connection, False)
self._hasMetadataSearchSupport = collectionStorer.daslBasicsearchSupportAvailable
except WebdavError:
self._hasMetadataSearchSupport = False
finally:
self._connectionPool.release(connection)
return self._hasMetadataSearchSupport
@property
def hasPrivilegeSupport(self):
"""
This is the WebDAV-specific implementation.
@see: L{FileSystem.hasPrivilegeSupport<datafinder.persistence.factory.FileSystem.hasPrivilegeSupport>}
"""
if self._hasPrivilegeSupport is None:
connection = self._connectionPool.acquire()
try:
try:
collectionStorer = createCollectionStorer(self._configuration.baseUrl, connection, False)
self._hasPrivilegeSupport = collectionStorer.aclSupportAvailable
except (WebdavError, AttributeError):
self._hasPrivilegeSupport = False
finally:
self._connectionPool.release(connection)
return self._hasPrivilegeSupport
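
# Illustrative usage sketch (not part of the original module). The URI and
# credentials are placeholders; BaseConfiguration is referenced from the
# constructor documentation above, and its exact signature is an assumption:
#
#     from datafinder.persistence.common.configuration import BaseConfiguration
#     fileSystem = FileSystem(BaseConfiguration("http://example.org/webdav/"))
#     fileSystem.updateCredentials({"username": u"demo", "password": u"secret"})
#     dataStorer = fileSystem.createDataStorer(u"/projects/report.txt")
#     fileSystem.release()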
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/webdav_/factory.py",
"copies": "1",
"size": "10559",
"license": "bsd-3-clause",
"hash": -8050509645168745000,
"line_mean": 43.5172413793,
"line_max": 137,
"alpha_frac": 0.692679231,
"autogenerated": false,
"ratio": 4.861418047882136,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.022104819183294273,
"num_lines": 231
} |
"""
Implements the model component of the create configuration dialog.
"""
__version__ = "$Revision-Id:$"
from datafinder.common import logger
from datafinder.core.error import ConfigurationError
_logger = logger.getDefaultLogger()
class CreateConfigurationModel(object):
""" Implements the model component of the create configuration dialog. """
def __init__(self, repositoryManager):
""" Constructor. """
self._repositoryManager = repositoryManager
self._preferences = repositoryManager.preferences
self._configurationUri = None
self._dataPath = None
self._repositoryConfiguration = None
self._exists = None
self._username = None
self._password = None
def reset(self):
""" Resets the repository configuration. """
self._configurationUri = None
self._dataPath = None
self._repositoryConfiguration = None
self._exists = None
def __exists(self):
""" Getter of the existence flag. """
if self._exists is None:
raise ConfigurationError("Repository configuration is not initialized.")
else:
return self._exists
exists = property(__exists)
def prepareConfiguration(self, hostUri, configurationPath, dataPath, username, password):
"""
        Performs some basic checks of the paths.
        @param hostUri: URI of the host on which the repository is located.
        @type hostUri: C{unicode}
        @param configurationPath: Path of the configuration area.
        @type configurationPath: C{unicode}
        @param dataPath: Path of the data area.
        @type dataPath: C{unicode}
        @param username: User name used for authentication.
        @type username: C{unicode}
        @param password: Password used for authentication.
        @type password: C{unicode}
        @raise ConfigurationError: Indicating problems on repository initialization.
"""
if not hostUri.endswith("/"):
hostUri += "/"
if configurationPath.startswith("/"):
configurationPath = configurationPath[1:]
if configurationPath.endswith("/"):
configurationPath = configurationPath[:-1]
if dataPath.startswith("/"):
dataPath = dataPath[1:]
if dataPath.endswith("/"):
dataPath = dataPath[:-1]
if dataPath == configurationPath:
raise ConfigurationError("Configuration and data path should not be equal.")
else:
self._configurationUri = hostUri + configurationPath
self._dataPath = dataPath
self._username = username
self._password = password
self._repositoryConfiguration = self._repositoryManager.getRepositoryConfiguration(self._configurationUri,
username=self._username,
password=self._password,
baseUri=hostUri)
self._exists = self._repositoryConfiguration.exists()
def createConfiguration(self, overwrite=True):
"""
Creates the configuration or overwrites an existing.
@param overwrite: Optional flag indicating whether an existing configuration is replaced. Default: C{True}
@type overwrite: C{bool}
"""
self._repositoryConfiguration.create(overwrite, self._dataPath)
self._preferences.addConnection(self._configurationUri, self._username, self._password)
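
# Illustrative call sequence (not part of the original module). The
# `repositoryManager` instance, host URI, paths, and credentials are placeholders:
#
#     model = CreateConfigurationModel(repositoryManager)
#     model.prepareConfiguration(u"http://example.org", u"/config", u"/data",
#                                u"demo", u"secret")
#     if not model.exists:  # raises ConfigurationError if not prepared
#         model.createConfiguration()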
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/admin/create_configuration_dialog/model.py",
"copies": "1",
"size": "5321",
"license": "bsd-3-clause",
"hash": -8462154764590047000,
"line_mean": 38.9307692308,
"line_max": 119,
"alpha_frac": 0.6211238489,
"autogenerated": false,
"ratio": 5.086998087954111,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019180200097820174,
"num_lines": 130
} |
"""
This is the main controller component of the creation wizard.
"""
from PyQt4 import QtGui, QtCore
from datafinder.common.logger import getDefaultLogger
from datafinder.gui.gen.user.creation_wizard_ui import Ui_Wizard
from datafinder.gui.user.common.progress_dialog import ProgressDialog
from datafinder.gui.user.dialogs.creation_wizard import constants
from datafinder.gui.user.dialogs.creation_wizard.error_handler import ErrorHandler
from datafinder.gui.user.dialogs.creation_wizard.pages.item_selection_page import ItemSelectionWizardPage
from datafinder.gui.user.dialogs.creation_wizard.state_handler.create_archive_state_handler import CreateArchiveHandler
from datafinder.gui.user.dialogs.creation_wizard.state_handler.create_collection_state_handler import CreateCollectionHandler
from datafinder.gui.user.dialogs.creation_wizard.state_handler.create_link_state_handler import CreateLinkHandler
from datafinder.gui.user.dialogs.creation_wizard.state_handler.create_leaf_state_handler import CreateLeafHandler
from datafinder.gui.user.dialogs.creation_wizard.state_handler.export_state_handler import ExportHandler
from datafinder.gui.user.dialogs.creation_wizard.state_handler.import_state_handler import ImportHandler
__version__ = "$Revision-Id:$"
class CreationWizard(QtGui.QWizard, Ui_Wizard):
""" Main controller of the wizard. """
ARCHIVE_STATE = 0
COLLECTION_STATE = 1
EXPORT_STATE = 2
IMPORT_STATE = 3
LEAF_STATE = 4
LINK_STATE = 5
_stateWizardHandlerMap = {ARCHIVE_STATE: CreateArchiveHandler,
COLLECTION_STATE: CreateCollectionHandler,
EXPORT_STATE: ExportHandler,
IMPORT_STATE: ImportHandler,
LEAF_STATE: CreateLeafHandler,
LINK_STATE: CreateLinkHandler}
_logger = getDefaultLogger()
def __init__(self, sourceBaseRepositoryModel, targetBaseRepositoryModel, parent=None, preSelectedSourceItems=None):
"""
Constructor.
@param sourceBaseRepositoryModel: Reference on the not filtered source repository model.
@type sourceBaseRepositoryModel: L{RepositoryModel<datafinder.gui.user.models.repository.repository.RepositoryModel>}
@param targetBaseRepositoryModel: Reference on the not filtered target repository model.
@type targetBaseRepositoryModel: L{RepositoryModel<datafinder.gui.user.models.repository.repository.RepositoryModel>}
@param parent: Parent widget of this dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QWizard.__init__(self, parent)
Ui_Wizard.__init__(self)
self.setupUi(self)
self._sourceBaseRepositoryModel = sourceBaseRepositoryModel
self._targetBaseRepositoryModel = targetBaseRepositoryModel
self.preSelectedSourceItems = preSelectedSourceItems
self._stateHandler = None
self._progressDialog = None
self._errorHandler = ErrorHandler(self)
def start(self, state):
"""
Initializes and starts the wizard.
@param state: Constant specifying the state of the wizard.
@type state: C{int}
"""
self._stateHandler = self._stateWizardHandlerMap[state](self)
result = self._stateHandler.checkPreConditions()
        if result is not None:
QtGui.QMessageBox.critical(self.parent(), self._stateHandler.WINDOW_TITLE, result)
else:
self.setWindowTitle(self._stateHandler.WINDOW_TITLE)
self.connect(self.button(QtGui.QWizard.FinishButton),
QtCore.SIGNAL("clicked()"),
self._finishSlot)
self.sourceChoserWizardPage.pageMode = ItemSelectionWizardPage.SOURCE_ITEM_PAGE
self.targetChoserWizardPage.pageMode = ItemSelectionWizardPage.TARGET_ITEM_PAGE
self.propertyWidget.deactivateRefreshButton()
self.exec_()
def _finishSlot(self):
""" Returns function including error handling for the specific creation action. """
self._progressDialog = ProgressDialog(self._stateHandler.WINDOW_TITLE, "", parent=self.parent())
self._stateHandler.prepareFinishSlot()
self._progressDialog._cb = self._stateHandler.finishSlotCallback
self._progressDialog.start(self._stateHandler.finishSlot)
def nextId(self):
""" @see: L{nextId<PyQt4.QtGui.QWizard.nextId>} """
try:
return self._stateHandler.nextId()
except KeyError:
return -1
def initializePage(self, identifier):
""" @see: L{initializePage<PyQt4.QtGui.QWizard.initializePage>} """
page = self.page(identifier)
page.errorHandler = self.errorHandler
self._stateHandler.initializePage(identifier)
page.setTitle(self._stateHandler.currentTitle)
page.setSubTitle(self._stateHandler.currentSubTitle)
self.errorHandler.udpateErrorDisplay()
def cleanupPage(self, identifier):
""" @see: L{cleanupPage<PyQt4.QtGui.QWizard.cleanupPage>} """
self._stateHandler.cleanupPage(identifier)
self.errorHandler.clear()
def configureSourceItemPage(self, filteredRepositoryModel, preSelectedIndexes, itemNameLabelText="",
checkTargetDataTypesExistence=False, disableItemNameSpecification=False,
selectionMode=QtGui.QAbstractItemView.SingleSelection, itemCheckFunction=None):
""" Prepares the source item wizard page. """
if itemCheckFunction is None:
itemCheckFunction = self.sourceChoserWizardPage.itemCheckFunction
self._configureItemChoserPage(self.sourceChoserWizardPage, filteredRepositoryModel, preSelectedIndexes, itemNameLabelText,
checkTargetDataTypesExistence, disableItemNameSpecification, selectionMode, None, itemCheckFunction)
@staticmethod
def _configureItemChoserPage(choserWizardPage, filteredRepositoryModel, preSelectedIndexes, itemNameLabelText="",
checkTargetDataTypesExistence=False, disableItemNameSpecification=False,
selectionMode=QtGui.QAbstractItemView.SingleSelection, targetIndex=None,
itemCheckFunction=None):
""" Prepares the item wizard page. """
choserWizardPage.filteredRepositoryModel = filteredRepositoryModel
choserWizardPage.preSelectedIndexes = preSelectedIndexes
choserWizardPage.itemNameLabelText = itemNameLabelText
choserWizardPage.checkTargetDataTypesExistence = checkTargetDataTypesExistence
choserWizardPage.disableItemNameSpecification = disableItemNameSpecification
choserWizardPage.selectionMode = selectionMode
choserWizardPage.targetIndex = targetIndex
choserWizardPage.itemCheckFunction = itemCheckFunction
choserWizardPage.configure()
def configureTargetItemPage(self, filteredRepositoryModel, preSelectedIndexes, itemNameLabelText="",
checkTargetDataTypesExistence=False, disableItemNameSpecification=False,
selectionMode=QtGui.QAbstractItemView.SingleSelection, targetIndex=None,
itemCheckFunction=None):
""" Prepares the target item wizard page. """
self._configureItemChoserPage(
self.targetChoserWizardPage, filteredRepositoryModel, preSelectedIndexes, itemNameLabelText,
checkTargetDataTypesExistence, disableItemNameSpecification, selectionMode, targetIndex, itemCheckFunction)
def configureDataStorePage(self, dataStoreMode, baseRepositoryModel):
""" Prepares the source item wizard page. """
self.datastoreChoserWizardPage.dataStoreMode = dataStoreMode
self.datastoreChoserWizardPage.iconProvider = baseRepositoryModel.iconProvider
self.datastoreChoserWizardPage.dataStoreHandler = baseRepositoryModel.repository.configuration.dataStoreHandler
self.datastoreChoserWizardPage.preferences = baseRepositoryModel.repository.configuration.preferences
self.datastoreChoserWizardPage.configure()
def configurePropertyPage(self, baseRepositoryModel, isDataTypeSelectionEnabled, index=QtCore.QModelIndex(),
indexChanged=True, initialProperties=None):
""" Prepares the source item wizard page. """
self.propertyChoserWizardPage.baseRepositoryModel = baseRepositoryModel
self.propertyChoserWizardPage.isDataTypeSelectionEnabled = isDataTypeSelectionEnabled
self.propertyChoserWizardPage.index = index
self.propertyChoserWizardPage.indexChanged = indexChanged
self.propertyChoserWizardPage.initialProperties = initialProperties
self.propertyChoserWizardPage.configure()
@property
def sourceItemName(self):
""" Returns the specified item name or C{None}. """
return self._getItemName(constants.SOURCE_PAGE_ID, self.sourceItemNameLineEdit)
@property
def targetItemName(self):
""" Returns the specified item name or C{None}. """
return self._getItemName(constants.TARGET_PAGE_ID, self.targetItemNameLineEdit)
def _getItemName(self, pageId, lineEdit):
""" Retrieves the item name. """
itemName = None
if self.hasVisitedPage(pageId) \
and not self.page(pageId).disableItemNameSpecification:
            itemName = unicode(lineEdit.text()).strip()
return itemName
@property
def sourceIndexes(self):
""" Returns the specified source indexes or C{None}. """
sourceIndexes = None
if self.hasVisitedPage(constants.SOURCE_PAGE_ID):
sourceIndexes = self.sourceSelectItemWidget.selectedIndexes
return sourceIndexes
@property
def targetIndexes(self):
""" Returns the specified target indexes or C{None}. """
targetIndexes = None
if self.hasVisitedPage(constants.TARGET_PAGE_ID):
targetIndexes = self.targetSelectItemWidget.selectedIndexes
return targetIndexes
@property
def properties(self):
""" Returns the specified properties or C{None}. """
properties = None
if self.hasVisitedPage(constants.PROPERTY_PAGE_ID):
propertyModel = self.propertyWidget.model
properties = propertyModel.properties
return properties
@property
def dataStoreName(self):
""" Returns the specified data store or C{None}. """
dataStoreName = None
if self.hasVisitedPage(constants.DATASTORE_PAGE_ID):
dataStoreName = unicode(self.dataStoreComboBox.currentText())
return dataStoreName
@property
def dataStoreConfiguration(self):
""" Returns the specified data store configuration or C{None}. """
dataStoreConfiguration = None
if self.hasVisitedPage(constants.DATASTORE_PAGE_ID):
dataStoreConfiguration = self.page(constants.DATASTORE_PAGE_ID).selectedDataStoreConfiguration
return dataStoreConfiguration
@property
def sourceRepositoryModel(self):
""" Getter of the source repository. """
return self._sourceBaseRepositoryModel
@property
def targetRepositoryModel(self):
""" Getter of the target repository. """
return self._targetBaseRepositoryModel
@property
def errorHandler(self):
""" Getter of the central error handler instance. """
return self._errorHandler
@property
def currentSubTitle(self):
""" Returns the current default sub title of the create collection wizard. """
return self._stateHandler.currentSubTitle
@property
def currentTitle(self):
""" Returns the current default title of the create collection wizard. """
return self._stateHandler.currentTitle
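
# Illustrative start-up sketch (not part of the original module). The two
# repository models and the parent widget are assumptions:
#
#     wizard = CreationWizard(sourceRepositoryModel, targetRepositoryModel,
#                             parent=mainWindow)
#     wizard.start(CreationWizard.COLLECTION_STATE)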
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/main.py",
"copies": "1",
"size": "14287",
"license": "bsd-3-clause",
"hash": -6330458124206628000,
"line_mean": 43.5,
"line_max": 138,
"alpha_frac": 0.6792188703,
"autogenerated": false,
"ratio": 4.668954248366013,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020640644666801173,
"num_lines": 314
} |
"""
This module contains all custom widgets for the DataFinder GUIs.
"""
import functools
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
__version__ = "$Revision-Id$"
class _Tab(object):
"""
    Tab class to store tab information.
    Only used in the L{datafinder.gui.user.output.decorator.TabWidgetDecorator}.
"""
def __init__(self, tabText, tabToolTip, tabWhatsThis, tabIcon, widget, shown = True):
"""
Constructor.
@param tabText: Text of the tab.
@type tabText: C{string}
@param tabToolTip: ToolTip of the tab.
@type tabToolTip: C{string}
        @param tabWhatsThis: "What's this" text of the tab.
@type tabWhatsThis: C{string}
@param tabIcon: Icon of the tab.
@type tabIcon: C{QtGui.QIcon}
@param widget: Widget of the tab.
@type widget: C{QtGui.QWidget}
@param shown: True = The tab is visible, False = the tab is removed.
@type shown: C{bool}
"""
self.text = tabText
self.toolTip = tabToolTip
self.whatsThis = tabWhatsThis
self.icon = tabIcon
self.widget = widget
self.shown = shown
class HideableTabWidget(QtGui.QTabWidget):
"""
Decorator for the QTabWidget class to change the visibility of tab items.
"""
def __init__(self, parent=None):
"""
Constructor.
        @param parent: Parent of this L{QtCore.QObject}.
        @type parent: C{QtCore.QObject}
"""
QtGui.QTabWidget.__init__(self, parent)
self.__tabs = list()
self.tabBar().setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
QtCore.QObject.connect(self.tabBar(),
QtCore.SIGNAL("customContextMenuRequested(QPoint)"),
self.showTabBarContextMenuSlot)
def fetchTabs(self, index=0):
"""
        Fetches all tab information and stores it in an internal list.
        This is necessary because tabs cannot be hidden without losing their information.
        Has to be called after setting up new tabs that should get the hiding ability.
@param index: The index at which the tab was inserted.
@type index: C{int}
"""
count = self.count()
self.__tabs = self.__tabs[:index]
for i in range(index, count):
tab = _Tab(self.tabText(i), self.tabToolTip(i), self.tabWhatsThis(i), self.tabIcon(i), self.widget(i))
self.__tabs.append(tab)
def setTabShown(self, tab, shown):
"""
        Shows or hides the tab at the given index.
        @param tab: Index of the tab.
        @type tab: C{int}
        @param shown: C{True} = show, C{False} = hide.
@type shown: C{bool}
"""
index = tab
#Index correction.
for i in range(tab):
if not self.__tabs[i].shown:
index -= 1
#Set the tab visible.
        if shown:
self.insertTab(index,
self.__tabs[tab].widget,
self.__tabs[tab].icon,
self.__tabs[tab].text)
self.setTabToolTip(index, self.__tabs[tab].toolTip)
self.setTabWhatsThis(index, self.__tabs[tab].whatsThis)
self.setCurrentIndex(index)
#Hide the tab.
else:
self.removeTab(index)
#Set the tab visibility status.
self.__tabs[tab].shown = shown
#Hide the tabwidget if there is no tab anymore.
shown = self.count() > 0
#Sending signal on visibility change.
if self.isHidden() == shown:
self.emit(QtCore.SIGNAL("shownChangedSignal(bool)"), shown)
self.setShown(shown)
def showTabBarContextMenuSlot(self):
"""
Slot is called when a context menu request was emitted.
"""
menu = QtGui.QMenu(self)
for i, tab in enumerate(self.__tabs):
action = menu.addAction(tab.icon, tab.text)
action.setCheckable(True)
action.setChecked(tab.shown)
self.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.setTabShown, i))
menu.exec_(QtGui.QCursor.pos())
class DefaultTreeView(QtGui.QTreeView):
"""
    Customizes the standard L{QtGui.QTreeView}.
"""
def __init__(self, parent=None):
"""
Constructor.
        @param parent: Parent widget of the tree view.
        @type parent: C{QtGui.QWidget}
"""
QtGui.QTreeView.__init__(self, parent)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.setEditTriggers(QtGui.QAbstractItemView.SelectedClicked |
QtGui.QAbstractItemView.EditKeyPressed)
self.header().hide()
self.header().setSortIndicator(0, QtCore.Qt.AscendingOrder)
self.setSortingEnabled(True)
self.connect(self, QtCore.SIGNAL("expanded(QModelIndex)"), self._resizeColumnsSlot)
self.connect(self, QtCore.SIGNAL("collapsed(QModelIndex)"), self._resizeColumnsSlot)
def _resizeColumnsSlot(self, index):
"""
Resize the given columns on expand or collapse.
@param index: Index with the column which have to be resized.
@type index: C{QtCore.QModelIndex}
"""
if index.isValid():
self.resizeColumnToContents(index.column())
class DefaultTableView(QtGui.QTableView):
"""
    Customizes the standard L{QtGui.QTableView}.
"""
def __init__(self, parent=None):
"""
Constructor.
        @param parent: Parent widget of the table view.
        @type parent: C{QtGui.QWidget}
"""
QtGui.QTableView.__init__(self, parent)
self.__gridStyles = [(self.tr('Solid'), QtCore.Qt.SolidLine),
(self.tr('Dashed'), QtCore.Qt.DashLine),
(self.tr('Dotted'), QtCore.Qt.DotLine),
(self.tr('Dashed Dotted'), QtCore.Qt.DashDotLine)]
self.verticalHeader().hide()
self.verticalHeader().setDefaultSectionSize(22)
self.horizontalHeader().setSortIndicatorShown(True)
self.horizontalHeader().setClickable(True)
self.horizontalHeader().setStretchLastSection(True)
self.horizontalHeader().setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.horizontalHeader().setMovable(True)
self.horizontalHeader().setHighlightSections(False)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.setGridStyle(QtCore.Qt.DotLine)
self.connect(self.horizontalHeader(),
QtCore.SIGNAL("customContextMenuRequested(QPoint)"),
self.showHeaderMenu)
self.installEventFilter(self)
def eventFilter(self, _, event):
""" Custom event filter which:
- emits a "returnPressed" event with additional currently selected index
if the Qt.Key_Return key is pressed.
- ensures that the content of the current cell is copied to the
clip board if <Ctrl>+C is pressed
"""
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == Qt.Key_Return:
self.emit(QtCore.SIGNAL("returnPressed"), self.currentIndex())
elif event.type() == QtCore.QEvent.KeyRelease:
if event.matches(QtGui.QKeySequence.Copy):
QtGui.QApplication.clipboard().setText(self.currentIndex().data().toString())
return False
def showHeaderMenu(self, _):
"""
Shows the header content menu at the current cursor position.
"""
#Generates the menu for changing the visibility of the headers.
menu = QtGui.QMenu(self)
lastCheckedAction = None
numberOfCheckActions = 0
for section in range(self.model().columnCount(QtCore.QModelIndex())):
text = self.model().headerData(section, QtCore.Qt.Horizontal, QtCore.Qt.DisplayRole).toString()
action = menu.addAction(text)
action.setCheckable(True)
if self.isColumnHidden(section):
action.setChecked(False)
action.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.showColumn, section))
else:
action.setChecked(True)
action.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.hideColumn, section))
lastCheckedAction = action
numberOfCheckActions += 1
action.setEnabled(True)
        if lastCheckedAction is not None and numberOfCheckActions == 1:
lastCheckedAction.setEnabled(False)
#Generates the menu for the grid style.
gridMenu = QtGui.QMenu(self.tr('Grid'), menu)
styleGroup = QtGui.QActionGroup(menu)
for name, style in self.__gridStyles:
action = gridMenu.addAction(name)
action.setCheckable(True)
action.setChecked(style == self.gridStyle())
action.setEnabled(self.showGrid())
styleGroup.addAction(action)
self.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.setGridStyle, style))
gridMenu.addSeparator()
action = gridMenu.addAction(self.tr('Show'))
action.setCheckable(True)
action.setChecked(self.showGrid())
self.connect(action, QtCore.SIGNAL("triggered(bool)"), self.setShowGrid)
menu.addSeparator()
menu.addMenu(gridMenu)
menu.exec_(QtGui.QCursor.pos())
class DefaultListView(QtGui.QListView):
"""
    Customizes the standard L{QtGui.QListView}.
"""
def __init__(self, parent=None):
"""
Constructor.
        @param parent: Parent widget of the list view.
        @type parent: C{QtGui.QWidget}
"""
QtGui.QListView.__init__(self, parent)
self.__verticalOffset = 0
def keyPressEvent(self, keyEvent):
""" Signals that the return key is pressed and provides the specific the current model index. """
if keyEvent.key() == Qt.Key_Return:
self.emit(QtCore.SIGNAL("returnPressed"), self.selectionModel().currentIndex())
QtGui.QListView.keyPressEvent(self, keyEvent)
def setViewMode(self, mode):
"""
@see: QtGui.QListView#setViewMode
"""
size = QtCore.QSize(-1, -1)
self.__verticalOffset = 0
if mode == QtGui.QListView.IconMode:
size = QtCore.QSize(115, 80)
self.__verticalOffset = -10
self.setGridSize(size)
QtGui.QListView.setViewMode(self, mode)
def visualRect(self, index):
"""
@see: QtCore.QAbstractItemView#visualRect
"""
rect = self.rectForIndex(index)
dx = -1 * self.horizontalOffset()
dy = -1 * self.verticalOffset() - self.__verticalOffset
rect.adjust(dx, dy, dx, dy)
return rect
class ActionTooltipMenu(QtGui.QMenu):
""" Implements a menu which shows the tool tip of the active action. """
def __init__(self, parent=None):
""" Constructor. """
QtGui.QMenu.__init__(self, parent)
def event(self, event):
"""
@see: L{event<PyQt4.QtGui.QWidget.event>}
        Used for displaying action-dependent tool tips.
"""
if event.type() == QtCore.QEvent.ToolTip:
            if self.activeAction() is not None:
QtGui.QToolTip.showText(event.globalPos(), self.activeAction().toolTip())
else:
QtGui.QToolTip.hideText()
return QtGui.QMenu.event(self, event)
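
# Illustrative usage sketch for HideableTabWidget (not part of the original
# module; the tab contents are placeholders):
#
#     tabWidget = HideableTabWidget()
#     tabWidget.addTab(QtGui.QTextEdit(), "Log")
#     tabWidget.addTab(QtGui.QTextEdit(), "Search")
#     tabWidget.fetchTabs()            # snapshot tab information once set up
#     tabWidget.setTabShown(1, False)  # hides "Search" without losing its state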
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/common/widget/widget.py",
"copies": "1",
"size": "14327",
"license": "bsd-3-clause",
"hash": 6878227478979152000,
"line_mean": 34.7358974359,
"line_max": 114,
"alpha_frac": 0.5870035597,
"autogenerated": false,
"ratio": 4.356035269078747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016290904818847377,
"num_lines": 390
} |
"""
This module defines a basic set of validation functions / classes for value verification.
"""
import decimal
import re
import sys
__version__ = "$Revision-Id:$"
class IsInRange(object):
"""
Checks whether a given value is in a specific range.
The requirement is that minimum, maximum and the value are comparable (<, > support).
"""
def __init__(self, minValue=None, maxValue=None):
"""
@param minValue: The lower bound.
@type minValue: C{object}
        @param maxValue: The upper bound.
        @type maxValue: C{object}
"""
self.minValue = minValue
self.maxValue = maxValue
def __call__(self, value):
"""
Implements the validation.
@param value: The value to check.
@type value: C{object}
"""
        if self.minValue is not None and value < self.minValue:
            raise ValueError("The provided value is less than the defined minimum.")
        if self.maxValue is not None and value > self.maxValue:
            raise ValueError("The provided value is greater than the defined maximum.")
class IsDecimalInRange(object):
"""
Class for checking boundaries of decimal values.
"""
def __init__(self, minValue, maxValue):
"""
@param minValue: The lower bound.
@type minValue: C{decimal.Decimal}, C{int}, C{long}, C{float}
@param maxValue: The upper bound.
@type maxValue: C{decimal.Decimal}, C{int}, C{long}, C{float}
"""
self.minValue = minValue
self.maxValue = maxValue
self.__inRangeValidator = IsInRange()
def __call__(self, value):
"""
Implements validation of the value.
The value is converted to C{decimal.Decimal} before performing the range check.
"""
self.__inRangeValidator.minValue = _toDecimal(self.minValue)
self.__inRangeValidator.maxValue = _toDecimal(self.maxValue)
self.__inRangeValidator(_toDecimal(value))
def _toDecimal(value):
""" Performs the conversion to C{decimal.Decimal}. """
if not isinstance(value, decimal.Decimal):
try:
value = decimal.Decimal(str(value))
except decimal.InvalidOperation:
raise ValueError("The value '%s' is no valid numeric." % str(value))
return value
class IsLengthInRange(object):
"""
Checks whether the length of a given value is in a specific range.
The values that can be checked with this validation class have to support
the "len" function.
"""
def __init__(self, minLength=None, maxLength=None):
"""
@param minLength: The lower bound.
@type minLength: C{int}
@param maxLength: The upper bound.
@type maxLength: C{int}
"""
self.minLength = minLength
self.maxLength = maxLength
self.__inRangeValidator = IsInRange()
def __call__(self, value):
"""
Implements the validation.
@param value: The value to check.
@type value: C{object}
"""
self.__inRangeValidator.minValue = self.minLength
self.__inRangeValidator.maxValue = self.maxLength
self.__inRangeValidator(len(value))
class IsNumberOfDecimalPlacesInRange(object):
"""
Checks whether the number of decimal places which was specified
is in a specific range.
"""
def __init__(self, minNumberOfDecimalPlaces=None, maxNumberOfDecimalPlaces=None):
"""
@param minNumberOfDecimalPlaces: The lower bound.
@type minNumberOfDecimalPlaces: C{int}
@param maxNumberOfDecimalPlaces: The upper bound.
@type maxNumberOfDecimalPlaces: C{int}
"""
self.minNumberOfDecimalPlaces = minNumberOfDecimalPlaces
self.maxNumberOfDecimalPlaces = maxNumberOfDecimalPlaces
self.__inRangeValidator = IsInRange()
def __call__(self, value):
"""
Implements the validation.
@param value: The value to check.
@type value: L{Decimal<decimal.Decimal>}, C{float}, C{int}
"""
value = _toDecimal(value)
# calculate specified number of decimal places
tupleRepr = value.as_tuple() # represents as: (sign, given digits, exponent)
if tupleRepr[2] >= 0: # positive or zero exponent
decimalPlaces = 0
else:
absolutExponent = abs(tupleRepr[2])
possibleNumberOfDecimalPlaces = len(tupleRepr[1])
if possibleNumberOfDecimalPlaces > absolutExponent:
decimalPlaces = absolutExponent
else:
decimalPlaces = possibleNumberOfDecimalPlaces
# check the calculated number of specified decimal places
self.__inRangeValidator.minValue = self.minNumberOfDecimalPlaces
self.__inRangeValidator.maxValue = self.maxNumberOfDecimalPlaces
self.__inRangeValidator(decimalPlaces)
class AreOptionsMatched(object):
"""
Checks whether a value is taken from a certain list of options.
The check is performed with the comparison operator.
"""
def __init__(self, options, optionsMandatory=True):
"""
        @param options: List of options that the checked value has to be taken from.
        @type options: C{list}
        @param optionsMandatory: Indicates whether the value must be one of C{options}. Default: C{True}
        @type optionsMandatory: C{bool}
"""
self.options = options
self.optionsMandatory = optionsMandatory
def __call__(self, value):
"""
Implements the validation.
@param value: Value to check.
@type value: Depends on the concrete use case.
"""
if self.optionsMandatory:
            if value not in self.options:
raise ValueError("The item is not taken from the specified options.")
class AreTypesMatched(object):
"""
Checks whether the value is from one of the allowed types.
"""
def __init__(self, valueTypes, exactMatch=True):
"""
@param valueTypes: List of class object.
@type valueTypes: C{list} of class objects.
@param exactMatch: If C{True} type checking is performed by using C{type}
otherwise C{isinstance} is used.
@type exactMatch: C{bool}
"""
self.valueTypes = valueTypes
self.exactMatch = exactMatch
def __call__(self, value):
"""
Implements the check.
@param value: Class object.
"""
representationTypeFound = False
for valueType in self.valueTypes:
if self.exactMatch:
if type(value) == valueType:
representationTypeFound = True
break
else:
if isinstance(value, valueType):
representationTypeFound = True
break
if not representationTypeFound:
raise ValueError("The given value has not the required type. %s %s" % (repr(value), repr(self.valueTypes)))
class IsPatternMatched(object):
"""
Checks whether the value conforms to specified string pattern.
"""
def __init__(self, pattern):
"""
        @param pattern: Regular expression pattern.
        @type pattern: C{unicode}
"""
self.pattern = pattern
def __call__(self, value):
""" Implements the check. """
try:
result = re.match(self.pattern, value)
except (re.error, TypeError):
raise ValueError("The pattern %s is not a valid regular expression." % self.pattern)
if result is None:
raise ValueError("The given value does not match the defined pattern.")
class IsEachValueUnique(object):
""" Checks whether every value of a given list appears only once. """
@staticmethod
def __call__(value):
"""
Checks whether every value of a given list appears only once.
The check is performed with the comparison operator ("==").
@param value: List of items to check.
@type value: C{list}
"""
tmpDict = dict.fromkeys(value) # Removes duplicated entries
if len(tmpDict) != len(value):
raise ValueError("The values in the given list are not unique.")
class IsBinaryStringDecodable(object):
"""
Checks whether the given string can be converted to unicode by
using the default encoding.
"""
def __init__(self):
self._encoding = sys.getdefaultencoding() or "ascii"
def __call__(self, value):
"""
Checks whether the given string can be converted to unicode by
using the default encoding.
@param value: String to check.
@type value: C{basestring}
"""
if not isinstance(value, unicode):
try:
unicode(value, self._encoding)
except UnicodeError:
errorMessage = "The given binary string cannot be converted to unicode using the default encoding." + \
"Please convert the string to unicode before."
raise ValueError(errorMessage)
except TypeError:
raise ValueError("The value '%s' is no binary string." % str(value))
class ForEach(object):
"""
This class performs a given check for each value in a sequence.
"""
def __init__(self, validator):
"""
@param validator: A callable which takes a certain value as input.
Valid callables are defined in this module.
        @type validator: C{callable}
"""
self.validator = validator
def __call__(self, value):
"""
Calls the validator for each item of the given sequence.
@param value: A sequence.
"""
for item in value:
self.validator(item)
class OrValidator(object):
"""
This class performs given checks on a value until one of the checks succeeds.
"""
def __init__(self, validators):
"""
        @param validators: A list of callables which take a certain value as input.
                           Valid callables are defined in this module.
        @type validators: C{list} of C{callable}
"""
self.validators = validators
def __call__(self, value):
"""
        Calls every check until one of them succeeds.
@param value: Any value.
"""
        # Initialized up front so an empty validator list succeeds trivially
        # instead of raising a NameError below.
        allValidatorsFailed = bool(self.validators)
        for validator in self.validators:
            try:
                validator(value)
                allValidatorsFailed = False
                break
            except ValueError:
                pass
if allValidatorsFailed:
raise ValueError("Every defined validation rule failed for the given value.")
class AndValidator(object):
"""
Succeeds when all configured checks succeed as well.
"""
def __init__(self, validators):
"""
        @param validators: A list of callables which take a certain value as input.
                           Valid callables are defined in this module.
        @type validators: C{list} of C{callable}
"""
self.validators = validators
def __call__(self, value):
"""
Calls all checks on the given value.
@param value: Any value.
"""
for validator in self.validators:
validator(value)
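
# Minimal self-test sketch (not part of the original module; assumes Python 2).
# Each list item must satisfy at least one rule: be an C{int} or a short string.
if __name__ == "__main__":
    itemValidator = OrValidator([AreTypesMatched([int]), IsLengthInRange(1, 5)])
    ForEach(itemValidator)([7, "abc"])  # conforms: passes silently
    try:
        ForEach(itemValidator)([7, "far too long for the rule"])
    except ValueError, error:
        print "Rejected as expected: %s" % str(error)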
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/properties/validators/base_validators.py",
"copies": "1",
"size": "13874",
"license": "bsd-3-clause",
"hash": -4670276932198831000,
"line_mean": 31.4313253012,
"line_max": 119,
"alpha_frac": 0.5808706934,
"autogenerated": false,
"ratio": 4.777548209366391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023318734750361236,
"num_lines": 415
} |
"""
This module provides common functionality used in different build targets.
"""
import os
__version__ = "$Revision-Id:$"
def setVersion(versionString):
""" Sets the version name within the DataFinder ..."""
relativeVersionFilePath = "src/datafinder/core/configuration/constants.py" # Path Relative to project root
fileHandle = open(relativeVersionFilePath, "r")
content = fileHandle.readlines()
fileHandle.close()
newContent = list()
for line in content:
if "VERSION" in line:
line = "VERSION = \"%s\"\n" % versionString
newContent.append(line)
fileHandle = open(relativeVersionFilePath, "w")
fileHandle.writelines(newContent)
fileHandle.close()
def regenerateFile(sourceFilePath, targetFilePath):
"""
    Returns C{True} if the target file (C{targetFilePath}) needs to be regenerated
    from the source file (C{sourceFilePath}). The target file needs to be regenerated if:
    - The target file does not exist
    - The modification date of the source file is newer than the one of the target file
"""
    regenerate = True
    if os.path.exists(targetFilePath):
        if os.path.getmtime(sourceFilePath) < os.path.getmtime(targetFilePath):
            regenerate = False
    return regenerate
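
# Illustrative usage sketch (not part of the original module; the file paths
# are hypothetical):
if __name__ == "__main__":
    if regenerateFile("doc/manual.rst", "doc/manual.html"):
        print "Target is missing or stale - regenerate it."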
| {
"repo_name": "DLR-SC/DataFinder",
"path": "build_scripts/distutils/src/datafinder_distutils/utils.py",
"copies": "1",
"size": "3034",
"license": "bsd-3-clause",
"hash": 8828410342400932000,
"line_mean": 36.4050632911,
"line_max": 110,
"alpha_frac": 0.7142386289,
"autogenerated": false,
"ratio": 4.219749652294854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016769064162691735,
"num_lines": 79
} |
"""
This module provides constants for the L{datafinder.core.item} package.
"""
__version__ = "$Revision-Id:$"
# ItemState -> Indicates whether and how the item is accessible
ITEM_STATE_NULL = "ItemState:NULL"
ITEM_STATE_INACCESSIBLE = "ItemState:INACCESSIBLE"
ITEM_STATE_ACCESSIBLE = "ItemState:ACCESSIBLE"
ITEM_STATE_MIGRATED = "ItemState:MIGRATED"
ITEM_STATE_ARCHIVED = "ItemState:ARCHIVED"
ITEM_STATE_ARCHIVED_READONLY = "ItemState:ARCHIVED_READONLY"
ITEM_STATE_ARCHIVED_MEMBER = "ItemState:ARCHIVED_MEMBER"
ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE = "ItemState:UNSUPPORTED_STORAGE_INTERFACE"
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/data_persister/constants.py",
"copies": "1",
"size": "2317",
"license": "bsd-3-clause",
"hash": -6492248555707790000,
"line_mean": 40.9074074074,
"line_max": 84,
"alpha_frac": 0.7544238239,
"autogenerated": false,
"ratio": 3.767479674796748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9957566697016427,
"avg_score": 0.01286736033606418,
"num_lines": 54
} |
"""
This module provides the definition of a property representations.
"""
from datafinder.core.configuration.gen.datamodel import property as property_
from datafinder.core.configuration.properties import constants, property_type
from datafinder.core.error import ConfigurationError, PropertyError
__version__ = "$Revision-Id:$"
class PropertyDefinition(object):
"""
This the base class for property definitions.
@ivar identifier: This is the logical identifier of the property which has to be unique on
a specific resource. This identifier is mapped on a persistence identifier
when the property value is stored.
@type identifier: C{unicode}
@ivar type: Indicates the type of the property, i.e. the format of associated values.
@type type: C{string}, for constant definition see L{constants<datafinder.core.item.metadata.constants>}.
@ivar category: This holds the category of the property, i.e. if the property is
system-, data model- or user-specific. In accordance to the category
depends whether a property can be changed or deleted by the user.
@type category: C{string}, for constant definitions see L{constants<datafinder.core.item.metadata.constants>}
@ivar displayName: A readable name that can be presented in a user interface.
@type displayName: C{unicode}
@ivar description: Describes the purpose of the property.
@type description: C{unicode}
    @ivar notNull: Flag indicating whether C{None} is an allowed property value.
                   This restriction is also checked on property value validation.
    @type notNull: C{bool}
    @ivar defaultValue: A default value for the property. The default value is not checked
                        against the existing restrictions when set.
@type defaultValue: The type of the default value depends on the property definition.
"""
def __init__(self, identifier, category=constants.USER_PROPERTY_CATEGORY, propertyType=property_type.AnyType(),
displayName=None, description=None, namespace=None):
"""
Constructor.
@param identifier: This is the logical identifier of the property which has to be
unique on a specific resource.
@type identifier: C{unicode}
@param category: Determines the category of the property.
@type category: C{unicode}
@param propertyType: Type/ restrictions for property values.
@type propertyType: C{object}
@param displayName: Descriptive name of the property.
@type displayName: C{unicode}
@param description: Description of the property.
@type description: C{unicode}
"""
self._identifier = identifier
self.namespace = namespace
self.category = category
self.displayName = displayName or identifier or ""
self.description = description
self._propertyType = propertyType
self.defaultValue = None
def _getNotNull(self):
return self._propertyType.notNull
def _setNotNull(self, value):
self._propertyType.notNull = value
notNull = property(_getNotNull, _setNotNull)
@property
def identifier(self):
""" Getter for the attribute C{self.__identifier}. """
return self._identifier
@property
def type(self):
""" Returns the type constant of the property. """
return self._propertyType.name
@property
def restrictions(self):
""" Returns the defined restrictions of the property. """
result = dict()
for restriction, value in self._propertyType.restrictions.iteritems():
            if value is not None:
result[restriction] = value
return result
def validate(self, value):
"""
Checks whether the given value conforms to the given restrictions.
@param value: The value to check against the restrictions.
@type value: C{object}
@raise PropertyError: Indicating that the value does not conform
to the defined restrictions.
"""
try:
self._propertyType.validate(value)
except ValueError, error:
raise PropertyError(self.identifier, repr(error.args))
def fromPersistenceFormat(self, persistedValue):
""" @see: <fromPersistenceFormat<datafinder.core.configuration.properties.
property_type.ListType.fromPersistenceFormat>}
@raise PropertyError: Indicating that the value could not be restored or
does not conform to the defined restrictions.
"""
try:
value = self._propertyType.fromPersistenceFormat(persistedValue)
except ValueError, error:
raise PropertyError(self.identifier, repr(error.args))
else:
self.validate(value)
return value
def toPersistenceFormat(self, value):
""" @see: <fromPersistenceFormat<datafinder.core.configuration.properties.
property_type.ListType.fromPersistenceFormat>}
@raise PropertyError: Indicating that the value could not be transformed
to the persistence format.
"""
try:
self.validate(value)
return self._propertyType.toPersistenceFormat(value)
except ValueError, error:
raise PropertyError(self.identifier, repr(error.args))
def __cmp__(self, other):
""" Comparison of two instances. """
try:
if self.identifier == other.identifier \
and self.namespace == other.namespace:
return 0
return 1
except AttributeError:
return 1
def __hash__(self):
        return hash((self.identifier, self.namespace))
def __repr__(self):
""" Returns a readable representation. """
return self.displayName
@staticmethod
def load(persistedPropertyDefinition):
""" Loads the property definition form persistence format. """
propertyType = property_type.createPropertyType(persistedPropertyDefinition.valueType)
propertyDef = PropertyDefinition(persistedPropertyDefinition.name, propertyType=propertyType)
propertyDef.defaultValue = persistedPropertyDefinition.defaultValue
propertyDef.notNull = persistedPropertyDefinition.mandatory
return propertyDef
def toPersistenceRepresentation(self):
""" Returns a property definition in persistence format. """
return property_(self.identifier, self._propertyType.name,
self.notNull, self.defaultValue)
class PropertyDefinitionFactory(object):
""" Factory for creation of property definitions. """
PROPERTY_TYPE_NAMES = property_type.PROPERTY_TYPE_NAMES
def __init__(self, propertyIdValidator=None):
"""
Constructor.
@param propertyIdValidator: Function taking the property
identifier as argument and checks whether it is valid.
Example:
>>> def isValid(theIdentfierString):
... position = None
... isValid = True
... return isValid, position
@type propertyIdValidator: Function object.
"""
self.propertyIdValidator = propertyIdValidator
def isValidPropertyIdentifier(self, identifier):
"""
Checks whether the given identifier is valid.
@param identifier: Identifier of a property definition.
@type identifier: C{unicode}
"""
isValid = True
if not self.propertyIdValidator is None:
isValid = self.propertyIdValidator(identifier)[0]
return isValid
def createPropertyDefinition(self, identifier, category=constants.USER_PROPERTY_CATEGORY,
propertyType=property_type.AnyType(), displayName=None, description=None, namespace=None):
"""
Returns a property definition.
@raise ConfigurationError: Indicating problems with identifier.
"""
if not self.isValidPropertyIdentifier(identifier):
raise ConfigurationError("Identifier '%s' does not match property identifier pattern.")
else:
return PropertyDefinition(identifier, category, propertyType,
displayName, description, namespace)
@staticmethod
def createPropertyType(propertyTypeName, restrictions=dict()):
"""
Provided for convenience.
@see: L{createPropertyType<datafinder.core.configuration.properties.property_type.createPropertyType>}
"""
return property_type.createPropertyType(propertyTypeName, restrictions)
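# Example (sketch): creating a definition via the factory. The validator
# below is hypothetical and only illustrates the expected
# (isValid, errorPosition) return convention.
#
# def _containsNoWhitespace(identifier):
#     return not " " in identifier, None
#
# factory = PropertyDefinitionFactory(_containsNoWhitespace)
# propertyDefinition = factory.createPropertyDefinition("reviewState")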
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/properties/property_definition.py",
"copies": "1",
"size": "10958",
"license": "bsd-3-clause",
"hash": 1814740226480722000,
"line_mean": 38.5851851852,
"line_max": 116,
"alpha_frac": 0.6442781529,
"autogenerated": false,
"ratio": 4.95164934478084,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.609592749768084,
"avg_score": null,
"num_lines": null
} |
""" Creates a sample Lucene index for the full-text search feature. """
import lucene
import sys
if __name__ == "__main__":
lucene.initVM()
indexDir = "D:/Downloads/index"
dir_ = lucene.SimpleFSDirectory(lucene.File(indexDir))
analyzer = lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT)
writer = lucene.IndexWriter(dir_, analyzer, True, lucene.IndexWriter.MaxFieldLength(512))
print("Currently there are %d documents in the index..." % writer.numDocs())
content = ("Strategische Konzeption, Umsetzung und Betreuung von langfristig " +
"hochwirksamen und messbar erfolgreichen Maßnahmen in Social Media.")
doc = lucene.Document()
doc.add(lucene.Field(
"content", content, lucene.Field.Store.YES, lucene.Field.Index.ANALYZED))
doc.add(lucene.Field(
"filePath", "Projekte/bericht.txt", lucene.Field.Store.YES, lucene.Field.Index.ANALYZED))
writer.addDocument(doc)
content = ("Design von Marken, Screens und Interfaces sowie Entwicklung von " +
"individuellen Facebook Apps, iPhone Apps und Webauftritten.")
doc = lucene.Document()
doc.add(lucene.Field(
"content", content, lucene.Field.Store.YES, lucene.Field.Index.ANALYZED))
doc.add(lucene.Field(
"filePath", "Projekte/implementierung.txt", lucene.Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
writer.optimize()
print("...done optimizing index of %d documents" % writer.numDocs())
print("Closing index of %d documents..." % writer.numDocs())
writer.close()
print("...done closing index of %d documents" % writer.numDocs())
| {
"repo_name": "DLR-SC/DataFinder",
"path": "contrib/lucene/create_sample_index.py",
"copies": "1",
"size": "3377",
"license": "bsd-3-clause",
"hash": -5002695304426668000,
"line_mean": 42.4342105263,
"line_max": 98,
"alpha_frac": 0.7157240154,
"autogenerated": false,
"ratio": 3.600213219616205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9750467129664135,
"avg_score": 0.013094021070413887,
"num_lines": 76
} |
"""
Constant definitions for connection handling.
"""
from webdav.Constants import NS_DAV, PROP_RESOURCE_TYPE
__version__ = "$Revision-Id:$"
# Constants for connection pooling
MAX_POOL_NUMBER = 10
MAX_CONNECTION_NUMBER = 4
# Defines special WebDAV properties
LINK_TARGET_PROPERTY = ("http://dlr.de/system/", "linkTarget")
RESOURCE_TYPE_PROPERTY = (NS_DAV, PROP_RESOURCE_TYPE)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/webdav_/constants.py",
"copies": "1",
"size": "2101",
"license": "bsd-3-clause",
"hash": -2543982314989028400,
"line_mean": 36.2,
"line_max": 72,
"alpha_frac": 0.7429795336,
"autogenerated": false,
"ratio": 3.9052044609665426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015452294869770691,
"num_lines": 55
} |
"""
Constant definitions.
"""
__version__ = "$Revision-Id:$"
DEFAULT_SSH_PORT = 22
MAXIMUM_RECEIVED_BYTES = 1024
CONNECTION_TIMEOUT = 500.0
MAX_POOL_NUMBER = 10
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/tsm/constants.py",
"copies": "1",
"size": "1877",
"license": "bsd-3-clause",
"hash": 2357127403117883400,
"line_mean": 36.306122449,
"line_max": 72,
"alpha_frac": 0.7416089505,
"autogenerated": false,
"ratio": 3.9350104821802936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017344412608926284,
"num_lines": 49
} |
"""
Constants definitions of the meta data support.
"""
from datafinder.core.configuration.properties import constants as const
__version__ = "$Revision-Id:$"
# Property categories
USER_PROPERTY_CATEGORY = const.USER_PROPERTY_CATEGORY
MANAGED_SYSTEM_PROPERTY_CATEGORY = const.MANAGED_SYSTEM_PROPERTY_CATEGORY
UNMANAGED_SYSTEM_PROPERTY_CATEGORY = const.UNMANAGED_SYSTEM_PROPERTY_CATEGORY
DATAMODEL_PROPERTY_CATEGORY = const.DATAMODEL_PROPERTY_CATEGORY
# System-specific property identifiers
MODIFICATION_DATETIME_ID = const.MODIFICATION_DATETIME_ID
CREATION_DATETIME_ID = const.CREATION_DATETIME_ID
OWNER_ID = const.OWNER_ID
MIME_TYPE_ID = const.MIME_TYPE_ID
DATATYPE_ID = const.DATATYPE_ID
DATASTORE_NAME_ID = const.DATASTORE_NAME_ID
SIZE_ID = const.SIZE_ID
CONTENT_MODIFICATION_DATETIME_ID = const.CONTENT_MODIFICATION_DATETIME_ID
CONTENT_CREATION_DATETIME_ID = const.CONTENT_CREATION_DATETIME_PROPERTY_ID
CONTENT_SIZE_ID = const.CONTENT_SIZE_ID
CONTENT_IDENTIFIER_ID = const.CONTENT_IDENTIFIER_ID
ARCHIVE_RETENTION_EXCEEDED_ID = const.ARCHIVE_RETENTION_EXCEEDED_DATETIME_ID
ARCHIVE_ROOT_COLLECTION_ID = const.ARCHIVE_ROOT_COLLECTION_ID
ARCHIVE_PART_INDEX_ID = const.ARCHIVE_PART_INDEX_ID
ARCHIVE_PART_COUNT_ID = const.ARCHIVE_PART_COUNT_ID
DATA_FORMAT_ID = const.DATA_FORMAT_ID
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/script_api/properties/constants.py",
"copies": "1",
"size": "3022",
"license": "bsd-3-clause",
"hash": 2044777892360740000,
"line_mean": 40.5633802817,
"line_max": 77,
"alpha_frac": 0.7673726009,
"autogenerated": false,
"ratio": 3.6235011990407675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9829614911360669,
"avg_score": 0.01225177771601955,
"num_lines": 71
} |
"""
Contains wrapper class around the property representation used in the core package.
"""
__version__ = "$Revision-Id:$"
class PropertyDescription(object):
"""
Wrapper around the internal property representation giving restricted access to
the relevant parameters.
All instance variables are read-only.
@ivar identifier: This is the logical identifier of the property.
@type identifier: C{unicode}
@ivar category: This holds the category of the property, i.e. if the property is
system, data model or user specific.
System specific: property can NOT be deleted from resource, values are read-only
Data model specific: property can NOT be deleted from resource, values changeable
User specific: property can be deleted from resource, values changeable
@type category: C{unicode}, for possible values see:
L{constants<datafinder.script_api.properties.constants>}
@ivar displayName: A readable name that can be presented in a user interface.
@type displayName: C{unicode}
@ivar description: Describes the purpose of the property.
@type description: C{unicode}
    @ivar notNull: Flag indicating if C{None} is an allowed property value or not.
@type notNull: C{bool}
@ivar defaultValue: A default value for the property that is used for creation of the property on a resource.
@type defaultValue: The type of the default value depends on the property definition.
@ivar restrictions: This parameter holds the defined property restrictions that are
represented by parameters. The returned mapping can contain the following keys:
minimumValue: Defining the lower boundary of a value range.
maximumValue: Defining the upper boundary of a value range.
minimumLength: Defining the lower boundary of a length range.
maximumLength: Defining the upper boundary of a length range.
minimumNumberOfDecimalPlaces: Defining the minimum number of decimal places.
maximumNumberOfDecimalPlaces: Defining the maximum number of decimal places.
pattern: Regular expression pattern that restricts a string value.
options: A list of options the value can be chosen from.
optionsMandatory: Boolean indicating whether the value MUST be from the list of options.
subTypes: List of strings identifying supported types.
The possible restrictions depend on the type.
@type restrictions: C{dict}
@ivar namespace: Name space in which the property is valid, e.g. used to distinguish
different C{name} properties of different data types.
@type namespace: C{unicode}
"""
def __init__(self, propertyDefinition):
"""
Constructor.
        @param propertyDefinition: The property definition.
        @type propertyDefinition: L{PropertyDefinition<datafinder.core.configuration.properties.property_definition.PropertyDefinition>}
"""
self.__propertyDefinition = propertyDefinition
def __getIdentifier(self):
"""
Returns the identifier of the property.
"""
return self.__propertyDefinition.identifier
identifier = property(__getIdentifier)
def __getType(self):
""" Returns the propertyType. """
return self.__propertyDefinition.type
type = property(__getType)
def __getDisplayName(self):
"""
Returns the display name of the property.
"""
return self.__propertyDefinition.displayName
displayName = property(__getDisplayName)
def __getCategory(self):
"""
Returns the property category.
"""
return self.__propertyDefinition.category
category = property(__getCategory)
def __getDescription(self):
"""
Returns the property description.
"""
return self.__propertyDefinition.description
description = property(__getDescription)
def __getDefaultValue(self):
"""
Returns the specific default value.
"""
return self.__propertyDefinition.defaultValue
defaultValue = property(__getDefaultValue)
def __getNotNull(self):
"""
Returns whether the value can be C{None} or not.
"""
return self.__propertyDefinition.notNull
notNull = property(__getNotNull)
def __getNamespace(self):
"""
Returns the namespace.
"""
return self.__propertyDefinition.namespace
namespace = property(__getNamespace)
def __getRestrictions(self):
"""
Returns the defined restrictions of the property.
"""
return self.__propertyDefinition.restrictions
restrictions = property(__getRestrictions)
def __repr__(self):
""" Returns a readable representation. """
return self.identifier + " Type: " + self.type \
+ " Category: " + self.category
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/script_api/properties/property_description.py",
"copies": "1",
"size": "7285",
"license": "bsd-3-clause",
"hash": 6682999860754484000,
"line_mean": 36.1413612565,
"line_max": 118,
"alpha_frac": 0.6322580645,
"autogenerated": false,
"ratio": 5.130281690140845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.04590497264379265,
"num_lines": 191
} |
"""
Defines data associated with a principal, i.e. user / role.
"""
from datafinder.persistence.principal_search import constants
__version__ = "$Revision-Id:$"
class Principal(object):
""" Represents a principal. """
def __init__(self, identifier, **kwargs):
""" Constructor. """
self.identifier = identifier
self.type = constants.USER_PRINCIPAL_TYPE
self.displayName = identifier
self.memberof = list()
self.__dict__.update(kwargs)
def __cmp__(self, principal):
""" Compares of two instances. """
if self.identifier == principal.identifier \
and self.type == principal.type \
and self.displayName == principal.displayName:
identical = True
for item in self.memberof:
                if not item in principal.memberof:
identical = False
if identical:
return 0
return 1
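# Example (sketch): the keyword arguments simply override the attributes set
# in the constructor; "admins" is an illustrative group name.
#
# principal = Principal("jdoe", displayName="John Doe", memberof=["admins"])
# assert principal.type == constants.USER_PRINCIPAL_TYPE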
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/principal_search/principal.py",
"copies": "1",
"size": "2719",
"license": "bsd-3-clause",
"hash": -1916695395209044000,
"line_mean": 35.2465753425,
"line_max": 72,
"alpha_frac": 0.6730415594,
"autogenerated": false,
"ratio": 4.33652312599681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01831439791464878,
"num_lines": 73
} |
"""
Defines the set of lucene-specific configuration parameters.
"""
from datafinder.persistence.error import PersistenceError
__version__ = "$Revision-Id:$"
_LUCENE_SCHEME_PREFIX = "lucene+"
_LUCENE_PLUS_FILE_SCHEME_PREFIX = _LUCENE_SCHEME_PREFIX + "file"
class Configuration(object):
""" Defines a set of configuration parameters for lucene. """
def __init__(self, baseConfiguration, env):
"""
@param baseConfiguration: General basic configuration.
@type baseConfiguration: L{BaseConfiguration<datafinder.persistence.common.configuration.BaseConfiguration>}
@param env: lucene module specific Java VM
"""
indexUri = baseConfiguration.baseUri
if not indexUri:
raise PersistenceError("Invalid lucene index URI has been provided.")
if indexUri.startswith(_LUCENE_SCHEME_PREFIX):
if indexUri.startswith(_LUCENE_PLUS_FILE_SCHEME_PREFIX):
if baseConfiguration.uriPath.startswith("//"): # Stupid workaround for urlsplit bug in Python2.6
indexUri = "file:" + baseConfiguration.uriPath
else:
indexUri = "file://" + baseConfiguration.uriPath
else:
indexUri = indexUri[len(_LUCENE_SCHEME_PREFIX):]
self.luceneIndexUri = indexUri
self.env = env
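# Example (sketch) of the URI normalization performed above, assuming a base
# configuration as provided by
# datafinder.persistence.common.configuration.BaseConfiguration:
#
# "lucene+file:///tmp/index" -> "file:///tmp/index"
# "lucene+http://host/index" -> "http://host/index"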
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/lucene/configuration.py",
"copies": "1",
"size": "3107",
"license": "bsd-3-clause",
"hash": 1364757609580828000,
"line_mean": 39.4266666667,
"line_max": 116,
"alpha_frac": 0.6968136466,
"autogenerated": false,
"ratio": 4.176075268817204,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015394793509272645,
"num_lines": 75
} |
"""
Defines two classes to support domain objects. Domain objects allow you to
model meta data in a more compact way.
Domain objects should always be inherited from C{DomainObject}. Then the required
properties should be defined on class level using C{DomainProperty}.
Here an example:
>>> class Author(DomainObject):
... name = DomainProperty(StringType(), None, "Name", "This is the author name.")
...
... @name.setValidate
... def _validateName(self):
... if self.name is None or len(self.name) == 0:
... raise ValueError("Name should not be empty.")
...
>>> author = Author()
>>> author.name = "Pierre"
"""
import inspect
__version__ = "$Revision-Id:$"
class DomainProperty(property):
""" Describes a property of a domain object.
Properties defined in this way are persisted and can be further described by a
documentation string, display name, and a default value. You can also provide a
    custom validation function using C{setValidate}. The existing property types
L{property_type<datafinder.core.configuration.properties.property_type>} can
be used.
"""
def __init__(self, type_, defaultValue=None, displayName="", docString=""):
property.__init__(self, self._getter, self._setter)
self.type = type_
self.__doc__ = docString
self.defaultValue = defaultValue
self.displayName = displayName
self._values = dict()
self._validate = lambda _: None
def validate(self, instance):
""" Validates the given object.
@raise ValueError: Indicates an invalid object.
"""
self.type.validate(self._getter(instance))
self._validate(instance)
def setValidate(self, function):
""" This method is intended to be used as method decorator.
>>> @name.setValidate
... def _validateName(self):
... pass
The decorated method should just expect the domain
property instance as argument. Invalid values should be
indicated using C{ValueError}. The default validation method
does nothing.
"""
self._validate = function
def _getter(self, instance):
if id(instance) in self._values:
return self._values[id(instance)]
else:
return self.defaultValue
def _setter(self, instance, value):
self._values[id(instance)] = value
def __repr__(self):
return "%s: %s\n%s" % (self.displayName, self.type.name, self.__doc__)
class DomainObject(object):
""" Base class for all domain objects.
@note: Domain object should be created using an empty constructor.
"""
def validate(self):
""" Indicates validation errors using C{ValueError}. """
for instance, _, propertyDescriptor, value in self.walk():
propertyDescriptor.validate(instance)
if isinstance(value, DomainObject):
value.validate()
def walk(self, recursively=False):
""" Returns a generator which allows walking through
all defined domain properties.
For every property the following information is returned:
- The instance on which the property is defined.
- The attribute name to which the property is bound.
- The property descriptor.
- The current value of the property.
@param recursively: Indicates whether sub domain
objects are processed as well. Default: C{False}
@type recursively: C{bool}
"""
processLater = list()
for name, propertyDescriptor in inspect.getmembers(self.__class__):
if isinstance(propertyDescriptor, DomainProperty):
value = getattr(self, name)
yield self, name, propertyDescriptor, value
if isinstance(value, DomainObject) and recursively:
processLater.append(value)
for theProperty in processLater:
for propertyInfo in theProperty.walk(recursively):
yield propertyInfo
def __cmp__(self, other):
""" Two instances are equal if all domain properties are equal. """
for _, name, __, value in self.walk():
try:
if cmp(getattr(other, name), value) != 0:
return 1
except AttributeError:
return 1
return 0
def __hash__(self):
hashValue = list()
for _, __, ___, value in self.walk():
hashValue.append(value)
return hash(tuple(hashValue))
def __repr__(self):
result = ""
for _, name, __, value in self.walk():
result += "%s: '%s' " % (name, str(value))
return result.strip()
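# Example (sketch): nesting domain objects and walking them recursively. The
# classes and property types below are illustrative; StringType and AnyType
# are assumed to come from datafinder.core.configuration.properties.property_type.
#
# class Book(DomainObject):
#     title = DomainProperty(StringType(), "", "Title", "The book title.")
#     author = DomainProperty(AnyType(), None, "Author", "The author of the book.")
#
# book = Book()
# book.title = "Domain Objects"
# book.author = Author()  # see the example in the module docstring
# book.author.name = "Pierre"
# book.validate()
# for instance, name, descriptor, value in book.walk(recursively=True):
#     print("%s = %s" % (name, value))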
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/properties/domain.py",
"copies": "1",
"size": "6707",
"license": "bsd-3-clause",
"hash": 3704548010168181000,
"line_mean": 35.469273743,
"line_max": 84,
"alpha_frac": 0.6250186372,
"autogenerated": false,
"ratio": 4.584415584415584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01900760737193654,
"num_lines": 179
} |
"""
Factory for creation of data persister.
"""
import logging
from datafinder.core.configuration.datastores.constants import DEFAULT_STORE, OFFLINE_STORE, STORAGE_REALISATION_MODE_ENUM
from datafinder.core.configuration.properties import constants as property_constants
from datafinder.core.item.data_persister import constants
from datafinder.core.item.data_persister import persisters
__version__ = "$Revision-Id:$"
_logger = logging.getLogger()
class DataPersisterFactory(object):
""" Factory creating corresponding data persister. """
def __init__(self, dataStoreHandler, dataStoreAccessManager, propertyDefinitionRegistry):
"""
Constructor.
        @param dataStoreHandler: Provides access to the configured data stores.
        @param dataStoreAccessManager: Manages file system access to the data stores.
        @param propertyDefinitionRegistry: Registry of the available property definitions.
"""
self._dataStoreHandler = dataStoreHandler
self._dataStoreAccessManager = dataStoreAccessManager
self._propertyDefinitionRegistry = propertyDefinitionRegistry
def createDataPersister(self, item):
""" Creates the suitable data persister and attaches it to the item. """
datastore = self._determineDatastore(item)
dataState = self._determineDataState(item, datastore)
if dataState == constants.ITEM_STATE_ARCHIVED_MEMBER:
rootItemPath = item.properties[property_constants.ARCHIVE_ROOT_COLLECTION_ID].value
rootItem = item.itemFactory.create(rootItemPath)
dataPersister = persisters.ArchiveMemberDataPersister(dataState, item, rootItem, self._propertyDefinitionRegistry)
elif dataState in [constants.ITEM_STATE_NULL, constants.ITEM_STATE_INACCESSIBLE,
constants.ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE]:
dataPersister = persisters.NullDataPersister(dataState)
elif datastore is None or datastore.storeType == DEFAULT_STORE:
dataPersister = persisters.DefaultDataPersister(dataState, item.fileStorer)
else:
fileSystem = self._dataStoreAccessManager.getFileSystem(datastore)
isAccessible = self._dataStoreAccessManager.isAccessible(datastore)
if fileSystem is None or not isAccessible:
dataPersister = persisters.NullDataPersister(constants.ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE)
else:
if datastore.storageRealisation == STORAGE_REALISATION_MODE_ENUM.FLAT:
baseFileStorer = fileSystem.createFileStorer("/")
dataPersister = persisters.FlatDataPersister(dataState, baseFileStorer, item, self._propertyDefinitionRegistry)
else:
fileStorer = self._createHierachicalFileStorer(datastore, fileSystem, item.path)
dataPersister = persisters.HierarchicalDataPersister(dataState, fileStorer)
if property_constants.ARCHIVE_PART_COUNT_ID in item.properties:
dataPersister = persisters.ArchiveDataPersister(dataState, item, dataPersister)
return dataPersister
def _determineDatastore(self, item):
""" Determines the data store configuration. """
try:
datastoreName = item.properties[property_constants.DATASTORE_NAME_ID].value
datastore = self._dataStoreHandler.getDataStore(datastoreName)
except KeyError:
datastore = None
return datastore
def _determineDataState(self, item, datastore):
""" Determines the data state constant. """
if datastore is None and property_constants.DATASTORE_NAME_ID in item.properties:
dataState = constants.ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE
elif datastore is None: # items without data store AND data store information
if item.isLink:
dataState = constants.ITEM_STATE_NULL
elif item.isCollection:
dataState = constants.ITEM_STATE_NULL
if property_constants.ARCHIVE_ROOT_COLLECTION_ID in item.properties:
dataState = constants.ITEM_STATE_ARCHIVED_MEMBER
else:
dataState = constants.ITEM_STATE_ACCESSIBLE
else: # items with valid data store
dataState = self.__determineDataState(datastore, item)
return dataState
@staticmethod
def __determineDataState(datastore, item):
""" Determines data state of items with valid data store. """
dataState = constants.ITEM_STATE_ACCESSIBLE
if property_constants.ARCHIVE_ROOT_COLLECTION_ID in item.properties:
dataState = constants.ITEM_STATE_ARCHIVED_MEMBER
rootItem = item.itemFactory.create(item.properties[property_constants.ARCHIVE_ROOT_COLLECTION_ID].value)
if rootItem.state == constants.ITEM_STATE_MIGRATED:
dataState = constants.ITEM_STATE_MIGRATED
elif datastore.storeType == OFFLINE_STORE:
dataState = constants.ITEM_STATE_INACCESSIBLE
elif datastore.isMigrated:
dataState = constants.ITEM_STATE_MIGRATED
elif property_constants.ARCHIVE_RETENTION_EXCEEDED_DATETIME_ID in item.properties:
try:
if datastore.readOnly:
dataState = constants.ITEM_STATE_ARCHIVED_READONLY
else:
dataState = constants.ITEM_STATE_ARCHIVED
except AttributeError:
dataState = constants.ITEM_STATE_ARCHIVED
return dataState
@staticmethod
def _createHierachicalFileStorer(datastore, fileStorerFactory, path):
""" Creates for the given item path the specific file storer object. """
effectivePath = path
try:
if path.startswith(datastore.removePathPrefix):
effectivePath = path[len(datastore.removePathPrefix):]
if not effectivePath.startswith("/"):
effectivePath = "/" + effectivePath
if effectivePath.endswith("/"):
                effectivePath = effectivePath[:-1]
except AttributeError:
effectivePath = path
return fileStorerFactory.createFileStorer(effectivePath)
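# Example (sketch) of the path mapping in _createHierachicalFileStorer:
# with datastore.removePathPrefix == "/projects" the item path
# "/projects/data/file.dat" is mapped to "/data/file.dat"; if the prefix
# does not match (or the data store defines none), the path is kept as-is.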
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/data_persister/factory.py",
"copies": "1",
"size": "8169",
"license": "bsd-3-clause",
"hash": -4236973658306092500,
"line_mean": 46.3372781065,
"line_max": 131,
"alpha_frac": 0.6731546089,
"autogenerated": false,
"ratio": 4.563687150837989,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01464727405438196,
"num_lines": 169
} |
"""
Handler for create collection wizard.
"""
from datafinder.gui.user.dialogs.creation_wizard.constants import PROPERTY_PAGE_ID, SOURCE_PAGE_ID
from datafinder.gui.user.dialogs.creation_wizard.state_handler.base_state_handler import BaseStateHandler
from datafinder.gui.user.models.repository.filter.leaf_filter import LeafFilter
__version__ = "$Revision-Id:$"
class CreateCollectionHandler(BaseStateHandler):
""" Handles collection creation. """
WINDOW_TITLE = "New Collection"
_PAGEID_TITLE_SUBTITLE_MAP = {SOURCE_PAGE_ID: ("Collection", "Creates a new collection."),
PROPERTY_PAGE_ID: ("Collection Properties", "Please attach additional information to the collection.")}
_ITEMNAME_LABEL_TEXT = "Collection name:"
def __init__(self, wizard):
""" Constructor. """
BaseStateHandler.__init__(self, wizard)
self._repositoryModel = wizard.sourceRepositoryModel
self._currentSourceIndex = None
self.lockIndex = None # Redefining it because check-in pylint wants it
def nextId(self):
""" Returns the identifier of the next page. """
nextId = -1
if self._repositoryModel.hasCustomMetadataSupport \
and self._wizard.currentId() == SOURCE_PAGE_ID:
nextId = PROPERTY_PAGE_ID
return nextId
def initializePage(self, identifier):
""" Performs initialization actions for the wizard page with the given identifier. """
if identifier == SOURCE_PAGE_ID:
self._wizard.configureSourceItemPage(LeafFilter(self._repositoryModel),
[self._repositoryModel.activeIndex],
self._ITEMNAME_LABEL_TEXT,
self._repositoryModel.isManagedRepository)
else:
indexChanged = self._currentSourceIndex != self._wizard.sourceIndexes[0]
self._currentSourceIndex = self._wizard.sourceIndexes[0]
self._wizard.configurePropertyPage(self._repositoryModel, True, self._currentSourceIndex, indexChanged)
def prepareFinishSlot(self):
""" Performs the finish slot preparation. """
self.lockIndex = self._wizard.sourceIndexes[0]
self._repositoryModel.lock([self.lockIndex])
def finishSlotCallback(self):
""" Unlocks the lock index. """
self._repositoryModel.unlock(self.lockIndex)
self._repositoryModel.activeIndex = self.lockIndex
def finishSlot(self):
""" Performs specific actions when the user commits his parameters. """
self._repositoryModel.createCollection(self._wizard.sourceItemName,
self.lockIndex,
self._wizard.properties)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/state_handler/create_collection_state_handler.py",
"copies": "1",
"size": "4712",
"license": "bsd-3-clause",
"hash": 7402252968879238000,
"line_mean": 42.0373831776,
"line_max": 137,
"alpha_frac": 0.6536502547,
"autogenerated": false,
"ratio": 4.513409961685824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02205304995270554,
"num_lines": 107
} |
"""
Handler for export of items to the unmanaged repository.
"""
from PyQt4 import QtGui
from datafinder.gui.user.dialogs.creation_wizard.constants import SOURCE_PAGE_ID, TARGET_PAGE_ID
from datafinder.gui.user.dialogs.creation_wizard.state_handler.base_state_handler import BaseStateHandler
from datafinder.gui.user.models.repository.filter.leaf_filter import LeafFilter
from datafinder.gui.user.models.repository.filter.property_filter import PropertyFilter
__version__ = "$Revision-Id:$"
class ExportHandler(BaseStateHandler):
""" Handles importing of items. """
WINDOW_TITLE = "Export Items"
_PAGEID_TITLE_SUBTITLE_MAP = {SOURCE_PAGE_ID: ("Export", "Exports items."),
TARGET_PAGE_ID: ("Destination", "Please select the destination collection.")}
def __init__(self, wizard):
""" Constructor. """
BaseStateHandler.__init__(self, wizard)
self._sourceRepositoryModel = wizard.sourceRepositoryModel
self._targetRepositoryModel = wizard.targetRepositoryModel
self._currentSourceIndex = None
self.lockIndex = None # Redefining it because check-in pylint wants it
def checkPreConditions(self):
""" Checks the preconditions. """
if self._sourceRepositoryModel is None or self._targetRepositoryModel is None:
return "Both repository models are not set. This should not happen."
        if not self._sourceRepositoryModel.initialized:
            return "Please connect the shared data repository to export items."
if self._targetRepositoryModel.isManagedRepository:
return "The export is defined from the shared data repository into the local data repository."
def nextId(self):
""" Returns the identifier of the next page. """
nextId = -1
if self._wizard.currentId() == SOURCE_PAGE_ID:
nextId = TARGET_PAGE_ID
return nextId
def initializePage(self, identifier):
""" Performs initialization actions for the wizard page with the given identifier. """
if identifier == SOURCE_PAGE_ID:
preSelectedSourceItems = self._wizard.preSelectedSourceItems
if preSelectedSourceItems is None:
preSelectedSourceItems = [self._sourceRepositoryModel.activeIndex]
self._wizard.configureSourceItemPage(PropertyFilter(self._sourceRepositoryModel,
itemSelectionMode=PropertyFilter.ALL_SELECTION_MODE),
preSelectedSourceItems, "",
False, True, QtGui.QAbstractItemView.MultiSelection,
itemCheckFunction=lambda item: item.capabilities.canRetrieveData \
or item.isCollection and item.capabilities.canArchive)
else:
self._wizard.configureTargetItemPage(LeafFilter(self._targetRepositoryModel),
[self._targetRepositoryModel.activeIndex], "", False, True)
def prepareFinishSlot(self):
""" Performs the finish slot preparation. """
self.lockIndex = self._wizard.targetIndexes[0]
self._targetRepositoryModel.lock([self.lockIndex])
def finishSlotCallback(self):
""" Unlocks the lock index. """
self._targetRepositoryModel.unlock(self.lockIndex)
self._targetRepositoryModel.activeIndex = self.lockIndex
def finishSlot(self):
""" Performs specific actions when the user commits his parameters. """
self._targetRepositoryModel.performImport(self._wizard.sourceIndexes,
self.lockIndex)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/state_handler/export_state_handler.py",
"copies": "1",
"size": "5689",
"license": "bsd-3-clause",
"hash": 8104145582584221000,
"line_mean": 45.0165289256,
"line_max": 134,
"alpha_frac": 0.651608367,
"autogenerated": false,
"ratio": 4.7094370860927155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018956572173630632,
"num_lines": 121
} |
"""
Implements actions of the data repository.
"""
import sys
from datafinder.core.configuration.properties.constants import UNMANAGED_SYSTEM_PROPERTY_CATEGORY, MANAGED_SYSTEM_PROPERTY_CATEGORY
from datafinder.core.error import ItemError, PropertyError
from datafinder.gui.user.common.util import StartInQtThread
from datafinder.gui.user.common.fileaction_handler import FileActionHandler
from datafinder.gui.user.models.repository.clipboard import ItemClipboard
__version__ = "$Revision-Id:$"
class ActionHandler(object):
"""
Implements actions of the data repository.
"""
def __init__(self, parentModel, repository):
"""
Constructor.
@param parentModel: The repository model.
@type parentModel: L{RepositoryModel<datafinder.gui.user.models.repository.RepsitoryModel>}
@param repository: The underlying repository instance.
@type repository: L{Repository<datafinder.core.repository.Repository>}
"""
self._parentModel = parentModel
self._repository = repository
self._searchResult = list()
self._itemClipboard = ItemClipboard(self._parentModel)
self._fileActionHandler = None
if not FileActionHandler is None:
self._fileActionHandler = FileActionHandler()
def refresh(self, index, itemStateOnly=False):
"""
Refreshes the given item referred to as C{index}.
@param index: Index identifying the underlying item.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param itemStateOnly: If set it indicates that only the item
state is refreshed but no structural information. Default is C{False}
@type itemStateOnly: C{bool}
"""
node = self._parentModel.nodeFromIndex(index)
if not itemStateOnly:
self._parentModel.lock([index])
node.refresh(itemStateOnly)
if not itemStateOnly:
self._parentModel.unlock(index)
self._parentModel.activeIndex = index
def delete(self, indexes, ignoreStorageLocation=False):
"""
Deletes the items referred to as C{indexes}.
        @param indexes: Indexes identifying the items.
        @type indexes: C{list} of L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param ignoreStorageLocation: Optional flag indicating whether the storage location
in case of managed repository items is ignored or not. Default: C{False}
@type ignoreStorageLocation: C{bool}
@raise ItemError: Indicating problems on deletion.
"""
for index in indexes:
if index.isValid():
item = self._parentModel.nodeFromIndex(index)
item.delete(ignoreStorageLocation)
del item
def copy(self, sourceIndexes, targetParentIndex):
"""
Copies the item referenced by C{sourceIndex} under the item referenced
by C{targetParentIndex}.
@param sourceIndexes: List of Indexes identifying the source items.
@type sourceIndexes: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param targetParentIndex: Index identifying the parent item of the source item copies.
@type targetParentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@raise ItemError: Indicating problems on copying.
"""
for sourceIndex in sourceIndexes:
if sourceIndex.isValid():
sourceItem = self._parentModel.nodeFromIndex(sourceIndex)
targetParentItem = self._parentModel.nodeFromIndex(targetParentIndex)
targetName = self._repository.determineUniqueItemName(sourceItem.name, targetParentItem)
targetItem = self._createNewItem(
targetName, targetParentItem, sourceItem.isCollection,
sourceItem.linkTarget, sourceItem.isLink)
try:
sourceItem.copy(targetItem)
except ItemError, error:
targetItem.invalidate()
raise error
def _createNewItem(self, name, parent, isCollection=False,
linkTargetItem=None, isLink=False):
""" Item factory helper method. """
if isCollection:
item = self._repository.createCollection(name, parent)
elif isLink:
item = self._repository.createLink(name, linkTargetItem, parent)
else:
item = self._repository.createLeaf(name, parent)
return item
def move(self, sourceIndexes, targetParentIndex, newName=None):
"""
Moves the item referenced by C{sourceIndex} under the
item referenced by C{targetParentIndex}.
        @param sourceIndexes: List of indexes identifying the source items.
        @type sourceIndexes: C{list} of L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param targetParentIndex: Index identifying the new parent item of the source item.
@type targetParentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
        @param newName: Optional new name of the moved item.
@type newName: C{unicode}
@raise ItemError: Indicating problems on moving.
"""
for sourceIndex in sourceIndexes:
if sourceIndex.isValid():
sourceItem = self._parentModel.nodeFromIndex(sourceIndex)
targetParentItem = self._parentModel.nodeFromIndex(targetParentIndex)
if targetParentItem.path.startswith(sourceItem.path):
postPath = targetParentItem.path[len(sourceItem.path):]
if len(postPath) == 0 or postPath[0] == "/":
raise ItemError("Cannot move item in its own sub-structure.")
targetName = self._repository.determineUniqueItemName(newName or sourceItem.name, targetParentItem)
                targetItem = self._createNewItem(
                    targetName, targetParentItem, sourceItem.isCollection,
                    sourceItem.linkTarget, sourceItem.isLink)
                try:
                    sourceItem.move(targetItem)
                except ItemError, error:
                    targetItem.invalidate()
                    raise error
else:
del sourceItem
@StartInQtThread()
def updateProperties(self, index, changedProperties, deletablePropertyIds):
"""
Updates the properties of the item referenced by C{index}.
@param index: Index identifying the source item.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param changedProperties: List of properties to add or update.
@type changedProperties: C{list} of L{Property<datafinder.core.item.property.Property>}
@param deletablePropertyIds: List of property identifier to remove.
@type deletablePropertyIds: C{list} of C{unicode}
"""
if index.isValid():
item = self._parentModel.nodeFromIndex(index)
if len(changedProperties) > 0 or len(deletablePropertyIds) > 0:
item.updateProperties(changedProperties)
item.deleteProperties(deletablePropertyIds)
self._parentModel.itemDataChangedSignal(index)
def copyProperties(self, sourceIndex, targetIndex):
"""
Copies the properties of item referenced by C{sourceIndex}
to the item referenced by C{targetIndex}. Only data model or
individual properties are copied.
@param sourceIndex: Index identifying the source item.
@type sourceIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param targetIndex: Index identifying the target item.
@type targetIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if sourceIndex.isValid() and targetIndex.isValid():
sourceItem = self._parentModel.nodeFromIndex(sourceIndex)
targetItem = self._parentModel.nodeFromIndex(targetIndex)
properties = list()
for property_ in sourceItem.properties.values():
if not property_.propertyDefinition.category \
in [UNMANAGED_SYSTEM_PROPERTY_CATEGORY, MANAGED_SYSTEM_PROPERTY_CATEGORY]:
properties.append(property_)
targetItem.updateProperties(properties)
self._parentModel.itemDataChangedSignal(targetIndex)
def search(self, index, restrictions):
"""
Performs a search request at the given collections with the given restrictions.
@param index: Index identifying the collection that is searched.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param restrictions: A string specifying the restrictions.
@type restrictions: C{unicode}
@raise CoreError: Indicating problems on search.
"""
collection = self._parentModel.nodeFromIndex(index)
searchResult = self._repository.search(restrictions, collection)
self._parentModel.searchResultChangedSignal(searchResult)
def createCollection(self, name, parentIndex, properties=None):
"""
Creates the collection with the given properties.
@param name: Name of the new collection.
@type name: C{unicode}
@param parentIndex: Index identifying the parent item of the new collection.
@type parentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param properties: Optional additional properties.
@type properties: C{list} of L{Property<datafinder.core.item.property.Property>}
@raise ItemError: Indicating problems on creation.
"""
if properties is None:
properties = list()
parentItem = self._parentModel.nodeFromIndex(parentIndex)
collection = self._createNewItem(name, parentItem, True)
try:
collection.create(properties)
except (PropertyError, ItemError), error:
collection.invalidate()
raise error
def createLeaf(self, name, parentIndex, properties, fileObject=None):
"""
Creates the leaf.
@param name: Name of the new leaf.
@type name: C{unicode}
        @param parentIndex: Index identifying the parent item of the new leaf.
        @type parentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
        @param properties: Additional properties.
        @type properties: C{list} of L{Property<datafinder.core.item.property.Property>}
        @param fileObject: Optional file-like object whose content is stored in the new leaf.
        @type fileObject: C{object} implementing the file protocol
@raise ItemError: Indicating problems on creation.
"""
parentItem = self._parentModel.nodeFromIndex(parentIndex)
leaf = self._createNewItem(name, parentItem)
try:
leaf.create(properties)
except ItemError, error:
leaf.invalidate()
raise error
else:
if not fileObject is None:
leaf.storeData(fileObject)
def createLink(self, name, parentIndex, targetIndex, properties=None):
"""
Creates the link.
@param name: Name of the new link.
@type name: C{unicode}
        @param parentIndex: Index identifying the parent item of the new link.
@type parentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@param targetIndex: Identifies the item the link should point to.
@type targetIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@raise ItemError: Indicating problems on creation.
"""
parentItem = self._parentModel.nodeFromIndex(parentIndex)
targetItem = self._parentModel.nodeFromIndex(targetIndex)
if sys.platform == "win32" and not name.endswith(".lnk") and parentItem.uri.startswith("file:///"):
name += ".lnk"
name = self._repository.determineUniqueItemName(name, parentItem)
link = self._createNewItem(name, parentItem, linkTargetItem=targetItem, isLink=True)
if properties is None:
properties = list()
try:
link.create(properties)
except ItemError, error:
link.invalidate()
raise error
def createArchive(self, sourceIndex, targetParentIndex, properties=None):
"""
Create an archive.
@param sourceIndex: Index identifying the item which should be archived.
@type sourceIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
        @param targetParentIndex: Identifies the parent of the new archive.
@type targetParentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
sourceParentItem = self._parentModel.nodeFromIndex(sourceIndex)
targetParentItem = self._parentModel.nodeFromIndex(targetParentIndex)
self._repository.createArchive(sourceParentItem, targetParentItem, properties)
def isValidIdentifier(self, identifier):
"""
Checks whether the given string describes a valid identifier.
@param identifier: String describing the identifier.
@type identifier: C{unicode}
        @return: C{(True, None)} if it is valid, otherwise C{(False, errorPosition)}.
@rtype: C{tuple} of C{bool}, C{int}
"""
return self._repository.isValidIdentifier(identifier)
def isValidPropertyIdentifier(self, identifier):
"""
Checks whether the given string describes a valid property identifier.
@param identifier: String describing the property identifier.
@type identifier: C{unicode}
@return: C{True} if it is valid, otherwise C{False}
@rtype: C{bool}
"""
return self._repository.isValidPropertyIdentifier(identifier)
def performImport(self, sourceIndexes, targetParentIndex, defaultProperties=None):
"""
Imports the given items potentially belonging to another
data repository below the given item specified by C{targetParentIndex}.
@raise ItemError: Indicates problems on import.
"""
targetParentItem = self._parentModel.nodeFromIndex(targetParentIndex)
if defaultProperties is None:
defaultProperties = list()
failedItems = list()
for sourceIndex in sourceIndexes:
if sourceIndex.isValid():
sourceItem = sourceIndex.model().nodeFromIndex(sourceIndex)
targetItemName = self._repository.determineUniqueItemName(sourceItem.name, targetParentItem)
try:
self._repository.performImport(sourceItem, targetParentItem,
targetItemName, defaultProperties[:])
except ItemError, error:
failedItems.append((sourceItem, error.message))
if len(failedItems) > 0:
errorMessage = "Problems during import of the following items:\n"
for item, message in failedItems:
errorMessage += "\n" + item.path + " Reason: " + message
raise ItemError(errorMessage)
def performOpen(self, index):
"""
Starts an external viewer for the given item identified by C{index}.
        @param index: Index identifying the item that is opened.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self._fileActionHandler.performOpen(self._parentModel.nodeFromIndex(index))
def performPrint(self, index):
"""
Prints the given item identified by C{index}.
        @param index: Index identifying the item that is printed.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self._fileActionHandler.performPrint(self._parentModel.nodeFromIndex(index))
def commitArchive(self, indexes):
"""
Commits changes of the given archives.
@param indexes: Indexes of archives which should be committed.
@type indexes: C{list} of L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
for index in indexes:
if index.isValid():
item = self._parentModel.nodeFromIndex(index)
self._repository.commitArchive(item)
def searchPrincipal(self, pattern, searchMode):
"""
Just redirects to the repository functionality.
@see: L{searchPrincipal<datafinder.core.repository.Repository.searchPrincipal>}
"""
return self._repository.searchPrincipal(pattern, searchMode)
@property
def hasMetadataSearchSupport(self):
""" Flag indicating support for meta data search. """
return self._repository.hasMetadataSearchSupport
@property
def hasCustomMetadataSupport(self):
""" Flag indicating support for meta data search. """
return self._repository.hasCustomMetadataSupport
@property
def isManagedRepository(self):
""" Flag indicating whether the repository is managed or not. """
return self._repository.configuration.isManagedRepository
@property
def clipboard(self):
"""
Getter for the item clip-board.
@return: The item clip-board.
@rtype: L{ItemClipboard<datafinder.gui.user.models.clipboard.ItemClipboard>}
"""
return self._itemClipboard
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/models/repository/action_handler.py",
"copies": "1",
"size": "19922",
"license": "bsd-3-clause",
"hash": 6192404126138526000,
"line_mean": 40.4776119403,
"line_max": 131,
"alpha_frac": 0.6250878426,
"autogenerated": false,
"ratio": 4.751252086811352,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02044566420797485,
"num_lines": 469
} |
"""
Implements adapter for accessing the file system.
"""
from datetime import datetime
import mimetypes
import os
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata import constants, value_mapping
from datafinder.persistence.metadata.metadatastorer import NullMetadataStorer
__version__ = "$Revision-Id:$"
class MetadataFileSystemAdapter(NullMetadataStorer):
""" Implements meta data storer interface for a standard file system. """
def __init__(self, identifier, itemIdMapper):
"""
@param identifier: Identifier of the item.
@type identifier: C{unicode}
@param itemIdMapper: Utility object allowing item identifier mapping.
@type itemIdMapper: L{ItemIdentifierMapper<datafinder.persistence.adapters.filesystem.util.ItemIdentifierMapper>}
"""
NullMetadataStorer.__init__(self, identifier)
self.__itemIdMapper = itemIdMapper
self.__persistenceId = self.__itemIdMapper.mapIdentifier(identifier)
def retrieve(self, propertyIds=None):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
try:
rawResult = os.stat(self.__persistenceId)
except OSError, error:
reason = os.strerror(error.errno)
errorMessage = "Cannot retrieve properties of collection '%s'. Reason: '%s'" % (self.identifier, reason)
raise PersistenceError(errorMessage)
else:
mappedResult = self._mapRawResult(rawResult)
return self._filterResult(propertyIds, mappedResult)
def _mapRawResult(self, rawResult):
""" Maps the os module specific result to interface format. """
mappedResult = dict()
mappedResult[constants.CREATION_DATETIME] = value_mapping.MetadataValue(str(rawResult.st_ctime), datetime)
mappedResult[constants.MODIFICATION_DATETIME] = value_mapping.MetadataValue(str(rawResult.st_mtime), datetime)
mappedResult[constants.SIZE] = value_mapping.MetadataValue(str(rawResult.st_size))
mappedResult[constants.OWNER] = value_mapping.MetadataValue("")
mimeType = mimetypes.guess_type(self.__persistenceId, False)
if mimeType[0] is None:
mappedResult[constants.MIME_TYPE] = value_mapping.MetadataValue("")
else:
mappedResult[constants.MIME_TYPE] = value_mapping.MetadataValue(mimeType[0])
return mappedResult
@staticmethod
def _filterResult(selectedPropertyIds, mappedResult):
""" Filters the result so it contains only the specified properties. """
if not selectedPropertyIds is None and len(selectedPropertyIds) > 0:
result = dict()
for propertyId in selectedPropertyIds:
if propertyId in mappedResult:
result[propertyId] = mappedResult[propertyId]
return result
else:
return mappedResult
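# Example (sketch): retrieving a subset of the standard properties. The
# identity mapper below is hypothetical; in the adapter framework an
# ItemIdentifierMapper instance performs the identifier mapping.
#
# class _IdentityMapper(object):
#     @staticmethod
#     def mapIdentifier(identifier):
#         return identifier
#
# adapter = MetadataFileSystemAdapter("/tmp/data.txt", _IdentityMapper())
# properties = adapter.retrieve([constants.SIZE, constants.MIME_TYPE])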
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/filesystem/metadata/adapter.py",
"copies": "1",
"size": "4811",
"license": "bsd-3-clause",
"hash": 6896586868499910000,
"line_mean": 41.7363636364,
"line_max": 121,
"alpha_frac": 0.6929952193,
"autogenerated": false,
"ratio": 4.421875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01767456475699129,
"num_lines": 110
} |
"""
Implements an adapter for manipulating an Amazon S3 file system.
It requires the name of an Amazon S3 bucket in which the data items are
stored. A bucket is used similar to a directory, except that no collections
can be created within it.
The keys with the identifiers of the items are stored in the bucket.
"""
from atexit import register
from os import remove
from locale import resetlocale, setlocale, LC_TIME, Error
import logging
from tempfile import NamedTemporaryFile
from boto.exception import S3ResponseError, S3CreateError, BotoClientError, S3DataError
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.data.datastorer import NullDataStorer
__version__ = "$Revision-Id$"
UTF_ENCODING = "UTF-8"
LOCALE_TIME = "C"
_temporaryFiles = list()
class DataS3Adapter(NullDataStorer):
""" An adapter instance represents an item within the Amazon S3 file system. """
def __init__(self, identifier, connectionPool, bucketname):
"""
@param identifier: Logical identifier of the resource.
@type identifier: C{unicode}
@param connectionPool: Connection pool - connection to S3
@type connectionPool: L{Connection<datafinder.persistence.amazonS3.connection_pool.S3ConnectionPool>}
@param bucketname: Name of the bucket in Amazon S3, specified in the data location of the configuration.
@type bucketname: C{unicode}
"""
NullDataStorer.__init__(self, identifier)
self._connectionPool = connectionPool
self._bucketname = bucketname
self._bucket = self._getBucket()
self._keyname = identifier.encode(UTF_ENCODING)
self._key = None
def _getBucket(self):
""" Gets a s3 bucket, to access and store data items on the service """
bucket = None
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
bucket = connection.lookup(self._bucketname)
if bucket is None:
bucket = connection.create_bucket(self._bucketname)
except (S3ResponseError, S3CreateError), error:
if bucket is None:
raise PersistenceError("Cannot determine item existence. Reason: '%s'" % error.error_message)
else:
errorMessage = u"Cannot create resource '%s'. Reason: '%s'" % (self.identifier, error.error_message)
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
return bucket
@property
def isLeaf(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
        return not self._isRoot(self._keyname)
@staticmethod
def _isRoot(key):
""" Determines if the root is accessed. """
return key == "/"
@property
def isCollection(self):
""" @see: L{NullDataStorer<datafinder.persistence.metadata.metadatastorenr.NullDataStorer>} """
if self._keyname == "/":
return True
else:
return False
@property
def canAddChildren(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
return self.isCollection
def createResource(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
            if not self._isRoot(self._keyname):
self._key = self._bucket.get_key(self._keyname)
if not self._key:
self._key = self._bucket.new_key(self._keyname)
except (S3ResponseError, PersistenceError), error:
errorMessage = "Cannot create resource '%s'. Reason: '%s'" % (self.identifier, error)
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
return self._key
def getChildren(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
result = list()
if self.isCollection:
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
result = self._bucket.get_all_keys()
except S3ResponseError, error:
errorMessage = u"Cannot retrieve children of item '%s'. Reason: '%s'" % (self.identifier, error)
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
return result
def writeData(self, data):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
self._key = self.createResource()
self._key.set_contents_from_file(data)
except (PersistenceError, S3ResponseError, S3DataError), error:
errorMessage = "Unable to write data to '%s'. " % self.identifier \
+ "Reason: %s" % error
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
def readData(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
fileObject = NamedTemporaryFile(delete=False)
self.createResource()
self._key.get_contents_to_filename(fileObject.name)
_temporaryFiles.append(fileObject)
return fileObject
except (PersistenceError, S3ResponseError, BotoClientError), error:
errorMessage = "Unable to read data from '%s'. " % self.identifier \
+ "Reason: %s" % error
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
def delete(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
if self.isLeaf:
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
self.createResource()
self._key.delete()
except (PersistenceError, S3ResponseError), error:
errorMessage = "Unable to delete item '%s'. " % self.identifier \
+ "Reason: %s" % error
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
else:
raise PersistenceError("Unable to delete item '%s'. " % self.identifier)
def move(self, destination):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
self.copy(destination)
self.delete()
def copy(self, destination):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
destination.writeData(self.readData())
except (S3ResponseError, S3CreateError, PersistenceError), error:
errorMessage = "Unable to move item '%s' to '%s'." % (self.identifier, self._bucket.identifier)\
+ "Reason: %s" % error
raise PersistenceError(errorMessage)
finally:
self._connectionPool.release(connection)
self._resetLocale()
def exists(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
exists = True
if self.isLeaf:
setlocale(LC_TIME, LOCALE_TIME)
connection = self._connectionPool.acquire()
try:
key = self._bucket.get_key(self._keyname)
if key is None:
exists = False
except S3ResponseError, error:
raise PersistenceError("Cannot determine item existence. " \
+ "Reason: '%s'" % error.error_message)
finally:
self._connectionPool.release(connection)
self._resetLocale()
return exists
@staticmethod
def _resetLocale():
"""Reseting the process time settings"""
try:
resetlocale(LC_TIME)
except Error:
setlocale(LC_TIME, "C")
@register
def _cleanupTemporaryFile(fileList=None):
""" Cleaning up temporary files. Problems are sent to the debug logger"""
if fileList:
tempFiles = fileList
else:
tempFiles = _temporaryFiles
for tempFile in tempFiles:
try:
tempFile.close()
remove(tempFile.name)
except (OSError, PersistenceError):
_log = logging.getLogger("")
_log.debug("Cannot clean up temporary file '%s'" % tempFile.name)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/amazonS3/data/adapter.py",
"copies": "1",
"size": "11697",
"license": "bsd-3-clause",
"hash": 1748728832780645000,
"line_mean": 37.2583892617,
"line_max": 116,
"alpha_frac": 0.5992989655,
"autogenerated": false,
"ratio": 4.474751338944147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01771799198153613,
"num_lines": 298
} |
"""
Implements a registry for data formats.
"""
from copy import copy
from mimetypes import guess_type
from datafinder.core.configuration.dataformats.dataformat import DataFormat
__version__ = "$Revision-Id:$"
class DataFormatRegistry(object):
""" Implements a registry for data formats. """
__DEFAULT_DATAFORMAT = DataFormat("Default")
def __init__(self):
""" Constructor. """
self._nameDataFormatMap = dict()
self._mimeTypeDataFormatMap = dict()
self._fileSuffixDataFormatMap = dict()
def load(self):
""" Initializes the data format registry. """
self.__registerStandardDataFormats()
def __registerStandardDataFormats(self):
""" Registers the standard data formats. """
self.register(DataFormat("WORD", ["application/msword"], "doc_format"))
self.register(DataFormat("EXCEL", ["application/vnd.ms-excel"], "xls_format"))
self.register(DataFormat("POWERPOINT", ["application/vnd.ms-powerpoint"], "ppt_format"))
self.register(DataFormat("PDF", ["application/pdf"], "pdf_format"))
self.register(DataFormat("XML", ["text/xml", "application/xml"], "xml_format", [".xml"]))
self.register(DataFormat("HTML", ["text/html"], "html_format"))
self.register(DataFormat("PYTHON", ["text/x-python"], "py_format", [".pyc", ".pyd"]))
self.register(DataFormat("BINARY", ["application/octet-stream"], "bin_format", [".bin"]))
self.register(DataFormat("TEXT", ["text/plain"], "txt_format", [".log", ".java", ".cpp", ".js", ".php", ".csv", ".ini", ".rtf"]))
self.register(DataFormat("ARCHIVE", ["application/zip", "application/x-tar"], "zip_format", [".7z", ".bz2", ".rar"]))
self.register(DataFormat("AUDIO", ["audio/mpeg", "audio/x-wav", "audio/midi"], "audio_format", [".ogg", ".wma"]))
self.register(DataFormat("VIDEO", ["video/mpeg", "video/x-msvideo", "video/quicktime"], "video_format", [".xvid"]))
self.register(DataFormat("IMAGE", ["image/jpeg", "image/tiff"], "image_format", [".gif", ".png", ".eps", ".bmp"]))
self.register(DataFormat("VISIO", [], "vsd_format", [".vsd"]))
def register(self, dataFormat):
"""
Registers a data format. If a data format with the given name
already exists, it will be replaced.
@param dataFormat: The format which has to be registered.
@type dataFormat: L{DataFormat<datafinder.core.configuration.dataformats.dataformat.DataFormat>}
"""
self.unregister(dataFormat)
self._nameDataFormatMap[dataFormat.name] = dataFormat
for mimeType in dataFormat.mimeTypes:
if mimeType in self._mimeTypeDataFormatMap:
self._mimeTypeDataFormatMap[mimeType].append(dataFormat)
else:
self._mimeTypeDataFormatMap[mimeType] = [dataFormat]
for fileSuffix in dataFormat.additionalFileSuffixes:
if fileSuffix in self._fileSuffixDataFormatMap:
self._fileSuffixDataFormatMap[fileSuffix].append(dataFormat)
else:
self._fileSuffixDataFormatMap[fileSuffix] = [dataFormat]
def unregister(self, dataFormat):
"""
Unregisters the given data format.
@param dataFormat: The format which has to be unregistered.
@type dataFormat: L{DataFormat<datafinder.core.configuration.dataformats.dataformat.DataFormat>}
"""
if dataFormat.name in self._nameDataFormatMap:
del self._nameDataFormatMap[dataFormat.name]
for mimeType in dataFormat.mimeTypes:
if mimeType in self._mimeTypeDataFormatMap:
self._mimeTypeDataFormatMap[mimeType].remove(dataFormat)
if len(self._mimeTypeDataFormatMap[mimeType]) == 0:
del self._mimeTypeDataFormatMap[mimeType]
for fileSuffix in dataFormat.additionalFileSuffixes:
if fileSuffix in self._fileSuffixDataFormatMap:
self._fileSuffixDataFormatMap[fileSuffix].remove(dataFormat)
if len(self._fileSuffixDataFormatMap[fileSuffix]) == 0:
del self._fileSuffixDataFormatMap[fileSuffix]
def hasDataFormat(self, dataFormat):
"""
Checks whether the specific data format exists.
        @param dataFormat: The data format whose existence is checked.
@type dataFormat: L{DataFormat<datafinder.core.configuration.dataformats.dataformat.DataFormat>}
@return: Flag indicating whether it is registered.
@rtype: C{bool}
"""
return dataFormat.name in self._nameDataFormatMap
def getDataFormat(self, name):
"""
        Retrieves the data format for the given name or the default data format if the name is unknown.
@param name: Name of the data format.
@type name: C{unicode}
@return: The data format associated with C{name}.
@rtype: L{DataFormat<datafinder.core.configuration.dataformats.dataformat.DataFormat>}
"""
if name in self._nameDataFormatMap:
dataFormat = self._nameDataFormatMap[name]
else:
dataFormat = self.defaultDataFormat
return dataFormat
def determineDataFormat(self, dataFormatName=None, mimeType=None, baseName=None):
"""
Determines the data format using the given data format name, MIME type, base name.
@param dataFormatName: Explicit name of a data format or C{None} which is the default value.
@type dataFormatName: C{unicode}
@param mimeType: MIME type or C{None} which is the default value.
@type mimeType: C{unicode}
        @param baseName: Base name or C{None} which is the default value.
        @type baseName: C{unicode}
"""
if not dataFormatName is None:
dataFormat = self.getDataFormat(dataFormatName)
else:
dataFormat = self._determineDataFormat(mimeType, baseName)
return dataFormat
def _determineDataFormat(self, mimeType=None, baseName=None):
"""
Guesses the data format for the given MIME type and/or base name.
First a MIME type based resolution is tried. Otherwise the file suffix
of the base name is explicitly used to resolve the data format. If everything
fails, the default data format is returned.
"""
dataFormat = None
if mimeType is None and baseName is None:
dataFormat = self.defaultDataFormat
else:
if mimeType is None:
mimeType = guess_type(baseName, False)[0]
if mimeType is None:
dataFormat = self._determineDataFormatUsingFileSuffix(baseName)
else:
mimeType = mimeType.lower()
if mimeType in self._mimeTypeDataFormatMap:
dataFormat = self._mimeTypeDataFormatMap[mimeType][0]
elif not baseName is None:
dataFormat = self._determineDataFormatUsingFileSuffix(baseName)
else:
dataFormat = self.defaultDataFormat
return dataFormat
def _determineDataFormatUsingFileSuffix(self, baseName):
""" Determines the file data format using the file suffix. """
startPosition = baseName.rfind(".")
if startPosition != -1:
fileSuffix = baseName[startPosition:]
fileSuffix = fileSuffix.lower()
if fileSuffix in self._fileSuffixDataFormatMap:
dataFormat = self._fileSuffixDataFormatMap[fileSuffix][0]
else:
dataFormat = self.defaultDataFormat
else:
dataFormat = self.defaultDataFormat
return dataFormat
@property
def defaultDataFormat(self):
""" Returns the default data format. """
return copy(self.__DEFAULT_DATAFORMAT)
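The registry above can be exercised as follows; this short usage sketch relies only on the standard registrations performed during load():

registry = DataFormatRegistry()
registry.load()
# MIME type based resolution: "report.pdf" resolves to the PDF format.
pdfFormat = registry.determineDataFormat(baseName="report.pdf")
# Suffix based fallback: ".vsd" has no guessable MIME type, only a registered suffix.
visioFormat = registry.determineDataFormat(baseName="drawing.vsd")
# Unknown inputs yield a copy of the default data format.
fallbackFormat = registry.determineDataFormat(baseName="unknown.xyz")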
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/dataformats/registry.py",
"copies": "1",
"size": "10048",
"license": "bsd-3-clause",
"hash": 5928433387172368000,
"line_mean": 42.2643171806,
"line_max": 137,
"alpha_frac": 0.6227109873,
"autogenerated": false,
"ratio": 4.397374179431072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02292624423324649,
"num_lines": 227
} |
"""
Implements test cases for the privileges mapping.
"""
import unittest
from webdav.Constants import TAG_READ, TAG_READ_ACL, TAG_WRITE, TAG_WRITE_ACL, TAG_ALL, \
TAG_WRITE_CONTENT, TAG_WRITE_PROPERTIES, TAG_BIND, TAG_UNBIND, \
TAG_READ_CURRENT_USER_PRIVILEGE_SET, TAG_UNLOCK
from webdav.acp import ACL, ACE, GrantDeny, Privilege
from datafinder.persistence.adapters.webdav_.privileges.privileges_mapping import PrivilegeMapper
from datafinder.persistence.principal_search.constants import ALL_PRINCIPAL
from datafinder.persistence.principal_search.principal import Principal
from datafinder.persistence.privileges.ace import AccessControlListEntry
from datafinder.persistence.privileges import constants
__version__ = "$Revision-Id:$"
_VALID_WEBDAV_ACL = ACL()
_VALID_WEBDAV_ACL.aces = [TAG_READ]
class PrivilegeMapperTestCase(unittest.TestCase):
""" Implements test cases for the privilege mapping. """
def setUp(self):
""" Creates test setup. """
self._validWebdavAcl = ACL()
self._validInterfaceAcl = list()
self._initWebdavAcl()
self._initInterfaceAcl()
self._privilegeMapper = PrivilegeMapper("http://etst.de/users/", "http://etst.de/groups/")
def _initWebdavAcl(self):
""" Builds an ACL of the WebDAV library. """
aces = list()
ace = ACE() # ace of user test
ace.principal.principalURL = "http://etst.de/users/test"
grantDeny = GrantDeny()
grantDeny.grantDeny = 1
grantDeny.privileges = [Privilege(TAG_READ), Privilege(TAG_WRITE)]
ace.grantDenies.append(grantDeny)
aces.append(ace)
ace = ACE()
ace.principal.principalURL = "http://etst.de/users/test"
grantDeny = GrantDeny()
grantDeny.grantDeny = 0
grantDeny.privileges = [Privilege(TAG_WRITE_ACL)]
ace.grantDenies.append(grantDeny)
aces.append(ace)
        ace = ACE() # ace granting privileges to all principals
ace.principal.property = TAG_ALL
grantDeny = GrantDeny()
grantDeny.grantDeny = 1
grantDeny.privileges = [Privilege(TAG_READ), Privilege(TAG_WRITE)]
ace.grantDenies.append(grantDeny)
aces.append(ace)
self._validWebdavAcl.aces = aces
def _initInterfaceAcl(self):
""" Builds corresponding interface-specific ACL. """
principal = Principal("test", displayName="test")
ace = AccessControlListEntry(principal)
ace.grantedPrivileges = [constants.READ_PRIVILEGE, constants.WRITE_PRIVILEGE]
ace.deniedPrivileges = [constants.WRITE_PRIVILEGES_PRIVILEGE]
self._validInterfaceAcl.append(ace)
principal = Principal(ALL_PRINCIPAL)
ace = AccessControlListEntry(principal)
ace.grantedPrivileges = [constants.READ_PRIVILEGE, constants.WRITE_PRIVILEGE]
self._validInterfaceAcl.append(ace)
def testMapAcl(self):
""" Demonstrates default behavior of the mapAcl method. """
self.assertEquals(self._privilegeMapper.mapAcl(self._validInterfaceAcl), self._validWebdavAcl)
self.assertEquals(self._privilegeMapper.mapAcl([]), self._validWebdavAcl)
self.assertRaises(TypeError, self._privilegeMapper.mapAcl, None)
def testMapPersistenceAcl(self):
""" Demonstrates default behavior of the mapPersistenceAcl method. """
self.assertTrue(self._areInterfaceAclsEqual(self._privilegeMapper.mapPersistenceAcl(self._validWebdavAcl),
self._validInterfaceAcl))
self.assertEquals(self._privilegeMapper.mapPersistenceAcl(ACL()), list())
self.assertRaises(AttributeError, self._privilegeMapper.mapPersistenceAcl, None)
@staticmethod
def _areInterfaceAclsEqual(firstAcl, secondAcl):
""" Checks whether two ACLs on interface level are equal. """
equal = True
for ace in firstAcl:
if not ace in secondAcl:
equal = False
break
return equal
    def testMapPersistencePrivileges(self):
""" Demonstrates the mapping of persistence privileges. """
self.assertEquals(self._privilegeMapper.mapPersistencePrivileges([Privilege(TAG_READ), Privilege(TAG_READ_ACL),
Privilege(TAG_WRITE), Privilege(TAG_WRITE_ACL),
Privilege(TAG_ALL), Privilege(TAG_WRITE_CONTENT),
Privilege(TAG_WRITE_PROPERTIES), Privilege(TAG_BIND),
Privilege(TAG_UNBIND),
Privilege(TAG_READ_CURRENT_USER_PRIVILEGE_SET)]),
[constants.READ_PRIVILEGE, constants.READ_PRIVILEGES_PRIVILEGE, constants.WRITE_PRIVILEGE,
constants.WRITE_PRIVILEGES_PRIVILEGE, constants.ALL_PRIVILEGE, constants.WRITE_CONTENT_PRIVILEGE,
constants.WRITE_PROPERTIES_PRIVILEGE, constants.ADD_ITEM_PRIVILEGE, constants.REMOVE_ITEM_PRIVILEGE,
constants.READ_USER_PRIVILEGES_PRIVILEGE])
self.assertEquals(self._privilegeMapper.mapPersistencePrivileges([Privilege(TAG_UNLOCK)]), list())
self.assertRaises(AttributeError, self._privilegeMapper.mapPersistencePrivileges, [None])
| {
"repo_name": "DLR-SC/DataFinder",
"path": "test/unittest/datafinder_test/persistence/adapters/webdav_/privileges/privileges_mapping_test.py",
"copies": "1",
"size": "7464",
"license": "bsd-3-clause",
"hash": 2939977557647336400,
"line_mean": 44.9433962264,
"line_max": 127,
"alpha_frac": 0.640943194,
"autogenerated": false,
"ratio": 4.142064372918979,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016523842029018035,
"num_lines": 159
} |
"""
Implements test cases for the SFTP-specific file system factory.
"""
import unittest
from datafinder.persistence.adapters.sftp import factory
from datafinder.persistence.error import PersistenceError
class ParseDiskFreeOutputParserTestCase(unittest.TestCase):
def testExpectedDefaultCase(self):
dfOut = (
"Filesystem 1K-blocks Used Available Use% Mounted on\n"
"/dev/sdb1 103079200 245600 97590824 1% /home\n")
availableSpace = factory._parseDiskFreeCommandOutForAvailableSpace(dfOut)
self.assertEquals(availableSpace, 99933003776)
def testMultipleDevices(self):
dfOut = (
"Filesystem 1K-blocks Used Available Use% Mounted on\n"
"/dev/sdb1 103079200 245600 200 1% /home\n"
"/dev/sdc1 103079200 245600 1000 1% /home\n")
availableSpace = factory._parseDiskFreeCommandOutForAvailableSpace(dfOut)
self.assertEquals(availableSpace, 204800)
def testInvalidFormat(self):
dfOut = "INVALID"
self.assertRaises(PersistenceError, factory._parseDiskFreeCommandOutForAvailableSpace, dfOut)
def testInsufficientColumns(self):
dfOut = (
"Filesystem 1K-blocks Used Available Use% Mounted on\n"
"/dev/sdb1 103079200\n")
self.assertRaises(PersistenceError, factory._parseDiskFreeCommandOutForAvailableSpace, dfOut)
def testNotANumber(self):
dfOut = (
"Filesystem 1K-blocks Used Available Use% Mounted on\n"
"/dev/sdb1 103079200 245600 NOTANUMBER 1% /home\n")
self.assertRaises(PersistenceError, factory._parseDiskFreeCommandOutForAvailableSpace, dfOut)
def testLargeValue(self):
dfOut = (
"Filesystem 1K-blocks Used Available Use% Mounted on\n"
"/dev/sdb1 103079200 245600 975908240000000000000000 1% /home\n")
availableSpace = factory._parseDiskFreeCommandOutForAvailableSpace(dfOut)
self.assertEquals(availableSpace, 999330037760000000000000000)
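The tests above pin down the contract of _parseDiskFreeCommandOutForAvailableSpace without showing its body. A minimal sketch that satisfies them, assuming df reports sizes in 1K blocks and only the first device line is evaluated:

from datafinder.persistence.error import PersistenceError

def _parseDiskFreeCommandOutForAvailableSpace(dfOut):
    # Skip the header line and read the "Available" column (index 3) of the
    # first device entry; df reports sizes in 1K blocks, hence the factor 1024.
    lines = dfOut.strip().split("\n")
    if len(lines) < 2:
        raise PersistenceError("Invalid disk free output: no device line found.")
    columns = lines[1].split()
    if len(columns) < 4:
        raise PersistenceError("Invalid disk free output: missing columns.")
    try:
        availableKiloBytes = int(columns[3])
    except ValueError:
        raise PersistenceError("Invalid disk free output: available space is not a number.")
    return availableKiloBytes * 1024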
| {
"repo_name": "DLR-SC/DataFinder",
"path": "test/unittest/datafinder_test/persistence/adapters/sftp/factory_test.py",
"copies": "1",
"size": "3879",
"license": "bsd-3-clause",
"hash": -6245098647549124000,
"line_mean": 42.5862068966,
"line_max": 101,
"alpha_frac": 0.6976024749,
"autogenerated": false,
"ratio": 3.982546201232033,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017582431789042993,
"num_lines": 87
} |
"""
Implements the data format representation.
"""
from datafinder.core.configuration.dataformats.constants import DEFAULT_DATAFORMAT_ICONNAME, STANDARD_FORMAT_TYPE
__version__ = "$Revision-Id:$"
class DataFormat(object):
""" Represents a data type. """
def __init__(self, name, mimeTypes=None, iconName=DEFAULT_DATAFORMAT_ICONNAME, additionalFileSuffixes=None, type_=STANDARD_FORMAT_TYPE):
"""
Constructor.
        @param name: Name of the data format.
@type name: C{unicode}
@param mimeTypes: List of MIME types which are associated with this format.
@type mimeTypes: C{list} of C{unicode}
@param iconName: Symbolic name of an associated icon.
@type iconName: C{unicode}
@param additionalFileSuffixes: List of file suffixes which are used when a MIME type based resolution fails.
@type additionalFileSuffixes: C{list} of C{unicode}
@param type_: The format type. See L{constants<datafinder.core.configuration.dataformats.constants>} for details.
@param type_: C{unicode}
"""
self.name = name
self.type = type_
self.iconName = iconName
self.mimeTypes = mimeTypes or list()
self.additionalFileSuffixes = additionalFileSuffixes or list()
self.description = ""
self.propertyDefinitions = dict()
self.printScriptName = None
self.editScriptName = None
self.viewScriptName = None
def __repr__(self):
""" Provides a readable string representation. """
return self.name + " " + self.type
def __cmp__(self, other):
""" Makes the data types comparable. """
try:
return cmp(self.name, other.name)
except AttributeError:
return 1
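A small sketch of how the class above is typically instantiated and compared; the instances mirror registrations from the data format registry:

xmlFormat = DataFormat("XML", ["text/xml", "application/xml"], "xml_format", [".xml"])
htmlFormat = DataFormat("HTML", ["text/html"], "html_format")
# __cmp__ orders formats alphabetically by name; comparing against an object
# without a "name" attribute reports the data format as greater.
assert xmlFormat > htmlFormat
assert cmp(xmlFormat, object()) == 1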
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/dataformats/dataformat.py",
"copies": "1",
"size": "3587",
"license": "bsd-3-clause",
"hash": 3425982221879211000,
"line_mean": 37.4175824176,
"line_max": 140,
"alpha_frac": 0.676888765,
"autogenerated": false,
"ratio": 4.260095011876484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020244215962837916,
"num_lines": 91
} |
"""
Implements the editor for search queries.
"""
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from datafinder.gui.user.dialogs.search_dialog.utils import SearchQueryAnalyzer
__version__ = "$Revision-Id:$"
class SearchQueryEditor(QtGui.QTextEdit):
"""
    Implements an editor for search queries.
"""
def __init__(self, parent=None):
"""
@param parent: parent widget
@type parent: L{QWidget <PyQt4.QtGui.QWidget>}
"""
QtGui.QTextEdit.__init__(self, parent)
self._completers = dict()
self._activeTokenType = SearchQueryAnalyzer.PROPERTY_TYPE
self._stateChangeTrigger = dict()
self._searchQueryAnalyzer = None
self._cursorPositionAnalysisResults = None # tuple of current token, token before, completion prefix
def event(self, event):
"""
@see: L{event<PyQt4.QtGui.QWidget.event>}
        Used for displaying token-dependent tool tips.
"""
if event.type() == QtCore.QEvent.ToolTip:
if not self._searchQueryAnalyzer is None:
position = self.cursorForPosition(event.pos()).position()
token = self._searchQueryAnalyzer.token(position)
toolTip = None
if not token is None:
toolTip = token.toolTip
if not toolTip is None:
QtGui.QToolTip.showText(event.globalPos(), toolTip)
else:
QtGui.QToolTip.hideText()
return QtGui.QTextEdit.event(self, event)
def _handleValidationSignal(self, valid):
""" Highlights the invalid parts of the search restrictions. """
if len(self.toPlainText()) > 0:
format_ = QtGui.QTextCharFormat()
format_.setFontUnderline(not valid)
startPosition = 0
if valid:
format_.setUnderlineStyle(QtGui.QTextCharFormat.NoUnderline)
else:
startPosition = self._searchQueryAnalyzer.errorResult[0]
format_.setUnderlineColor(QtCore.Qt.red)
format_.setUnderlineStyle(QtGui.QTextCharFormat.WaveUnderline)
textCursor = self.textCursor()
textCursor.setPosition(startPosition)
textCursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.KeepAnchor)
extraSelection = QtGui.QTextEdit.ExtraSelection()
extraSelection.cursor = textCursor
extraSelection.format = format_
self.setExtraSelections([extraSelection])
def registerCompleter(self, completer, tokenType):
"""
Registers completer for the given token type.
@param completer: completer to be registered
@type completer: L{QCompleter <PyQt4.QtGui.QCompleter>}
@param tokenType: For constant definitions see
L{SearchQueryAnalyzer<datafinder.gui.user.dialogs.search_dialog.utils.SearchQueryAnalyzer>}.
@type tokenType: C{int}
"""
self._completers[tokenType] = completer
def _setActiveTokenType(self, tokenType):
"""
Setter method for token type.
It disconnects the QCompleter of the old token type and connects the new one.
"""
if not self._completers[self._activeTokenType] is None:
self.disconnect(self._completer, QtCore.SIGNAL("activated(QString)"), self.insertCompletion)
self._completer.popup().hide()
self._activeTokenType = tokenType
self._completers[tokenType].setWidget(self)
self.connect(self._completers[tokenType], QtCore.SIGNAL("activated(QString)"), self.insertCompletion)
state = property(None, _setActiveTokenType)
def completer(self):
"""
@see: L{completer <PyQt4.QtGui.QLineEdit.completer>}
"""
return self._completer
def insertCompletion(self, completion):
"""
Inserts the chosen completion in the text editor.
"""
completion = unicode(completion)
textCursor = self.textCursor()
currentToken = self._cursorPositionAnalysisResults[0]
if not currentToken is None:
textCursor.beginEditBlock()
textCursor.setPosition(currentToken.start)
textCursor.setPosition(currentToken.end, QtGui.QTextCursor.KeepAnchor)
textCursor.deleteChar()
textCursor.insertText(completion)
textCursor.endEditBlock()
else:
textCursor.insertText(completion + " ")
self.setTextCursor(textCursor)
def _analyzeCurrentCursorPosition(self):
"""
Analyzes the current position of the cursor
and finds out which tokens are placed around the cursor.
"""
textCursor = self.textCursor()
position = textCursor.position()
currentToken, tokenBefore = self._searchQueryAnalyzer.tokenAround(position)
completionPrefix = ""
if not currentToken is None:
completionPrefix = currentToken.token[:(position - currentToken.start)]
self._cursorPositionAnalysisResults = currentToken, tokenBefore, completionPrefix
return self._cursorPositionAnalysisResults
def _updateActiveTokenType(self):
"""
Updates the used completer. It decides based on the types of the
tokens around the current cursor position which completer should be used.
"""
currentToken, tokenBefore, completionPrefix = self._analyzeCurrentCursorPosition()
newState = SearchQueryAnalyzer.PROPERTY_TYPE
if not currentToken is None:
newState = currentToken.type
if not tokenBefore is None:
newState = self._searchQueryAnalyzer.nextTokenType(tokenBefore.type)
self.state = newState
return completionPrefix
def keyPressEvent(self, keyEvent):
"""
        Slot that is called when a key is pressed. It checks whether a completion exists in its own
        completion model and presents a popup with all possibilities if available.
@see: L{keyPressEvent <PyQt4.QtGui.QTextEdit.keyPressEvent>}
"""
if not self._completer is None and self._completer.popup().isVisible():
if keyEvent.key() in [Qt.Key_Enter, Qt.Key_Return, Qt.Key_Escape, Qt.Key_Tab, Qt.Key_Backtab]:
keyEvent.ignore()
return
if keyEvent.key() == Qt.Key_Space and keyEvent.modifiers() & Qt.ControlModifier:
keyEvent.ignore()
completionPrefix = self._updateActiveTokenType()
self._completer.setCompletionPrefix(completionPrefix)
self._completer.popup().setCurrentIndex(self._completer.completionModel().index(0, 0))
cursorRect = self.cursorRect()
cursorRect.setWidth(self._completer.popup().sizeHintForColumn(0)
+ self._completer.popup().verticalScrollBar().sizeHint().width())
self._completer.complete(cursorRect)
else:
QtGui.QTextEdit.keyPressEvent(self, keyEvent)
@property
def _completer(self):
"""
Active completer.
"""
return self._completers[self._activeTokenType]
def _setSearchQueryAnalyzer(self, searchQueryAnalyzer):
""" Sets the analyzer instance and connects the validation signal. """
self._searchQueryAnalyzer = searchQueryAnalyzer
self.connect(self._searchQueryAnalyzer, QtCore.SIGNAL(SearchQueryAnalyzer.VALIDATION_SIGNAL), self._handleValidationSignal)
searchQueryAnalyzer = property(None, _setSearchQueryAnalyzer)
class Completer(QtGui.QCompleter):
""" Custom completer allowing displaying completions and tool tips for them in a list view. """
def __init__(self, completions, sortCompletions=False):
"""
Constructor.
@param completions: Completions and specific tool tips explaining them.
@type completions: C{dict} of C{unicode}, C{unicode}
@param sortCompletions: Flag indicating whether completions should be sorted.
@type sortCompletions: C{bool}
"""
QtGui.QCompleter.__init__(self)
self._model = QtGui.QStandardItemModel(len(completions), 0)
counter = 0
keys = completions.keys()
if sortCompletions:
keys.sort()
for name in keys:
item = QtGui.QStandardItem(name)
item.setToolTip(completions[name])
self._model.setItem(counter, 0, item)
counter += 1
self.setModel(self._model)
self.setPopup(self._CustomListView())
class _CustomListView(QtGui.QListView):
""" Custom list view. """
def __init__(self):
""" Constructor. """
QtGui.QListView.__init__(self)
def event(self, event):
"""
            This re-implementation is required because
            when the list view is popped up the method C{viewportEvent}
            - handling item-specific tool tips - is not called.
"""
if event.type() == QtCore.QEvent.ToolTip:
return self.viewportEvent(event)
else:
return QtGui.QListView.event(self, event)
class SearchSyntaxHighlighter(QtGui.QSyntaxHighlighter):
"""
This class enables syntax highlighting in the criteria window of the search dialog.
"""
def __init__(self, searchQueryAnalyzer, parent=None):
"""
Constructor.
@param parent: Parent object of this class.
@type parent: L{QObject<PyQt4.QtCore.QObject>}
"""
QtGui.QSyntaxHighlighter.__init__(self, parent)
self._parent = parent
self._searchQueryAnalyzer = searchQueryAnalyzer
self._searchQueryAnalyzer._matchHook = self._highlightToken
literalFormat = QtGui.QTextCharFormat()
literalFormat.setForeground(QtCore.Qt.darkGreen)
comparisonFormat = QtGui.QTextCharFormat()
comparisonFormat.setForeground(QtCore.Qt.blue)
conjunctionFormat = QtGui.QTextCharFormat()
conjunctionFormat.setForeground(QtCore.Qt.blue)
conjunctionFormat.setFontWeight(QtGui.QFont.Bold)
propertyFormat = QtGui.QTextCharFormat()
propertyFormat.setForeground(QtCore.Qt.darkMagenta)
self._typeFormatMap = {SearchQueryAnalyzer.LITERAL_TYPE: literalFormat,
SearchQueryAnalyzer.CONJUNCTION_TYPE: conjunctionFormat,
SearchQueryAnalyzer.COMPARISON_TYPE: comparisonFormat,
SearchQueryAnalyzer.PROPERTY_TYPE: propertyFormat}
self._paragraphStartPosition = 0
def highlightBlock(self, currentText):
"""
@see: L{highlightBlock<PyQt4.QtGui.QSyntaxHighlighter.highlightBlock>}
"""
text = unicode(self._parent.toPlainText())
currentText = unicode(currentText)
if len(text.strip()) > 0:
splittedText = text.split("\n")
self._paragraphStartPosition = 0
if len(splittedText) > 1:
self._paragraphStartPosition = self._determineParagraphStartPosition(splittedText, currentText)
self._searchQueryAnalyzer.analyze(text)
else:
self._searchQueryAnalyzer.clearParsingResults()
@staticmethod
def _determineParagraphStartPosition(splittedText, currentText):
""" Finds out the start of the paragraph that is currently changed. """
begin = 0
for paragraph in splittedText:
if paragraph != currentText:
begin += len(paragraph) + 1
else:
break
return begin
def _highlightToken(self, tokenDescriptor):
""" Set the format of the given token. """
if tokenDescriptor.start - self._paragraphStartPosition >= 0:
self.setFormat(tokenDescriptor.start - self._paragraphStartPosition, tokenDescriptor.end - tokenDescriptor.start,
self._typeFormatMap[tokenDescriptor.type])
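A wiring sketch for the classes above. It assumes a running QApplication and an existing SearchQueryAnalyzer instance ("analyzer"); the completion entries are made up for illustration, and a complete setup would register one completer per token type:

editor = SearchQueryEditor()
editor.searchQueryAnalyzer = analyzer  # write-only property defined above
# Hypothetical completions: completion text -> explanatory tool tip.
completions = {"DataType": "The data type property.",
               "MimeType": "The MIME type property."}
editor.registerCompleter(Completer(completions, sortCompletions=True),
                         SearchQueryAnalyzer.PROPERTY_TYPE)
highlighter = SearchSyntaxHighlighter(analyzer, editor)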
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/search_dialog/search_query_editor.py",
"copies": "1",
"size": "14560",
"license": "bsd-3-clause",
"hash": -8482501809231218000,
"line_mean": 38.1101928375,
"line_max": 131,
"alpha_frac": 0.6159340659,
"autogenerated": false,
"ratio": 4.698289770893837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018995089546062497,
"num_lines": 363
} |
"""
Implements the model for logging messages.
"""
import os
import sys
import time
import logging
from PyQt4 import QtCore, QtGui
__version__ = "$Revision-Id:$"
_DATETIME_FORMAT = "%d.%m.%y %X"
class LoggerHandler(logging.Handler, QtCore.QObject):
""" Implements a logging handler which indicates available log records via
the custom Qt signal C{logrecord}. The separation of logger handler and model
is required to avoid threading issues."""
def __init__(self, name, level=logging.DEBUG):
"""
@param name: Logger name to which the handler should be added.
@param level: Minimum logging level which should be handled.
"""
QtCore.QObject.__init__(self)
logging.Handler.__init__(self, level)
logging.getLogger(name).addHandler(self)
def emit(self, record):
""" @see: L{emit<logging.Handler.emit>} """
QtCore.QObject.emit(self, QtCore.SIGNAL("logrecord"), record)
class LoggingModel(QtCore.QAbstractTableModel):
"""
It implements the L{QtCore.QAbstractTableModel} to present the logging records in a L{QtGui.QTableView}.
It uses C{LoggerHandler} which indicates new logging records via a custom Qt signal.
"""
_LEVEL_NAME = "levelname"
_NAME = "name"
_PATH_NAME = "pathname"
_FUNC_NAME = "funcName"
_LINE_NO = "lineno"
_MESSAGE = "msg"
LEVEL_NO = "levelno"
CREATED = "created"
def __init__(self, loggerHandler, parent=None):
"""
@param loggerHandler: Indicates new log records via Qt signals.
@type loggerHandler: C{LoggerHandler}
@param parent: Parent L{QtCore.QObject} of the model.
@type parent: C{QtCore.QObject}
"""
QtCore.QAbstractTableModel.__init__(self, parent)
self._loggerHandler = loggerHandler
self.__methods = [self._LEVEL_NAME, self.CREATED, self._NAME, self._PATH_NAME,
self._FUNC_NAME, self._LINE_NO, self._MESSAGE]
self.__headers = [self.tr("Level"), self.tr("Created"), self.tr("Logger"),
self.tr("Module"), self.tr("Function"), self.tr("Line"),
self.tr("Message")]
self.__recordBuffer = []
self.connect(self._loggerHandler, QtCore.SIGNAL("logrecord"), self._addNewLogRecord)
def _getHeaders(self):
"""
        Returns the headers of this model.
@return: List of headers.
@rtype: C{list}
"""
return self.__headers
def _getBuffer(self):
"""
Return the buffer of logging records.
@return: List of logging records.
@rtype: C{list}
"""
return self.__recordBuffer
@staticmethod
def _parseModuleName(path):
"""
Generates the module path relative to the current PYTHONPATH.
        @param path: Path that has to be converted to the module representation.
@type path: C{unicode}
"""
for pyPath in sys.path:
if path.lower().startswith(pyPath.lower()):
return path[len(pyPath) + 1:-3].replace(os.sep, ".")
return path
def rowCount(self, _=QtCore.QModelIndex()):
"""
@see: QtCore.QAbstractTableModel#rowCount
"""
return len(self.__recordBuffer)
def columnCount(self, _=QtCore.QModelIndex()):
"""
@see: QtCore.QAbstractTableModel#columnCount
"""
return len(self.__methods)
def data(self, index, role=QtCore.Qt.DisplayRole):
"""
@see: QtCore.QAbstractTableModel#data
"""
row = index.row()
column = index.column()
variant = QtCore.QVariant()
if role == QtCore.Qt.DisplayRole:
attribute = getattr(self.__recordBuffer[row], self.__methods[column])
if self.__methods[column] == self.CREATED:
attribute = time.strftime(_DATETIME_FORMAT, time.localtime(attribute))
elif self.__methods[column] == self._PATH_NAME:
attribute = self._parseModuleName(attribute)
elif self.__methods[column] == self._MESSAGE:
attribute = unicode(attribute).strip()
variant = QtCore.QVariant(attribute)
elif role == QtCore.Qt.ToolTipRole:
attribute = getattr(self.__recordBuffer[row], self._MESSAGE)
try:
attribute.strip()
except AttributeError:
attribute = unicode(attribute)
variant = QtCore.QVariant(attribute)
elif role == QtCore.Qt.TextAlignmentRole:
alignment = int(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
if self.__methods[column] == self._LINE_NO:
alignment = int(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
variant = QtCore.QVariant(alignment)
elif role == QtCore.Qt.ForegroundRole:
color = QtGui.QColor(QtCore.Qt.black)
if self.__recordBuffer[row].levelno in (logging.CRITICAL, logging.ERROR):
color = QtGui.QColor(QtCore.Qt.red)
variant = QtCore.QVariant(color)
elif role == QtCore.Qt.BackgroundColorRole:
color = QtGui.QColor(QtCore.Qt.white)
variant = QtCore.QVariant(color)
return variant
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
"""
@see: QtCore.QAbstractTableModel#headerData
"""
variant = QtCore.QVariant()
if orientation == QtCore.Qt.Horizontal:
if role == QtCore.Qt.DisplayRole:
attribute = QtCore.QVariant(self.__headers[section])
variant = QtCore.QVariant(attribute)
elif role == QtCore.Qt.TextAlignmentRole:
alignment = QtCore.QVariant(int(QtCore.Qt.AlignLeft))
if self.__methods[section] == self._LINE_NO:
alignment = int(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
variant = QtCore.QVariant(alignment)
return variant
def flush(self):
"""
@see: logging.Handler#flush
"""
try:
            self.beginRemoveRows(QtCore.QModelIndex(), 0, len(self.__recordBuffer) - 1)
self.__recordBuffer = []
self.endRemoveRows()
except RuntimeError:
return
def _addNewLogRecord(self, record):
try:
self.beginInsertRows(QtCore.QModelIndex(), self.rowCount(), self.rowCount())
self.__recordBuffer.append(record)
self.endInsertRows()
except RuntimeError:
return
myBuffer = property(_getBuffer)
del _getBuffer
class LoggingSortFilterModel(QtGui.QSortFilterProxyModel):
"""
    The LoggingSortFilterModel implements a filter mechanism for logging messages.
    It also implements the ability to sort log messages.
"""
def __init__(self, model, parent=None):
"""
Constructor.
@param model: Model that has to be sorted and filtered.
@type model: C{QtCore.QAbstractItemModel}
@param parent: Parent object.
@type parent: C{QtCore.QObject}
"""
QtGui.QSortFilterProxyModel.__init__(self, parent)
self.__showCount = 0
self.__filters = []
self.setSourceModel(model)
def __getattr__(self, name):
"""
Returns the attribute under the given name.
@param name: Name of the attribute that has to be returned.
@type name: C{string}
@return: The attribute for the given name.
@rtype: C{object}
"""
if hasattr(self.sourceModel(), name):
return getattr(self.sourceModel(), name)
raise AttributeError("Unknown attribute '%s'" % name)
def addFilter(self, level):
"""
Adds a given level to the filter list.
@param level: The level that has to be filtered.
@type level: C{int}
"""
if not (level in self.__filters):
self.__filters.append(level)
self.invalidate()
def removeFilter(self, level):
"""
Removes the given level from the filter list.
        @param level: The level that has to be removed from the filter list.
@type level: C{int}
"""
if level in self.__filters:
self.__filters.remove(level)
self.invalidate()
def isFiltered(self, level):
"""
        Returns whether the given level is contained in the filter list.
@return: True if the level is contained else False.
@rtype: C{boolean}
"""
return level in self.__filters
def filterAcceptsRow(self, row, _):
"""
@see: QtGui.QSortFilterProxyModel#filterAcceptsRow
"""
return not(self.sourceModel().myBuffer[row].levelno in self.__filters)
def lessThan(self, left, right):
"""
@see: QtGui.QSortFilterProxyModel#lessThan
"""
leftData = left.data().toString()
rightData = right.data().toString()
if leftData == rightData:
leftCreated = getattr(self.sourceModel().myBuffer[left.row()], LoggingModel.CREATED)
rightCreated = getattr(self.sourceModel().myBuffer[right.row()], LoggingModel.CREATED)
return leftCreated < rightCreated
return leftData < rightData
def columnCount(self, _=QtCore.QModelIndex()):
"""
@see: QtCore.QAbstractItemModel#columnCount
"""
return self.sourceModel().columnCount(None)
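The pieces above are typically composed as follows; the logger name is illustrative only:

import logging
from PyQt4 import QtGui

app = QtGui.QApplication([])
handler = LoggerHandler("user_client")  # attaches itself to the named logger
model = LoggingModel(handler)
proxyModel = LoggingSortFilterModel(model)
proxyModel.addFilter(logging.DEBUG)     # hide debug records from the view
view = QtGui.QTableView()
view.setModel(proxyModel)
logging.getLogger("user_client").error("Something went wrong.")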
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/models/logger.py",
"copies": "1",
"size": "11656",
"license": "bsd-3-clause",
"hash": 266018881873642800,
"line_mean": 31.7855072464,
"line_max": 108,
"alpha_frac": 0.5972889499,
"autogenerated": false,
"ratio": 4.40347563279184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006876981901361915,
"num_lines": 345
} |
"""
Implements the WebDAV-specific principal search.
"""
import os
from webdav.Condition import ContainsTerm
from webdav.Connection import WebdavError
from webdav.Constants import NS_DAV, PROP_DISPLAY_NAME
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.principal_search import constants, principal
from datafinder.persistence.principal_search.principalsearcher import NullPrincipalSearcher
from datafinder.persistence.adapters.webdav_ import util
__version__ = "$Revision-Id:$"
class PrincipalSearchWebdavAdapter(NullPrincipalSearcher):
""" Implements the search for principals WebDAV-specific. """
def __init__(self, userCollectionUrl, groupCollectionUrl, connectionPool, connectionHelper=util):
"""
Constructor.
@param userCollectionUrl: URL pointing to the user collection.
@type userCollectionUrl: C{unicode}
@param groupCollectionUrl: URL pointing to the group collection.
@type groupCollectionUrl: C{unicode}
@param connectionPool: Connection pool.
@type connectionPool: L{Connection<datafinder.persistence.webdav_.connection_pool.WebdavConnectionPool>}
@param connectionHelper: Utility object/module creating WebDAV library storer instances.
        @type connectionHelper: L{ItemIdentifierMapper<datafinder.persistence.adapters.webdav_.util>}
"""
NullPrincipalSearcher.__init__(self)
self.__connectionPool = connectionPool
self.__userCollectionUrl = userCollectionUrl
self.__groupCollectionUrl = groupCollectionUrl
self.__connectionHelper = connectionHelper
def searchPrincipal(self, pattern, searchMode):
""" @see: L{NullPrincipalSearcher<datafinder.persistence.principal_search.principalsearcher.NullPrincipalSearcher>} """
connection = self.__connectionPool.acquire()
try:
userCollectionStorer = self.__connectionHelper.createCollectionStorer(self.__userCollectionUrl, connection)
groupCollectionStorer = self.__connectionHelper.createCollectionStorer(self.__groupCollectionUrl, connection)
return self._searchPrincipal(pattern, searchMode, userCollectionStorer, groupCollectionStorer)
finally:
self.__connectionPool.release(connection)
def _searchPrincipal(self, pattern, searchMode, userCollectionStorer, groupCollectionStorer):
""" Performs principal search on the WebDAV server. """
mappedResult = list()
userRawResult = dict()
groupRawResult = dict()
if searchMode == constants.SEARCH_MODE_USER_AND_GROUP:
groupRawResult = self._performSearch(pattern, groupCollectionStorer)
userRawResult = self._performSearch(pattern, userCollectionStorer)
elif searchMode == constants.SEARCH_MODE_GROUP_ONLY:
groupRawResult = self._performSearch(pattern, groupCollectionStorer)
elif searchMode == constants.SEARCH_MODE_USER_ONLY:
userRawResult = self._performSearch(pattern, userCollectionStorer)
else:
raise PersistenceError("The specified search mode is not supported.")
self._mapRawResult(userRawResult, mappedResult, True)
self._mapRawResult(groupRawResult, mappedResult, False)
return mappedResult
@staticmethod
def _performSearch(name, collectionStorer):
""" Performs the principal search on the given WebDAV principal collection. """
condition = ContainsTerm(PROP_DISPLAY_NAME, name, False)
try:
searchResult = collectionStorer.search(condition, [(NS_DAV, PROP_DISPLAY_NAME)])
except WebdavError, error:
errorMessage = "Cannot perform user/group query. Reason: %s" % error.reason
raise PersistenceError(errorMessage)
return searchResult
@staticmethod
def _mapRawResult(rawResult, mappedResult, isUser):
""" Maps the WebDAV search result to the required format. """
for key, value in rawResult.iteritems():
uniqueName = os.path.basename(key)
displayName = ""
if (NS_DAV, PROP_DISPLAY_NAME) in value:
displayName = unicode(value[(NS_DAV, PROP_DISPLAY_NAME)].textof())
if isUser:
principalType = constants.USER_PRINCIPAL_TYPE
else:
principalType = constants.GROUP_PRINCIPAL_TYPE
principal_ = principal.Principal(uniqueName, type=principalType, displayName=displayName)
mappedResult.append(principal_)
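A usage sketch for the adapter above. Setting up the WebDAV connection pool is outside this excerpt, so "connectionPool" is assumed to be created elsewhere:

adapter = PrincipalSearchWebdavAdapter("http://server/users/",
                                       "http://server/groups/",
                                       connectionPool)
matches = adapter.searchPrincipal("test", constants.SEARCH_MODE_USER_ONLY)
for principal_ in matches:
    print principal_.identifier, principal_.displayName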
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/webdav_/principal_search/adapter.py",
"copies": "1",
"size": "6428",
"license": "bsd-3-clause",
"hash": 359522353945374600,
"line_mean": 44.9197080292,
"line_max": 127,
"alpha_frac": 0.7005289359,
"autogenerated": false,
"ratio": 4.457697642163661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014295973558713974,
"num_lines": 137
} |
"""
Implements the specific property wizard page.
"""
from PyQt4 import QtCore
from datafinder.core.configuration.properties.constants import DATATYPE_ID, DATASTORE_NAME_ID
from datafinder.gui.user.dialogs.creation_wizard.constants import INCOMPLETE_PROPERTY_DEFINITION
from datafinder.gui.user.dialogs.creation_wizard.pages.base_page import BaseWizardPage
from datafinder.gui.user.dialogs.creation_wizard.pages.utils import determineTargetDataTypes
from datafinder.gui.user.models.properties import PropertiesModel
__version__ = "$Revision-Id:$"
class PropertyWizardPage(BaseWizardPage):
""" Implements the specific property wizard page. """
def __init__(self):
""" Constructor. """
BaseWizardPage.__init__(self)
self.index = None
self.indexChanged = True
self.baseRepositoryModel = None
self.isDataTypeSelectionEnabled = True
self.initialProperties = None
self._dataTypes = dict()
def configure(self):
""" Prepares the source index wizard page. """
if self.propertyWidget.model is None:
self.propertyWidget.model = PropertiesModel(self.baseRepositoryModel, False)
if self.isDataTypeSelectionEnabled:
self.connect(self.propertyWidget.model,
QtCore.SIGNAL(PropertiesModel.IS_CONSISTENT_SIGNAL),
self._handlePropertyConsistencySlot)
self.connect(self.dataTypeComboBox, QtCore.SIGNAL("activated(const QString)"), self._selectedDataTypeChanged)
if not self.wizard().dataStoreName is None:
dataStoreProperty = self.baseRepositoryModel.repository.createProperty(DATASTORE_NAME_ID,
self.wizard().dataStoreName)
if self.initialProperties is None:
self.initialProperties = list()
self.initialProperties.append(dataStoreProperty)
self._loadProperties(self.initialProperties)
if self.isDataTypeSelectionEnabled:
self.dataTypeLabel.show()
self.dataTypeComboBox.show()
self._handleDataTypeProperties()
else:
self.dataTypeLabel.hide()
self.dataTypeComboBox.hide()
def _handleDataTypeProperties(self):
""" Initializes data type specific property initializations. """
if self.indexChanged:
currentDataTypeName = unicode(self.dataTypeComboBox.currentText())
self._dataTypes = dict()
self.dataTypeComboBox.clear()
targetDataTypes = determineTargetDataTypes(self.baseRepositoryModel, self.index)
if len(targetDataTypes) > 0:
for targetDataType in targetDataTypes:
self._dataTypes[targetDataType.name] = targetDataType
icon = self.baseRepositoryModel.iconProvider.iconForDataType(targetDataType)
if not icon is None:
self.dataTypeComboBox.addItem(icon, targetDataType.name)
else:
self.dataTypeComboBox.addItem(targetDataType.name)
if not currentDataTypeName in self._dataTypes:
self._selectedDataTypeChanged(self._dataTypes.keys()[0])
self._handlePropertyConsistencySlot(self.propertyWidget.model.isConsistent)
def _selectedDataTypeChanged(self, dataTypeName):
""" Handles changed selection of the data type. """
dataType = self._dataTypes[unicode(dataTypeName)]
dataTypeProperty = self.baseRepositoryModel.repository.createProperty(DATATYPE_ID,
unicode(self.dataTypeComboBox.currentText()))
properties = [dataTypeProperty]
for propDef in dataType.propertyDefinitions:
prop = self.baseRepositoryModel.repository.createPropertyFromDefinition(propDef, propDef.defaultValue)
properties.append(prop)
self._loadProperties(properties)
def _loadProperties(self, properties):
""" Sets the given properties and resets the property model. """
self.propertyWidget.model.load(properties)
self.propertyWidget.model.reset()
def _handlePropertyConsistencySlot(self, isConsistent):
""" Slot handling the consistency of the property model. """
if isConsistent:
self.errorHandler.removeError(INCOMPLETE_PROPERTY_DEFINITION)
else:
errorMessage = "Please complete the missing property values."
self.errorHandler.appendError(INCOMPLETE_PROPERTY_DEFINITION, errorMessage)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/pages/property_page.py",
"copies": "1",
"size": "6636",
"license": "bsd-3-clause",
"hash": -6686415911719897000,
"line_mean": 44.4055944056,
"line_max": 125,
"alpha_frac": 0.6571729958,
"autogenerated": false,
"ratio": 4.7030474840538625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01676007132297072,
"num_lines": 143
} |
"""
Maps the interface-specific privilege definition to the WebDAV-specific one and vice versa.
"""
import os
from webdav.Constants import TAG_ALL, TAG_READ, TAG_WRITE, \
TAG_WRITE_CONTENT, TAG_WRITE_PROPERTIES, \
TAG_BIND, TAG_UNBIND, \
TAG_WRITE_ACL, TAG_READ_ACL, TAG_READ_CURRENT_USER_PRIVILEGE_SET, \
TAG_AUTHENTICATED, TAG_UNAUTHENTICATED, TAG_OWNER
from webdav.acp import ACL, ACE, Principal, GrantDeny, Privilege
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.principal_search import constants as principal_constants
from datafinder.persistence.principal_search import principal
from datafinder.persistence.privileges import constants, ace
__version__ = "$Revision-Id:$"
# maps WebDAV specific principals to defined principals
_webdavToGeneralPrincipalMap = {TAG_ALL: principal_constants.ALL_PRINCIPAL,
TAG_AUTHENTICATED: principal_constants.AUTHENTICATED_PRINCIPAL,
TAG_UNAUTHENTICATED: principal_constants.UNAUTHENTICATED_PRINCIPAL,
TAG_OWNER: principal_constants.OWNER_PRINCIPAL}
_generalToWebdavPrincipalMap = dict(zip(_webdavToGeneralPrincipalMap.values(),
_webdavToGeneralPrincipalMap.keys()))
# maps WebDAV specific privilege constants to defined privileges
_webdavToGeneralPrivilegeMap = {TAG_ALL: constants.ALL_PRIVILEGE,
TAG_WRITE: constants.WRITE_PRIVILEGE,
TAG_WRITE_CONTENT: constants.WRITE_CONTENT_PRIVILEGE,
TAG_WRITE_PROPERTIES: constants.WRITE_PROPERTIES_PRIVILEGE,
TAG_BIND: constants.ADD_ITEM_PRIVILEGE,
TAG_UNBIND: constants.REMOVE_ITEM_PRIVILEGE,
TAG_READ: constants.READ_PRIVILEGE,
TAG_WRITE_ACL: constants.WRITE_PRIVILEGES_PRIVILEGE,
TAG_READ_ACL: constants.READ_PRIVILEGES_PRIVILEGE,
TAG_READ_CURRENT_USER_PRIVILEGE_SET: constants.READ_USER_PRIVILEGES_PRIVILEGE}
_generalToWebdavPrivilegeMap = dict(zip(_webdavToGeneralPrivilegeMap.values(),
_webdavToGeneralPrivilegeMap.keys()))
class PrivilegeMapper(object):
""" Implements mapping of privileges from the interface-specific to the WebDAV-specific format. """
def __init__(self, userUrl, groupUrl):
"""
Constructor.
        @param userUrl: URL pointing to a collection containing the user principals.
        @type userUrl: C{unicode}
        @param groupUrl: URL pointing to a collection containing the group principals.
        @type groupUrl: C{unicode}
"""
self._userUrl = userUrl
self._groupUrl = groupUrl
def mapAcl(self, acl):
"""
Maps the given ACL in the interface format to the WebDAV-library-specific format.
@param acl: ACL in interface representation.
@param acl: C{list} of L{AccessControlListEntry<datafinder.persistence.
privileges.ace.AccessControlListEntry>}
@return: ACL in WebDAV-specific format.
@rtype: L{ACL<webdav.acp.Acl.ACL>}
"""
persistenceAces = list()
for ace_ in acl:
persistenceAce = self._createAce(ace_.principal, ace_.grantedPrivileges, True)
if not persistenceAce is None:
persistenceAces.append(persistenceAce)
persistenceAce = self._createAce(ace_.principal, ace_.deniedPrivileges, False)
if not persistenceAce is None:
persistenceAces.append(persistenceAce)
return ACL(aces=persistenceAces)
def _createAce(self, principal_, privileges, isGranted):
""" Prepares a WebDAV-specific access control element. """
ace_ = None
if len(privileges) > 0:
grantDeny = GrantDeny()
if isGranted:
grantDeny.setGrant()
else:
grantDeny.setDeny()
grantDeny.addPrivileges(self._mapPrivileges(privileges))
mappedPrincipal = self._mapPrincipal(principal_)
ace_ = ACE(principal=mappedPrincipal, grantDenies=[grantDeny])
return ace_
def _mapPrincipal(self, principal_):
""" Maps the interface-specific principal representation to the WebDAV-specific. """
if principal_.identifier in _generalToWebdavPrincipalMap.keys():
mappedPrincipal = Principal()
mappedPrincipal.property = _generalToWebdavPrincipalMap[principal_.identifier]
else:
if principal_.type == principal_constants.USER_PRINCIPAL_TYPE:
principalUrl = self._userUrl + principal_.identifier
else:
principalUrl = self._groupUrl + principal_.identifier
mappedPrincipal = Principal(principalURL=principalUrl)
return mappedPrincipal
@staticmethod
def _mapPrivileges(privilegeConstants):
""" Maps interface-specific privilege constants to WebDAV-library constants. """
webdavPrivileges = list()
for privilegeConstant in privilegeConstants:
try:
webdavPrivilegeConstant = _generalToWebdavPrivilegeMap[privilegeConstant]
except KeyError:
errorMessage = "Unsupported privilege '%s' was found!" % privilegeConstant
raise PersistenceError(errorMessage)
else:
webdavPrivileges.append(Privilege(privilege=webdavPrivilegeConstant))
return webdavPrivileges
def mapPersistenceAcl(self, acl):
"""
Maps an ACL in WebDAV-specific format to the interface-specific format.
@param acl: ACL in WebDAV-specific format.
@param acl: L{ACL<webdav.acp.Acl.ACL>}
@return: ACL in interface representation.
@rtype: C{list} of L{AccessControlListEntry<datafinder.persistence.
privileges.ace.AccessControlListEntry>}
"""
mappedAcl = list()
joinedAcl = acl.joinGrantDeny()
for ace_ in joinedAcl.aces:
if ace_.inherited is None:
grantedPrivileges = list()
deniedPrivileges = list()
tmpList = None
for grantDeny in ace_.grantDenies:
if grantDeny.isGrant():
tmpList = grantedPrivileges
else:
tmpList = deniedPrivileges
tmpList.extend(self.mapPersistencePrivileges(grantDeny.privileges))
mappedPrincipal = self._mapPersistencePrincipal(ace_.principal)
mappedAce = ace.AccessControlListEntry(mappedPrincipal, grantedPrivileges=grantedPrivileges,
deniedPrivileges=deniedPrivileges)
mappedAcl.append(mappedAce)
return mappedAcl
@staticmethod
def mapPersistencePrivileges(privileges):
"""
Maps privileges in the WebDAV-specific format to the interface representation.
        @note: Unsupported WebDAV privileges are ignored.
@param privileges: Privileges in WebDAV-specific format.
@type privileges: C{list} of L{Privilege<webdav.acp.Privilege.Privilege>}
@return: Privileges in interface format.
@rtype: C{list} of C{unicode}
        @note: Privilege constants are defined L{here<datafinder.persistence.privileges.constants>}.
"""
mappedPrivileges = list()
for privilege in privileges:
if _webdavToGeneralPrivilegeMap.has_key(privilege.name): # unsupported WebDAV privileges are ignored
privilegeConstant = _webdavToGeneralPrivilegeMap[privilege.name]
mappedPrivileges.append(privilegeConstant)
return mappedPrivileges
def _mapPersistencePrincipal(self, principal_):
""" Maps the WebDAV representation of a principal to the interface-specific. """
mappedPrincipal = principal.Principal(None)
if not principal_.property is None and principal_.property in _webdavToGeneralPrincipalMap.keys():
mappedPrincipal = principal.Principal(_webdavToGeneralPrincipalMap[principal_.property],
type=principal_constants.GROUP_PRINCIPAL_TYPE)
elif not principal_.principalURL is None:
if self._userUrl in principal_.principalURL:
principalType = principal_constants.USER_PRINCIPAL_TYPE
else:
principalType = principal_constants.GROUP_PRINCIPAL_TYPE
mappedPrincipal = principal.Principal(os.path.basename(principal_.principalURL), type=principalType)
return mappedPrincipal
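A round-trip sketch for the mapper above, mirroring the unit test shown earlier in this document:

mapper = PrivilegeMapper("http://etst.de/users/", "http://etst.de/groups/")
entry = ace.AccessControlListEntry(principal.Principal("test", displayName="test"))
entry.grantedPrivileges = [constants.READ_PRIVILEGE, constants.WRITE_PRIVILEGE]
webdavAcl = mapper.mapAcl([entry])                  # interface ACL -> WebDAV ACL
interfaceAcl = mapper.mapPersistenceAcl(webdavAcl)  # ... and back again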
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/webdav_/privileges/privileges_mapping.py",
"copies": "1",
"size": "11020",
"license": "bsd-3-clause",
"hash": 1649881398474181600,
"line_mean": 45.7056277056,
"line_max": 112,
"alpha_frac": 0.6299455535,
"autogenerated": false,
"ratio": 4.54058508446642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018741398343036245,
"num_lines": 231
} |
"""
Principal search specific constants.
"""
__version__ = "$Revision-Id:$"
# principal search constants
SEARCH_MODE_USER_ONLY = 0
SEARCH_MODE_GROUP_ONLY = 1
SEARCH_MODE_USER_AND_GROUP = 2
# special principals
ALL_PRINCIPAL = "____allprincipal____"
AUTHENTICATED_PRINCIPAL = "____authenticatedprincipal____"
UNAUTHENTICATED_PRINCIPAL = "____unauthenticatedprincipal____"
OWNER_PRINCIPAL = "____ownerprincipal____"
# principal types
USER_PRINCIPAL_TYPE = "____user____"
GROUP_PRINCIPAL_TYPE = "____group____"
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/principal_search/constants.py",
"copies": "1",
"size": "2236",
"license": "bsd-3-clause",
"hash": 4418645345292622000,
"line_mean": 35.8983050847,
"line_max": 72,
"alpha_frac": 0.7307692308,
"autogenerated": false,
"ratio": 3.7898305084745765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9948576330983272,
"avg_score": 0.014404681658260813,
"num_lines": 59
} |
"""
Provides access to certain states of the user GUI.
"""
from PyQt4.QtGui import QDialog
from datafinder.gui.user.common.item_selection_dialog import ItemSelectionDialog
from datafinder.gui.user.common.progress_dialog import ProgressDialog
from datafinder.gui.user.models.repository.filter.leaf_filter import LeafFilter
from datafinder.script_api.repository import RepositoryDescription
__version__ = "$Revision-Id:$"
_context = None
def mainWidget():
""" Returns the main widget of the user client.
@return: The main widget.
@rtype: L{MainWindow<datafinder.gui.user.application.MainWindow>}
"""
return _context.mainWidget
def unmanagedRepositoryDescription():
"""
Returns the context of the unmanaged repository.
@return: Unmanaged repository descriptor.
@rtype: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
return _context.unmanagedRepositoryDescription
def managedRepositoryDescription():
"""
Returns the context of the managed repository.
@return: Managed repository descriptor.
@rtype: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
return _context.managedRepositoryDescription
def lock(paths, repositoryDescription=None):
"""
Locks the given paths. Instead of the child
items a place holder item ("...") is displayed until
the specific path gets unlocked.
@param paths: Paths of the items which should be locked.
@type paths: C{unicode}
@param repositoryDescription: Identifies the target repository.
@type repositoryDescription: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
rm = _context.determineRepositoryModel(repositoryDescription)
indexes = _context.determinesIndexes(rm, paths)
rm.lock(indexes)
def unlock(paths, repositoryDescription=None):
"""
Unlocks the given paths.
@param paths: Paths of the items which should be unlocked.
@type paths: C{unicode}
@param repositoryDescription: Identifies the target repository.
@type repositoryDescription: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
rm = _context.determineRepositoryModel(repositoryDescription)
indexes = _context.determinesIndexes(rm, paths)
for index in indexes:
rm.unlock(index)
def currentSelection(repositoryDescription=None):
"""
Returns paths of the current selected items.
@return: Paths of the selected items.
@rtype: C{list} of C{unicode}
@param repositoryDescription: Identifies the target repository.
@type repositoryDescription: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
rc = _context.determineRepositoryController(repositoryDescription)
paths = list()
for index in rc.collectionController.selectedIndexes:
if index.isValid():
paths.append(index.model().nodeFromIndex(index).path)
return paths
def currentCollection(repositoryDescription=None):
"""
Returns the current active collection.
@return: Path of the current active collection.
@rtype: C{unicode}
@param repositoryDescription: Identifies the target repository.
@type repositoryDescription: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
rm = _context.determineRepositoryModel(repositoryDescription)
return rm.nodeFromIndex(rm.activeIndex).path
def selectItem(path, repositoryDescription=None):
"""
Selects the item identified by the given path.
@param path: Path of the item to select.
@type path: C{unicode}
@param repositoryDescription: Identifies the target repository.
@type repositoryDescription: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
"""
rm = _context.determineRepositoryModel(repositoryDescription)
rm.activePath = path
def performWithProgressDialog(function, callback=None,
windowTitle="Perform Script Action",
labelText="Performing a script action in background..."):
"""
Performs the given function and shows a nice progress dialog.
    Please make sure to perform no action changing GUI elements within this function.
    Moreover, the locking and unlocking of items must not be performed within this function.
    Clean-up actions can be implemented in the given callback function.
@param function: Function to perform.
@type function: Callable without any arguments.
@param callback: Function to perform clean up actions. Default: C{None}
@type callback: Callable without any arguments.
@param windowTitle: Title of the progress dialog. Default: C{Perform Script Action}
@type windowTitle: C{unicode}
@param labelText: Message shown in the progress dialog. Default: C{Performing a script action in background...}
@type labelText: C{unicode}
"""
if _context.progressDialog is None:
_context.progressDialog = ProgressDialog(windowTitle, labelText)
_context.progressDialog._cb = callback
_context.progressDialog.start(function)
def getExistingCollection(repositoryDescription=None, helpText=""):
"""
Shows a dialog allowing the selection of a collection and returns its path.
When the dialog has been canceled by the user C{None} is returned.
@param repositoryDescription: Identifies the target repository.
@type repositoryDescription: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
    @param helpText: Optional information displayed in the dialog. Default: C{}
@type helpText: C{unicode}
"""
existingCollectionPath = None
rm = _context.determineRepositoryModel(repositoryDescription)
filteredRm = LeafFilter(rm)
itemSelectionDialog = ItemSelectionDialog(filteredRm)
itemSelectionDialog.selectedIndex = filteredRm.activeIndex
itemSelectionDialog.helpText = helpText
exitCode = itemSelectionDialog.exec_()
if exitCode == QDialog.Accepted:
existingCollectionPath = rm.nodeFromIndex(itemSelectionDialog.selectedIndex).path
return existingCollectionPath
def getScriptExecutionContext():
"""
Returns the repository description instance and
the set of items selected on script action execution.
@return: Script execution context.
@rtype: L{ScriptExecutionContext<datafinder.gui.user.script_api.ScriptExecutionContext>}
"""
scriptExecutionContext = None
if not _context.scriptController.boundScriptExecutionContext is None:
repository, items = _context.scriptController.boundScriptExecutionContext
itemPaths = [item.path for item in items]
scriptExecutionContext = ScriptExecutionContext(RepositoryDescription(repository), itemPaths)
return scriptExecutionContext
class ScriptExecutionContext(object):
""" Simple context object which contains the script execution context. """
def __init__(self, repositoryDescription, itemPaths):
"""
Constructor.
@param repositoryDescription: The description of the repository.
@type: L{RepositoryDescription<datafinder.script_api.repository.RepositoryDescription>}
@param itemPaths: Selected item paths in which context the script is executed.
@type itemPaths: C{list} of C{unicode}
"""
self.repositoryDescription = repositoryDescription
self.itemPaths = itemPaths
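A sketch of a script action built on the functions above; the long-running work is illustrative only:

selectedPaths = currentSelection()
lock(selectedPaths)

def _performLongRunningWork():
    # Must not change GUI elements or lock/unlock items (see note above).
    for path in selectedPaths:
        pass  # e.g. process the item identified by "path"

def _cleanUp():
    unlock(selectedPaths)

performWithProgressDialog(_performLongRunningWork, callback=_cleanUp)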
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/script_api.py",
"copies": "1",
"size": "9604",
"license": "bsd-3-clause",
"hash": 8954389017884758000,
"line_mean": 36.7258064516,
"line_max": 115,
"alpha_frac": 0.7178259059,
"autogenerated": false,
"ratio": 4.628433734939759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02507035084148691,
"num_lines": 248
} |
"""
Provides file storer implementation allowing access to an item on a specific storage system.
"""
import os
from tempfile import NamedTemporaryFile, mkstemp
from datafinder.persistence.error import PersistenceError
__version__ = "$Revision-Id:$"
_BLOCK_SIZE = 30000
class FileStorer(object):
"""
    Convenience object provided to allow access to the complete interface.
@note: All methods of this interface are raising errors of
type L{PersistenceError<datafinder.persistence.error.PersistenceError>}
to indicate problems.
"""
# pylint: disable=R0904
# R0904: pylint warns about too many public methods (>30).
# However, the methods used for read-only access to attributes are counted as well.
# So it is fine to disable this warning.
def __init__(self, fileSystem, identifier, dataStorer, metadataStorer, privilegeStorer):
"""
Constructor.
@param fileSystem: File system representation this item belongs to.
        @type fileSystem: L{FileSystem<datafinder.persistence.factory.FileSystem>}
        @param identifier: Logical identifier of the item.
        @type identifier: C{unicode}
        @param dataStorer: Encapsulates data / file system hierarchy specific behavior.
@type dataStorer: C{object} implementing the interface of L{NullDataStorer<datafinder.
persistence.data.datastorer.NullDataStorer>}
@param metadataStorer: Encapsulates meta data specific behavior.
@type metadataStorer: C{object} implementing the interface of L{NullMetadataStorer<datafinder.
persistence.metadata.metadatastorer.NullMetadataStorer>}
@param privilegeStorer: Encapsulates privilege specific behavior.
@type privilegeStorer: C{object} implementing the interface of L{NullPrivilegeStorer<datafinder.
persistence.privileges.privilegestorer.NullPrivilegeStorer>}
"""
self.__fileSystem = fileSystem
self.__identifier = identifier
self.__dataStorer = dataStorer
self.__metadataStorer = metadataStorer
self.__privilegeStorer = privilegeStorer
self._tempfile = None
@property
def identifier(self):
""" Simple getter for the identifier attribute. """
return self.__identifier
@property
def uri(self):
""" Determines the URI of the item. """
result = None
baseUri = self.__fileSystem.baseUri
if not baseUri is None and not self.__identifier is None and len(self.__identifier) > 0:
if baseUri.endswith("/"):
result = baseUri + self.__identifier[1:]
else:
result = baseUri + self.__identifier
if result.endswith("/"):
result = result[:-1]
return result
@property
def name(self):
""" Returns the name component of the identifier. """
result = ""
if not self.__identifier is None:
result = self.__identifier.rsplit("/")[-1]
return result
@property
def fileSystem(self):
""" Simple getter for the fileSystem attribute. """
return self.__fileSystem
@property
def dataStorer(self):
""" Simple getter for the dataStorer attribute. """
return self.__dataStorer
@property
def metadataStorer(self):
""" Simple getter for the metadataStorer attribute. """
return self.__metadataStorer
@property
def privilegeStorer(self):
""" Simple getter for the privilegeStorer attribute. """
return self.__privilegeStorer
@property
def parent(self):
""" Returns the parent file storer. """
identifier = self.__identifier or ""
parentId = "/".join(identifier.rsplit("/")[:-1])
if parentId == "" and identifier.startswith("/") and identifier != "/":
parentId = "/"
return self.__fileSystem.createFileStorer(parentId)
@property
def linkTarget(self):
"""
Returns a file storer representing the target the link is pointing to.
        If the file storer is not a link, the property is C{None}.
"""
linkTarget = self.__dataStorer.linkTarget
if not linkTarget is None:
return self.__fileSystem.createFileStorer(linkTarget)
@property
def isLink(self):
"""
Determines whether the associated item is a symbolic link or not.
If it is a link the link target can be retrieved using the C{getChildren} method.
@return: Flag indicating whether it is a link or not.
@rtype: C{bool}
"""
return self.__dataStorer.isLink
@property
def isCollection(self):
"""
Determines whether the associated item is an item container or not.
@return: Flag indicating whether it is an item container or not.
@rtype: C{bool}
"""
return self.__dataStorer.isCollection
@property
def isLeaf(self):
"""
Determines whether the associated item is a leaf node or not.
@return: Flag indicating whether it is a leaf node or not.
@rtype: C{bool}
"""
return self.__dataStorer.isLeaf
@property
def canAddChildren(self):
"""
Determines whether it is possible to add new items below this item.
@return: Flag indicating the possibility of adding new items below.
@rtype: C{bool}
"""
return self.__dataStorer.canAddChildren
def createCollection(self, recursively=False):
"""
Creates a collection.
@param recursively: If set to C{True} all missing collections are created as well.
@type recursively: C{bool}
"""
self.__dataStorer.createCollection(recursively)
def createResource(self):
""" Creates a resource. """
self.__dataStorer.createResource()
def createLink(self, destination):
"""
Creates a symbolic link to the specified destination.
@param destination: Identifies the item that the link is pointing to.
        @type destination: L{FileStorer<datafinder.persistence.factory.FileStorer>}
"""
self.__dataStorer.createLink(destination.dataStorer)
def getChildren(self):
"""
Retrieves the logical identifiers of the child items.
In case of a symbolic link the identifier of the link target is returned.
@return: List of the child item identifiers.
@rtype: C{list} of L{FileStorer<datafinder.persistence.factory.FileStorer>}
"""
result = list()
for item in self.__dataStorer.getChildren():
result.append(self.__fileSystem.createFileStorer(item))
return result
def getChild(self, name):
"""
Returns a child for the given name without regarding the resource
type or existence of the parent item.
"""
if not self.__identifier.endswith("/"):
identifier = self.__identifier + "/" + name
else:
identifier = self.__identifier + name
return self.__fileSystem.createFileStorer(identifier)
def exists(self):
"""
Checks whether the item does already exist.
@return: Flag indicating the existence of the item.
@rtype: C{bool}
"""
return self.__dataStorer.exists()
def delete(self):
""" Deletes the item. """
self.__dataStorer.delete()
def copy(self, destination):
"""
Copies the associated item.
        @param destination: Identifies the copied item.
@type destination: L{FileStorer<datafinder.persistence.factory.FileStorer>}
"""
self.__dataStorer.copy(destination.dataStorer)
def move(self, destination):
"""
Moves the associated item.
@param destination: Identifies the moved item.
@type destination: L{FileStorer<datafinder.persistence.factory.FileStorer>}
"""
self.__dataStorer.move(destination.dataStorer)
def readData(self):
"""
Returns the associated data.
@return: Associated data.
@rtype: C{object} implementing the file protocol.
"""
return self.__dataStorer.readData()
def writeData(self, data):
"""
Writes data of the associated item.
@param data: Associated data.
@type data: C{object} implementing the file protocol.
"""
self.__dataStorer.writeData(data)
def getTemporaryFileObject(self, fileNameSuffix="", deleteOnClose=True):
"""
        Returns a named local temporary file object allowing access to the binary
        content. The file path of the temporary file is returned as well.
        @param deleteOnClose: Automatically deletes the temporary file object when it is closed. Default: C{True}
@type deleteOnClose: C{bool}
@param fileNameSuffix: Optional suffix for the name of the temporary file. Default: Empty string
@type fileNameSuffix: C{unicode}
@return: Tuple consisting of local file path and opened temporary file object.
@rtype: C{tuple} of C{unicode}, C{object} implementing file protocol
@raise PersistenceError: Indicating problems accessing file content.
"""
if self._tempfile is None \
or not os.path.exists(self._tempfile[0]):
inStream = self.readData()
try:
if deleteOnClose:
tempNamedFile = NamedTemporaryFile(suffix=fileNameSuffix)
path = tempNamedFile.name
fileHandle = tempNamedFile.file
else:
fd, path = mkstemp(suffix=fileNameSuffix)
fileHandle = os.fdopen(fd, "w+b")
block = inStream.read(_BLOCK_SIZE)
while len(block) > 0:
fileHandle.write(block)
block = inStream.read(_BLOCK_SIZE)
except (OSError, IOError), error:
reason = os.strerror(error.errno or 0)
errorMessage = "Cannot create local temporary file for '%s'. Reason: '%s'." % (self.identifier, reason)
raise PersistenceError(errorMessage)
finally:
inStream.close()
self._tempfile = path, fileHandle
        else:
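            # The temporary file still exists on disk, so it is simply re-opened.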
self._tempfile = self._tempfile[0], open(self._tempfile[0], "r+b")
return self._tempfile
def retrieveMetadata(self, propertyIds=None):
"""
Retrieves all meta data associated with the item.
        C{propertyIds} allows explicit selection of meta data.
        @param propertyIds: Optional list of property identifiers selecting the meta data. Default: C{None}
        @type propertyIds: C{list} of C{unicode}
@return: Meta data of the associated item.
@rtype: C{dict} of C{unicode}, L{MetadataValue<datafinder.common.metadata.
        value_mapping.MetadataValue>}
"""
return self.__metadataStorer.retrieve(propertyIds)
def updateMetadata(self, properties):
"""
Update the associated meta data.
Adds new properties or updates existing property values.
@param properties: New / updated meta data.
        @type properties: C{dict} of C{unicode}, C{object}
"""
self.__metadataStorer.update(properties)
def deleteMetadata(self, propertyIds):
"""
Deletes the selected meta data.
@param propertyIds: Specifies the meta data that has to be deleted.
@type propertyIds: C{list} of C{unicode}
"""
self.__metadataStorer.delete(propertyIds)
def updateAcl(self, acl):
"""
Updates the associated Access Control List (ACL).
@param acl: Describes the ACL of the item. Every entry describes
        a list of granted/denied privileges of a specific principal.
For defined privileges see L{datafinder.persistence.constants}.
@type acl: C{dict} of C{unicode} of C{tuple} of C{list} of C{unicode}, C{list} of C{unicode}.
"""
self.__privilegeStorer.updateAcl(acl)
def retrievePrivileges(self):
"""
Determines the privileges that the current user owns regarding the associated item.
@return: List of granted privileges.
For defined privileges see L{datafinder.persistence.constants}.
@rtype: C{list} of C{unicode}
"""
return self.__privilegeStorer.retrievePrivileges()
def retrieveAcl(self):
"""
        Retrieves the ACL of the associated item.
        @return: ACL of the item. Every entry describes
        a list of granted/denied privileges of a specific principal.
        For defined privileges see L{datafinder.persistence.constants}.
        @rtype: C{dict} of C{unicode} of C{tuple} of C{list} of C{unicode}, C{list} of C{unicode}.
"""
return self.__privilegeStorer.retrieveAcl()
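# Example (added for illustration; not part of the original module): a minimal
# sketch that transfers the content of one file storer to another one using
# only readData and writeData as defined above. Both parameters are assumed to
# be FileStorer instances, e.g. obtained via FileSystem.createFileStorer.
def _transferContent(sourceFileStorer, destinationFileStorer):
    """ Example only: copies binary content between two file storers. """
    inStream = sourceFileStorer.readData()
    try:
        if not destinationFileStorer.exists():
            destinationFileStorer.createResource()
        destinationFileStorer.writeData(inStream)
    finally:
        inStream.close()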
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/filestorer.py",
"copies": "1",
"size": "15736",
"license": "bsd-3-clause",
"hash": -3820979819607770600,
"line_mean": 34.4259259259,
"line_max": 119,
"alpha_frac": 0.5924631418,
"autogenerated": false,
"ratio": 4.8137044967880085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030253461915197594,
"num_lines": 432
} |
"""
Provides some general functionalities commonly used by different pages.
"""
__version__ = "$Revision-Id:$"
def determineTargetDataTypes(baseRepositoryModel, index):
""" Determines the target data types for the given index. """
targetDataTypes = list()
item = baseRepositoryModel.nodeFromIndex(index)
repository = baseRepositoryModel.repository
dataTypeName = None
if not item.dataType is None:
dataTypeName = item.dataType.name
if not dataTypeName is None or item.isRoot:
targetDataTypes = repository.configuration.getTargetDataTypes(dataTypeName)
return targetDataTypes
def dataTypesCompatible(baseRepositoryModel, sourceIndex, targetIndex):
""" Determines whether a connection from source index to the target index is defined in the data model. """
    source = baseRepositoryModel.nodeFromIndex(sourceIndex)
    target = baseRepositoryModel.nodeFromIndex(targetIndex)
repository = baseRepositoryModel.repository
sourceDataTypeName = None
if not source.dataType is None:
sourceDataTypeName = source.dataType.name
targetDataTypeName = None
if not target.dataType is None:
targetDataTypeName = target.dataType.name
try:
result = repository.configuration.existsConnection(sourceDataTypeName, targetDataTypeName)
except AttributeError:
result = True
return result
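# Example (added for illustration; not part of the original module): a minimal
# sketch showing how a wizard page might use the helper above. The parameters
# are assumed to be the repository model and the currently selected index.
def _targetDataTypeNames(baseRepositoryModel, index):
    """ Example only: lists the names of all allowed target data types. """
    return [targetDataType.name
            for targetDataType in determineTargetDataTypes(baseRepositoryModel, index)]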
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/pages/utils.py",
"copies": "1",
"size": "3149",
"license": "bsd-3-clause",
"hash": -8367484879386014000,
"line_mean": 38.3717948718,
"line_max": 111,
"alpha_frac": 0.7361067005,
"autogenerated": false,
"ratio": 4.355463347164592,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021156292228196413,
"num_lines": 78
} |
"""
Represents an user / group / role.
"""
from datafinder.core.error import PrincipalError
from datafinder.persistence.principal_search.principal import constants, Principal as PersistedPrincipal
__version__ = "$Revision-Id:$"
class _PrincipalType(object):
"""
This class defines available properties of a principal type.
The class mainly exists for documentation reasons and is intended
to be replaced by named tuples when switching to Python 3.
"""
def __init__(self, identifier, displayName, description):
"""
Constructor.
@param identifier: Identifier of the principal type.
@type identifier: C{unicode}
@param displayName: Display name of the principal type.
@type displayName: C{unicode}
@param description: Description of the principal type.
@type description: C{unicode}
"""
self.identifier = identifier
self.displayName = displayName
self.description = description
def __cmp__(self, other):
try:
return cmp(self.identifier, other.identifier)
except AttributeError:
return 1
def __hash__(self):
return hash(self.identifier)
USER_PRINCIPAL_TYPE = _PrincipalType(constants.USER_PRINCIPAL_TYPE,
"User Type", "Represents a user.")
GROUP_PRINCIPAL_TYPE = _PrincipalType(constants.GROUP_PRINCIPAL_TYPE,
"Group Type", "Represents a group.")
PRINCIPAL_TYPES = [USER_PRINCIPAL_TYPE, GROUP_PRINCIPAL_TYPE]
class Principal(object):
"""
Represents an user / group / role.
"""
def __init__(self, identifier, displayName=None):
"""
@param identifier: Identifier of the principal.
@type identifier: C{unicode}
        @param displayName: Display name of the principal.
@type displayName: C{unicode}
"""
self.identifier = identifier
self.displayName = displayName
self.type = USER_PRINCIPAL_TYPE
self.memberof = set()
if displayName is None:
self.displayName = self.identifier
def __cmp__(self, other):
""" Compares two instances. """
return cmp(self.identifier, other.identifier)
def __hash__(self):
""" Calculates has value in accordance to comparison. """
return id(self.identifier)
def toPersistenceFormat(self):
"""
Maps the principal to the format required by the persistence layer.
@return: Principal in persistence format.
@rtype: L{Principal<datafinder.persistence.principal_search.principal.Principal>}
"""
mappedPrincipal = PersistedPrincipal(self.identifier)
mappedPrincipal.type = self.type.identifier
mappedPrincipal.displayName = self.displayName
for principal in self.memberof:
mappedPrincipal.memberof.append(principal.toPersistenceFormat())
return mappedPrincipal
@staticmethod
def create(principal):
"""
Creates a principal from the persistence representation.
        @param principal: Principal in persistence format.
        @type principal: L{Principal<datafinder.persistence.principal_search.principal.Principal>}
        @return: User / group / role.
        @rtype: L{Principal<datafinder.core.item.privileges.principal.Principal>}
        @raise PrincipalError: Indicates an invalid principal type or an infinite group membership loop.
"""
mappedPrincipal = Principal(principal.identifier)
foundPrincipalType = False
for principalType in PRINCIPAL_TYPES:
if principalType.identifier == principal.type:
mappedPrincipal.type = principalType
foundPrincipalType = True
if not foundPrincipalType:
raise PrincipalError("Unsupported principal type '%s'." % principal.type)
mappedPrincipal.displayName = principal.displayName
try:
            for memberPrincipal in principal.memberof:
                mappedPrincipal.memberof.add(Principal.create(memberPrincipal))
except RuntimeError:
raise PrincipalError("Detected loop when trying to find out the groups the principal is member of.")
return mappedPrincipal
OWNER_PRINCIPAL = Principal("____owner____", "Owner")
AUTHENTICATED_PRINCIPAL = Principal("____authenticated____", "Authenticated Principal")
AUTHENTICATED_PRINCIPAL.type = GROUP_PRINCIPAL_TYPE
UNAUTHENTICATED_PRINCIPAL = Principal("____unauthenticated____", "Unauthenticated Principal")
UNAUTHENTICATED_PRINCIPAL.type = GROUP_PRINCIPAL_TYPE
SPECIAL_PRINCIPALS = [OWNER_PRINCIPAL, AUTHENTICATED_PRINCIPAL, UNAUTHENTICATED_PRINCIPAL]
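# Example (added for illustration; not part of the original module): a minimal
# sketch creating a user that is member of a group and mapping it to the
# persistence format. The identifiers used are illustrative values only.
def _examplePrincipal():
    """ Example only: builds a user principal with one group membership. """
    group = Principal("developers", "Developers")
    group.type = GROUP_PRINCIPAL_TYPE
    user = Principal("jdoe", "John Doe")
    user.memberof.add(group)
    return user.toPersistenceFormat()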
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/privileges/principal.py",
"copies": "1",
"size": "6528",
"license": "bsd-3-clause",
"hash": -9053203207434615000,
"line_mean": 36.1754385965,
"line_max": 112,
"alpha_frac": 0.6564031863,
"autogenerated": false,
"ratio": 4.527045769764216,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017884091458165753,
"num_lines": 171
} |
"""
Implements the meta data search dialog including results view.
"""
import bisect
from PyQt4 import QtCore, QtGui
from datafinder.common.logger import getDefaultLogger
from datafinder.core import search_restriction
from datafinder.gui.gen.user.search_dialog_ui import Ui_searchDialog
from datafinder.gui.user.common.controller import AbstractController
from datafinder.gui.user.common import util
from datafinder.gui.user.common.item_selection_dialog import ItemSelectionDialog
from datafinder.gui.user.controller.output.searchresults import SearchResultController
from datafinder.gui.user.models.repository.filter.leaf_filter import LeafFilter
from datafinder.gui.user.models.repository.filter.search_filter import SearchFilter
from datafinder.gui.user.dialogs.search_dialog.utils import KeywordSearchQueryConverter
from datafinder.gui.user.dialogs.search_dialog.search_query_editor import Completer, SearchSyntaxHighlighter
from datafinder.gui.user.dialogs.search_dialog.utils import SearchQueryAnalyzer
__version__ = "$Revision-Id:$"
class SearchDialog(QtGui.QDialog, Ui_searchDialog):
"""
This class implements the search dialog.
"""
_logger = getDefaultLogger()
def __init__(self, repositoryModel, parentWidget, itemActionController):
"""
Constructor.
@param repositoryModel: Reference on the repository model.
@type repositoryModel: L{RepositoryModel<datafinder.gui.user.models.repository.repository.RepositoryModel>}
@param parentWidget: Parent widget of this dialog.
@type parentWidget: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QDialog.__init__(self, parentWidget)
Ui_searchDialog.__init__(self)
self.setupUi(self)
self._collectionSearchDialog = None
self._parser = search_restriction.SearchRestrictionParser()
self.__model = repositoryModel
self._worker = None
self._initialSearchQuery = ""
self.__searchQueryAnalyzer = SearchQueryAnalyzer(self._parser, self._preparePropertyDefinitionToolTips())
self._initSearchQueryEditor()
self.__highlighter = SearchSyntaxHighlighter(self.__searchQueryAnalyzer, self.restrictionTextEdit)
self.__searchResultController = SearchResultController(self.resultsTableView, parentWidget, self, itemActionController)
self.__searchResultController.model = SearchFilter(self.__model)
self.__storedSearchesController = _SearchStorerController(self.searchesListView, self)
self.__storedSearchesController.model = _ServerSearchStoreModel(repositoryModel.preferences)
self._keywordSearchQueryConverter = KeywordSearchQueryConverter(self.__model.repository.configuration.registeredPropertyDefinitions)
self.storedSearchesPushButton.setChecked(True)
self._activateExpertMode()
self.expertModePushButton.setEnabled(False)
self._connectSlots()
def show(self):
""" @see: L{QDialog<PyQt4.QtGui.QDialog>}"""
self.startLineEdit.setText(self.__model.activePath or "/")
QtGui.QDialog.show(self)
def _initSearchQueryEditor(self):
""" Initializes the search query editor. """
self.restrictionTextEdit.searchQueryAnalyzer = self.__searchQueryAnalyzer
propertyNameCompleter = Completer(self._preparePropertyDefinitionToolTips(), True)
self.restrictionTextEdit.registerCompleter(propertyNameCompleter, SearchQueryAnalyzer.PROPERTY_TYPE)
comparisionOperatorCompleter = QtGui.QCompleter(self._parser.comparisonTokens)
comparisionOperatorCompleter.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.restrictionTextEdit.registerCompleter(comparisionOperatorCompleter, SearchQueryAnalyzer.COMPARISON_TYPE)
conjunctionOperatorCompleter = QtGui.QCompleter(self._parser.conjunctionTokens)
conjunctionOperatorCompleter.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.restrictionTextEdit.registerCompleter(conjunctionOperatorCompleter, SearchQueryAnalyzer.CONJUNCTION_TYPE)
completions = {u"''": u"Empty Value.",
u"'DD.MM.YYYY'": u"Date Format Value.",
u"'DD.MM.YYYY HH:MM:SS'": u"Date Time Format Value."}
valueStateCompleter = Completer(completions)
self.restrictionTextEdit.registerCompleter(valueStateCompleter, SearchQueryAnalyzer.LITERAL_TYPE)
self.restrictionTextEdit.state = SearchQueryAnalyzer.PROPERTY_TYPE
def _preparePropertyDefinitionToolTips(self):
""" Prepares a dictionary containing the tool tips of existing property definitions. """
result = dict()
for propDef in self.__model.repository.configuration.registeredPropertyDefinitions.values():
result[propDef.identifier] = util.determinePropertyDefinitionToolTip(propDef)
return result
def _connectSlots(self):
""" Connects signals and slots. """
self.connect(self.startSelectionButton, QtCore.SIGNAL("clicked()"), self._showSelectionClickedSlot)
self.connect(self.closeButton, QtCore.SIGNAL("clicked()"), self._closeClickedSlot)
self.connect(self.searchesListView, QtCore.SIGNAL("doubleClicked(QModelIndex)"), self._storedSearchClickedSlot)
self.connect(self.expertModePushButton, QtCore.SIGNAL("clicked(bool)"), self._searchModeChanged)
self.connect(self.storedSearchesPushButton, QtCore.SIGNAL("clicked(bool)"), self._storedSearchesModeChanged)
self.connect(self.saveSearchButton, QtCore.SIGNAL("clicked()"), self._handleSearchStored)
def _handleSearchStored(self):
""" Reacts on when saving a search. """
self._initialSearchQuery = self.restrictionTextEdit.toPlainText()
def _showSelectionClickedSlot(self):
"""
Slot is called when the start selection button was clicked.
"""
leafFilterModel = LeafFilter(self.__model)
self._collectionSearchDialog = ItemSelectionDialog(leafFilterModel, self)
self._collectionSearchDialog.selectedIndex = self.__model.indexFromPath(unicode(self.startLineEdit.text()))
if self._collectionSearchDialog.exec_() == QtGui.QDialog.Accepted:
item = self.__model.nodeFromIndex(self._collectionSearchDialog.selectedIndex)
self.startLineEdit.setText(item.path or "/")
def _closeClickedSlot(self):
"""
Slot is called when the close button was clicked.
"""
if self._proceedWithUnsavedChanges():
self.__storedSearchesController.model.save()
self.close()
def _expertSearchClickedSlot(self):
"""
Slot is called when the search button was clicked and the dialog is in
the expert mode.
"""
self.resultsGroupBox.show()
self.setEnabled(False)
path = unicode(self.startLineEdit.text())
query = unicode(self.restrictionTextEdit.toPlainText())
self._worker = util.startNewQtThread(self.__model.search,
self._searchFinishedSlot,
self.__model.indexFromPath(path),
query)
def _searchFinishedSlot(self):
"""
Slot is called when the search thread finished its work
and the dialog is in expert mode.
"""
if not self._worker.error is None:
QtGui.QMessageBox.critical(self, "Search Error", self._worker.error.message)
self.setEnabled(True)
def _simpleSearchClickedSlot(self):
"""
Slot is called when the search button was clicked and the dialog is in the simple
search mode.
"""
self.splitter_2.show()
self.resultsGroupBox.show()
self.setEnabled(False)
text = unicode(self.keywordLineEdit.text())
path = unicode(self.startLineEdit.text())
query = self._keywordSearchQueryConverter.convert(text)
self._worker = util.startNewQtThread(self.__model.search,
self._searchFinishedSlot,
self.__model.indexFromPath(path),
query)
def _storedSearchClickedSlot(self, index):
""" Slot is called when a search was selected from the stored searches. """
if self._proceedWithUnsavedChanges():
self.__storedSearchesController.searchSelectedSlot(index)
restriction = self.__storedSearchesController.model.restrictionFromIndex(index)
self._initialSearchQuery = restriction
self.restrictionTextEdit.setText(restriction)
else:
self.searchesListView.clearSelection()
def _searchModeChanged(self, expertMode):
"""
Slot is called when the state of the expertModePushButton changed.
"""
if expertMode:
self._activateExpertMode()
else:
if self._proceedWithUnsavedChanges():
self._activateSimpleMode()
else:
self.expertModePushButton.setChecked(True)
def _proceedWithUnsavedChanges(self):
"""
Checks for unsaved changes of the search restrictions
and asks the user how to proceed.
"""
continue_ = True
if self._initialSearchQuery != self.restrictionTextEdit.toPlainText():
answer = QtGui.QMessageBox.question(self, "Unsaved Changes", "Some changes of the search restrictions "\
+ "have not yet been saved and may be lost when you proceed."\
+ "\nDo you want to continue?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
continue_ = False
return continue_
def _storedSearchesModeChanged(self, storedSearches):
"""
Slot is called when the state of the storedSearchesPushButton changed.
"""
self.storedSearchGroupBox.setVisible(bool(storedSearches))
def _activateSimpleMode(self):
"""
        Activates the simple mode. It hides all expert-mode widgets and
        makes all simple-mode widgets visible. It also manages the signal-slot
        connections.
"""
self.disconnect(self.__searchQueryAnalyzer, QtCore.SIGNAL(SearchQueryAnalyzer.VALIDATION_SIGNAL), self.searchButton.setEnabled)
self.disconnect(self.__searchQueryAnalyzer, QtCore.SIGNAL(SearchQueryAnalyzer.VALIDATION_SIGNAL),
self.__storedSearchesController.validationSlot)
self.disconnect(self.searchButton, QtCore.SIGNAL("clicked()"), self._expertSearchClickedSlot)
self.connect(self.keywordLineEdit, QtCore.SIGNAL("textChanged(const QString &)"), self._handleKeywordLineEditTextChanged)
self.connect(self.searchButton, QtCore.SIGNAL("clicked()"), self._simpleSearchClickedSlot)
self.splitter_2.hide()
self.resultsGroupBox.hide()
self.optionsGroupBox.hide()
self.storedSearchGroupBox.hide()
self.simpleSearchGroupBox.show()
self.searchButton.show()
self.storedSearchesPushButton.hide()
self.resize(self.minimumSize().width(), self.sizeHint().height())
self._handleKeywordLineEditTextChanged(self.keywordLineEdit.text())
def _handleKeywordLineEditTextChanged(self, newText):
""" Handles changes of the keyword line editor. """
isEmpty = len(unicode(newText).strip()) == 0
self.searchButton.setEnabled(not isEmpty)
def _activateExpertMode(self):
"""
        Activates the expert mode. It hides all simple-mode widgets and
        makes all expert-mode widgets visible. It also manages the signal-slot
        connections.
"""
self.disconnect(self.searchButton, QtCore.SIGNAL("clicked()"), self._simpleSearchClickedSlot)
self.disconnect(self.keywordLineEdit, QtCore.SIGNAL("textChanged(const QString &)"), self._handleKeywordLineEditTextChanged)
self.optionsGroupBox.show()
self.storedSearchGroupBox.setVisible(self.storedSearchesPushButton.isChecked())
self.connect(self.searchButton, QtCore.SIGNAL("clicked()"), self._expertSearchClickedSlot)
self.connect(self.__searchQueryAnalyzer, QtCore.SIGNAL(SearchQueryAnalyzer.VALIDATION_SIGNAL), self.searchButton.setEnabled)
self.connect(self.__searchQueryAnalyzer, QtCore.SIGNAL(SearchQueryAnalyzer.VALIDATION_SIGNAL),
self.__storedSearchesController.validationSlot)
self.startLabel.hide()
self.startSelectionButton.hide()
self.splitter_2.show()
self.simpleSearchGroupBox.hide()
self.storedSearchesPushButton.show()
query = self._keywordSearchQueryConverter.convert(unicode(self.keywordLineEdit.text()))
self._initialSearchQuery = query
self.restrictionTextEdit.setText(query)
class _SearchStorerController(AbstractController):
"""
Class controls the storage and retrieval of stored searches.
"""
def __init__(self, listView, parentController):
"""
Constructor.
"""
AbstractController.__init__(self, listView, parentController=parentController)
self.__valid = False
self.connect(self.widget, QtCore.SIGNAL("modelUpdateSignal"), self.__modelUpdateSlot)
self.connect(self.parentController.saveSearchButton,
QtCore.SIGNAL("clicked()"),
self.saveSearchClickedSlot)
self.connect(self.parentController.deleteSearchButton,
QtCore.SIGNAL("clicked()"),
self.deleteSearchClickedSlot)
self.connect(self.parentController.searchNameLineEdit,
QtCore.SIGNAL("textChanged(QString)"),
self.searchNameTextChanged)
def __modelUpdateSlot(self, _):
"""
Slot is called when the model has changed.
"""
self.connect(self.selectionModel(),
QtCore.SIGNAL("currentChanged(QModelIndex, QModelIndex)"),
self.searchSelectedSlot)
self.connect(self.selectionModel(),
QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
self.__deleteButtonEnableSlot)
def __saveButtonEnableCheck(self):
"""
        Checks whether the restriction text is valid and the search name line edit isn't empty.
"""
searchSaveName = unicode(self.parentController.searchNameLineEdit.text()).strip()
self.parentController.saveSearchButton.setEnabled(self.__valid and len(searchSaveName) > 0)
def __deleteButtonEnableSlot(self):
"""
        Checks whether an item is selected in the L{QtGui.QListView} and enables or disables the delete button accordingly.
"""
self.parentController.deleteSearchButton.setEnabled(self.selectionModel().hasSelection())
def deleteSearchClickedSlot(self):
"""
Deletes the selected search from the model.
"""
if self.selectionModel().hasSelection():
index = self.selectionModel().selectedIndexes()[0]
self.model.remove(index)
def saveSearchClickedSlot(self):
"""
Slot is called when a new search has to be added to the search storer view.
"""
name = unicode(self.parentController.searchNameLineEdit.text())
restriction = unicode(self.parentController.restrictionTextEdit.toPlainText())
index = self.model.set(name, restriction)
self.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)
def validationSlot(self, valid):
"""
Slot is called when the validation of the restriction text edit has changed.
@param valid: The information if the restriction is valid.
@type valid: C{bool}
"""
self.__valid = valid
self.__saveButtonEnableCheck()
def searchNameTextChanged(self):
"""
Slot is called when the name of the save name line edit changed.
"""
self.__saveButtonEnableCheck()
def searchSelectedSlot(self, index):
"""
Slot is called when a stored search was selected.
@param index: The index of the selected stored search.
@type index: C{QtCore.QModelIndex}
"""
if index.isValid():
self.parentController.searchNameLineEdit.setText(self.model.data(index).toString())
else:
self.parentController.searchNameLineEdit.setText("")
class _ServerSearchStoreModel(QtCore.QAbstractTableModel):
"""
    The SearchStorerModel contains all stored searches. The class is responsible for holding
    the data structure and for managing it.
"""
def __init__(self, preferences):
"""
Constructor.
@param preferences: Object containing application preferences.
@type preferences: L{PreferencesHandler<datafinder.core.configuration.preferences.PreferencesHandler>}
"""
QtCore.QAbstractTableModel.__init__(self, None)
self._preferences = preferences
self._dirty = False
self._columnCount = 1
self._storedSearchQueries = list()
for searchQuery in preferences.searchQueries:
self._insert(searchQuery.name, searchQuery.query)
def _insert(self, name, restriction):
"""
        Inserts the given arguments at the right position of this list.
        After the insert, the internal list is still sorted.
"""
if not name is None and not restriction is None:
toolTip = self._determineRestrictionToolTip(restriction)
bisectList = util.BisectColumnHelper(self._storedSearchQueries, 0)
row = bisect.bisect_left(bisectList, name.lower())
self.beginInsertRows(QtCore.QModelIndex(), row, row)
self._storedSearchQueries.insert(row, [name, restriction, toolTip])
self.endInsertRows()
return row
@staticmethod
def _determineRestrictionToolTip(restriction):
""" Determines the tool tip of the given restrictions. """
toolTip = restriction
toolTipLength = len(toolTip)
for counter in range(1, (toolTipLength / 60) + 1):
toolTip = toolTip[:(counter * 60)] + "\n" + toolTip[(counter * 60):]
return toolTip
def rowCount(self, _=QtCore.QModelIndex()):
"""
@see: L{rowCount<PyQt4.QtCore.QAbstractTableModel.rowCount>}
"""
return len(self._storedSearchQueries)
def columnCount(self, _=QtCore.QModelIndex()):
"""
@see: L{columnCount<PyQt4.QtCore.QAbstractTableModel.columnCount>}
"""
return self._columnCount
def data(self, index, role=QtCore.Qt.DisplayRole):
"""
@see: L{data<PyQt4.QtCore.QAbstractTableModel.data>}
"""
row = index.row()
variant = QtCore.QVariant()
if role == QtCore.Qt.DisplayRole:
variant = QtCore.QVariant(self._storedSearchQueries[row][index.column()])
elif role == QtCore.Qt.ToolTipRole:
variant = QtCore.QVariant(self._storedSearchQueries[row][2])
return variant
def set(self, name, restriction):
"""
        Adds a new entry or edits an existing entry in the stored searches.
@param name: Name of the new search.
@type name: C{unicode}
@param restriction: Restriction that has to be saved under the given name.
@type restriction: C{unicode}
        @return: The index of the newly created search item.
@rtype: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
name = unicode(name)
restriction = unicode(restriction)
try:
row = [item[0] for item in self._storedSearchQueries].index(name)
except ValueError:
row = self._insert(name, restriction)
else:
self._storedSearchQueries[row][1] = restriction
self._storedSearchQueries[row][2] = self._determineRestrictionToolTip(restriction)
self._dirty = True
return self.createIndex(row, 0, self._storedSearchQueries[row][0])
def remove(self, index):
"""
Deletes the search under the given index.
@param index: The index that has to be deleted.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
row = index.row()
self.beginRemoveRows(QtCore.QModelIndex(), row, row)
del self._storedSearchQueries[row]
self.endRemoveRows()
self._dirty = True
def save(self, force=False):
"""
        Saves all searches in the preferences.
        @param force: Forces the save process even if no data has changed.
        @type force: C{bool}
"""
if self._dirty or force:
self._preferences.clearSearchQueries()
for name, query, _ in self._storedSearchQueries:
self._preferences.addSearchQuery(name, query)
self._dirty = False
def restrictionFromIndex(self, index):
"""
Returns the restriction from the given index.
@param index: The index from which the restriction has to be returned.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@return: The restriction for the given index.
@rtype: C{unicode}
"""
if index.isValid():
return unicode(self._storedSearchQueries[index.row()][1])
return ""
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/search_dialog/main.py",
"copies": "1",
"size": "24338",
"license": "bsd-3-clause",
"hash": 8203573779657564000,
"line_mean": 40.0345423143,
"line_max": 140,
"alpha_frac": 0.6416303723,
"autogenerated": false,
"ratio": 4.660666411336653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01145978486904752,
"num_lines": 579
} |
"""
Tests for the property editor factory.
"""
import unittest, sys, datetime
from PyQt4 import QtGui, QtCore
from datafinder.core.configuration.properties import constants
from datafinder.core.configuration.properties.property_type import PROPERTY_TYPE_NAMES
from datafinder.gui.user.common.widget.property.editors.factory import EditorFactory
from datafinder.gui.user.common.widget.property.editors.list_editor import ListEditor
__version__ = "$Revision-Id:$"
class EditorFactoryTest(unittest.TestCase):
"""
Tests for the property editor factory.
"""
def setUp(self):
""" Creates object under test. """
self._editorFactory = EditorFactory()
self._app = QtGui.QApplication(sys.argv)
def testGetEditorForPropTypes(self):
"""
        Tests the createEditor method. It checks if every property type has a corresponding
editor.
"""
propTypes = PROPERTY_TYPE_NAMES[:]
propTypes.remove(u'Any')
for propType in propTypes:
self._editorFactory.createEditor(None, propType)
def testGetCorrectEditor(self):
"""
Checks that the correct editor type is returned corresponding to the input type.
"""
self.assertTrue(type(self._editorFactory.createEditor(None, 'Number')) == QtGui.QDoubleSpinBox)
self.assertTrue(type(self._editorFactory.createEditor(None, 'Boolean')) == QtGui.QCheckBox)
self.assertTrue(type(self._editorFactory.createEditor(None, 'Date Time')) == QtGui.QDateTimeEdit)
self.assertTrue(isinstance(self._editorFactory.createEditor(None, 'String'), QtGui.QLineEdit))
self.assertTrue(type(self._editorFactory.createEditor(None, 'List')) == ListEditor)
def testGetValueFromEditor(self):
"""
Tests the mapping from editor to return type
"""
lineEdit = QtGui.QLineEdit()
lineEdit.setText(QtCore.QString(u"TestValue"))
self.assertEquals(self._editorFactory.getValueFromEditor(lineEdit), u"TestValue")
lineEdit = QtGui.QLineEdit()
lineEdit.setText(QtCore.QString(u""))
self.assertEquals(self._editorFactory.getValueFromEditor(lineEdit), None)
spinBox = QtGui.QDoubleSpinBox()
spinBox.setValue(23.04)
self.assertEquals(self._editorFactory.getValueFromEditor(spinBox), 23.04)
checkBox = QtGui.QCheckBox()
checkBox.setChecked(True)
self.assertTrue(self._editorFactory.getValueFromEditor(checkBox))
comboBox = QtGui.QComboBox()
comboBox.addItems([u"test1"])
self.assertEquals(self._editorFactory.getValueFromEditor(comboBox), u"test1")
listEditor = ListEditor(dict(), self._editorFactory, ["test"])
self.assertEquals(self._editorFactory.getValueFromEditor(listEditor), ["test"])
listEditor = ListEditor(dict(), self._editorFactory)
self.assertEquals(self._editorFactory.getValueFromEditor(listEditor), list())
def testEditorRestrictionsStringInt(self):
"""
Tests restrictions for integer and string editors
"""
restrictions = {constants.MAXIMUM_LENGTH: 12,
constants.MAXIMUM_VALUE: 500,
constants.MINIMUM_VALUE: 10,
constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES: 5,
constants.MINIMUM_NUMBER_OF_DECIMAL_PLACES: 1,
constants.PATTERN : 'A.B*C'
}
lineEdit = self._editorFactory.createEditor(None, "String", restrictions)
self.assertTrue(type(lineEdit.validator()) == QtGui.QRegExpValidator)
self.assertTrue(lineEdit.maxLength() == restrictions[constants.MAXIMUM_LENGTH])
spinBox = self._editorFactory.createEditor(None, "Number", restrictions)
self.assertTrue(spinBox.maximum() == restrictions[constants.MAXIMUM_VALUE])
self.assertTrue(spinBox.minimum() == restrictions[constants.MINIMUM_VALUE])
self.assertTrue(spinBox.decimals() == restrictions[constants.MAXIMUM_NUMBER_OF_DECIMAL_PLACES])
def testEditorRestrictionsDateTime(self):
"""
Tests restrictions for the date time editor
"""
restrictions = {
constants.MINIMUM_VALUE: datetime.datetime(1950, 1, 1, 0, 15),
constants.MAXIMUM_VALUE: datetime.datetime(2010, 1, 1, 0, 15),
}
dateTimeEdit = self._editorFactory.createEditor(None, "Date Time", restrictions)
self.assertTrue(dateTimeEdit.maximumDateTime().toPyDateTime() == restrictions[constants.MAXIMUM_VALUE])
self.assertTrue(dateTimeEdit.minimumDateTime().toPyDateTime() == restrictions[constants.MINIMUM_VALUE])
def testEditorRestrictionOption(self):
"""
Tests the OPTIONS restriction for Strings
"""
restrictions = {constants.OPTIONS: ""}
comboBox = self._editorFactory.createEditor(None, "String", restrictions)
self.assertTrue(type(comboBox) == QtGui.QComboBox)
def testSetEditorValue(self):
"""
Tests the setEditorValue method
"""
lineEdit = QtGui.QLineEdit()
self._editorFactory.setEditorValue(lineEdit, u"Test")
self.assertTrue(lineEdit.text() == u"Test" )
spinBox = QtGui.QDoubleSpinBox()
self._editorFactory.setEditorValue(spinBox, 2.05)
self.assertTrue(spinBox.value() == 2.05)
checkBox = QtGui.QCheckBox()
self._editorFactory.setEditorValue(checkBox, True)
self.assertTrue(checkBox.isChecked() == True)
def tearDown(self):
""" Cleans up the test environment. """
self._app.quit()
self._app.deleteLater()
| {
"repo_name": "DLR-SC/DataFinder",
"path": "test/unittest/datafinder_test/gui/user/common/widget/property/editors/factory_test.py",
"copies": "1",
"size": "7807",
"license": "bsd-3-clause",
"hash": 4962720561371037000,
"line_mean": 40.2,
"line_max": 111,
"alpha_frac": 0.648904829,
"autogenerated": false,
"ratio": 4.418222976796831,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023212323045223655,
"num_lines": 185
} |
"""
Tests of the data type representation.
"""
import unittest
from datafinder.core.configuration.datamodel import datatype
from datafinder.core.configuration.properties.constants import DATAMODEL_PROPERTY_CATEGORY
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
class DataTypeTestCase(unittest.TestCase):
""" Tests of the data type representation. """
def setUp(self):
""" Creates object under test. """
self._dataType = datatype.DataType("name")
def testPropertyDefinitionHandling(self):
""" Tests the handling of property definitions. """
self._dataType.addPropertyDefinition(SimpleMock(identifier="name"))
self._dataType.addPropertyDefinition(SimpleMock(identifier="name2"))
self.assertEquals(len(self._dataType.propertyDefinitions), 2)
self._dataType.addPropertyDefinition(SimpleMock(identifier="name"))
self.assertEquals(len(self._dataType.propertyDefinitions), 2)
self._dataType.removePropertyType("name")
self._dataType.removePropertyType("name2")
self.assertEquals(len(self._dataType.propertyDefinitions), 0)
self._dataType.propertyDefinitions = [SimpleMock(identifier="name"), SimpleMock(identifier="name2")]
self.assertEquals(len(self._dataType.propertyDefinitions), 2)
def testComparison(self):
""" Tests the comparison of two data type instances. """
self.assertEquals(self._dataType, self._dataType)
self.assertEquals(self._dataType, datatype.DataType("name"))
self.assertNotEquals(self._dataType, datatype.DataType("name1"))
self.assertNotEquals(self._dataType, None)
def testLoad(self):
""" Tests the initialization from persistence format. """
persistedDataType = SimpleMock(name="name", iconName="iconName",
properties=[SimpleMock(name="name", valueType="Any",
defaultValue="string", mandatory=False)])
dataType = datatype.DataType.load(persistedDataType)
self.assertEquals(dataType.name, "name")
self.assertEquals(dataType.iconName, "iconName")
self.assertEquals(len(dataType.propertyDefinitions), 1)
self.assertEquals(dataType.propertyDefinitions[0].category, DATAMODEL_PROPERTY_CATEGORY)
self.assertEquals(dataType.propertyDefinitions[0].namespace, self._dataType.name)
def testToPersistenceRepresentation(self):
""" Tests the transformation into the persistence format. """
self._dataType.addPropertyDefinition(SimpleMock("name"))
persistedDataType = self._dataType.toPersistenceRepresentation()
self.assertEquals(persistedDataType.name, "name")
self.assertEquals(persistedDataType.iconName, "dataType")
self.assertEquals(len(persistedDataType.properties), 1)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "test/unittest/datafinder_test/core/configuration/datamodel/datatype_test.py",
"copies": "1",
"size": "4772",
"license": "bsd-3-clause",
"hash": -696258209623771000,
"line_mean": 43.0188679245,
"line_max": 108,
"alpha_frac": 0.6919530595,
"autogenerated": false,
"ratio": 4.447343895619758,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01928780790918555,
"num_lines": 106
} |
"""
Tests utility functionality.
"""
__version__ = "$Revision-Id:$"
import unittest
from datafinder.persistence.adapters.webdav_ import util
_PERSISTENCE_ID = "http://test.de:80/hhh/j/c:/lll/"
_INTERFACE_ID = "/c:/lll"
class ItemIdentifierMapperTestCase(unittest.TestCase):
""" Tests the identifier mapping. """
def testMapIdentifier(self):
""" Tests the normal behavior of the identifier mapping. """
mapper = util.ItemIdentifierMapper("http://test.de:80/hhh/j")
self.assertEquals(mapper.mapIdentifier("/c:/lll/"), _PERSISTENCE_ID)
self.assertEquals(mapper.mapIdentifier("c:/lll/"), _PERSISTENCE_ID)
mapper = util.ItemIdentifierMapper("http://test.de:80/hhh/j/")
self.assertEquals(mapper.mapIdentifier("/c:/lll/"), _PERSISTENCE_ID)
self.assertEquals(mapper.mapIdentifier("c:/lll/"), _PERSISTENCE_ID)
def testMapPersistenceIdentifier(self):
""" Tests the normal behavior of the persistence ID mapping. """
mapper = util.ItemIdentifierMapper("http://test.de:80/hhh/j")
self.assertEquals(mapper.mapPersistenceIdentifier("http://test.de:80/hhh/j/c:/lll/"), _INTERFACE_ID)
self.assertEquals(mapper.mapPersistenceIdentifier("http://test.de:80/hhh/j/c:/lll"), _INTERFACE_ID)
mapper = util.ItemIdentifierMapper("http://test.de:80/hhh/j/")
self.assertEquals(mapper.mapPersistenceIdentifier("http://test.de:80/hhh/j/c:/lll/"), _INTERFACE_ID)
self.assertEquals(mapper.mapPersistenceIdentifier("http://test.de:80/hhh/j/c:/lll"), _INTERFACE_ID)
self.assertEquals(mapper.mapPersistenceIdentifier("http://test:80/kkk/j/c:/lll"), "/kkk/j/c:/lll")
self.assertEquals(mapper.mapPersistenceIdentifier("http://test:80/kkk/j/c:/lll/"), "/kkk/j/c:/lll")
def testDetermineBaseName(self):
""" Tests the determine base name functionality. """
self.assertEquals(util.ItemIdentifierMapper.determineBaseName("/kkjh/aa/hh"), "hh")
self.assertEquals(util.ItemIdentifierMapper.determineBaseName("/"), "")
self.assertEquals(util.ItemIdentifierMapper.determineBaseName("jjj"), "jjj")
self.assertEquals(util.ItemIdentifierMapper.determineBaseName(""), "")
self.assertRaises(AttributeError, util.ItemIdentifierMapper.determineBaseName, None)
def testDetermineParentPath(self):
""" Tests the determine parent functionality. """
self.assertEquals(util.ItemIdentifierMapper.determineParentPath("/kkjh/aa/hh"), "/kkjh/aa")
self.assertEquals(util.ItemIdentifierMapper.determineParentPath("/"), "")
self.assertEquals(util.ItemIdentifierMapper.determineParentPath("jjj"), "")
self.assertEquals(util.ItemIdentifierMapper.determineParentPath(""), "")
self.assertRaises(AttributeError, util.ItemIdentifierMapper.determineBaseName, None)
def testInvalidUrl(self):
""" Tests invalid base URL. """
self.assertRaises(AttributeError, util.ItemIdentifierMapper, None)
util.ItemIdentifierMapper("invalidURL")
| {
"repo_name": "DLR-SC/DataFinder",
"path": "test/unittest/datafinder_test/persistence/adapters/webdav_/util_test.py",
"copies": "1",
"size": "4877",
"license": "bsd-3-clause",
"hash": 7027054706762138000,
"line_mean": 45.8137254902,
"line_max": 108,
"alpha_frac": 0.6996104162,
"autogenerated": false,
"ratio": 3.926731078904992,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01987980162734069,
"num_lines": 102
} |
"""
The module defines available privileges.
"""
from datafinder.core.error import PrivilegeError
from datafinder.persistence.privileges import constants
__version__ = "$Revision-Id:$"
class _Privilege(object):
"""
This class defines available properties of a privilege.
The class mainly exists for documentation reasons and is intended
to be replaced by named tuples when switching to Python 3.
"""
def __init__(self, identifier, displayName, description, aggregatedPrivileges=None):
"""
Constructor.
@param identifier: Identifier of the privilege.
@type identifier: C{unicode}
@param displayName: Display name of the privilege.
@type displayName: C{unicode}
@param description: Describes the purpose of the privilege.
@type description: C{unicode}
@param aggregatedPrivileges: Directly aggregated privileges.
@type aggregatedPrivileges: C{list} of L{_Privilege<datafinder.core.item.privileges.privilege._Privilege>}
"""
self.identifier = identifier
self.displayName = displayName
self.description = description
self.aggregatedPrivileges = aggregatedPrivileges
if self.aggregatedPrivileges is None:
self.aggregatedPrivileges = list()
else:
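            # Note: the list is extended while it is iterated so that
            # transitively aggregated privileges are flattened as well.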
for privilege in aggregatedPrivileges:
self.aggregatedPrivileges.extend(privilege.aggregatedPrivileges)
def __repr__(self):
""" Determines the string representation. """
return self.displayName
def __cmp__(self, other):
""" Compares two instances. """
return cmp(self.identifier, other.identifier)
def __hash__(self):
""" Calculates has value in accordance to comparison. """
return id(self.identifier)
REMOVE_ITEM = _Privilege(constants.REMOVE_ITEM_PRIVILEGE, "Remove Item", "Determines removal of items.")
ADD_ITEM = _Privilege(constants.ADD_ITEM_PRIVILEGE, "Add Item", "Determines adding of items.")
WRITE_PROPERTIES = _Privilege(constants.WRITE_PROPERTIES_PRIVILEGE, "Write Properties", "Determines modification of properties.")
WRITE_CONTENT = _Privilege(constants.WRITE_CONTENT_PRIVILEGE, "Write Content", "Determines modification of the item content.")
WRITE_PRIVILEGE = _Privilege(constants.WRITE_PRIVILEGE, "Write", "Aggregates all modification privileges.",
[WRITE_CONTENT, WRITE_PROPERTIES, ADD_ITEM, REMOVE_ITEM])
READ_PRIVILEGES_PRIVILEGE = _Privilege(constants.READ_PRIVILEGES_PRIVILEGE, "Read Privileges", "Determines reading of item privileges.")
WRITE_PRIVILEGES_PRIVILEGE = _Privilege(constants.WRITE_PRIVILEGES_PRIVILEGE, "Write Privileges", "Determines writing of item privileges.")
READ_USER_PRIVILEGES_PRIVILEGE = _Privilege(constants.READ_USER_PRIVILEGES_PRIVILEGE, "Read User Privileges",
"Determines reading of the current user privileges.")
READ_PRIVILEGE = _Privilege(constants.READ_PRIVILEGE, "Read", "Determines reading of the item content and its properties.")
ALL_PRIVILEGE = _Privilege(constants.ALL_PRIVILEGE, "All", "Aggregates all available privileges.",
[READ_PRIVILEGE, READ_PRIVILEGES_PRIVILEGE, WRITE_PRIVILEGE,
WRITE_PRIVILEGES_PRIVILEGE, READ_USER_PRIVILEGES_PRIVILEGE])
PRIVILEGES = [ALL_PRIVILEGE] + ALL_PRIVILEGE.aggregatedPrivileges
def getPrivilege(identifier):
"""
Creates a privilege from the persistence format.
@param identifier: Privilege identifier.
    @type identifier: C{unicode}
    @return: The privilege matching the given identifier.
    @rtype: L{_Privilege<datafinder.core.item.privileges.privilege._Privilege>}
    @raise PrivilegeError: Indicates an unsupported privilege identifier.
    """
for privilege in PRIVILEGES:
if privilege.identifier == identifier:
return privilege
raise PrivilegeError("The privilege '%s' is not supported." % identifier)
class _AccessLevel(object):
"""
This class defines generally available access levels.
    Access levels correspond to the item aspects content, properties, and administration.
    They are introduced to simplify the privilege handling in the context of these item aspects.
"""
def __init__(self, identifier, displayName, description):
"""
Constructor.
@param identifier: Identifier of the access level.
@type identifier: C{unicode}
@param displayName: Display name of the access level.
@type displayName: C{unicode}
@param description: Describes the purpose of the access level.
@type description: C{unicode}
"""
self.identifier = identifier
self.displayName = displayName
self.description = description
def __str__(self):
""" Determines the string representation. """
return self.displayName
__repr__ = __str__
NONE_ACCESS_LEVEL = _AccessLevel("____none____", "None", "Neither reading nor writing access.")
READ_ONLY_ACCESS_LEVEL = _AccessLevel("____read-only____", "Read-Only", "Only reading access.")
FULL_ACCESS_LEVEL = _AccessLevel("____full____", "Full", "Reading and writing access.")
ACCESS_LEVELS = [NONE_ACCESS_LEVEL, READ_ONLY_ACCESS_LEVEL, FULL_ACCESS_LEVEL]
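# Example (added for illustration; not part of the original module): a minimal
# sketch resolving a persisted privilege identifier and inspecting which
# privileges it aggregates.
def _exampleResolvePrivilege():
    """ Example only: maps a persistence identifier to a privilege. """
    privilege = getPrivilege(constants.WRITE_PRIVILEGE)
    return [aggregatedPrivilege.displayName
            for aggregatedPrivilege in privilege.aggregatedPrivileges]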
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/privileges/privilege.py",
"copies": "1",
"size": "7047",
"license": "bsd-3-clause",
"hash": 9160348121903685000,
"line_mean": 39.9464285714,
"line_max": 139,
"alpha_frac": 0.67915425,
"autogenerated": false,
"ratio": 4.291717417783191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.018494841363873853,
"num_lines": 168
} |
"""
The module provides a text editor.
"""
from PyQt4 import QtGui, QtCore
from PyQt4.Qt import Qt
from datafinder.gui.gen.user.text_editor_dialog_ui import Ui_textEditorDialog
__version__ = "$Revision-Id:$"
class TextEditor(QtGui.QLineEdit):
"""
    This widget is a specialized line editor which allows
    text editing in a separate dialog.
"""
def __init__(self, initData="", parent=None):
"""
Constructor.
        @param initData: Initial text data.
@type initData: C{unicode}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QLineEdit.__init__(self, parent)
self.value = initData or ""
self.setText(self.value)
self._editButton = QtGui.QPushButton("...", self)
self._editButton.setCursor(Qt.ArrowCursor)
self._editButton.setMaximumSize(QtCore.QSize(20, 20))
self.setStyleSheet("QLineEdit { padding-right: 0px; } ")
self.connect(self._editButton, QtCore.SIGNAL("clicked()"), self._showEditorSlot)
def resizeEvent(self, _):
""" Ensures that the edit button is in the right corner of the line editor. """
size = self._editButton.maximumSize()
self._editButton.move(self.rect().right() - size.width(),
(self.rect().bottom() + 1 - size.height()) / 2)
def _showEditorSlot(self):
""" Slot which shows the list editor. """
self.value = self.text()
textEditor = _TextEditorDialog(self.value, self)
textEditor.exec_()
self.setText(self.value)
self.setFocus(Qt.OtherFocusReason)
class _TextEditorDialog(QtGui.QDialog, Ui_textEditorDialog):
"""
    This dialog shows the content of a text property and supports editing the property.
"""
def __init__(self, initData, parent=None):
"""
Constructor.
        @param initData: Initial text data.
@type initData: C{unicode}
@param parent: Parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtGui.QDialog.__init__(self, parent)
Ui_textEditorDialog.__init__(self)
self.setupUi(self)
self.textEdit.setText(initData or "")
self.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), self.accepted)
def accepted(self):
""" This slot is called when the user clicks OK. It returns the entered text. """
self.parent().value = self.textEdit.toPlainText()
QtGui.QDialog.accept(self)
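# Example (added for illustration; not part of the original module): a minimal
# sketch embedding the TextEditor widget into a form. The application and
# parent widget setup is assumed to exist already.
def _exampleEmbedTextEditor(parentWidget, formLayout):
    """ Example only: adds a TextEditor row to an existing QFormLayout. """
    editor = TextEditor(u"initial text", parentWidget)
    formLayout.addRow("Comment:", editor)
    return editor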
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/common/widget/property/editors/text_editor.py",
"copies": "1",
"size": "4483",
"license": "bsd-3-clause",
"hash": -8313688464511776000,
"line_mean": 33.5793650794,
"line_max": 91,
"alpha_frac": 0.6442114655,
"autogenerated": false,
"ratio": 4.185807656395892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019829280352421638,
"num_lines": 126
} |
"""
This implementation of NullDataStorer can read out of and write into
ZIP compressed archives.
"""
__version__ = "$Revision-Id$"
import codecs
import types
from zipfile import ZipInfo
from datafinder.persistence.data.datastorer import NullDataStorer
from datafinder.persistence.error import PersistenceError
_ZIP_FILENAME_CODEC = codecs.lookup("CP437")
class DataArchiveAdapter(NullDataStorer, object):
""" This class implements the L{NullDataStorer} scheme for ZIP archives. """
def __init__(self, identifier, archive, password=None, readonly=False):
""" Constructor.
@param identifier: The identifier of the associated item.
@type identifier: C{unicode}
        @param archive: The zip archive that should be used for storage.
@type archive: C{zipfile.ZipFile}
@param password: If the archive is encrypted, the password should be given here.
@type password: C{string}
@param readonly: Flag whether the archive is opened read-only.
@type readonly: C{bool}
"""
super(DataArchiveAdapter, self).__init__(identifier)
self._archive = archive
self._children = None
self._password = password
self._readonly = readonly
self._persistenceId = _ZIP_FILENAME_CODEC.encode(self.identifier, errors="ignore")[0] #identifier used to access item in zip archive
def getChildren(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
if not self._children:
self._children = list()
files = self._archive.namelist()
for name in files:
if not isinstance(name, types.UnicodeType):
name = _ZIP_FILENAME_CODEC.decode(name, errors="ignore")[0]
if name.startswith(self.identifier) \
and name.find("/", len(self.identifier) + 1) < 0:
self._children.append(name)
return self._children
@property
def canAddChildren(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
return not self._readonly
def exists(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
try:
self._archive.getinfo(self._persistenceId)
except KeyError:
return False
return True
def writeData(self, data):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
if self._readonly:
raise PersistenceError(u"Tried to write to read-only archive.")
try:
info = self._archive.getinfo(self._persistenceId)
except KeyError:
info = ZipInfo(self._persistenceId)
try:
self._archive.writestr(info, data.read())
except IOError, error:
errorMessage = "Cannot write data of archive member '%s'.\nReason: '%s'" % (self.identifier, error.message)
raise PersistenceError(errorMessage)
def readData(self):
""" @see:L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
try:
return self._archive.open(self._persistenceId, "r", self._password)
except IOError, error:
errorMessage = "Cannot access archive member '%s'.\nReason: '%s'" % (self.identifier, error.message)
raise PersistenceError(errorMessage)
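# A minimal usage sketch of the adapter above, assuming a local ZIP file;
# the archive path and the member name "doc/readme.txt" are hypothetical.
# Note that writeData() expects a file-like object since it calls data.read().
def _archiveRoundTripSketch():
    import StringIO
    import zipfile
    archive = zipfile.ZipFile("example.zip", "a")  # write a member through the adapter
    adapter = DataArchiveAdapter(u"doc/readme.txt", archive)
    adapter.writeData(StringIO.StringIO("some content"))
    archive.close()
    archive = zipfile.ZipFile("example.zip", "r")  # read it back via a read-only adapter
    adapter = DataArchiveAdapter(u"doc/readme.txt", archive, readonly=True)
    content = adapter.readData().read()
    archive.close()
    return content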
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/archive/data/adapter.py",
"copies": "1",
"size": "5325",
"license": "bsd-3-clause",
"hash": -6672894765059395000,
"line_mean": 38.3409090909,
"line_max": 140,
"alpha_frac": 0.6584037559,
"autogenerated": false,
"ratio": 4.253194888178914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017042967050246813,
"num_lines": 132
} |
"""
This module contains the HistoryModel.
"""
from PyQt4 import QtCore
__version__ = "$Revision-Id:$"
class HistoryModel(QtCore.QAbstractItemModel):
"""
    The history model implements a mechanism to store and restore L{QModelIndex<PyQt4.QtCore.QModelIndex>} objects.
    It provides the built-in history functionality that allows navigating forward
    and backward through the visited L{QModelIndex<PyQt4.QtCore.QModelIndex>} objects.
"""
def __init__(self, parent=None, maximumLength=20):
"""
Constructor.
@param parent: Parent object.
@type parent: L{QObject<PyQt4.QtCore.QObject>}
@param maximumLength: Maximum length of the history.
@type maximumLength: C{int}
"""
QtCore.QAbstractItemModel.__init__(self, parent)
self._sortedColumn = 0
self._sortedOrder = QtCore.Qt.AscendingOrder
self.__pathIndex = 0
self.__pathCurrent = ""
self.__pathForwardList = list()
self.__pathBackwardList = list()
self.maximumLength = maximumLength
def __pushHistories(self, loadList, storeList, steps=1):
"""
Move all entries according to the step count from the load list to the store list.
@param loadList: The load list contains all entries that will move to C{storeList}.
@type loadList: C{list}
@param storeList: The store list stores all entries from the C{loadList}.
@type storeList: C{list}
        @param steps: The number of steps to move forward in the history.
@type steps: C{int}
"""
index = steps - 1
loadList = loadList[::-1]
if index < len(loadList):
storeList.append(self.__pathCurrent)
storeList += loadList[:index]
self.__pathCurrent = loadList[index]
loadList = loadList[steps:]
return loadList[::-1], storeList
def _getIndex(self):
"""
Getter for the current index.
@return: Current model index.
@rtype: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
return self._index(self.__pathCurrent)
def _setIndex(self, index):
"""
Setter for the new selected index.
@param index: Index of a valid directory.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
node = self.nodeFromIndex(index)
if not node is None and node != self._placeHolderCollection and node != self._placeHolderLeaf:
if not node.isCollection:
parentIndex = index.parent()
node = self.nodeFromIndex(parentIndex)
if not node is None:
if node.isCollection:
self.__pathForwardList = list()
if len(self.__pathBackwardList) == 0 or self.__pathCurrent != self.__pathBackwardList[-1]:
if len(node.path) > 0 and node.path != self.__pathCurrent:
self.__pathBackwardList.append(self.__pathCurrent)
self.__pathBackwardList = self.__pathBackwardList[:self.maximumLength]
self.__pathCurrent = node.path
self.sort(self._sortedColumn, self._sortedOrder)
self.updateSignal(index)
def _getPath(self):
"""
Returns the current path of the model.
@return: Current path.
@rtype: C{unicode}
"""
return self.__pathCurrent
def _setPath(self, path):
"""
Setter for the current path.
When the given path is a file it will be executed.
        @param path: Path that has to be set as the current path.
@type path: C{unicode}
"""
self.activeIndex = self._index(path)
@property
def history(self):
"""
        Returns the stored backward and forward histories.
        @return: Lists of backward and forward paths.
        @rtype: C{list}, C{list}
"""
return self.__pathBackwardList[::-1], self.__pathForwardList[::-1]
def _setRelativeHistoryIndex(self, index):
"""
        Sets the current path to the entry at the given relative index in the history.
        A positive index moves forward in the history, a negative one backward.
        @param index: Relative index in the history that has to be selected.
        @type index: C{int}
"""
if index > 0:
result = self.__pushHistories(self.__pathForwardList,
self.__pathBackwardList,
index)
self.__pathForwardList, self.__pathBackwardList = result
elif index < 0:
            result = self.__pushHistories(self.__pathBackwardList,
                                          self.__pathForwardList,
                                          -index)
self.__pathBackwardList, self.__pathForwardList = result
if index != 0:
self.updateSignal(self.activeIndex)
def clear(self):
"""
Clears the history and emits the update signal.
"""
self.__pathIndex = 0
self.__pathCurrent = ""
self.__pathForwardList = list()
self.__pathBackwardList = list()
self.updateSignal()
def updateSignal(self, index=QtCore.QModelIndex()):
"""
Emits the update signal to all connected views.
@param index: The index where a change has happened.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self.emit(QtCore.SIGNAL("updateSignal"), index)
def itemDataChangedSignal(self, index=QtCore.QModelIndex()):
"""
Emits the item data changed signal to all connected views.
@param index: The index where a change has happened.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self.emit(QtCore.SIGNAL("itemDataChanged"), index)
def searchResultChangedSignal(self, items):
"""
Emits the search result changed signal to all connected views.
@param items: List of items representing the search result.
        @type items: C{list}
"""
self.emit(QtCore.SIGNAL("searchResultChangedSignal"), items)
activeIndex = property(_getIndex, _setIndex)
activePath = property(_getPath, _setPath)
relativeHistoryIndex = property(fset=_setRelativeHistoryIndex)
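# A minimal navigation sketch, assuming "model" is a concrete model instance
# (e.g. the repository model) that mixes in HistoryModel and provides the
# _index() and nodeFromIndex() hooks used above.
def _historyNavigationSketch(model):
    model.activePath = u"/data/2010"  # visit a first collection
    model.activePath = u"/data/2011"  # visit a second one
    model.relativeHistoryIndex = -1   # one step back: /data/2010 is active again
    model.relativeHistoryIndex = 1    # one step forward: back to /data/2011
    backwardPaths, forwardPaths = model.history
    return backwardPaths, forwardPaths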
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/models/repository/history.py",
"copies": "1",
"size": "8394",
"license": "bsd-3-clause",
"hash": -1245409710228574200,
"line_mean": 33.5677966102,
"line_max": 115,
"alpha_frac": 0.60447939,
"autogenerated": false,
"ratio": 4.45068928950159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006887096671018178,
"num_lines": 236
} |
"""
This module provides a simple clip-board for internal
copy-cut-paste actions.
"""
from datafinder.gui.user.models import constants
__version__ = "$Revision-Id:$"
class ItemClipboard(object):
""" Implements the clip-board. """
def __init__(self, repositoryModel):
"""
Constructor.
@param repositoryModel: The repository model.
@type repositoryModel: L{RepositoryModel<datafinder.gui.user.models.repository.RepsitoryModel>}
"""
self._clipboard = None
self.clear()
self._repositoryModel = repositoryModel
def setCopyIndexes(self, itemIndexes):
"""
Sets the given item indexes for copying purpose.
@param itemIndexes: List of item indexes marked for the given purpose.
@type itemIndexes: C{list} of L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self._setItemIndexes(constants.CLIPBOARD_STATE_COPY, itemIndexes)
def setCutIndexes(self, itemIndexes):
"""
Sets the given item indexes for cut purpose.
@param itemIndexes: List of item indexes marked for the given purpose.
@type itemIndexes: C{list} of L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self._setItemIndexes(constants.CLIPBOARD_STATE_CUT, itemIndexes)
def setCopyPropertiesIndex(self, itemIndex):
"""
Sets the given item index for copying of its properties.
        @param itemIndex: Item index marked for the given purpose.
        @type itemIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
self._setItemIndexes(constants.CLIPBOARD_STATE_COPY_PROPERTIES, [itemIndex])
def _setItemIndexes(self, state, itemIndexes):
"""
Sets the given item indexes for the purpose specified with C{state}.
@param state: Specifies the purpose (copy or cut) of the temporarily store.
@type state: L{constants<datafinder.gui.user.common.constants>}
@param itemIndexes: List of item indexes marked for the given purpose.
@type itemIndexes: C{list} of L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
if state in self._clipboard:
self.clear()
self._clipboard[state] = list()
for index in itemIndexes:
if index.isValid():
item = self._repositoryModel.nodeFromIndex(index)
self._clipboard[state].append(item.path)
@property
def state(self):
"""
Read-only property for the state of the clip-board.
This can be used to determine whether items for copying or moving
have been put into the clip-board.
        @see: L{constants<datafinder.gui.user.models.constants>}
"""
state = constants.CLIPBOARD_STATE_EMPTY
for key in self._clipboard:
if len(self._clipboard[key]) > 0:
return key
return state
@property
def indexes(self):
""" Returns the current set of item indexes in the clip-board. """
itemIndexes = list()
state = self.state
if state in self._clipboard:
invalidPaths = list()
for path in self._clipboard[state]:
index = self._repositoryModel.indexFromPath(path)
if index.isValid():
itemIndexes.append(index)
else:
invalidPaths.append(path)
for path in invalidPaths:
self._clipboard[state].remove(path)
return itemIndexes
@property
def isEmpty(self):
""" Convenience read-only flag to check whether the clip-board is empty. """
return self.state == constants.CLIPBOARD_STATE_EMPTY
def clear(self):
""" Clears the content of the clip-board. """
self._clipboard = {constants.CLIPBOARD_STATE_COPY: list(),
constants.CLIPBOARD_STATE_CUT: list(),
constants.CLIPBOARD_STATE_COPY_PROPERTIES: list()}
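# A short usage sketch, assuming an initialized repository model and a list of
# currently selected item indexes (both hypothetical here). Internally paths are
# stored, so indexes of items removed in the meantime are silently dropped when
# the clip-board content is read back.
def _clipboardSketch(repositoryModel, selectedIndexes):
    clipboard = ItemClipboard(repositoryModel)
    clipboard.setCopyIndexes(selectedIndexes)
    if clipboard.state == constants.CLIPBOARD_STATE_COPY:
        for index in clipboard.indexes:  # only still-valid indexes are returned
            pass  # e.g. trigger the actual copy action for each index
    clipboard.clear()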
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/models/repository/clipboard.py",
"copies": "1",
"size": "5974",
"license": "bsd-3-clause",
"hash": -9116586161005526000,
"line_mean": 35.8101265823,
"line_max": 103,
"alpha_frac": 0.6288918647,
"autogenerated": false,
"ratio": 4.379765395894428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020429414434328937,
"num_lines": 158
} |
""""
Module provides access to a configured logger instance.
"""
import logging
import os
from datafinder.core.configuration import constants
__version__ = "$Revision-Id:$"
_fileLogFormat = "%(asctime)s: %(levelname)s: %(message)s"
_logFileName = "debug.log"
_webdavLogFileName = "webdav.log"
def getDefaultLogger():
"""
Returns a configured logger object.
@return: Logger instance.
@rtype: C{logging.Logger}
"""
myLogger = logging.getLogger(None)
if len(myLogger.handlers) == 0:
from webdav import logger
webdavLogger = logger.getDefaultLogger(_getFileHandler(_webdavLogFileName))
webdavLogger.level = logging.INFO
myLogger.level = logging.INFO
myLogger.addHandler(_getFileHandler(_logFileName))
return myLogger
def _getFileHandler(fileName):
""" Initializes a file handler with the given file name. """
formatter = logging.Formatter(_fileLogFormat)
if not os.path.exists(constants.WORKING_PATH):
os.mkdir(constants.WORKING_PATH)
fileHandler = logging.FileHandler(os.path.join(constants.WORKING_PATH, fileName), "wb")
fileHandler.setFormatter(formatter)
return fileHandler
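# Typical usage, shown as a minimal sketch: the configured root logger is
# created once (including the separate webdav library logger) and then shared.
if __name__ == "__main__":
    defaultLogger = getDefaultLogger()
    defaultLogger.info("Logging initialized; messages go to '%s'.", _logFileName)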
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/common/logger.py",
"copies": "1",
"size": "2957",
"license": "bsd-3-clause",
"hash": 4114739541944627700,
"line_mean": 33.6265060241,
"line_max": 91,
"alpha_frac": 0.7159283057,
"autogenerated": false,
"ratio": 4.045143638850889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.019508662755628688,
"num_lines": 83
} |
"""
Base class for the adaptor specific file system factory implementations.
"""
import decimal
from datafinder.persistence.common import character_constants as char_const
from datafinder.persistence.data.datastorer import NullDataStorer
from datafinder.persistence.metadata.metadatastorer import NullMetadataStorer
from datafinder.persistence.principal_search.principalsearcher import NullPrincipalSearcher
from datafinder.persistence.privileges.privilegestorer import NullPrivilegeStorer
from datafinder.persistence.search.searcher import NullSearcher
__version__ = "$Revision-Id:$"
class BaseFileSystem(object):
""" Base class for the adaptor specific file system factory implementations. """
@property
def canHandleLocation(self):
"""
Indicates if the FileSystem can handle the location.
@return: True if FileSystem can handle the location, False if not.
"""
self = self
return True
def createDataStorer(self, identifier):
"""
        Factory method creating an adapter-specific data storer.
@return: Adapter specific data storer.
@rtype: instanceOf L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
"""
self = self # silent pylint
return NullDataStorer(identifier)
def createMetadataStorer(self, identifier):
"""
        Factory method creating an adapter-specific meta data storer.
@return: Adapter specific meta data storer.
@rtype: instanceOf L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}
"""
self = self # silent pylint
return NullMetadataStorer(identifier)
def createPrivilegeStorer(self, identifier):
"""
        Factory method creating an adapter-specific privilege storer.
        @return: Adapter specific privilege storer.
        @rtype: instanceOf L{NullPrivilegeStorer<datafinder.persistence.privileges.privilegestorer.NullPrivilegeStorer>}
"""
self = self # silent pylint
return NullPrivilegeStorer(identifier)
def createPrincipalSearcher(self):
"""
        Factory method creating an adapter-specific principal searcher.
        @return: Adapter specific principal searcher.
        @rtype: instanceOf L{NullPrincipalSearcher<datafinder.persistence.principal_search.principalsearcher.NullPrincipalSearcher>}
"""
self = self # silent pylint
return NullPrincipalSearcher()
def createSearcher(self):
"""
        Factory method creating an adapter-specific searcher.
        @return: Adapter specific searcher.
        @rtype: instanceOf L{NullSearcher<datafinder.persistence.search.searcher.NullSearcher>}
"""
self = self # silent pylint
return NullSearcher()
def release(self):
"""
@see: L{FileSystem.release<datafinder.persistence.factory.FileSystem.release>}
@note: The default implementation does nothing.
"""
pass
def updateCredentials(self, credentials):
"""
@see: L{FileSystem.updateCredentials<datafinder.persistence.factory.FileSystem.updateCredentials>}
@note: The default implementation does nothing.
"""
pass
def prepareUsage(self):
"""
Prepares usage of the file system.
@note: The default implementation does nothing.
"""
pass
def isValidIdentifier(self, name):
"""
@see: L{FileSystem.isValidIdentifier<datafinder.persistence.factory.FileSystem.metadataIdentifierPattern>}
        @note: This implementation validates the name against the generic identifier character constants.
"""
return self._validateIdentifier(name,
char_const.IDENTIFIER_INVALID_CHARACTER_RE,
char_const.IDENTIFIER_VALID_STARTCHARACTER_RE)
@staticmethod
def _validateIdentifier(name, invalidCharRe, validStartCharRe):
""" Helper used for identifier validation. """
isValidIdentifer = False, None
if not name is None and len(name.strip()) > 0:
result = invalidCharRe.search(name)
if not result is None:
isValidIdentifer = False, result.start()
else:
if validStartCharRe.match(name):
isValidIdentifer = True, None
else:
isValidIdentifer = False, 0
return isValidIdentifer
def isValidMetadataIdentifier(self, name): # W0613
"""
@see: L{FileSystem.metadataIdentifier<datafinder.persistence.factory.FileSystem.metadataIdentifierPattern>}
        @note: This implementation validates the name against the generic property name character constants.
"""
return self._validateIdentifier(name,
char_const.PROPERTYNAME_INVALID_CHARACTER_RE,
char_const.PROPERTYNAME_VALID_STARTCHARACTER_RE)
@property
def hasCustomMetadataSupport(self):
"""
@see: L{FileSystem.hasCustomMetadataSupport<datafinder.persistence.factory.FileSystem.hasCustomMetadataSupport>}
@note: This implementation always returns C{False}.
"""
self = self # silent pylint
return False
@property
def hasMetadataSearchSupport(self):
"""
@see: L{FileSystem.hasMetadataSearchSupport<datafinder.persistence.factory.FileSystem.hasMetadataSearchSupport>}
@note: This implementation always returns C{False}.
"""
self = self # silent pylint
return False
@property
def hasPrivilegeSupport(self):
"""
@see: L{FileSystem.hasPrivilegeSupport<datafinder.persistence.factory.FileSystem.hasPrivilegeSupport>}
@note: This implementation always returns C{False}.
"""
self = self # silent pylint
return False
def determineFreeDiskSpace(self):
"""
@see: L{FileSystem.determineFreeDiskSpace<datafinder.persistence.factory.FileSystem.determineFreeDiskSpace>}
"""
return decimal.Decimal('infinity')
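# A minimal sketch of the validation contract: both isValidIdentifier and
# isValidMetadataIdentifier return an (isValid, errorPosition) tuple where
# errorPosition is the index of the first offending character or None.
def _identifierValidationSketch():
    fileSystem = BaseFileSystem()
    isValid, errorPosition = fileSystem.isValidIdentifier(u"myCollection")
    if not isValid:
        print "Invalid character at position:", errorPosition
    return isValid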
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/common/base_factory.py",
"copies": "1",
"size": "8251",
"license": "bsd-3-clause",
"hash": -6972693561341776000,
"line_mean": 35.5045454545,
"line_max": 120,
"alpha_frac": 0.6440431463,
"autogenerated": false,
"ratio": 4.785962877030163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03074709283676716,
"num_lines": 220
} |
"""
This module implements how the meta data is persisted on the SVN server.
"""
import datetime
import logging
import mimetypes
from datafinder.persistence.adapters.svn.constants import JSON_PROPERTY_NAME
from datafinder.persistence.adapters.svn.error import SubversionError
from datafinder.persistence.adapters.svn.util import util
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata import constants as const
from datafinder.persistence.metadata.value_mapping import custom_format
from datafinder.persistence.metadata.value_mapping import json_format
from datafinder.persistence.metadata.metadatastorer import NullMetadataStorer
__version__ = "$Revision-Id$"
_log = logging.getLogger()
class MetadataSubversionAdapter(NullMetadataStorer):
""" Implements meta data storer interface for subversion. """
def __init__(self, identifier, connectionPool):
"""
Constructor.
@param identifier: Logical identifier of the resource.
@type identifier: C{unicode}
@param connectionPool: Connection pool.
@type connectionPool: L{Connection<datafinder.persistence.svn.connection_pool.SVNConnectionPool>}
"""
NullMetadataStorer.__init__(self, identifier)
self.__connectionPool = connectionPool
def retrieve(self, propertyIds=None):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
connection = self.__connectionPool.acquire()
try:
properties = self._retrieveCustomProperties(connection)
for key, value in properties.iteritems():
properties[key] = json_format.MetadataValue(value)
systemProperties = self._retrieveSystemProperties(connection)
properties.update(systemProperties)
return self._filterResult(propertyIds, properties)
finally:
self.__connectionPool.release(connection)
@staticmethod
def _filterResult(propertyIds, properties):
if not propertyIds is None and len(propertyIds) > 0:
filteredProps = dict()
for propertyId in propertyIds:
if propertyId in properties:
filteredProps[propertyId] = properties[propertyId]
else:
filteredProps = properties
return filteredProps
def _retrieveCustomProperties(self, connection):
customProperties = dict()
try:
jsonString = connection.getProperty(self.identifier, JSON_PROPERTY_NAME)
except SubversionError:
parentId = util.determineParentPath(self.identifier)
try:
connection.update(parentId)
jsonString = connection.getProperty(self.identifier, JSON_PROPERTY_NAME)
except SubversionError, error:
raise PersistenceError(str(error))
if not jsonString is None:
customProperties = json_format.convertFromPersistenceFormat(jsonString)
return customProperties
def _retrieveSystemProperties(self, connection):
try:
rawSystemProps = connection.info(self.identifier)
except SubversionError, error:
errorMessage = "Problem during meta data retrieval. " \
+ "Reason: '%s'" % str(error)
raise PersistenceError(errorMessage)
else:
systemProps = dict()
systemProps[const.CREATION_DATETIME] = \
custom_format.MetadataValue(rawSystemProps["creationDate"] or "", datetime.datetime)
systemProps[const.MODIFICATION_DATETIME] = \
custom_format.MetadataValue(rawSystemProps["lastChangedDate"] or "", datetime.datetime)
systemProps[const.SIZE] = custom_format.MetadataValue(rawSystemProps["size"] or "")
systemProps[const.OWNER] = custom_format.MetadataValue(rawSystemProps["owner"] or "")
systemProps[const.MIME_TYPE] = custom_format.MetadataValue(self._guessMimeType() or "")
return systemProps
def _guessMimeType(self):
return mimetypes.guess_type(self.identifier, False)[0]
def update(self, properties):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
connection = self.__connectionPool.acquire()
try:
customProperties = self._retrieveCustomProperties(connection)
customProperties.update(properties)
newJsonString = json_format.convertToPersistenceFormat(customProperties)
try:
connection.update(self.identifier)
connection.setProperty(self.identifier, JSON_PROPERTY_NAME, newJsonString)
except SubversionError, error:
errorMessage = "Cannot update properties of item '%s'. " % self.identifier \
+ "Reason: '%s'" % error
raise PersistenceError(errorMessage)
finally:
self.__connectionPool.release(connection)
def delete(self, propertyIds):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
connection = self.__connectionPool.acquire()
try:
customProperties = self._retrieveCustomProperties(connection)
for propertyId in propertyIds:
if propertyId in customProperties:
del customProperties[propertyId]
newJsonString = json_format.convertToPersistenceFormat(customProperties)
try:
connection.update(self.identifier)
connection.setProperty(self.identifier, JSON_PROPERTY_NAME, newJsonString)
except SubversionError, error:
errorMessage = "Cannot delete properties of item '%s'. " % self.identifier \
+ "Reason: '%s'" % error
raise PersistenceError(errorMessage)
finally:
self.__connectionPool.release(connection)
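# A brief usage sketch, assuming an initialized SVN connection pool created
# elsewhere; the item identifier and property name are hypothetical. All custom
# properties of an item are kept together in a single JSON-encoded subversion
# property (JSON_PROPERTY_NAME).
def _svnMetadataSketch(connectionPool):
    adapter = MetadataSubversionAdapter(u"/project/report.pdf", connectionPool)
    adapter.update({u"experiment": u"wind tunnel 7"})  # merge and store
    properties = adapter.retrieve([u"experiment"])  # filtered retrieval
    adapter.delete([u"experiment"])  # remove the property again
    return properties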
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/svn/metadata/adapter.py",
"copies": "1",
"size": "7959",
"license": "bsd-3-clause",
"hash": -4928843757952398000,
"line_mean": 43.2215909091,
"line_max": 109,
"alpha_frac": 0.6662897349,
"autogenerated": false,
"ratio": 4.720640569395018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011429956864512869,
"num_lines": 176
} |
"""
This module implements how the meta data is persisted on the WebDAV server.
"""
from webdav.Connection import WebdavError
from webdav import Constants
from webdav.NameCheck import WrongNameError
from datafinder.persistence.adapters.webdav_.metadata import identifier_mapping
from datafinder.persistence.adapters.webdav_ import util
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata import constants, value_mapping
from datafinder.persistence.metadata.metadatastorer import NullMetadataStorer
__version__ = "$Revision-Id:$"
class MetadataWebdavAdapter(NullMetadataStorer):
""" This class implements property retrieval, storage and deletion using the WebDAV protocol. """
def __init__(self, identifier, connectionPool, itemIdMapper, metadataIdMapper=identifier_mapping, connectionHelper=util,
hasMetadataSearchSupport=True):
"""
Constructor.
@param identifier: Logical identifier of the resource.
@type identifier: C{unicode}
@param connectionPool: Connection pool.
@type connectionPool: L{Connection<datafinder.persistence.webdav_.connection_pool.WebdavConnectionPool>}
@param itemIdMapper: Utility object mapping item identifiers.
@type itemIdMapper: L{ItemIdentifierMapper<datafinder.persistence.adapters.webdav_.util.ItemIdentifierMapper}
@param metadataIdMapper: Utility object/module mapping meta data item identifiers.
@type metadataIdMapper: L{ItemIdentifierMapper<datafinder.persistence.adapters.webdav_.metadata.identifier_mapping}
@param connectionHelper: Utility object/module creating WebDAV library storer instances.
@type connectionHelper: L{ItemIdentifierMapper<datafinder.persistence.adapters.webdav_.util}
"""
NullMetadataStorer.__init__(self, identifier)
self.__connectionPool = connectionPool
self.__itemIdMapper = itemIdMapper
self.__metadataIdMapper = metadataIdMapper
self._persistenceId = self.__itemIdMapper.mapIdentifier(identifier)
self.__connectionHelper = connectionHelper
self.__hasMetadataSearchSupport = hasMetadataSearchSupport
def retrieve(self, propertyIds=None):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
connection = self.__connectionPool.acquire()
try:
if propertyIds is None:
persistenceProperties = self._retrieveAllProperties(connection)
properties = {constants.CREATION_DATETIME: value_mapping.MetadataValue(""),
constants.MODIFICATION_DATETIME: value_mapping.MetadataValue(""),
constants.SIZE: value_mapping.MetadataValue("0"),
constants.MIME_TYPE: value_mapping.MetadataValue(""),
constants.OWNER: value_mapping.MetadataValue("")}
else:
persistenceIds = [self.__metadataIdMapper.mapMetadataId(propertyId) for propertyId in propertyIds]
persistenceProperties = self._retrieveProperties(connection, persistenceIds)
properties = dict()
for persistenceId, value in persistenceProperties.iteritems():
logicalId = self.__metadataIdMapper.mapPersistenceMetadataId(persistenceId)
if not logicalId is None:
representationValue = self._getMetadataValue(persistenceId, value)
properties[logicalId] = representationValue
return properties
finally:
self.__connectionPool.release(connection)
def _retrieveAllProperties(self, connection):
""" Retrieves all properties. """
webdavStorer = self.__connectionHelper.createResourceStorer(self._persistenceId, connection)
try:
return webdavStorer.readAllProperties()
except WebdavError, error:
errorMessage = "Problem during meta data retrieval." \
+ "Reason: '%s'" % error.reason
raise PersistenceError(errorMessage)
def _retrieveProperties(self, connection, propertyIds):
""" Retrieves the specified set of properties. """
# pylint: disable=W0142
# W0142: The use of * / ** magic is required by the webdav library interface.
result = dict()
if len(propertyIds) > 0:
webdavStorer = self.__connectionHelper.createResourceStorer(self._persistenceId, connection)
try:
result = webdavStorer.readProperties(*propertyIds)
except WebdavError, error:
errorMessage = "Problem during meta data retrieval." \
+ "Reason: '%s'" % error.reason
raise PersistenceError(errorMessage)
return result
@staticmethod
    def _getMetadataValue(persistenceId, persistedValueAsXml):
        """
        Adapts the retrieved XML representation of the WebDAV
        library and returns an according value representation.
        @param persistenceId: Identifier on the persistence layer (namespace, name).
        @type persistenceId: C{tuple} of C{string}, C{string}
        @param persistedValueAsXml: XML representation of the property value as returned by the WebDAV library.
        @return: Value representation that can be converted to different Python types.
        @rtype: C{value_mapping.MetadataValue}
        """
        persistedValueAsString = ""
        if (Constants.NS_DAV, Constants.PROP_OWNER) == persistenceId:
            if len(persistedValueAsXml.children) > 0:
                persistedValueAsString = persistedValueAsXml.children[0].textof()
        else:
            persistedValueAsString = persistedValueAsXml.textof()
return value_mapping.MetadataValue(persistedValueAsString)
def update(self, properties):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
connection = self.__connectionPool.acquire()
try:
persistencePropertyValueMapping = dict()
for propertyId, value in properties.iteritems():
persistenceId = self.__metadataIdMapper.mapMetadataId(propertyId)
persistenceValue = value_mapping.getPersistenceRepresentation(value)
persistencePropertyValueMapping[persistenceId] = persistenceValue
webdavStorer = self.__connectionHelper.createResourceStorer(self._persistenceId, connection)
try:
webdavStorer.writeProperties(persistencePropertyValueMapping)
except WebdavError, error:
errorMessage = "Cannot update properties of item '%s'" % self.identifier \
+ "Reason: '%s'" % error.reason
raise PersistenceError(errorMessage)
except WrongNameError:
raise PersistenceError("Cannot update properties because invalid characters " \
+ "are contained within an identifier.")
finally:
self.__connectionPool.release(connection)
def delete(self, propertyIds):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>}"""
# pylint: disable=W0142
# W0142: The use of * / ** magic is required by the webdav library interface.
connection = self.__connectionPool.acquire()
try:
persistenceIds = [self.__metadataIdMapper.mapMetadataId(propertyId) for propertyId in propertyIds]
webdavStorer = self.__connectionHelper.createResourceStorer(self._persistenceId, connection)
try:
webdavStorer.deleteProperties(None, *persistenceIds)
except WebdavError, error:
errorMessage = "Cannot delete properties of item '%s'" % self.identifier \
+ "Reason: '%s'" % error.reason
raise PersistenceError(errorMessage)
finally:
self.__connectionPool.release(connection)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/webdav_/metadata/adapter.py",
"copies": "1",
"size": "9993",
"license": "bsd-3-clause",
"hash": 4243594167960795600,
"line_mean": 48.4696969697,
"line_max": 124,
"alpha_frac": 0.6644651256,
"autogenerated": false,
"ratio": 4.769928400954654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5934393526554654,
"avg_score": null,
"num_lines": null
} |
"""
Provides a dialog to handle credential/authentication information updates for data store.
Via callbacks the authentication information of the centrally managed file systems can be directly updated.
Currently, we just support simple username/password authentication.
"""
from PyQt4 import QtGui
from PyQt4.QtGui import QDialogButtonBox
from PyQt4 import QtCore
from datafinder.gui.gen.user import datastore_credential_update_dialog_ui
from datafinder.gui.user.common import util
__version__ = "$Revision-Id:$"
class DataStoreCredentialUpdateView(datastore_credential_update_dialog_ui.Ui_CredentialUpdateDialog, QtGui.QDialog):
""" View component of the dialog which provides a simplified interface to the GUI elements. """
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
datastore_credential_update_dialog_ui.Ui_CredentialUpdateDialog.__init__(self)
self.setupUi(self)
@property
def retryButton(self):
""" Provides access to the Retry button. """
return self.buttons.button(QDialogButtonBox.Retry)
def indicateMessage(self, message):
""" Shows the error message. """
self.errorLabel.show()
self.errorLabel.setText(message)
class DataStoreCredentialUpdateController(QtCore.QObject):
""" Handles the interaction with the user. """
def __init__(self, datastore, credentialUpdateCallback, parent=None):
"""
@param datastore: Data store configuration to show the user some details.
@type datastore: L{DefaultDataStore<datafinder.core.configuration.datastores.datastore.DefaultDataStore>}
@param credentialUpdateCallback: Callback function to set new credentials.
@see: L{AuthenticationError<datafinder.core.error.AuthenticationError>} for details about the callback.
@param parent: The parent widget of the dialog.
@type parent: L{QWidget<PyQt4.QtGui.QWidget>}
"""
QtCore.QObject.__init__(self)
self._datastore = datastore
self._credentialUpdateCallback = credentialUpdateCallback
self._view = DataStoreCredentialUpdateView(parent)
self._workerThread = None
self._view.retryButton.clicked.connect(self._performCredentialUpdate)
self._indicateErrorMessage()
def _indicateErrorMessage(self):
message = (
"The data store '%s' can currently not be accessed.\n"
"You can try to provide new authentication information\n"
"and retry to establish the connection."
% self._datastore.name)
self._view.indicateMessage(message)
def _indicateSuccessMessage(self):
message = (
"The data store '%s' could be successfully accessed!"
% self._datastore.name)
self._view.indicateMessage(message)
def _performCredentialUpdate(self):
username = unicode(self._view.username_input.text())
password = unicode(self._view.password_input.text())
credentials = {"username": username, "password": password}
self._view.retryButton.setEnabled(False)
self._workerThread = util.startNewQtThread(lambda: self._credentialUpdateCallback(credentials), self._evaluateCredentialUpdate)
def _evaluateCredentialUpdate(self):
if self._workerThread.result:
self._indicateSuccessMessage()
else:
self._indicateErrorMessage()
self._view.retryButton.setEnabled(True)
def show(self):
""" Delegates to view. """
return self._view.show()
def exec_(self):
""" Delegates to view. """
return self._view.exec_()
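# A sketch of the expected callback contract: the callback receives a dictionary
# with "username" and "password" keys and should return a true value if the data
# store could be accessed with the new credentials. The _reconnect helper below
# is purely hypothetical.
def _credentialUpdateCallbackSketch(credentials):
    try:
        return _reconnect(credentials["username"], credentials["password"])
    except Exception:
        return False
# Usage: DataStoreCredentialUpdateController(datastore, _credentialUpdateCallbackSketch).exec_()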
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/datastore_dialog/credential_update_dialog.py",
"copies": "1",
"size": "5593",
"license": "bsd-3-clause",
"hash": 7255814150311223000,
"line_mean": 38.5289855072,
"line_max": 135,
"alpha_frac": 0.6799570892,
"autogenerated": false,
"ratio": 4.499597747385358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5679554836585358,
"avg_score": null,
"num_lines": null
} |
"""
This module implements some sanity checks based on the visitor/tree walker classes
defined in this package.
"""
import logging
from datafinder.core.item.base import ItemBase
from datafinder.core.item.collection import ItemRoot, ItemCollection
from datafinder.core.item.data_persister import constants
from datafinder.core.item.leaf import ItemLeaf
from datafinder.core.item.link import ItemLink
from datafinder.core.item.privileges.privilege import ALL_PRIVILEGE, WRITE_CONTENT, WRITE_PROPERTIES, READ_PRIVILEGE
from datafinder.core.item.visitor.base import ItemTreeWalkerBase, VisitSlot
from datafinder.persistence.error import PersistenceError
__version__ = "$Revision-Id:$"
_logger = logging.getLogger()
class ActionCheckVisitor(object):
"""
This class performs sanity checks on a given L{ItemBase<datafinder.core.item.base.ItemBase>}.
    It checks which capabilities this item has (e.g. whether it can be read, written, or deleted).
    To check a specific capability, you may use the convenience C{can*} methods. But as those
    re-check the item each time they are called, it is better to access the C{capabilities} dictionary
in conjunction with the L{check<datafinder.core.item.visitor.checks.ActionCheckTreeWalker.check>}
method if you want to query different capabilities at once.
@cvar CAPABILITY_ADD_CHILDREN: Other items can be added below this item.
@cvar CAPABILITY_DELETE: The item can be deleted (or does not exist yet).
@cvar CAPABILITY_COPY: The item can be copied.
@cvar CAPABILITY_MOVE: The item can be moved.
@cvar CAPABILITY_STORE: The associated data for the item can be stored.
    @cvar CAPABILITY_RETRIEVE: The associated data for the item can be retrieved.
@cvar CAPABILITY_ARCHIVE: The given item can be archived.
@cvar CAPABILITY_SEARCH: Searches can be performed using the given item.
    @cvar CAPABILITY_PRIVILEGES: Privileges can be added to the item.
@cvar CAPABILITY_RETRIEVE_PROPERTIES: Properties of the item can be retrieved.
@cvar CAPABILITY_STORE_PROPERTIES: Properties of the given item can be written.
"""
CAPABILITY_ADD_CHILDREN = "canAddChildren"
CAPABILITY_DELETE = "delete"
CAPABILITY_COPY = "copy"
CAPABILITY_MOVE = "move"
CAPABILITY_STORE = "storeData"
CAPABILITY_RETRIEVE = "retrieveData"
CAPABILITY_ARCHIVE = "archive"
CAPABILITY_SEARCH = "search"
CAPABILITY_PRIVILEGES = "privilege"
CAPABILITY_RETRIEVE_PROPERTIES = "retrieveProperties"
CAPABILITY_STORE_PROPERTIES = "storeProperties"
def __init__(self, resolveLinks=False, hasCustomMetadataSupport=False, hasSearchSupport=False):
self.resolveLinks = resolveLinks
self._hasCustomMetadataSupport = hasCustomMetadataSupport
self._hasSearchSupport = hasSearchSupport
self.capabilities = dict()
self._path = list()
def check(self, item):
"""
Run checks.
This method cares for resetting the capabilities dictionary first. It then triggers the visitor
        to check the item's capabilities.
@param item: The item to be checked
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self._initCapabilities()
if not item.ignoreChecks:
self.handle(item)
def _initCapabilities(self):
self.capabilities = {
ActionCheckTreeWalker.CAPABILITY_ADD_CHILDREN: True,
ActionCheckTreeWalker.CAPABILITY_DELETE: True,
ActionCheckTreeWalker.CAPABILITY_COPY: True,
ActionCheckTreeWalker.CAPABILITY_MOVE: True,
ActionCheckTreeWalker.CAPABILITY_STORE: True,
ActionCheckTreeWalker.CAPABILITY_RETRIEVE: True,
ActionCheckTreeWalker.CAPABILITY_ARCHIVE: True,
ActionCheckTreeWalker.CAPABILITY_SEARCH: self._hasSearchSupport,
ActionCheckTreeWalker.CAPABILITY_PRIVILEGES: False,
ActionCheckTreeWalker.CAPABILITY_RETRIEVE_PROPERTIES: self._hasCustomMetadataSupport,
ActionCheckTreeWalker.CAPABILITY_STORE_PROPERTIES: self._hasCustomMetadataSupport
}
def handleDataNode(self, item):
"""
Implementation of the C{handle} slot for any items except links.
It enforces that only valid combinations of state, strategy and location are given
        and cares for setting the capabilities dictionary correctly. If one of the sanity checks
        fails, a C{ValueError} along with an expressive error message is raised.
@param item: The item which should be checked.
@type item: L{ItemRoot<datafinder.core.item.collection.ItemRoot>},
L{ItemCollection<datafinder.core.item.collection.ItemCollection>} or
L{ItemLeaf<datafinder.core.item.leaf.ItemLeaf>}
        @raise ValueError: Any sanity check failed on the given item or any of its children.
"""
if item.isRoot:
self._disableAllCapabilities()
try:
self.capabilities[self.CAPABILITY_ADD_CHILDREN] = item.fileStorer.canAddChildren
except PersistenceError, error:
_logger.error(error)
self.capabilities[self.CAPABILITY_ADD_CHILDREN] = False
self.capabilities[self.CAPABILITY_SEARCH] = self._hasSearchSupport
else:
self._checkPrivileges(item)
self._checkDataState(item)
if not (item.isCollection and item.state == constants.ITEM_STATE_NULL):
self._disable((ActionCheckVisitor.CAPABILITY_ARCHIVE,))
def _disableAllCapabilities(self):
self._disable(self.capabilities.keys())
def _disable(self, caps):
for capability in caps:
self.capabilities[capability] = False
def _checkPrivileges(self, item):
if not item is None and not (ALL_PRIVILEGE in item.privileges or WRITE_CONTENT in item.privileges):
self._disable((ActionCheckVisitor.CAPABILITY_ADD_CHILDREN,
ActionCheckVisitor.CAPABILITY_STORE,
ActionCheckVisitor.CAPABILITY_STORE_PROPERTIES,
ActionCheckVisitor.CAPABILITY_MOVE,
ActionCheckVisitor.CAPABILITY_DELETE))
if not item is None and not (ALL_PRIVILEGE in item.privileges or WRITE_PROPERTIES in item.privileges):
self._disable((ActionCheckVisitor.CAPABILITY_STORE_PROPERTIES,))
if not item is None and not (ALL_PRIVILEGE in item.privileges or READ_PRIVILEGE in item.privileges):
self._disable((ActionCheckVisitor.CAPABILITY_RETRIEVE,
ActionCheckVisitor.CAPABILITY_RETRIEVE_PROPERTIES,
ActionCheckVisitor.CAPABILITY_COPY,
ActionCheckVisitor.CAPABILITY_ARCHIVE,
ActionCheckVisitor.CAPABILITY_SEARCH))
def _checkDataState(self, item):
state = item.state
# Capability constraints for items in state INACCESSIBLE or NULL
# - must not store data
# - must not retrieve data
# - (i.e. may not access data)
if state == constants.ITEM_STATE_INACCESSIBLE \
or state == constants.ITEM_STATE_NULL:
self._disable((ActionCheckVisitor.CAPABILITY_STORE,
ActionCheckVisitor.CAPABILITY_RETRIEVE))
# Capability constraints for items in state MIGRATED
# - must not be accessed
elif state == constants.ITEM_STATE_MIGRATED \
or state == constants.ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE:
self._disableAllCapabilities()
# Capability constraints for items in state ARCHIVE
# - must not change properties
elif state == constants.ITEM_STATE_ARCHIVED:
self._disable((ActionCheckVisitor.CAPABILITY_STORE_PROPERTIES, ))
# Capability constraints for items in state ARCHIVE MEMBER
# - must not delete data
# - must not store data
        # - adding sub-items is optional
# - must not change properties
# - must not be copied or moved
elif state == constants.ITEM_STATE_ARCHIVED_MEMBER:
self._disable((ActionCheckVisitor.CAPABILITY_COPY,
ActionCheckVisitor.CAPABILITY_MOVE,
ActionCheckVisitor.CAPABILITY_DELETE,
ActionCheckVisitor.CAPABILITY_STORE,
ActionCheckVisitor.CAPABILITY_STORE_PROPERTIES))
# Capability constraints for items in state READONLY ARCHIVE
# - must not delete data
# - must not store data
# - must not change properties
elif state == constants.ITEM_STATE_ARCHIVED_READONLY:
self._disable((ActionCheckVisitor.CAPABILITY_STORE,
ActionCheckVisitor.CAPABILITY_DELETE,
ActionCheckVisitor.CAPABILITY_STORE_PROPERTIES))
def handleLink(self, item):
"""
Implementation of the C{handle} slot for L{ItemLink<datafinder.core.item.link.ItemLink>}.
"""
if self.resolveLinks and item.name not in self._path:
item = item.linkTarget
self.handle(item)
else:
self._checkPrivileges(item)
self._checkDataState(item)
def handleBase(self, item):
"""
Implementation of the C{handle} slot for L{ItemBase<datafinder.core.item.base.ItemBase>}.
"""
if item.isLink:
self.handleLink(item)
else:
self.handleDataNode(item)
handle = VisitSlot((handleDataNode, [ItemRoot, ItemCollection, ItemLeaf]),
(handleLink, [ItemLink]),
(handleBase, [ItemBase]))
def canAddChildren(self, item):
"""
        Convenience method to check whether child items can be created below the given item.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_ADD_CHILDREN]
def canDelete(self, item):
"""
Convenience method to check whether an item can be deleted.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_DELETE]
def canCopy(self, item):
"""
Convenience method to check whether an item can be copied.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_COPY]
def canMove(self, item):
"""
Convenience method to check whether an item can be moved.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_MOVE]
def canStoreData(self, item):
"""
Convenience method to check whether the associated data can be stored using this item.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_STORE]
def canRetrieveData(self, item):
"""
Convenience method to check whether the associated data can be retrieved using this item.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_RETRIEVE]
def canArchive(self, item):
"""
Convenience method to check whether an item can be archived.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_ARCHIVE]
def canSearch(self, item):
"""
Convenience method to check whether an item can be searched.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_SEARCH]
def canPrivileges(self, item):
"""
Convenience method to check whether an item can have rights added to it.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_PRIVILEGES]
def canRetrieveProperties(self, item):
"""
        Convenience method to check whether an item's properties can be retrieved.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_RETRIEVE_PROPERTIES]
def canStoreProperties(self, item):
"""
        Convenience method to check whether an item's properties can be written.
@note: The sanity checks are run again when this method is called.
@param item: The item to be checked.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self.check(item)
return self.capabilities[ActionCheckVisitor.CAPABILITY_STORE_PROPERTIES]
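# As the class documentation above recommends: when several capabilities are
# needed at once, run check() a single time and read the capabilities dictionary
# instead of calling the can* convenience methods repeatedly. A minimal sketch,
# assuming "item" is an initialized ItemBase instance:
def _capabilityQuerySketch(item):
    visitor = ActionCheckVisitor()
    visitor.check(item)
    return visitor.capabilities[ActionCheckVisitor.CAPABILITY_COPY] \
           and visitor.capabilities[ActionCheckVisitor.CAPABILITY_DELETE]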
class ActionCheckTreeWalker(ItemTreeWalkerBase, ActionCheckVisitor):
"""
This class does essentially the same as
L{ActionCheckVisitor<datafinder.core.item.visitor.checks.ActionCheckVisitor>} but extends the checks
to any children of the item due to its nature as tree walker.
@ivar handle: Visitor slot that does the actual handling.
@type handle: C{VisitSlot}
"""
def __init__(self, resolveLinks=False, hasCustomMetadataSupport=False, hasSearchSupport=False):
"""
Constructor.
@param resolveLinks: Select whether links should be resolved or not.
@type resolveLinks: boolean
"""
ActionCheckVisitor.__init__(self, resolveLinks, hasCustomMetadataSupport, hasSearchSupport)
ItemTreeWalkerBase.__init__(self, mode=-1) # enforce pre-order scheme
self.affectedItems = list()
self._path = list()
def check(self, item):
"""
Run checks.
This method cares for resetting the capabilities dictionary first. It then starts walking
the tree and updates the capabilities for each node it hits.
@param item: The item to be checked
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self._initCapabilities()
if not item.ignoreChecks:
self.affectedItems = list()
self.walk(item)
self.affectedItems.remove(item)
def walk(self, node):
"""
Re-implementation of the C{walk} slot for any item.
Simply cares for appending current node to the list of affected items and calling
the base implementation of this slot.
"""
if node.state in [constants.ITEM_STATE_ARCHIVED, constants.ITEM_STATE_ARCHIVED_READONLY]:
currentCapabilities = self.capabilities.copy() #ignoring capabilities of archive members
self.affectedItems.append(node)
super(ActionCheckTreeWalker, self).walk(node)
if node.state in [constants.ITEM_STATE_ARCHIVED, constants.ITEM_STATE_ARCHIVED_READONLY]:
self.capabilities = currentCapabilities
self.handle(node)
def handleLink(self, item):
"""
Implementation of the C{handle} slot for L{ItemLink<datafinder.core.item.link.ItemLink>}.
"""
if self.resolveLinks and item.name not in self._path:
self._path.append(item.name)
item = item.linkTarget
self.walk(item)
self._path = self._path[:-1]
handle = VisitSlot((handleLink, [ItemLink]), inherits="handle")
class ItemCapabilityChecker(object):
"""
Convenience class providing the can* methods of C{ActionCheckVisitor}.
The item is not passed to the method but to the constructor.
"""
def __init__(self, item, hasCustomMetadataSupport=False, hasSearchSupport=False):
""" Constructor. """
self._item = item
self._actionCheckVisitor = ActionCheckVisitor(False, hasCustomMetadataSupport, hasSearchSupport)
def _decorateMethodWithItemInstance(self, method):
""" Returns a method decorated with the item instance. """
def _decoratedMethod():
""" The decorated method implementation. """
return method(self._item)
return property(_decoratedMethod).fget()
def __getattr__(self, name):
""" The implementation does the decoration magic. """
if hasattr(self._actionCheckVisitor, name):
return self._decorateMethodWithItemInstance(getattr(self._actionCheckVisitor, name))
else:
raise AttributeError("AttributeError: '%s' object has no attribute '%s'" % (str(self), name))
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/visitor/checks.py",
"copies": "1",
"size": "20884",
"license": "bsd-3-clause",
"hash": 5893060359118241000,
"line_mean": 40.1898989899,
"line_max": 116,
"alpha_frac": 0.6354625551,
"autogenerated": false,
"ratio": 4.451929226177787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587391781277787,
"avg_score": null,
"num_lines": null
} |
""" Module implementing a wizard handler for archive creation. """
from datetime import datetime, timedelta
from datafinder.core.configuration.properties.constants import DATASTORE_NAME_ID, \
ARCHIVE_RETENTION_EXCEEDED_DATETIME_ID
from datafinder.gui.user.dialogs.creation_wizard import constants
from datafinder.gui.user.dialogs.creation_wizard.state_handler.base_state_handler import BaseStateHandler
from datafinder.gui.user.models.repository.filter.leaf_filter import LeafFilter
__version__ = "$Revision-Id:$"
class CreateArchiveHandler(BaseStateHandler):
""" Handles collection creation. """
WINDOW_TITLE = "New Archive"
_PAGEID_TITLE_SUBTITLE_MAP = {constants.SOURCE_PAGE_ID: ("Archive Collection", "Please select the collection that should be archived."),
constants.TARGET_PAGE_ID: ("Target Collection",
"Select the collection where the archive should be stored."),
constants.DATASTORE_PAGE_ID: ("Storage Location", "Please select the storage location of your archive.")}
def __init__(self, wizard):
""" Constructor. """
BaseStateHandler.__init__(self, wizard)
self._repositoryModel = wizard.sourceRepositoryModel
self._sourceIndex = None
self.lockIndex = None # Redefining it because check-in pylint wants it
def checkPreConditions(self):
""" Checks the preconditions. """
result = None
if self._repositoryModel.initialized and self._repositoryModel.isManagedRepository:
if len(self._repositoryModel.repository.configuration.archiveDatastores) == 0:
result = "There are no archive storage locations configured. Please contact your administrator."
return result
def nextId(self):
""" Returns the identifier of the next page. """
currentId = self._wizard.currentId()
nextId = -1
if currentId == constants.SOURCE_PAGE_ID:
nextId = constants.TARGET_PAGE_ID
elif currentId == constants.TARGET_PAGE_ID \
and self._repositoryModel.repository.configuration.isManagedRepository:
nextId = constants.DATASTORE_PAGE_ID
return nextId
def initializePage(self, identifier):
""" Performs initialization actions for the wizard page with the given identifier. """
if identifier == constants.SOURCE_PAGE_ID:
preSelectedSourceItems = self._wizard.preSelectedSourceItems
if preSelectedSourceItems is None or len(preSelectedSourceItems) == 0:
preSelectedSourceItems = [self._repositoryModel.activeIndex]
self._wizard.configureSourceItemPage(LeafFilter(self._repositoryModel), preSelectedSourceItems,
disableItemNameSpecification=True,
itemCheckFunction=lambda item: item.capabilities.canArchive)
elif identifier == constants.TARGET_PAGE_ID:
self._wizard.configureTargetItemPage(LeafFilter(self._repositoryModel), [self._wizard.sourceIndexes[0].parent()],
disableItemNameSpecification=True, checkTargetDataTypesExistence=True,
targetIndex=self._wizard.sourceIndexes[0])
else:
self._wizard.configureDataStorePage(constants.ARCHIVE_DATASTORE_MODE, self._repositoryModel)
def prepareFinishSlot(self):
""" Performs the finish slot preparation. """
self.lockIndex = self._wizard.targetIndexes[0]
self._sourceIndex = self._wizard.sourceIndexes[0]
self._repositoryModel.lock([self.lockIndex])
def finishSlotCallback(self):
""" Unlocks the lock index. """
self._repositoryModel.unlock(self.lockIndex)
self._repositoryModel.activeIndex = self.lockIndex
def finishSlot(self):
""" Performs specific actions when the user commits his parameters. """
properties = None
if self._repositoryModel.isManagedRepository:
properties = list()
dataStoreConfiguration = self._wizard.dataStoreConfiguration
dataStoreProperty = self._repositoryModel.repository.createProperty(DATASTORE_NAME_ID, dataStoreConfiguration.name)
properties.append(dataStoreProperty)
try:
                retentionPeriod = dataStoreConfiguration.retentionPeriod or 1
                exceededDate = datetime.now() + timedelta(retentionPeriod)
            except (OverflowError, AttributeError):
                exceededDate = datetime.now() + timedelta(1)
            exceededDateProperty = self._repositoryModel.repository.createProperty(ARCHIVE_RETENTION_EXCEEDED_DATETIME_ID, exceededDate)
            properties.append(exceededDateProperty)
self._repositoryModel.createArchive(self._sourceIndex, self.lockIndex, properties)
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/gui/user/dialogs/creation_wizard/state_handler/create_archive_state_handler.py",
"copies": "1",
"size": "6854",
"license": "bsd-3-clause",
"hash": 887152385997534000,
"line_mean": 47.7,
"line_max": 140,
"alpha_frac": 0.6692442369,
"autogenerated": false,
"ratio": 4.6945205479452055,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020648906218639938,
"num_lines": 140
} |
"""
Test case for the item support module.
"""
import unittest
from datafinder.core.error import ItemError
from datafinder.script_api.error import ItemSupportError
from datafinder.script_api.item import item_support
from datafinder_test.mocks import SimpleMock
__version__ = "$Revision-Id:$"
class ItemSupportTestCase(unittest.TestCase):
"""
The TestCase for the item support.
"""
def setUp(self):
""" Creates the required mocks. """
self._targetItemMock = SimpleMock()
self._itemMock = SimpleMock()
self._repositoryMock = SimpleMock(self._itemMock)
self._repositoryManagerInstanceMock = SimpleMock(workingRepository=self._repositoryMock)
item_support.repositoryManagerInstance = self._repositoryManagerInstanceMock
def testRefresh(self):
""" Test for the refresh method. """
item_support.refresh("")
self._itemMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.refresh, "")
def testCreateCollection(self):
""" Test for the createCollection method. """
item_support.createCollection("", dict())
self._itemMock.methodNameResultMap = {"create": (None, ItemError(""))}
self.assertRaises(ItemSupportError, item_support.createCollection, "", dict())
def testCreateLeaf(self):
""" Test for the createLeaf method. """
item_support.createLeaf("", dict())
self._itemMock.methodNameResultMap = {"create": (None, ItemError(""))}
self.assertRaises(ItemSupportError, item_support.createLeaf, "", dict())
def testCreateLink(self):
""" Test for the createLink method. """
item_support.createLink("", "")
self._itemMock.methodNameResultMap = {"create": (None, ItemError(""))}
self.assertRaises(ItemSupportError, item_support.createLink, "", "")
def testDelete(self):
""" Test for the delete method. """
item_support.delete("")
self._itemMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.delete, "")
def testCopy(self):
""" Test for the copy method. """
item_support.copy("", "")
self._repositoryMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.copy, "", "")
self._repositoryMock.error = None
self._itemMock.methodNameResultMap = {"copy": (None, ItemError(""))}
self.assertRaises(ItemSupportError, item_support.copy, "", "")
def testMove(self):
""" Test for the move method. """
item_support.move("", "")
self._repositoryMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.move, "", "")
self._repositoryMock.error = None
self._itemMock.methodNameResultMap = {"move": (None, ItemError(""))}
self.assertRaises(ItemSupportError, item_support.move, "", "")
def testRetrieveData(self):
""" Test for the retrieveData method. """
item_support.retrieveData("")
self._itemMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.retrieveData, "")
def testStoreData(self):
""" Test for the storeData method. """
item_support.storeData("", "")
self._itemMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.storeData, "", "")
def testSearch(self):
""" Test for the search method. """
self._itemMock.value = list()
item_support.search("", "")
self._itemMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.search, "", "")
def testWalk(self):
""" Tests the walk method. """
self._repositoryMock.value = [SimpleMock(path="/"), SimpleMock(path="/test")]
self.assertEquals(item_support.walk("/"), ["/", "/test"])
self._repositoryMock.error = ItemError("")
self.assertRaises(ItemSupportError, item_support.walk, "")
| {
"repo_name": "DLR-SC/DataFinder",
"path": "test/unittest/datafinder_test/script_api/item/item_support_test.py",
"copies": "1",
"size": "5915",
"license": "bsd-3-clause",
"hash": 8734669603307466000,
"line_mean": 34.8475609756,
"line_max": 96,
"alpha_frac": 0.646830093,
"autogenerated": false,
"ratio": 4.365313653136531,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02497748127940121,
"num_lines": 164
} |
"""
Implements the data adapter to access files/directories via SFTP.
"""
__version__ = "$Revision-Id:$"
import errno
import stat
import StringIO
import sys
import tempfile
from paramiko.ssh_exception import SSHException
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.data import datastorer
from datafinder.persistence.adapters.sftp import constants
class SftpDataAdapter(datastorer.NullDataStorer):
"""
@note: Links are not supported.
@note: Copying of large collections might be inefficient
because files are transferred to the client and then
back to the server. However, this is a limitation of SFTP.
@see: For interface details see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
"""
def __init__(self, identifier, persistenceIdentifier,
connectionPool, factory, idMapper):
datastorer.NullDataStorer.__init__(self, identifier)
self._connectionPool = connectionPool
self._persistenceIdentifier = persistenceIdentifier
self._factory = factory
self._idMapper = idMapper
@property
def isCollection(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
return stat.S_ISDIR(connection.stat(self._persistenceIdentifier).st_mode)
except (IOError, EOFError, SSHException):
message = "Cannot determine item type (file or collection) of '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
@staticmethod
def _reRaiseError(message):
_, value, traceback = sys.exc_info()
raise PersistenceError, u"%s.\nReason: '%s'" % (message, value), traceback
@property
def isLeaf(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
return not self.isCollection
@property
def canAddChildren(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
return self.isCollection
def createCollection(self, recursively=False):
"""
@see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
        @note: Currently, missing parent collections are created recursively. I.e.,
        this might lead to problems when creating deeply nested collections.
"""
if recursively:
self._createMissingParents()
self._createSingleCollection()
def _createMissingParents(self):
parentId = self._idMapper.determineParentId(self.identifier)
parent = self._factory.createDataStorer(parentId)
if not parent.exists():
try:
parent.createCollection(recursively=True)
except RuntimeError:
                raise PersistenceError(
                    "Cannot create collection '%s'.\n"
                    "The collection path is too deeply nested." % self.identifier)
def _createSingleCollection(self):
connection = self._connectionPool.acquire()
try:
connection.mkdir(self._persistenceIdentifier)
except (IOError, EOFError, SSHException):
message = "Cannot create collection '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
# Set the directory permissions because the mode parameter of
# mkdir did not work for rwxrws--T (=> x instead of s)
self._setPermissions(constants.DEFAULT_DIRECTORY_PERMISSIONS)
def _setPermissions(self, mode):
""" Helper method which sets the permissions of a dirctory/file to the given mode.
See os.chmode for details on the mode parameter (octal).
"""
connection = self._connectionPool.acquire()
try:
connection.chmod(self._persistenceIdentifier, mode)
except (IOError, EOFError, SSHException):
message = "Cannot set default permissions of file '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def createResource(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
self.writeData(StringIO.StringIO(""))
self._setPermissions(constants.DEFAULT_FILE_PERMISSIONS)
def createLink(self, destination):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
raise PersistenceError("Not implemented.")
def getChildren(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
children = list()
for name in connection.listdir(self._persistenceIdentifier):
name = name.decode(constants.FILE_NAME_ENCODING, "replace")
child_id = self._idMapper.determineChildId(self.identifier, name)
children.append(child_id)
return children
except (IOError, EOFError, SSHException):
message = "Cannot retrieve children of item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def exists(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
connection.stat(self._persistenceIdentifier)
return True
except IOError, error:
if error.errno == errno.ENOENT:
return False
message = "Cannot determine existence of '%s'!" % self.identifier
self._reRaiseError(message)
except (EOFError, SSHException):
message = "Cannot determine existence of '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def delete(self):
"""
@see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
@note: As there is no library function to delete complete directories,
we implemented it on our own.
"""
isCollection = self.isCollection
connection = self._connectionPool.acquire()
try:
if isCollection:
self._deleteCollection(connection)
else:
self._deleteLeaf(connection)
except (IOError, EOFError, SSHException):
message = "Cannot delete item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def _deleteCollection(self, connection):
emptiedCollections = self._emptyAllCollections(connection)
self._deleteEmptiedCollections(connection, emptiedCollections)
def _emptyAllCollections(self, connection):
collections = [self._persistenceIdentifier]
emptiedCollections = list()
while collections:
currentCollection = collections[0]
for attrs in connection.listdir_attr(currentCollection):
persistenceId = self._idMapper.determinePersistenceChildId(
currentCollection, attrs.filename)
if not stat.S_ISDIR(attrs.st_mode):
connection.remove(persistenceId)
else:
collections.append(persistenceId)
collections.remove(currentCollection)
emptiedCollections.append(currentCollection)
return emptiedCollections
@staticmethod
def _deleteEmptiedCollections(connection, emptiedCollections):
emptiedCollections.reverse()
for collection in emptiedCollections:
connection.rmdir(collection)
def _deleteLeaf(self, connection):
connection.remove(self._persistenceIdentifier)
def copy(self, destination):
"""
@see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>}
@note: There is no library function to copy complete directories.
Additionally, every file needs to be transferred to the client
and back to the server. Thus, it takes some time to copy large data sets.
Unfortunately, this is a limitation of SFTP.
"""
isCollection = self.isCollection
connection = self._connectionPool.acquire()
try:
if isCollection:
self._copyCollection(connection, destination)
else:
self._copyLeaf(destination)
except (IOError, EOFError, SSHException):
message = "Cannot copy item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def _copyCollection(self, connection, destination):
collections = [self]
        baseOriginalId = self.identifier
baseDestinationId = destination.identifier
while collections:
currentCollection = collections[0]
            self._createDestinationCollection(currentCollection, baseOriginalId, baseDestinationId)
            self._copyCollectionContent(currentCollection, connection, collections, baseOriginalId, baseDestinationId)
    def _createDestinationCollection(self, orgCollection, baseOriginalId, baseDestinationId):
        destCollectionId = orgCollection.identifier.replace(baseOriginalId, baseDestinationId)
destCollection = self._factory.createDataStorer(destCollectionId)
destCollection.createCollection()
    def _copyCollectionContent(self, orgCollection, connection, collections, baseOriginalId, baseDestinationId):
orgPersistenceId = self._idMapper.determinePeristenceId(orgCollection.identifier)
for attrs in connection.listdir_attr(orgPersistenceId):
name = attrs.filename.decode(constants.FILE_NAME_ENCODING, "replace")
itemId = self._idMapper.determineChildId(orgCollection.identifier, name)
itemStorer = self._factory.createDataStorer(itemId)
if stat.S_ISDIR(attrs.st_mode):
collections.append(itemStorer)
else:
                destItemId = itemId.replace(baseOriginalId, baseDestinationId)
destItemStorer = self._factory.createDataStorer(destItemId)
data = itemStorer.readData()
destItemStorer.writeData(data)
collections.remove(orgCollection)
def _copyLeaf(self, destination):
data = self.readData()
destination.writeData(data)
def move(self, destination):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
destPersistenceId = self._idMapper.determinePeristenceId(destination.identifier)
try:
connection.rename(self._persistenceIdentifier, destPersistenceId)
except (IOError, EOFError, SSHException):
message = "Cannot move/rename item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def readData(self):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
temporaryFileObject = tempfile.TemporaryFile()
try:
temporaryFileObject.seek(0)
remoteFileObject = connection.open(self._persistenceIdentifier)
block = remoteFileObject.read(constants.BLOCK_SIZE)
while block:
temporaryFileObject.write(block)
block = remoteFileObject.read(constants.BLOCK_SIZE)
temporaryFileObject.seek(0)
return temporaryFileObject
except (IOError, EOFError, SSHException):
message = "Cannot read data of item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
self._connectionPool.release(connection)
def writeData(self, data):
""" @see: L{NullDataStorer<datafinder.persistence.data.datastorer.NullDataStorer>} """
connection = self._connectionPool.acquire()
try:
remoteFileObject = connection.open(self._persistenceIdentifier, "w")
block = data.read(constants.BLOCK_SIZE)
while block:
remoteFileObject.write(block)
block = data.read(constants.BLOCK_SIZE)
except (IOError, EOFError, SSHException):
message = "Cannot write data to item '%s'!" % self.identifier
self._reRaiseError(message)
finally:
data.close()
self._connectionPool.release(connection)
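# Usage sketch (hedged): adapters are normally created by the SFTP factory,
# which wires up the connection pool, the factory itself, and the identifier
# mapper. The collaborator instances below are assumptions for illustration.
#
#     adapter = SftpDataAdapter(u"/project/file.dat", "/project/file.dat",
#                               connectionPool, factory, idMapper)
#     if adapter.exists() and adapter.isLeaf:
#         temporaryFileObject = adapter.readData()
#         print temporaryFileObject.read()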
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/persistence/adapters/sftp/data/adapter.py",
"copies": "1",
"size": "15344",
"license": "bsd-3-clause",
"hash": -5391806408292138000,
"line_mean": 41.4674220963,
"line_max": 117,
"alpha_frac": 0.6377085506,
"autogenerated": false,
"ratio": 4.678048780487805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5815757331087805,
"avg_score": null,
"num_lines": null
} |
""" This module provides a tree walker that copies all items from one repository to another. """
from datafinder.common import logger
from datafinder.core.configuration.properties.constants import CONTENT_CREATION_DATETIME_PROPERTY_ID, \
CREATION_DATETIME_ID, DATA_FORMAT_ID, SIZE_ID, \
CONTENT_SIZE_ID, CONTENT_MODIFICATION_DATETIME_ID, \
MODIFICATION_DATETIME_ID
from datafinder.core.error import CoreError, ItemError
from datafinder.core.item.collection import ItemCollection, ItemRoot
from datafinder.core.item.link import ItemLink
from datafinder.core.item.leaf import ItemLeaf
from datafinder.core.item.property import Property
from datafinder.core.item.visitor.base import ItemTreeWalkerBase, VisitSlot
__version__ = "$Revision-Id:$"
class Importer(ItemTreeWalkerBase, object): # inherit from object to make pylint happy, but this is a pylint issue
"""
This class uses the L{ItemTreeWalkerBase<datafinder.core.item.visitor.base.ItemTreeWalkerBase>}
protocol to implement a recursive copy algorithm between two repositories. It is assumed
(but NOT checked) that the repository configurations are compatible.
"""
_log = logger.getDefaultLogger()
def __init__(self, stopTraversalStates=None, stopProcessStates=None):
"""
@param stopTraversalStates: List of states that are used to prevent traversal of specific collections. Default: C{None}
@type stopTraversalStates: C{list} of C{unicode}
        @param stopProcessStates: List of states that are used to prevent processing of specific items. Default: C{None}
@type stopProcessStates: C{list} of C{unicode}
"""
super(Importer, self).__init__(-1, stopTraversalStates, stopProcessStates)
self._pwd = None
self._deferredLinks = None
self._destinationRootPath = None
self._itemFactory = None
self._newSourceName = None
self._defaultProperties = None
self._source = None
self._copyData = True
self._ignoreLinks = False
self._determinePropertiesCallback = None
self.importedLeafs = None
def performImport(self, source, targetCollection, newSourceName=None,
defaultProperties=None, copyData=True, ignoreLinks=False, determinePropertiesCallback=None):
"""
This method initiates the copy process and starts walking the source creating a
new node in the destination tree for each item it passes.
@param source: The item that should be imported.
@type source: L{ItemBase<datafinder.core.item.base.ItemBase>}
@param targetCollection: The collection that should afterwards contain the copy.
@type targetCollection: L{ItemCollection<datafinder.core.item.collection.ItemCollection>}
@param newSourceName: Optional new name of the source item. Default: C{None}
@type newSourceName: C{unicode}
@param defaultProperties: Optional list of properties which are set for every item. Default: C{None}
@type defaultProperties: C{list} of L{Property<datafinder.core.item.property.Property>}
@param copyData: Flag indicating whether data has to be copied or not.
@type copyData: C{bool}
@param ignoreLinks: Flag indicating the links are ignored during import. Default: C{False}
@type ignoreLinks: C{bool}
@param determinePropertiesCallback: Function determining properties used when importing a specific item.
        @type determinePropertiesCallback: C{callable} taking an item as input and returning a dictionary
                                           describing the properties.
@raise ItemError: Indicating errors during import.
"""
self._pwd = targetCollection
self._deferredLinks = list()
self._destinationRootPath = targetCollection.path
self._itemFactory = targetCollection.itemFactory
self._newSourceName = newSourceName
self._defaultProperties = defaultProperties
self._source = source
self._copyData = copyData
self.importedLeafs = list()
self._ignoreLinks = ignoreLinks
self._determinePropertiesCallback = determinePropertiesCallback
self.walk(source)
        missingDeferredLinkPaths = list()
        for source, importName, destinationParent in self._deferredLinks:
            try:
                self._copyLink(source, importName, destinationParent)
            except ItemError:
                missingDeferredLinkPaths.append(source.path)
        if len(missingDeferredLinkPaths) > 0:
            errorMessage = "The following links could not be imported:"
            for linkPath in missingDeferredLinkPaths:
                errorMessage += "\n" + linkPath
            raise ItemError(errorMessage)
def walk(self, node):
"""
@see: L{walk<datafinder.core.item.visitor.base.ItemTreeWalkerBase.walk>} method to add further post-processing.
"""
super(Importer, self).walk(node)
if node.isCollection:
if not node.state in self._stopTraversalStates:
self._pwd = self._pwd.parent
def _copyLink(self, source, importName, destinationParent):
""" Copies a link item. """
baseDestinationPath = self._destinationRootPath
if not baseDestinationPath.endswith("/"):
baseDestinationPath += "/"
baseDestinationPath += (self._newSourceName or self._source.name)
destinationLinkTargetPath = baseDestinationPath + source.linkTarget.path[len(self._source.path):]
destinationLinkTarget = self._itemFactory.create(destinationLinkTargetPath)
link = self._itemFactory.createLink(importName, destinationLinkTarget, destinationParent)
properties = source.properties.values()[:]
if not self._determinePropertiesCallback is None:
properties.extend(self._determinePropertiesCallback(source))
if not self._defaultProperties is None:
properties.extend(self._defaultProperties)
try:
link.create(properties)
except CoreError, error:
link.invalidate()
raise error
def _importLink(self, link):
""" Imports a link item. Not resolvable links are processed at the end. """
if not self._ignoreLinks:
importName = self._determineImportName(link)
if not link.linkTarget is None \
and link.linkTarget.path.startswith(self._source.path):
try:
self._copyLink(link, importName, self._pwd)
except ItemError:
self._deferredLinks.append((link, importName, self._pwd))
def _determineImportName(self, item):
""" Determines the name used importing the given item. """
importName = item.name
if item == self._source and not self._newSourceName is None:
importName = self._newSourceName
importName = self._itemFactory.determineValidItemName(importName)
if importName != item.name:
self._log.warning("Imported '%s' using different name: '%s'." % (item.path, importName))
return importName
def _importLeaf(self, leaf):
""" Retrieves the content of a leaf item. """
if leaf.capabilities.canRetrieveData:
importName = self._determineImportName(leaf)
importedLeaf = self._itemFactory.createLeaf(importName, self._pwd)
properties = self._determineLeafProperties(leaf)
try:
importedLeaf.create(properties)
except CoreError, error:
self._handleLeafCreationError(importedLeaf, error)
else:
if self._copyData:
try:
importedLeaf.storeData(leaf.retrieveData())
except CoreError, error:
self._handleLeafCreationError(importedLeaf, error)
else:
self.importedLeafs.append(leaf)
def _handleLeafCreationError(self, leaf, error):
self._log.error(error.args)
try:
leaf.delete(ignoreStorageLocation=True)
except CoreError, error_:
leaf.invalidate()
self._log.info(error_.args)
raise error
def _determineLeafProperties(self, leaf):
""" Determines the properties when importing a leaf item. """
properties = leaf.properties.values()[:]
if not leaf.isManaged:
contentCreationPropertyDefinition = self._itemFactory.getPropertyDefinition(CONTENT_CREATION_DATETIME_PROPERTY_ID)
properties.append(Property(contentCreationPropertyDefinition, leaf.properties[CREATION_DATETIME_ID].value))
contentModificationPropertyDefinition = self._itemFactory.getPropertyDefinition(CONTENT_MODIFICATION_DATETIME_ID)
properties.append(Property(contentModificationPropertyDefinition, leaf.properties[MODIFICATION_DATETIME_ID].value))
contentSizeDefinition = self._itemFactory.getPropertyDefinition(CONTENT_SIZE_ID)
properties.append(Property(contentSizeDefinition, leaf.properties[SIZE_ID].value))
dataFormatPropertyDefinition = self._itemFactory.getPropertyDefinition(DATA_FORMAT_ID)
properties.append(Property(dataFormatPropertyDefinition, leaf.dataFormat.name))
if not self._determinePropertiesCallback is None:
properties.extend(self._determinePropertiesCallback(leaf))
if not self._defaultProperties is None:
properties.extend(self._defaultProperties)
return properties
def _importCollection(self, collection):
""" Retrieves the content of a collection. """
importName = self._determineImportName(collection)
importedCollection = self._itemFactory.createCollection(importName, self._pwd)
properties = collection.properties.values()[:]
if not self._determinePropertiesCallback is None:
properties.extend(self._determinePropertiesCallback(collection))
if not self._defaultProperties is None:
properties.extend(self._defaultProperties)
try:
importedCollection.create(properties)
except CoreError, error:
importedCollection.invalidate()
raise error
self._pwd = importedCollection
handle = VisitSlot((_importLink, [ItemLink]),
(_importLeaf, [ItemLeaf]),
(_importCollection, [ItemCollection]),
(lambda self, _: None, [ItemRoot]))
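# Usage sketch (hedged): `sourceItem` and `targetCollection` are assumed to be
# items obtained from source and target repositories; they are not defined in
# this module.
#
#     importer = Importer()
#     importer.performImport(sourceItem, targetCollection,
#                            newSourceName=u"copyOfSource", ignoreLinks=True)
#     print "%d leafs imported" % len(importer.importedLeafs)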
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/visitor/importer.py",
"copies": "1",
"size": "12969",
"license": "bsd-3-clause",
"hash": 6514658418624894000,
"line_mean": 46.7556390977,
"line_max": 127,
"alpha_frac": 0.6469272881,
"autogenerated": false,
"ratio": 4.769768297168077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5916695585268077,
"avg_score": null,
"num_lines": null
} |
"""
Contains constants to be used by DataFinder classes.
"""
import os
__version__ = "$Revision-Id:$"
DATAMODEL_FILENAME = "datamodel.xml"
DATASTORE_FILENAME = "datastores.xml"
ICON_DIRECTORYNAME = "icons"
SCRIPT_DIRECTORYNAME = "scripts"
# Path to the working directory
WORKING_PATH = os.path.join(os.path.expanduser("~"), ".datafinder")
# DataFinder install directory
INSTALLATION_HOME = os.environ.get("DF_HOME")
# Installed image files
if not INSTALLATION_HOME is None:
LOCAL_INSTALLED_ICONS_DIRECTORY_PATH = os.path.abspath(os.path.join(INSTALLATION_HOME, "resources", "images"))
else:
LOCAL_INSTALLED_ICONS_DIRECTORY_PATH = ""
# version number
VERSION = ""
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/configuration/constants.py",
"copies": "1",
"size": "2410",
"license": "bsd-3-clause",
"hash": 6810543078689737000,
"line_mean": 33.9701492537,
"line_max": 114,
"alpha_frac": 0.7369294606,
"autogenerated": false,
"ratio": 3.795275590551181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997844373253944,
"avg_score": 0.010752263722348098,
"num_lines": 67
} |
"""
This module implements the visitor pattern in a reusable way by providing an abstract base class
upon which concrete implementations can be build.
"""
from datafinder.core.item.base import ItemBase
from datafinder.core.item.collection import ItemRoot, ItemCollection
from datafinder.core.item.leaf import ItemLeaf
from datafinder.core.item.link import ItemLink
__version__ = "$Revision-Id:$"
class VisitSlot(object):
"""
    This class implements a dispatcher for implementing the visitor pattern.
    Its constructor takes an arbitrary number of handler entries, each a tuple
    of a handler method (i.e. a callable) and the list of class types that can
    be handled by the respective method. If you pass in a value for the keyword
    parameter C{inherits}, the dispatcher will try to call the inherited method
    if it cannot find any suitable handler in its own list.
"""
def __init__(self, *handlers, **kw):
"""
Constructor. Takes a list of handlers and optionally the name of a
method of any base class to use as fallback.
        @param handlers: Describes the handlers: a callable and the list of accepted classes,
                         e.g. (_walkItem, [ItemBase]), (_walkCollection, [ItemCollection])
        @type handlers: Tuples of (callable, list of classes).
@param inherits: The handler to call on the base class if all own handlers fail.
@type inherits: str.
"""
self._inherits = kw.get("inherits", None)
self._handlers = handlers
def __get__(self, instance, owner):
""" Descriptor implementation: __get__. """
def _visitClosure(node, *args, **kw):
clazz = node.__class__
for method, classes in self._handlers:
if clazz in classes:
return method(instance, node, *args, **kw)
if self._inherits:
for superclass in owner.__mro__[1:-1]: # ignore object class
method = superclass.__dict__.get(self._inherits, None)
if isinstance(method, VisitSlot):
try:
return method.__get__(instance, superclass)(node, *args, **kw)
except AttributeError:
continue
raise AttributeError("No matching handler found for %s." % (clazz.__name__))
return _visitClosure
class ItemTreeWalkerBase(object):
"""
This class implements an abstract base for an ItemTreeWalker based on the
L{ItemVisitorBase<datafinder.core.item.visitor.base.ItemVisitorBase>}.
It provides the visit slot C{walk} to walk the tree. This method itself calls the second defined
    slot C{handle} for each node it passes. For the latter one, the implementations for the different
    handled types have to be provided by the derived class.
As the passed tree is usually not binary at all, only the pre- and post-order schemes
for walking the tree are implemented.
@ivar walk: The classical tree walker slot.
"""
def __init__(self, mode=-1, stopTraversalStates=None, stopProcessStates=None):
"""
Constructor.
@param mode: The scheme to traverse the tree (pre- or post-order). Pass C{mode=-1} for
pre-order and C{mode=1} for post-order
@type mode: C{int}
@param stopTraversalStates: List of states that are used to prevent traversal of specific collections. Default: C{None}
@type stopTraversalStates: C{list} of C{unicode}
        @param stopProcessStates: List of states that are used to prevent processing of specific items. Default: C{None}
@type stopProcessStates: C{list} of C{unicode}
"""
super(ItemTreeWalkerBase, self).__init__()
self._mode = mode
self._stopTraversalStates = stopTraversalStates
self._stopProcessStates = stopProcessStates
if self._mode == 0:
raise ValueError("Mode should be -1 (pre-order) or 1 (post-order).")
if self._stopTraversalStates is None:
self._stopTraversalStates = list()
if self._stopProcessStates is None:
self._stopProcessStates = list()
def _walkCollection(self, node, *args, **kw):
"""
Implementation of the visit slot C{walk} for collections and roots.
@param node: The node that should be traversed.
@type node: L{ItemRoot<datafinder.core.item.collection.ItemRoot>} or
L{ItemCollection<datafinder.core.item.collection.ItemCollection>}
"""
if self._mode < 0:
if not node.state in self._stopTraversalStates:
self.handle(node, *args, **kw) # PRE-Order
if not node.state in self._stopTraversalStates:
for child in node.getChildren():
self.walk(child)
if self._mode > 0:
if not node.state in self._stopTraversalStates:
self.handle(node, *args, **kw) # POST-Order
def _walkAny(self, node, *args, **kw):
"""
Implementation of the visit slot C{walk} for leafs and links.
@param node: The leaf or link that should be traversed.
@type node: L{ItemLeaf<datafinder.core.item.leaf.ItemLeaf>} or
L{ItemLink<datafinder.core.item.link.ItemLink>}
"""
if not node.state in self._stopProcessStates:
self.handle(node, *args, **kw)
def _walkBase(self, node, *args, **kw):
"""
Implementation of the visit slot C{walk} for instances of the base item class.
@param node: The instance of the base item.
@type node: The L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
if node.isLink or node.isLeaf:
self._walkAny(node, *args, **kw)
else:
self._walkCollection(node, *args, **kw)
walk = VisitSlot((_walkCollection, [ItemRoot, ItemCollection]),
(_walkAny, [ItemLeaf, ItemLink]),
(_walkBase, [ItemBase]))
handle = VisitSlot()
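# Minimal subclass sketch (hedged): a pre-order walker collecting the path of
# every item it visits. The handler is registered for all concrete item
# classes because VisitSlot matches classes exactly, not via isinstance().
#
#     class PathCollector(ItemTreeWalkerBase):
#         def __init__(self):
#             super(PathCollector, self).__init__(mode=-1)
#             self.paths = list()
#         def _handleAny(self, node):
#             self.paths.append(node.path)
#         handle = VisitSlot((_handleAny, [ItemRoot, ItemCollection,
#                                          ItemLeaf, ItemLink]))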
| {
"repo_name": "DLR-SC/DataFinder",
"path": "src/datafinder/core/item/visitor/base.py",
"copies": "1",
"size": "8128",
"license": "bsd-3-clause",
"hash": -4316065648001340400,
"line_mean": 40.7789473684,
"line_max": 127,
"alpha_frac": 0.6274606299,
"autogenerated": false,
"ratio": 4.3864004317323255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5513861061632326,
"avg_score": null,
"num_lines": null
} |
"""
Operating system independent start script (Win32 and UN*X)
for the DataFinder admin client application.
"""
import locale
import sys
from datafinder.gui.admin import admin_application
__version__ = "$Revision-Id:$"
# set the encoding
encoding = "UTF-8"
if not locale.getdefaultlocale()[1] is None:
encoding = locale.getdefaultlocale()[1]
try:
sys.setdefaultencoding(encoding)
except AttributeError:
if sys.getdefaultencoding() == "ascii":
print("It is required to correctly set default encoding. " + \
"Please see site.py for further details.")
admin_application.main()
| {
"repo_name": "DLR-SC/DataFinder",
"path": "bin/datafinder-admin-client.py",
"copies": "1",
"size": "2371",
"license": "bsd-3-clause",
"hash": 709299695272273500,
"line_mean": 33.9242424242,
"line_max": 72,
"alpha_frac": 0.7326022775,
"autogenerated": false,
"ratio": 3.9516666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5184268944166667,
"avg_score": null,
"num_lines": null
} |
"""
Operating system independent start script (Win32 and UN*X)
for the DataFinder application.
"""
import os
import locale
import sys
from datafinder.gui.user import application
__version__ = "$Revision-Id:$"
dfStart = os.environ.get("DF_START")
profile = os.environ.get("DF_PROFILE")
debug = os.environ.get("DF_DEBUG")
# set the encoding
encoding = "UTF-8"
if not locale.getdefaultlocale()[1] is None:
encoding = locale.getdefaultlocale()[1]
try:
sys.setdefaultencoding(encoding)
except AttributeError:
if sys.getdefaultencoding() == "ascii":
print("It is required to correctly set default encoding. " + \
"Please see site.py for further details.")
if profile:
import cProfile
cProfile.run("application.main()", sort="cumulative")
else:
application.main(dfStart, bool(debug))
| {
"repo_name": "DLR-SC/DataFinder",
"path": "bin/datafinder-client.py",
"copies": "1",
"size": "2591",
"license": "bsd-3-clause",
"hash": -7805308713296167000,
"line_mean": 32.5466666667,
"line_max": 72,
"alpha_frac": 0.7217290621,
"autogenerated": false,
"ratio": 3.8614008941877795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012850201423017026,
"num_lines": 75
} |
# $__FILE__
import collectd
import re
DEBUG = False
def values_to_dict(values):
"""
Convert `collectd.Values` instance to dictionary.
:param values: Instance of `collectd.Values`.
:returns: Dictionary representing `collectd.Values`.
"""
assert isinstance(values, collectd.Values)
values_dict = {'time': values.time,
'interval': values.interval,
'host': values.host,
'plugin': values.plugin,
'plugin_instance': values.plugin_instance,
'type': values.type,
'type_instance': values.type_instance,
'values': values.values}
try:
values_dict['dsname'] = values.dsname
except AttributeError:
values_dict['dsname'] = []
try:
values_dict['dstype'] = values.dstype
except AttributeError:
values_dict['dstype'] = []
try:
values_dict['dsmin'] = values.dsmin
except AttributeError:
values_dict['dsmin'] = []
try:
values_dict['dsmax'] = values.dsmax
except AttributeError:
values_dict['dsmax'] = []
return values_dict
def add_typesdb_info_to_values(values_dict, types_dict=dict()):
"""
Add information from types.db files.
:param values_dict: Dictionary.
:param types_dict: A dictionary containing information from
Collectd's types.db files in the same format as returned
        by `read_typesdb`. If this argument is omitted only
information that can be obtained by calling `collectd.get_dataset()`
is used.
:returns: `collectd.Values` with additional attributes.
Since Collectd 5.5 the Python plugin provides a `get_dataset()`
function that returns information from the types.db files. In this
case `types_dict` does not have to be passed to
`add_typesdb_info_to_values()`. The Python plugin of earlier
Collectd versions does not provide `get_dataset()` and it is
    necessary to read (ideally all) types.db files by calling
    `read_typesdb(path)` for each file (updating the dictionary
    with each call) and passing the resulting dictionary as
    an argument to `add_typesdb_info_to_values()`.
"""
values_dict['dsname'] = []
values_dict['dstype'] = []
values_dict['dsmin'] = []
values_dict['dsmax'] = []
dataset = None
try:
dataset = collectd.get_dataset(values_dict['type'])
except AttributeError:
#
        # collectd.get_dataset() is not yet implemented. Try to get
        # the information from `types_dict`, which holds the information
        # we read from types.db files.
#
try:
dataset = types_dict[values_dict['type']]
except KeyError:
pass
except TypeError, msg:
pass
if dataset:
for (i, value) in enumerate(values_dict['values']):
(dsname, dstype, dsmin, dsmax) = dataset[i]
values_dict['dsname'].append(dsname)
values_dict['dstype'].append(dstype)
values_dict['dsmin'].append(dsmin)
values_dict['dsmax'].append(dsmax)
return values_dict
def read_typesdb(path):
"""
Read a Collectd types.db file.
:param path: Path to types.db file.
    :returns: Dictionary where the keys are the "type" names and the
        values are lists of ```(dsname, dstype, dsmin, dsmax)``` tuples.
        ```dsmin``` and ```dsmax``` are returned as floats, or as the
        character ```U``` if undefined.
    This function should be called for each types.db file,
    updating the dictionary each time.
    >>> types_dict = {}
    >>> types_dict.update(read_typesdb('/usr/share/collectd/types.db'))
    >>> types_dict.update(read_typesdb('/usr/local/share/collectd/types.db'))
    Since Collectd 5.5 the Python plugin implements `collectd.get_dataset()`
    and `read_typesdb()` is no longer required.
"""
types_dict = {}
try:
with open(path) as fp:
for line in fp:
fields = re.split(r'[,\s]+', line.strip())
# Skip comments
if fields[0].startswith('#'):
continue
name = fields[0]
if len(fields) < 2:
collectd.notice("configuration error: %s in %s is missing definition" % (name, path))
continue
types_dict[name] = []
for field in fields[1:]:
fields2 = field.split(':')
if len(fields2) < 4:
collectd.notice("configuration error: %s %s has wrong format" % (name, field))
continue
dsname = fields2[0]
dstype = fields2[1].lower()
dsmin = fields2[2]
dsmax = fields2[3]
if dsmin != 'U':
dsmin = float(fields2[2])
if dsmax != 'U':
dsmax = float(fields2[3])
types_dict[name].append((dsname, dstype, dsmin, dsmax))
collectd.debug("read_types_db: types_dict[%s]=%s" % (name, types_dict[name]))
except IOError, msg:
collectd.notice("configuration error: %s - %s" % (path, msg))
return types_dict
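# Putting it together (hedged sketch): inside a Collectd Python plugin the
# helpers above would typically be combined as below. The types.db paths and
# the plugin wiring are assumptions; adjust them to the local installation.
#
#     TYPES = {}
#     TYPES.update(read_typesdb('/usr/share/collectd/types.db'))
#     TYPES.update(read_typesdb('/usr/local/share/collectd/types.db'))
#
#     def write_callback(values):
#         values_dict = add_typesdb_info_to_values(values_to_dict(values),
#                                                  TYPES)
#         ...  # hand values_dict to a writer
#
#     collectd.register_write(write_callback)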
| {
"repo_name": "mjuenema/collectd-plugins",
"path": "include/collectdlib.py",
"copies": "1",
"size": "5359",
"license": "mit",
"hash": 5666157672841035000,
"line_mean": 28.938547486,
"line_max": 105,
"alpha_frac": 0.5607389438,
"autogenerated": false,
"ratio": 4.135030864197531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5195769807997531,
"avg_score": null,
"num_lines": null
} |
# $__FILE__
import Queue as queue
import threading
import collectd
class BaseWriter(threading.Thread):
"""
Base class for all writers.
:param formatter: Formatter instance.
"""
MAX_BUFFER_SIZE = 1000
"""The maximum size of values in the output buffer."""
def __init__(self, formatter):
collectd.debug("BaseWriter.__init__: formatter=%s, MAX_BUFFER_SIZE=%s" %
(formatter, self.MAX_BUFFER_SIZE))
threading.Thread.__init__(self)
self.buffer = queue.Queue(maxsize=self.MAX_BUFFER_SIZE)
self.formatter = formatter
def shutdown(self):
"""
        `shutdown()` is called when the writer is shut down.
This can be overridden by a derived class.
"""
pass
    def flush(self, values_dict):
        """
        `flush()` will be called by `run()` for each buffered value that must be written.
        :param values_dict: Dictionary representing a `collectd.Values` instance.
        This must be overridden by a derived class.
        """
        raise NotImplementedError
def write(self, values_dict):
        collectd.debug('%s.write: values_dict=%s' % ('$NAME', values_dict))
try:
self.buffer.put_nowait(values_dict)
except queue.Full:
collectd.notice("%s output buffer full" % (self))
def run(self):
collectd.debug("BaseWriter.run")
while True:
try:
values_dict = self.buffer.get(block=True, timeout=0.1)
self.flush(values_dict)
except queue.Empty:
pass
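# Sketch of a concrete writer (hedged): flushes each formatted record to a
# file. The `formatter.format()` call and the output path are assumptions
# made for illustration; the real formatter interface is defined elsewhere.
#
#     class FileWriter(BaseWriter):
#         def __init__(self, formatter, path='/var/tmp/collectd.out'):
#             BaseWriter.__init__(self, formatter)
#             self._fp = open(path, 'a')
#         def flush(self, values_dict):
#             self._fp.write(self.formatter.format(values_dict) + '\n')
#         def shutdown(self):
#             self._fp.close()
#
#     writer = FileWriter(formatter)
#     writer.start()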
| {
"repo_name": "mjuenema/collectd-plugins",
"path": "include/_basewriter.py",
"copies": "1",
"size": "1563",
"license": "mit",
"hash": -7344210796577761000,
"line_mean": 21.9852941176,
"line_max": 86,
"alpha_frac": 0.5649392194,
"autogenerated": false,
"ratio": 4.235772357723577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023962640956988142,
"num_lines": 68
} |
"""$ fio bounds"""
import json
import logging
import click
from cligj import precision_opt, use_rs_opt
import fiona
from fiona.fio.helpers import obj_gen
@click.command(short_help="Print the extent of GeoJSON objects")
@precision_opt
@click.option('--explode/--no-explode', default=False,
help="Explode collections into features (default: no).")
@click.option('--with-id/--without-id', default=False,
help="Print GeoJSON ids and bounding boxes together "
"(default: without).")
@click.option('--with-obj/--without-obj', default=False,
help="Print GeoJSON objects and bounding boxes together "
"(default: without).")
@use_rs_opt
@click.pass_context
def bounds(ctx, precision, explode, with_id, with_obj, use_rs):
"""Print the bounding boxes of GeoJSON objects read from stdin.
Optionally explode collections and print the bounds of their
features.
To print identifiers for input objects along with their bounds
as a {id: identifier, bbox: bounds} JSON object, use --with-id.
To print the input objects themselves along with their bounds
as GeoJSON object, use --with-obj. This has the effect of updating
input objects with {id: identifier, bbox: bounds}.
"""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
stdout = click.get_text_stream('stdout')
try:
source = obj_gen(stdin)
for i, obj in enumerate(source):
obj_id = obj.get('id', 'collection:' + str(i))
xs = []
ys = []
features = obj.get('features') or [obj]
for j, feat in enumerate(features):
                feat_id = feat.get('id', 'feature:' + str(j))
w, s, e, n = fiona.bounds(feat)
if precision > 0:
w, s, e, n = (round(v, precision)
for v in (w, s, e, n))
if explode:
if with_id:
rec = {
'parent': obj_id,
'id': feat_id,
'bbox': (w, s, e, n)}
elif with_obj:
feat.update(parent=obj_id, bbox=(w, s, e, n))
rec = feat
else:
rec = (w, s, e, n)
if use_rs:
click.echo(u'\u001e', nl=False)
click.echo(json.dumps(rec))
else:
xs.extend([w, e])
ys.extend([s, n])
if not explode:
w, s, e, n = (min(xs), min(ys), max(xs), max(ys))
if with_id:
rec = {'id': obj_id, 'bbox': (w, s, e, n)}
elif with_obj:
obj.update(id=obj_id, bbox=(w, s, e, n))
rec = obj
else:
rec = (w, s, e, n)
if use_rs:
click.echo(u'\u001e', nl=False)
click.echo(json.dumps(rec))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
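# Example invocations (hedged; the file name is an assumption):
#
#     $ fio cat example.shp | fio bounds
#     $ fio cat example.shp | fio bounds --explode --with-id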
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/bounds.py",
"copies": "2",
"size": "3279",
"license": "bsd-3-clause",
"hash": 8879716352329089000,
"line_mean": 36.2613636364,
"line_max": 71,
"alpha_frac": 0.4977127173,
"autogenerated": false,
"ratio": 4.008557457212714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 88
} |
"""$ fio cat"""
import json
import logging
import warnings
import click
import cligj
import fiona
from fiona.transform import transform_geom
from fiona.fio import options
warnings.simplefilter('default')
# Cat command
@click.command(short_help="Concatenate and print the features of datasets")
@cligj.files_in_arg
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@click.option('--ignore-errors/--no-ignore-errors', default=False,
help="log errors but do not stop serialization.")
@options.dst_crs_opt
@cligj.use_rs_opt
@click.option('--bbox', default=None, metavar="w,s,e,n",
help="filter for features intersecting a bounding box")
@click.pass_context
def cat(ctx, files, precision, indent, compact, ignore_errors, dst_crs,
use_rs, bbox):
"""Concatenate and print the features of input datasets as a
sequence of GeoJSON features."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
item_sep = compact and ',' or ', '
    try:
        # Parse the bounding box once, before iterating over the input
        # files; `bbox` is reused as a tuple for every dataset.
        if bbox:
            try:
                bbox = tuple(map(float, bbox.split(',')))
            except ValueError:
                bbox = json.loads(bbox)
        with fiona.drivers(CPL_DEBUG=verbosity > 2):
            for path in files:
                with fiona.open(path) as src:
                    for i, feat in src.items(bbox=bbox):
if dst_crs or precision > 0:
g = transform_geom(
src.crs, dst_crs, feat['geometry'],
antimeridian_cutting=True,
precision=precision)
feat['geometry'] = g
feat['bbox'] = fiona.bounds(g)
if use_rs:
click.echo(u'\u001e', nl=False)
click.echo(json.dumps(feat, **dump_kwds))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
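# Example invocations (hedged; file names and coordinates are assumptions):
#
#     $ fio cat one.shp two.shp
#     $ fio cat --bbox 5.0,45.0,10.0,50.0 input.shp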
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/cat.py",
"copies": "1",
"size": "2273",
"license": "bsd-3-clause",
"hash": 6099740639182791000,
"line_mean": 31.9420289855,
"line_max": 75,
"alpha_frac": 0.543774747,
"autogenerated": false,
"ratio": 4.102888086642599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5146662833642599,
"avg_score": null,
"num_lines": null
} |
"""$ fio collect"""
from functools import partial
import json
import logging
import click
import cligj
from fiona.fio import helpers
from fiona.fio import options
from fiona.transform import transform_geom
@click.command(short_help="Collect a sequence of features.")
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@click.option('--record-buffered/--no-record-buffered', default=False,
help="Economical buffering of writes at record, not collection "
"(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
help="log errors but do not stop serialization.")
@options.src_crs_opt
@click.option('--with-ld-context/--without-ld-context', default=False,
help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
help="map a term to a URI and add it to the output's JSON LD "
"context.")
@click.option('--parse/--no-parse', default=True,
help="load and dump the geojson feature (default is True)")
@click.pass_context
def collect(ctx, precision, indent, compact, record_buffered, ignore_errors,
src_crs, with_ld_context, add_ld_context_item, parse):
"""Make a GeoJSON feature collection from a sequence of GeoJSON
features and print it."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
sink = click.get_text_stream('stdout')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
item_sep = compact and ',' or ', '
if src_crs:
if not parse:
raise click.UsageError("Can't specify --src-crs with --no-parse")
transformer = partial(transform_geom, src_crs, 'EPSG:4326',
antimeridian_cutting=True, precision=precision)
else:
transformer = lambda x: x
first_line = next(stdin)
# If parsing geojson
if parse:
# If input is RS-delimited JSON sequence.
if first_line.startswith(u'\x1e'):
def feature_text_gen():
buffer = first_line.strip(u'\x1e')
for line in stdin:
if line.startswith(u'\x1e'):
if buffer:
feat = json.loads(buffer)
feat['geometry'] = transformer(feat['geometry'])
yield json.dumps(feat, **dump_kwds)
buffer = line.strip(u'\x1e')
else:
buffer += line
else:
feat = json.loads(buffer)
feat['geometry'] = transformer(feat['geometry'])
yield json.dumps(feat, **dump_kwds)
else:
def feature_text_gen():
feat = json.loads(first_line)
feat['geometry'] = transformer(feat['geometry'])
yield json.dumps(feat, **dump_kwds)
for line in stdin:
feat = json.loads(line)
feat['geometry'] = transformer(feat['geometry'])
yield json.dumps(feat, **dump_kwds)
# If *not* parsing geojson
else:
# If input is RS-delimited JSON sequence.
if first_line.startswith(u'\x1e'):
def feature_text_gen():
buffer = first_line.strip(u'\x1e')
for line in stdin:
if line.startswith(u'\x1e'):
if buffer:
yield buffer
buffer = line.strip(u'\x1e')
else:
buffer += line
else:
yield buffer
else:
def feature_text_gen():
yield first_line
for line in stdin:
yield line
try:
source = feature_text_gen()
if record_buffered:
# Buffer GeoJSON data at the feature level for smaller
# memory footprint.
indented = bool(indent)
rec_indent = "\n" + " " * (2 * (indent or 0))
collection = {
'type': 'FeatureCollection',
'features': []}
if with_ld_context:
collection['@context'] = helpers.make_ld_context(
add_ld_context_item)
head, tail = json.dumps(collection, **dump_kwds).split('[]')
sink.write(head)
sink.write("[")
# Try the first record.
try:
i, first = 0, next(source)
if with_ld_context:
first = helpers.id_record(first)
if indented:
sink.write(rec_indent)
sink.write(first.replace("\n", rec_indent))
except StopIteration:
pass
except Exception as exc:
# Ignoring errors is *not* the default.
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
# Log error and close up the GeoJSON, leaving it
# more or less valid no matter what happens above.
logger.critical(
"failed to serialize file record %d (%s), "
"quiting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Because trailing commas aren't valid in JSON arrays
# we'll write the item separator before each of the
# remaining features.
for i, rec in enumerate(source, 1):
try:
if with_ld_context:
rec = helpers.id_record(rec)
if indented:
sink.write(rec_indent)
sink.write(item_sep)
sink.write(rec.replace("\n", rec_indent))
except Exception as exc:
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
logger.critical(
"failed to serialize file record %d (%s), "
"quiting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Close up the GeoJSON after writing all features.
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
else:
# Buffer GeoJSON data at the collection level. The default.
collection = {
'type': 'FeatureCollection',
'features': []}
if with_ld_context:
collection['@context'] = helpers.make_ld_context(
add_ld_context_item)
head, tail = json.dumps(collection, **dump_kwds).split('[]')
sink.write(head)
sink.write("[")
sink.write(",".join(source))
sink.write("]")
sink.write(tail)
sink.write("\n")
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
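# Example invocation (hedged; file names are assumptions): concatenate two
# datasets and collect the features into one indented feature collection.
#
#     $ fio cat one.shp two.shp | fio collect --indent 2 > merged.geojson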
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/collect.py",
"copies": "2",
"size": "7908",
"license": "bsd-3-clause",
"hash": 1833212056278169600,
"line_mean": 35.7813953488,
"line_max": 78,
"alpha_frac": 0.4782498735,
"autogenerated": false,
"ratio": 4.715563506261181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013679890560875513,
"num_lines": 215
} |
"""$ fio distrib"""
import json
import logging
import click
import cligj
from fiona.fio import helpers
@click.command()
@cligj.use_rs_opt
@click.pass_context
def distrib(ctx, use_rs):
"""Distribute features from a collection.
Print the features of GeoJSON objects read from stdin.
"""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
try:
source = helpers.obj_gen(stdin)
for i, obj in enumerate(source):
obj_id = obj.get('id', 'collection:' + str(i))
features = obj.get('features') or [obj]
for j, feat in enumerate(features):
if obj.get('type') == 'FeatureCollection':
feat['parent'] = obj_id
                feat_id = feat.get('id', 'feature:' + str(j))
feat['id'] = feat_id
if use_rs:
click.echo(u'\u001e', nl=False)
click.echo(json.dumps(feat))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
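# Example invocation (hedged; the file name is an assumption): `fio dump`
# emits a feature collection which `fio distrib` splits back into a sequence
# of individual features.
#
#     $ fio dump input.shp | fio distrib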
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/distrib.py",
"copies": "2",
"size": "1131",
"license": "bsd-3-clause",
"hash": -452608748002256260,
"line_mean": 26.5853658537,
"line_max": 62,
"alpha_frac": 0.5720601238,
"autogenerated": false,
"ratio": 3.77,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
"""$ fio dump"""
from functools import partial
import json
import logging
import click
import cligj
import fiona
from fiona.fio import helpers
from fiona.transform import transform_geom
@click.command(short_help="Dump a dataset to GeoJSON.")
@click.argument('input', type=click.Path(), required=True)
@click.option('--encoding', help="Specify encoding of the input file.")
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@click.option('--record-buffered/--no-record-buffered', default=False,
help="Economical buffering of writes at record, not collection "
"(default), level.")
@click.option('--ignore-errors/--no-ignore-errors', default=False,
help="log errors but do not stop serialization.")
@click.option('--with-ld-context/--without-ld-context', default=False,
help="add a JSON-LD context to JSON output.")
@click.option('--add-ld-context-item', multiple=True,
help="map a term to a URI and add it to the output's JSON LD "
"context.")
@click.pass_context
def dump(ctx, input, encoding, precision, indent, compact, record_buffered,
ignore_errors, with_ld_context, add_ld_context_item):
"""Dump a dataset either as a GeoJSON feature collection (the default)
or a sequence of GeoJSON features."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
sink = click.get_text_stream('stdout')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
item_sep = compact and ',' or ', '
open_kwds = {}
if encoding:
open_kwds['encoding'] = encoding
def transformer(crs, feat):
tg = partial(transform_geom, crs, 'EPSG:4326',
antimeridian_cutting=True, precision=precision)
feat['geometry'] = tg(feat['geometry'])
return feat
try:
with fiona.drivers(CPL_DEBUG=verbosity > 2):
with fiona.open(input, **open_kwds) as source:
meta = source.meta
meta['fields'] = dict(source.schema['properties'].items())
if record_buffered:
# Buffer GeoJSON data at the feature level for smaller
# memory footprint.
indented = bool(indent)
rec_indent = "\n" + " " * (2 * (indent or 0))
collection = {
'type': 'FeatureCollection',
'fiona:schema': meta['schema'],
'fiona:crs': meta['crs'],
'features': []}
if with_ld_context:
collection['@context'] = helpers.make_ld_context(
add_ld_context_item)
head, tail = json.dumps(
collection, **dump_kwds).split('[]')
sink.write(head)
sink.write("[")
itr = iter(source)
# Try the first record.
try:
i, first = 0, next(itr)
                        first = transformer(source.crs, first)
if with_ld_context:
first = helpers.id_record(first)
if indented:
sink.write(rec_indent)
sink.write(json.dumps(
first, **dump_kwds).replace("\n", rec_indent))
except StopIteration:
pass
except Exception as exc:
# Ignoring errors is *not* the default.
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
# Log error and close up the GeoJSON, leaving it
# more or less valid no matter what happens above.
logger.critical(
"failed to serialize file record %d (%s), "
"quiting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Because trailing commas aren't valid in JSON arrays
# we'll write the item separator before each of the
# remaining features.
for i, rec in enumerate(itr, 1):
                        rec = transformer(source.crs, rec)
try:
if with_ld_context:
rec = helpers.id_record(rec)
if indented:
sink.write(rec_indent)
sink.write(item_sep)
sink.write(json.dumps(
rec, **dump_kwds).replace("\n", rec_indent))
except Exception as exc:
if ignore_errors:
logger.error(
"failed to serialize file record %d (%s), "
"continuing",
i, exc)
else:
logger.critical(
"failed to serialize file record %d (%s), "
"quiting",
i, exc)
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
raise
# Close up the GeoJSON after writing all features.
sink.write("]")
sink.write(tail)
if indented:
sink.write("\n")
else:
# Buffer GeoJSON data at the collection level. The default.
collection = {
'type': 'FeatureCollection',
'fiona:schema': meta['schema'],
'fiona:crs': meta['crs']}
if with_ld_context:
collection['@context'] = helpers.make_ld_context(
add_ld_context_item)
collection['features'] = [
                            helpers.id_record(transformer(source.crs, rec))
for rec in source]
else:
collection['features'] = [
transformer(source.crs, rec) for rec in source]
json.dump(collection, sink, **dump_kwds)
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
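# Example invocations (hedged; file names are assumptions):
#
#     $ fio dump --indent 2 input.shp > input.geojson
#     $ fio dump --record-buffered --precision 6 input.shp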
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/dump.py",
"copies": "1",
"size": "7255",
"license": "bsd-3-clause",
"hash": 8577487071796888000,
"line_mean": 40.2215909091,
"line_max": 79,
"alpha_frac": 0.4372157133,
"autogenerated": false,
"ratio": 5.23071377072819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6167929484028191,
"avg_score": null,
"num_lines": null
} |
"""$ fio filter"""
import json
import logging
import click
from cligj import use_rs_opt
from fiona.fio.helpers import obj_gen, eval_feature_expression
@click.command()
@click.argument('filter_expression')
@use_rs_opt
@click.pass_context
def filter(ctx, filter_expression, use_rs):
"""
Filter GeoJSON features by python expression.
Features are read from stdin.
The expression is evaluated in a restricted namespace containing:
- sum, pow, min, max and the imported math module
- shape (optional, imported from shapely.geometry if available)
- bool, int, str, len, float type conversions
- f (the feature to be evaluated,
allows item access via javascript-style dot notation using munch)
The expression will be evaluated for each feature and, if true,
the feature will be included in the output.
e.g. fio cat data.shp \
| fio filter "f.properties.area > 1000.0" \
| fio collect > large_polygons.geojson
"""
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
try:
source = obj_gen(stdin)
for i, obj in enumerate(source):
features = obj.get('features') or [obj]
for j, feat in enumerate(features):
if not eval_feature_expression(feat, filter_expression):
continue
if use_rs:
click.echo(u'\u001e', nl=False)
click.echo(json.dumps(feat))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/filter.py",
"copies": "1",
"size": "1617",
"license": "bsd-3-clause",
"hash": -8593765650833143000,
"line_mean": 28.4,
"line_max": 78,
"alpha_frac": 0.6345083488,
"autogenerated": false,
"ratio": 4.093670886075949,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228179234875949,
"avg_score": null,
"num_lines": null
} |
"""$ fio info"""
import logging
import json
import click
from cligj import indent_opt
import fiona
import fiona.crs
from fiona.fio import options
@click.command()
# One or more files.
@click.argument('input', type=click.Path(exists=True))
@click.option('--layer', metavar="INDEX|NAME", callback=options.cb_layer,
help="Print information about a specific layer. The first "
"layer is used by default. Layers use zero-based "
"numbering when accessed by index.")
@indent_opt
# Options to pick out a single metadata item and print it as
# a string.
@click.option('--count', 'meta_member', flag_value='count',
help="Print the count of features.")
@click.option('-f', '--format', '--driver', 'meta_member', flag_value='driver',
help="Print the format driver.")
@click.option('--crs', 'meta_member', flag_value='crs',
help="Print the CRS as a PROJ.4 string.")
@click.option('--bounds', 'meta_member', flag_value='bounds',
help="Print the boundary coordinates "
"(left, bottom, right, top).")
@click.option('--name', 'meta_member', flag_value='name',
help="Print the datasource's name.")
@click.pass_context
def info(ctx, input, indent, meta_member, layer):
"""
Print information about a dataset.
When working with a multi-layer dataset the first layer is used by default.
Use the '--layer' option to select a different layer.
"""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
try:
with fiona.drivers(CPL_DEBUG=verbosity > 2):
with fiona.open(input, layer=layer) as src:
info = src.meta
info.update(bounds=src.bounds, name=src.name)
try:
info.update(count=len(src))
except TypeError as e:
info.update(count=None)
logger.debug("Setting 'count' to None/null - layer does "
"not support counting")
proj4 = fiona.crs.to_string(src.crs)
if proj4.startswith('+init=epsg'):
proj4 = proj4.split('=')[1].upper()
info['crs'] = proj4
if meta_member:
if isinstance(info[meta_member], (list, tuple)):
click.echo(" ".join(map(str, info[meta_member])))
else:
click.echo(info[meta_member])
else:
click.echo(json.dumps(info, indent=indent))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
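# Example invocations (hedged; the file name is an assumption):
#
#     $ fio info input.shp
#     $ fio info --count input.shp
#     $ fio info --bounds input.shp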
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/info.py",
"copies": "1",
"size": "2740",
"license": "bsd-3-clause",
"hash": -8525050485044526000,
"line_mean": 36.5342465753,
"line_max": 79,
"alpha_frac": 0.5693430657,
"autogenerated": false,
"ratio": 4.0773809523809526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 73
} |
"""$ fio insp"""
import code
import logging
import sys
import click
import fiona
@click.command(short_help="Open a dataset and start an interpreter.")
@click.argument('src_path', type=click.Path(exists=True))
@click.option('--ipython', 'interpreter', flag_value='ipython',
help="Use IPython as interpreter.")
@click.pass_context
def insp(ctx, src_path, interpreter):
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
banner = 'Fiona %s Interactive Inspector (Python %s)\n' \
'Type "src.schema", "next(src)", or "help(src)" ' \
'for more information.' \
% (fiona.__version__, '.'.join(map(str, sys.version_info[:3])))
try:
with fiona.drivers(CPL_DEBUG=verbosity > 2):
with fiona.open(src_path) as src:
scope = locals()
if not interpreter:
code.interact(banner, local=scope)
elif interpreter == 'ipython':
import IPython
IPython.InteractiveShell.banner1 = banner
IPython.start_ipython(argv=[], user_ns=scope)
else:
raise click.ClickException(
'Interpreter {} is unsupported or missing '
'dependencies'.format(interpreter))
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
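# Illustrative usage (a sketch; the dataset path is an assumption):
#
#   $ fio insp docs/data/test_uk.shp            # plain Python interpreter
#   $ fio insp --ipython docs/data/test_uk.shp  # IPython, if installed
#
# Inside the session 'src' is bound to the open collection.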
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/insp.py",
"copies": "1",
"size": "1482",
"license": "bsd-3-clause",
"hash": -7100327972204072000,
"line_mean": 31.2173913043,
"line_max": 76,
"alpha_frac": 0.5715249663,
"autogenerated": false,
"ratio": 4.234285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""$ fio load"""
from functools import partial
import itertools
import json
import logging
import click
import fiona
from fiona.fio import options
from fiona.transform import transform_geom
FIELD_TYPES_MAP_REV = dict([(v, k) for k, v in fiona.FIELD_TYPES_MAP.items()])
@click.command(short_help="Load GeoJSON to a dataset in another format.")
@click.argument('output', type=click.Path(), required=True)
@click.option('-f', '--format', '--driver', required=True,
help="Output format driver name.")
@options.src_crs_opt
@click.option('--dst-crs', '--dst_crs',
help="Destination CRS. Defaults to --src-crs when not given.")
@click.option('--sequence / --no-sequence', default=False,
              help="Specify whether the input stream is a LF-delimited "
                   "sequence of GeoJSON features or a single GeoJSON "
                   "feature collection (the default).")
@click.option('--layer', metavar="INDEX|NAME", callback=options.cb_layer,
help="Load features into specified layer. Layers use "
"zero-based numbering when accessed by index.")
@click.pass_context
def load(ctx, output, driver, src_crs, dst_crs, sequence, layer):
"""Load features from JSON to a file in another format.
The input is a GeoJSON feature collection or optionally a sequence of
GeoJSON feature objects."""
verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
logger = logging.getLogger('fio')
stdin = click.get_text_stream('stdin')
dst_crs = dst_crs or src_crs
if src_crs and dst_crs and src_crs != dst_crs:
transformer = partial(transform_geom, src_crs, dst_crs,
antimeridian_cutting=True, precision=-1)
else:
transformer = lambda x: x
first_line = next(stdin)
# If input is RS-delimited JSON sequence.
if first_line.startswith(u'\x1e'):
def feature_gen():
buffer = first_line.strip(u'\x1e')
for line in stdin:
if line.startswith(u'\x1e'):
if buffer:
feat = json.loads(buffer)
feat['geometry'] = transformer(feat['geometry'])
yield feat
buffer = line.strip(u'\x1e')
else:
buffer += line
else:
feat = json.loads(buffer)
feat['geometry'] = transformer(feat['geometry'])
yield feat
elif sequence:
def feature_gen():
yield json.loads(first_line)
for line in stdin:
feat = json.loads(line)
feat['geometry'] = transformer(feat['geometry'])
yield feat
else:
def feature_gen():
text = "".join(itertools.chain([first_line], stdin))
for feat in json.loads(text)['features']:
feat['geometry'] = transformer(feat['geometry'])
yield feat
try:
source = feature_gen()
# Use schema of first feature as a template.
# TODO: schema specified on command line?
first = next(source)
schema = {'geometry': first['geometry']['type']}
schema['properties'] = dict([
(k, FIELD_TYPES_MAP_REV.get(type(v)) or 'str')
for k, v in first['properties'].items()])
with fiona.drivers(CPL_DEBUG=verbosity > 2):
with fiona.open(
output, 'w',
driver=driver,
crs=dst_crs,
schema=schema,
layer=layer) as dst:
dst.write(first)
dst.writerecords(source)
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
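# Illustrative usage (a sketch; paths, driver, and CRS are assumptions):
#
#   $ fio cat input.shp | fio load --driver Shapefile --sequence /tmp/out.shp
#   $ fio load --driver GeoJSON --src-crs EPSG:4326 out.json < collection.json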
| {
"repo_name": "perrygeo/Fiona",
"path": "fiona/fio/load.py",
"copies": "1",
"size": "3824",
"license": "bsd-3-clause",
"hash": -3181918525697732600,
"line_mean": 34.738317757,
"line_max": 78,
"alpha_frac": 0.5635460251,
"autogenerated": false,
"ratio": 4.239467849223947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5303013874323946,
"avg_score": null,
"num_lines": null
} |
"""fast, simple packet creation / parsing, with definitions for the
basic TCP/IP protocols.
"""
__author__ = 'Dug Song <dugsong@monkey.org>'
__copyright__ = 'Copyright (c) 2004 Dug Song'
__license__ = 'BSD'
__url__ = 'http://monkey.org/~dugsong/dpkt/'
__version__ = '1.2'
try:
from itertools import izip as _it_izip
except ImportError:
_it_izip = zip
from struct import calcsize as _st_calcsize, \
pack as _st_pack, unpack as _st_unpack, error as _st_error
from re import compile as _re_compile
intchr = _re_compile(r"(?P<int>[0-9]+)(?P<chr>.)")
class MetaPacket(type):
def __new__(cls, clsname, clsbases, clsdict):
if '__hdr__' in clsdict:
st = clsdict['__hdr__']
clsdict['__hdr_fields__'] = [ x[0] for x in st ]
clsdict['__hdr_fmt__'] = clsdict.get('__byte_order__', '>') + \
''.join([ x[1] for x in st ])
clsdict['__hdr_len__'] = _st_calcsize(clsdict['__hdr_fmt__'])
clsdict['__hdr_defaults__'] = \
dict(zip(clsdict['__hdr_fields__'], [ x[2] for x in st ]))
clsdict['__slots__'] = clsdict['__hdr_fields__']
return type.__new__(cls, clsname, clsbases, clsdict)
class Packet(object):
"""Packet class
__hdr__ should be defined as a list of (name, structfmt, default) tuples
__byte_order__ can be set to override the default ('>')
"""
__metaclass__ = MetaPacket
data = ''
def __init__(self, *args, **kwargs):
"""Packet constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- packet buffer to unpack
Optional keyword arguments correspond to packet field names.
"""
if args:
self.unpack(args[0])
else:
for k in self.__hdr_fields__:
setattr(self, k, self.__hdr_defaults__[k])
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __repr__(self):
l = [ '%s=%r' % (k, getattr(self, k))
for k in self.__hdr_defaults__
if getattr(self, k) != self.__hdr_defaults__[k] ]
if self.data:
l.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return self.pack_hdr() + str(self.data)
def pack_hdr(self):
"""Return packed header string."""
try:
return _st_pack(self.__hdr_fmt__,
*[ getattr(self, k) for k in self.__hdr_fields__ ])
except _st_error:
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
return _st_pack(self.__hdr_fmt__, *vals)
def unpack(self, buf):
"""Unpack packet header fields from buf, and set self.data."""
res = list(_st_unpack(self.__hdr_fmt__, buf[:self.__hdr_len__]))
for e, k in enumerate(self.__slots__):
sfmt = self.__hdr__[e][1]
mat = intchr.match(sfmt)
if mat and mat.group('chr') != 's':
cnt = int(mat.group('int'))
setattr(self, k, list(res[:cnt]))
del res[:cnt]
else:
if sfmt[-1] == 's':
i = res[0].find('\x00')
if i != -1:
res[0] = res[0][:i]
setattr(self, k, res[0])
del res[0]
assert len(res) == 0
self.data = buf[self.__hdr_len__:]
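# --- Illustrative example (not part of the original module) ---
# A minimal Packet subclass showing the (name, struct-format, default)
# tuples that MetaPacket consumes; big-endian ('>') is the default order.
class _ExampleEcho(Packet):
    __hdr__ = (
        ('type', 'B', 8),   # one unsigned byte, default 8
        ('code', 'B', 0),   # one unsigned byte
        ('sum', 'H', 0),    # unsigned short, packed big-endian
    )
# _ExampleEcho(type=8) builds a packet from defaults, str(...) packs the
# 4-byte header, and _ExampleEcho(buf) unpacks a buffer back into fields.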
# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
__vis_filter = """................................ !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~................................................................................................................................."""
def hexdump(buf, length=16):
"""Return a hexdump output string of the given buffer."""
n = 0
res = []
while buf:
line, buf = buf[:length], buf[length:]
hexa = ' '.join(['%02x' % ord(x) for x in line])
line = line.translate(__vis_filter)
res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
n += length
return '\n'.join(res)
def in_cksum_add(s, buf):
"""in_cksum_add(cksum, buf) -> cksum
Return accumulated Internet checksum.
"""
nleft = len(buf)
i = 0
while nleft > 1:
s += ord(buf[i]) * 256 + ord(buf[i+1])
i += 2
nleft -= 2
if nleft:
s += ord(buf[i]) * 256
return s
def in_cksum_done(s):
"""Fold and return Internet checksum."""
while (s >> 16):
s = (s >> 16) + (s & 0xffff)
return (~s & 0xffff)
def in_cksum(buf):
"""Return computed Internet checksum."""
return in_cksum_done(in_cksum_add(0, buf))
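# Illustrative self-check (not part of the original module): summing two
# 0x0001 words gives 2, whose one's-complement fold is 0xfffd.
assert in_cksum('\x00\x01\x00\x01') == 0xfffd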
try:
import psyco
psyco.bind(in_cksum)
psyco.bind(Packet)
except ImportError:
pass
| {
"repo_name": "eriknstr/ThinkPad-FreeBSD-setup",
"path": "FreeBSD/tests/sys/opencrypto/dpkt.py",
"copies": "2",
"size": "5223",
"license": "isc",
"hash": -3959888973692892000,
"line_mean": 31.64375,
"line_max": 278,
"alpha_frac": 0.4826727934,
"autogenerated": false,
"ratio": 3.550645819170632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5033318612570632,
"avg_score": null,
"num_lines": null
} |
#!/usr/bin/env python
# $FreeBSD$
from __future__ import print_function
"""
This script parses each "meta" file and extracts the
information needed to deduce build and src dependencies.
It works much the same as the original shell script, but is
*much* more efficient.
The parsing work is handled by the class MetaFile.
We only pay attention to a subset of the information in the
"meta" files. Specifically:
'CWD' to initialize our notion.
'C' to track chdir(2) on a per process basis
'R' files read are what we really care about.
directories read, provide a clue to resolving
subsequent relative paths. That is if we cannot find
them relative to 'cwd', we check relative to the last
dir read.
'W' files opened for write or read-write,
for filemon V3 and earlier.
'E' files executed.
'L' files linked
'V' the filemon version, this record is used as a clue
that we have reached the interesting bit.
"""
"""
RCSid:
$Id: meta2deps.py,v 1.19 2016/04/02 20:45:40 sjg Exp $
Copyright (c) 2011-2013, Juniper Networks, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, re, sys
def getv(dict, key, d=None):
"""Lookup key in dict and return value or the supplied default."""
if key in dict:
return dict[key]
return d
def resolve(path, cwd, last_dir=None, debug=0, debug_out=sys.stderr):
"""
Return an absolute path, resolving via cwd or last_dir if needed.
"""
if path.endswith('/.'):
path = path[0:-2]
if len(path) > 0 and path[0] == '/':
return path
if path == '.':
return cwd
if path.startswith('./'):
return cwd + path[1:]
if last_dir == cwd:
last_dir = None
for d in [last_dir, cwd]:
if not d:
continue
p = '/'.join([d,path])
if debug > 2:
print("looking for:", p, end=' ', file=debug_out)
if not os.path.exists(p):
if debug > 2:
print("nope", file=debug_out)
p = None
continue
if debug > 2:
print("found:", p, file=debug_out)
return p
return None
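# Illustrative behavior (paths are hypothetical): absolute paths, '.'
# and './' prefixes never touch the filesystem:
#   resolve('/usr/src/lib', '/tmp')  -> '/usr/src/lib'
#   resolve('.', '/tmp')             -> '/tmp'
#   resolve('./foo', '/tmp')         -> '/tmp/foo'
# other relative paths are probed against last_dir, then cwd, and are
# returned only if they exist on disk.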
def abspath(path, cwd, last_dir=None, debug=0, debug_out=sys.stderr):
"""
Return an absolute path, resolving via cwd or last_dir if needed.
this gets called a lot, so we try to avoid calling realpath
until we know we have something.
"""
rpath = resolve(path, cwd, last_dir, debug, debug_out)
if rpath:
path = rpath
if (path.find('/') < 0 or
path.find('./') > 0 or
path.endswith('/..') or
os.path.islink(path)):
return os.path.realpath(path)
return path
def sort_unique(list, cmp=None, key=None, reverse=False):
    """Sort list and drop adjacent duplicates."""
    list.sort(cmp, key, reverse)
    nl = []
    le = None
    for e in list:
        if e == le:
            continue
        le = e
        nl.append(e)
    return nl
def add_trims(x):
return ['/' + x + '/',
'/' + x,
x + '/',
x]
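# Illustrative: add_trims('amd64') == ['/amd64/', '/amd64', 'amd64/', 'amd64'];
# the most specific form comes first so objroot suffixes are trimmed greedily.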
class MetaFile:
"""class to parse meta files generated by bmake."""
conf = None
dirdep_re = None
host_target = None
srctops = []
objroots = []
excludes = []
seen = {}
obj_deps = []
src_deps = []
file_deps = []
def __init__(self, name, conf={}):
"""if name is set we will parse it now.
        conf can have the following keys:
SRCTOPS list of tops of the src tree(s).
CURDIR the src directory 'bmake' was run from.
RELDIR the relative path from SRCTOP to CURDIR
MACHINE the machine we built for.
set to 'none' if we are not cross-building.
More specifically if machine cannot be deduced from objdirs.
TARGET_SPEC
Sometimes MACHINE isn't enough.
HOST_TARGET
when we build for the pseudo machine 'host'
the object tree uses HOST_TARGET rather than MACHINE.
OBJROOTS a list of the common prefix for all obj dirs it might
end in '/' or '-'.
DPDEPS names an optional file to which per file dependencies
will be appended.
For example if 'some/path/foo.h' is read from SRCTOP
then 'DPDEPS_some/path/foo.h +=' "RELDIR" is output.
This can allow 'bmake' to learn all the dirs within
the tree that depend on 'foo.h'
EXCLUDES
A list of paths to ignore.
ccache(1) can otherwise be trouble.
debug desired debug level
debug_out open file to send debug output to (sys.stderr)
"""
self.name = name
self.debug = getv(conf, 'debug', 0)
self.debug_out = getv(conf, 'debug_out', sys.stderr)
self.machine = getv(conf, 'MACHINE', '')
self.machine_arch = getv(conf, 'MACHINE_ARCH', '')
self.target_spec = getv(conf, 'TARGET_SPEC', '')
self.curdir = getv(conf, 'CURDIR')
self.reldir = getv(conf, 'RELDIR')
self.dpdeps = getv(conf, 'DPDEPS')
self.line = 0
if not self.conf:
# some of the steps below we want to do only once
self.conf = conf
self.host_target = getv(conf, 'HOST_TARGET')
for srctop in getv(conf, 'SRCTOPS', []):
if srctop[-1] != '/':
srctop += '/'
if not srctop in self.srctops:
self.srctops.append(srctop)
_srctop = os.path.realpath(srctop)
if _srctop[-1] != '/':
_srctop += '/'
if not _srctop in self.srctops:
self.srctops.append(_srctop)
trim_list = add_trims(self.machine)
if self.machine == 'host':
trim_list += add_trims(self.host_target)
if self.target_spec:
trim_list += add_trims(self.target_spec)
for objroot in getv(conf, 'OBJROOTS', []):
for e in trim_list:
if objroot.endswith(e):
# this is not what we want - fix it
objroot = objroot[0:-len(e)]
if e.endswith('/'):
objroot += '/'
if not objroot in self.objroots:
self.objroots.append(objroot)
_objroot = os.path.realpath(objroot)
if objroot[-1] == '/':
_objroot += '/'
if not _objroot in self.objroots:
self.objroots.append(_objroot)
# we want the longest match
self.srctops.sort(reverse=True)
self.objroots.sort(reverse=True)
self.excludes = getv(conf, 'EXCLUDES', [])
if self.debug:
print("host_target=", self.host_target, file=self.debug_out)
print("srctops=", self.srctops, file=self.debug_out)
print("objroots=", self.objroots, file=self.debug_out)
print("excludes=", self.excludes, file=self.debug_out)
self.dirdep_re = re.compile(r'([^/]+)/(.+)')
if self.dpdeps and not self.reldir:
if self.debug:
print("need reldir:", end=' ', file=self.debug_out)
if self.curdir:
srctop = self.find_top(self.curdir, self.srctops)
if srctop:
self.reldir = self.curdir.replace(srctop,'')
if self.debug:
print(self.reldir, file=self.debug_out)
if not self.reldir:
self.dpdeps = None # we cannot do it?
self.cwd = os.getcwd() # make sure this is initialized
self.last_dir = self.cwd
if name:
self.try_parse()
def reset(self):
"""reset state if we are being passed meta files from multiple directories."""
self.seen = {}
self.obj_deps = []
self.src_deps = []
self.file_deps = []
def dirdeps(self, sep='\n'):
"""return DIRDEPS"""
return sep.strip() + sep.join(self.obj_deps)
def src_dirdeps(self, sep='\n'):
"""return SRC_DIRDEPS"""
return sep.strip() + sep.join(self.src_deps)
def file_depends(self, out=None):
"""Append DPDEPS_${file} += ${RELDIR}
for each file we saw, to the output file."""
if not self.reldir:
return None
for f in sort_unique(self.file_deps):
print('DPDEPS_%s += %s' % (f, self.reldir), file=out)
def seenit(self, dir):
"""rememer that we have seen dir."""
self.seen[dir] = 1
def add(self, list, data, clue=''):
"""add data to list if it isn't already there."""
if data not in list:
list.append(data)
if self.debug:
print("%s: %sAdd: %s" % (self.name, clue, data), file=self.debug_out)
def find_top(self, path, list):
"""the logical tree may be split across multiple trees"""
for top in list:
if path.startswith(top):
if self.debug > 2:
print("found in", top, file=self.debug_out)
return top
return None
def find_obj(self, objroot, dir, path, input):
"""return path within objroot, taking care of .dirdep files"""
ddep = None
for ddepf in [path + '.dirdep', dir + '/.dirdep']:
if not ddep and os.path.exists(ddepf):
ddep = open(ddepf, 'r').readline().strip('# \n')
if self.debug > 1:
print("found %s: %s\n" % (ddepf, ddep), file=self.debug_out)
if ddep.endswith(self.machine):
ddep = ddep[0:-(1+len(self.machine))]
elif self.target_spec and ddep.endswith(self.target_spec):
ddep = ddep[0:-(1+len(self.target_spec))]
if not ddep:
# no .dirdeps, so remember that we've seen the raw input
self.seenit(input)
self.seenit(dir)
if self.machine == 'none':
if dir.startswith(objroot):
return dir.replace(objroot,'')
return None
m = self.dirdep_re.match(dir.replace(objroot,''))
if m:
ddep = m.group(2)
dmachine = m.group(1)
if dmachine != self.machine:
if not (self.machine == 'host' and
dmachine == self.host_target):
if self.debug > 2:
print("adding .%s to %s" % (dmachine, ddep), file=self.debug_out)
ddep += '.' + dmachine
return ddep
def try_parse(self, name=None, file=None):
"""give file and line number causing exception"""
try:
self.parse(name, file)
except:
# give a useful clue
print('{}:{}: '.format(self.name, self.line), end=' ', file=sys.stderr)
raise
def parse(self, name=None, file=None):
"""A meta file looks like:
# Meta data file "path"
CMD "command-line"
CWD "cwd"
TARGET "target"
-- command output --
-- filemon acquired metadata --
# buildmon version 3
V 3
C "pid" "cwd"
E "pid" "path"
F "pid" "child"
R "pid" "path"
W "pid" "path"
X "pid" "status"
D "pid" "path"
L "pid" "src" "target"
M "pid" "old" "new"
S "pid" "path"
# Bye bye
We go to some effort to avoid processing a dependency more than once.
Of the above record types only C,E,F,L,R,V and W are of interest.
"""
version = 0 # unknown
if name:
            self.name = name
if file:
f = file
cwd = self.last_dir = self.cwd
else:
f = open(self.name, 'r')
skip = True
pid_cwd = {}
pid_last_dir = {}
last_pid = 0
self.line = 0
if self.curdir:
self.seenit(self.curdir) # we ignore this
interesting = 'CEFLRV'
for line in f:
self.line += 1
# ignore anything we don't care about
if not line[0] in interesting:
continue
if self.debug > 2:
print("input:", line, end=' ', file=self.debug_out)
w = line.split()
if skip:
if w[0] == 'V':
skip = False
version = int(w[1])
"""
if version < 4:
# we cannot ignore 'W' records
# as they may be 'rw'
interesting += 'W'
"""
elif w[0] == 'CWD':
self.cwd = cwd = self.last_dir = w[1]
self.seenit(cwd) # ignore this
if self.debug:
print("%s: CWD=%s" % (self.name, cwd), file=self.debug_out)
continue
pid = int(w[1])
if pid != last_pid:
if last_pid:
pid_cwd[last_pid] = cwd
pid_last_dir[last_pid] = self.last_dir
cwd = getv(pid_cwd, pid, self.cwd)
self.last_dir = getv(pid_last_dir, pid, self.cwd)
last_pid = pid
# process operations
if w[0] == 'F':
npid = int(w[2])
pid_cwd[npid] = cwd
pid_last_dir[npid] = cwd
last_pid = npid
continue
elif w[0] == 'C':
cwd = abspath(w[2], cwd, None, self.debug, self.debug_out)
if cwd.endswith('/.'):
cwd = cwd[0:-2]
self.last_dir = cwd
if self.debug > 1:
print("cwd=", cwd, file=self.debug_out)
continue
if w[2] in self.seen:
if self.debug > 2:
print("seen:", w[2], file=self.debug_out)
continue
# file operations
if w[0] in 'ML':
# these are special, tread src as read and
# target as write
self.parse_path(w[1].strip("'"), cwd, 'R', w)
self.parse_path(w[2].strip("'"), cwd, 'W', w)
continue
elif w[0] in 'ERWS':
path = w[2]
self.parse_path(path, cwd, w[0], w)
if not file:
f.close()
def parse_path(self, path, cwd, op=None, w=[]):
"""look at a path for the op specified"""
if not op:
op = w[0]
# we are never interested in .dirdep files as dependencies
if path.endswith('.dirdep'):
return
for p in self.excludes:
if p and path.startswith(p):
if self.debug > 2:
print("exclude:", p, path, file=self.debug_out)
return
# we don't want to resolve the last component if it is
# a symlink
path = resolve(path, cwd, self.last_dir, self.debug, self.debug_out)
if not path:
return
dir,base = os.path.split(path)
if dir in self.seen:
if self.debug > 2:
print("seen:", dir, file=self.debug_out)
return
# we can have a path in an objdir which is a link
# to the src dir, we may need to add dependencies for each
rdir = dir
dir = abspath(dir, cwd, self.last_dir, self.debug, self.debug_out)
if rdir == dir or rdir.find('./') > 0:
rdir = None
# now put path back together
path = '/'.join([dir,base])
if self.debug > 1:
print("raw=%s rdir=%s dir=%s path=%s" % (w[2], rdir, dir, path), file=self.debug_out)
if op in 'RWS':
if path in [self.last_dir, cwd, self.cwd, self.curdir]:
if self.debug > 1:
print("skipping:", path, file=self.debug_out)
return
if os.path.isdir(path):
if op in 'RW':
                self.last_dir = path
if self.debug > 1:
print("ldir=", self.last_dir, file=self.debug_out)
return
if op in 'ERW':
# finally, we get down to it
if dir == self.cwd or dir == self.curdir:
return
srctop = self.find_top(path, self.srctops)
if srctop:
if self.dpdeps:
self.add(self.file_deps, path.replace(srctop,''), 'file')
self.add(self.src_deps, dir.replace(srctop,''), 'src')
self.seenit(w[2])
self.seenit(dir)
if rdir and not rdir.startswith(srctop):
dir = rdir # for below
rdir = None
else:
return
objroot = None
for dir in [dir,rdir]:
if not dir:
continue
objroot = self.find_top(dir, self.objroots)
if objroot:
break
if objroot:
ddep = self.find_obj(objroot, dir, path, w[2])
if ddep:
self.add(self.obj_deps, ddep, 'obj')
else:
# don't waste time looking again
self.seenit(w[2])
self.seenit(dir)
def main(argv, klass=MetaFile, xopts='', xoptf=None):
"""Simple driver for class MetaFile.
Usage:
script [options] [key=value ...] "meta" ...
Options and key=value pairs contribute to the
dictionary passed to MetaFile.
-S "SRCTOP"
add "SRCTOP" to the "SRCTOPS" list.
-C "CURDIR"
-O "OBJROOT"
add "OBJROOT" to the "OBJROOTS" list.
-m "MACHINE"
-a "MACHINE_ARCH"
-H "HOST_TARGET"
-D "DPDEPS"
-d bumps debug level
"""
import getopt
# import Psyco if we can
# it can speed things up quite a bit
have_psyco = 0
try:
import psyco
psyco.full()
have_psyco = 1
except:
pass
conf = {
'SRCTOPS': [],
'OBJROOTS': [],
'EXCLUDES': [],
}
try:
machine = os.environ['MACHINE']
if machine:
conf['MACHINE'] = machine
machine_arch = os.environ['MACHINE_ARCH']
if machine_arch:
conf['MACHINE_ARCH'] = machine_arch
srctop = os.environ['SB_SRC']
if srctop:
conf['SRCTOPS'].append(srctop)
objroot = os.environ['SB_OBJROOT']
if objroot:
conf['OBJROOTS'].append(objroot)
except:
pass
debug = 0
output = True
opts, args = getopt.getopt(argv[1:], 'a:dS:C:O:R:m:D:H:qT:X:' + xopts)
for o, a in opts:
if o == '-a':
conf['MACHINE_ARCH'] = a
elif o == '-d':
debug += 1
elif o == '-q':
output = False
elif o == '-H':
conf['HOST_TARGET'] = a
elif o == '-S':
if a not in conf['SRCTOPS']:
conf['SRCTOPS'].append(a)
elif o == '-C':
conf['CURDIR'] = a
elif o == '-O':
if a not in conf['OBJROOTS']:
conf['OBJROOTS'].append(a)
elif o == '-R':
conf['RELDIR'] = a
elif o == '-D':
conf['DPDEPS'] = a
elif o == '-m':
conf['MACHINE'] = a
elif o == '-T':
conf['TARGET_SPEC'] = a
elif o == '-X':
if a not in conf['EXCLUDES']:
conf['EXCLUDES'].append(a)
elif xoptf:
xoptf(o, a, conf)
conf['debug'] = debug
# get any var=val assignments
eaten = []
for a in args:
if a.find('=') > 0:
k,v = a.split('=')
if k in ['SRCTOP','OBJROOT','SRCTOPS','OBJROOTS']:
if k == 'SRCTOP':
k = 'SRCTOPS'
elif k == 'OBJROOT':
k = 'OBJROOTS'
if v not in conf[k]:
conf[k].append(v)
else:
conf[k] = v
eaten.append(a)
continue
break
for a in eaten:
args.remove(a)
debug_out = getv(conf, 'debug_out', sys.stderr)
if debug:
print("config:", file=debug_out)
print("psyco=", have_psyco, file=debug_out)
for k,v in list(conf.items()):
print("%s=%s" % (k,v), file=debug_out)
m = None
for a in args:
if a.endswith('.meta'):
if not os.path.exists(a):
continue
m = klass(a, conf)
elif a.startswith('@'):
            # there can actually be multiple files per line
for line in open(a[1:]):
for f in line.strip().split():
if not os.path.exists(f):
continue
m = klass(f, conf)
if output and m:
print(m.dirdeps())
print(m.src_dirdeps('\nsrc:'))
dpdeps = getv(conf, 'DPDEPS')
if dpdeps:
m.file_depends(open(dpdeps, 'wb'))
return m
if __name__ == '__main__':
try:
main(sys.argv)
except:
# yes, this goes to stdout
print("ERROR: ", sys.exc_info()[1])
raise
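# Illustrative invocation (a sketch; all paths are hypothetical):
#
#   python meta2deps.py -d -S /src/freebsd -O /obj/ -m amd64 \
#       -R lib/libc CURDIR=/src/freebsd/lib/libc *.meta
#
# prints DIRDEPS for the given meta files, followed by SRC_DIRDEPS.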
| {
"repo_name": "TigerBSD/TigerBSD",
"path": "FreeBSD/share/mk/meta2deps.py",
"copies": "2",
"size": "22678",
"license": "isc",
"hash": -1391275719489034000,
"line_mean": 30.7619047619,
"line_max": 97,
"alpha_frac": 0.5050709939,
"autogenerated": false,
"ratio": 3.7935764469722315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011912064913677163,
"num_lines": 714
} |
# $Header$
# $Name$
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
def contourf(*arguments, **kwargs):
"""Call signatures::
contourf(X, Y, C, N, **kwargs)
contourf(X, Y, C, V, **kwargs)
Create a contourf plot of a 2-D llc array (with tricontour).
*C* is the array of color values.
*N* is the number of levels
*V* is a list of levels
*X* and *Y*, specify the (*x*, *y*) coordinates of
the grid points
**kwargs are passed to tricontour.
"""
arglen = len(arguments)
h = []
if arglen >= 3:
data = np.copy(arguments[2].flatten())
x = arguments[0].flatten()
y = arguments[1].flatten()
# Create the Triangulation;
# no triangles so Delaunay triangulation created.
triang = tri.Triangulation(x, y)
ntri = triang.triangles.shape[0]
# Mask off unwanted triangles.
mask = np.where(data[triang.triangles].prod(axis=1)==0., 1, 0)
triang.set_mask(mask)
if arglen == 3:
h = plt.tricontourf(triang, data, **kwargs)
elif arglen == 4:
h = plt.tricontourf(triang, data, arguments[3], **kwargs)
else:
print("wrong number of arguments")
print("need at least 3 or 4 arguments")
sys.exit(__doc__)
# show the triangles for debugging
#plt.triplot(triang, color='0.7')
else:
print("wrong number of arguments")
print("need at least x,y,fld")
sys.exit(__doc__)
return h
def contour(*arguments, **kwargs):
"""Call signatures::
contour(X, Y, C, N, **kwargs)
contour(X, Y, C, V, **kwargs)
Create a contour plot of a 2-D llc array (with tricontour).
*C* is the array of color values.
*N* is the number of levels
*V* is a list of levels
*X* and *Y*, specify the (*x*, *y*) coordinates of
the grid points
**kwargs are passed to tricontour.
"""
arglen = len(arguments)
h = []
if arglen >= 3:
data = arguments[2].flatten()
x = arguments[0].flatten()
y = arguments[1].flatten()
# Create the Triangulation;
# no triangles so Delaunay triangulation created.
triang = tri.Triangulation(x, y)
ntri = triang.triangles.shape[0]
# Mask off unwanted triangles.
mask = np.where(data[triang.triangles].prod(axis=1)==0., 1, 0)
triang.set_mask(mask)
if arglen == 3:
h = plt.tricontour(triang, data, **kwargs)
elif arglen == 4:
h = plt.tricontour(triang, data, arguments[3], **kwargs)
else:
print("wrong number of arguments")
print("need at least 3 or 4 arguments")
sys.exit(__doc__)
# show the triangles for debugging
#plt.triplot(triang, color='0.7')
else:
print("wrong number of arguments")
print("need at least x,y,fld")
sys.exit(__doc__)
return h
def flat(fld, **kwargs):
"""convert mds data into global 2D field
only fields with 2 to 5 dimensions are allowed"""
ndims = len(fld.shape)
if ndims == 2:
gfld = _flat2D(fld, **kwargs)
elif ndims == 3:
gfld = [ _flat2D(fld[a,:,:], **kwargs)
for a in range(fld.shape[0]) ]
elif ndims == 4:
gfld = [ [ _flat2D(fld[a,b,:,:], **kwargs)
for b in range(fld.shape[1]) ]
for a in range(fld.shape[0]) ]
elif ndims == 5:
gfld = [ [ [ _flat2D(fld[a,b,c,:,:], **kwargs)
for c in range(fld.shape[2]) ]
for b in range(fld.shape[1]) ]
for a in range(fld.shape[0]) ]
else:
print("wrong number of dimensions")
print("only 2 to 5 dimensions are allowed")
sys.exit(__doc__)
gfld = np.array(gfld)
return gfld
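# Illustrative shapes (assuming the usual llc layout ny = 13*nx, e.g. nx = 90):
# a 2D mds field of shape (1170, 90) flattens to a global array of shape
# (3*nx + nx//2, 4*nx) = (315, 360); higher-dimensional fields get one such
# global slab per leading index.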
def _flat2D(fld, center='Atlantic'):
"""convert mds 2D data into global 2D field"""
nx = fld.shape[1]
ny = fld.shape[0]
n = ny//nx//4
# eastern and western hemispheres
eastern=np.concatenate((fld[:n*nx,:],fld[n*nx:2*(n*nx)]),axis=1)
tmp = fld[2*(n*nx)+nx:, ::-1]
western=np.concatenate((tmp[2::n,:].transpose(),
tmp[1::n,:].transpose(),
tmp[0::n,:].transpose()))
# Arctic face is special
arctic = fld[2*(n*nx):2*(n*nx)+nx,:]
arctice = np.concatenate((np.triu(arctic[::-1,:nx//2].transpose()),
np.zeros((nx//2,nx))),axis=1)
# arcticw = np.concatenate((arctic[:,nx:nx//2-1:-1].transpose(),
# np.zeros((nx//2,nx//2)),
# arctic[nx:nx//2-1:-1,nx//2-1::-1]),axis=1)
mskr = np.tri(nx//2)[::-1,:]
arcticw = np.concatenate((arctic[0:nx//2,nx:nx//2-1:-1].transpose(),
arctic[nx//2:nx,nx:nx//2-1:-1].transpose()*mskr,
np.triu(arctic[nx:nx//2-1:-1,nx:nx//2-1:-1]),
arctic[nx:nx//2-1:-1,nx//2-1::-1]*mskr),axis=1)
#
if center == 'Pacific':
gfld = np.concatenate( ( np.concatenate((eastern,arctice)),
np.concatenate((western,arcticw)) ), axis=1)
else:
gfld = np.concatenate( ( np.concatenate((western,arcticw)),
np.concatenate((eastern,arctice)) ), axis=1)
return gfld
def _mds2D(fld,center='Atlantic'):
"""convert global 2D 'flat field' to mds 2D data"""
ni = fld.shape[-1]
nj = fld.shape[-2]
nx = ni//4
ny = nx*(3*4+1)
n = ny//nx//4
# arctic face
arcticw = fld[n*nx:,:nx]
arctice = fld[n*nx:,2*nx:3*nx]
arctic = np.concatenate((arctice,arcticw[::-1,::-1]),axis=0)
# eastern and western hemispheres
eastern=fld[:n*nx,2*nx:]
# this is tricky
western=fld[:n*nx,:2*nx]
mdsfld = np.concatenate((eastern[:,:nx],
eastern[:,nx:],
arctic[:,::-1].transpose(),
western[::-1,:].transpose().reshape((2*n*nx,nx))),
axis=0)
return mdsfld
def mds(fld,center='Atlantic'):
"""convert global 'flat field in mds data;
only fields with 2 to 5 dimensions are allowed"""
ndims = len(fld.shape)
    if ndims == 2:
        mdsfld = _mds2D(fld, center=center)
    elif ndims == 3:
        mdsfld = [ _mds2D(fld[a,:,:], center=center)
                   for a in range(fld.shape[0]) ]
    elif ndims == 4:
        mdsfld = [ [ _mds2D(fld[a,b,:,:], center=center)
                     for b in range(fld.shape[1]) ]
                   for a in range(fld.shape[0]) ]
    elif ndims == 5:
        mdsfld = [ [ [ _mds2D(fld[a,b,c,:,:], center=center)
                       for c in range(fld.shape[2]) ]
                     for b in range(fld.shape[1]) ]
                   for a in range(fld.shape[0]) ]
else:
print("wrong number of dimensions")
print("only 2 to 5 dimensions are allowed")
sys.exit(__doc__)
mdsfld = np.array(mdsfld)
return mdsfld
def faces(fld):
"""convert mds multidimensional data into a list with 6 faces"""
ndim = len(fld.shape)
if ndim == 2:
f = _faces2D(fld)
else:
# use list for dynamical memory allocation, because it is fast
if ndim == 3:
ff = []
nk = fld.shape[0]
for k in range(nk):
fld2D = fld[k,:,:]
f2D = _faces2D(fld2D)
ff.append(f2D)
elif ndim == 4:
ff = []
nk = fld.shape[1]
nl = fld.shape[0]
for l in range(nl):
for k in range(nk):
fld2D = fld[l,k,:,:]
f2D = _faces2D(fld2D)
ff.append(f2D)
elif ndim == 5:
ff = []
nk = fld.shape[2]
nl = fld.shape[1]
nm = fld.shape[0]
for m in range(nm):
for l in range(nl):
for k in range(nk):
fld2D = fld[m,l,k,:,:]
f2D = _faces2D(fld2D)
ff.append(f2D)
# permute list indices so that face index is the first
ff = np.transpose(ff)
f = []
for listIndex in range(len(ff)):
# for each face turn list into array, glue together and reshape
nx = ff[listIndex][0].shape[-1]
ny = ff[listIndex][0].shape[-2]
if ndim == 3: rshp = (nk,ny,nx)
elif ndim == 4: rshp = (nl,nk,ny,nx)
elif ndim == 5: rshp = (nm,nl,nk,ny,nx)
f.append(np.concatenate(np.array(ff[listIndex])).reshape(rshp))
return f
def faces2mds(ff):
"""convert 6 faces to mds 2D data,
    inverse operation of llc.faces"""
ndims = len(ff[0].shape)
shp = list(ff[0].shape)
shp[-2]=2*ff[0].shape[-2]
wd = np.concatenate( (ff[3],ff[4]),axis=-2 ).reshape(shp)
f = np.concatenate( (ff[0],ff[1],ff[2],wd),axis=-2)
return f
def _faces2D(fld):
"""convert mds 2D data into a list with 6 faces"""
nx = fld.shape[-1]
ny = fld.shape[-2]
n = ny//nx//4
# divide into faces
f = []
f.append(fld[:n*nx,:])
f.append(fld[n*nx:2*(n*nx),:])
# arctic face
f.append(fld[2*(n*nx):2*(n*nx)+nx,:])
# western hemisphere
wd = fld[2*(n*nx)+nx:,:].reshape(2*nx,n*nx)
f.append(wd[:nx,:])
f.append(wd[nx:,:])
# pseudo-sixth face
f.append(np.zeros((nx,nx)))
return f
def _sqCoord(a):
b = np.copy(np.squeeze(a))
# it appears to be important, that here we do not mask the array
# but reset zeros to NaN (only used for coordinate arrays!!!)
# b = np.ma.masked_where(b==0., b)
# b[b==0.] = np.NaN
return b
def _sqData(a):
b = np.copy(np.squeeze(a))
# it appears to be important, that here we do not mask the array
# but reset zeros to NaN (only used for coordinate arrays!!!)
b = np.ma.masked_where(b==0., b)
b = np.ma.masked_where(np.isnan(b), b)
return b
def pcol(*arguments, **kwargs):
"""Call signatures::
pcol(X, Y, C, **kwargs)
pcol(X, Y, C, m, **kwargs)
Create a pseudo-color plot of a 2-D llc array (with plt.pcolormesh).
*m* if given is the map projection to use
NOTE: currently not all projections work
*C* is the array of color values.
*X* and *Y*, specify the (*x*, *y*) coordinates of
the grid point corners (G-points)
**kwargs are passed to plt.pcolormesh.
"""
arglen = len(arguments)
h = []
mapit = False
if arglen < 3:
print("wrong number of arguments")
print("need at least x,y,fld")
sys.exit(__doc__)
elif arglen > 3:
mapit = True
m = arguments[3]
if mapit:
# not all projections work, catch few of these here
if ( (m.projection == 'hammer') |
(m.projection == 'robin') |
(m.projection == 'moll') |
(m.projection == 'cea') ):
sys.exit("selected projection '"+m.projection
+"' is not supported")
# these projections use simple code for the Arctic face;
        # all others require more complicated methods
stereographicProjection = (m.projection == 'npaeqd') | \
(m.projection == 'spaeqd') | \
(m.projection == 'nplaea') | \
(m.projection == 'splaea') | \
(m.projection == 'npstere') | \
(m.projection == 'spstere') | \
(m.projection == 'stere')
else:
stereographicProjection = False
xg = arguments[0]
yg = arguments[1]
data = arguments[2]
nx = data.shape[-1]
ny = data.shape[-2]
n = ny//nx//4
# color range
cax = [data.min(),data.max()]
# overwrite if necessary
if 'vmin' in kwargs: cax[0] = kwargs.pop('vmin','')
if 'vmax' in kwargs: cax[1] = kwargs.pop('vmax','')
# divide into faces
f0 = []
f0.append(faces(xg))
f0.append(faces(yg))
f0.append(faces(data))
# fill holes in coordinate arrays
# for t in [0,1,3,4]:
# inan = f0[2][t]==0 # _sqCoord(f0[2][t])==np.NaN]
# f0[0][t][inan]=np.NaN
# f0[1][t][inan]=np.NaN
# for t in [0,1]:
# for i in range(nx):
# for j in range(n*nx):
# if f0[0][t][j,i]==0:f0[0][t][100,i]
# if f0[1][t][j,i]==0:f0[1][t][100,i]
#
# for t in [3,4]:
# for i in range(n*nx):
# for j in range(nx):
# if f0[0][t][j,i]==0:f0[0][t][j,239]
# if f0[1][t][j,i]==0:f0[1][t][j,239]
# find the missing corners by interpolation
fo = []
fo.append( (f0[0][0][-1,0]+f0[0][2][-1,0]+f0[0][4][-1,0])/3. )
    fo.append( (f0[1][0][-1,0]+f0[1][2][-1,0]+f0[1][4][-1,0])/3. )  # faces 0, 2, 4, matching the x-coordinate line above
fo.append( np.NaN )
fe = []
fe.append( (f0[0][1][0,-1]+f0[0][3][0,-1])/2. )
fe.append( (f0[1][1][0,-1]+f0[1][3][0,-1])/2. )
fe.append( np.NaN )
f = np.copy(f0)
# fill some gaps at the face boundaries
for t in [0,2,4]:
tp = 2*(t//2)
tpp = tp
if tp==4: tpp = tp-6
for k in [0,1,2]:
tp = min(tp,3)
f[k][t] = np.concatenate((f0[k][t],f0[k][1+tp][:,:1]),axis=1)
if k==2: tmp = np.atleast_2d(np.append(f0[k][2+tpp][::-1,:1],fo[k]))
else: tmp = np.atleast_2d(np.append(fo[k],f0[k][2+tpp][::-1,:1]))
f[k][t] = np.concatenate((f[k][t],tmp),axis=0)
for t in [1,3]:
tp = 2*(t//2)
for k in [0,1,2]:
f[k][t] = np.concatenate((f0[k][t],f0[k][2+tp][:1,:]),axis=0)
if k==2: tmp = np.atleast_2d(np.append(f0[k][3+tp][:1,::-1],fe[k]))
else: tmp = np.atleast_2d(np.append(fe[k],f0[k][3+tp][:1,::-1]))
f[k][t] = np.concatenate((f[k][t],tmp.transpose()),axis=1)
# we do not really have a sixth face so we overwrite the southernmost row
    # of faces 4 and 5 by a hack:
for t in [3,4]:
f[0][t][:,-1] = f[0][t][:,-2]
f[1][t][:,-1] = -90. # degree = south pole
# make sure that only longitudes of one sign are on individual lateral faces
i0 = f[0][3]<0.
f[0][3][i0] = f[0][3][i0]+360.
# plot the lateral faces
ph = []
for t in [0,1,3,4]:
if mapit: x, y = m(_sqCoord(f[0][t]), _sqCoord(f[1][t]))
else: x, y = _sqCoord(f[0][t]), _sqCoord(f[1][t])
ph.append(plt.pcolormesh(x,y,_sqData(f[2][t]), **kwargs))
# plot more lateral faces to be able to select the longitude range later
for t in [1,3,4]:
f[0][t] = f[0][t]+ (-1)**t*360.
if mapit: x, y = m(_sqCoord(f[0][t]), _sqCoord(f[1][t]))
else: x, y = _sqCoord(f[0][t]), _sqCoord(f[1][t])
ph.append(plt.pcolormesh(x,y,_sqData(f[2][t]), **kwargs))
# Arctic face is special, because of the rotation of the grid by
# rangle = 7deg (seems to be the default)
t = 2
if mapit & stereographicProjection:
x, y = m(_sqCoord(f[0][t]),_sqCoord(f[1][t]))
ph.append(plt.pcolormesh(x,y,_sqData(f[2][t]), **kwargs))
else:
rangle = 7.
# first half of Arctic tile
nn = nx//2+1
xx = np.copy(f[0][t][:nn,:])
yy = np.copy(f[1][t][:nn,:])
zz = np.copy(f[2][t][:nn,:])
xx = np.where(xx<rangle,xx+360,xx)
if mapit: x, y = m(_sqCoord(xx),_sqCoord(yy))
else: x, y = _sqCoord(xx),_sqCoord(yy)
ph.append(plt.pcolormesh(x,y,_sqData(zz), **kwargs))
# repeat for xx-360
xx = xx-360.
if mapit: x, y = m(_sqCoord(xx),_sqCoord(yy))
else: x, y = _sqCoord(xx),_sqCoord(yy)
ph.append(plt.pcolormesh(x,y,_sqData(zz), **kwargs))
# second half of Arctic tile
nn = nx//2-1
xx = np.copy(f[0][t][nn:,:])
yy = np.copy(f[1][t][nn:,:])
zz = np.copy(f[2][t][nn:,:])
# need to mask some zz-values so that there is no erroneous wrap-around
zz = np.ma.masked_where(xx>rangle,zz)
xx = np.where(xx>rangle,np.nan,xx)
#
if mapit: x, y = m(_sqCoord(xx),_sqCoord(yy))
else: x, y = _sqCoord(xx),_sqCoord(yy)
ph.append(plt.pcolormesh(x,y,_sqData(zz), **kwargs))
# repeat for xx+360
xx = xx + 360.
if mapit: x, y = m(_sqCoord(xx),_sqCoord(yy))
else: x, y = _sqCoord(xx),_sqCoord(yy)
ph.append(plt.pcolormesh(x,y,_sqData(zz), **kwargs))
if not mapit:
plt.xlim([-170,190])
plt.ylim([-90,90])
for im in ph:
im.set_clim(cax[0],cax[1])
return ph
| {
"repo_name": "altMITgcm/MITgcm66h",
"path": "utils/python/MITgcmutils/MITgcmutils/llc.py",
"copies": "1",
"size": "17054",
"license": "mit",
"hash": -209563794158961020,
"line_mean": 31.2381852552,
"line_max": 80,
"alpha_frac": 0.4966576756,
"autogenerated": false,
"ratio": 3.0491686036116574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4045826279211657,
"avg_score": null,
"num_lines": null
} |
# $Header$
# $Name$
#
# created by mlosch on 2002-08-09
# converted to python by jahn on 2010-04-29
import sys
import numpy as np
__doc__ = """
Density of Sea Water using Jackett and McDougall 1995 (JAOT 12) polynomial
Functions:
dens :: computes in-situ density from salinity, potential temperature
and pressure
"""
# coefficients nonlinear equation of state in pressure coordinates for
# 1. density of fresh water at p = 0
eosJMDCFw = [ 999.842594,
6.793952e-02,
- 9.095290e-03,
1.001685e-04,
- 1.120083e-06,
6.536332e-09,
]
# 2. density of sea water at p = 0
eosJMDCSw = [ 8.244930e-01,
- 4.089900e-03,
7.643800e-05,
- 8.246700e-07,
5.387500e-09,
- 5.724660e-03,
1.022700e-04,
- 1.654600e-06,
4.831400e-04,
]
# coefficients in pressure coordinates for
# 3. secant bulk modulus K of fresh water at p = 0
eosJMDCKFw = [ 1.965933e+04,
1.444304e+02,
- 1.706103e+00,
9.648704e-03,
- 4.190253e-05,
]
# 4. secant bulk modulus K of sea water at p = 0
eosJMDCKSw = [ 5.284855e+01,
- 3.101089e-01,
6.283263e-03,
- 5.084188e-05,
3.886640e-01,
9.085835e-03,
- 4.619924e-04,
]
# 5. secant bulk modulus K of sea water at p
eosJMDCKP = [ 3.186519e+00,
2.212276e-02,
- 2.984642e-04,
1.956415e-06,
6.704388e-03,
- 1.847318e-04,
2.059331e-07,
1.480266e-04,
2.102898e-04,
- 1.202016e-05,
1.394680e-07,
- 2.040237e-06,
6.128773e-08,
6.207323e-10,
]
def densjmd95(s,theta,p):
"""
densjmd95 Density of sea water
=========================================================================
USAGE: dens = densjmd95(s,theta,p)
DESCRIPTION:
Density of Sea Water using Jackett and McDougall 1995 (JAOT 12)
polynomial (modified UNESCO polynomial).
INPUT: (all must have same dimensions)
S = salinity [psu (PSS-78)]
Theta = potential temperature [degree C (IPTS-68)]
P = pressure [dbar]
(P may have dims 1x1, mx1, 1xn or mxn for S(mxn) )
OUTPUT:
dens = density [kg/m^3]
AUTHOR: Martin Losch 2002-08-09 (mlosch@mit.edu)
check value
S = 35.5 PSU
Theta = 3 degC
P = 3000 dbar
rho = 1041.83267 kg/m^3
Jackett and McDougall, 1995, JAOT 12(4), pp. 381-388
"""
# make sure arguments are floating point
s = np.asfarray(s)
t = np.asfarray(theta)
p = np.asfarray(p)
# convert pressure to bar
p = .1*p
t2 = t*t
t3 = t2*t
t4 = t3*t
if np.any(s<0):
sys.stderr.write('negative salinity values! setting to nan\n')
# the sqrt will take care of this
# if s.ndim > 0:
# s[s<0] = np.nan
# else:
# s = np.nan
s3o2 = s*np.sqrt(s)
# density of freshwater at the surface
rho = ( eosJMDCFw[0]
+ eosJMDCFw[1]*t
+ eosJMDCFw[2]*t2
+ eosJMDCFw[3]*t3
+ eosJMDCFw[4]*t4
+ eosJMDCFw[5]*t4*t
)
# density of sea water at the surface
rho = ( rho
+ s*(
eosJMDCSw[0]
+ eosJMDCSw[1]*t
+ eosJMDCSw[2]*t2
+ eosJMDCSw[3]*t3
+ eosJMDCSw[4]*t4
)
+ s3o2*(
eosJMDCSw[5]
+ eosJMDCSw[6]*t
+ eosJMDCSw[7]*t2
)
+ eosJMDCSw[8]*s*s
)
rho = rho / (1. - p/bulkmodjmd95(s,t,p))
return rho
def bulkmodjmd95(s,theta,p):
""" bulkmod = bulkmodjmd95(s,theta,p)
"""
# make sure arguments are floating point
s = np.asfarray(s)
t = np.asfarray(theta)
p = np.asfarray(p)
t2 = t*t
t3 = t2*t
t4 = t3*t
# if np.any(s<0):
# sys.stderr.write('negative salinity values! setting to nan\n')
# the sqrt will take care of this
# if s.ndim > 0:
# s[s<0] = np.nan
# else:
# s = np.nan
s3o2 = s*np.sqrt(s)
#p = pressure(i,j,k,bi,bj)*SItoBar
p2 = p*p
# secant bulk modulus of fresh water at the surface
bulkmod = ( eosJMDCKFw[0]
+ eosJMDCKFw[1]*t
+ eosJMDCKFw[2]*t2
+ eosJMDCKFw[3]*t3
+ eosJMDCKFw[4]*t4
)
# secant bulk modulus of sea water at the surface
bulkmod = ( bulkmod
+ s*( eosJMDCKSw[0]
+ eosJMDCKSw[1]*t
+ eosJMDCKSw[2]*t2
+ eosJMDCKSw[3]*t3
)
+ s3o2*( eosJMDCKSw[4]
+ eosJMDCKSw[5]*t
+ eosJMDCKSw[6]*t2
)
)
# secant bulk modulus of sea water at pressure p
bulkmod = ( bulkmod
+ p*( eosJMDCKP[0]
+ eosJMDCKP[1]*t
+ eosJMDCKP[2]*t2
+ eosJMDCKP[3]*t3
)
+ p*s*( eosJMDCKP[4]
+ eosJMDCKP[5]*t
+ eosJMDCKP[6]*t2
)
+ p*s3o2*eosJMDCKP[7]
+ p2*( eosJMDCKP[8]
+ eosJMDCKP[9]*t
+ eosJMDCKP[10]*t2
)
+ p2*s*( eosJMDCKP[11]
+ eosJMDCKP[12]*t
+ eosJMDCKP[13]*t2
)
)
return bulkmod
# aliases
dens = densjmd95
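# Illustrative check (not part of the original module), reproducing the
# value quoted in the densjmd95 docstring:
#   dens(35.5, 3., 3000.)  ->  approximately 1041.83267 kg/m^3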
| {
"repo_name": "altMITgcm/MITgcm66h",
"path": "utils/python/MITgcmutils/MITgcmutils/jmd95.py",
"copies": "1",
"size": "6013",
"license": "mit",
"hash": -4769491135905674000,
"line_mean": 25.6061946903,
"line_max": 76,
"alpha_frac": 0.4452020622,
"autogenerated": false,
"ratio": 2.9856007944389273,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8830600820446672,
"avg_score": 0.020040407238451182,
"num_lines": 226
} |
#$ header class MPI_(public)
#$ header method __init__(MPI_)
from pyccel.stdlib.internal.mpi import mpi_comm_world
from pyccel.stdlib.internal.mpi import mpi_comm_rank
from pyccel.stdlib.internal.mpi import mpi_comm_size
from pyccel.stdlib.internal.mpi import mpi_send
from pyccel.stdlib.internal.mpi import mpi_ssend
from pyccel.stdlib.internal.mpi import mpi_bsend
from pyccel.stdlib.internal.mpi import mpi_isend
from pyccel.stdlib.internal.mpi import mpi_issend
from pyccel.stdlib.internal.mpi import mpi_ibsend
from pyccel.stdlib.internal.mpi import mpi_recv
from pyccel.stdlib.internal.mpi import mpi_irecv
from pyccel.stdlib.internal.mpi import mpi_sendrecv
from pyccel.stdlib.internal.mpi import mpi_sendrecv_replace
from pyccel.stdlib.internal.mpi import mpi_bcast
from pyccel.stdlib.internal.mpi import mpi_scatter
from pyccel.stdlib.internal.mpi import mpi_barrier
from pyccel.stdlib.internal.mpi import mpi_gather
from pyccel.stdlib.internal.mpi import mpi_allgather
from pyccel.stdlib.internal.mpi import mpi_gatherv
from pyccel.stdlib.internal.mpi import mpi_alltoall
from pyccel.stdlib.internal.mpi import mpi_reduce
from pyccel.stdlib.internal.mpi import mpi_allreduce
from pyccel.stdlib.internal.mpi import mpi_wait
from pyccel.stdlib.internal.mpi import mpi_waitall
from pyccel.stdlib.internal.mpi import mpi_waitany
from pyccel.stdlib.internal.mpi import mpi_waitsome
from pyccel.stdlib.internal.mpi import mpi_test
from pyccel.stdlib.internal.mpi import mpi_testall
from pyccel.stdlib.internal.mpi import mpi_testany
from pyccel.stdlib.internal.mpi import mpi_testsome
from pyccel.stdlib.internal.mpi import mpi_cart_create
from pyccel.stdlib.internal.mpi import mpi_cart_coords
from pyccel.stdlib.internal.mpi import mpi_cart_shift
from pyccel.stdlib.internal.mpi import mpi_cart_sub
from pyccel.stdlib.internal.mpi import mpi_comm_split
from pyccel.stdlib.internal.mpi import mpi_comm_free
from pyccel.stdlib.internal.mpi import mpi_type_vector
from pyccel.stdlib.internal.mpi import mpi_type_commit
from pyccel.stdlib.internal.mpi import mpi_type_contiguous
from pyccel.stdlib.internal.mpi import mpi_type_free
from pyccel.stdlib.internal.mpi import mpi_type_indexed
from pyccel.stdlib.internal.mpi import mpi_type_create_subarray
from pyccel.stdlib.internal.mpi import ANY_TAG
from pyccel.stdlib.internal.mpi import ANY_SOURCE
from pyccel.stdlib.internal.mpi import MPI_SUM
from pyccel.stdlib.internal.mpi import MPI_PROD
from pyccel.stdlib.internal.mpi import MPI_MAX
from pyccel.stdlib.internal.mpi import MPI_MIN
from pyccel.stdlib.internal.mpi import MPI_MAXLOC
from pyccel.stdlib.internal.mpi import MPI_MINLOC
from pyccel.stdlib.internal.mpi import MPI_LAND
from pyccel.stdlib.internal.mpi import MPI_LOR
from pyccel.stdlib.internal.mpi import MPI_LXOR
from pyccel.stdlib.internal.mpi import MPI_INTEGER
from pyccel.stdlib.internal.mpi import MPI_DOUBLE
class MPI_:
def __init__(self):
self.COMM_WORLD = 0
self.INT = MPI_INTEGER
self.DOUBLE = MPI_DOUBLE
self.SUM = MPI_SUM
self.PROD = MPI_PROD
self.MAX = MPI_MAX
self.MIN = MPI_MIN
self.MAXLOC = MPI_MAXLOC
self.MINLOC = MPI_MINLOC
self.LAND = MPI_LAND
self.LOR = MPI_LOR
self.LXOR = MPI_LXOR
MPI = MPI_()
#$ header macro x.COMM_WORLD := mpi_comm_world
#$ header macro x.SUM := MPI_SUM
#$ header macro x.PROD := MPI_PROD
#$ header macro x.MAX := MPI_MAX
#$ header macro x.MIN := MPI_MIN
#$ header macro x.MAXLOC := MPI_MAXLOC
#$ header macro x.MINLOC := MPI_MINLOC
#$ header macro x.LAND := MPI_LAND
#$ header macro x.LOR := MPI_LOR
#$ header macro x.LXOR := MPI_LXOR
#$ header macro x.INT := MPI_INTEGER
#$ header macro x.DOUBLE := MPI_DOUBLE
#$ header macro (x), y.Get_rank() := mpi_comm_rank(y,x,ierr)
#$ header macro (x), y.Get_size() := mpi_comm_size(y,x,ierr)
#......................
#lower-case letter functions
#......................
#$ header macro y.send(data, dest, tag=0) := mpi_send(data, data.count, data.dtype, dest ,tag, y, ierr)
#$ header macro y.ssend(data, dest, tag=0) := mpi_ssend(data, data.count, data.dtype, dest ,tag, y, ierr)
#$ header macro y.bsend(data, dest, tag=0) := mpi_bsend(data, data.count, data.dtype, dest ,tag, y, ierr)
#$ header macro (req),y.isend(data, dest, tag=0) := mpi_isend(data, data.count, data.dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.issend(data, dest, tag=0) := mpi_issend(data, data.count, data.dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.ibsend(data, dest, tag=0) := mpi_ibsend(data, data.count, data.dtype, dest ,tag, y, req, ierr)
#$ header macro (x), y.recv(source=0, tag=0) := mpi_recv(x, x.count, x.dtype, source ,tag, y, status, ierr)
#$ header macro (x), y.sendrecv(sendobj, dest, sendtag=0, source=ANY_SOURCE, recvtag=ANY_TAG) := mpi_sendrecv(sendobj, sendobj.count, sendobj.dtype, dest, sendtag, x, x.count, x.dtype, source , recvtag, y, status, ierr)
#$ header macro (x),y.reduce(data, op=MPI_SUM, root=0) := mpi_reduce(data, x, data.count, data.dtype, op ,root, y, ierr)
#$ header macro (x),y.allreduce(data, op=MPI_SUM) := mpi_allreduce(data, x, data.count, data.dtype, op , y, ierr)
#$ header macro y.bcast(data, root=0) := mpi_bcast(data, data.count, data.dtype, root, y, ierr)
#$ header macro (x),y.gather(data, root=0) := mpi_gather(data, data.count, data.dtype, x, x.count, x.dtype, root, y, ierr)
#.....................
##$ header macro (x),y.scatter
##$ header macro (req), y.irecv
##$ header macro y.alltoall
#not_working for the moment
#.....................
#......................
#upper-case letter functions
#......................
#$ header macro (x), y.Split(color=0, key=0) := mpi_comm_split(y, color, key, x, ierr)
#$ header macro y.Free() := mpi_comm_free(y, ierr)
#$ header macro (datatype),y.Create_vector(count, blocklength, stride) := mpi_type_vector(count, blocklength, stride, y.dtype, datatype, ierr)
#$ header macro x.Commit() := mpi_type_commit(x,ierr)
#$ header macro y.Send([data, dtype=data.dtype], dest=0, tag=0) := mpi_send(data, data.count, dtype, dest ,tag, y, ierr)
#$ header macro y.Recv([data, dtype=data.dtype], source=ANY_SOURCE, tag=ANY_TAG) := mpi_recv(data, data.count, data.dtype, source ,tag, y, status, ierr)
#$ header macro (req),y.Isend([data, dtype=data.dtype], dest=0, tag=0) := mpi_isend(data, data.count, dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.Issend([data, dtype=data.dtype], dest=0, tag=0) := mpi_issend(data, data.count, dtype, dest ,tag, y, ierr)
#$ header macro (req),y.Ibsend([data, dtype=data.dtype], dest=0, tag=0) := mpi_ibsend(data, data.count, dtype, dest ,tag, y, ierr)
#$ header macro (req),y.Irecv([data, dtype=data.dtype], source=ANY_SOURCE, tag=ANY_TAG) := mpi_irecv(data, data.count, dtype, source ,tag, y, req, ierr)
#$ header macro (x), y.Sendrecv(sendobj, dest, sendtag=0, recvbuf=x, source=ANY_SOURCE, recvtag=ANY_TAG, Stat=status) := mpi_sendrecv(sendobj, sendobj.count, sendobj.dtype, dest, sendtag, recvbuf, recvbuf.count, recvbuf.dtype, source , recvtag, y, status, ierr)
#$ header macro y.Reduce(data, recvbuf, op=MPI_SUM, root=0) := mpi_reduce(data, recvbuf, data.count, data.dtype, op ,root, y, ierr)
#$ header macro y.Allreduce(data, recvbuf, op=MPI_SUM) := mpi_allreduce(data, recvbuf, data.count, data.dtype, op , y, ierr)
#$ header macro x.Allgather(A,B) := mpi_allgather(A, A.count, A.dtype, B, B.count, B.dtype, x)
#$ header macro y.Gather(data, recvbuf, root=0) := mpi_gather(data, data.count, data.dtype, recvbuf, recvbuf.count, recvbuf.dtype, root, y, ierr)
#$ header macro y.Bcast(data, root=0) := mpi_bcast(data, data.count, data.dtype, root, y, ierr)
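#......................
# illustrative user code (a sketch; shows how the macros above expand
# ordinary mpi4py-style calls into the raw MPI routines):
#
#   from mpi4py import MPI
#   comm = MPI.COMM_WORLD
#   rank = comm.Get_rank()   # -> mpi_comm_rank(mpi_comm_world, rank, ierr)
#   size = comm.Get_size()   # -> mpi_comm_size(mpi_comm_world, size, ierr)
#   if rank == 0:
#       comm.send(data, 1, tag=7)
#   elif rank == 1:
#       data = comm.recv(0, tag=7)
#......................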
| {
"repo_name": "ratnania/pyccel",
"path": "tests/macro/scripts/MPI/mpi4py.py",
"copies": "1",
"size": "7894",
"license": "mit",
"hash": -4386461861454674400,
"line_mean": 44.8953488372,
"line_max": 263,
"alpha_frac": 0.6931846972,
"autogenerated": false,
"ratio": 2.8990084465662873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40921931437662873,
"avg_score": null,
"num_lines": null
} |
#$ header func(double) results(double)
def func(t):
y=pow(t,2)
return y
xStart = array((1,2,5),float)
side = 0.1
tol = 1.0e-6
n = len(xStart)
n = n+1
k = n-1
# Number of variables
x = zeros(n, double)
f = zeros(n, double)
# Generate starting simplex
x[0] = xStart
for i in range(1,n):
x[i] = xStart
x[i] = xStart[i-1] + side
# Compute values of func at the vertices of the simplex
for i in range(1,n+1):
f[i] = func(x[i])
# Main loop
for k in range(1,500):
# Find highest and lowest vertices
    iLo = 0
    iHi = 0
    # Compute the move vector d
    m = n + 1
    d = -m*x[iHi]
#
if sqrt(dot(d,d)/n) < tol:
n=n+1
xNew = x[iHi] + 2.0*d
fNew = func(xNew)
if fNew <= f[iLo]:
# Accept reflection
x[iHi] = xNew
f[iHi] = fNew
# Try expanding the reflection
xNew = x[iHi] + d
fNew = func(xNew)
if fNew <= f[iLo]:
x[iHi] = xNew
f[iHi] = fNew
# Accept expansion
else:
if fNew <= f[iHi]:
x[iHi] = xNew
f[iHi] = fNew
# Accept reflection
else:
# Try contraction
xNew = x[iHi] + 0.5*d
fNew = func(xNew)
if fNew <= f[iHi]:
# Accept contraction
x[iHi] = xNew
f[iHi] = fNew
else:
# Use shrinkage
s=len(x)
for i in range(1,s):
if i != iLo:
x[i] = x[i]*0.5 - x[iLo]*0.5
f[i] = func(x[i])
print("Too many iterations in downhill")
print(x[iLo])
| {
"repo_name": "ratnania/pyccel",
"path": "src_old/tests/scripts/core/ex18.py",
"copies": "1",
"size": "1667",
"license": "mit",
"hash": 1804656444876378600,
"line_mean": 21.8356164384,
"line_max": 55,
"alpha_frac": 0.4577084583,
"autogenerated": false,
"ratio": 2.8940972222222223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3851805680522222,
"avg_score": null,
"num_lines": null
} |
#$ header metavar module_name='mpi4py'
#$ header metavar module_version='3.1'
#$ header metavar ignore_at_import=True
#$ header metavar import_all=True
from pyccel.stdlib.internal.mpi import mpi_comm_world
from pyccel.stdlib.internal.mpi import mpi_comm_rank
from pyccel.stdlib.internal.mpi import mpi_comm_size
from pyccel.stdlib.internal.mpi import mpi_send
from pyccel.stdlib.internal.mpi import mpi_ssend
from pyccel.stdlib.internal.mpi import mpi_bsend
from pyccel.stdlib.internal.mpi import mpi_isend
from pyccel.stdlib.internal.mpi import mpi_issend
from pyccel.stdlib.internal.mpi import mpi_ibsend
from pyccel.stdlib.internal.mpi import mpi_recv
from pyccel.stdlib.internal.mpi import mpi_irecv
from pyccel.stdlib.internal.mpi import mpi_sendrecv
from pyccel.stdlib.internal.mpi import mpi_sendrecv_replace
from pyccel.stdlib.internal.mpi import mpi_bcast
from pyccel.stdlib.internal.mpi import mpi_scatter
from pyccel.stdlib.internal.mpi import mpi_barrier
from pyccel.stdlib.internal.mpi import mpi_gather
from pyccel.stdlib.internal.mpi import mpi_allgather
from pyccel.stdlib.internal.mpi import mpi_allgatherv
from pyccel.stdlib.internal.mpi import mpi_gatherv
from pyccel.stdlib.internal.mpi import mpi_alltoall
from pyccel.stdlib.internal.mpi import mpi_reduce
from pyccel.stdlib.internal.mpi import mpi_allreduce
from pyccel.stdlib.internal.mpi import mpi_wait
from pyccel.stdlib.internal.mpi import mpi_waitall
from pyccel.stdlib.internal.mpi import mpi_waitany
from pyccel.stdlib.internal.mpi import mpi_waitsome
from pyccel.stdlib.internal.mpi import mpi_test
from pyccel.stdlib.internal.mpi import mpi_testall
from pyccel.stdlib.internal.mpi import mpi_testany
from pyccel.stdlib.internal.mpi import mpi_testsome
from pyccel.stdlib.internal.mpi import mpi_cart_create
from pyccel.stdlib.internal.mpi import mpi_cart_coords
from pyccel.stdlib.internal.mpi import mpi_cart_shift
from pyccel.stdlib.internal.mpi import mpi_cart_sub
from pyccel.stdlib.internal.mpi import mpi_comm_split
from pyccel.stdlib.internal.mpi import mpi_comm_free
from pyccel.stdlib.internal.mpi import mpi_type_vector
from pyccel.stdlib.internal.mpi import mpi_type_commit
from pyccel.stdlib.internal.mpi import mpi_type_contiguous
from pyccel.stdlib.internal.mpi import mpi_type_free
from pyccel.stdlib.internal.mpi import mpi_type_indexed
from pyccel.stdlib.internal.mpi import mpi_type_create_subarray
from pyccel.stdlib.internal.mpi import ANY_TAG
from pyccel.stdlib.internal.mpi import ANY_SOURCE
from pyccel.stdlib.internal.mpi import MPI_SUM
from pyccel.stdlib.internal.mpi import MPI_PROD
from pyccel.stdlib.internal.mpi import MPI_MAX
from pyccel.stdlib.internal.mpi import MPI_MIN
from pyccel.stdlib.internal.mpi import MPI_MAXLOC
from pyccel.stdlib.internal.mpi import MPI_MINLOC
from pyccel.stdlib.internal.mpi import MPI_LAND
from pyccel.stdlib.internal.mpi import MPI_LOR
from pyccel.stdlib.internal.mpi import MPI_LXOR
from pyccel.stdlib.internal.mpi import MPI_INTEGER
from pyccel.stdlib.internal.mpi import MPI_DOUBLE
#===================================================================================
#$ header class MPI_(public)
#$ header method __init__(MPI_)
class MPI_:
def __init__(self):
self.COMM_WORLD = -1
self.INT = MPI_INTEGER
self.DOUBLE = MPI_DOUBLE
self.SUM = MPI_SUM
self.PROD = MPI_PROD
self.MAX = MPI_MAX
self.MIN = MPI_MIN
self.MAXLOC = MPI_MAXLOC
self.MINLOC = MPI_MINLOC
self.LAND = MPI_LAND
self.LOR = MPI_LOR
self.LXOR = MPI_LXOR
self.Request = -1
MPI = MPI_()
#====================================================================================
ierr = -1
#$ header macro x.COMM_WORLD := mpi_comm_world
#$ header macro x.SUM := MPI_SUM
#$ header macro x.PROD := MPI_PROD
#$ header macro x.MAX := MPI_MAX
#$ header macro x.MIN := MPI_MIN
#$ header macro x.MAXLOC := MPI_MAXLOC
#$ header macro x.MINLOC := MPI_MINLOC
#$ header macro x.LAND := MPI_LAND
#$ header macro x.LOR := MPI_LOR
#$ header macro x.LXOR := MPI_LXOR
#$ header macro x.INT := MPI_INTEGER
#$ header macro x.DOUBLE := MPI_DOUBLE
#$ header macro (x), y.Get_rank() := mpi_comm_rank(y,x,ierr)
#$ header macro (x), y.Get_size() := mpi_comm_size(y,x,ierr)
#......................
#lower-case letter functions
#......................
#$ header macro y.send(data, dest, tag=0) := mpi_send(data, data.count, data.dtype, dest ,tag, y, ierr)
#$ header macro y.ssend(data, dest, tag=0) := mpi_ssend(data, data.count, data.dtype, dest ,tag, y, ierr)
#$ header macro y.bsend(data, dest, tag=0) := mpi_bsend(data, data.count, data.dtype, dest ,tag, y, ierr)
#$ header macro (req),y.isend(data, dest, tag=0) := mpi_isend(data, data.count, data.dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.issend(data, dest, tag=0) := mpi_issend(data, data.count, data.dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.ibsend(data, dest, tag=0) := mpi_ibsend(data, data.count, data.dtype, dest ,tag, y, req, ierr)
#$ header macro (x), y.recv(source=0, tag=0) := mpi_recv(x, x.count, x.dtype, source ,tag, y, MPI_STATUS_IGNORE, ierr)
#$ header macro (x), y.sendrecv(sendobj, dest, sendtag=0, source=ANY_SOURCE, recvtag=ANY_TAG) := mpi_sendrecv(sendobj, sendobj.count, sendobj.dtype, dest, sendtag, x, x.count, x.dtype, source , recvtag, y, MPI_STATUS_IGNORE, ierr)
#$ header macro (x), y.reduce(data, op=MPI_SUM, root=0) := mpi_reduce(data, x, data.count, data.dtype, op ,root, y, ierr)
#$ header macro (x), y.allreduce(data, op=MPI_SUM) := mpi_allreduce(data, x, data.count, data.dtype, op , y, ierr)
#$ header macro y.bcast(data, root=0) := mpi_bcast(data, data.count, data.dtype, root, y, ierr)
#$ header macro (x), y.gather(data, root=0) := mpi_gather(data, data.count, data.dtype, x, data.count, x.dtype, root, y, ierr)
#.....................
##$ header macro (x),y.scatter
##$ header macro (req), y.irecv
##$ header macro y.alltoall
#not_working for the moment
#.....................
#......................
#upper-case letter functions
#......................
#$ header macro (x), y.Split(color=0, key=0) := mpi_comm_split(y, color, key, x, ierr)
#$ header macro y.Free() := mpi_comm_free(y, ierr)
#$ header macro (datatype),y.Create_vector(count, blocklength, stride) := mpi_type_vector(count, blocklength, stride, y.dtype, datatype, ierr)
#$ header macro x.Commit() := mpi_type_commit(x, ierr)
#$ header macro y.Send([data, dtype=data.dtype], dest=0, tag=0) := mpi_send(data, data.count, dtype, dest ,tag, y, ierr)
#$ header macro y.Recv([data, dtype=data.dtype], source=ANY_SOURCE, tag=ANY_TAG) := mpi_recv(data, data.count, dtype, source ,tag, y, MPI_STATUS_IGNORE, ierr)
#$ header macro (req),y.Isend([data, count=data.count,dtype=data.dtype], dest=0, tag=0) := mpi_isend(data, count, dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.Issend([data, count=data.count,dtype=data.dtype], dest=0, tag=0) := mpi_issend(data, count, dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.Ibsend([data, count=data.count,dtype=data.dtype], dest=0, tag=0) := mpi_ibsend(data, count, dtype, dest ,tag, y, req, ierr)
#$ header macro (req),y.Irecv( [data, count=data.count,dtype=data.dtype], source=ANY_SOURCE, tag=ANY_TAG) := mpi_irecv(data, count, dtype, source ,tag, y, req, ierr)
#$ header macro (x), y.Sendrecv(sendobj, dest, sendtag=0, recvbuf=x, source=ANY_SOURCE, recvtag=ANY_TAG) := mpi_sendrecv(sendobj, sendobj.count, sendobj.dtype, dest, sendtag, recvbuf, recvbuf.count, recvbuf.dtype, source , recvtag, y, MPI_STATUS_IGNORE, ierr)
#$ header macro y.Reduce(data, recvbuf, op=MPI_SUM, root=0) := mpi_reduce(data, recvbuf, data.count, data.dtype, op ,root, y, ierr)
#$ header macro y.Allreduce(data, recvbuf, op=MPI_SUM) := mpi_allreduce(data, recvbuf, data.count, data.dtype, op , y, ierr)
#$ header macro x.Allgatherv(A,[B,Bcounts,Bdisps,Bdtype = B.dtype]) := mpi_allgatherv(A, A.count, A.dtype, B, Bcounts, Bdisps, Bdtype, x, ierr)
#$ header macro y.Gather(data, recvbuf, root=0) := mpi_gather(data, data.count, data.dtype, recvbuf, data.count, recvbuf.dtype, root, y, ierr)
#$ header macro y.Bcast(data, root=0) := mpi_bcast(data, data.count, data.dtype, root, y, ierr)
#$ header macro x.Waitall(req) := mpi_waitall(req.count, req, MPI_STATUSES_IGNORE, ierr)
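#......................
# illustrative usage sketch -- not part of the original header file.
# With the macros above, pyccel can translate mpi4py-style code such as
# the following into direct calls to the Fortran MPI bindings (the
# buffer contents and tag are made up):
#
#     from mpi4py import MPI
#     from numpy import zeros
#
#     comm = MPI.COMM_WORLD
#     rank = comm.Get_rank()
#     buf = zeros(4)
#     if rank == 0:
#         buf[:] = 1.0
#         comm.Send([buf, MPI.DOUBLE], dest=1, tag=7)
#     elif rank == 1:
#         comm.Recv([buf, MPI.DOUBLE], source=0, tag=7)
#......................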
| {
"repo_name": "ratnania/pyccel",
"path": "pyccel/stdlib/external/mpi4py.py",
"copies": "1",
"size": "8533",
"license": "mit",
"hash": 8640236211763993000,
"line_mean": 46.4055555556,
"line_max": 261,
"alpha_frac": 0.6817063167,
"autogenerated": false,
"ratio": 2.9444444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4126150761144445,
"avg_score": null,
"num_lines": null
} |
#$ header metavar print=True
from pyccel.stdlib.internal.dfftpack import dffti
from pyccel.stdlib.internal.dfftpack import dfftf
from pyccel.stdlib.internal.dfftpack import dfftb
from pyccel.stdlib.internal.dfftpack import dzffti
from pyccel.stdlib.internal.dfftpack import dzfftf
from pyccel.stdlib.internal.dfftpack import dzfftb
from pyccel.stdlib.internal.dfftpack import dcosqi
from pyccel.stdlib.internal.dfftpack import dcosqf
from pyccel.stdlib.internal.dfftpack import dcosqb
from pyccel.stdlib.internal.dfftpack import dcosti
from pyccel.stdlib.internal.dfftpack import dcost
from pyccel.stdlib.internal.dfftpack import dsinqi
from pyccel.stdlib.internal.dfftpack import dsinqf
from pyccel.stdlib.internal.dfftpack import dsinqb
from pyccel.stdlib.internal.dfftpack import dsinti
from pyccel.stdlib.internal.dfftpack import dsint
from pyccel.stdlib.internal.dfftpack import zffti
from pyccel.stdlib.internal.dfftpack import zfftf
from pyccel.stdlib.internal.dfftpack import zfftb
#$ header function fft(double[:]|complex[:], complex[:], int)
def fft(x, y, n):
from numpy import empty
w = empty(4*n+15)
y[:] = x[:]
zffti(n,w)
zfftf(n,y,w)
#$ header function ifft(double[:]|complex[:], complex[:], int)
def ifft(x, y, n):
from numpy import empty
w = empty(4*n+15)
y[:] = x[:]
zffti(n, w)
    zfftb(n, y, w)
#$ header function rfft(double[:], double[:], int)
def rfft(x, y, n):
from numpy import empty
w = empty(2*n+15)
y[:] = x[:]
dffti(n,w)
dfftf(n,y,w)
#$ header function irfft(double[:], double[:], int)
def irfft(x, y, n):
from numpy import empty
w = empty(2*n+15)
y[:] = x[:]
dffti(n,w)
dfftb(n,y,w)
#$ header macro (y), fft (x, n=x.count) := fft (x, y, n)
#$ header macro (y), ifft(x, n=x.count) := ifft(x, y, n)
#$ header macro (y), rfft (x,n=x.count) := rfft (x, y, n)
#$ header macro (y), irfft(x,n=x.count) := irfft(x, y, n)
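#..............................
# illustrative usage sketch -- not part of the original file.  With the
# macros above a pyccel program can call the wrappers in either form
# (the sample data is made up).  Note that dfftpack's backward
# transforms are unnormalized: ifft(fft(x)) yields n*x, not x.
#
#     from numpy import zeros, sin, pi, arange
#
#     n = 8
#     x = sin(2*pi*arange(n)/n)    # one full period of a sine
#     y = zeros(n, dtype=complex)
#     fft(x, y, n)                 # explicit form, uses zfftf
#     # y = fft(x)                 # equivalent macro form
#..............................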
| {
"repo_name": "ratnania/pyccel",
"path": "pyccel/stdlib/external/dfftpack.py",
"copies": "1",
"size": "1937",
"license": "mit",
"hash": -7794478542784123000,
"line_mean": 27.0724637681,
"line_max": 62,
"alpha_frac": 0.6969540527,
"autogenerated": false,
"ratio": 2.712885154061625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8765496125456984,
"avg_score": 0.028868616260928296,
"num_lines": 69
} |
# ${header}
# This is a simple class to represent the ${obj_key} object in the game. You can extend it by adding utility functions here in this file.
<%include file="functions.noCreer" /><% parent_classes = obj['parentClasses'] %>
% if len(parent_classes) > 0:
% for parent_class in parent_classes:
from games.${underscore(game_name)}.${underscore(parent_class)} import ${parent_class}
% endfor
% else:
<% if obj_key == "Game":
parent_classes = [ 'BaseGame' ]
else:
parent_classes = [ 'BaseGameObject' ]
%>from joueur.${underscore(parent_classes[0])} import ${parent_classes[0]}
% endif
% if obj_key == "Game":
# import game objects
% for game_obj_key in sort_dict_keys(game_objs):
from games.${underscore(game_name)}.${underscore(game_obj_key)} import ${game_obj_key}
% endfor
% endif
${merge("# ", "imports", "# you can add addtional import(s) here")}
class ${obj_key}(${", ".join(parent_classes)}):
""" The class representing the ${obj_key} in the ${game_name} game.
${obj['description']}
"""
def __init__(self):
""" initializes a ${obj_key} with basic logic as provided by the Creer code generator
"""
% for parent_class in reversed(parent_classes):
${parent_class}.__init__(self)
% endfor
# private attributes to hold the properties so they appear read only
% for attr_name in obj['attribute_names']:
<% attr_parms = obj['attributes'][attr_name]
%> self._${underscore(attr_name)} = ${shared['py']['default'](attr_parms['type'], attr_parms['default'])}
% endfor
% if obj_key == "Game":
self.name = "${game_name}"
self._game_object_classes = {<% c = len(game_objs) %>
% for game_obj_key, game_obj in game_objs.items():
<% c -= 1
%> '${game_obj_key}': ${game_obj_key}${',' if c != 0 else ''}
% endfor
}
% endif
% for attr_name in obj['attribute_names']:
<% attr_parms = obj['attributes'][attr_name] %>
@property
def ${underscore(attr_name)}(self):
"""${attr_parms['description']}
"""
return self._${underscore(attr_name)}
% endfor
% for function_name in obj['function_names']:
<% function_parms = obj['functions'][function_name]
%>
def ${underscore(function_name)}(self${shared['py']['args'](function_parms['arguments'])}):
""" ${function_parms['description']}
% if len(function_parms['arguments']) > 0:
Args:
% for arg_parms in function_parms['arguments']:
${underscore(arg_parms['name'])} (${"Optional[" if arg_parms['optional'] else ""}${shared['py']['type'](arg_parms['type'])}${"]" if arg_parms['optional'] else ""}): ${arg_parms['description']}
% endfor
% endif
% if function_parms['returns']:
Returns:
${shared['py']['type'](function_parms['returns']['type'])}: ${function_parms['returns']['description']}
% endif
"""
return self._run_on_server('${function_name}'${shared['py']['kwargs'](function_parms['argument_names'])})
% endfor
${merge(" # ", "functions", " # if you want to add any client side logic (such as state checking functions) this is where you can add them")}
| {
"repo_name": "brhoades/megaminer16-anarchy",
"path": "_creer/games/${underscore(game_name)}/${underscore(obj_key)}.py",
"copies": "1",
"size": "3103",
"license": "mit",
"hash": -5064488761453006000,
"line_mean": 34.6666666667,
"line_max": 204,
"alpha_frac": 0.6187560425,
"autogenerated": false,
"ratio": 3.4325221238938055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4551278166393805,
"avg_score": null,
"num_lines": null
} |
# ${header}
# This is where you build your AI for the ${game_name} game.
<%include file="functions.noCreer" />
from joueur.base_ai import BaseAI
${merge("# ", "imports", "# you can add addtional import(s) here")}
class AI(BaseAI):
""" the basic AI functions that are the same between games
"""
def get_name(self):
""" this is the name you send to the server to play as.
Returns
str: the name you want your player to have
"""
${merge(" # ", "get-name", ' return "' + game_name + ' Python Player" # REPLACE THIS WITH YOUR TEAM NAME')}
def start(self):
""" this is called once the game starts and your AI knows its player.id and game. You can initialize your AI here.
"""
${merge(" # ", "start", " # replace with your start logic")}
def game_updated(self):
""" this is called every time the game's state updates, so if you are tracking anything you can update it here.
"""
${merge(" # ", "game-updated", " # replace with your game updated logic")}
def end(self, won, reason):
""" this is called when the game ends, you can clean up your data and dump files here if need be
Args:
won (bool): won == true means you won, won == false means you lost
reason (str): the reason why you won or lost
"""
${merge(" # ", "end", " # replace with your end logic")}
% for function_name in ai['function_names']:
<% function_parms = ai['functions'][function_name]
%>
def ${underscore(function_name)}(self${", ".join([""] + function_parms['argument_names'])}):
""" ${function_parms['description']}
% if len(function_parms['arguments']) > 0:
Args:
% for arg_parms in function_parms['arguments']:
${underscore(arg_parms['name'])} (${shared['py']['type'](arg_parms['type'])}): ${arg_parms['description']}
% endfor
% endif
% if function_parms['returns']:
Returns:
${shared['py']['type'](function_parms['returns']['type'])}: ${function_parms['returns']['description']}
% endif
"""
${merge(" # ", function_name,
""" # Put your game logic here for {0}
return {1}
""".format(function_name, shared['py']['default'](function_parms['returns']['type'], function_parms['returns']['default']) if function_parms['returns'] else "")
)}
% endfor
${merge(" # ", "functions", " # if you need additional functions for your AI you can add them here")}
| {
"repo_name": "brhoades/megaminer16-anarchy",
"path": "_creer/games/${underscore(game_name)}/ai.py",
"copies": "1",
"size": "2515",
"license": "mit",
"hash": 5370203120698734000,
"line_mean": 33.9305555556,
"line_max": 160,
"alpha_frac": 0.5868787276,
"autogenerated": false,
"ratio": 3.845565749235474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9844312589730397,
"avg_score": 0.017626377421015187,
"num_lines": 72
} |
#!$HOME/anaconda/bin/python
# -*- coding: utf-8 -*-
'''
Ripped from template.py
- makes an apollonian gasket
'''
import inkex # Required
import simplestyle # will be needed here for styles support
import ag
__version__ = '0.0'
inkex.localize()
### Your helper functions go here
def cplxs2pts(zs):
tt = []
for z in zs:
tt.extend([z.real,z.imag])
return tt
def draw_SVG_circle(parent, r, cx, cy, name):
" structre an SVG circle entity under parent "
circ_attribs = { 'cx': str(cx), 'cy': str(cy),
'r': str(r),
inkex.addNS('label','inkscape'): name}
circle = inkex.etree.SubElement(parent, inkex.addNS('circle','svg'), circ_attribs )
class Myextension(inkex.Effect): # choose a better name
def __init__(self):
" define how the options are mapped from the inx file "
inkex.Effect.__init__(self) # initialize the super class
# list of parameters defined in the .inx file
self.OptionParser.add_option("-d", "--depth",
action="store", type="int",
dest="depth", default=3,
help="command line help")
self.OptionParser.add_option("", "--c1",
action="store", type="float",
dest="c1", default=2.0,
help="command line help")
self.OptionParser.add_option("", "--c2",
action="store", type="float",
dest="c2", default=3.0,
help="command line help")
self.OptionParser.add_option("", "--c3",
action="store", type="float",
dest="c3", default=3.0,
help="command line help")
self.OptionParser.add_option("-x", "--shrink",
action="store", type="inkbool",
dest="shrink", default=True,
help="command line help")
# here so we can have tabs - but we do not use it directly - else error
self.OptionParser.add_option("", "--active-tab",
action="store", type="string",
dest="active_tab", default='title', # use a legitmate default
help="Active tab.")
def calc_unit_factor(self):
""" return the scale factor for all dimension conversions.
- The document units are always irrelevant as
everything in inkscape is expected to be in 90dpi pixel units
"""
# namedView = self.document.getroot().find(inkex.addNS('namedview', 'sodipodi'))
# doc_units = self.getUnittouu(str(1.0) + namedView.get(inkex.addNS('document-units', 'inkscape')))
unit_factor = self.getUnittouu(str(1.0) + self.options.units)
return unit_factor
### -------------------------------------------------------------------
### Main function and is called when the extension is run.
def effect(self):
#set up path styles
path_stroke = '#DD0000' # take color from tab3
path_fill = 'none' # no fill - just a line
path_stroke_width = 1. # can also be in form '0.6mm'
page_id = self.options.active_tab # sometimes wrong the very first time
style_curve = { 'stroke': path_stroke,
'fill': 'none',
'stroke-width': path_stroke_width }
# This finds center of current view in inkscape
t = 'translate(%s,%s)' % (self.view_center[0], self.view_center[1] )
# add a group to the document's current layer
#all the circles inherit style from this group
g_attribs = { inkex.addNS('label','inkscape'): 'zengon' + "_%d"%(self.options.depth),
inkex.addNS('transform-center-x','inkscape'): str(0),
inkex.addNS('transform-center-y','inkscape'): str(0),
'transform': t,
'style' : simplestyle.formatStyle(style_curve),
'info':'N: '}
topgroup = inkex.etree.SubElement(self.current_layer, 'g', g_attribs )
circles = ag.main(c1=self.options.c1,
c2=self.options.c2,
c3=self.options.c3,
depth=self.options.depth)
#shrink the circles so they don't touch
#useful for laser cutting
if self.options.shrink:
circles = circles[1:]
for cc in circles:
cc.r = abs(cc.r)
if cc.r >.5:
cc.r -= .1
else:
cc.r *= .9
scale_factor = 200
for c in circles:
cx, cy, r = c.m.real, c.m.imag, abs(c.r)
#rescale and add circle to document
cx, cy, r = scale_factor*cx , scale_factor*cy, scale_factor*r
draw_SVG_circle(topgroup,r,cx,cy,'apo')
if __name__ == '__main__':
e = Myextension()
e.affect()
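# Background sketch -- not part of the extension.  The curvatures c1,
# c2, c3 passed to ag.main() relate to a fourth mutually tangent circle
# through Descartes' circle theorem (assumed to be what ag implements):
#
#     k4 = k1 + k2 + k3 +/- 2*sqrt(k1*k2 + k2*k3 + k3*k1)
#
#     from math import sqrt
#
#     def fourth_curvatures(k1, k2, k3):
#         root = 2.0 * sqrt(k1*k2 + k2*k3 + k3*k1)
#         return (k1 + k2 + k3 + root, k1 + k2 + k3 - root)
#
#     # with the extension's defaults (2, 3, 3) this gives one inner and
#     # one enclosing solution; a negative curvature means the circle
#     # encloses the other three:
#     print(fourth_curvatures(2.0, 3.0, 3.0))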
| {
"repo_name": "macbuse/Apollonian",
"path": "apollon_inx.py",
"copies": "1",
"size": "5516",
"license": "mit",
"hash": -8685216098883638000,
"line_mean": 35.0522875817,
"line_max": 107,
"alpha_frac": 0.470630892,
"autogenerated": false,
"ratio": 4.2726568551510455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.039515765152828015,
"num_lines": 153
} |
import logging
import debug
log = logging.getLogger('rfc1157')
from rfc1155 import *
asnTagNumbers['Get'] = 0x00
asnTagNumbers['GetNext'] = 0x01
asnTagNumbers['Response'] = 0x02
asnTagNumbers['Set'] = 0x03
asnTagNumbers['Trap'] = 0x04
class ErrorStatus(Integer):
""" Error Status
"""
# define a dictionary of error codes
errString = {
0: 'No Error',
1: 'Response message would have been too large',
2: 'There is no such variable name in this MIB',
3: 'The value given has the wrong type',
4: 'Object is Read Only',
5: 'An unknown error occurred'
}
    # maps error numbers to their symbolic names
    errNum = {
        0: 'noError',
        1: 'tooBig',
        2: 'noSuchName',
        3: 'badValue',
        4: 'readOnly',
        5: 'genErr',
    }
def __str__(self):
""" Return a nicer looking error
"""
return '%d: %s' % (self.value, self.errString[self.value])
def enum(self, num=None):
"""
Return the stringified version of my enum.
If a specific number is passed in, use that,
otherwise, use my current value.
"""
if num is None:
num = self.value
        return self.errNum[num]
class VarBind(Sequence):
""" Variable Binding
This binds a name to an object
"""
def __init__(self, name=None, value=None):
if name:
if not isinstance( name, ObjectID ):
raise ValueError('name must be an ObjectID')
if value:
if not isinstance( value, Asn1Object ):
raise ValueError('name must be an Asn1Object')
self.objectID = name
self.objectValue = value
Sequence.__init__(self, [ self.objectID, self.objectValue ] )
class VarBindList(SequenceOf):
""" A Sequence of VarBinds
"""
def __init__(self, value=[]):
SequenceOf.__init__(self, VarBind, value)
return
pass
class Message(Sequence):
""" A Message is the base comms type for all SNMP messages
"""
def __init__(self, version=0, community='public', data=None):
Sequence.__init__(self)
self.version = Integer(version)
self.community = OctetString(community)
self.data = data
def __str__(self):
result = '[%s, ' % self.version
result += '%s, ' % self.community
result += '%s]' % self.data
return result
def encodeContents(self):
self.value = []
self.value.append(self.version)
self.value.append(self.community)
self.value.append(self.data)
return Sequence.encodeContents(self)
def decode(self, stream):
objectList = Sequence().decode(stream)
# Should return a single Sequence
if len(objectList) != 1:
            raise MessageError('Malformed Message: %d objects decoded, expected exactly one' % len(objectList) )
# Sequence should contain 3 objects
if len(objectList[0]) != 3:
            raise MessageError('Malformed Message: Incorrect sequence length %d' % len(objectList[0]) )
self.version = objectList[0][0]
self.community = objectList[0][1]
self.data = objectList[0][2]
return self
class MessageError(Exception):
def __init__(self, args=None):
self.args = args
class PDU(Sequence):
""" Base class for a non-trap PDU
"""
asnTagClass = asnTagClasses['CONTEXT']
def __init__(self, requestID=0, errorStatus=0, errorIndex=0, varBindList=[]):
""" __init__ allows you to create a new object with no arguments,
arguments of the class ultimately desired (eg Integer)
or, to make like easier, it will convert basic strings and ints
into the ultimately desired objects.
"""
Sequence.__init__(self)
self.requestID = Integer(requestID)
self.errorStatus = ErrorStatus(errorStatus)
self.errorIndex = Integer(errorIndex)
self.varBindList = VarBindList(varBindList)
self.value = [ self.requestID, self.errorStatus, self.errorIndex, self.varBindList ]
def decodeContents(self, stream):
""" Decode into a Get PDU Object
"""
objectList = Sequence.decodeContents(self, stream)
if len(self.value) != 4:
raise PDUError('Malformed PDU: Incorrect length %d' % len(self.value) )
# Build things with the correct type
myVarList = VarBindList()
for item in objectList[3]:
myVarList.append( VarBind(item[0], item[1]) )
return self.__class__( int(objectList[0]), int(objectList[1]), int(objectList[2]), myVarList)
class PDUError(Exception):
def __init__(self, args=None):
self.args = args
class Get(PDU):
""" A Get Request PDU
"""
asnTagNumber = asnTagNumbers['Get']
class GetNext(PDU):
""" A GetNext PDU
"""
asnTagNumber = asnTagNumbers['GetNext']
class Response(PDU):
""" A Response PDU
"""
    asnTagNumber = asnTagNumbers['Response']
class Set(PDU):
""" A Set PDU
"""
asnTagNumber = asnTagNumbers['Set']
class GenericTrap(Integer):
"""
Generic Trap type
"""
genericTraps = {
0: 'coldStart',
1: 'warmStart',
2: 'linkDown',
3: 'linkUp',
4: 'authenticationFailure',
5: 'egpNeighborLoss',
6: 'enterpriseSpecific',
}
def __str__(self):
""" Return an informative string instead of just a number
"""
return '%s: %d (%s)' % (self.__class__.__name__, self.value, self.genericTraps[self.value])
def enum(self, num=None):
"""
Return the stringified version of my enum.
If a specific number is passed in, use that,
otherwise, use my current value.
"""
if num is None:
num = self.value
return self.genericTraps[num]
class TrapPDU(Sequence):
""" A Trap PDU
"""
asnTagClass = asnTagClasses['CONTEXT']
asnTagNumber = asnTagNumbers['Trap']
def __init__(self, enterprise=None, agentAddr=None, genericTrap=None, specificTrap=None, timestamp=None, varBindList=None):
Sequence.__init__(self)
self.enterprise = enterprise # ObjectID
self.agentAddr = agentAddr # NetworkAddress
self.genericTrap = genericTrap # GenericTrap
self.specificTrap = specificTrap # Integer
self.timestamp = timestamp # TimeTicks
self.varBindList = varBindList # VarBindList
self.value = []
self.value.append(self.enterprise)
self.value.append(self.agentAddr)
self.value.append(self.genericTrap)
self.value.append(self.specificTrap)
self.value.append(self.timestamp)
self.value.append(self.varBindList)
# def encodeContents(self):
# return Sequence.encodeContents(self)
def decodeContents(self, stream):
""" Decode into a Get PDU Object
"""
objectList = Sequence.decodeContents(self, stream)
if len(self.value) != 6:
raise PDUError('Malformed TrapPDU: Incorrect length %d' % len(self.value) )
# Build things with the correct type
myVarList = VarBindList()
for item in objectList[5]:
myVarList.append( VarBind(item[0], item[1]) )
return self.__class__( objectList[0], objectList[1], GenericTrap(int(objectList[2])), objectList[3], objectList[4], myVarList)
# Add some new decode types
tagDecodeDict[0xa0] = Get
tagDecodeDict[0xa1] = GetNext
tagDecodeDict[0xa2] = Response
tagDecodeDict[0xa3] = Set
tagDecodeDict[0xa4] = TrapPDU
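if __name__ == '__main__':
    # Round-trip sketch -- not part of the original module.  It relies on
    # the ObjectID/Null types and the encode() method provided by
    # rfc1155, which is star-imported above.
    oid = ObjectID('.1.3.6.1.2.1.1.1.0')                # sysDescr.0
    vbl = VarBindList([VarBind(oid, Null())])
    msg = Message(community='public', data=Get(requestID=1, varBindList=vbl))
    print Message().decode(msg.encode())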
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/rfc1157.py",
"copies": "1",
"size": "7845",
"license": "mit",
"hash": 1553658741419499800,
"line_mean": 29.4069767442,
"line_max": 134,
"alpha_frac": 0.5928616953,
"autogenerated": false,
"ratio": 3.811953352769679,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4904815048069679,
"avg_score": null,
"num_lines": null
} |
import socket
import logging
import time
from libsnmp import debug
from libsnmp import rfc1155
from libsnmp import rfc1157
log = logging.getLogger('v1.SNMP')
class manager:
def __init__(self, dest, interface=('0.0.0.0', 0), socksize=0x10000):
self.dest = dest
self.interface = interface
self.socket = None
self.socksize = socksize
self.request_id = 1
return
def __del__(self):
self.close()
return
def get_socket(self):
return self.socket
def open(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind(self.interface)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.socksize)
return self.socket
def send(self, request, dst=(None, 0)):
if not self.socket:
self.open()
pass
self.socket.sendto(request, dst)
return
def read(self):
if not self.socket:
raise ValueError('Socket not initialized')
(message, src) = self.socket.recvfrom(self.socksize)
return (message, src)
def close(self):
if self.socket:
self.socket.close()
pass
self.socket = None
return
pass
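if __name__ == '__main__':
    # Synchronous usage sketch -- not part of the original module.  It
    # builds a Get request with the rfc1157 classes imported above and
    # sends it to a hypothetical agent on localhost.
    oid = rfc1157.ObjectID('.1.3.6.1.2.1.1.1.0')        # sysDescr.0
    vbl = rfc1157.VarBindList([rfc1157.VarBind(oid, rfc1157.Null())])
    msg = rfc1157.Message(data=rfc1157.Get(requestID=1, varBindList=vbl))
    mgr = manager(('localhost', 161))
    mgr.send(msg.encode(), ('localhost', 161))
    (reply, src) = mgr.read()                           # blocks for the answer
    print rfc1157.Message().decode(reply)
    mgr.close()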
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/role.py",
"copies": "1",
"size": "1495",
"license": "mit",
"hash": -8170934413562186000,
"line_mean": 21.6515151515,
"line_max": 82,
"alpha_frac": 0.5658862876,
"autogenerated": false,
"ratio": 3.976063829787234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9861657368544423,
"avg_score": 0.036058549768562244,
"num_lines": 66
} |
import sys
import asyncore
import types
from libsnmp import rfc1155
from libsnmp import rfc1157
from libsnmp import role
class manager(asyncore.dispatcher):
def __init__(self, (cb_fun, cb_ctx), dst=(None, 0), interface=('0.0.0.0', 0), timeout=0.25):
if not callable(cb_fun):
raise ValueError('Non-callable callback function')
self.cb_fun = cb_fun
self.cb_ctx = cb_ctx
self.timeout = timeout
asyncore.dispatcher.__init__(self)
self.manager = role.manager(dst, interface)
self.set_socket(self.manager.open())
return
def send(self, req, dst=(None, 0)):
self.manager.send(req, dst)
return
def handle_read(self):
(response, src) = self.manager.read()
self.cb_fun(self, self.cb_ctx, (response, src), (None, None, None))
return
def writable(self):
return 0
def handle_connect(self):
return
def handle_close(self):
self.manager.close()
return
def handle_error(self, exc_type=None, exc_value=None, exc_traceback=None):
if exc_type is None or exc_value is None or exc_traceback is None:
exc_type, exc_value, exc_traceback = sys.exc_info()
pass
if type(exc_type) == types.ClassType and issubclass(exc_type, ValueError):
self.cb_fun(self, self.cb_ctx, (None, None), (exc_type, exc_value, exc_traceback))
else:
raise
return
pass
def poll(self):
asyncore.poll(self.timeout)
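if __name__ == '__main__':
    # Event-driven usage sketch -- not part of the original module.  The
    # callback signature mirrors handle_read()/handle_error() above; the
    # agent address is hypothetical and no request is actually queued.
    def on_data(mgr, ctx, (data, src), (exc_type, exc_value, exc_tb)):
        if exc_type is not None:
            raise exc_type(exc_value)
        print 'received %d octets from %r' % (len(data), src)

    mgr = manager((on_data, None), dst=('localhost', 161))
    while True:
        mgr.poll()              # dispatches on_data whenever data arrives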
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/asynrole.py",
"copies": "1",
"size": "1749",
"license": "mit",
"hash": -2403174374758568400,
"line_mean": 25.5,
"line_max": 96,
"alpha_frac": 0.5803316181,
"autogenerated": false,
"ratio": 3.7292110874200426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9647468733995299,
"avg_score": 0.03241479430494852,
"num_lines": 66
} |
## An snmpmanager understands SNMPv1 and SNMPv2c messages
## and so it can encode and decode both.
import socket
import select
import logging
import Queue
import time
import os
import asyncore
from libsnmp import debug
from libsnmp import asynrole
from libsnmp import rfc1157
from libsnmp import rfc1905
from libsnmp import v1
from libsnmp import v2
log = logging.getLogger('snmp-manager')
## Used in typeSetter()
## Demo only, really
typeValDict = {
'i': 0x02, ## Integer
's': 0x04, ## String
'o': 0x06, ## ObjectID
't': 0x43, ## TimeTicks
'a': 0x40, ## IPAddress
'c': 0x41, ## Counter
'C': 0x46, ## Counter64
}
class snmpManager(asynrole.manager):
nextRequestID = 0L # global counter of requestIDs
def __init__(self, queueEmpty=None, trapCallback=None, interface=('0.0.0.0', 0), timeout=0.25):
""" Create a new snmpManager bound to interface
queueEmpty is a callback of what to do if I run out
of stuff to do. Default is to wait for more stuff.
"""
self.queueEmpty = queueEmpty
self.outbound = Queue.Queue()
self.callbacks = {}
# What to do if we get a trap
self.trapCallback = trapCallback
# initialise as an asynrole manager
asynrole.manager.__init__(self, (self.receiveData, None), interface=interface, timeout=timeout )
try:
# figure out the current system uptime
pass
except:
raise
def assignRequestID(self):
""" Assign a unique requestID
"""
reqID = self.nextRequestID
self.nextRequestID += 1
return reqID
def createGetRequestPDU(self, varbindlist, version=2):
reqID = self.assignRequestID()
if version == 1:
pdu = rfc1157.Get( reqID, varBindList=varbindlist )
elif version == 2:
pdu = rfc1905.Get( reqID, varBindList=varbindlist )
return pdu
def createGetNextRequestPDU(self, varbindlist, version=2):
reqID = self.assignRequestID()
if version == 1:
pdu = rfc1157.GetNext( reqID, varBindList=varbindlist )
elif version == 2:
pdu = rfc1905.GetNext( reqID, varBindList=varbindlist )
return pdu
def createSetRequestPDU(self, varbindlist, version=2):
reqID = self.assignRequestID()
if version == 1:
pdu = rfc1157.Set( reqID, varBindList=varbindlist )
elif version == 2:
pdu = rfc1905.Set( reqID, varBindList=varbindlist )
return pdu
def createGetRequestMessage(self, oid, community='public', version=2):
""" Creates a message object from a pdu and a
community string.
"""
if version == 1:
objID = rfc1157.ObjectID(oid)
val = rfc1157.Null()
varbindlist = rfc1157.VarBindList( [ rfc1157.VarBind(objID, val) ] )
pdu = self.createGetRequestPDU( varbindlist, 1 )
message = rfc1157.Message( community=community, data=pdu )
elif version == 2:
objID = rfc1905.ObjectID(oid)
val = rfc1905.Null()
varbindlist = rfc1905.VarBindList( [ rfc1905.VarBind(objID, val) ] )
pdu = self.createGetRequestPDU( varbindlist, 2 )
message = rfc1905.Message( community=community, data=pdu )
else:
raise ValueError('Unknown version %d' % version)
return message
def createGetNextRequestMessage(self, varbindlist, community='public', version=2):
""" Creates a message object from a pdu and a
community string.
"""
pdu = self.createGetNextRequestPDU( varbindlist, version )
if version == 1:
return rfc1157.Message( community=community, data=pdu )
if version == 2:
return rfc1905.Message( community=community, data=pdu )
def createSetRequestMessage(self, oid, valtype, value, community='public', version=2):
""" Creates a message object from a pdu and a
community string.
"""
if version == 1:
objID = rfc1157.ObjectID(oid)
val = rfc1157.tagDecodeDict[valtype](value)
varbindlist = rfc1157.VarBindList( [ rfc1157.VarBind(objID, val) ] )
pdu = self.createSetRequestPDU( varbindlist, 1 )
message = rfc1157.Message( community=community, data=pdu )
elif version == 2:
objID = rfc1905.ObjectID(oid)
val = rfc1905.tagDecodeDict[valtype](value)
varbindlist = rfc1905.VarBindList( [ rfc1905.VarBind(objID, val) ] )
            pdu = self.createSetRequestPDU( varbindlist, 2 )
message = rfc1905.Message( community=community, data=pdu )
else:
raise ValueError('Unknown version %d' % version)
return message
def createTrapMessage(self, pdu, community='public', version=2):
""" Creates a message object from a pdu and a
community string.
"""
if version == 1:
return v1.createTrapMessage( community=community, data=pdu )
elif version == 2:
return v2.createTrapMessage( community=community, data=pdu )
def createTrapPDU(self, varbindlist, version=2, enterprise='.1.3.6.1.4', agentAddr=None, genericTrap=6, specificTrap=0):
""" Creates a Trap PDU object from a list of strings and integers
along with a varBindList to make it a bit easier to build a Trap.
"""
if agentAddr is None:
agentAddr = self.getsockname()[0]
pass
if version == 1:
ent = rfc1157.ObjectID(enterprise)
agent = rfc1157.NetworkAddress(agentAddr)
gTrap = rfc1157.GenericTrap(genericTrap)
sTrap = rfc1157.Integer(specificTrap)
ts = rfc1157.TimeTicks( self.getSysUptime() )
pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
# log.debug('v1.trap is %s' % pdu)
elif version == 2:
ent = rfc1905.ObjectID(enterprise)
agent = rfc1905.NetworkAddress(agentAddr)
gTrap = rfc1157.GenericTrap(genericTrap)
sTrap = rfc1905.Integer(specificTrap)
ts = rfc1905.TimeTicks( self.getSysUptime() )
pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
pass
return pdu
def snmpGet(self, oid, remote, callback, community='public', version=2):
""" snmpGet issues an SNMP Get Request to remote for
the object ID oid
remote is a tuple of (host, port)
oid is a dotted string eg: .1.2.6.1.0.1.1.3.0
"""
msg = self.createGetRequestMessage( oid, community, version )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[msg.data.requestID] = callback
return msg.data.requestID
def snmpGetNext(self, varbindlist, remote, callback, community='public', version=2):
""" snmpGetNext issues an SNMP Get Next Request to remote for
the varbindlist that is passed in. It is assumed that you
have either built a varbindlist yourself or just pass
one in that was previously returned by an snmpGet or snmpGetNext
"""
msg = self.createGetNextRequestMessage( varbindlist, community, version )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[msg.data.requestID] = callback
return msg.data.requestID
def snmpSet(self, oid, valtype, value, remote, callback, community='public', version=2):
"""
snmpSet is slightly more complex in that you need to pass in
a combination of oid and value in order to set a variable.
Depending on the version, this will be built into the appropriate
varbindlist for message creation.
valtype should be a tagDecodeDict key
"""
msg = self.createSetRequestMessage( oid, valtype, value, community, version )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[msg.data.requestID] = callback
return msg.data.requestID
def snmpTrap(self, remote, trapPDU, community='public', version=2):
""" Queue up a trap for sending
"""
msg = self.createTrapMessage(trapPDU, community, version)
self.outbound.put( (msg, remote) )
def receiveData(self, manager, cb_ctx, (data, src), (exc_type, exc_value, exc_traceback) ):
""" This method should be called when data is received
from a remote host.
"""
# Exception handling
if exc_type is not None:
raise exc_type(exc_value)
# perform the action on the message by calling the
# callback from my list of callbacks, passing it the
# message and a reference to myself
# Decode the data into a message
msg = rfc1905.Message().decode(data)
# Decode it based on what version of message it is
if msg.version == 0:
# if __debug__: log.debug('Detected SNMPv1 message')
self.handleV1Message(msg)
elif msg.version == 1:
# if __debug__: log.debug('Detected SNMPv2 message')
self.handleV2Message(msg)
else:
log.error('Unknown message version %d detected' % msg.version)
log.error('version is a %s' % msg.version() )
raise ValueError('Unknown message version %d detected' % msg.version)
def handleV1Message(self, msg):
""" Handle reception of an SNMP version 1 message
"""
if isinstance(msg.data, rfc1157.PDU):
self.callbacks[msg.data.requestID](self, msg)
## remove the callback from my list once it's done
del self.callbacks[msg.data.requestID]
elif isinstance(msg.data, rfc1157.TrapPDU):
self.trapCallback(self, msg)
else:
log.info('Unknown SNMPv1 Message type received')
pass
def handleV2Message(self, msg):
""" Handle reception of an SNMP version 2c message
"""
if isinstance(msg.data, rfc1905.PDU):
self.callbacks[msg.data.requestID](self, msg)
## remove the callback from my list once it's done
del self.callbacks[msg.data.requestID]
elif isinstance(msg.data, rfc1905.TrapPDU):
self.trapCallback(self, msg)
else:
log.info('Unknown SNMPv2 Message type received')
pass
def enterpriseOID(self, partialOID):
""" A convenience method to automagically prepend the
'enterprise' prefix to the partial OID
"""
return '.1.3.6.1.2.1.' + partialOID
def run(self):
"""
Listen for incoming request thingies
and send pending requests
"""
while 1:
try:
# check for inbound messages
self.poll()
# send any pending outbound messages
request = self.outbound.get(0)
self.send( request[0].encode(), request[1] )
except Queue.Empty:
if self.queueEmpty:
self.queueEmpty(self)
pass
except:
raise
def getSysUptime(self):
""" This is a pain because of system dependence
Each OS has a different way of doing this and I
cannot find a Python builtin that will do it.
"""
try:
##
## The linux way
##
uptime = open('/proc/uptime').read().split()
upsecs = int(float(uptime[0]) * 100)
return upsecs
except:
return 0
def typeSetter(self, typestring):
"""
Used to figure out the right tag value key to use in
snmpSet. This is really only used for a more user-friendly
way of doing things from a frontend. Use the actual key
values if you're calling snmpSet programmatically.
"""
return typeValDict[typestring]
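if __name__ == '__main__':
    # Usage sketch -- not part of the original module.  Issue a v2c get
    # for sysDescr.0 against a hypothetical agent and print the varbinds
    # of whatever response arrives.
    def gotReply(manager, msg):
        for varbind in msg.data.varBindList:
            print varbind

    mgr = snmpManager()
    mgr.snmpGet('.1.3.6.1.2.1.1.1.0', ('localhost', 161), gotReply,
                community='public', version=2)
    mgr.run()       # services the outbound queue and replies forever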
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/snmpmanager.py",
"copies": "1",
"size": "12829",
"license": "mit",
"hash": 4240019484947737600,
"line_mean": 33.1196808511,
"line_max": 124,
"alpha_frac": 0.5984098527,
"autogenerated": false,
"ratio": 4.022891188460332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5121301041160332,
"avg_score": null,
"num_lines": null
} |
import socket
import select
import logging
import Queue
import time
import os
import asyncore
from libsnmp import debug
from libsnmp import asynrole
from libsnmp.rfc1157 import *
log = logging.getLogger('v1.SNMP')
log.setLevel(logging.INFO)
class SNMP(asynrole.manager):
nextRequestID = 0L # global counter of requestIDs
def __init__(self, interface=('0.0.0.0', 0), queueEmpty=None, trapCallback=None, timeout=0.25):
""" Create a new SNMPv1 object bound to localaddr
where localaddr is an address tuple of the form
('server', port)
queueEmpty is a callback of what to do if I run out
of stuff to do. Default is to wait for more stuff.
"""
self.queueEmpty = queueEmpty
self.outbound = Queue.Queue()
self.callbacks = {}
# What to do if we get a trap
self.trapCallback = trapCallback
# initialise as an asynrole manager
asynrole.manager.__init__(self, (self.receiveData, None), interface=interface, timeout=timeout )
try:
# figure out the current system uptime
pass
except:
raise
def assignRequestID(self):
""" Assign a unique requestID
"""
reqID = self.nextRequestID
self.nextRequestID += 1
return reqID
def createGetRequestPDU(self, varbindlist):
reqID = self.assignRequestID()
pdu = Get( reqID, varBindList=varbindlist )
return pdu
def createGetNextRequestPDU(self, varbindlist):
reqID = self.assignRequestID()
pdu = GetNext( reqID, varBindList=varbindlist )
return pdu
def createGetRequestMessage(self, oid, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
objID = ObjectID(oid)
val = Null()
varbindlist = VarBindList( [ VarBind(objID, val) ] )
pdu = self.createGetRequestPDU( varbindlist )
return Message( community=community, data=pdu )
def createGetNextRequestMessage(self, varbindlist, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
pdu = self.createGetNextRequestPDU( varbindlist )
return Message( community=community, data=pdu )
def createTrapMessage(self, pdu, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
return Message( community=community, data=pdu )
def createTrapPDU(self, varbindlist, enterprise='.1.3.6.1.4', agentAddr=None, genericTrap=6, specificTrap=0):
""" Creates a Trap PDU object from a list of strings and integers
along with a varBindList to make it a bit easier to build a Trap.
"""
ent = ObjectID(enterprise)
if not agentAddr:
agentAddr = self.getsockname()[0]
agent = NetworkAddress(agentAddr)
gTrap = GenericTrap(genericTrap)
sTrap = Integer(specificTrap)
ts = TimeTicks( self.getSysUptime() )
pdu = TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
# log.debug('v1.trap is %s' % pdu)
return pdu
def snmpGet(self, oid, remote, callback, community='public'):
""" snmpGet issues an SNMP Get Request to remote for
the object ID oid
remote is a tuple of (host, port)
oid is a dotted string eg: .1.2.6.1.0.1.1.3.0
"""
msg = self.createGetRequestMessage( oid, community )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[int(msg.data.requestID)] = callback
return msg.data.requestID
def snmpGetNext(self, varbindlist, remote, callback, community='public'):
""" snmpGetNext issues an SNMP Get Next Request to remote for
the varbindlist that is passed in. It is assumed that you
have either built a varbindlist yourself or just pass
one in that was previously returned by an snmpGet or snmpGetNext
"""
msg = self.createGetNextRequestMessage( varbindlist, community )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[int(msg.data.requestID)] = callback
return msg.data.requestID
def snmpSet(self, varbindlist, remote, callback, community='public'):
""" An snmpSet requires a bit more up front smarts, in that
you need to pass in a varbindlist of matching OIDs and
values so that the value type matches that expected for the
OID. This library does not care about any of that stuff.
"""
reqID = self.assignRequestID()
        pdu = Set( reqID, varBindList=varbindlist )
msg = Message( community=community, data=pdu )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[int(msg.data.requestID)] = callback
return msg.data.requestID
def snmpTrap(self, remote, trapPDU, community='public'):
""" Queue up a trap for sending
"""
msg = self.createTrapMessage(trapPDU, community)
self.outbound.put( (msg, remote) )
def createSetRequestMessage(self, varBindList, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
def receiveData(self, manager, cb_ctx, (data, src), (exc_type, exc_value, exc_traceback) ):
""" This method should be called when data is received
from a remote host.
"""
# Exception handling
if exc_type is not None:
raise exc_type(exc_value)
# perform the action on the message by calling the
# callback from my list of callbacks, passing it the
# message and a reference to myself
try:
# Decode the data into a message
msg = Message().decode(data)
# Decode it based on what version of message it is
if msg.version == 0:
if __debug__: log.debug('Detected SNMPv1 message')
else:
log.error('Unknown message version %d detected' % msg.version)
log.error('version is a %s' % msg.version() )
raise ValueError('Unknown message version %d detected' % msg.version)
# Figure out what kind of PDU the message contains
if isinstance(msg.data, PDU):
# if __debug__: log.debug('response to requestID: %d' % msg.data.requestID)
self.callbacks[int(msg.data.requestID)](self, msg)
# remove the callback from my list once it's done
del self.callbacks[int(msg.data.requestID)]
elif isinstance(msg.data, TrapPDU):
if __debug__: log.debug('Detected an inbound Trap')
self.trapCallback(self, msg)
else:
if __debug__: log.debug('Unknown message type')
# log any errors in callback
except Exception, e:
# log.error('Exception in callback: %s: %s' % (self.callbacks[int(msg.data.requestID)].__name__, e) )
log.error('Exception in receiveData: %s' % e )
raise
def enterpriseOID(self, partialOID):
""" A convenience method to automagically prepend the
'enterprise' prefix to the partial OID
"""
return '.1.3.6.1.2.1.' + partialOID
def run(self):
""" Listen for incoming request thingies
and send pending requests
"""
while True:
try:
# send any pending outbound messages
request = self.outbound.get(0)
self.send( request[0].encode(), request[1] )
except Queue.Empty:
if self.queueEmpty is not None:
self.queueEmpty(self)
pass
# check for inbound messages
self.poll()
time.sleep(0.1)
def getSysUptime(self):
""" This is a pain because of system dependence
Each OS has a different way of doing this and I
cannot find a Python builtin that will do it.
"""
try:
##
## The linux way
##
uptime = open('/proc/uptime').read().split()
upsecs = int(float(uptime[0]) * 100)
return upsecs
except:
return 0
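if __name__ == '__main__':
    # Trap-sending sketch -- not part of the original module.  The
    # enterprise OID, varbind payload and manager address are all made up.
    snmp = SNMP()
    oid = ObjectID('.1.3.6.1.4.1.9999.1.0')
    vbl = VarBindList([VarBind(oid, OctetString('something happened'))])
    trap = snmp.createTrapPDU(vbl, enterprise='.1.3.6.1.4.1.9999',
                              agentAddr='127.0.0.1')
    snmp.snmpTrap(('localhost', 162), trap, community='public')
    snmp.run()      # run() loops forever, draining the outbound queue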
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/v1.py",
"copies": "1",
"size": "9076",
"license": "mit",
"hash": -7805561379218646000,
"line_mean": 34.3151750973,
"line_max": 113,
"alpha_frac": 0.599052446,
"autogenerated": false,
"ratio": 4.186346863468635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004483415259365735,
"num_lines": 257
} |
import logging
import debug
import rfc1157
from rfc1902 import *
log = logging.getLogger('rfc1905')
asnTagNumbers['GetBulk'] = 0x05
asnTagNumbers['Inform'] = 0x06
asnTagNumbers['TrapV2'] = 0x07
asnTagNumbers['Report'] = 0x08
max_bindings = 2147483647L
class VarBind(rfc1157.VarBind):
""" VarBind redefined here to place it in the same namespace
"""
pass
## We need to add some special types because ucd-snmp uses
## context specific values for the CHOICE within a VarBind
class NoSuchObject(rfc1157.Null):
def __str__(self):
return('No Such Object')
class NoSuchInstance(rfc1157.Null):
def __str__(self):
return('No Such Instance')
class EndOfMibView(rfc1157.Null):
def __str__(self):
return('EndOfMibView')
class VarBindList(rfc1157.VarBindList):
""" An SNMPv2 VarBindList has a maximum size of max_bindings
"""
def __init__(self, value=[]):
if len(value) > max_bindings:
            raise ValueError('A VarBindList must not contain more than %d entries' % max_bindings)
rfc1157.VarBindList.__init__(self, value)
class Message(rfc1157.Message):
def __init__(self, version=1, community='public', data=None):
rfc1157.Message.__init__(self, version, community, data)
class ErrorStatus(rfc1157.ErrorStatus):
""" An SNMPv2 Error status
"""
def __init__(self, value):
rfc1157.ErrorStatus.__init__(self, value)
# add to the SNMPv1 error strings
self.errString[6] = 'Access is not permitted'
self.errString[7] = 'Type is incorrect'
self.errString[8] = 'Length is incorrect'
self.errString[9] = 'Encoding is incorrect'
self.errString[10] = 'Value is incorrect'
self.errString[11] = 'No creation'
self.errString[12] = 'Value is inconsistent'
        self.errString[13] = 'Resource Unavailable'
self.errString[14] = 'Commit Failed'
self.errString[15] = 'Undo Failed'
self.errString[16] = 'Authorization Error'
self.errString[17] = 'Not Writable'
self.errString[18] = 'Inconsistent Name'
self.errNum[6] = 'noAccess'
self.errNum[7] = 'wrongType'
self.errNum[8] = 'wrongLength'
self.errNum[9] = 'wrongEncoding'
self.errNum[10] = 'wrongValue'
self.errNum[11] = 'noCreation'
self.errNum[12] = 'inconsistentValue'
self.errNum[13] = 'resourceUnavailable'
self.errNum[14] = 'commitFailed'
self.errNum[15] = 'undoFailed'
self.errNum[16] = 'authorizationError'
self.errNum[17] = 'notWritable'
self.errNum[18] = 'inconsistentName'
class PDU(rfc1157.PDU):
""" SNMPv2 PDUs are very similar to SNMPv1 PDUs
"""
asnTagClass = asnTagClasses['CONTEXT']
def __init__(self, requestID=0, errorStatus=0, errorIndex=0, varBindList=[]):
rfc1157.PDU.__init__(self)
if errorIndex > max_bindings:
raise ValueError('errorIndex must be <= %d' % max_bindings)
self.requestID = Integer32(requestID)
self.errorStatus = ErrorStatus(errorStatus)
self.errorIndex = Integer(errorIndex)
self.varBindList = VarBindList(varBindList)
self.value = [
self.requestID,
self.errorStatus,
self.errorIndex,
self.varBindList,
]
# def decodeContents(self, stream):
# """ Decode into a PDU object
# """
# objectList = Sequence.decodeContents(self, stream)
# if len(self.value) != 4:
# raise PDUError('Malformed PDU: Incorrect length %d' % len(self.value) )
#
# # Build things with the correct types
# for item in objectList[3]:
# myVarList.append( VarBind(item[0], item[1]) )
#
# return self.__class__( int(objectList[0]), int(objectList[1]), int(objectList[2]), myVarList)
class BulkPDU(Sequence):
""" BulkPDU is a new type of PDU specifically for doing GetBulk
requests in SNMPv2.
"""
asnTagClass = asnTagClasses['CONTEXT']
def __init__(self, requestID=0, nonRepeaters=0, maxRepetitions=0, varBindList=[]):
Sequence.__init__(self)
if nonRepeaters > max_bindings:
raise ValueError('nonRepeaters must be <= %d' % max_bindings)
if maxRepetitions > max_bindings:
raise ValueError('nonRepeaters must be <= %d' % max_bindings)
self.requestID = Integer32(requestID)
self.nonRepeaters = Integer(nonRepeaters)
self.maxRepetitions = Integer(maxRepetitions)
self.varBindList = VarBindList(varBindList)
self.value = [
self.requestID,
self.nonRepeaters,
self.maxRepetitions,
self.varBindList
]
def decodeContents(self, stream):
""" Decode into a BulkPDU object
"""
objectList = Sequence.decodeContents(self, stream)
if len(self.value) != 4:
raise PDUError('Malformed BulkPDU: Incorrect length %d' % len(self.value) )
        # Build things with the correct types
        myVarList = VarBindList()
        for item in objectList[3]:
            myVarList.append( VarBind(item[0], item[1]) )
return self.__class__( int(objectList[0]), int(objectList[1]), int(objectList[2]), myVarList)
class Get(PDU):
""" An SNMPv2 Get Request PDU
"""
asnTagNumber = asnTagNumbers['Get']
class GetNext(PDU):
""" An SNMPv2 Get Next Request PDU
"""
asnTagNumber = asnTagNumbers['GetNext']
class Response(PDU):
""" An SNMPv2 Response PDU
"""
asnTagNumber = asnTagNumbers['Response']
class Set(PDU):
""" An SNMPv2 Set Request PDU
"""
asnTagNumber = asnTagNumbers['Set']
class GetBulk(BulkPDU):
""" An SNMPv2 Get Next Request PDU
"""
asnTagNumber = asnTagNumbers['GetBulk']
class Inform(PDU):
""" An SNMPv2 Get Next Request PDU
"""
asnTagNumber = asnTagNumbers['Inform']
class TrapV2(PDU):
""" An SNMPv2 Trap PDU
"""
asnTagNumber = asnTagNumbers['TrapV2']
class Report(PDU):
""" An SNMPv2 Report PDU
"""
asnTagNumber = asnTagNumbers['Report']
class PDUError(Exception):
def __init__(self, args=None):
self.args = args
## Add some new decode types
tagDecodeDict[0xa2] = Response
tagDecodeDict[0xa5] = GetBulk
tagDecodeDict[0xa6] = Inform
tagDecodeDict[0xa7] = TrapV2
tagDecodeDict[0xa8] = Report
## ucd-snmp returns context-specific values at times
tagDecodeDict[0x80] = NoSuchObject
tagDecodeDict[0x81] = NoSuchInstance
tagDecodeDict[0x82] = EndOfMibView
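if __name__ == '__main__':
    # Encoding sketch -- not part of the original module.  GetBulk is the
    # main v2 addition: it replaces errorStatus/errorIndex with
    # nonRepeaters and maxRepetitions.  The OID below is illustrative.
    oid = ObjectID('.1.3.6.1.2.1.2.2')                  # ifTable
    vbl = VarBindList([VarBind(oid, Null())])
    pdu = GetBulk(requestID=1, nonRepeaters=0, maxRepetitions=10,
                  varBindList=vbl)
    msg = Message(community='public', data=pdu)
    print Message().decode(msg.encode())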
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/rfc1905.py",
"copies": "1",
"size": "6667",
"license": "mit",
"hash": 1303991376097038300,
"line_mean": 28.7633928571,
"line_max": 102,
"alpha_frac": 0.6341682916,
"autogenerated": false,
"ratio": 3.418974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45531426505743594,
"avg_score": null,
"num_lines": null
} |
#import socket
#import select
import logging
#import Queue
#import time
#import os
#import asyncore
import traceback
from libsnmp import debug
from libsnmp import rfc1155
from libsnmp import rfc1157
from libsnmp import rfc1902
from libsnmp import rfc1905
#from libsnmp import asynrole
from libsnmp import v1
log = logging.getLogger('v2.SNMP')
log.setLevel(logging.INFO)
class SNMP(v1.SNMP):
def createGetRequestPDU(self, varbindlist):
reqID = self.assignRequestID()
pdu = rfc1905.Get( reqID, varBindList=varbindlist )
return pdu
def createGetNextRequestPDU(self, varbindlist):
reqID = self.assignRequestID()
pdu = rfc1905.GetNext( reqID, varBindList=varbindlist )
return pdu
def createGetRequestMessage(self, oidlist, community='public'):
"""
Creates a message object from a pdu and a
community string.
@param oidlist: a list of oids to place in the message.
"""
varbinds = []
for oid in oidlist:
objID = rfc1155.ObjectID(oid)
val = rfc1155.Null()
varbinds.append( rfc1157.VarBind(objID, val) )
pass
varbindlist = rfc1905.VarBindList( varbinds )
pdu = self.createGetRequestPDU( varbindlist )
return rfc1905.Message( community=community, data=pdu )
def createGetNextRequestMessage(self, varbindlist, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
        pdu = self.createGetNextRequestPDU( varbindlist )
return rfc1905.Message( community=community, data=pdu )
def createTrapMessage(self, pdu, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
return rfc1905.Message( community=community, data=pdu )
def createTrap(self, varbindlist, enterprise='.1.3.6.1.4', agentAddr=None, genericTrap=6, specificTrap=0):
""" Creates a Trap PDU object from a list of strings and integers
along with a varBindList to make it a bit easier to build a Trap.
"""
ent = rfc1155.ObjectID(enterprise)
if not agentAddr:
agentAddr = self.getsockname()[0]
agent = rfc1155.NetworkAddress(agentAddr)
gTrap = rfc1157.GenericTrap(genericTrap)
sTrap = rfc1155.Integer(specificTrap)
ts = rfc1155.TimeTicks( self.getSysUptime() )
pdu = rfc1157.TrapPDU(ent, agent, gTrap, sTrap, ts, varbindlist)
return pdu
def snmpGet(self, oid, remote, callback, community='public'):
""" snmpGet issues an SNMP Get Request to remote for
the object ID oid
remote is a tuple of (host, port)
oid is a dotted string eg: .1.2.6.1.0.1.1.3.0
"""
msg = self.createGetRequestMessage( oid, community )
# log.debug('sending message: %s' % msg)
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[int(msg.data.requestID)] = callback
return msg.data.requestID
def snmpGetNext(self, varbindlist, remote, callback, community='public'):
""" snmpGetNext issues an SNMP Get Next Request to remote for
the varbindlist that is passed in. It is assumed that you
have either built a varbindlist yourself or just pass
one in that was previously returned by an snmpGet or snmpGetNext
"""
msg = self.createGetNextRequestMessage( varbindlist, community )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[int(msg.data.requestID)] = callback
return msg.data.requestID
def snmpSet(self, varbindlist, remote, callback, community='public'):
""" An snmpSet requires a bit more up front smarts, in that
you need to pass in a varbindlist of matching OIDs and
values so that the value type matches that expected for the
OID. This library does not care about any of that stuff.
"""
reqID = self.assignRequestID()
        pdu = rfc1905.Set( reqID, varBindList=varbindlist )
msg = rfc1905.Message( community=community, data=pdu )
# add this message to the outbound queue as a tuple
self.outbound.put( (msg, remote) )
# Add the callback to my dictionary with the requestID
# as the key for later retrieval
self.callbacks[int(msg.data.requestID)] = callback
return msg.data.requestID
def snmpTrap(self, remote, trapPDU, community='public'):
""" Queue up a trap for sending
"""
msg = self.createTrapMessage(trapPDU, community)
self.outbound.put( (msg, remote) )
def createSetRequestMessage(self, varBindList, community='public'):
""" Creates a message object from a pdu and a
community string.
"""
def receiveData(self, manager, cb_ctx, (data, src), (exc_type, exc_value, exc_traceback) ):
""" This method should be called when data is received
from a remote host.
"""
# Exception handling
if exc_type is not None:
raise exc_type(exc_value)
# perform the action on the message by calling the
# callback from my list of callbacks, passing it the
# message and a reference to myself
try:
# Decode the data into a message
msg = rfc1905.Message().decode(data)
# Decode it based on what version of message it is
if msg.version == 0:
if __debug__: log.debug('Detected SNMPv1 message')
elif msg.version == 1:
if __debug__: log.debug('Detected SNMPv2 message')
else:
log.error('Unknown message version %d detected' % msg.version)
log.error('version is a %s' % msg.version() )
raise ValueError('Unknown message version %d detected' % msg.version)
# Figure out what kind of PDU the message contains
if isinstance(msg.data, rfc1157.PDU):
# if __debug__: log.debug('response to requestID: %d' % msg.data.requestID)
self.callbacks[int(msg.data.requestID)](self, msg)
# remove the callback from my list once it's done
del self.callbacks[int(msg.data.requestID)]
elif isinstance(msg.data, rfc1157.TrapPDU):
if __debug__: log.debug('Detected an inbound Trap')
self.trapCallback(self, msg)
else:
log.debug('Unknown message type')
# log any errors in callback
except Exception, e:
# log.error('Exception in callback: %s: %s' % (self.callbacks[int(msg.data.requestID)].__name__, e) )
log.error('Exception in receiveData: %s' % e )
traceback.print_exc()
#raise
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/v2.py",
"copies": "1",
"size": "7369",
"license": "mit",
"hash": -2463921900430932500,
"line_mean": 36.5969387755,
"line_max": 112,
"alpha_frac": 0.6250508889,
"autogenerated": false,
"ratio": 3.9983722192078135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5123423108107814,
"avg_score": null,
"num_lines": null
} |
import util
import debug
import logging
import types
from rfc1155 import *
log = logging.getLogger('rfc1902')
## change logging level.. options of:
##
## logging.CRITICAL
## logging.ERROR
## logging.WARN
## logging.INFO
## logging.DEBUG
log.setLevel(logging.INFO)
# Add a new TagNumber for encoding purposes
asnTagNumbers['Counter64'] = 0x06
class Integer32(Integer):
""" A 32 bit integer
"""
MINVAL = -2147483648L
    MAXVAL = 2147483647L
class Counter32(Counter):
""" A 32 bit counter
"""
pass
class Guage32(Guage):
""" A 32 bit Guage
"""
pass
class Counter64(Counter):
""" A 64 bit counter
"""
MINVAL = 0L
MAXVAL = 18446744073709551615L
    asnTagNumber = asnTagNumbers['Counter64']
class OctetString(OctetString):
""" An SNMP v2 OctetString must be between
0 and 65535 bytes in length
"""
    def __init__(self, value=''):
        if len(value) > 65535:
            raise ValueError('OctetString must be shorter than 65535 bytes')
        # this class shadows the rfc1155 OctetString name, so calling
        # OctetString.__init__ here would recurse into this method;
        # go through the common Asn1Object base instead
        Asn1Object.__init__(self)
        self.value = value
## Modify tag decode lookup table to use SNMPv2 classes
## instead of the old SNMPv1 classes. Little actual difference
## apart from the class names.
tagDecodeDict[0x02] = Integer32
tagDecodeDict[0x41] = Counter32
tagDecodeDict[0x42] = Guage32
tagDecodeDict[0x46] = Counter64
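# Added check (not in the original source): with the table patched
# above, an application-class tag octet of 0x46 now decodes to the
# SNMPv2 Counter64 type.
assert tagDecodeDict[0x46] is Counter64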
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/rfc1902.py",
"copies": "1",
"size": "1489",
"license": "mit",
"hash": -7154582475362335000,
"line_mean": 19.6805555556,
"line_max": 76,
"alpha_frac": 0.678307589,
"autogenerated": false,
"ratio": 3.3917995444191344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9527864756170339,
"avg_score": 0.008448475449759197,
"num_lines": 72
} |
# I've included here all the basic SNMPv1 types, since they are used
# by SNMPv2 and v3.
import util
import debug
import logging
import types
import copy
log = logging.getLogger('Asn1Object')
## change logging level.. options of:
##
## logging.CRITICAL
## logging.ERROR
## logging.WARN
## logging.INFO
## logging.DEBUG
##
log.setLevel(logging.INFO)
asnTagClasses = {
'UNIVERSAL': 0x00,
'APPLICATION': 0x40,
'CONTEXT': 0x80,
'PRIVATE': 0xC0
}
asnTagFormats = {
'PRIMITIVE': 0x00,
'CONSTRUCTED': 0x20
}
asnTagNumbers = {
'Integer': 0x02,
'OctetString': 0x04,
'Null': 0x05,
'ObjectID': 0x06,
'Sequence': 0x10,
# Application types
'IPAddress': 0x00,
'Counter': 0x01,
'Guage': 0x02,
'TimeTicks': 0x03,
'Opaque': 0x04,
}
class Asn1Object:
"""Base class for all Asn1Objects This is only intended to
support a specific subset of ASN1 stuff as defined by the RFCs to
keep things as simple as possible."""
##
## The asnTag is a number used with BER to encode/decode the
## object.
##
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = None
value = None
def __init__(self):
return
def encode(self):
""" encode() this Asn1Object using BER"""
contents = self.encodeContents()
resultlist = []
resultlist.append(self.encodeIdentifier())
resultlist.append(self.encodeLength(len(contents)))
resultlist.append(contents)
result = ''.join(resultlist)
return result
##
##
def decodeTag(self, stream):
"""Decode a BER tag field, returning the tag and the remainder
of the stream"""
tag = ord(stream[0])
n = 1
if tag & 0x1F == 0x1F:
## A large tag is encoded using concatenated 7-bit values
## over the following octets, ignoring the initial 5 bits
## in the first octet. The 8th bit represents a
## follow-on.
tag = 0
while 1:
byte = ord(stream[n])
tag = (tag << 7) | (byte & 0x7F)
n += 1
if not byte & 0x80: break
pass
pass
return tag, stream[n:]
##
##
def decodeLength(self, stream):
"""Decode a BER length field, returing the length and the
remainder of the stream"""
length = ord(stream[0])
n = 1
if length & 0x80:
## Multi-Octet length encoding. The first octet
## represents the run-length (the number of octets used to
## build the length)
run = length & 0x7F
length = 0
for i in xrange(run):
length = (length << 8) | ord(stream[n])
n += 1
pass
pass
return length, stream[n:]
##
##
def decode(self, stream):
"""decode() an octet stream into a sequence of Asn1Objects
This method should be overridden by subclasses to define how
to decode one of themselves from a fixed length stream. This
general case method looks at the identifier at the beginning
of a stream of octets and uses the appropriate decode() method
of that known object. Attempts to decode() an unknown object
type result in an error. """
if type(stream) != types.StringType:
raise TypeError('stream should be of type StringType, not %s' % type(stream) )
objects = []
while len(stream) > 0:
(tag, stream) = self.decodeTag(stream)
(length, stream) = self.decodeLength(stream)
objectData = stream[:length]
stream = stream[length:]
try:
decoder = tagDecodeDict[tag]()
except KeyError:
raise ValueError('Unknown ASN.1 Type %d' % (tag) )
objects.append( decoder.decodeContents(objectData) )
pass
return objects
def encodeContents(self):
"""encodeContents should be overridden by subclasses to encode
the contents of a particular type"""
raise NotImplementedError
def encodeIdentifier(self):
"""encodeIdentifier() returns encoded identifier octets for
this object. Section 6.3 of ITU-T-X.209 """
if self.asnTagNumber < 0x1F:
result = chr(self.asnTagClass | self.asnTagFormat | self.asnTagNumber)
else:
## Encode each number of the asnTagNumber from 31 upwards
## as a sequence of 7-bit numbers with bit 8 set to 1 for
## all but the last octet. Bit 8 set to 0 signifies the
## last octet of the Identifier octets
# encode the first octet
resultlist = []
resultlist.append(chr(self.asnTagClass | self.asnTagFormat | 0x1F))
            # encode the remaining octets as base-128 digits, most
            # significant first, with bit 8 set on all but the last
            integer = self.asnTagNumber
            octets = [chr(integer & 0x7F)]
            integer = integer >> 7
            while integer > 0:
                octets.insert(0, chr(0x80 | (integer & 0x7F)))
                integer = integer >> 7
                pass
            resultlist.extend(octets)
            result = ''.join(resultlist)
            pass
return result
def encodeLength(self, length):
"""encodeLength() takes the length of the contents and
produces the encoding for that length. Section 6.3 of
ITU-T-X.209 """
        if length < 128:
result = chr( length & 0xff )
pass
else:
# Long form - Octet one is the number of octets used to
# encode the length It has bit 8 set to 1 and the
# remaining 7 bits are used to encode the number of octets
# used to encode the length Each subsequent octet uses all
# 8 bits to encode the length
if __debug__: log.debug('Long length encoding required for length of %d' % length)
resultlist = []
numOctets = 0
while length > 0:
resultlist.insert(0, chr(length & 0xff))
length = length >> 8
numOctets += 1
pass
# Add a 1 to the front of the octet
if __debug__: log.debug('long length encoding of: %d octets' % numOctets)
numOctets = numOctets | 0x80
resultlist.insert(0, chr(numOctets & 0xff))
result = ''.join(resultlist)
pass
return result
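    def _demoLengthCodec(self):
        """Added round-trip sketch (not in the original source) for the
        length codec above: 300 encodes to the long form
        '\\x82\\x01\\x2c' (two length octets follow the 0x82 marker)
        and decodes back with no remainder."""
        octets = self.encodeLength(300)
        assert octets == '\x82\x01\x2c'
        (length, rest) = self.decodeLength(octets)
        assert length == 300 and rest == ''
        return length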
def encodeEndOfContents(self):
return '\000\000'
##
##
def __eq__(self, other):
"""
        Compare two instances by comparison of their value fields
only.
"""
return isinstance(other, self.__class__) and self.value == other.value
# return self.value == other.value
##
##
def __ne__(self, other):
"""Compare two objects for inequality"""
return not (self == other)
##
##
def toObjectID(self):
raise TypeError
pass
class Integer(Asn1Object):
"""An ASN.1 Integer type"""
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagNumber = asnTagNumbers['Integer']
MINVAL = -2147483648L
MAXVAL = 2147483647L
def __init__(self, value=0L):
Asn1Object.__init__(self)
if not self.MINVAL <= value <= self.MAXVAL:
if __debug__: log.debug('minval: %d' % self.MINVAL)
if __debug__: log.debug('maxval: %d' % self.MAXVAL)
raise ValueError('Integer value of %d is out of bounds' % value)
self.value = value
return
def __str__(self):
return '%d' % self.value
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __long__(self):
return self.value
def __hex__(self):
return hex(self.value)
def __oct__(self):
return oct(self.value)
def __call__(self):
""" Return the value of the Integer when referring to it directly
"""
return self.value
# Define some handy arithmetic operations
def __eq__(self, other):
try:
if self.value == long(other):
return True
except:
raise
return False
def __add__(self, integer):
""" Add a value
"""
if not isinstance(integer, self.__class__):
integer = self.__class__(integer)
return self.__class__(self.value + integer.value)
    def __sub__(self, integer):
        if not isinstance(integer, self.__class__):
            integer = self.__class__(integer)
        return self.__class__(self.value - integer.value)
def __hash__(self):
""" Standard Python integers are easy to hash
so we just do the same thing.
"""
return self.value.__hash__()
def encodeContents(self):
## We handle two special cases otherwise we handle positive
## and negative numbers independently
integer = self.value
if integer == 0:
return '\000'
elif integer == -1:
return '\377'
elif integer > 0:
result = []
while integer != 0:
result.insert(0, integer & 0xff)
integer >>= 8
pass
if result[0] & 0x80:
result.insert(0, 0)
pass
return ''.join(map(chr, result))
else:
result = []
while integer != -1:
result.insert(0, integer & 0xff)
integer >>= 8
pass
            if result[0] & 0x80 != 0x80:
                result.insert(0, 0xff)
pass
return ''.join(map(chr, result))
pass
def decodeContents(self, stream):
""" Decode some input octet stream into a signed ASN.1 integer
"""
##
## This method wins because it's consistently the fastest
##
input = map(ord, stream)
if __debug__: log.debug('Decoding %s' % util.octetsToHex(stream) )
self.value = 0L
byte = input[0]
if (byte & 0x80) == 0x80:
negbit = 0x80L
self.value = byte & 0x7f
for i in xrange(1, len(input)):
negbit <<= 8
self.value = (self.value << 8) | input[i]
pass
self.value = self.value - negbit
else:
self.value = long(byte)
for i in xrange(1,len(input)):
self.value = (self.value << 8) | input[i]
pass
pass
if __debug__: log.debug('decoded as: %d' % self.value)
return self
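    def _demoTwosComplement(self):
        """Added round-trip sketch (not in the original source):
        -129 encodes to '\\xff\\x7f' under the twos-complement rules
        above and decodes back to -129."""
        octets = Integer(-129).encodeContents()
        assert octets == '\xff\x7f'
        assert Integer().decodeContents(octets).value == -129
        return octets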
def decodeTwosInteger1(self, stream):
""" One algorithm for decoding twos complement Integers """
##
## Original pysnmp algorithm
##
bytes = map(ord, stream)
if bytes[0] & 0x80:
bytes.insert(0, -1L)
pass
result = reduce(lambda x,y: x<<8 | y, bytes, 0L)
return result
def decodeTwosInteger2(self, stream):
"""A second algorithm for decoding twos complement Integers
Coded from scratch by jpw """
val = 0
byte = ord(stream[0])
if (byte & 0x80) == 0x80:
negbit = 0x80L
val = byte & 0x7f
for i in range(len(stream)-1):
byte = ord(stream[i+1])
negbit <<= 8
val = (val << 8) | byte
pass
val = val - negbit
else:
val = byte
for i in range(len(stream)-1):
byte = ord(stream[i+1])
val = (val<<8) | byte
pass
pass
return val
def decodeTwosInteger3(self, stream):
""" A third algorithm for decoding twos complement Integers
Coded from scratch by jpw """
val = 0
bytes = map(ord, stream)
if bytes[0] & 0x80:
bytes[0] = bytes[0] & 0x7f # invert bit 8
negbit = 0x80L
for i in bytes:
negbit <<= 8
val = (val << 8) | i
pass
val = val - (negbit >> 8)
else:
for i in bytes:
val = (val << 8) | i
pass
pass
return val
##
##
def toObjectID(self):
return ObjectID([self.value])
pass
class OctetString(Asn1Object):
"""An ASN.1 Octet String type"""
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagNumber = asnTagNumbers['OctetString']
def __init__(self, value=''):
Asn1Object.__init__(self)
self.value = copy.copy(value)
return
def __str__(self):
return self.value
def encodeContents(self):
"""An OctetString is already encoded. Whee!"""
return self.value
def decodeContents(self, stream):
"""An OctetString is already decoded. Whee! """
self.value = stream
return self
def __hex__(self):
return ''.join( [ '%.2X' % ord(x) for x in self.value ] )
def __oct__(self):
return ''.join( [ '%3o' % ord(x) for x in self.value ] )
def toObjectID(self):
return ObjectID([ ord(x) for x in self.value])
pass
class ObjectID(Asn1Object):
"""An ASN.1 Object Identifier type """
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = asnTagNumbers['ObjectID']
def __init__(self, value=None):
"""Create an ObjectID - value is a list of subids as a string
or list"""
Asn1Object.__init__(self)
if type(value) == types.StringType:
value = value.strip('.')
subidlist = value.split('.')
self.value = []
for subid in subidlist:
number = int(subid)
if number < 0 or number > 0x7FFFFFFF:
raise ValueError("SubID out of range")
self.value.append(number)
pass
pass
elif type(value) == types.ListType or type(value) == types.NoneType:
self.value = copy.copy(value)
elif type(value) == types.TupleType:
self.value = list(value)
elif type(value) == types.IntType:
self.value = [value]
elif isinstance(value, ObjectID):
self.value = value.value[:]
else:
raise TypeError('unknown type passed as OID')
return
def __str__(self):
if self.value is not None:
# Prepend a leading '.' to the OID string
return '.' + '.'.join( [str(x) for x in self.value] )
else:
return ''
pass
def __len__(self):
"""Return the length of the value field"""
if self.value is None:
return 0
else:
return len(self.value)
pass
def __getitem__(self, key):
if isinstance(key, int):
return self.value.__getitem__(key)
else:
return ObjectID(self.value.__getitem__(key))
pass
def __delitem__(self, key):
self.value.__delitem__(key)
return
def copy(self):
"""
Return a copy of this object as a new object
"""
return ObjectID(self.value)
def append(self, subid):
if type(subid) == types.IntType:
self.value.append(subid)
else:
raise TypeError
return
def extend(self, other):
if isinstance(other, self.__class__):
self.value.extend(other.value)
else:
self.value.extend(other)
pass
return None
def isPrefixOf(self, other):
"""
        Compares this ObjectID with another ObjectID and returns
        True if this ObjectID is a prefix of the other one.
"""
if not isinstance(other, self.__class__):
raise TypeError('Attempt to compare ObjectID with non-ObjectID: %s' % other.__repr__() )
if len(other) < len(self):
return False
for i in range(len(self)):
if self.value[i] != other.value[i]:
return False
pass
return True
def encodeContents(self):
"""encode() an objectID into an octet stream """
result = []
idlist = self.value[:]
# Do the bit with the first 2 subids
# section 22.4 of X.209
idlist.reverse()
subid1 = (idlist.pop() * 40) + idlist.pop()
idlist.reverse()
idlist.insert(0, subid1)
for subid in idlist:
if subid < 128:
result.append(chr(subid & 0x7f))
else:
position = len(result)
result.append(chr(subid & 0x7f))
subid = subid >> 7
while subid > 0:
result.insert(position, chr(0x80 | (subid & 0x7f)))
subid = subid >> 7
pass
pass
pass
return ''.join(result)
##
##
def decodeContents(self, stream):
"""decode() a stream into an ObjectID()"""
self.value = []
bytes = map(ord, stream)
if len(stream) == 0:
raise ValueError('stream of zero length in %s' % self.__class__.__name__)
##
## Do the funky decode of the first octet
##
if bytes[0] < 128:
self.value.append( int(bytes[0] / 40) )
self.value.append( int(bytes[0] % 40) )
else:
## I haven't bothered putting in the convoluted logic here
## because the highest likely assignment for the first
## octet is 83 according to Annex B of X.208 Those X.209
## does give as an example 2.100.3, which is kinda stupid.
## Actually, a lot of the space-saving encodings, like
## this first octet, are a real PITA later on. So yeah,
## stuff it, we'll just raise an exception.
raise NotImplementedError('First octet is > 128! Unsupported oid detected')
##
## Decode the rest of the octets
##
n = 1
while n < len(bytes):
subid = bytes[n]
n += 1
##
## If bit 8 is not set, this is the last octet of this subid
## If bit 8 is set, the subid spans this octet and the ones
## afterwards, up until bit 8 isn't set.
##
if subid & 0x80 == 0x80:
val = subid & 0x7f
while (subid & 0x80) == 0x80:
subid = bytes[n]
n += 1
val = (val << 7) | (subid & 0x7f)
pass
self.value.append(val)
else:
self.value.append(subid)
pass
pass
return self
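    def _demoOidCodec(self):
        """Added round-trip sketch (not in the original source): the
        first two subids of .1.3.6.1 pack into a single octet
        (1*40 + 3 == 43 == 0x2b) on encode, and decode splits them
        back apart."""
        octets = ObjectID('.1.3.6.1').encodeContents()
        assert octets == '\x2b\x06\x01'
        assert ObjectID().decodeContents(octets).value == [1, 3, 6, 1]
        return octets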
def toObjectID(self):
return ObjectID(copy.copy(self.value))
pass
class Null(Asn1Object):
"""An ASN.1 Object Identifier type"""
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = asnTagNumbers['Null']
def __str__(self):
return '<Null>'
def encodeContents(self):
return ''
def decodeContents(self, stream):
if len(stream) != 0:
raise ValueError('Input stream too long for %s' % self.__class__.__name__)
return self
pass
##
##
class Sequence(Asn1Object):
"""A Sequence is basically a list of name, value pairs with the
name being an object Type and the value being an instance of an
Asn1Object of that Type."""
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagFormat = asnTagFormats['CONSTRUCTED']
asnTagNumber = asnTagNumbers['Sequence']
value = []
    def __init__(self, value=None):
        Asn1Object.__init__(self)
        # use a fresh list per instance rather than a shared mutable
        # default argument
        if value is None:
            value = []
        self.value = value
        return
def __str__(self):
result = '['
res = []
for item in self.value:
res.append( '%s' % item )
pass
result += ', '.join(res)
result += ']'
return result
def __len__(self):
return len(self.value)
def __getitem__(self, index):
return self.value[index]
## We want to implement some usual sequence stuff for this type
## such as slices, etc.
def append(self, val):
self.value.append(val)
def encodeContents(self):
""" To encode a Sequence, we simply encode() each sub-object
in turn."""
if __debug__: log.debug('Encoding sequence contents...')
resultlist = []
for elem in self.value:
resultlist.append(elem.encode())
pass
result = ''.join(resultlist)
return result
def decodeContents(self, stream):
"""decode a sequence of objects"""
objectList = self.decode(stream)
self.value = objectList
#return Sequence(objectList)
return self
pass
class SequenceOf(Sequence):
"""A SequenceOf is a special kind of sequence that places a
constraint on the kind of objects it can contain. It is variable
in length."""
asnTagClass = asnTagClasses['UNIVERSAL']
asnTagFormat = asnTagFormats['CONSTRUCTED']
asnTagNumber = asnTagNumbers['Sequence']
def __init__(self, componentType=Asn1Object, value=None):
Sequence.__init__(self)
self.componentType = componentType
## Add each item in the list to ourselves, which automatically
## checks each one to ensure it is of the correct type.
self.value = []
if value:
for item in value:
self.append(item)
pass
pass
return
def append(self, value):
if not isinstance( value, self.componentType ):
raise ValueError('%s: cannot contain components of type: %s' % (self.__class__.__name__, value.__class__.__name__) )
Sequence.append(self, value)
return
pass
class IPAddress(OctetString):
"""An IpAddress is a special type of OctetString. It represents a
32-bit internet address as an OctetString of length 4, in network
byte order. """
asnTagClass = asnTagClasses['APPLICATION']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = asnTagNumbers['IPAddress']
def __init__(self, value=None):
OctetString.__init__(self, value)
if type(value) == types.StringType:
self.value = ''
listform = value.split('.')
if len(listform) != 4:
raise ValueError('IPAddress must be of length 4')
for item in listform:
self.value += chr(int(item))
pass
pass
        elif type(value) == types.ListType:
            if len(value) != 4:
                raise ValueError('IPAddress must be of length 4')
            self.value = ''.join([ chr(int(x)) for x in value ])
            pass
else:
self.value = ''
pass
return
def decodeContents(self, stream):
"""An IPAddress is already decoded. Whee!"""
self.value = stream
return self
def __str__(self):
result = []
for item in self.value:
result.append( '%d' % ord(item) )
pass
return '.'.join(result)
##
##
def toObjectID(self):
return ObjectID( [ ord(x) for x in self.value ] )
pass
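def _demoIPAddress():
    # Added sketch (not in the original source): a dotted quad is
    # stored as four network-order octets, so encodeContents() is
    # already the wire form.
    ip = IPAddress('10.0.0.1')
    assert ip.encodeContents() == '\x0a\x00\x00\x01'
    assert str(ip) == '10.0.0.1'
    return ip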
class NetworkAddress(IPAddress):
""" A Network Address is a CHOICE with only one possible value:
internet
"""
name = 'internet'
pass
class Counter(Integer):
""" A counter starts at zero and keeps going to a maximum integer
value of 2^32-1 where it wraps back to zero.
"""
asnTagClass = asnTagClasses['APPLICATION']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = asnTagNumbers['Counter']
MINVAL = 0L
MAXVAL = 4294967295L
def __add__(self, val):
""" We only add to a counter, and we check for a wrap
condition.
"""
if self.value + val > self.MAXVAL:
self.value = val - ( self.MAXVAL - self.value )
else:
self.value += val
pass
return
def decodeContents(self, stream):
result = Integer.decodeContents(self, stream)
## Some agents encode Counters incorrectly (hello Solaris) as
## a negative number. I'm assuming most SNMP libraries don't
        ## notice the problem because they are written in C and cast
## the result to an unsigned int - problem solved (if
## accidentally). This ugly hack on their behalf flips the
## value over to the positive world.
if self.value < 0:
self.value += 0x100000000L
pass
return self
pass
class Guage(Integer):
""" A Guage is a non negative integer. It may increase or
decrease. It latches at a maximum value.
"""
asnTagClass = asnTagClasses['APPLICATION']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = asnTagNumbers['Guage']
MINVAL = 0
MAXVAL = 4294967295L
def __add__(self, val):
"""Add to the Guage, latching at the maximum"""
        if self.value + val > self.MAXVAL:
            self.value = self.MAXVAL
else:
self.value += val
pass
return
def __sub__(self, val):
"""Subtract from the Guage, latching at zerod """
if self.value - val < self.MINVAL:
self.value = self.MINVAL
else:
self.value -= val
pass
return
pass
class TimeTicks(Integer):
""" TimeTicks is the number of hundredths of a second since an
epoch, specified at object creation time
"""
asnTagClass = asnTagClasses['APPLICATION']
asnTagFormat = asnTagFormats['PRIMITIVE']
asnTagNumber = asnTagNumbers['TimeTicks']
MINVAL = 0
MAXVAL = 4294967295L
epoch = None
def __init__(self, value=0, epoch=None):
Integer.__init__(self, value)
if epoch:
self.epoch = epoch
pass
return
pass
def _todo__str__(self):
"""
Format the TimeTicks value into an actual
time/date stamp based on the epoch.
"""
# FIXME: Assumes an epoch of 1 Jan 1970
return ''
class Opaque(OctetString):
"""Opaque is a fun type that allows you to pass arbitrary ASN.1
encoded stuff in an object. The value is some ASN.1 syntax encoded
using BER which this object encodes as an OctetString. We don't
do any decoding of this object because we don't have to, and that
makes this all much quicker. """
pass
class DecodeError(Exception):
def __init__(self, args=None):
self.args = args
return
pass
##
## Lookup table for object decoding
##
tagDecodeDict = {
0x02: Integer,
0x04: OctetString,
0x05: Null,
0x06: ObjectID,
0x30: Sequence,
# Application types
0x40: IPAddress,
0x41: Counter,
0x42: Guage,
0x43: TimeTicks,
0x44: Opaque,
}
| {
"repo_name": "jpwarren/libsnmp",
"path": "lib/libsnmp/rfc1155.py",
"copies": "1",
"size": "29455",
"license": "mit",
"hash": 4102769776658135000,
"line_mean": 25.875,
"line_max": 128,
"alpha_frac": 0.5102359531,
"autogenerated": false,
"ratio": 4.39889486260454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5409130815704539,
"avg_score": null,
"num_lines": null
} |
# $Id$
# $URL$
"""A few things that didn't seem to fit anywhere else."""
import os, os.path
import pwd
import tempfile
import fcntl
import errno
import threading
import subprocess
import logger
PID_FILE = '/var/run/nodemanager.pid'
####################
def get_default_if():
interface = get_if_from_hwaddr(get_hwaddr_from_plnode())
if not interface: interface = "eth0"
return interface
def get_hwaddr_from_plnode():
try:
for line in open("/usr/boot/plnode.txt", 'r').readlines():
if line.startswith("NET_DEVICE"):
return line.split("=")[1].strip().strip('"')
except:
pass
return None
def get_if_from_hwaddr(hwaddr):
import sioc
devs = sioc.gifconf()
for dev in devs:
dev_hwaddr = sioc.gifhwaddr(dev)
if dev_hwaddr == hwaddr: return dev
return None
####################
# daemonizing
def as_daemon_thread(run):
"""Call function <run> with no arguments in its own thread."""
thr = threading.Thread(target=run)
thr.setDaemon(True)
thr.start()
def close_nonstandard_fds():
"""Close all open file descriptors other than 0, 1, and 2."""
_SC_OPEN_MAX = 4
for fd in range(3, os.sysconf(_SC_OPEN_MAX)):
try: os.close(fd)
except OSError: pass # most likely an fd that isn't open
# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
def daemon():
"""Daemonize the current process."""
if os.fork() != 0: os._exit(0)
os.setsid()
if os.fork() != 0: os._exit(0)
os.chdir('/')
os.umask(0022)
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, 0)
# xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
def fork_as(su, function, *args):
"""fork(), cd / to avoid keeping unused directories open, close all nonstandard file descriptors (to avoid capturing open sockets), fork() again (to avoid zombies) and call <function> with arguments <args> in the grandchild process. If <su> is not None, set our group and user ids appropriately in the child process."""
child_pid = os.fork()
if child_pid == 0:
try:
os.chdir('/')
close_nonstandard_fds()
if su:
pw_ent = pwd.getpwnam(su)
os.setegid(pw_ent[3])
os.seteuid(pw_ent[2])
child_pid = os.fork()
if child_pid == 0: function(*args)
except:
os.seteuid(os.getuid()) # undo su so we can write the log file
os.setegid(os.getgid())
logger.log_exc("tools: fork_as")
os._exit(0)
else: os.waitpid(child_pid, 0)
####################
# manage files
def pid_file():
"""We use a pid file to ensure that only one copy of NM is running at a given time.
If successful, this function will write a pid file containing the pid of the current process.
The return value is the pid of the other running process, or None otherwise."""
other_pid = None
if os.access(PID_FILE, os.F_OK): # check for a pid file
handle = open(PID_FILE) # pid file exists, read it
other_pid = int(handle.read())
handle.close()
# check for a process with that pid by sending signal 0
try: os.kill(other_pid, 0)
except OSError, e:
if e.errno == errno.ESRCH: other_pid = None # doesn't exist
else: raise # who knows
if other_pid == None:
# write a new pid file
write_file(PID_FILE, lambda f: f.write(str(os.getpid())))
return other_pid
def write_file(filename, do_write, **kw_args):
"""Write file <filename> atomically by opening a temporary file, using <do_write> to write that file, and then renaming the temporary file."""
os.rename(write_temp_file(do_write, **kw_args), filename)
def write_temp_file(do_write, mode=None, uidgid=None):
fd, temporary_filename = tempfile.mkstemp()
if mode: os.chmod(temporary_filename, mode)
if uidgid: os.chown(temporary_filename, *uidgid)
f = os.fdopen(fd, 'w')
try: do_write(f)
finally: f.close()
return temporary_filename
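# Added usage sketch (not in the original source): writing a config
# file atomically with the helpers above; path, contents and mode are
# illustrative placeholders.
def _example_atomic_write():
    write_file('/tmp/nm-example.conf',
               lambda f: f.write('key = value\n'),
               mode=0644)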
# replace a target file with a new contents - checks for changes
# can handle chmod if requested
# can also remove resulting file if contents are void, if requested
# performs atomically:
# writes in a tmp file, which is then renamed (from sliverauth originally)
# returns True if a change occurred, or the file is deleted
def replace_file_with_string (target, new_contents, chmod=None, remove_if_empty=False):
try:
current=file(target).read()
except:
current=""
if current==new_contents:
# if turns out to be an empty string, and remove_if_empty is set,
# then make sure to trash the file if it exists
if remove_if_empty and not new_contents and os.path.isfile(target):
logger.verbose("tools.replace_file_with_string: removing file %s"%target)
try: os.unlink(target)
finally: return True
return False
# overwrite target file: create a temp in the same directory
path=os.path.dirname(target) or '.'
fd, name = tempfile.mkstemp('','repl',path)
os.write(fd,new_contents)
os.close(fd)
if os.path.exists(target):
os.unlink(target)
os.rename(name,target)
if chmod: os.chmod(target,chmod)
return True
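# Added sketch (not in the original source): callers typically key a
# service restart off the return value; the target path and contents
# here are placeholders.
def _example_replace_and_restart():
    if replace_file_with_string('/tmp/nm-example.cfg', 'a=1\n'):
        logger.verbose('example: /tmp/nm-example.cfg changed')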
####################
# utilities functions to get (cached) information from the node
# get node_id from /etc/planetlab/node_id and cache it
_node_id=None
def node_id():
global _node_id
if _node_id is None:
try:
_node_id=int(file("/etc/planetlab/node_id").read())
except:
_node_id=""
return _node_id
_root_context_arch=None
def root_context_arch():
global _root_context_arch
if not _root_context_arch:
sp=subprocess.Popen(["uname","-i"],stdout=subprocess.PIPE)
(_root_context_arch,_)=sp.communicate()
_root_context_arch=_root_context_arch.strip()
return _root_context_arch
####################
class NMLock:
def __init__(self, file):
logger.log("tools: Lock %s initialized." % file, 2)
self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0600)
flags = fcntl.fcntl(self.fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fd, fcntl.F_SETFD, flags)
def __del__(self):
os.close(self.fd)
def acquire(self):
logger.log("tools: Lock acquired.", 2)
fcntl.lockf(self.fd, fcntl.LOCK_SH)
def release(self):
logger.log("tools: Lock released.", 2)
fcntl.lockf(self.fd, fcntl.LOCK_UN)
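# Added usage sketch (not in the original source): serialising a
# critical section with NMLock; the lock file path is a placeholder.
def _example_locked_update():
    lock = NMLock('/var/run/nodemanager-example.lock')
    lock.acquire()
    try:
        pass # critical section goes here
    finally:
        lock.release()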
| {
"repo_name": "planetlab/NodeManager",
"path": "tools.py",
"copies": "1",
"size": "6769",
"license": "bsd-3-clause",
"hash": -5963957590108943000,
"line_mean": 33.7128205128,
"line_max": 324,
"alpha_frac": 0.6218052888,
"autogenerated": false,
"ratio": 3.4430315361139368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9507935127329803,
"avg_score": 0.011380339516826947,
"num_lines": 195
} |
# $Id$
# $URL$
"""An extremely simple interface to the signing/verifying capabilities
of gnupg.
You must already have the key in the keyring.
"""
from subprocess import PIPE, Popen
from xmlrpclib import dumps, loads
GPG = '/usr/bin/gpg'
def _popen_gpg(*args):
"""Return a Popen object to GPG."""
return Popen((GPG, '--batch', '--no-tty') + args,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
def sign(data):
"""Return <data> signed with the default GPG key."""
msg = dumps((data,), methodresponse = True)
p = _popen_gpg('--armor', '--sign', '--keyring', '/etc/planetlab/secring.gpg', '--no-default-keyring')
p.stdin.write(msg)
p.stdin.close()
signed_msg = p.stdout.read()
p.stdout.close()
p.stderr.close()
p.wait()
return signed_msg
def verify(signed_msg):
"""If <signed_msg> is a valid signed document, return its contents. Otherwise, return None."""
p = _popen_gpg('--decrypt', '--keyring', '/usr/boot/pubring.gpg', '--no-default-keyring')
p.stdin.write(signed_msg)
p.stdin.close()
msg = p.stdout.read()
p.stdout.close()
p.stderr.close()
if p.wait():
return None # verification failed
else:
data, = loads(msg)[0]
return data
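# Added round-trip sketch (not in the original source); it assumes the
# signing and verification keys named above are already installed, and
# the payload is a placeholder.
def _example_roundtrip():
    signed = sign({'slice': 'example_slice'})
    assert verify(signed) == {'slice': 'example_slice'}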
| {
"repo_name": "planetlab/NodeManager",
"path": "ticket.py",
"copies": "1",
"size": "1269",
"license": "bsd-3-clause",
"hash": 7052532039179568000,
"line_mean": 27.8409090909,
"line_max": 106,
"alpha_frac": 0.6201733649,
"autogenerated": false,
"ratio": 3.3307086614173227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9425962061945986,
"avg_score": 0.004983992874267326,
"num_lines": 44
} |
# $Id$
# $URL$
"""configuration files"""
import grp
import os
import pwd
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
import string
import curlwrapper
import logger
import tools
import xmlrpclib
from config import Config
# right after net
priority = 2
class conf_files:
def __init__(self, noscripts=False):
self.config = Config()
self.noscripts = noscripts
self.data = None
def checksum(self, path):
try:
f = open(path)
try: return sha(f.read()).digest()
finally: f.close()
except IOError: return None
def system(self, cmd):
if not self.noscripts and cmd:
logger.verbose('conf_files: running command %s' % cmd)
return tools.fork_as(None, os.system, cmd)
else: return 0
def update_conf_file(self, cf_rec):
if not cf_rec['enabled']: return
dest = cf_rec['dest']
err_cmd = cf_rec['error_cmd']
mode = string.atoi(cf_rec['file_permissions'], base=8)
try:
uid = pwd.getpwnam(cf_rec['file_owner'])[2]
except:
logger.log('conf_files: cannot find user %s -- %s not updated'%(cf_rec['file_owner'],dest))
return
try:
gid = grp.getgrnam(cf_rec['file_group'])[2]
except:
logger.log('conf_files: cannot find group %s -- %s not updated'%(cf_rec['file_group'],dest))
return
url = 'https://%s/%s' % (self.config.PLC_BOOT_HOST, cf_rec['source'])
# set node_id at the end of the request - hacky
if tools.node_id():
if url.find('?') >0: url += '&'
else: url += '?'
url += "node_id=%d"%tools.node_id()
else:
logger.log('conf_files: %s -- WARNING, cannot add node_id to request'%dest)
try:
logger.verbose("conf_files: retrieving URL=%s"%url)
contents = curlwrapper.retrieve(url, self.config.cacert)
except xmlrpclib.ProtocolError,e:
logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
return
if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest):
return
if self.system(cf_rec['preinstall_cmd']):
self.system(err_cmd)
if not cf_rec['ignore_cmd_errors']: return
logger.log('conf_files: installing file %s from %s' % (dest, url))
try: os.makedirs(os.path.dirname(dest))
except OSError: pass
tools.write_file(dest, lambda f: f.write(contents), mode=mode, uidgid=(uid,gid))
if self.system(cf_rec['postinstall_cmd']): self.system(err_cmd)
def run_once(self, data):
if data.has_key("conf_files"):
for f in data['conf_files']:
try: self.update_conf_file(f)
except: logger.log_exc("conf_files: failed to update conf_file")
else:
logger.log_missing_data("conf_files.run_once",'conf_files')
def start(): pass
def GetSlivers(data, config = None, plc = None):
logger.log("conf_files: Running.")
cf = conf_files()
cf.run_once(data)
logger.log("conf_files: Done.")
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', help='PLC configuration file')
parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', help='API session key (or file)')
parser.add_option('--noscripts', action='store_true', dest='noscripts', default=False, help='Do not run pre- or post-install scripts')
(options, args) = parser.parse_args()
# Load /etc/planetlab/plc_config
config = Config(options.config)
# Load /etc/planetlab/session
if os.path.exists(options.session):
session = file(options.session).read().strip()
else:
session = options.session
# Initialize XML-RPC client
from plcapi import PLCAPI
plc = PLCAPI(config.plc_api_uri, config.cacert, auth = session)
main = conf_files(options.noscripts)
data = plc.GetSlivers()
main.run_once(data)
| {
"repo_name": "planetlab/NodeManager",
"path": "conf_files.py",
"copies": "1",
"size": "4265",
"license": "bsd-3-clause",
"hash": 4790541881196973000,
"line_mean": 33.674796748,
"line_max": 140,
"alpha_frac": 0.5939038687,
"autogenerated": false,
"ratio": 3.602195945945946,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9657138562525514,
"avg_score": 0.007792250424086565,
"num_lines": 123
} |
# $Id$
# $URL$
"""network configuration"""
# system provided modules
import os, string, time, socket
# PlanetLab system modules
import sioc, plnet
# local modules
import bwlimit, logger, iptables, tools
# we can't do anything without a network
priority=1
dev_default = tools.get_default_if()
def start():
logger.log("net: plugin starting up...")
def GetSlivers(data, config, plc):
# added by caglar
# band-aid for short period as old API returns networks instead of interfaces
global KEY_NAME
KEY_NAME = "interfaces"
#################
logger.verbose("net: GetSlivers called.")
if not 'interfaces' in data:
# added by caglar
# band-aid for short period as old API returns networks instead of interfaces
# logger.log_missing_data('net.GetSlivers','interfaces')
# return
if not 'networks' in data:
logger.log_missing_data('net.GetSlivers','interfaces')
return
else:
KEY_NAME = "networks"
##################
plnet.InitInterfaces(logger, plc, data)
if 'OVERRIDES' in dir(config):
if config.OVERRIDES.get('net_max_rate') == '-1':
logger.log("net: Slice and node BW Limits disabled.")
if len(bwlimit.tc("class show dev %s" % dev_default)):
logger.verbose("net: *** DISABLING NODE BW LIMITS ***")
bwlimit.stop()
else:
InitNodeLimit(data)
InitI2(plc, data)
else:
InitNodeLimit(data)
InitI2(plc, data)
InitNAT(plc, data)
def InitNodeLimit(data):
# query running network interfaces
devs = sioc.gifconf()
ips = dict(zip(devs.values(), devs.keys()))
macs = {}
for dev in devs:
macs[sioc.gifhwaddr(dev).lower()] = dev
for interface in data[KEY_NAME]:
# Get interface name preferably from MAC address, falling
# back on IP address.
hwaddr=interface['mac']
if hwaddr <> None: hwaddr=hwaddr.lower()
        if hwaddr in macs:
            dev = macs[hwaddr]
elif interface['ip'] in ips:
dev = ips[interface['ip']]
else:
logger.log('net: %s: no such interface with address %s/%s' % (interface['hostname'], interface['ip'], interface['mac']))
continue
# Get current node cap
try:
old_bwlimit = bwlimit.get_bwcap(dev)
except:
old_bwlimit = None
# Get desired node cap
if interface['bwlimit'] is None or interface['bwlimit'] < 0:
new_bwlimit = bwlimit.bwmax
else:
new_bwlimit = interface['bwlimit']
if old_bwlimit != new_bwlimit:
# Reinitialize bandwidth limits
bwlimit.init(dev, new_bwlimit)
# XXX This should trigger an rspec refresh in case
# some previously invalid sliver bwlimit is now valid
# again, or vice-versa.
def InitI2(plc, data):
if not 'groups' in data: return
if "Internet2" in data['groups']:
logger.log("net: This is an Internet2 node. Setting rules.")
i2nodes = []
i2nodeids = plc.GetNodeGroups(["Internet2"])[0]['node_ids']
for node in plc.GetInterfaces({"node_id": i2nodeids}, ["ip"]):
# Get the IPs
i2nodes.append(node['ip'])
# this will create the set if it doesn't already exist
# and add IPs that don't exist in the set rather than
        # just recreating the set.
bwlimit.exempt_init('Internet2', i2nodes)
        # set the iptables classification rule if it doesn't exist.
cmd = '-A POSTROUTING -m set --set Internet2 dst -j CLASSIFY --set-class 0001:2000 --add-mark'
rules = []
ipt = os.popen("/sbin/iptables-save")
for line in ipt.readlines(): rules.append(line.strip(" \n"))
ipt.close()
if cmd not in rules:
logger.verbose("net: Adding iptables rule for Internet2")
os.popen("/sbin/iptables -t mangle " + cmd)
def InitNAT(plc, data):
# query running network interfaces
devs = sioc.gifconf()
ips = dict(zip(devs.values(), devs.keys()))
macs = {}
for dev in devs:
macs[sioc.gifhwaddr(dev).lower()] = dev
ipt = iptables.IPTables()
for interface in data[KEY_NAME]:
# Get interface name preferably from MAC address, falling
# back on IP address.
hwaddr=interface['mac']
if hwaddr <> None: hwaddr=hwaddr.lower()
        if hwaddr in macs:
            dev = macs[hwaddr]
elif interface['ip'] in ips:
dev = ips[interface['ip']]
else:
logger.log('net: %s: no such interface with address %s/%s' % (interface['hostname'], interface['ip'], interface['mac']))
continue
try:
settings = plc.GetInterfaceTags({'interface_tag_id': interface['interface_tag_ids']})
except:
continue
for setting in settings:
if setting['category'].upper() != 'FIREWALL':
continue
if setting['name'].upper() == 'EXTERNAL':
# Enable NAT for this interface
ipt.add_ext(dev)
elif setting['name'].upper() == 'INTERNAL':
ipt.add_int(dev)
elif setting['name'].upper() == 'PF': # XXX Uglier code is hard to find...
for pf in setting['value'].split("\n"):
fields = {}
for field in pf.split(","):
(key, val) = field.split("=", 2)
fields[key] = val
if 'new_dport' not in fields:
fields['new_dport'] = fields['dport']
if 'source' not in fields:
fields['source'] = "0.0.0.0/0"
ipt.add_pf(fields)
ipt.commit()
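# Added sketch (not in the original source) of the FIREWALL/PF tag
# value parsed in InitNAT above: one forward per line, each a
# comma-separated list of key=value fields; 'new_dport' and 'source'
# default as shown. All values are placeholders.
_EXAMPLE_PF_VALUE = "dport=8080,new_dport=80,source=192.0.2.0/24"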
| {
"repo_name": "planetlab/NodeManager",
"path": "net.py",
"copies": "1",
"size": "5908",
"license": "bsd-3-clause",
"hash": 7833433080532858000,
"line_mean": 32.9540229885,
"line_max": 132,
"alpha_frac": 0.55788761,
"autogenerated": false,
"ratio": 3.9099933818663137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9943482540863432,
"avg_score": 0.00487969020057627,
"num_lines": 174
} |
# $Id$
# $URL$
"""
vsys sub-configurator. Maintains configuration parameters associated with vsys scripts.
All slice attributes with the prefix vsys_ are written into configuration files on the
node for the reference of vsys scripts.
"""
import logger
import os
VSYS_PRIV_DIR = "/etc/planetlab/vsys-attributes"
def start():
logger.log("vsys_privs: plugin starting")
if (not os.path.exists(VSYS_PRIV_DIR)):
os.makedirs(VSYS_PRIV_DIR)
logger.log("vsys_privs: Created vsys attributes dir")
def GetSlivers(data, config=None, plc=None):
if 'slivers' not in data:
logger.log_missing_data("vsys_privs.GetSlivers",'slivers')
return
privs = {}
# Parse attributes and update dict of scripts
for sliver in data['slivers']:
slice = sliver['name']
for attribute in sliver['attributes']:
tag = attribute['tagname']
value = attribute['value']
if tag.startswith('vsys_'):
if (privs.has_key(slice)):
slice_priv = privs[slice]
if (slice_priv.has_key(tag)):
slice_priv[tag].append(value)
else:
slice_priv[tag]=[value]
privs[slice] = slice_priv
else:
privs[slice] = {tag:[value]}
cur_privs = read_privs()
write_privs(cur_privs, privs)
def read_privs():
cur_privs={}
priv_finder = os.walk(VSYS_PRIV_DIR)
priv_find = [i for i in priv_finder]
(rootdir,slices,foo) = priv_find[0]
for slice in slices:
cur_privs[slice]={}
if (len(priv_find)>1):
for (slicedir,bar,tagnames) in priv_find[1:]:
if (bar != []):
# The depth of the vsys-privileges directory = 1
pass
for tagname in tagnames:
tagfile = os.path.join(slicedir,tagname)
values_n = file(tagfile).readlines()
values = map(lambda s:s.rstrip(),values_n)
slice = os.path.basename(slicedir)
cur_privs[slice][tagname]=values
return cur_privs
def write_privs(cur_privs,privs):
for slice in privs.keys():
variables = privs[slice]
slice_dir = os.path.join(VSYS_PRIV_DIR,slice)
if (not os.path.exists(slice_dir)):
os.mkdir(slice_dir)
# Add values that do not exist
for k in variables.keys():
v = variables[k]
if (cur_privs.has_key(slice)
and cur_privs[slice].has_key(k)
and cur_privs[slice][k] == v):
# The binding has not changed
pass
else:
v_file = os.path.join(slice_dir, k)
f = open(v_file,'w')
data = '\n'.join(v)
f.write(data)
f.close()
logger.log("vsys_privs: added vsys attribute %s for %s"%(k,slice))
# Remove files and directories
# that are invalid
for slice in cur_privs.keys():
variables = cur_privs[slice]
slice_dir = os.path.join(VSYS_PRIV_DIR,slice)
        # Remove values that no longer exist
for k in variables.keys():
if (privs.has_key(slice)
and cur_privs[slice].has_key(k)):
# ok, spare this tag
print "Sparing %s, %s "%(slice,k)
else:
v_file = os.path.join(slice_dir, k)
os.remove(v_file)
if (not privs.has_key(slice)):
os.rmdir(slice_dir)
if __name__ == "__main__":
test_slivers = {'slivers':[
{'name':'foo','attributes':[
{'tagname':'vsys_m','value':'2'},
{'tagname':'vsys_m','value':'3'},
{'tagname':'vsys_m','value':'4'}
]},
{'name':'bar','attributes':[
#{'tagname':'vsys_x','value':'z'}
]}
]}
    start()
GetSlivers(test_slivers)
| {
"repo_name": "planetlab/NodeManager",
"path": "plugins/vsys_privs.py",
"copies": "1",
"size": "4123",
"license": "bsd-3-clause",
"hash": 5562344780138080000,
"line_mean": 30,
"line_max": 88,
"alpha_frac": 0.5241329129,
"autogenerated": false,
"ratio": 3.6358024691358026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9607902075740962,
"avg_score": 0.010406661258968318,
"num_lines": 133
} |
# $Id$
# $URL$
"""vsys configurator. Maintains ACLs and script pipes inside vservers based on slice attributes."""
import logger
import os
VSYSCONF="/etc/vsys.conf"
VSYSBKEND="/vsys"
def start():
logger.log("vsys: plugin starting up...")
def GetSlivers(data, config=None, plc=None):
"""For each sliver with the vsys attribute, set the script ACL, create the vsys directory in the slice, and restart vsys."""
if 'slivers' not in data:
logger.log_missing_data("vsys.GetSlivers",'slivers')
return
# Touch ACLs and create dict of available
scripts = {}
for script in touchAcls(): scripts[script] = []
# slices that need to be written to the conf
slices = []
_restart = False
# Parse attributes and update dict of scripts
for sliver in data['slivers']:
for attribute in sliver['attributes']:
if attribute['tagname'] == 'vsys':
if sliver['name'] not in slices:
# add to conf
slices.append(sliver['name'])
_restart = createVsysDir(sliver['name']) or _restart
if attribute['value'] in scripts.keys():
scripts[attribute['value']].append(sliver['name'])
# Write the conf
_restart = writeConf(slices, parseConf()) or _restart
# Write out the ACLs
if writeAcls(scripts, parseAcls()) or _restart:
logger.log("vsys: restarting vsys service")
logger.log_call(["/etc/init.d/vsys", "restart", ])
def createVsysDir(sliver):
    '''Create /vsys directory in slice; returns True if the directory was newly created.'''
try:
os.mkdir("/vservers/%s/vsys" % sliver)
return True
except OSError:
return False
def touchAcls():
'''Creates empty acl files for scripts.
To be ran in case of new scripts that appear in the backend.
Returns list of available scripts.'''
acls = []
scripts = []
for (root, dirs, files) in os.walk(VSYSBKEND):
for file in files:
            # ignore scripts that start with local_
if file.startswith("local_"): continue
if file.endswith(".acl"):
acls.append(file.replace(".acl", ""))
else:
scripts.append(file)
for new in (set(scripts) - set(acls)):
logger.log("vsys: Found new script %s. Writing empty acl." % new)
f = open("%s/%s.acl" %(VSYSBKEND, new), "w")
f.write("\n")
f.close()
return scripts
def writeAcls(currentscripts, oldscripts):
'''Creates .acl files for script in the script repo.'''
# Check each oldscript entry to see if we need to modify
_restartvsys = False
# for iteritems along dict(oldscripts), if length of values
# not the same as length of values of new scripts,
# and length of non intersection along new scripts is not 0,
# then dicts are different.
for (acl, oldslivers) in oldscripts.iteritems():
try:
if (len(oldslivers) != len(currentscripts[acl])) or \
(len(set(oldslivers) - set(currentscripts[acl])) != 0):
_restartvsys = True
logger.log("vsys: Updating %s.acl w/ slices %s" % (acl, currentscripts[acl]))
f = open("%s/%s.acl" % (VSYSBKEND, acl), "w")
for slice in currentscripts[acl]: f.write("%s\n" % slice)
f.close()
except KeyError:
logger.log("vsys: #:)# Warning,Not a valid Vsys script,%s"%acl)
# Trigger a restart
return _restartvsys
def parseAcls():
'''Parse the frontend script acls. Return {script: [slices]} in conf.'''
# make a dict of what slices are in what acls.
scriptacls = {}
for (root, dirs, files) in os.walk(VSYSBKEND):
for file in files:
if file.endswith(".acl") and not file.startswith("local_"):
f = open(root+"/"+file,"r+")
scriptname = file.replace(".acl", "")
scriptacls[scriptname] = []
for slice in f.readlines():
scriptacls[scriptname].append(slice.rstrip())
f.close()
# return what scripts are configured for which slices.
return scriptacls
def writeConf(slivers, oldslivers):
# Check if this is needed
# The assumption here is if lengths are the same,
# and the non intersection of both arrays has length 0,
# then the arrays are identical.
if (len(slivers) != len(oldslivers)) or \
(len(set(oldslivers) - set(slivers)) != 0):
logger.log("vsys: Updating %s" % VSYSCONF)
f = open(VSYSCONF,"w")
for sliver in slivers:
f.write("/vservers/%(name)s/vsys %(name)s\n" % {"name": sliver})
f.truncate()
f.close()
return True
else:
return False
def parseConf():
'''Parse the vsys conf and return list of slices in conf.'''
scriptacls = {}
slicesinconf = []
try:
f = open(VSYSCONF)
for line in f.readlines():
(path, slice) = line.split()
slicesinconf.append(slice)
f.close()
except: logger.log_exc("vsys: failed parseConf")
return slicesinconf
| {
"repo_name": "planetlab/NodeManager",
"path": "plugins/vsys.py",
"copies": "1",
"size": "5283",
"license": "bsd-3-clause",
"hash": 5882340575905811000,
"line_mean": 33.9867549669,
"line_max": 128,
"alpha_frac": 0.5884913875,
"autogenerated": false,
"ratio": 3.736209335219236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4824700722719236,
"avg_score": null,
"num_lines": null
} |
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
def __init__(self, rststyle_function, escape_function):
pygments.formatter.Formatter.__init__(self)
self.rststyle_function = rststyle_function
self.escape_function = escape_function
def rststyle(self, name, parameters=( )):
return self.rststyle_function(name, parameters)
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Literal.String:
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (
tokenclass.Literal.Number.Integer,
tokenclass.Literal.Number.Integer.Long,
tokenclass.Literal.Number.Float,
tokenclass.Literal.Number.Hex,
tokenclass.Literal.Number.Oct,
tokenclass.Literal.Number,
):
s2 = self.rststyle('codeblock-number')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Operator:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Class:
s2 = self.rststyle('codeblock-classname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Function:
s2 = self.rststyle('codeblock-functionname')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
def format(self, tokensource, outfile):
tokenclass = pygments.token.Token
for ttype, value in tokensource:
value = self.escape_function(value)
if ttype == tokenclass.Keyword:
s2 = self.rststyle('codeblock-keyword')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (tokenclass.Literal.String,
tokenclass.Literal.String.Backtick,
):
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Attribute:
s2 = self.rststyle('codeblock-operator')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Comment:
if value[-1] == '\n':
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>\n' % \
(s2, value[:-1], )
else:
s2 = self.rststyle('codeblock-comment')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype == tokenclass.Name.Builtin:
s2 = self.rststyle('codeblock-name')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
else:
s1 = value
outfile.write(s1)
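# Added usage sketch (not part of docutils): driving one of the
# formatters above directly. The style-name scheme and escape callable
# are stand-ins for what the ODF writer normally supplies.
def _example_highlight(source_code):
    import StringIO
    import pygments.lexers
    formatter = OdtPygmentsProgFormatter(
        lambda name, parameters=(): 'rststyle-%s' % name,
        lambda text: text.replace('&', '&amp;').replace('<', '&lt;'))
    lexer = pygments.lexers.get_lexer_by_name('python')
    outfile = StringIO.StringIO()
    pygments.highlight(source_code, lexer, formatter, outfile)
    return outfile.getvalue()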
| {
"repo_name": "timonwong/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python2/docutils/writers/odf_odt/pygmentsformatter.py",
"copies": "4",
"size": "4613",
"license": "mit",
"hash": 1936637410140328400,
"line_mean": 41.3211009174,
"line_max": 79,
"alpha_frac": 0.5063949707,
"autogenerated": false,
"ratio": 3.9698795180722892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007621516895549604,
"num_lines": 109
} |
"""
Open Document Format (ODF) Writer.
"""
VERSION = '1.0a'
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
import tempfile
import zipfile
from xml.dom import minidom
import time
import re
import io
import copy
import urllib.request, urllib.error, urllib.parse
import docutils
from docutils import frontend, nodes, utils, writers, languages
from docutils.readers import standalone
from docutils.transforms import references
WhichElementTree = ''
try:
# 1. Try to use lxml.
#from lxml import etree
#WhichElementTree = 'lxml'
raise ImportError('Ignoring lxml')
except ImportError as e:
try:
# 2. Try to use ElementTree from the Python standard library.
from xml.etree import ElementTree as etree
WhichElementTree = 'elementtree'
except ImportError as e:
try:
# 3. Try to use a version of ElementTree installed as a separate
# product.
from elementtree import ElementTree as etree
WhichElementTree = 'elementtree'
except ImportError as e:
s1 = 'Must install either a version of Python containing ' \
'ElementTree (Python version >=2.5) or install ElementTree.'
raise ImportError(s1)
#
# Import pygments and odtwriter pygments formatters if possible.
try:
import pygments
import pygments.lexers
from .pygmentsformatter import OdtPygmentsProgFormatter, \
OdtPygmentsLaTeXFormatter
except ImportError as exp:
pygments = None
# check for the Python Imaging Library
try:
import PIL.Image
except ImportError:
try: # sometimes PIL modules are put in PYTHONPATH's root
import Image
class PIL(object): pass # dummy wrapper
PIL.Image = Image
except ImportError:
PIL = None
## import warnings
## warnings.warn('importing IPShellEmbed', UserWarning)
## from IPython.Shell import IPShellEmbed
## args = ['-pdb', '-pi1', 'In <\\#>: ', '-pi2', ' .\\D.: ',
## '-po', 'Out<\\#>: ', '-nosep']
## ipshell = IPShellEmbed(args,
## banner = 'Entering IPython. Press Ctrl-D to exit.',
## exit_msg = 'Leaving Interpreter, back to program.')
#
# ElementTree does not support getparent method (lxml does).
# This wrapper class and the following support functions provide
# that support for the ability to get the parent of an element.
#
if WhichElementTree == 'elementtree':
class _ElementInterfaceWrapper(etree._ElementInterface):
def __init__(self, tag, attrib=None):
etree._ElementInterface.__init__(self, tag, attrib)
if attrib is None:
attrib = {}
self.parent = None
def setparent(self, parent):
self.parent = parent
def getparent(self):
return self.parent
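    # Added sketch (not part of docutils): the wrapper above restores
    # the lxml-style upward navigation that this writer relies on when
    # only the stdlib ElementTree is available.
    def _demo_parent_links():
        parent = _ElementInterfaceWrapper('text:p')
        child = _ElementInterfaceWrapper('text:span')
        child.setparent(parent)
        assert child.getparent() is parent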
#
# Constants and globals
SPACES_PATTERN = re.compile(r'( +)')
TABS_PATTERN = re.compile(r'(\t+)')
FILL_PAT1 = re.compile(r'^ +')
FILL_PAT2 = re.compile(r' {2,}')
TABLESTYLEPREFIX = 'rststyle-table-'
TABLENAMEDEFAULT = '%s0' % TABLESTYLEPREFIX
TABLEPROPERTYNAMES = ('border', 'border-top', 'border-left',
'border-right', 'border-bottom', )
GENERATOR_DESC = 'Docutils.org/odf_odt'
NAME_SPACE_1 = 'urn:oasis:names:tc:opendocument:xmlns:office:1.0'
CONTENT_NAMESPACE_DICT = CNSD = {
# 'office:version': '1.0',
'chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'dom': 'http://www.w3.org/2001/xml-events',
'dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'math': 'http://www.w3.org/1998/Math/MathML',
'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'office': NAME_SPACE_1,
'ooo': 'http://openoffice.org/2004/office',
'oooc': 'http://openoffice.org/2004/calc',
'ooow': 'http://openoffice.org/2004/writer',
'presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xforms': 'http://www.w3.org/2002/xforms',
'xlink': 'http://www.w3.org/1999/xlink',
'xsd': 'http://www.w3.org/2001/XMLSchema',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
}
STYLES_NAMESPACE_DICT = SNSD = {
# 'office:version': '1.0',
'chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'dom': 'http://www.w3.org/2001/xml-events',
'dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'math': 'http://www.w3.org/1998/Math/MathML',
'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'office': NAME_SPACE_1,
'presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'ooo': 'http://openoffice.org/2004/office',
'oooc': 'http://openoffice.org/2004/calc',
'ooow': 'http://openoffice.org/2004/writer',
'script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xlink': 'http://www.w3.org/1999/xlink',
}
MANIFEST_NAMESPACE_DICT = MANNSD = {
'manifest': 'urn:oasis:names:tc:opendocument:xmlns:manifest:1.0',
}
META_NAMESPACE_DICT = METNSD = {
# 'office:version': '1.0',
'dc': 'http://purl.org/dc/elements/1.1/',
'meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'office': NAME_SPACE_1,
'ooo': 'http://openoffice.org/2004/office',
'xlink': 'http://www.w3.org/1999/xlink',
}
#
# Attribute dictionaries for use with ElementTree (not lxml), which
# does not support use of nsmap parameter on Element() and SubElement().
CONTENT_NAMESPACE_ATTRIB = {
#'office:version': '1.0',
'xmlns:chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:dom': 'http://www.w3.org/2001/xml-events',
'xmlns:dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'xmlns:draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'xmlns:fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'xmlns:form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'xmlns:math': 'http://www.w3.org/1998/Math/MathML',
'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'xmlns:number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'xmlns:office': NAME_SPACE_1,
'xmlns:presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'xmlns:ooo': 'http://openoffice.org/2004/office',
'xmlns:oooc': 'http://openoffice.org/2004/calc',
'xmlns:ooow': 'http://openoffice.org/2004/writer',
'xmlns:script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'xmlns:style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'xmlns:svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'xmlns:table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'xmlns:text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xmlns:xforms': 'http://www.w3.org/2002/xforms',
'xmlns:xlink': 'http://www.w3.org/1999/xlink',
'xmlns:xsd': 'http://www.w3.org/2001/XMLSchema',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
}
STYLES_NAMESPACE_ATTRIB = {
#'office:version': '1.0',
'xmlns:chart': 'urn:oasis:names:tc:opendocument:xmlns:chart:1.0',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:dom': 'http://www.w3.org/2001/xml-events',
'xmlns:dr3d': 'urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0',
'xmlns:draw': 'urn:oasis:names:tc:opendocument:xmlns:drawing:1.0',
'xmlns:fo': 'urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0',
'xmlns:form': 'urn:oasis:names:tc:opendocument:xmlns:form:1.0',
'xmlns:math': 'http://www.w3.org/1998/Math/MathML',
'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'xmlns:number': 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0',
'xmlns:office': NAME_SPACE_1,
'xmlns:presentation': 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0',
'xmlns:ooo': 'http://openoffice.org/2004/office',
'xmlns:oooc': 'http://openoffice.org/2004/calc',
'xmlns:ooow': 'http://openoffice.org/2004/writer',
'xmlns:script': 'urn:oasis:names:tc:opendocument:xmlns:script:1.0',
'xmlns:style': 'urn:oasis:names:tc:opendocument:xmlns:style:1.0',
'xmlns:svg': 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0',
'xmlns:table': 'urn:oasis:names:tc:opendocument:xmlns:table:1.0',
'xmlns:text': 'urn:oasis:names:tc:opendocument:xmlns:text:1.0',
'xmlns:xlink': 'http://www.w3.org/1999/xlink',
}
MANIFEST_NAMESPACE_ATTRIB = {
'xmlns:manifest': 'urn:oasis:names:tc:opendocument:xmlns:manifest:1.0',
}
META_NAMESPACE_ATTRIB = {
#'office:version': '1.0',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:meta': 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0',
'xmlns:office': NAME_SPACE_1,
'xmlns:ooo': 'http://openoffice.org/2004/office',
'xmlns:xlink': 'http://www.w3.org/1999/xlink',
}
#
# Functions
#
#
# ElementTree support functions.
# In order to be able to get the parent of elements, must use these
# instead of the functions with same name provided by ElementTree.
#
def Element(tag, attrib=None, nsmap=None, nsdict=CNSD):
if attrib is None:
attrib = {}
tag, attrib = fix_ns(tag, attrib, nsdict)
if WhichElementTree == 'lxml':
el = etree.Element(tag, attrib, nsmap=nsmap)
else:
el = _ElementInterfaceWrapper(tag, attrib)
return el
def SubElement(parent, tag, attrib=None, nsmap=None, nsdict=CNSD):
if attrib is None:
attrib = {}
tag, attrib = fix_ns(tag, attrib, nsdict)
if WhichElementTree == 'lxml':
el = etree.SubElement(parent, tag, attrib, nsmap=nsmap)
else:
el = _ElementInterfaceWrapper(tag, attrib)
parent.append(el)
el.setparent(parent)
return el
def fix_ns(tag, attrib, nsdict):
nstag = add_ns(tag, nsdict)
nsattrib = {}
for key, val in attrib.items():
nskey = add_ns(key, nsdict)
nsattrib[nskey] = val
return nstag, nsattrib
def add_ns(tag, nsdict=CNSD):
if WhichElementTree == 'lxml':
nstag, name = tag.split(':')
ns = nsdict.get(nstag)
if ns is None:
raise RuntimeError('Invalid namespace prefix: %s' % nstag)
tag = '{%s}%s' % (ns, name,)
return tag
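# Illustrative example: under lxml, add_ns('text:p') expands to
# '{urn:oasis:names:tc:opendocument:xmlns:text:1.0}p'; under plain
# ElementTree the prefixed tag is returned unchanged.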
def ToString(et):
outstream = io.StringIO()
if sys.version_info >= (3, 2):
et.write(outstream, encoding="unicode")
else:
et.write(outstream)
s1 = outstream.getvalue()
outstream.close()
return s1
def escape_cdata(text):
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    # 'result' (renamed from 'ascii' to avoid shadowing the builtin)
    # accumulates the text with non-ASCII characters as numeric entities.
    result = ''
    for char in text:
        if ord(char) >= ord("\x7f"):
            result += "&#x%X;" % (ord(char), )
        else:
            result += char
    return result
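# Illustrative example: escape_cdata('R & D é') returns
# 'R &amp; D &#xE9;'.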
WORD_SPLIT_PAT1 = re.compile(r'\b(\w*)\b\W*')
def split_words(line):
# We need whitespace at the end of the string for our regexpr.
line += ' '
words = []
pos1 = 0
mo = WORD_SPLIT_PAT1.search(line, pos1)
while mo is not None:
word = mo.groups()[0]
words.append(word)
pos1 = mo.end()
mo = WORD_SPLIT_PAT1.search(line, pos1)
return words
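# Illustrative example: split_words('alpha, beta gamma') returns
# ['alpha', 'beta', 'gamma'].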
#
# Classes
#
class TableStyle(object):
def __init__(self, border=None, backgroundcolor=None):
self.border = border
self.backgroundcolor = backgroundcolor
def get_border_(self):
return self.border_
def set_border_(self, border):
self.border_ = border
border = property(get_border_, set_border_)
def get_backgroundcolor_(self):
return self.backgroundcolor_
def set_backgroundcolor_(self, backgroundcolor):
self.backgroundcolor_ = backgroundcolor
backgroundcolor = property(get_backgroundcolor_, set_backgroundcolor_)
BUILTIN_DEFAULT_TABLE_STYLE = TableStyle(
border = '0.0007in solid #000000')
#
# Information about the indentation level for lists nested inside
# other contexts, e.g. dictionary lists.
class ListLevel(object):
def __init__(self, level, sibling_level=True, nested_level=True):
self.level = level
self.sibling_level = sibling_level
self.nested_level = nested_level
def set_sibling(self, sibling_level): self.sibling_level = sibling_level
def get_sibling(self): return self.sibling_level
def set_nested(self, nested_level): self.nested_level = nested_level
def get_nested(self): return self.nested_level
def set_level(self, level): self.level = level
def get_level(self): return self.level
class Writer(writers.Writer):
MIME_TYPE = 'application/vnd.oasis.opendocument.text'
EXTENSION = '.odt'
supported = ('odt', )
"""Formats this writer supports."""
default_stylesheet = 'styles' + EXTENSION
default_stylesheet_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_stylesheet))
default_template = 'template.txt'
default_template_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(os.path.dirname(__file__), default_template))
settings_spec = (
'ODF-Specific Options',
None,
(
('Specify a stylesheet. '
'Default: "%s"' % default_stylesheet_path,
['--stylesheet'],
{
'default': default_stylesheet_path,
'dest': 'stylesheet'
}),
('Specify a configuration/mapping file relative to the '
'current working '
'directory for additional ODF options. '
'In particular, this file may contain a section named '
'"Formats" that maps default style names to '
'names to be used in the resulting output file allowing for '
'adhering to external standards. '
'For more info and the format of the configuration/mapping file, '
'see the odtwriter doc.',
['--odf-config-file'],
{'metavar': '<file>'}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'default': False,
'action': 'store_true',
'dest': 'cloak_email_addresses',
'validator': frontend.validate_boolean}),
('Do not obfuscate email addresses.',
['--no-cloak-email-addresses'],
{'default': False,
'action': 'store_false',
'dest': 'cloak_email_addresses',
'validator': frontend.validate_boolean}),
        ('Specify the thickness of table borders in thousandths of a '
         'centimeter. Default is 35.',
['--table-border-thickness'],
{'default': None,
'validator': frontend.validate_nonnegative_int}),
('Add syntax highlighting in literal code blocks.',
['--add-syntax-highlighting'],
{'default': False,
'action': 'store_true',
'dest': 'add_syntax_highlighting',
'validator': frontend.validate_boolean}),
('Do not add syntax highlighting in literal code blocks. (default)',
['--no-syntax-highlighting'],
{'default': False,
'action': 'store_false',
'dest': 'add_syntax_highlighting',
'validator': frontend.validate_boolean}),
('Create sections for headers. (default)',
['--create-sections'],
{'default': True,
'action': 'store_true',
'dest': 'create_sections',
'validator': frontend.validate_boolean}),
('Do not create sections for headers.',
['--no-sections'],
{'default': True,
'action': 'store_false',
'dest': 'create_sections',
'validator': frontend.validate_boolean}),
('Create links.',
['--create-links'],
{'default': False,
'action': 'store_true',
'dest': 'create_links',
'validator': frontend.validate_boolean}),
('Do not create links. (default)',
['--no-links'],
{'default': False,
'action': 'store_false',
'dest': 'create_links',
'validator': frontend.validate_boolean}),
('Generate endnotes at end of document, not footnotes '
'at bottom of page.',
['--endnotes-end-doc'],
{'default': False,
'action': 'store_true',
'dest': 'endnotes_end_doc',
'validator': frontend.validate_boolean}),
('Generate footnotes at bottom of page, not endnotes '
'at end of document. (default)',
['--no-endnotes-end-doc'],
{'default': False,
'action': 'store_false',
'dest': 'endnotes_end_doc',
'validator': frontend.validate_boolean}),
('Generate a bullet list table of contents, not '
'an ODF/oowriter table of contents.',
['--generate-list-toc'],
{'default': True,
'action': 'store_false',
'dest': 'generate_oowriter_toc',
'validator': frontend.validate_boolean}),
('Generate an ODF/oowriter table of contents, not '
'a bullet list. (default)',
['--generate-oowriter-toc'],
{'default': True,
'action': 'store_true',
'dest': 'generate_oowriter_toc',
'validator': frontend.validate_boolean}),
        ('Specify the contents of a custom header line. '
         'See odf_odt writer documentation for details '
         'about special field character sequences.',
         ['--custom-odt-header'],
         {'default': '',
          'dest': 'custom_header',
          }),
        ('Specify the contents of a custom footer line. '
         'See odf_odt writer documentation for details '
         'about special field character sequences.',
         ['--custom-odt-footer'],
         {'default': '',
          'dest': 'custom_footer',
          }),
)
)
settings_defaults = {
'output_encoding_error_handler': 'xmlcharrefreplace',
}
relative_path_settings = (
'stylesheet_path',
)
config_section = 'odf_odt writer'
config_section_dependencies = (
'writers',
)
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = ODFTranslator
def translate(self):
self.settings = self.document.settings
self.visitor = self.translator_class(self.document)
self.visitor.retrieve_styles(self.EXTENSION)
self.document.walkabout(self.visitor)
self.visitor.add_doc_title()
self.assemble_my_parts()
self.output = self.parts['whole']
def assemble_my_parts(self):
"""Assemble the `self.parts` dictionary. Extend in subclasses.
"""
writers.Writer.assemble_parts(self)
f = tempfile.NamedTemporaryFile()
zfile = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED)
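        # ODF requires the 'mimetype' member to be the first entry in the
        # package and stored uncompressed, so consumers can identify the
        # file type without inflating anything.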
self.write_zip_str(zfile, 'mimetype', self.MIME_TYPE,
compress_type=zipfile.ZIP_STORED)
content = self.visitor.content_astext()
self.write_zip_str(zfile, 'content.xml', content)
s1 = self.create_manifest()
self.write_zip_str(zfile, 'META-INF/manifest.xml', s1)
s1 = self.create_meta()
self.write_zip_str(zfile, 'meta.xml', s1)
s1 = self.get_stylesheet()
self.write_zip_str(zfile, 'styles.xml', s1)
self.store_embedded_files(zfile)
self.copy_from_stylesheet(zfile)
zfile.close()
f.seek(0)
whole = f.read()
f.close()
self.parts['whole'] = whole
self.parts['encoding'] = self.document.settings.output_encoding
self.parts['version'] = docutils.__version__
    def write_zip_str(
            self, zfile, name, contents, compress_type=zipfile.ZIP_DEFLATED):
        localtime = time.localtime(time.time())
        zinfo = zipfile.ZipInfo(name, localtime)
        # Add some standard UNIX file access permissions (-rw-r--r--).
        zinfo.external_attr = (0x81a4 & 0xFFFF) << 16
        zinfo.compress_type = compress_type
        # 'contents' (renamed from 'bytes' to avoid shadowing the builtin)
        # may be str or bytes; ZipFile.writestr accepts both.
        zfile.writestr(zinfo, contents)
def store_embedded_files(self, zfile):
embedded_files = self.visitor.get_embedded_file_list()
for source, destination in embedded_files:
if source is None:
continue
            try:
                # The destination name is already text in Python 3; the
                # old latin-1 -> utf-8 round trip would fail on str.
                zfile.write(source, destination)
            except OSError:
                self.document.reporter.warning(
                    "Can't open file %s." % (source, ))
def get_settings(self):
"""
modeled after get_stylesheet
"""
stylespath = self.settings.stylesheet
zfile = zipfile.ZipFile(stylespath, 'r')
s1 = zfile.read('settings.xml')
zfile.close()
return s1
def get_stylesheet(self):
"""Get the stylesheet from the visitor.
Ask the visitor to setup the page.
"""
s1 = self.visitor.setup_page()
return s1
def copy_from_stylesheet(self, outzipfile):
"""Copy images, settings, etc from the stylesheet doc into target doc.
"""
stylespath = self.settings.stylesheet
inzipfile = zipfile.ZipFile(stylespath, 'r')
# Copy the styles.
s1 = inzipfile.read('settings.xml')
self.write_zip_str(outzipfile, 'settings.xml', s1)
# Copy the images.
namelist = inzipfile.namelist()
for name in namelist:
if name.startswith('Pictures/'):
imageobj = inzipfile.read(name)
outzipfile.writestr(name, imageobj)
inzipfile.close()
def assemble_parts(self):
pass
def create_manifest(self):
if WhichElementTree == 'lxml':
root = Element('manifest:manifest',
nsmap=MANIFEST_NAMESPACE_DICT,
nsdict=MANIFEST_NAMESPACE_DICT,
)
else:
root = Element('manifest:manifest',
attrib=MANIFEST_NAMESPACE_ATTRIB,
nsdict=MANIFEST_NAMESPACE_DICT,
)
doc = etree.ElementTree(root)
SubElement(root, 'manifest:file-entry', attrib={
'manifest:media-type': self.MIME_TYPE,
'manifest:full-path': '/',
}, nsdict=MANNSD)
SubElement(root, 'manifest:file-entry', attrib={
'manifest:media-type': 'text/xml',
'manifest:full-path': 'content.xml',
}, nsdict=MANNSD)
SubElement(root, 'manifest:file-entry', attrib={
'manifest:media-type': 'text/xml',
'manifest:full-path': 'styles.xml',
}, nsdict=MANNSD)
SubElement(root, 'manifest:file-entry', attrib={
'manifest:media-type': 'text/xml',
'manifest:full-path': 'settings.xml',
}, nsdict=MANNSD)
SubElement(root, 'manifest:file-entry', attrib={
'manifest:media-type': 'text/xml',
'manifest:full-path': 'meta.xml',
}, nsdict=MANNSD)
s1 = ToString(doc)
doc = minidom.parseString(s1)
s1 = doc.toprettyxml(' ')
return s1
def create_meta(self):
if WhichElementTree == 'lxml':
root = Element('office:document-meta',
nsmap=META_NAMESPACE_DICT,
nsdict=META_NAMESPACE_DICT,
)
else:
root = Element('office:document-meta',
attrib=META_NAMESPACE_ATTRIB,
nsdict=META_NAMESPACE_DICT,
)
doc = etree.ElementTree(root)
root = SubElement(root, 'office:meta', nsdict=METNSD)
el1 = SubElement(root, 'meta:generator', nsdict=METNSD)
el1.text = 'Docutils/rst2odf.py/%s' % (VERSION, )
s1 = os.environ.get('USER', '')
el1 = SubElement(root, 'meta:initial-creator', nsdict=METNSD)
el1.text = s1
s2 = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime())
el1 = SubElement(root, 'meta:creation-date', nsdict=METNSD)
el1.text = s2
el1 = SubElement(root, 'dc:creator', nsdict=METNSD)
el1.text = s1
el1 = SubElement(root, 'dc:date', nsdict=METNSD)
el1.text = s2
el1 = SubElement(root, 'dc:language', nsdict=METNSD)
el1.text = 'en-US'
el1 = SubElement(root, 'meta:editing-cycles', nsdict=METNSD)
el1.text = '1'
el1 = SubElement(root, 'meta:editing-duration', nsdict=METNSD)
el1.text = 'PT00M01S'
title = self.visitor.get_title()
el1 = SubElement(root, 'dc:title', nsdict=METNSD)
if title:
el1.text = title
else:
el1.text = '[no title]'
meta_dict = self.visitor.get_meta_dict()
keywordstr = meta_dict.get('keywords')
if keywordstr is not None:
keywords = split_words(keywordstr)
for keyword in keywords:
el1 = SubElement(root, 'meta:keyword', nsdict=METNSD)
el1.text = keyword
description = meta_dict.get('description')
if description is not None:
el1 = SubElement(root, 'dc:description', nsdict=METNSD)
el1.text = description
s1 = ToString(doc)
#doc = minidom.parseString(s1)
#s1 = doc.toprettyxml(' ')
return s1
# class ODFTranslator(nodes.SparseNodeVisitor):
class ODFTranslator(nodes.GenericNodeVisitor):
used_styles = (
'attribution', 'blockindent', 'blockquote', 'blockquote-bulletitem',
'blockquote-bulletlist', 'blockquote-enumitem', 'blockquote-enumlist',
'bulletitem', 'bulletlist',
'caption', 'legend',
'centeredtextbody', 'codeblock', 'codeblock-indented',
'codeblock-classname', 'codeblock-comment', 'codeblock-functionname',
'codeblock-keyword', 'codeblock-name', 'codeblock-number',
'codeblock-operator', 'codeblock-string', 'emphasis', 'enumitem',
'enumlist', 'epigraph', 'epigraph-bulletitem', 'epigraph-bulletlist',
'epigraph-enumitem', 'epigraph-enumlist', 'footer',
'footnote', 'citation',
'header', 'highlights', 'highlights-bulletitem',
'highlights-bulletlist', 'highlights-enumitem', 'highlights-enumlist',
'horizontalline', 'inlineliteral', 'quotation', 'rubric',
'strong', 'table-title', 'textbody', 'tocbulletlist', 'tocenumlist',
'title',
'subtitle',
'heading1',
'heading2',
'heading3',
'heading4',
'heading5',
'heading6',
'heading7',
'admon-attention-hdr',
'admon-attention-body',
'admon-caution-hdr',
'admon-caution-body',
'admon-danger-hdr',
'admon-danger-body',
'admon-error-hdr',
'admon-error-body',
'admon-generic-hdr',
'admon-generic-body',
'admon-hint-hdr',
'admon-hint-body',
'admon-important-hdr',
'admon-important-body',
'admon-note-hdr',
'admon-note-body',
'admon-tip-hdr',
'admon-tip-body',
'admon-warning-hdr',
'admon-warning-body',
'tableoption',
'tableoption.%c', 'tableoption.%c%d', 'Table%d', 'Table%d.%c',
'Table%d.%c%d',
'lineblock1',
'lineblock2',
'lineblock3',
'lineblock4',
'lineblock5',
'lineblock6',
'image', 'figureframe',
)
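    # Entries containing %c / %d are name templates; they are instantiated
    # through rststyle() with the table letter/number as parameters.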
def __init__(self, document):
#nodes.SparseNodeVisitor.__init__(self, document)
nodes.GenericNodeVisitor.__init__(self, document)
self.settings = document.settings
lcode = self.settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.format_map = { }
if self.settings.odf_config_file:
from configparser import ConfigParser
parser = ConfigParser()
parser.read(self.settings.odf_config_file)
for rststyle, format in parser.items("Formats"):
if rststyle not in self.used_styles:
self.document.reporter.warning(
'Style "%s" is not a style used by odtwriter.' % (
rststyle, ))
self.format_map[rststyle] = format.decode('utf-8')
self.section_level = 0
self.section_count = 0
# Create ElementTree content and styles documents.
if WhichElementTree == 'lxml':
root = Element(
'office:document-content',
nsmap=CONTENT_NAMESPACE_DICT,
)
else:
root = Element(
'office:document-content',
attrib=CONTENT_NAMESPACE_ATTRIB,
)
self.content_tree = etree.ElementTree(element=root)
self.current_element = root
SubElement(root, 'office:scripts')
SubElement(root, 'office:font-face-decls')
el = SubElement(root, 'office:automatic-styles')
self.automatic_styles = el
el = SubElement(root, 'office:body')
el = self.generate_content_element(el)
self.current_element = el
self.body_text_element = el
self.paragraph_style_stack = [self.rststyle('textbody'), ]
self.list_style_stack = []
self.table_count = 0
self.column_count = ord('A') - 1
self.trace_level = -1
self.optiontablestyles_generated = False
self.field_name = None
self.field_element = None
self.title = None
self.image_count = 0
self.image_style_count = 0
self.image_dict = {}
self.embedded_file_list = []
self.syntaxhighlighting = 1
self.syntaxhighlight_lexer = 'python'
self.header_content = []
self.footer_content = []
self.in_header = False
self.in_footer = False
self.blockstyle = ''
self.in_table_of_contents = False
self.table_of_content_index_body = None
self.list_level = 0
self.def_list_level = 0
self.footnote_ref_dict = {}
self.footnote_list = []
self.footnote_chars_idx = 0
self.footnote_level = 0
self.pending_ids = [ ]
self.in_paragraph = False
self.found_doc_title = False
self.bumped_list_level_stack = []
self.meta_dict = {}
self.line_block_level = 0
self.line_indent_level = 0
self.citation_id = None
        self.style_index = 0  # used to form unique style names
self.str_stylesheet = ''
self.str_stylesheetcontent = ''
self.dom_stylesheet = None
self.table_styles = None
self.in_citation = False
def get_str_stylesheet(self):
return self.str_stylesheet
def retrieve_styles(self, extension):
"""Retrieve the stylesheet from either a .xml file or from
a .odt (zip) file. Return the content as a string.
"""
s2 = None
stylespath = self.settings.stylesheet
ext = os.path.splitext(stylespath)[1]
if ext == '.xml':
stylesfile = open(stylespath, 'r')
s1 = stylesfile.read()
stylesfile.close()
elif ext == extension:
zfile = zipfile.ZipFile(stylespath, 'r')
s1 = zfile.read('styles.xml')
s2 = zfile.read('content.xml')
zfile.close()
        else:
            raise RuntimeError(
                'stylesheet path (%s) must be %s or .xml file' % (
                    stylespath, extension))
        self.str_stylesheet = s1
        self.str_stylesheetcontent = s2
        self.dom_stylesheet = etree.fromstring(self.str_stylesheet)
        # A plain .xml stylesheet carries no content.xml, so only parse
        # the content part when it is available.
        if s2 is not None:
            self.dom_stylesheetcontent = etree.fromstring(s2)
            self.table_styles = self.extract_table_styles(s2)
        else:
            self.dom_stylesheetcontent = None
            self.table_styles = None
def extract_table_styles(self, styles_str):
root = etree.fromstring(styles_str)
table_styles = {}
auto_styles = root.find(
'{%s}automatic-styles' % (CNSD['office'], ))
for stylenode in auto_styles:
name = stylenode.get('{%s}name' % (CNSD['style'], ))
tablename = name.split('.')[0]
family = stylenode.get('{%s}family' % (CNSD['style'], ))
if name.startswith(TABLESTYLEPREFIX):
tablestyle = table_styles.get(tablename)
if tablestyle is None:
tablestyle = TableStyle()
table_styles[tablename] = tablestyle
if family == 'table':
properties = stylenode.find(
'{%s}table-properties' % (CNSD['style'], ))
property = properties.get('{%s}%s' % (CNSD['fo'],
'background-color', ))
if property is not None and property != 'none':
tablestyle.backgroundcolor = property
elif family == 'table-cell':
properties = stylenode.find(
'{%s}table-cell-properties' % (CNSD['style'], ))
if properties is not None:
border = self.get_property(properties)
if border is not None:
tablestyle.border = border
return table_styles
def get_property(self, stylenode):
border = None
for propertyname in TABLEPROPERTYNAMES:
border = stylenode.get('{%s}%s' % (CNSD['fo'], propertyname, ))
if border is not None and border != 'none':
return border
return border
def add_doc_title(self):
text = self.settings.title
if text:
self.title = text
if not self.found_doc_title:
el = Element('text:p', attrib = {
'text:style-name': self.rststyle('title'),
})
el.text = text
self.body_text_element.insert(0, el)
el = self.find_first_text_p(self.body_text_element)
if el is not None:
self.attach_page_style(el)
    def find_first_text_p(self, el):
        """Search the generated doc and return the first <text:p> element.
        """
        if el.tag == 'text:p' or el.tag == 'text:h':
            return el
        # Iterate directly over the element; getchildren() is deprecated
        # and removed in newer Python versions.
        for child in el:
            el1 = self.find_first_text_p(child)
            if el1 is not None:
                return el1
        return None
def attach_page_style(self, el):
"""Attach the default page style.
Create an automatic-style that refers to the current style
of this element and that refers to the default page style.
"""
current_style = el.get('text:style-name')
style_name = 'P1003'
el1 = SubElement(
self.automatic_styles, 'style:style', attrib={
'style:name': style_name,
'style:master-page-name': "rststyle-pagedefault",
'style:family': "paragraph",
}, nsdict=SNSD)
if current_style:
el1.set('style:parent-style-name', current_style)
el.set('text:style-name', style_name)
def rststyle(self, name, parameters=( )):
"""
Returns the style name to use for the given style.
If `parameters` is given `name` must contain a matching number of ``%`` and
is used as a format expression with `parameters` as the value.
"""
name1 = name % parameters
stylename = self.format_map.get(name1, 'rststyle-%s' % name1)
return stylename
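    # Illustrative examples, assuming no format_map overrides:
    #     rststyle('textbody')          -> 'rststyle-textbody'
    #     rststyle('heading%d', (2, ))  -> 'rststyle-heading2'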
def generate_content_element(self, root):
return SubElement(root, 'office:text')
def setup_page(self):
self.setup_paper(self.dom_stylesheet)
if (len(self.header_content) > 0 or len(self.footer_content) > 0 or
self.settings.custom_header or self.settings.custom_footer):
self.add_header_footer(self.dom_stylesheet)
new_content = etree.tostring(self.dom_stylesheet)
return new_content
def setup_paper(self, root_el):
        try:
            fin = os.popen("paperconf -s 2> /dev/null")
            w, h = list(map(float, fin.read().split()))
            fin.close()
        except (OSError, ValueError):
            # paperconf is unavailable or produced unparsable output.
            w, h = 612, 792     # default to Letter
def walk(el):
if el.tag == "{%s}page-layout-properties" % SNSD["style"] and \
"{%s}page-width" % SNSD["fo"] not in el.attrib:
el.attrib["{%s}page-width" % SNSD["fo"]] = "%.3fpt" % w
el.attrib["{%s}page-height" % SNSD["fo"]] = "%.3fpt" % h
el.attrib["{%s}margin-left" % SNSD["fo"]] = \
el.attrib["{%s}margin-right" % SNSD["fo"]] = \
"%.3fpt" % (.1 * w)
el.attrib["{%s}margin-top" % SNSD["fo"]] = \
el.attrib["{%s}margin-bottom" % SNSD["fo"]] = \
"%.3fpt" % (.1 * h)
            else:
                for subel in el:
                    walk(subel)
walk(root_el)
def add_header_footer(self, root_el):
automatic_styles = root_el.find(
'{%s}automatic-styles' % SNSD['office'])
path = '{%s}master-styles' % (NAME_SPACE_1, )
master_el = root_el.find(path)
if master_el is None:
return
path = '{%s}master-page' % (SNSD['style'], )
master_el_container = master_el.findall(path)
master_el = None
target_attrib = '{%s}name' % (SNSD['style'], )
target_name = self.rststyle('pagedefault')
for el in master_el_container:
if el.get(target_attrib) == target_name:
master_el = el
break
if master_el is None:
return
el1 = master_el
if self.header_content or self.settings.custom_header:
if WhichElementTree == 'lxml':
el2 = SubElement(el1, 'style:header', nsdict=SNSD)
else:
el2 = SubElement(el1, 'style:header',
attrib=STYLES_NAMESPACE_ATTRIB,
nsdict=STYLES_NAMESPACE_DICT,
)
for el in self.header_content:
attrkey = add_ns('text:style-name', nsdict=SNSD)
el.attrib[attrkey] = self.rststyle('header')
el2.append(el)
            if self.settings.custom_header:
                self.create_custom_headfoot(
                    el2, self.settings.custom_header, 'header',
                    automatic_styles)
if self.footer_content or self.settings.custom_footer:
if WhichElementTree == 'lxml':
el2 = SubElement(el1, 'style:footer', nsdict=SNSD)
else:
el2 = SubElement(el1, 'style:footer',
attrib=STYLES_NAMESPACE_ATTRIB,
nsdict=STYLES_NAMESPACE_DICT,
)
for el in self.footer_content:
attrkey = add_ns('text:style-name', nsdict=SNSD)
el.attrib[attrkey] = self.rststyle('footer')
el2.append(el)
            if self.settings.custom_footer:
                self.create_custom_headfoot(
                    el2, self.settings.custom_footer, 'footer',
                    automatic_styles)
    code_none, code_field, code_text = range(3)
field_pat = re.compile(r'%(..?)%')
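    # Custom header/footer text uses percent-delimited field specs, e.g.
    # 'Page %p% of %P%'; make_field_element() below maps each code to an
    # ODF text field.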
def create_custom_headfoot(self, parent, text, style_name, automatic_styles):
parent = SubElement(parent, 'text:p', attrib={
'text:style-name': self.rststyle(style_name),
})
current_element = None
field_iter = self.split_field_specifiers_iter(text)
for item in field_iter:
if item[0] == ODFTranslator.code_field:
if item[1] not in ('p', 'P',
't1', 't2', 't3', 't4',
'd1', 'd2', 'd3', 'd4', 'd5',
's', 't', 'a'):
msg = 'bad field spec: %%%s%%' % (item[1], )
raise RuntimeError(msg)
el1 = self.make_field_element(parent,
item[1], style_name, automatic_styles)
if el1 is None:
msg = 'bad field spec: %%%s%%' % (item[1], )
raise RuntimeError(msg)
else:
current_element = el1
else:
if current_element is None:
parent.text = item[1]
else:
current_element.tail = item[1]
def make_field_element(self, parent, text, style_name, automatic_styles):
if text == 'p':
el1 = SubElement(parent, 'text:page-number', attrib={
#'text:style-name': self.rststyle(style_name),
'text:select-page': 'current',
})
elif text == 'P':
el1 = SubElement(parent, 'text:page-count', attrib={
#'text:style-name': self.rststyle(style_name),
})
elif text == 't1':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
elif text == 't2':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:seconds', attrib={
'number:style': 'long',
})
elif text == 't3':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:am-pm')
elif text == 't4':
self.style_index += 1
el1 = SubElement(parent, 'text:time', attrib={
'text:style-name': self.rststyle(style_name),
'text:fixed': 'true',
'style:data-style-name': 'rst-time-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:time-style', attrib={
'style:name': 'rst-time-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:hours', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:minutes', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ':'
el3 = SubElement(el2, 'number:seconds', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:am-pm')
elif text == 'd1':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:day', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:year')
elif text == 'd2':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:day', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '/'
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
elif text == 'd3':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:textual': 'true',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:day', attrib={
})
el3 = SubElement(el2, 'number:text')
el3.text = ', '
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
elif text == 'd4':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'number:automatic-order': 'true',
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:month', attrib={
'number:textual': 'true',
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = ' '
el3 = SubElement(el2, 'number:day', attrib={
})
el3 = SubElement(el2, 'number:text')
el3.text = ', '
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
elif text == 'd5':
self.style_index += 1
el1 = SubElement(parent, 'text:date', attrib={
'text:style-name': self.rststyle(style_name),
'style:data-style-name': 'rst-date-style-%d' % self.style_index,
})
el2 = SubElement(automatic_styles, 'number:date-style', attrib={
'style:name': 'rst-date-style-%d' % self.style_index,
'xmlns:number': SNSD['number'],
'xmlns:style': SNSD['style'],
})
el3 = SubElement(el2, 'number:year', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '-'
el3 = SubElement(el2, 'number:month', attrib={
'number:style': 'long',
})
el3 = SubElement(el2, 'number:text')
el3.text = '-'
el3 = SubElement(el2, 'number:day', attrib={
'number:style': 'long',
})
elif text == 's':
el1 = SubElement(parent, 'text:subject', attrib={
'text:style-name': self.rststyle(style_name),
})
elif text == 't':
el1 = SubElement(parent, 'text:title', attrib={
'text:style-name': self.rststyle(style_name),
})
elif text == 'a':
el1 = SubElement(parent, 'text:author-name', attrib={
'text:fixed': 'false',
})
else:
el1 = None
return el1
def split_field_specifiers_iter(self, text):
pos1 = 0
pos_end = len(text)
while True:
mo = ODFTranslator.field_pat.search(text, pos1)
if mo:
pos2 = mo.start()
if pos2 > pos1:
yield (ODFTranslator.code_text, text[pos1:pos2])
yield (ODFTranslator.code_field, mo.group(1))
pos1 = mo.end()
else:
break
trailing = text[pos1:]
if trailing:
yield (ODFTranslator.code_text, trailing)
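    # Illustrative example: split_field_specifiers_iter('Page %p% of %P%')
    # yields (code_text, 'Page '), (code_field, 'p'), (code_text, ' of '),
    # (code_field, 'P').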
def astext(self):
root = self.content_tree.getroot()
et = etree.ElementTree(root)
s1 = ToString(et)
return s1
def content_astext(self):
return self.astext()
def set_title(self, title): self.title = title
def get_title(self): return self.title
def set_embedded_file_list(self, embedded_file_list):
self.embedded_file_list = embedded_file_list
def get_embedded_file_list(self): return self.embedded_file_list
def get_meta_dict(self): return self.meta_dict
def process_footnotes(self):
for node, el1 in self.footnote_list:
backrefs = node.attributes.get('backrefs', [])
first = True
for ref in backrefs:
el2 = self.footnote_ref_dict.get(ref)
if el2 is not None:
if first:
first = False
el3 = copy.deepcopy(el1)
el2.append(el3)
else:
                    children = list(el2)
                    if len(children) > 0:  # and 'id' in el2.attrib:
                        child = children[0]
ref1 = child.text
attribkey = add_ns('text:id', nsdict=SNSD)
id1 = el2.get(attribkey, 'footnote-error')
if id1 is None:
id1 = ''
tag = add_ns('text:note-ref', nsdict=SNSD)
el2.tag = tag
if self.settings.endnotes_end_doc:
note_class = 'endnote'
else:
note_class = 'footnote'
el2.attrib.clear()
attribkey = add_ns('text:note-class', nsdict=SNSD)
el2.attrib[attribkey] = note_class
attribkey = add_ns('text:ref-name', nsdict=SNSD)
el2.attrib[attribkey] = id1
attribkey = add_ns('text:reference-format', nsdict=SNSD)
el2.attrib[attribkey] = 'page'
el2.text = ref1
#
# Utility methods
def append_child(self, tag, attrib=None, parent=None):
if parent is None:
parent = self.current_element
if attrib is None:
el = SubElement(parent, tag)
else:
el = SubElement(parent, tag, attrib)
return el
def append_p(self, style, text=None):
result = self.append_child('text:p', attrib={
'text:style-name': self.rststyle(style)})
self.append_pending_ids(result)
if text is not None:
result.text = text
return result
def append_pending_ids(self, el):
if self.settings.create_links:
for id in self.pending_ids:
SubElement(el, 'text:reference-mark', attrib={
'text:name': id})
self.pending_ids = [ ]
def set_current_element(self, el):
self.current_element = el
def set_to_parent(self):
self.current_element = self.current_element.getparent()
def generate_labeled_block(self, node, label):
label = '%s:' % (self.language.labels[label], )
el = self.append_p('textbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = label
el = self.append_p('blockindent')
return el
def generate_labeled_line(self, node, label):
label = '%s:' % (self.language.labels[label], )
el = self.append_p('textbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = label
el1.tail = node.astext()
return el
def encode(self, text):
text = text.replace('\u00a0', " ")
return text
#
# Visitor functions
#
# In alphabetic order, more or less.
# See docutils.docutils.nodes.node_class_names.
#
def dispatch_visit(self, node):
"""Override to catch basic attributes which many nodes have."""
self.handle_basic_atts(node)
nodes.GenericNodeVisitor.dispatch_visit(self, node)
def handle_basic_atts(self, node):
if isinstance(node, nodes.Element) and node['ids']:
self.pending_ids += node['ids']
def default_visit(self, node):
self.document.reporter.warning('missing visit_%s' % (node.tagname, ))
def default_departure(self, node):
self.document.reporter.warning('missing depart_%s' % (node.tagname, ))
def visit_Text(self, node):
# Skip nodes whose text has been processed in parent nodes.
if isinstance(node.parent, docutils.nodes.literal_block):
return
text = node.astext()
        # Are we in mixed content?  If so, add the text to the
        # etree tail of the previous sibling element.
        children = list(self.current_element)
        if len(children) > 0:
            if children[-1].tail:
                children[-1].tail += text
            else:
                children[-1].tail = text
        else:
            if self.current_element.text:
                self.current_element.text += text
            else:
                self.current_element.text = text
def depart_Text(self, node):
pass
#
# Pre-defined fields
#
def visit_address(self, node):
el = self.generate_labeled_block(node, 'address')
self.set_current_element(el)
def depart_address(self, node):
self.set_to_parent()
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
el = self.append_p('blockindent')
else:
el = self.generate_labeled_block(node, 'author')
self.set_current_element(el)
def depart_author(self, node):
self.set_to_parent()
def visit_authors(self, node):
label = '%s:' % (self.language.labels['authors'], )
el = self.append_p('textbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = label
def depart_authors(self, node):
pass
def visit_contact(self, node):
el = self.generate_labeled_block(node, 'contact')
self.set_current_element(el)
def depart_contact(self, node):
self.set_to_parent()
def visit_copyright(self, node):
el = self.generate_labeled_block(node, 'copyright')
self.set_current_element(el)
def depart_copyright(self, node):
self.set_to_parent()
def visit_date(self, node):
self.generate_labeled_line(node, 'date')
def depart_date(self, node):
pass
def visit_organization(self, node):
el = self.generate_labeled_block(node, 'organization')
self.set_current_element(el)
def depart_organization(self, node):
self.set_to_parent()
def visit_status(self, node):
el = self.generate_labeled_block(node, 'status')
self.set_current_element(el)
def depart_status(self, node):
self.set_to_parent()
def visit_revision(self, node):
el = self.generate_labeled_line(node, 'revision')
def depart_revision(self, node):
pass
def visit_version(self, node):
el = self.generate_labeled_line(node, 'version')
#self.set_current_element(el)
def depart_version(self, node):
#self.set_to_parent()
pass
def visit_attribution(self, node):
el = self.append_p('attribution', node.astext())
def depart_attribution(self, node):
pass
def visit_block_quote(self, node):
if 'epigraph' in node.attributes['classes']:
self.paragraph_style_stack.append(self.rststyle('epigraph'))
self.blockstyle = self.rststyle('epigraph')
elif 'highlights' in node.attributes['classes']:
self.paragraph_style_stack.append(self.rststyle('highlights'))
self.blockstyle = self.rststyle('highlights')
else:
self.paragraph_style_stack.append(self.rststyle('blockquote'))
self.blockstyle = self.rststyle('blockquote')
self.line_indent_level += 1
def depart_block_quote(self, node):
self.paragraph_style_stack.pop()
self.blockstyle = ''
self.line_indent_level -= 1
def visit_bullet_list(self, node):
        self.list_level += 1
if self.in_table_of_contents:
if self.settings.generate_oowriter_toc:
pass
else:
if 'classes' in node and \
'auto-toc' in node.attributes['classes']:
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('tocenumlist'),
})
self.list_style_stack.append(self.rststyle('enumitem'))
else:
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('tocbulletlist'),
})
self.list_style_stack.append(self.rststyle('bulletitem'))
self.set_current_element(el)
else:
if self.blockstyle == self.rststyle('blockquote'):
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('blockquote-bulletlist'),
})
self.list_style_stack.append(
self.rststyle('blockquote-bulletitem'))
elif self.blockstyle == self.rststyle('highlights'):
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('highlights-bulletlist'),
})
self.list_style_stack.append(
self.rststyle('highlights-bulletitem'))
elif self.blockstyle == self.rststyle('epigraph'):
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('epigraph-bulletlist'),
})
self.list_style_stack.append(
self.rststyle('epigraph-bulletitem'))
else:
el = SubElement(self.current_element, 'text:list', attrib={
'text:style-name': self.rststyle('bulletlist'),
})
self.list_style_stack.append(self.rststyle('bulletitem'))
self.set_current_element(el)
def depart_bullet_list(self, node):
if self.in_table_of_contents:
if self.settings.generate_oowriter_toc:
pass
else:
self.set_to_parent()
self.list_style_stack.pop()
else:
self.set_to_parent()
self.list_style_stack.pop()
        self.list_level -= 1
    def visit_caption(self, node):
        # Caption text is picked up when the enclosing figure's image is
        # processed (see generate_figure); skip the children here.
        raise nodes.SkipChildren()
    def depart_caption(self, node):
        pass
def visit_comment(self, node):
el = self.append_p('textbody')
el1 = SubElement(el, 'office:annotation', attrib={})
el2 = SubElement(el1, 'dc:creator', attrib={})
s1 = os.environ.get('USER', '')
el2.text = s1
el2 = SubElement(el1, 'text:p', attrib={})
el2.text = node.astext()
def depart_comment(self, node):
pass
def visit_compound(self, node):
# The compound directive currently receives no special treatment.
pass
def depart_compound(self, node):
pass
def visit_container(self, node):
styles = node.attributes.get('classes', ())
if len(styles) > 0:
self.paragraph_style_stack.append(self.rststyle(styles[0]))
def depart_container(self, node):
styles = node.attributes.get('classes', ())
if len(styles) > 0:
self.paragraph_style_stack.pop()
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
    def visit_definition_list(self, node):
        self.def_list_level += 1
        if self.list_level > 5:
            raise RuntimeError(
                'max definition list nesting level exceeded')
    def depart_definition_list(self, node):
        self.def_list_level -= 1
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
el = self.append_p('deflist-term-%d' % self.def_list_level)
el.text = node.astext()
self.set_current_element(el)
raise nodes.SkipChildren()
def depart_term(self, node):
self.set_to_parent()
def visit_definition(self, node):
self.paragraph_style_stack.append(
self.rststyle('deflist-def-%d' % self.def_list_level))
self.bumped_list_level_stack.append(ListLevel(1))
def depart_definition(self, node):
self.paragraph_style_stack.pop()
self.bumped_list_level_stack.pop()
def visit_classifier(self, node):
        els = list(self.current_element)
        if len(els) > 0:
            el = els[-1]
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('emphasis')
})
el1.text = ' (%s)' % (node.astext(), )
def depart_classifier(self, node):
pass
def visit_document(self, node):
pass
def depart_document(self, node):
self.process_footnotes()
def visit_docinfo(self, node):
self.section_level += 1
self.section_count += 1
if self.settings.create_sections:
el = self.append_child('text:section', attrib={
'text:name': 'Section%d' % self.section_count,
'text:style-name': 'Sect%d' % self.section_level,
})
self.set_current_element(el)
def depart_docinfo(self, node):
self.section_level -= 1
if self.settings.create_sections:
self.set_to_parent()
def visit_emphasis(self, node):
el = SubElement(self.current_element, 'text:span',
attrib={'text:style-name': self.rststyle('emphasis')})
self.set_current_element(el)
def depart_emphasis(self, node):
self.set_to_parent()
def visit_enumerated_list(self, node):
el1 = self.current_element
if self.blockstyle == self.rststyle('blockquote'):
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle('blockquote-enumlist'),
})
self.list_style_stack.append(self.rststyle('blockquote-enumitem'))
elif self.blockstyle == self.rststyle('highlights'):
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle('highlights-enumlist'),
})
self.list_style_stack.append(self.rststyle('highlights-enumitem'))
elif self.blockstyle == self.rststyle('epigraph'):
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle('epigraph-enumlist'),
})
self.list_style_stack.append(self.rststyle('epigraph-enumitem'))
else:
liststylename = 'enumlist-%s' % (node.get('enumtype', 'arabic'), )
el2 = SubElement(el1, 'text:list', attrib={
'text:style-name': self.rststyle(liststylename),
})
self.list_style_stack.append(self.rststyle('enumitem'))
self.set_current_element(el2)
def depart_enumerated_list(self, node):
self.set_to_parent()
self.list_style_stack.pop()
def visit_list_item(self, node):
# If we are in a "bumped" list level, then wrap this
# list in an outer lists in order to increase the
# indentation level.
if self.in_table_of_contents:
if self.settings.generate_oowriter_toc:
self.paragraph_style_stack.append(
self.rststyle('contents-%d' % (self.list_level, )))
else:
el1 = self.append_child('text:list-item')
self.set_current_element(el1)
else:
el1 = self.append_child('text:list-item')
el3 = el1
if len(self.bumped_list_level_stack) > 0:
level_obj = self.bumped_list_level_stack[-1]
if level_obj.get_sibling():
level_obj.set_nested(False)
for level_obj1 in self.bumped_list_level_stack:
for idx in range(level_obj1.get_level()):
el2 = self.append_child('text:list', parent=el3)
el3 = self.append_child(
'text:list-item', parent=el2)
self.paragraph_style_stack.append(self.list_style_stack[-1])
self.set_current_element(el3)
def depart_list_item(self, node):
if self.in_table_of_contents:
if self.settings.generate_oowriter_toc:
self.paragraph_style_stack.pop()
else:
self.set_to_parent()
else:
if len(self.bumped_list_level_stack) > 0:
level_obj = self.bumped_list_level_stack[-1]
if level_obj.get_sibling():
level_obj.set_nested(True)
for level_obj1 in self.bumped_list_level_stack:
for idx in range(level_obj1.get_level()):
self.set_to_parent()
self.set_to_parent()
self.paragraph_style_stack.pop()
self.set_to_parent()
def visit_header(self, node):
self.in_header = True
def depart_header(self, node):
self.in_header = False
def visit_footer(self, node):
self.in_footer = True
def depart_footer(self, node):
self.in_footer = False
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_list(self, node):
pass
def depart_field_list(self, node):
pass
def visit_field_name(self, node):
el = self.append_p('textbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = node.astext()
def depart_field_name(self, node):
pass
def visit_field_body(self, node):
self.paragraph_style_stack.append(self.rststyle('blockindent'))
def depart_field_body(self, node):
self.paragraph_style_stack.pop()
def visit_figure(self, node):
pass
def depart_figure(self, node):
pass
def visit_footnote(self, node):
self.footnote_level += 1
self.save_footnote_current = self.current_element
el1 = Element('text:note-body')
self.current_element = el1
self.footnote_list.append((node, el1))
if isinstance(node, docutils.nodes.citation):
self.paragraph_style_stack.append(self.rststyle('citation'))
else:
self.paragraph_style_stack.append(self.rststyle('footnote'))
def depart_footnote(self, node):
self.paragraph_style_stack.pop()
self.current_element = self.save_footnote_current
self.footnote_level -= 1
footnote_chars = [
'*', '**', '***',
'++', '+++',
'##', '###',
'@@', '@@@',
]
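    # Symbolic labels handed out for auto-symbol footnotes; the index
    # wraps around when the list is exhausted (see
    # visit_footnote_reference below).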
def visit_footnote_reference(self, node):
if self.footnote_level <= 0:
id = node.attributes['ids'][0]
refid = node.attributes.get('refid')
if refid is None:
refid = ''
if self.settings.endnotes_end_doc:
note_class = 'endnote'
else:
note_class = 'footnote'
el1 = self.append_child('text:note', attrib={
'text:id': '%s' % (refid, ),
'text:note-class': note_class,
})
note_auto = str(node.attributes.get('auto', 1))
if isinstance(node, docutils.nodes.citation_reference):
citation = '[%s]' % node.astext()
el2 = SubElement(el1, 'text:note-citation', attrib={
'text:label': citation,
})
el2.text = citation
elif note_auto == '1':
el2 = SubElement(el1, 'text:note-citation', attrib={
'text:label': node.astext(),
})
el2.text = node.astext()
elif note_auto == '*':
if self.footnote_chars_idx >= len(
ODFTranslator.footnote_chars):
self.footnote_chars_idx = 0
footnote_char = ODFTranslator.footnote_chars[
self.footnote_chars_idx]
self.footnote_chars_idx += 1
el2 = SubElement(el1, 'text:note-citation', attrib={
'text:label': footnote_char,
})
el2.text = footnote_char
self.footnote_ref_dict[id] = el1
raise nodes.SkipChildren()
def depart_footnote_reference(self, node):
pass
def visit_citation(self, node):
self.in_citation = True
for id in node.attributes['ids']:
self.citation_id = id
break
self.paragraph_style_stack.append(self.rststyle('blockindent'))
self.bumped_list_level_stack.append(ListLevel(1))
def depart_citation(self, node):
self.citation_id = None
self.paragraph_style_stack.pop()
self.bumped_list_level_stack.pop()
self.in_citation = False
def visit_citation_reference(self, node):
if self.settings.create_links:
id = node.attributes['refid']
el = self.append_child('text:reference-ref', attrib={
'text:ref-name': '%s' % (id, ),
'text:reference-format': 'text',
})
el.text = '['
self.set_current_element(el)
elif self.current_element.text is None:
self.current_element.text = '['
else:
self.current_element.text += '['
def depart_citation_reference(self, node):
self.current_element.text += ']'
if self.settings.create_links:
self.set_to_parent()
def visit_label(self, node):
if isinstance(node.parent, docutils.nodes.footnote):
raise nodes.SkipChildren()
elif self.citation_id is not None:
el = self.append_p('textbody')
self.set_current_element(el)
el.text = '['
if self.settings.create_links:
el1 = self.append_child('text:reference-mark-start', attrib={
'text:name': '%s' % (self.citation_id, ),
})
def depart_label(self, node):
if isinstance(node.parent, docutils.nodes.footnote):
pass
elif self.citation_id is not None:
self.current_element.text += ']'
if self.settings.create_links:
el = self.append_child('text:reference-mark-end', attrib={
'text:name': '%s' % (self.citation_id, ),
})
self.set_to_parent()
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
    def check_file_exists(self, path):
        return os.path.exists(path)
def visit_image(self, node):
# Capture the image file.
if 'uri' in node.attributes:
source = node.attributes['uri']
            if not source.startswith(('http:', 'https:')):
if not source.startswith(os.sep):
docsource, line = utils.get_source_line(node)
if docsource:
dirname = os.path.dirname(docsource)
if dirname:
source = '%s%s%s' % (dirname, os.sep, source, )
if not self.check_file_exists(source):
self.document.reporter.warning(
'Cannot find image file %s.' % (source, ))
return
else:
return
if source in self.image_dict:
filename, destination = self.image_dict[source]
else:
self.image_count += 1
filename = os.path.split(source)[1]
destination = 'Pictures/1%08x%s' % (self.image_count, filename, )
            if source.startswith(('http:', 'https:')):
try:
imgfile = urllib.request.urlopen(source)
content = imgfile.read()
imgfile.close()
imgfile2 = tempfile.NamedTemporaryFile('wb', delete=False)
imgfile2.write(content)
imgfile2.close()
imgfilename = imgfile2.name
source = imgfilename
                except urllib.error.URLError:
                    # URLError also covers HTTPError subclasses.
                    self.document.reporter.warning(
                        "Can't open image url %s." % (source, ))
spec = (source, destination,)
else:
spec = (os.path.abspath(source), destination,)
self.embedded_file_list.append(spec)
self.image_dict[source] = (source, destination,)
# Is this a figure (containing an image) or just a plain image?
if self.in_paragraph:
el1 = self.current_element
else:
el1 = SubElement(self.current_element, 'text:p',
attrib={'text:style-name': self.rststyle('textbody')})
el2 = el1
if isinstance(node.parent, docutils.nodes.figure):
el3, el4, el5, caption = self.generate_figure(node, source,
destination, el2)
attrib = {}
el6, width = self.generate_image(node, source, destination,
el5, attrib)
if caption is not None:
el6.tail = caption
        else:   # a plain image, not wrapped in a figure
el3 = self.generate_image(node, source, destination, el2)
def depart_image(self, node):
pass
def get_image_width_height(self, node, attr):
size = None
if attr in node.attributes:
size = node.attributes[attr]
unit = size[-2:]
if unit.isalpha():
size = size[:-2]
else:
unit = 'px'
try:
size = float(size)
        except ValueError:
self.document.reporter.warning(
'Invalid %s for image: "%s"' % (
attr, node.attributes[attr]))
size = [size, unit]
return size
def get_image_scale(self, node):
if 'scale' in node.attributes:
try:
scale = int(node.attributes['scale'])
if scale < 1: # or scale > 100:
self.document.reporter.warning(
'scale out of range (%s), using 1.' % (scale, ))
scale = 1
scale = scale * 0.01
            except ValueError:
self.document.reporter.warning(
'Invalid scale for image: "%s"' % (
node.attributes['scale'], ))
else:
scale = 1.0
return scale
def get_image_scaled_width_height(self, node, source):
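        # Compute the final image size: use explicit width/height when
        # given, fall back to the pixel size reported by PIL, apply any
        # 'scale' attribute, and convert pixel sizes to inches via the DPI.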
scale = self.get_image_scale(node)
width = self.get_image_width_height(node, 'width')
height = self.get_image_width_height(node, 'height')
dpi = (72, 72)
if PIL is not None and source in self.image_dict:
filename, destination = self.image_dict[source]
imageobj = PIL.Image.open(filename, 'r')
dpi = imageobj.info.get('dpi', dpi)
# dpi information can be (xdpi, ydpi) or xydpi
            try:
                iter(dpi)
            except TypeError:
                dpi = (dpi, dpi)
else:
imageobj = None
if width is None or height is None:
if imageobj is None:
raise RuntimeError(
'image size not fully specified and PIL not installed')
            if width is None:
                width = [imageobj.size[0], 'px']
            if height is None:
                height = [imageobj.size[1], 'px']
        width[0] *= scale
        height[0] *= scale
        if width[1] == 'px':
            width = [width[0] / dpi[0], 'in']
        if height[1] == 'px':
            height = [height[0] / dpi[1], 'in']
width[0] = str(width[0])
height[0] = str(height[0])
return ''.join(width), ''.join(height)
def generate_figure(self, node, source, destination, current_element):
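        # Build the outer draw:frame and draw:text-box for a figure and
        # return (frame, text-box, caption paragraph, caption text); the
        # caller inserts the image into the caption paragraph.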
caption = None
width, height = self.get_image_scaled_width_height(node, source)
for node1 in node.parent.children:
if node1.tagname == 'caption':
caption = node1.astext()
self.image_style_count += 1
#
# Add the style for the caption.
if caption is not None:
attrib = {
'style:class': 'extra',
'style:family': 'paragraph',
'style:name': 'Caption',
'style:parent-style-name': 'Standard',
}
el1 = SubElement(self.automatic_styles, 'style:style',
attrib=attrib, nsdict=SNSD)
attrib = {
'fo:margin-bottom': '0.0835in',
'fo:margin-top': '0.0835in',
'text:line-number': '0',
'text:number-lines': 'false',
}
el2 = SubElement(el1, 'style:paragraph-properties',
attrib=attrib, nsdict=SNSD)
attrib = {
'fo:font-size': '12pt',
'fo:font-style': 'italic',
'style:font-name': 'Times',
'style:font-name-complex': 'Lucidasans1',
'style:font-size-asian': '12pt',
'style:font-size-complex': '12pt',
'style:font-style-asian': 'italic',
'style:font-style-complex': 'italic',
}
el2 = SubElement(el1, 'style:text-properties',
attrib=attrib, nsdict=SNSD)
style_name = 'rstframestyle%d' % self.image_style_count
# Add the styles
attrib = {
'style:name': style_name,
'style:family': 'graphic',
'style:parent-style-name': self.rststyle('figureframe'),
}
el1 = SubElement(self.automatic_styles,
'style:style', attrib=attrib, nsdict=SNSD)
halign = 'center'
valign = 'top'
if 'align' in node.attributes:
align = node.attributes['align'].split()
for val in align:
if val in ('left', 'center', 'right'):
halign = val
elif val in ('top', 'middle', 'bottom'):
valign = val
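        # Note: halign/valign are computed here for symmetry with
        # generate_image but are not currently applied to the frame style.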
attrib = {}
wrap = False
classes = node.parent.attributes.get('classes')
if classes and 'wrap' in classes:
wrap = True
if wrap:
attrib['style:wrap'] = 'dynamic'
else:
attrib['style:wrap'] = 'none'
el2 = SubElement(el1,
'style:graphic-properties', attrib=attrib, nsdict=SNSD)
attrib = {
'draw:style-name': style_name,
'draw:name': 'Frame1',
'text:anchor-type': 'paragraph',
'draw:z-index': '0',
}
attrib['svg:width'] = width
el3 = SubElement(current_element, 'draw:frame', attrib=attrib)
attrib = {}
el4 = SubElement(el3, 'draw:text-box', attrib=attrib)
attrib = {
'text:style-name': self.rststyle('caption'),
}
el5 = SubElement(el4, 'text:p', attrib=attrib)
return el3, el4, el5, caption
def generate_image(self, node, source, destination, current_element,
frame_attrs=None):
width, height = self.get_image_scaled_width_height(node, source)
self.image_style_count += 1
style_name = 'rstframestyle%d' % self.image_style_count
# Add the style.
attrib = {
'style:name': style_name,
'style:family': 'graphic',
'style:parent-style-name': self.rststyle('image'),
}
el1 = SubElement(self.automatic_styles,
'style:style', attrib=attrib, nsdict=SNSD)
halign = None
valign = None
if 'align' in node.attributes:
align = node.attributes['align'].split()
for val in align:
if val in ('left', 'center', 'right'):
halign = val
elif val in ('top', 'middle', 'bottom'):
valign = val
if frame_attrs is None:
attrib = {
'style:vertical-pos': 'top',
'style:vertical-rel': 'paragraph',
'style:horizontal-rel': 'paragraph',
'style:mirror': 'none',
'fo:clip': 'rect(0cm 0cm 0cm 0cm)',
'draw:luminance': '0%',
'draw:contrast': '0%',
'draw:red': '0%',
'draw:green': '0%',
'draw:blue': '0%',
'draw:gamma': '100%',
'draw:color-inversion': 'false',
'draw:image-opacity': '100%',
'draw:color-mode': 'standard',
}
else:
attrib = frame_attrs
if halign is not None:
attrib['style:horizontal-pos'] = halign
if valign is not None:
attrib['style:vertical-pos'] = valign
        # A 'wrap' entry in the node's classes selects dynamic text
        # wrapping; otherwise wrapping is disabled for the image.
wrap = False
classes = node.attributes.get('classes')
if classes and 'wrap' in classes:
wrap = True
if wrap:
attrib['style:wrap'] = 'dynamic'
else:
attrib['style:wrap'] = 'none'
# If we are inside a table, add a no-wrap style.
if self.is_in_table(node):
attrib['style:wrap'] = 'none'
el2 = SubElement(el1,
'style:graphic-properties', attrib=attrib, nsdict=SNSD)
# Add the content.
#el = SubElement(current_element, 'text:p',
# attrib={'text:style-name': self.rststyle('textbody')})
        attrib = {
'draw:style-name': style_name,
'draw:name': 'graphics2',
'draw:z-index': '1',
}
if isinstance(node.parent, nodes.TextElement):
attrib['text:anchor-type'] = 'as-char' #vds
else:
attrib['text:anchor-type'] = 'paragraph'
attrib['svg:width'] = width
attrib['svg:height'] = height
el1 = SubElement(current_element, 'draw:frame', attrib=attrib)
el2 = SubElement(el1, 'draw:image', attrib={
'xlink:href': '%s' % (destination, ),
'xlink:type': 'simple',
'xlink:show': 'embed',
'xlink:actuate': 'onLoad',
})
return el1, width
def is_in_table(self, node):
node1 = node.parent
while node1:
if isinstance(node1, docutils.nodes.entry):
return True
node1 = node1.parent
return False
def visit_legend(self, node):
if isinstance(node.parent, docutils.nodes.figure):
el1 = self.current_element[-1]
el1 = el1[0][0]
self.current_element = el1
self.paragraph_style_stack.append(self.rststyle('legend'))
def depart_legend(self, node):
if isinstance(node.parent, docutils.nodes.figure):
self.paragraph_style_stack.pop()
self.set_to_parent()
self.set_to_parent()
self.set_to_parent()
def visit_line_block(self, node):
self.line_indent_level += 1
self.line_block_level += 1
def depart_line_block(self, node):
self.line_indent_level -= 1
self.line_block_level -= 1
def visit_line(self, node):
style = 'lineblock%d' % self.line_indent_level
el1 = SubElement(self.current_element, 'text:p', attrib={
'text:style-name': self.rststyle(style),
})
self.current_element = el1
def depart_line(self, node):
self.set_to_parent()
def visit_literal(self, node):
el = SubElement(self.current_element, 'text:span',
attrib={'text:style-name': self.rststyle('inlineliteral')})
self.set_current_element(el)
def depart_literal(self, node):
self.set_to_parent()
    def visit_inline(self, node):
        # Always create a span so that depart_inline's set_to_parent stays
        # balanced; style it with the first class, if any.
        styles = node.attributes.get('classes', ())
        attrib = {}
        if len(styles) > 0:
            attrib['text:style-name'] = self.rststyle(styles[0])
        el = SubElement(self.current_element, 'text:span', attrib=attrib)
        self.set_current_element(el)
def depart_inline(self, node):
self.set_to_parent()
def _calculate_code_block_padding(self, line):
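        # Measure the leading whitespace of a line; a tab counts as
        # eight spaces.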
count = 0
matchobj = SPACES_PATTERN.match(line)
if matchobj:
pad = matchobj.group()
count = len(pad)
else:
matchobj = TABS_PATTERN.match(line)
if matchobj:
pad = matchobj.group()
count = len(pad) * 8
return count
def _add_syntax_highlighting(self, insource, language):
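        # Run the code through pygments, using the LaTeX-aware formatter
        # for (La)TeX sources and the generic program formatter otherwise.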
lexer = pygments.lexers.get_lexer_by_name(language, stripall=True)
if language in ('latex', 'tex'):
fmtr = OdtPygmentsLaTeXFormatter(lambda name, parameters=():
self.rststyle(name, parameters),
escape_function=escape_cdata)
else:
fmtr = OdtPygmentsProgFormatter(lambda name, parameters=():
self.rststyle(name, parameters),
escape_function=escape_cdata)
outsource = pygments.highlight(insource, lexer, fmtr)
return outsource
def fill_line(self, line):
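        # ODF collapses runs of spaces, so convert them into explicit
        # <text:s text:c="N"/> space elements.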
line = FILL_PAT1.sub(self.fill_func1, line)
line = FILL_PAT2.sub(self.fill_func2, line)
return line
def fill_func1(self, matchobj):
spaces = matchobj.group(0)
repl = '<text:s text:c="%d"/>' % (len(spaces), )
return repl
def fill_func2(self, matchobj):
spaces = matchobj.group(0)
repl = ' <text:s text:c="%d"/>' % (len(spaces) - 1, )
return repl
def visit_literal_block(self, node):
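        # Literal blocks are assembled as a raw XML string inside a dummy
        # wrapper element, parsed with etree, and the resulting children
        # are appended to the current element.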
if len(self.paragraph_style_stack) > 1:
wrapper1 = '<text:p text:style-name="%s">%%s</text:p>' % (
self.rststyle('codeblock-indented'), )
else:
wrapper1 = '<text:p text:style-name="%s">%%s</text:p>' % (
self.rststyle('codeblock'), )
source = node.astext()
if (pygments and
self.settings.add_syntax_highlighting
#and
#node.get('hilight', False)
):
language = node.get('language', 'python')
source = self._add_syntax_highlighting(source, language)
else:
source = escape_cdata(source)
lines = source.split('\n')
# If there is an empty last line, remove it.
if lines[-1] == '':
del lines[-1]
lines1 = ['<wrappertag1 xmlns:text="urn:oasis:names:tc:opendocument:xmlns:text:1.0">']
my_lines = []
for my_line in lines:
my_line = self.fill_line(my_line)
            my_line = my_line.replace("&#10;", "\n")
my_lines.append(my_line)
my_lines_str = '<text:line-break/>'.join(my_lines)
my_lines_str2 = wrapper1 % (my_lines_str, )
lines1.append(my_lines_str2)
lines1.append('</wrappertag1>')
s1 = ''.join(lines1)
if WhichElementTree != "lxml":
s1 = s1.encode("utf-8")
el1 = etree.fromstring(s1)
        children = list(el1)
for child in children:
self.current_element.append(child)
def depart_literal_block(self, node):
pass
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
# placeholder for math (see docs/dev/todo.txt)
def visit_math(self, node):
self.document.reporter.warning('"math" role not supported',
base_node=node)
self.visit_literal(node)
def depart_math(self, node):
self.depart_literal(node)
def visit_math_block(self, node):
self.document.reporter.warning('"math" directive not supported',
base_node=node)
self.visit_literal_block(node)
def depart_math_block(self, node):
self.depart_literal_block(node)
def visit_meta(self, node):
name = node.attributes.get('name')
content = node.attributes.get('content')
if name is not None and content is not None:
self.meta_dict[name] = content
def depart_meta(self, node):
pass
def visit_option_list(self, node):
table_name = 'tableoption'
#
# Generate automatic styles
if not self.optiontablestyles_generated:
self.optiontablestyles_generated = True
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(table_name),
'style:family': 'table'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-properties', attrib={
'style:width': '17.59cm',
'table:align': 'left',
'style:shadow': 'none'}, nsdict=SNSD)
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle('%s.%%c' % table_name, ( 'A', )),
'style:family': 'table-column'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-column-properties', attrib={
'style:column-width': '4.999cm'}, nsdict=SNSD)
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle('%s.%%c' % table_name, ( 'B', )),
'style:family': 'table-column'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-column-properties', attrib={
'style:column-width': '12.587cm'}, nsdict=SNSD)
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(
'%s.%%c%%d' % table_name, ( 'A', 1, )),
'style:family': 'table-cell'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-cell-properties', attrib={
'fo:background-color': 'transparent',
'fo:padding': '0.097cm',
'fo:border-left': '0.035cm solid #000000',
'fo:border-right': 'none',
'fo:border-top': '0.035cm solid #000000',
'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
el2 = SubElement(el1, 'style:background-image', nsdict=SNSD)
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(
'%s.%%c%%d' % table_name, ( 'B', 1, )),
'style:family': 'table-cell'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-cell-properties', attrib={
'fo:padding': '0.097cm',
'fo:border': '0.035cm solid #000000'}, nsdict=SNSD)
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(
'%s.%%c%%d' % table_name, ( 'A', 2, )),
'style:family': 'table-cell'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-cell-properties', attrib={
'fo:padding': '0.097cm',
'fo:border-left': '0.035cm solid #000000',
'fo:border-right': 'none',
'fo:border-top': 'none',
'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
el = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(
'%s.%%c%%d' % table_name, ( 'B', 2, )),
'style:family': 'table-cell'}, nsdict=SNSD)
el1 = SubElement(el, 'style:table-cell-properties', attrib={
'fo:padding': '0.097cm',
'fo:border-left': '0.035cm solid #000000',
'fo:border-right': '0.035cm solid #000000',
'fo:border-top': 'none',
'fo:border-bottom': '0.035cm solid #000000'}, nsdict=SNSD)
#
# Generate table data
el = self.append_child('table:table', attrib={
'table:name': self.rststyle(table_name),
'table:style-name': self.rststyle(table_name),
})
el1 = SubElement(el, 'table:table-column', attrib={
'table:style-name': self.rststyle(
'%s.%%c' % table_name, ( 'A', ))})
el1 = SubElement(el, 'table:table-column', attrib={
'table:style-name': self.rststyle(
'%s.%%c' % table_name, ( 'B', ))})
el1 = SubElement(el, 'table:table-header-rows')
el2 = SubElement(el1, 'table:table-row')
el3 = SubElement(el2, 'table:table-cell', attrib={
'table:style-name': self.rststyle(
'%s.%%c%%d' % table_name, ( 'A', 1, )),
'office:value-type': 'string'})
el4 = SubElement(el3, 'text:p', attrib={
'text:style-name': 'Table_20_Heading'})
        el4.text = 'Option'
el3 = SubElement(el2, 'table:table-cell', attrib={
'table:style-name': self.rststyle(
'%s.%%c%%d' % table_name, ( 'B', 1, )),
'office:value-type': 'string'})
el4 = SubElement(el3, 'text:p', attrib={
'text:style-name': 'Table_20_Heading'})
        el4.text = 'Description'
self.set_current_element(el)
def depart_option_list(self, node):
self.set_to_parent()
def visit_option_list_item(self, node):
el = self.append_child('table:table-row')
self.set_current_element(el)
def depart_option_list_item(self, node):
self.set_to_parent()
def visit_option_group(self, node):
el = self.append_child('table:table-cell', attrib={
'table:style-name': 'Table%d.A2' % self.table_count,
'office:value-type': 'string',
})
self.set_current_element(el)
def depart_option_group(self, node):
self.set_to_parent()
def visit_option(self, node):
el = self.append_child('text:p', attrib={
'text:style-name': 'Table_20_Contents'})
el.text = node.astext()
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
pass
def depart_option_argument(self, node):
pass
def visit_description(self, node):
el = self.append_child('table:table-cell', attrib={
'table:style-name': 'Table%d.B2' % self.table_count,
'office:value-type': 'string',
})
el1 = SubElement(el, 'text:p', attrib={
'text:style-name': 'Table_20_Contents'})
el1.text = node.astext()
raise nodes.SkipChildren()
def depart_description(self, node):
pass
def visit_paragraph(self, node):
self.in_paragraph = True
if self.in_header:
el = self.append_p('header')
elif self.in_footer:
el = self.append_p('footer')
else:
style_name = self.paragraph_style_stack[-1]
el = self.append_child('text:p',
attrib={'text:style-name': style_name})
self.append_pending_ids(el)
self.set_current_element(el)
def depart_paragraph(self, node):
self.in_paragraph = False
self.set_to_parent()
        if self.in_header:
            # Move the finished paragraph out of the document body and
            # into the header content.
            last_child = self.current_element[-1]
            self.header_content.append(last_child)
            self.current_element.remove(last_child)
        elif self.in_footer:
            last_child = self.current_element[-1]
            self.footer_content.append(last_child)
            self.current_element.remove(last_child)
def visit_problematic(self, node):
pass
def depart_problematic(self, node):
pass
def visit_raw(self, node):
if 'format' in node.attributes:
formats = node.attributes['format']
formatlist = formats.split()
if 'odt' in formatlist:
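                # Wrap the raw XML in a dummy element carrying the content
                # namespaces so that etree can parse it.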
rawstr = node.astext()
attrstr = ' '.join(['%s="%s"' % (k, v, )
for k,v in list(CONTENT_NAMESPACE_ATTRIB.items())])
contentstr = '<stuff %s>%s</stuff>' % (attrstr, rawstr, )
if WhichElementTree != "lxml":
contentstr = contentstr.encode("utf-8")
content = etree.fromstring(contentstr)
                elements = list(content)
if len(elements) > 0:
el1 = elements[0]
if self.in_header:
pass
elif self.in_footer:
pass
else:
self.current_element.append(el1)
raise nodes.SkipChildren()
def depart_raw(self, node):
if self.in_header:
pass
elif self.in_footer:
pass
else:
pass
def visit_reference(self, node):
text = node.astext()
if self.settings.create_links:
if 'refuri' in node:
href = node['refuri']
                if (self.settings.cloak_email_addresses
and href.startswith('mailto:')):
href = self.cloak_mailto(href)
el = self.append_child('text:a', attrib={
'xlink:href': '%s' % href,
'xlink:type': 'simple',
})
self.set_current_element(el)
elif 'refid' in node:
if self.settings.create_links:
href = node['refid']
el = self.append_child('text:reference-ref', attrib={
'text:ref-name': '%s' % href,
'text:reference-format': 'text',
})
else:
self.document.reporter.warning(
'References must have "refuri" or "refid" attribute.')
if (self.in_table_of_contents and
len(node.children) >= 1 and
isinstance(node.children[0], docutils.nodes.generated)):
node.remove(node.children[0])
def depart_reference(self, node):
if self.settings.create_links:
if 'refuri' in node:
self.set_to_parent()
def visit_rubric(self, node):
style_name = self.rststyle('rubric')
classes = node.get('classes')
if classes:
class1 = classes[0]
if class1:
style_name = class1
el = SubElement(self.current_element, 'text:h', attrib = {
#'text:outline-level': '%d' % section_level,
#'text:style-name': 'Heading_20_%d' % section_level,
'text:style-name': style_name,
})
text = node.astext()
el.text = self.encode(text)
def depart_rubric(self, node):
pass
def visit_section(self, node, move_ids=1):
self.section_level += 1
self.section_count += 1
if self.settings.create_sections:
el = self.append_child('text:section', attrib={
'text:name': 'Section%d' % self.section_count,
'text:style-name': 'Sect%d' % self.section_level,
})
self.set_current_element(el)
def depart_section(self, node):
self.section_level -= 1
if self.settings.create_sections:
self.set_to_parent()
def visit_strong(self, node):
el = SubElement(self.current_element, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
self.set_current_element(el)
def depart_strong(self, node):
self.set_to_parent()
def visit_substitution_definition(self, node):
raise nodes.SkipChildren()
def depart_substitution_definition(self, node):
pass
def visit_system_message(self, node):
pass
def depart_system_message(self, node):
pass
def get_table_style(self, node):
table_style = None
table_name = None
use_predefined_table_style = False
str_classes = node.get('classes')
if str_classes is not None:
for str_class in str_classes:
if str_class.startswith(TABLESTYLEPREFIX):
table_name = str_class
use_predefined_table_style = True
break
if table_name is not None:
table_style = self.table_styles.get(table_name)
if table_style is None:
# If we can't find the table style, issue warning
# and use the default table style.
self.document.reporter.warning(
'Can\'t find table style "%s". Using default.' % (
table_name, ))
table_name = TABLENAMEDEFAULT
table_style = self.table_styles.get(table_name)
if table_style is None:
# If we can't find the default table style, issue a warning
# and use a built-in default style.
self.document.reporter.warning(
'Can\'t find default table style "%s". Using built-in default.' % (
table_name, ))
table_style = BUILTIN_DEFAULT_TABLE_STYLE
else:
table_name = TABLENAMEDEFAULT
table_style = self.table_styles.get(table_name)
if table_style is None:
# If we can't find the default table style, issue a warning
# and use a built-in default style.
self.document.reporter.warning(
'Can\'t find default table style "%s". Using built-in default.' % (
table_name, ))
table_style = BUILTIN_DEFAULT_TABLE_STYLE
return table_style
def visit_table(self, node):
self.table_count += 1
table_style = self.get_table_style(node)
table_name = '%s%%d' % TABLESTYLEPREFIX
el1 = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(
'%s' % table_name, ( self.table_count, )),
'style:family': 'table',
}, nsdict=SNSD)
if table_style.backgroundcolor is None:
el1_1 = SubElement(el1, 'style:table-properties', attrib={
#'style:width': '17.59cm',
#'table:align': 'margins',
'table:align': 'left',
'fo:margin-top': '0in',
'fo:margin-bottom': '0.10in',
}, nsdict=SNSD)
else:
el1_1 = SubElement(el1, 'style:table-properties', attrib={
#'style:width': '17.59cm',
'table:align': 'margins',
'fo:margin-top': '0in',
'fo:margin-bottom': '0.10in',
'fo:background-color': table_style.backgroundcolor,
}, nsdict=SNSD)
# We use a single cell style for all cells in this table.
# That's probably not correct, but seems to work.
el2 = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': self.rststyle(
'%s.%%c%%d' % table_name, ( self.table_count, 'A', 1, )),
'style:family': 'table-cell',
}, nsdict=SNSD)
thickness = self.settings.table_border_thickness
if thickness is None:
line_style1 = table_style.border
else:
line_style1 = '0.%03dcm solid #000000' % (thickness, )
el2_1 = SubElement(el2, 'style:table-cell-properties', attrib={
'fo:padding': '0.049cm',
'fo:border-left': line_style1,
'fo:border-right': line_style1,
'fo:border-top': line_style1,
'fo:border-bottom': line_style1,
}, nsdict=SNSD)
title = None
for child in node.children:
if child.tagname == 'title':
title = child.astext()
break
        if title is not None:
            el3 = self.append_p('table-title', title)
el4 = SubElement(self.current_element, 'table:table', attrib={
'table:name': self.rststyle(
'%s' % table_name, ( self.table_count, )),
'table:style-name': self.rststyle(
'%s' % table_name, ( self.table_count, )),
})
self.set_current_element(el4)
self.current_table_style = el1
self.table_width = 0.0
def depart_table(self, node):
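        # Write the column-width total accumulated in visit_colspec back
        # onto this table's automatic style.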
attribkey = add_ns('style:width', nsdict=SNSD)
attribval = '%.4fin' % (self.table_width, )
el1 = self.current_table_style
el2 = el1[0]
el2.attrib[attribkey] = attribval
self.set_to_parent()
def visit_tgroup(self, node):
self.column_count = ord('A') - 1
def depart_tgroup(self, node):
pass
def visit_colspec(self, node):
self.column_count += 1
colspec_name = self.rststyle(
'%s%%d.%%s' % TABLESTYLEPREFIX,
(self.table_count, chr(self.column_count), )
)
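        # docutils reports column widths in rough character units; treating
        # twelve units as one inch approximates the intended proportions.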
colwidth = node['colwidth'] / 12.0
el1 = SubElement(self.automatic_styles, 'style:style', attrib={
'style:name': colspec_name,
'style:family': 'table-column',
}, nsdict=SNSD)
el1_1 = SubElement(el1, 'style:table-column-properties', attrib={
'style:column-width': '%.4fin' % colwidth
},
nsdict=SNSD)
el2 = self.append_child('table:table-column', attrib={
'table:style-name': colspec_name,
})
self.table_width += colwidth
def depart_colspec(self, node):
pass
def visit_thead(self, node):
el = self.append_child('table:table-header-rows')
self.set_current_element(el)
self.in_thead = True
self.paragraph_style_stack.append('Table_20_Heading')
def depart_thead(self, node):
self.set_to_parent()
self.in_thead = False
self.paragraph_style_stack.pop()
def visit_row(self, node):
self.column_count = ord('A') - 1
el = self.append_child('table:table-row')
self.set_current_element(el)
def depart_row(self, node):
self.set_to_parent()
def visit_entry(self, node):
self.column_count += 1
cellspec_name = self.rststyle(
'%s%%d.%%c%%d' % TABLESTYLEPREFIX,
(self.table_count, 'A', 1, )
)
        attrib = {
'table:style-name': cellspec_name,
'office:value-type': 'string',
}
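        # Map docutils' morecols/morerows counts onto ODF column/row
        # spanning attributes.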
morecols = node.get('morecols', 0)
if morecols > 0:
attrib['table:number-columns-spanned'] = '%d' % (morecols + 1,)
self.column_count += morecols
morerows = node.get('morerows', 0)
if morerows > 0:
attrib['table:number-rows-spanned'] = '%d' % (morerows + 1,)
el1 = self.append_child('table:table-cell', attrib=attrib)
self.set_current_element(el1)
def depart_entry(self, node):
self.set_to_parent()
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_target(self, node):
#
# I don't know how to implement targets in ODF.
# How do we create a target in oowriter? A cross-reference?
if not ('refuri' in node or 'refid' in node
or 'refname' in node):
pass
else:
pass
def depart_target(self, node):
pass
def visit_title(self, node, move_ids=1, title_type='title'):
if isinstance(node.parent, docutils.nodes.section):
section_level = self.section_level
if section_level > 7:
self.document.reporter.warning(
'Heading/section levels greater than 7 not supported.')
self.document.reporter.warning(
' Reducing to heading level 7 for heading: "%s"' % (
node.astext(), ))
section_level = 7
el1 = self.append_child('text:h', attrib = {
'text:outline-level': '%d' % section_level,
#'text:style-name': 'Heading_20_%d' % section_level,
'text:style-name': self.rststyle(
'heading%d', (section_level, )),
})
self.append_pending_ids(el1)
self.set_current_element(el1)
elif isinstance(node.parent, docutils.nodes.document):
el1 = SubElement(self.current_element, 'text:p', attrib = {
'text:style-name': self.rststyle(title_type),
})
self.append_pending_ids(el1)
text = node.astext()
self.title = text
self.found_doc_title = True
self.set_current_element(el1)
def depart_title(self, node):
if (isinstance(node.parent, docutils.nodes.section) or
isinstance(node.parent, docutils.nodes.document)):
self.set_to_parent()
def visit_subtitle(self, node, move_ids=1):
self.visit_title(node, move_ids, title_type='subtitle')
def depart_subtitle(self, node):
self.depart_title(node)
def visit_title_reference(self, node):
el = self.append_child('text:span', attrib={
'text:style-name': self.rststyle('quotation')})
el.text = self.encode(node.astext())
raise nodes.SkipChildren()
def depart_title_reference(self, node):
pass
def generate_table_of_content_entry_template(self, el1):
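        # Emit an entry template for each outline level (1-10): chapter
        # number, entry text, a dot-leader tab stop, and the page number.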
for idx in range(1, 11):
el2 = SubElement(el1,
'text:table-of-content-entry-template',
attrib={
'text:outline-level': "%d" % (idx, ),
'text:style-name': self.rststyle('contents-%d' % (idx, )),
})
el3 = SubElement(el2, 'text:index-entry-chapter')
el3 = SubElement(el2, 'text:index-entry-text')
el3 = SubElement(el2, 'text:index-entry-tab-stop', attrib={
'style:leader-char': ".",
'style:type': "right",
})
el3 = SubElement(el2, 'text:index-entry-page-number')
def find_title_label(self, node, class_type, label_key):
label = ''
title_node = None
for child in node.children:
if isinstance(child, class_type):
title_node = child
break
if title_node is not None:
label = title_node.astext()
else:
label = self.language.labels[label_key]
return label
def visit_topic(self, node):
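        # Topics are rendered by class: 'contents' becomes either a native
        # table of contents or a plain centered heading, while 'abstract'
        # and 'dedication' get centered headings between horizontal rules.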
if 'classes' in node.attributes:
if 'contents' in node.attributes['classes']:
label = self.find_title_label(node, docutils.nodes.title,
'contents')
if self.settings.generate_oowriter_toc:
el1 = self.append_child('text:table-of-content', attrib={
'text:name': 'Table of Contents1',
'text:protected': 'true',
'text:style-name': 'Sect1',
})
el2 = SubElement(el1,
'text:table-of-content-source',
attrib={
'text:outline-level': '10',
})
                el3 = SubElement(el2, 'text:index-title-template', attrib={
'text:style-name': 'Contents_20_Heading',
})
el3.text = label
self.generate_table_of_content_entry_template(el2)
el4 = SubElement(el1, 'text:index-body')
el5 = SubElement(el4, 'text:index-title')
el6 = SubElement(el5, 'text:p', attrib={
'text:style-name': self.rststyle('contents-heading'),
})
el6.text = label
self.save_current_element = self.current_element
self.table_of_content_index_body = el4
self.set_current_element(el4)
else:
el = self.append_p('horizontalline')
el = self.append_p('centeredtextbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
el1.text = label
self.in_table_of_contents = True
elif 'abstract' in node.attributes['classes']:
el = self.append_p('horizontalline')
el = self.append_p('centeredtextbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
label = self.find_title_label(node, docutils.nodes.title,
'abstract')
el1.text = label
elif 'dedication' in node.attributes['classes']:
el = self.append_p('horizontalline')
el = self.append_p('centeredtextbody')
el1 = SubElement(el, 'text:span',
attrib={'text:style-name': self.rststyle('strong')})
label = self.find_title_label(node, docutils.nodes.title,
'dedication')
el1.text = label
def depart_topic(self, node):
if 'classes' in node.attributes:
if 'contents' in node.attributes['classes']:
if self.settings.generate_oowriter_toc:
self.update_toc_page_numbers(
self.table_of_content_index_body)
self.set_current_element(self.save_current_element)
else:
el = self.append_p('horizontalline')
self.in_table_of_contents = False
def update_toc_page_numbers(self, el):
collection = []
self.update_toc_collect(el, 0, collection)
self.update_toc_add_numbers(collection)
def update_toc_collect(self, el, level, collection):
collection.append((level, el))
level += 1
        for child_el in el:
if child_el.tag != 'text:index-body':
self.update_toc_collect(child_el, level, collection)
def update_toc_add_numbers(self, collection):
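        # Append a tab stop and a placeholder page number ('9999') to each
        # entry; the office application replaces these with real page
        # numbers when the index is refreshed.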
for level, el1 in collection:
if (el1.tag == 'text:p' and
el1.text != 'Table of Contents'):
el2 = SubElement(el1, 'text:tab')
el2.tail = '9999'
def visit_transition(self, node):
el = self.append_p('horizontalline')
def depart_transition(self, node):
pass
#
# Admonitions
#
def visit_warning(self, node):
self.generate_admonition(node, 'warning')
def depart_warning(self, node):
self.paragraph_style_stack.pop()
def visit_attention(self, node):
self.generate_admonition(node, 'attention')
depart_attention = depart_warning
def visit_caution(self, node):
self.generate_admonition(node, 'caution')
depart_caution = depart_warning
def visit_danger(self, node):
self.generate_admonition(node, 'danger')
depart_danger = depart_warning
def visit_error(self, node):
self.generate_admonition(node, 'error')
depart_error = depart_warning
def visit_hint(self, node):
self.generate_admonition(node, 'hint')
depart_hint = depart_warning
def visit_important(self, node):
self.generate_admonition(node, 'important')
depart_important = depart_warning
def visit_note(self, node):
self.generate_admonition(node, 'note')
depart_note = depart_warning
def visit_tip(self, node):
self.generate_admonition(node, 'tip')
depart_tip = depart_warning
def visit_admonition(self, node):
title = None
for child in node.children:
if child.tagname == 'title':
title = child.astext()
if title is None:
classes1 = node.get('classes')
if classes1:
title = classes1[0]
self.generate_admonition(node, 'generic', title)
depart_admonition = depart_warning
def generate_admonition(self, node, label, title=None):
el1 = SubElement(self.current_element, 'text:p', attrib = {
'text:style-name': self.rststyle('admon-%s-hdr', ( label, )),
})
if title:
el1.text = title
else:
el1.text = '%s!' % (label.capitalize(), )
s1 = self.rststyle('admon-%s-body', ( label, ))
self.paragraph_style_stack.append(s1)
#
# Roles (e.g. subscript, superscript, strong, ...
#
def visit_subscript(self, node):
el = self.append_child('text:span', attrib={
'text:style-name': 'rststyle-subscript',
})
self.set_current_element(el)
def depart_subscript(self, node):
self.set_to_parent()
def visit_superscript(self, node):
el = self.append_child('text:span', attrib={
'text:style-name': 'rststyle-superscript',
})
self.set_current_element(el)
def depart_superscript(self, node):
self.set_to_parent()
# Use an own reader to modify transformations done.
class Reader(standalone.Reader):
def get_transforms(self):
default = standalone.Reader.get_transforms(self)
if self.settings.create_links:
return default
return [ i
for i in default
if i is not references.DanglingReferences ]
| {
"repo_name": "Lyleo/OmniMarkupPreviewer",
"path": "OmniMarkupLib/Renderers/libs/python3/docutils/writers/odf_odt/__init__.py",
"copies": "2",
"size": "125332",
"license": "mit",
"hash": 370985868924609340,
"line_mean": 37.1528158295,
"line_max": 103,
"alpha_frac": 0.5363514505,
"autogenerated": false,
"ratio": 3.827398766261528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5363750216761528,
"avg_score": null,
"num_lines": null
} |