def _ReadFloatingPointDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition.
"""
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.FloatingPointDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,
is_member=is_member, supported_size_values=(4, 8))
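# Illustrative sketch (not part of the reader): the shape of the definition
# values this method consumes, hand-built here from dtfabric-style YAML. The
# float32 example and its attribute names are assumptions based on the
# docstrings above, not taken from the package's own test data.
import yaml

FLOAT32_DEFINITION = """\
name: float32
type: floating-point
attributes:
  size: 4
  units: bytes
"""

definition_values = yaml.safe_load(FLOAT32_DEFINITION)
# definition_values == {'name': 'float32', 'type': 'floating-point',
#                       'attributes': {'size': 4, 'units': 'bytes'}}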
def _ReadFormatDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a format data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FormatDefinition: format definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
error_message = 'data type not supported as member'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object = self._ReadLayoutDataTypeDefinition(
definitions_registry, definition_values, data_types.FormatDefinition,
definition_name, self._SUPPORTED_DEFINITION_VALUES_FORMAT)
# TODO: disabled for now
# layout = definition_values.get('layout', None)
# if layout is None:
# error_message = 'missing layout'
# raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.metadata = definition_values.get('metadata', {})
attributes = definition_values.get('attributes', None)
if attributes:
unsupported_attributes = set(attributes.keys()).difference(
self._SUPPORTED_ATTRIBUTES_FORMAT)
if unsupported_attributes:
error_message = 'unsupported attributes: {0:s}'.format(
', '.join(unsupported_attributes))
raise errors.DefinitionReaderError(definition_name, error_message)
byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
if byte_order not in definitions.BYTE_ORDERS:
error_message = 'unsupported byte-order attribute: {0!s}'.format(
byte_order)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.byte_order = byte_order
return definition_object
def _ReadIntegerDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an integer data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
IntegerDataTypeDefinition: integer data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
definition_object = self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.IntegerDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_INTEGER, is_member=is_member,
supported_size_values=(1, 2, 4, 8))
attributes = definition_values.get('attributes', None)
if attributes:
format_attribute = attributes.get('format', definitions.FORMAT_SIGNED)
if format_attribute not in self._INTEGER_FORMAT_ATTRIBUTES:
error_message = 'unsupported format attribute: {0!s}'.format(
format_attribute)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.format = format_attribute
return definition_object
def _ReadLayoutDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values):
"""Reads a layout data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_definition_values (set[str]): names of the supported definition
values.
Returns:
LayoutDataTypeDefinition: layout data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values)
def _ReadMemberDataTypeDefinitionMember(
self, definitions_registry, definition_values, definition_name,
supports_conditions=False):
"""Reads a member data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
supports_conditions (Optional[bool]): True if conditions are supported
by the data type definition.
Returns:
DataTypeDefinition: structure member data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if not definition_values:
error_message = 'invalid structure member missing definition values'
raise errors.DefinitionReaderError(definition_name, error_message)
name = definition_values.get('name', None)
type_indicator = definition_values.get('type', None)
if not name and type_indicator != definitions.TYPE_INDICATOR_UNION:
error_message = 'invalid structure member missing name'
raise errors.DefinitionReaderError(definition_name, error_message)
# TODO: detect duplicate names.
data_type = definition_values.get('data_type', None)
type_values = (data_type, type_indicator)
type_values = [value for value in type_values if value is not None]
if not type_values:
error_message = (
'invalid structure member: {0:s} both data type and type are '
'missing').format(name or '<NAMELESS>')
raise errors.DefinitionReaderError(definition_name, error_message)
if len(type_values) > 1:
error_message = (
'invalid structure member: {0:s} data type and type not allowed to '
'be set at the same time').format(name or '<NAMELESS>')
raise errors.DefinitionReaderError(definition_name, error_message)
condition = definition_values.get('condition', None)
if not supports_conditions and condition:
error_message = (
'invalid structure member: {0:s} unsupported condition').format(
name or '<NAMELESS>')
raise errors.DefinitionReaderError(definition_name, error_message)
value = definition_values.get('value', None)
values = definition_values.get('values', None)
if None not in (value, values):
error_message = (
'invalid structure member: {0:s} value and values not allowed to '
'be set at the same time').format(name or '<NAMELESS>')
raise errors.DefinitionReaderError(definition_name, error_message)
if value:
values = [value]
supported_values = None
if values:
supported_values = []
for value in values:
if isinstance(value, py2to3.UNICODE_TYPE):
value = value.encode('ascii')
supported_values.append(value)
if type_indicator is not None:
data_type_callback = self._DATA_TYPE_CALLBACKS.get(type_indicator, None)
if data_type_callback:
data_type_callback = getattr(self, data_type_callback, None)
if not data_type_callback:
        error_message = 'unsupported data type definition: {0:s}.'.format(
type_indicator)
raise errors.DefinitionReaderError(name, error_message)
try:
data_type_definition = data_type_callback(
definitions_registry, definition_values, name, is_member=True)
except errors.DefinitionReaderError as exception:
error_message = 'in: {0:s} {1:s}'.format(
exception.name or '<NAMELESS>', exception.message)
raise errors.DefinitionReaderError(definition_name, error_message)
if condition or supported_values:
definition_object = data_types.MemberDataTypeDefinition(
name, data_type_definition, condition=condition,
values=supported_values)
else:
definition_object = data_type_definition
elif data_type is not None:
data_type_definition = definitions_registry.GetDefinitionByName(
data_type)
if not data_type_definition:
error_message = (
'invalid structure member: {0:s} undefined data type: '
'{1:s}').format(name or '<NAMELESS>', data_type)
raise errors.DefinitionReaderError(definition_name, error_message)
unsupported_definition_values = set(definition_values.keys()).difference(
self._SUPPORTED_DEFINITION_VALUES_MEMBER_DATA_TYPE)
if unsupported_definition_values:
error_message = 'unsupported definition values: {0:s}'.format(
', '.join(unsupported_definition_values))
raise errors.DefinitionReaderError(definition_name, error_message)
aliases = definition_values.get('aliases', None)
description = definition_values.get('description', None)
definition_object = data_types.MemberDataTypeDefinition(
name, data_type_definition, aliases=aliases, condition=condition,
data_type=data_type, description=description, values=supported_values)
return definition_object
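# Hedged sketch of the structure member values validated above; the member
# names and data types are hypothetical. 'data_type' and 'type' are mutually
# exclusive, only unions may be nameless, and 'value'/'values' cannot both
# be set.
valid_member = {'name': 'signature', 'data_type': 'uint32'}
valid_nameless_union = {'type': 'union', 'members': []}
invalid_both_types = {'name': 'signature', 'data_type': 'uint32', 'type': 'integer'}
invalid_both_values = {'name': 'flags', 'data_type': 'uint8', 'value': 1, 'values': [1, 2]}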
def _ReadPaddingDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a padding data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
      PaddingDefinition: padding definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if not is_member:
error_message = 'data type only supported as member'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object = self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_types.PaddingDefinition,
definition_name, self._SUPPORTED_DEFINITION_VALUES_PADDING)
alignment_size = definition_values.get('alignment_size', None)
if not alignment_size:
error_message = 'missing alignment_size'
raise errors.DefinitionReaderError(definition_name, error_message)
try:
int(alignment_size)
except ValueError:
      error_message = 'unsupported alignment size attribute: {0!s}'.format(
alignment_size)
raise errors.DefinitionReaderError(definition_name, error_message)
if alignment_size not in (2, 4, 8, 16):
      error_message = 'unsupported alignment size value: {0!s}'.format(
alignment_size)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.alignment_size = alignment_size
return definition_object
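# Hedged sketch: a padding member definition as validated above. Padding is
# only supported as a member and only alignment sizes 2, 4, 8 and 16 pass;
# the member name is illustrative.
padding_member = {'name': 'padding1', 'type': 'padding', 'alignment_size': 4}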
def _ReadSemanticDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values):
"""Reads a semantic data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_definition_values (set[str]): names of the supported definition
values.
Returns:
SemanticDataTypeDefinition: semantic data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values)
def _ReadSequenceDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a sequence data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
SequenceDefinition: sequence data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
else:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)
return self._ReadElementSequenceDataTypeDefinition(
definitions_registry, definition_values, data_types.SequenceDefinition,
definition_name, supported_definition_values)
def _ReadStorageDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes, is_member=False):
"""Reads a storage data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StorageDataTypeDefinition: storage data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_MEMBER_DATA_TYPE)
else:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE)
definition_object = self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values)
attributes = definition_values.get('attributes', None)
if attributes:
unsupported_attributes = set(attributes.keys()).difference(
supported_attributes)
if unsupported_attributes:
error_message = 'unsupported attributes: {0:s}'.format(
', '.join(unsupported_attributes))
raise errors.DefinitionReaderError(definition_name, error_message)
byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
if byte_order not in definitions.BYTE_ORDERS:
error_message = 'unsupported byte-order attribute: {0!s}'.format(
byte_order)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.byte_order = byte_order
return definition_object
def _ReadStreamDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a stream data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StreamDefinition: stream data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_MEMBER_DATA_TYPE)
else:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_ELEMENTS_DATA_TYPE)
return self._ReadElementSequenceDataTypeDefinition(
definitions_registry, definition_values, data_types.StreamDefinition,
definition_name, supported_definition_values)
def _ReadStringDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a string data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StringDefinition: string data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_STRING_MEMBER)
else:
supported_definition_values = self._SUPPORTED_DEFINITION_VALUES_STRING
definition_object = self._ReadElementSequenceDataTypeDefinition(
definitions_registry, definition_values, data_types.StringDefinition,
definition_name, supported_definition_values)
encoding = definition_values.get('encoding', None)
if not encoding:
error_message = 'missing encoding'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.encoding = encoding
return definition_object
def _ReadStructureDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a structure data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StructureDefinition: structure data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
error_message = 'data type not supported as member'
raise errors.DefinitionReaderError(definition_name, error_message)
return self._ReadDataTypeDefinitionWithMembers(
definitions_registry, definition_values, data_types.StructureDefinition,
definition_name, supports_conditions=True)
def _ReadStructureFamilyDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads a structure family data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StructureDefinition: structure data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
error_message = 'data type not supported as member'
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object = self._ReadLayoutDataTypeDefinition(
definitions_registry, definition_values,
data_types.StructureFamilyDefinition, definition_name,
self._SUPPORTED_DEFINITION_VALUES_STRUCTURE_FAMILY)
runtime = definition_values.get('runtime', None)
if not runtime:
error_message = 'missing runtime'
raise errors.DefinitionReaderError(definition_name, error_message)
runtime_data_type_definition = definitions_registry.GetDefinitionByName(
runtime)
if not runtime_data_type_definition:
error_message = 'undefined runtime: {0:s}.'.format(runtime)
raise errors.DefinitionReaderError(definition_name, error_message)
if runtime_data_type_definition.family_definition:
error_message = 'runtime: {0:s} already part of a family.'.format(runtime)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.AddRuntimeDefinition(runtime_data_type_definition)
members = definition_values.get('members', None)
if not members:
error_message = 'missing members'
raise errors.DefinitionReaderError(definition_name, error_message)
for member in members:
member_data_type_definition = definitions_registry.GetDefinitionByName(
member)
if not member_data_type_definition:
error_message = 'undefined member: {0:s}.'.format(member)
raise errors.DefinitionReaderError(definition_name, error_message)
if member_data_type_definition.family_definition:
error_message = 'member: {0:s} already part of a family.'.format(member)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.AddMemberDefinition(member_data_type_definition)
return definition_object
def _ReadUnionDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an union data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UnionDefinition: union data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadDataTypeDefinitionWithMembers(
definitions_registry, definition_values, data_types.UnionDefinition,
definition_name, supports_conditions=False)
def _ReadUUIDDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
"""Reads an UUID data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
UUIDDataTypeDefinition: UUID data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.UUIDDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE, default_size=16,
is_member=is_member, supported_size_values=(16, ))
def _ReadDefinition(self, definitions_registry, definition_values):
"""Reads a data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
Returns:
DataTypeDefinition: data type definition or None.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if not definition_values:
error_message = 'missing definition values'
raise errors.DefinitionReaderError(None, error_message)
name = definition_values.get('name', None)
if not name:
error_message = 'missing name'
raise errors.DefinitionReaderError(None, error_message)
type_indicator = definition_values.get('type', None)
if not type_indicator:
error_message = 'invalid definition missing type'
raise errors.DefinitionReaderError(name, error_message)
data_type_callback = self._DATA_TYPE_CALLBACKS.get(type_indicator, None)
if data_type_callback:
data_type_callback = getattr(self, data_type_callback, None)
if not data_type_callback:
      error_message = 'unsupported data type definition: {0:s}.'.format(
type_indicator)
raise errors.DefinitionReaderError(name, error_message)
return data_type_callback(definitions_registry, definition_values, name)
def ReadFile(self, definitions_registry, path):
"""Reads data type definitions from a file into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
path (str): path of the file to read from.
"""
with open(path, 'r') as file_object:
self.ReadFileObject(definitions_registry, file_object)
def _GetFormatErrorLocation(
self, yaml_definition, last_definition_object):
"""Retrieves a format error location.
Args:
yaml_definition (dict[str, object]): current YAML definition.
last_definition_object (DataTypeDefinition): previous data type
definition.
Returns:
str: format error location.
"""
name = yaml_definition.get('name', None)
if name:
      error_location = 'in: {0:s}'.format(name)
elif last_definition_object:
error_location = 'after: {0:s}'.format(last_definition_object.name)
else:
error_location = 'at start'
return error_location
def ReadFileObject(self, definitions_registry, file_object):
"""Reads data type definitions from a file-like object into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
file_object (file): file-like object to read from.
Raises:
FormatError: if the definitions values are missing or if the format is
incorrect.
"""
last_definition_object = None
error_location = None
error_message = None
try:
yaml_generator = yaml.safe_load_all(file_object)
for yaml_definition in yaml_generator:
definition_object = self._ReadDefinition(
definitions_registry, yaml_definition)
if not definition_object:
error_location = self._GetFormatErrorLocation(
yaml_definition, last_definition_object)
error_message = '{0:s} Missing definition object.'.format(
error_location)
raise errors.FormatError(error_message)
definitions_registry.RegisterDefinition(definition_object)
last_definition_object = definition_object
except errors.DefinitionReaderError as exception:
error_message = 'in: {0:s} {1:s}'.format(
exception.name or '<NAMELESS>', exception.message)
raise errors.FormatError(error_message)
except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:
error_location = self._GetFormatErrorLocation({}, last_definition_object)
error_message = '{0:s} {1!s}'.format(error_location, exception)
raise errors.FormatError(error_message)
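# Usage sketch (hedged): wiring the reader to a registry. The module paths
# and the concrete YAML reader class name are assumptions about the
# surrounding package layout, inferred from the docstrings above.
from dtfabric import reader as dtfabric_reader
from dtfabric import registry as dtfabric_registry


def _example_read_definitions(path):
  """Reads dtfabric definitions from a YAML file into a fresh registry."""
  definitions_registry = dtfabric_registry.DataTypeDefinitionsRegistry()
  definitions_reader = dtfabric_reader.YAMLDataTypeDefinitionsFileReader()
  definitions_reader.ReadFile(definitions_registry, path)
  return definitions_registry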
def find_files(path, filter="*.md"):
""" Finds files with an (optional) given extension in a given path. """
if os.path.isfile(path):
return [path]
if os.path.isdir(path):
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, filter):
matches.append(os.path.join(root, filename))
return matches
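# Usage example: a directory is walked recursively, while a plain file path
# comes back as a one-item list; 'docs' and the pattern are illustrative.
markdown_files = find_files('docs', filter='*.md')
readme_only = find_files('README.md')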
def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['Organization']
"""Reads the organization given by identifier from HDX and returns Organization object
Args:
identifier (str): Identifier of organization
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Organization]: Organization object if successful read, None if not
"""
organization = Organization(configuration=configuration)
result = organization._load_from_hdx('organization', identifier)
if result:
return organization
return None
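# Usage sketch (hedged): reading an organization by its name identifier.
# Assumes an HDX Configuration has already been created as this library
# expects; the 'hdx' identifier and the 'title' field are illustrative.
def _example_read_organization():
    organization = Organization.read_from_hdx('hdx')
    if organization is not None:
        return organization['title']
    return None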
def get_users(self, capacity=None):
# type: (Optional[str]) -> List[User]
"""Returns the organization's users.
Args:
capacity (Optional[str]): Filter by capacity eg. member, admin. Defaults to None.
Returns:
List[User]: Organization's users.
"""
users = list()
usersdicts = self.data.get('users')
if usersdicts is not None:
for userdata in usersdicts:
if capacity is not None and userdata['capacity'] != capacity:
continue
id = userdata.get('id')
if id is None:
id = userdata['name']
user = hdx.data.user.User.read_from_hdx(id, configuration=self.configuration)
user['capacity'] = userdata['capacity']
users.append(user)
return users
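# Usage sketch: list the administrators of an organization; the 'admin'
# capacity value follows the docstring's example.
def _example_list_admins(organization):
    return [user['name'] for user in organization.get_users(capacity='admin')]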
def add_update_user(self, user, capacity=None):
# type: (Union[hdx.data.user.User,Dict,str],Optional[str]) -> None
"""Add new or update existing user in organization with new metadata. Capacity eg. member, admin
must be supplied either within the User object or dictionary or using the capacity argument (which takes
precedence).
Args:
user (Union[User,Dict,str]): Either a user id or user metadata either from a User object or a dictionary
capacity (Optional[str]): Capacity of user eg. member, admin. Defaults to None.
Returns:
None
"""
if isinstance(user, str):
user = hdx.data.user.User.read_from_hdx(user, configuration=self.configuration)
elif isinstance(user, dict):
user = hdx.data.user.User(user, configuration=self.configuration)
if isinstance(user, hdx.data.user.User):
users = self.data.get('users')
if users is None:
users = list()
self.data['users'] = users
if capacity is not None:
user['capacity'] = capacity
self._addupdate_hdxobject(users, 'name', user)
return
raise HDXError('Type %s cannot be added as a user!' % type(user).__name__)
def add_update_users(self, users, capacity=None):
# type: (List[Union[hdx.data.user.User,Dict,str]],Optional[str]) -> None
"""Add new or update existing users in organization with new metadata. Capacity eg. member, admin
must be supplied either within the User object or dictionary or using the capacity argument (which takes
precedence).
Args:
users (List[Union[User,Dict,str]]): A list of either user ids or users metadata from User objects or dictionaries
capacity (Optional[str]): Capacity of users eg. member, admin. Defaults to None.
Returns:
None
"""
if not isinstance(users, list):
raise HDXError('Users should be a list!')
for user in users:
self.add_update_user(user, capacity)
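# Usage sketch: add two members by user id, then promote one to admin; the
# user identifiers are illustrative.
def _example_update_membership(organization):
    organization.add_update_users(['ana', 'blessing'], capacity='member')
    organization.add_update_user('ana', capacity='admin')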
def get_datasets(self, query='*:*', **kwargs):
# type: (str, Any) -> List[hdx.data.dataset.Dataset]
"""Get list of datasets in organization
Args:
query (str): Restrict datasets returned to this query (in Solr format). Defaults to '*:*'.
**kwargs: See below
sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
start (int): Offset in the complete result for where the set of returned datasets should begin
facet (string): Whether to enable faceted results. Default to True.
facet.mincount (int): Minimum counts for facet fields should be included in the results
facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
facet.field (List[str]): Fields to facet upon. Default is empty.
use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.
Returns:
List[Dataset]: List of datasets in organization
"""
return hdx.data.dataset.Dataset.search_in_hdx(query=query,
configuration=self.configuration,
fq='organization:%s' % self.data['name'], **kwargs)
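# Usage sketch: fetch at most ten datasets from the organization matching a
# free-text query; 'rows' is documented above and the query is illustrative.
def _example_search_datasets(organization):
    return organization.get_datasets(query='health', rows=10)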
def get_all_organization_names(configuration=None, **kwargs):
# type: (Optional[Configuration], Any) -> List[str]
"""Get all organization names in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
sort (str): Sort the search results according to field name and sort-order. Allowed fields are ‘name’, ‘package_count’ and ‘title’. Defaults to 'name asc'.
organizations (List[str]): List of names of the groups to return.
all_fields (bool): Return group dictionaries instead of just names. Only core fields are returned - get some more using the include_* options. Defaults to False.
include_extras (bool): If all_fields, include the group extra fields. Defaults to False.
include_tags (bool): If all_fields, include the group tags. Defaults to False.
include_groups: If all_fields, include the groups the groups are in. Defaults to False.
Returns:
List[str]: List of all organization names in HDX
"""
organization = Organization(configuration=configuration)
organization['id'] = 'all organizations' # only for error message if produced
return organization._write_to_hdx('list', kwargs, 'id')
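# Usage sketch: the default sort order is the documented 'name asc'; pass
# all_fields=True to get full dictionaries instead of just names.
def _example_list_organizations():
    return Organization.get_all_organization_names()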
def _read_from_hdx(self, object_type, value, fieldname='id',
action=None, **kwargs):
# type: (str, str, str, Optional[str], Any) -> Tuple[bool, Union[Dict, str]]
"""Makes a read call to HDX passing in given parameter.
Args:
object_type (str): Description of HDX object type (for messages)
value (str): Value of HDX field
fieldname (str): HDX field name. Defaults to id.
action (Optional[str]): Replacement CKAN action url to use. Defaults to None.
**kwargs: Other fields to pass to CKAN.
Returns:
Tuple[bool, Union[Dict, str]]: (True/False, HDX object metadata/Error)
"""
if not fieldname:
raise HDXError('Empty %s field name!' % object_type)
if action is None:
action = self.actions()['show']
data = {fieldname: value}
data.update(kwargs)
try:
result = self.configuration.call_remoteckan(action, data)
return True, result
except NotFound:
return False, '%s=%s: not found!' % (fieldname, value)
except Exception as e:
raisefrom(HDXError, 'Failed when trying to read: %s=%s! (POST)' % (fieldname, value), e)
def _load_from_hdx(self, object_type, id_field):
# type: (str, str) -> bool
"""Helper method to load the HDX object given by identifier from HDX
Args:
object_type (str): Description of HDX object type (for messages)
id_field (str): HDX object identifier
Returns:
bool: True if loaded, False if not
"""
success, result = self._read_from_hdx(object_type, id_field)
if success:
self.old_data = self.data
self.data = result
return True
logger.debug(result)
return False
def _check_load_existing_object(self, object_type, id_field_name, operation='update'):
# type: (str, str, str) -> None
"""Check metadata exists and contains HDX object identifier, and if so load HDX object
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
operation (str): Operation to report if error. Defaults to update.
Returns:
None
"""
self._check_existing_object(object_type, id_field_name)
if not self._load_from_hdx(object_type, self.data[id_field_name]):
raise HDXError('No existing %s to %s!' % (object_type, operation))
def _check_required_fields(self, object_type, ignore_fields):
# type: (str, List[str]) -> None
"""Helper method to check that metadata for HDX object is complete
Args:
ignore_fields (List[str]): Any fields to ignore in the check
Returns:
None
"""
for field in self.configuration[object_type]['required_fields']:
if field not in self.data and field not in ignore_fields:
raise HDXError('Field %s is missing in %s!' % (field, object_type))
def _merge_hdx_update(self, object_type, id_field_name, file_to_upload=None, **kwargs):
# type: (str, str, Optional[str], Any) -> None
"""Helper method to check if HDX object exists and update it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
file_to_upload (Optional[str]): File to upload to HDX
**kwargs: See below
operation (string): Operation to perform eg. patch. Defaults to update.
Returns:
None
"""
merge_two_dictionaries(self.data, self.old_data)
if 'batch_mode' in kwargs: # Whether or not CKAN should change groupings of datasets on /datasets page
self.data['batch_mode'] = kwargs['batch_mode']
if 'skip_validation' in kwargs: # Whether or not CKAN should perform validation steps (checking fields present)
self.data['skip_validation'] = kwargs['skip_validation']
        ignore_field = self.configuration[object_type].get('ignore_on_update')
self.check_required_fields(ignore_fields=[ignore_field])
operation = kwargs.get('operation', 'update')
self._save_to_hdx(operation, id_field_name, file_to_upload)
def _update_in_hdx(self, object_type, id_field_name, file_to_upload=None, **kwargs):
# type: (str, str, Optional[str], Any) -> None
"""Helper method to check if HDX object exists in HDX and if so, update it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
file_to_upload (Optional[str]): File to upload to HDX
**kwargs: See below
operation (string): Operation to perform eg. patch. Defaults to update.
Returns:
None
"""
self._check_load_existing_object(object_type, id_field_name)
        # We load an existing object even though it may well have been loaded already
# to prevent an admittedly unlikely race condition where someone has updated
# the object in the intervening time
self._merge_hdx_update(object_type, id_field_name, file_to_upload, **kwargs)
def _write_to_hdx(self, action, data, id_field_name, file_to_upload=None):
# type: (str, Dict, str, Optional[str]) -> Dict
"""Creates or updates an HDX object in HDX and return HDX object metadata dict
Args:
action (str): Action to perform eg. 'create', 'update'
data (Dict): Data to write to HDX
id_field_name (str): Name of field containing HDX object identifier or None
file_to_upload (Optional[str]): File to upload to HDX
Returns:
Dict: HDX object metadata
"""
file = None
try:
if file_to_upload:
file = open(file_to_upload, 'rb')
files = [('upload', file)]
else:
files = None
return self.configuration.call_remoteckan(self.actions()[action], data, files=files)
except Exception as e:
raisefrom(HDXError, 'Failed when trying to %s %s! (POST)' % (action, data[id_field_name]), e)
finally:
if file_to_upload and file:
file.close()
def _save_to_hdx(self, action, id_field_name, file_to_upload=None):
# type: (str, str, Optional[str]) -> None
"""Creates or updates an HDX object in HDX, saving current data and replacing with returned HDX object data
from HDX
Args:
action (str): Action to perform: 'create' or 'update'
id_field_name (str): Name of field containing HDX object identifier
file_to_upload (Optional[str]): File to upload to HDX
Returns:
None
"""
result = self._write_to_hdx(action, self.data, id_field_name, file_to_upload)
self.old_data = self.data
self.data = result
def _create_in_hdx(self, object_type, id_field_name, name_field_name,
file_to_upload=None):
# type: (str, str, str, Optional[str]) -> None
"""Helper method to check if resource exists in HDX and if so, update it, otherwise create it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
name_field_name (str): Name of field containing HDX object name
file_to_upload (Optional[str]): File to upload to HDX (if url not supplied)
Returns:
None
"""
self.check_required_fields()
if id_field_name in self.data and self._load_from_hdx(object_type, self.data[id_field_name]):
logger.warning('%s exists. Updating %s' % (object_type, self.data[id_field_name]))
self._merge_hdx_update(object_type, id_field_name, file_to_upload)
else:
self._save_to_hdx('create', name_field_name, file_to_upload)
def _delete_from_hdx(self, object_type, id_field_name):
# type: (str, str) -> None
"""Helper method to deletes a resource from HDX
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
Returns:
None
"""
if id_field_name not in self.data:
raise HDXError('No %s field (mandatory) in %s!' % (id_field_name, object_type))
self._save_to_hdx('delete', id_field_name)
def _addupdate_hdxobject(self, hdxobjects, id_field, new_hdxobject):
# type: (List[HDXObjectUpperBound], str, HDXObjectUpperBound) -> HDXObjectUpperBound
"""Helper function to add a new HDX object to a supplied list of HDX objects or update existing metadata if the object
already exists in the list
Args:
hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones
id_field (str): Field on which to match to determine if object already exists in list
new_hdxobject (T <= HDXObject): The HDX object to be added/updated
Returns:
T <= HDXObject: The HDX object which was added or updated
"""
for hdxobject in hdxobjects:
if hdxobject[id_field] == new_hdxobject[id_field]:
merge_two_dictionaries(hdxobject, new_hdxobject)
return hdxobject
hdxobjects.append(new_hdxobject)
return new_hdxobject
def _remove_hdxobject(self, objlist, obj, matchon='id', delete=False):
# type: (List[Union[HDXObjectUpperBound,Dict]], Union[HDXObjectUpperBound,Dict,str], str, bool) -> bool
"""Remove an HDX object from a list within the parent HDX object
Args:
objlist (List[Union[T <= HDXObject,Dict]]): list of HDX objects
obj (Union[T <= HDXObject,Dict,str]): Either an id or hdx object metadata either from an HDX object or a dictionary
matchon (str): Field to match on. Defaults to id.
delete (bool): Whether to delete HDX object. Defaults to False.
Returns:
bool: True if object removed, False if not
"""
if objlist is None:
return False
if isinstance(obj, six.string_types):
obj_id = obj
elif isinstance(obj, dict) or isinstance(obj, HDXObject):
obj_id = obj.get(matchon)
else:
raise HDXError('Type of object not a string, dict or T<=HDXObject')
if not obj_id:
return False
for i, objdata in enumerate(objlist):
objid = objdata.get(matchon)
if objid and objid == obj_id:
if delete:
objlist[i].delete_from_hdx()
del objlist[i]
return True
return False
def _convert_hdxobjects(self, hdxobjects):
# type: (List[HDXObjectUpperBound]) -> List[HDXObjectUpperBound]
"""Helper function to convert supplied list of HDX objects to a list of dict
Args:
hdxobjects (List[T <= HDXObject]): List of HDX objects to convert
Returns:
List[Dict]: List of HDX objects converted to simple dictionaries
"""
newhdxobjects = list()
for hdxobject in hdxobjects:
newhdxobjects.append(hdxobject.data)
return newhdxobjects
def _copy_hdxobjects(self, hdxobjects, hdxobjectclass, attribute_to_copy=None):
# type: (List[HDXObjectUpperBound], type, Optional[str]) -> List[HDXObjectUpperBound]
"""Helper function to make a deep copy of a supplied list of HDX objects
Args:
hdxobjects (List[T <= HDXObject]): list of HDX objects to copy
hdxobjectclass (type): Type of the HDX Objects to be copied
attribute_to_copy (Optional[str]): An attribute to copy over from the HDX object. Defaults to None.
Returns:
List[T <= HDXObject]: Deep copy of list of HDX objects
"""
newhdxobjects = list()
for hdxobject in hdxobjects:
newhdxobjectdata = copy.deepcopy(hdxobject.data)
newhdxobject = hdxobjectclass(newhdxobjectdata, configuration=self.configuration)
if attribute_to_copy:
value = getattr(hdxobject, attribute_to_copy)
setattr(newhdxobject, attribute_to_copy, value)
newhdxobjects.append(newhdxobject)
return newhdxobjects
|
Helper function to take a list of HDX objects contained in the internal dictionary and add them to a
supplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in
the internal dictionary is then deleted.
Args:
hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones
hdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects
id_field (str): Field on which to match to determine if object already exists in list
hdxobjectclass (type): Type of the HDX Object to be added/updated
Returns:
None
def _separate_hdxobjects(self, hdxobjects, hdxobjects_name, id_field, hdxobjectclass):
# type: (List[HDXObjectUpperBound], str, str, type) -> None
"""Helper function to take a list of HDX objects contained in the internal dictionary and add them to a
supplied list of HDX objects or update existing metadata if any objects already exist in the list. The list in
the internal dictionary is then deleted.
Args:
hdxobjects (List[T <= HDXObject]): list of HDX objects to which to add new objects or update existing ones
hdxobjects_name (str): Name of key in internal dictionary from which to obtain list of HDX objects
id_field (str): Field on which to match to determine if object already exists in list
hdxobjectclass (type): Type of the HDX Object to be added/updated
Returns:
None
"""
new_hdxobjects = self.data.get(hdxobjects_name, list())
""":type : List[HDXObjectUpperBound]"""
if new_hdxobjects:
hdxobject_names = set()
for hdxobject in hdxobjects:
hdxobject_name = hdxobject[id_field]
hdxobject_names.add(hdxobject_name)
for new_hdxobject in new_hdxobjects:
if hdxobject_name == new_hdxobject[id_field]:
merge_two_dictionaries(hdxobject, new_hdxobject)
break
for new_hdxobject in new_hdxobjects:
if not new_hdxobject[id_field] in hdxobject_names:
hdxobjects.append(hdxobjectclass(new_hdxobject, configuration=self.configuration))
del self.data[hdxobjects_name]
|
Return the dataset's list of tags
Returns:
List[str]: list of tags or [] if there are none
def _get_tags(self):
# type: () -> List[str]
"""Return the dataset's list of tags
Returns:
List[str]: list of tags or [] if there are none
"""
tags = self.data.get('tags', None)
if not tags:
return list()
return [x['name'] for x in tags]
|
Add a tag
Args:
tag (str): Tag to add
Returns:
bool: True if tag added or False if tag already present
def _add_tag(self, tag):
# type: (str) -> bool
"""Add a tag
Args:
tag (str): Tag to add
Returns:
bool: True if tag added or False if tag already present
"""
tags = self.data.get('tags', None)
if tags:
if tag in [x['name'] for x in tags]:
return False
else:
tags = list()
tags.append({'name': tag})
self.data['tags'] = tags
return True
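Tags are stored as a list of dictionaries keyed on 'name', so (hypothetical tag):
self._add_tag('health')   # True, self.data['tags'] == [{'name': 'health'}]
self._add_tag('health')   # False, already present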
|
Add a list of tags
Args:
tags (List[str]): list of tags to add
Returns:
bool: True if all tags added or False if any already present.
def _add_tags(self, tags):
# type: (List[str]) -> bool
"""Add a list of tag
Args:
tags (List[str]): list of tags to add
Returns:
bool: True if all tags added or False if any already present.
"""
alltagsadded = True
for tag in tags:
if not self._add_tag(tag):
alltagsadded = False
return alltagsadded
|
Return list of strings from comma separated list
Args:
field (str): Field containing comma separated list
Returns:
List[str]: List of strings
def _get_stringlist_from_commastring(self, field):
# type: (str) -> List[str]
"""Return list of strings from comma separated list
Args:
field (str): Field containing comma separated list
Returns:
List[str]: List of strings
"""
strings = self.data.get(field)
if strings:
return strings.split(',')
else:
return list()
|
Add a string to a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to add
Returns:
bool: True if string added or False if string already present
def _add_string_to_commastring(self, field, string):
# type: (str, str) -> bool
"""Add a string to a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to add
Returns:
bool: True if string added or False if string already present
"""
if string in self._get_stringlist_from_commastring(field):
return False
strings = '%s,%s' % (self.data.get(field, ''), string)
if strings[0] == ',':
strings = strings[1:]
self.data[field] = strings
return True
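A short sketch of the comma handling, using a hypothetical field name:
self._add_string_to_commastring('methodology_other', 'survey')   # True -> 'survey' (leading comma stripped)
self._add_string_to_commastring('methodology_other', 'census')   # True -> 'survey,census'
self._add_string_to_commastring('methodology_other', 'survey')   # False, already present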
|
Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
def _add_strings_to_commastring(self, field, strings):
# type: (str, List[str]) -> bool
"""Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
"""
allstringsadded = True
for string in strings:
if not self._add_string_to_commastring(field, string):
allstringsadded = False
return allstringsadded
|
Remove a string from a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to remove
Returns:
bool: True if string removed or False if not
def _remove_string_from_commastring(self, field, string):
# type: (str, str) -> bool
"""Remove a string from a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to remove
Returns:
bool: True if string removed or False if not
"""
    # match whole comma separated items rather than substrings so that
    # removing one entry cannot mangle another
    strings = self._get_stringlist_from_commastring(field)
    if string in strings:
        strings.remove(string)
        self.data[field] = ','.join(strings)
        return True
    return False
|
Reads the resource given by identifier from HDX and returns Resource object
Args:
identifier (str): Identifier of resource
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Resource]: Resource object if successful read, None if not
def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['Resource']
"""Reads the resource given by identifier from HDX and returns Resource object
Args:
identifier (str): Identifier of resource
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Resource]: Resource object if successful read, None if not
"""
if is_valid_uuid(identifier) is False:
raise HDXError('%s is not a valid resource id!' % identifier)
resource = Resource(configuration=configuration)
result = resource._load_from_hdx('resource', identifier)
if result:
return resource
return None
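Typical use, with a hypothetical (but syntactically valid) uuid:
resource = Resource.read_from_hdx('f48a3cf9-1e47-4bb9-8b39-6c1ba0fa650a')
if resource is None:
    logger.info('Resource does not exist in HDX')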
|
Delete any existing url and set the file uploaded to the local path provided
Args:
file_to_upload (str): Local path to file to upload
Returns:
None
def set_file_to_upload(self, file_to_upload):
# type: (str) -> None
"""Delete any existing url and set the file uploaded to the local path provided
Args:
file_to_upload (str): Local path to file to upload
Returns:
None
"""
if 'url' in self.data:
del self.data['url']
self.file_to_upload = file_to_upload
|
Check if url or file to upload provided for resource and add resource_type and url_type if not supplied
Returns:
None
def check_url_filetoupload(self):
# type: () -> None
"""Check if url or file to upload provided for resource and add resource_type and url_type if not supplied
Returns:
None
"""
if self.file_to_upload is None:
if 'url' in self.data:
if 'resource_type' not in self.data:
self.data['resource_type'] = 'api'
if 'url_type' not in self.data:
self.data['url_type'] = 'api'
else:
raise HDXError('Either a url or a file to upload must be supplied!')
else:
if 'url' in self.data:
if self.data['url'] != hdx.data.dataset.Dataset.temporary_url:
raise HDXError('Either a url or a file to upload must be supplied not both!')
if 'resource_type' not in self.data:
self.data['resource_type'] = 'file.upload'
if 'url_type' not in self.data:
self.data['url_type'] = 'upload'
if 'tracking_summary' in self.data:
del self.data['tracking_summary']
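A sketch of the defaults this method fills in, assuming a configured Resource (hypothetical metadata):
resource = Resource({'name': 'data.csv'})
resource.set_file_to_upload('/tmp/data.csv')
resource.check_url_filetoupload()
# resource['resource_type'] == 'file.upload' and resource['url_type'] == 'upload';
# a resource with only a url would instead get 'api' for both fields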
|
Check if resource exists in HDX and if so, update it
Args:
**kwargs: See below
operation (string): Operation to perform, e.g. patch. Defaults to update.
Returns:
None
def update_in_hdx(self, **kwargs):
# type: (Any) -> None
"""Check if resource exists in HDX and if so, update it
Args:
**kwargs: See below
operation (string): Operation to perform, e.g. patch. Defaults to update.
Returns:
None
"""
self._check_load_existing_object('resource', 'id')
if self.file_to_upload and 'url' in self.data:
del self.data['url']
self._merge_hdx_update('resource', 'id', self.file_to_upload, **kwargs)
|
Check if resource exists in HDX and if so, update it, otherwise create it
Returns:
None
def create_in_hdx(self):
# type: () -> None
"""Check if resource exists in HDX and if so, update it, otherwise create it
Returns:
None
"""
self.check_required_fields()
    resource_id = self.data.get('id')  # avoid shadowing the built-in id()
    if resource_id and self._load_from_hdx('resource', resource_id):
        logger.warning('%s exists. Updating %s' % ('resource', resource_id))
if self.file_to_upload and 'url' in self.data:
del self.data['url']
self._merge_hdx_update('resource', 'id', self.file_to_upload)
else:
self._save_to_hdx('create', 'name', self.file_to_upload)
|
Return dataset containing this resource
Returns:
hdx.data.dataset.Dataset: Dataset containing this resource
def get_dataset(self):
# type: () -> hdx.data.dataset.Dataset
"""Return dataset containing this resource
Returns:
hdx.data.dataset.Dataset: Dataset containing this resource
"""
package_id = self.data.get('package_id')
if package_id is None:
raise HDXError('Resource has no package id!')
return hdx.data.dataset.Dataset.read_from_hdx(package_id)
|
Searches for resources in HDX. NOTE: Does not search dataset metadata!
Args:
query (str): Query
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
order_by (str): A field on the Resource model that orders the results
offset (int): Apply an offset to the query
limit (int): Apply a limit to the query
Returns:
List[Resource]: List of resources resulting from query
def search_in_hdx(query, configuration=None, **kwargs):
# type: (str, Optional[Configuration], Any) -> List['Resource']
"""Searches for resources in HDX. NOTE: Does not search dataset metadata!
Args:
query (str): Query
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
**kwargs: See below
order_by (str): A field on the Resource model that orders the results
offset (int): Apply an offset to the query
limit (int): Apply a limit to the query
Returns:
List[Resource]: List of resources resulting from query
"""
resources = []
resource = Resource(configuration=configuration)
success, result = resource._read_from_hdx('resource', query, 'query', Resource.actions()['search'])
if result:
count = result.get('count', None)
if count:
for resourcedict in result['results']:
resource = Resource(resourcedict, configuration=configuration)
resources.append(resource)
else:
logger.debug(result)
return resources
|
Download resource and store it in the provided folder or a temporary folder if no folder is supplied
Args:
folder (Optional[str]): Folder to download resource to. Defaults to None.
Returns:
Tuple[str, str]: (URL downloaded, Path to downloaded file)
def download(self, folder=None):
# type: (Optional[str]) -> Tuple[str, str]
"""Download resource store to provided folder or temporary folder if no folder supplied
Args:
folder (Optional[str]): Folder to download resource to. Defaults to None.
Returns:
Tuple[str, str]: (URL downloaded, Path to downloaded file)
"""
# Download the resource
url = self.data.get('url', None)
if not url:
raise HDXError('No URL to download!')
logger.debug('Downloading %s' % url)
filename = self.data['name']
    extension = '.%s' % self.data['format']  # avoid shadowing the built-in format()
    if extension not in filename:
        filename = '%s%s' % (filename, extension)
with Download(full_agent=self.configuration.get_user_agent()) as downloader:
path = downloader.download_file(url, folder, filename)
return url, path
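Typical use, assuming the resource has a url set (hypothetical folder):
url, path = resource.download(folder='/tmp')
# the filename is the resource name, with '.<format>' appended if not already present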
|
Get list of resources that have a datastore returning their ids.
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
List[str]: List of resource ids that are in the datastore
def get_all_resource_ids_in_datastore(configuration=None):
# type: (Optional[Configuration]) -> List[str]
"""Get list of resources that have a datastore returning their ids.
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
List[str]: List of resource ids that are in the datastore
"""
resource = Resource(configuration=configuration)
success, result = resource._read_from_hdx('datastore', '_table_metadata', 'resource_id',
Resource.actions()['datastore_search'], limit=10000)
resource_ids = list()
if not success:
logger.debug(result)
else:
for record in result['records']:
resource_ids.append(record['name'])
return resource_ids
|
Check if the resource has a datastore.
Returns:
bool: Whether the resource has a datastore or not
def has_datastore(self):
# type: () -> bool
"""Check if the resource has a datastore.
Returns:
bool: Whether the resource has a datastore or not
"""
success, result = self._read_from_hdx('datastore', self.data['id'], 'resource_id',
self.actions()['datastore_search'])
if not success:
logger.debug(result)
else:
if result:
return True
return False
|
Delete a resource from the HDX datastore
Returns:
None
def delete_datastore(self):
# type: () -> None
"""Delete a resource from the HDX datastore
Returns:
None
"""
success, result = self._read_from_hdx('datastore', self.data['id'], 'resource_id',
self.actions()['datastore_delete'],
force=True)
if not success:
logger.debug(result)
|
For tabular data, create a resource in the HDX datastore which enables data preview in HDX. If no schema is provided
all fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.
Args:
schema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.
primary_key (Optional[str]): Primary key of schema. Defaults to None.
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
def create_datastore(self, schema=None, primary_key=None,
delete_first=0, path=None):
# type: (Optional[List[Dict]], Optional[str], int, Optional[str]) -> None
"""For tabular data, create a resource in the HDX datastore which enables data preview in HDX. If no schema is provided
all fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.
Args:
schema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.
primary_key (Optional[str]): Primary key of schema. Defaults to None.
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
if delete_first == 0:
pass
elif delete_first == 1:
self.delete_datastore()
elif delete_first == 2:
if primary_key is None:
self.delete_datastore()
else:
raise HDXError('delete_first must be 0, 1 or 2! (0 = No, 1 = Yes, 2 = Delete if no primary key)')
if path is None:
# Download the resource
url, path = self.download()
delete_after_download = True
else:
url = path
delete_after_download = False
def convert_to_text(extended_rows):
for number, headers, row in extended_rows:
for i, val in enumerate(row):
row[i] = str(val)
yield (number, headers, row)
with Download(full_agent=self.configuration.get_user_agent()) as downloader:
try:
stream = downloader.get_tabular_stream(path, headers=1, post_parse=[convert_to_text],
bytes_sample_size=1000000)
nonefieldname = False
if schema is None:
schema = list()
for fieldname in stream.headers:
if fieldname is not None:
schema.append({'id': fieldname, 'type': 'text'})
else:
nonefieldname = True
data = {'resource_id': self.data['id'], 'force': True, 'fields': schema, 'primary_key': primary_key}
self._write_to_hdx('datastore_create', data, 'resource_id')
if primary_key is None:
method = 'insert'
else:
method = 'upsert'
logger.debug('Uploading data from %s to datastore' % url)
offset = 0
chunksize = 100
rowset = stream.read(keyed=True, limit=chunksize)
            while len(rowset) != 0:
                if nonefieldname:
                    for row in rowset:
                        del row[None]
                data = {'resource_id': self.data['id'], 'force': True, 'method': method, 'records': rowset}
                self._write_to_hdx('datastore_upsert', data, 'resource_id')
                # log after the chunk is written so the offset matches the uploaded rows
                logger.debug('Uploading: %s' % offset)
                offset += chunksize
                rowset = stream.read(keyed=True, limit=chunksize)
except Exception as e:
raisefrom(HDXError, 'Upload to datastore of %s failed!' % url, e)
finally:
if delete_after_download:
remove(path)
|
For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a dictionary
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
data (dict): Dictionary containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
def create_datastore_from_dict_schema(self, data, delete_first=0, path=None):
# type: (dict, int, Optional[str]) -> None
"""For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a dictionary
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
data (dict): Dictionary containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
schema = data['schema']
primary_key = data.get('primary_key')
self.create_datastore(schema, primary_key, delete_first, path=path)
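The expected dictionary shape, inferred from the two keys read above (field names and types are hypothetical):
data = {'schema': [{'id': 'iso3', 'type': 'text'},
                   {'id': 'population', 'type': 'int'}],
        'primary_key': 'iso3'}
resource.create_datastore_from_dict_schema(data, delete_first=2)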
|
For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a YAML file
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
yaml_path (str): Path to YAML file containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
def create_datastore_from_yaml_schema(self, yaml_path, delete_first=0,
path=None):
# type: (str, Optional[int], Optional[str]) -> None
"""For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a YAML file
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
yaml_path (str): Path to YAML file containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
data = load_yaml(yaml_path)
self.create_datastore_from_dict_schema(data, delete_first, path=path)
|
For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a JSON file
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
json_path (str): Path to JSON file containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
def create_datastore_from_json_schema(self, json_path, delete_first=0, path=None):
# type: (str, int, Optional[str]) -> None
"""For tabular data, create a resource in the HDX datastore which enables data preview in HDX from a JSON file
containing a list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'} and optionally a primary key.
If path is not supplied, the file is first downloaded from HDX.
Args:
json_path (str): Path to JSON file containing list of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
data = load_json(json_path)
self.create_datastore_from_dict_schema(data, delete_first, path=path)
|
For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the built in
YAML definition for a topline. If path is not supplied, the file is first downloaded from HDX.
Args:
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
def create_datastore_for_topline(self, delete_first=0, path=None):
# type: (int, Optional[str]) -> None
"""For tabular data, create a resource in the HDX datastore which enables data preview in HDX using the built in
YAML definition for a topline. If path is not supplied, the file is first downloaded from HDX.
Args:
delete_first (int): Delete datastore before creation. 0 = No, 1 = Yes, 2 = If no primary key. Defaults to 0.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
data = load_yaml(script_dir_plus_file(join('..', 'hdx_datasource_topline.yml'), Resource))
self.create_datastore_from_dict_schema(data, delete_first, path=path)
|
For tabular data, update a resource in the HDX datastore which enables data preview in HDX. If no schema is provided
all fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.
Args:
schema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.
primary_key (Optional[str]): Primary key of schema. Defaults to None.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
def update_datastore(self, schema=None, primary_key=None,
path=None):
# type: (Optional[List[Dict]], Optional[str], Optional[str]) -> None
"""For tabular data, update a resource in the HDX datastore which enables data preview in HDX. If no schema is provided
all fields are assumed to be text. If path is not supplied, the file is first downloaded from HDX.
Args:
schema (List[Dict]): List of fields and types of form {'id': 'FIELD', 'type': 'TYPE'}. Defaults to None.
primary_key (Optional[str]): Primary key of schema. Defaults to None.
path (Optional[str]): Local path to file that was uploaded. Defaults to None.
Returns:
None
"""
self.create_datastore(schema, primary_key, 2, path=path)
|
Get ResourceView object from resource view metadata
Args:
resource_view (Union[ResourceView,Dict]): ResourceView metadata from a ResourceView object or dictionary
Returns:
ResourceView: ResourceView object
def _get_resource_view(self, resource_view):
# type: (Union[ResourceView,Dict]) -> ResourceView
"""Get resource view id
Args:
resource_view (Union[ResourceView,Dict]): ResourceView metadata from a ResourceView object or dictionary
Returns:
ResourceView: ResourceView object
"""
if isinstance(resource_view, dict):
resource_view = ResourceView(resource_view, configuration=self.configuration)
if isinstance(resource_view, ResourceView):
return resource_view
raise HDXError('Type %s is not a valid resource view!' % type(resource_view).__name__)
|
Add new or update existing resource views in resource with new metadata.
Args:
resource_views (List[Union[ResourceView,Dict]]): A list of resource views metadata from ResourceView objects or dictionaries
Returns:
None
def add_update_resource_views(self, resource_views):
# type: (List[Union[ResourceView,Dict]]) -> None
"""Add new or update existing resource views in resource with new metadata.
Args:
resource_views (List[Union[ResourceView,Dict]]): A list of resource views metadata from ResourceView objects or dictionaries
Returns:
None
"""
if not isinstance(resource_views, list):
raise HDXError('ResourceViews should be a list!')
for resource_view in resource_views:
self.add_update_resource_view(resource_view)
|
Order resource views in resource.
Args:
resource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries
Returns:
None
def reorder_resource_views(self, resource_views):
# type: (List[Union[ResourceView,Dict,str]]) -> None
"""Order resource views in resource.
Args:
resource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries
Returns:
None
"""
if not isinstance(resource_views, list):
raise HDXError('ResourceViews should be a list!')
ids = list()
for resource_view in resource_views:
if isinstance(resource_view, str):
resource_view_id = resource_view
else:
resource_view_id = resource_view['id']
if is_valid_uuid(resource_view_id) is False:
raise HDXError('%s is not a valid resource view id!' % resource_view)
ids.append(resource_view_id)
_, result = self._read_from_hdx('resource view', self.data['id'], 'id',
ResourceView.actions()['reorder'], order=ids)
|
Delete a resource view from the resource and HDX
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None
def delete_resource_view(self, resource_view):
# type: (Union[ResourceView,Dict,str]) -> None
"""Delete a resource view from the resource and HDX
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None
"""
if isinstance(resource_view, str):
if is_valid_uuid(resource_view) is False:
raise HDXError('%s is not a valid resource view id!' % resource_view)
resource_view = ResourceView({'id': resource_view}, configuration=self.configuration)
else:
resource_view = self._get_resource_view(resource_view)
if 'id' not in resource_view:
found = False
title = resource_view.get('title')
for rv in self.get_resource_views():
if resource_view['title'] == rv['title']:
resource_view = rv
found = True
break
if not found:
raise HDXError('No resource views have title %s in this resource!' % title)
resource_view.delete_from_hdx()
|
Parses the output stem lines to produce a list with possible stems
for each word in the output.
:param output: stemmer output to parse
:param skip_empty: set True to skip lines without stems (default is False)
:param skip_same_stems: set False to keep duplicate stems for a word (default is True)
:returns: a list of tuples, each containing an original text word and
a list of stems for the given word
def parse_for_simple_stems(output, skip_empty=False, skip_same_stems=True):
"""
Parses the output stem lines to produce a list with possible stems
for each word in the output.
    :param output: stemmer output to parse
    :param skip_empty: set True to skip lines without stems (default is False)
    :param skip_same_stems: set False to keep duplicate stems for a word (default is True)
    :returns: a list of tuples, each containing an original text word and
    a list of stems for the given word
"""
lines_with_stems = _get_lines_with_stems(output)
stems = list()
last_word = None
for line in lines_with_stems:
word, stem, _ = line.split("\t")
stem = stem if stem != '-' else None
if skip_empty and (stem is None):
continue
if last_word != word:
stems.append((word, []))
        # append new stem only if not already on the list
stem = None if skip_same_stems and stem in stems[-1][1] else stem
if stem is not None:
stems[-1][1].append(stem)
last_word = word
return stems
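A hedged sketch of the expected result, assuming _get_lines_with_stems yields tab-separated word/stem/tag lines ('-' marking a missing stem):
output = 'wolny\twolny\tadj\nwolny\twolno\tadv\nbez\t-\tprep'
parse_for_simple_stems(output)
# -> [('wolny', ['wolny', 'wolno']), ('bez', [])]
# with skip_empty=True the ('bez', []) entry would be omitted entirely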
|
Creates the class template.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
str: class template.
def _CreateClassTemplate(cls, data_type_definition):
"""Creates the class template.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
str: class template.
"""
type_name = data_type_definition.name
type_description = data_type_definition.description or type_name
while type_description.endswith('.'):
type_description = type_description[:-1]
class_attributes_description = []
init_arguments = []
instance_attributes = []
for member_definition in data_type_definition.members:
attribute_name = member_definition.name
description = member_definition.description or attribute_name
while description.endswith('.'):
description = description[:-1]
member_data_type = getattr(member_definition, 'member_data_type', '')
if isinstance(member_definition, data_types.MemberDataTypeDefinition):
member_definition = member_definition.member_data_type_definition
member_type_indicator = member_definition.TYPE_INDICATOR
if member_type_indicator == definitions.TYPE_INDICATOR_SEQUENCE:
element_type_indicator = member_definition.element_data_type
member_type_indicator = 'tuple[{0:s}]'.format(element_type_indicator)
else:
member_type_indicator = cls._PYTHON_NATIVE_TYPES.get(
member_type_indicator, member_data_type)
argument = '{0:s}=None'.format(attribute_name)
definition = ' self.{0:s} = {0:s}'.format(attribute_name)
description = ' {0:s} ({1:s}): {2:s}.'.format(
attribute_name, member_type_indicator, description)
class_attributes_description.append(description)
init_arguments.append(argument)
instance_attributes.append(definition)
class_attributes_description = '\n'.join(
sorted(class_attributes_description))
init_arguments = ', '.join(init_arguments)
instance_attributes = '\n'.join(sorted(instance_attributes))
template_values = {
'class_attributes_description': class_attributes_description,
'init_arguments': init_arguments,
'instance_attributes': instance_attributes,
'type_description': type_description,
'type_name': type_name}
return cls._CLASS_TEMPLATE.format(**template_values)
|
Checks if a string contains an identifier.
Args:
string (str): string to check.
Returns:
bool: True if the string contains an identifier, False otherwise.
def _IsIdentifier(cls, string):
"""Checks if a string contains an identifier.
Args:
string (str): string to check.
Returns:
bool: True if the string contains an identifier, False otherwise.
"""
return (
string and not string[0].isdigit() and
all(character.isalnum() or character == '_' for character in string))
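For example:
cls._IsIdentifier('file_offset')   # True
cls._IsIdentifier('2nd_field')     # False: starts with a digit
cls._IsIdentifier('my-field')      # False: '-' is not alphanumeric or '_'
cls._IsIdentifier('')              # falsy: empty string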
|
Validates the data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
ValueError: if the data type definition is not considered valid.
def _ValidateDataTypeDefinition(cls, data_type_definition):
"""Validates the data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
ValueError: if the data type definition is not considered valid.
"""
if not cls._IsIdentifier(data_type_definition.name):
raise ValueError(
'Data type definition name: {0!s} not a valid identifier'.format(
data_type_definition.name))
if keyword.iskeyword(data_type_definition.name):
raise ValueError(
'Data type definition name: {0!s} matches keyword'.format(
data_type_definition.name))
members = getattr(data_type_definition, 'members', None)
if not members:
raise ValueError(
'Data type definition name: {0!s} missing members'.format(
data_type_definition.name))
defined_attribute_names = set()
for member_definition in members:
attribute_name = member_definition.name
if not cls._IsIdentifier(attribute_name):
raise ValueError('Attribute name: {0!s} not a valid identifier'.format(
attribute_name))
if attribute_name.startswith('_'):
raise ValueError('Attribute name: {0!s} starts with underscore'.format(
attribute_name))
if keyword.iskeyword(attribute_name):
raise ValueError('Attribute name: {0!s} matches keyword'.format(
attribute_name))
if attribute_name in defined_attribute_names:
raise ValueError('Attribute name: {0!s} already defined'.format(
attribute_name))
defined_attribute_names.add(attribute_name)
|
Creates a new structure values class.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
class: structure values class.
def CreateClass(cls, data_type_definition):
"""Creates a new structure values class.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Returns:
class: structure values class.
"""
cls._ValidateDataTypeDefinition(data_type_definition)
class_definition = cls._CreateClassTemplate(data_type_definition)
namespace = {
'__builtins__' : {
'object': builtins.object,
'super': builtins.super},
'__name__': '{0:s}'.format(data_type_definition.name)}
if sys.version_info[0] >= 3:
# pylint: disable=no-member
namespace['__builtins__']['__build_class__'] = builtins.__build_class__
exec(class_definition, namespace) # pylint: disable=exec-used
return namespace[data_type_definition.name]
|
Deregisters a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
KeyError: if a data type definition is not set for the corresponding
name.
def DeregisterDefinition(self, data_type_definition):
"""Deregisters a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
KeyError: if a data type definition is not set for the corresponding
name.
"""
name = data_type_definition.name.lower()
if name not in self._definitions:
raise KeyError('Definition not set for name: {0:s}.'.format(
data_type_definition.name))
del self._definitions[name]
|
Retrieves a specific data type definition by name.
Args:
name (str): name of the data type definition.
Returns:
DataTypeDefinition: data type definition or None if not available.
def GetDefinitionByName(self, name):
"""Retrieves a specific data type definition by name.
Args:
name (str): name of the data type definition.
Returns:
DataTypeDefinition: data type definition or None if not available.
"""
lookup_name = name.lower()
if lookup_name not in self._definitions:
lookup_name = self._aliases.get(name, None)
return self._definitions.get(lookup_name, None)
|
Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
def RegisterDefinition(self, data_type_definition):
"""Registers a data type definition.
The data type definitions are identified based on their lower case name.
Args:
data_type_definition (DataTypeDefinition): data type definitions.
Raises:
KeyError: if data type definition is already set for the corresponding
name.
"""
name_lower = data_type_definition.name.lower()
if name_lower in self._definitions:
raise KeyError('Definition already set for name: {0:s}.'.format(
data_type_definition.name))
if data_type_definition.name in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(
data_type_definition.name))
for alias in data_type_definition.aliases:
if alias in self._aliases:
raise KeyError('Alias already set for name: {0:s}.'.format(alias))
self._definitions[name_lower] = data_type_definition
for alias in data_type_definition.aliases:
self._aliases[alias] = name_lower
if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
self._format_definitions.append(name_lower)
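A hedged sketch of how the lower-cased names and the alias table interact (EmptyDataTypeDefinition and the names are hypothetical):
definition = EmptyDataTypeDefinition('int32', aliases=['LONG32'])
registry.RegisterDefinition(definition)
registry.GetDefinitionByName('INT32')    # found via the lower-cased name
registry.GetDefinitionByName('LONG32')   # found via the alias table
registry.GetDefinitionByName('long32')   # None: alias lookup is case sensitive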
|
Splits a given string by comma, trims whitespace on the resulting strings and applies a given ```func``` to
each item.
def apply_on_csv_string(rules_str, func):
""" Splits a given string by comma, trims whitespace on the resulting strings and applies a given ```func``` to
each item. """
    items = rules_str.split(",")
    for item in items:  # avoid shadowing the built-in str
        func(item.strip())
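For instance:
apply_on_csv_string('alpha, beta ,gamma', print)
# prints 'alpha', 'beta' and 'gamma' on separate lines, whitespace trimmed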
|
Gets Favicon-URL for the Model.
Template Syntax:
{% placeFavicon %}
def placeFavicon(context):
"""
Gets Favicon-URL for the Model.
Template Syntax:
{% placeFavicon %}
"""
fav = Favicon.objects.filter(isFavicon=True).first()
if not fav:
return mark_safe('<!-- no favicon -->')
html = ''
for rel in config:
for size in sorted(config[rel], reverse=True):
n = fav.get_favicon(size=size, rel=rel)
html += '<link rel="%s" sizes="%sx%s" href="%s"/>' % (
n.rel, n.size, n.size, n.faviconImage.url)
default_fav = fav.get_favicon(size=32, rel='shortcut icon')
html += '<link rel="%s" sizes="%sx%s" href="%s"/>' % (
default_fav.rel, default_fav.size, default_fav.size, default_fav.faviconImage.url)
return mark_safe(html)
|
Set default theme name based in config file.
def set_default_theme(theme):
"""
Set default theme name based in config file.
"""
pref_init() # make sure config files exist
parser = cp.ConfigParser()
parser.read(PREFS_FILE)
# Do we need to create a section?
if not parser.has_section("theme"):
parser.add_section("theme")
parser.set("theme", "default", theme)
# best way to make sure no file truncation?
with open("%s.2" % PREFS_FILE, "w") as fp:
parser.write(fp)
copy("%s.2" % PREFS_FILE, PREFS_FILE)
unlink("%s.2" % PREFS_FILE,)
|
Return theme name based on manual input, prefs file, or default to "plain".
def pick_theme(manual):
"""
Return theme name based on manual input, prefs file, or default to "plain".
"""
if manual:
return manual
pref_init()
parser = cp.ConfigParser()
parser.read(PREFS_FILE)
try:
theme = parser.get("theme", "default")
except (cp.NoSectionError, cp.NoOptionError):
theme = "plain"
return theme
|
Can be called without penalty. Create ~/.cdk dir if it doesn't
exist. Copy the default pref file if it doesn't exist.
def pref_init():
"""Can be called without penalty. Create ~/.cdk dir if it doesn't
exist. Copy the default pref file if it doesn't exist."""
# make sure we have a ~/.cdk dir
if not isdir(PREFS_DIR):
mkdir(PREFS_DIR)
# make sure we have a default prefs file
if not isfile(PREFS_FILE):
copy(join(LOCATION, "custom", "prefs"), PREFS_DIR)
|
Pass a path to a theme file which will be extracted to the themes directory.
def install_theme(path_to_theme):
"""
Pass a path to a theme file which will be extracted to the themes directory.
"""
pref_init()
# cp the file
filename = basename(path_to_theme)
dest = join(THEMES_DIR, filename)
copy(path_to_theme, dest)
# unzip
zf = zipfile.ZipFile(dest)
# should make sure zipfile contains only themename folder which doesn't conflict
# with existing themename. Or some kind of sanity check
zf.extractall(THEMES_DIR) # plus this is a potential security flaw pre 2.7.4
# remove the copied zipfile
unlink(dest)
|
Entry point for choosing what subcommand to run. Really should be using asciidocapi
def main():
"""
Entry point for choosing what subcommand to run. Really should be using asciidocapi
"""
# Try parsing command line args and flags with docopt
args = docopt(__doc__, version="cdk")
# Am I going to need validation? No Schema for the moment...
if args['FILE']:
out = output_file(args['FILE'])
# Great! Run asciidoc with appropriate flags
theme = pick_theme(args['--theme'])
if theme not in listdir(THEMES_DIR):
exit('Selected theme "%s" not found. Check ~/.cdk/prefs' % theme)
cmd = create_command(theme, args['--bare'], args['--toc'], args['--notransition'],
args['--logo'])
run_command(cmd, args)
if args['--toc']:
add_css(out, '.deck-container .deck-toc li a span{color: #888;display:inline;}')
if args['--custom-css']:
add_css_file(out, args['--custom-css'])
if args['--open']:
webbrowser.open("file://" + abspath(out))
# other commands
elif args['--generate']:
if isfile(args['--generate']):
exit("%s already exists!" % args['--generate'])
with open(args['--generate'], "w") as fp:
sample = join(LOCATION, "custom", "sample.asc")
fp.write(open(sample).read())
print("Created sample slide deck in %s..." % args['--generate'])
exit()
elif args['--install-theme']:
path = args['--install-theme']
if not isfile(path):
exit("Theme file not found.")
if not path.endswith(".zip"):
exit("Theme installation currently only supports theme install from "
".zip files.")
install_theme(path)
elif args['--default-theme']:
set_default_theme(args['--default-theme'])
|
Move contents of resources key in internal dictionary into self.resources
Returns:
None
def separate_resources(self):
# type: () -> None
"""Move contents of resources key in internal dictionary into self.resources
Returns:
None
"""
self._separate_hdxobjects(self.resources, 'resources', 'name', hdx.data.resource.Resource)
|
Get a Resource object from a resource id, dictionary or Resource object
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
Returns:
hdx.data.resource.Resource: Resource object
def _get_resource_from_obj(self, resource):
# type: (Union[hdx.data.resource.Resource,Dict,str]) -> hdx.data.resource.Resource
"""Add new or update existing resource in dataset with new metadata
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
Returns:
hdx.data.resource.Resource: Resource object
"""
if isinstance(resource, str):
if is_valid_uuid(resource) is False:
raise HDXError('%s is not a valid resource id!' % resource)
resource = hdx.data.resource.Resource.read_from_hdx(resource, configuration=self.configuration)
elif isinstance(resource, dict):
resource = hdx.data.resource.Resource(resource, configuration=self.configuration)
if not isinstance(resource, hdx.data.resource.Resource):
raise HDXError('Type %s cannot be added as a resource!' % type(resource).__name__)
return resource
|
Add new or update existing resource in dataset with new metadata
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
ignore_datasetid (bool): Whether to ignore dataset id in the resource
Returns:
None
def add_update_resource(self, resource, ignore_datasetid=False):
# type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> None
"""Add new or update existing resource in dataset with new metadata
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
ignore_datasetid (bool): Whether to ignore dataset id in the resource
Returns:
None
"""
resource = self._get_resource_from_obj(resource)
if 'package_id' in resource:
if not ignore_datasetid:
raise HDXError('Resource %s being added already has a dataset id!' % (resource['name']))
resource.check_url_filetoupload()
resource_updated = self._addupdate_hdxobject(self.resources, 'name', resource)
if resource.get_file_to_upload():
resource_updated.set_file_to_upload(resource.get_file_to_upload())
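A typical call with hypothetical metadata; the resource is matched on 'name' against existing resources, merged if found and appended if not:
dataset.add_update_resource({'name': 'data.csv',
                             'format': 'csv',
                             'url': 'https://example.com/data.csv',
                             'description': 'Example data'})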
|
Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
def add_update_resources(self, resources, ignore_datasetid=False):
# type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
"""Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
"""
if not isinstance(resources, list):
raise HDXError('Resources should be a list!')
for resource in resources:
self.add_update_resource(resource, ignore_datasetid)
|
Delete a resource from the dataset and also from HDX by default
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.
Returns:
bool: True if resource removed or False if not
def delete_resource(self, resource, delete=True):
# type: (Union[hdx.data.resource.Resource,Dict,str], bool) -> bool
"""Delete a resource from the dataset and also from HDX by default
Args:
resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary
delete (bool): Whether to delete the resource from HDX (not just the dataset). Defaults to True.
Returns:
bool: True if resource removed or False if not
"""
if isinstance(resource, str):
if is_valid_uuid(resource) is False:
raise HDXError('%s is not a valid resource id!' % resource)
return self._remove_hdxobject(self.resources, resource, delete=delete)
|
Reorder resources in dataset according to provided list.
If only some resource ids are supplied then these are
assumed to be first and the other resources will stay in
their original order.
Args:
resource_ids (List[str]): List of resource ids
hxl_update (bool): Whether to call package_hxl_update. Defaults to True.
Returns:
None
def reorder_resources(self, resource_ids, hxl_update=True):
# type: (List[str], bool) -> None
"""Reorder resources in dataset according to provided list.
If only some resource ids are supplied then these are
assumed to be first and the other resources will stay in
their original order.
Args:
resource_ids (List[str]): List of resource ids
hxl_update (bool): Whether to call package_hxl_update. Defaults to True.
Returns:
None
"""
dataset_id = self.data.get('id')
if not dataset_id:
raise HDXError('Dataset has no id! It must be read, created or updated first.')
data = {'id': dataset_id,
'order': resource_ids}
self._write_to_hdx('reorder', data, 'package_id')
if hxl_update:
self.hxl_update()
|
Update dataset metadata with static metadata from YAML file
Args:
path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml.
Returns:
None
def update_from_yaml(self, path=join('config', 'hdx_dataset_static.yml')):
# type: (str) -> None
"""Update dataset metadata with static metadata from YAML file
Args:
path (str): Path to YAML dataset metadata. Defaults to config/hdx_dataset_static.yml.
Returns:
None
"""
super(Dataset, self).update_from_yaml(path)
self.separate_resources()
|
Update dataset metadata with static metadata from JSON file
Args:
path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.
Returns:
None
def update_from_json(self, path=join('config', 'hdx_dataset_static.json')):
# type: (str) -> None
"""Update dataset metadata with static metadata from JSON file
Args:
path (str): Path to JSON dataset metadata. Defaults to config/hdx_dataset_static.json.
Returns:
None
"""
super(Dataset, self).update_from_json(path)
self.separate_resources()
|
Reads the dataset given by identifier from HDX and returns Dataset object
Args:
identifier (str): Identifier of dataset
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Dataset]: Dataset object if successful read, None if not
def read_from_hdx(identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['Dataset']
"""Reads the dataset given by identifier from HDX and returns Dataset object
Args:
identifier (str): Identifier of dataset
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Dataset]: Dataset object if successful read, None if not
"""
dataset = Dataset(configuration=configuration)
result = dataset._dataset_load_from_hdx(identifier)
if result:
return dataset
return None
|
Creates resource objects in dataset
def _dataset_create_resources(self):
# type: () -> None
"""Creates resource objects in dataset
"""
if 'resources' in self.data:
self.old_data['resources'] = self._copy_hdxobjects(self.resources, hdx.data.resource.Resource, 'file_to_upload')
self.init_resources()
self.separate_resources()
|
Loads the dataset given by either id or name from HDX
Args:
id_or_name (str): Either id or name of dataset
Returns:
bool: True if loaded, False if not
def _dataset_load_from_hdx(self, id_or_name):
# type: (str) -> bool
"""Loads the dataset given by either id or name from HDX
Args:
id_or_name (str): Either id or name of dataset
Returns:
bool: True if loaded, False if not
"""
if not self._load_from_hdx('dataset', id_or_name):
return False
self._dataset_create_resources()
return True
|
Check that metadata for dataset and its resources is complete. Set the parameter ignore_fields
to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
# type: (List[str], bool) -> None
"""Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None
"""
if self.is_requestable():
self._check_required_fields('dataset-requestable', ignore_fields)
else:
self._check_required_fields('dataset', ignore_fields)
if len(self.resources) == 0 and not allow_no_resources:
raise HDXError('There are no resources! Please add at least one resource!')
for resource in self.resources:
ignore_fields = ['package_id']
resource.check_required_fields(ignore_fields=ignore_fields)
|
Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore.
Args:
resource (hdx.data.Resource): Resource read from HDX
updated_resource (hdx.data.Resource): Updated resource from dataset
filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
ignore_fields (List[str]): List of fields to ignore when checking resource
Returns:
None
def _dataset_merge_filestore_resource(self, resource, updated_resource, filestore_resources, ignore_fields):
# type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None
"""Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore.
Args:
resource (hdx.data.Resource): Resource read from HDX
updated_resource (hdx.data.Resource): Updated resource from dataset
filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
ignore_fields (List[str]): List of fields to ignore when checking resource
Returns:
None
"""
if updated_resource.get_file_to_upload():
resource.set_file_to_upload(updated_resource.get_file_to_upload())
filestore_resources.append(resource)
merge_two_dictionaries(resource, updated_resource)
resource.check_required_fields(ignore_fields=ignore_fields)
if resource.get_file_to_upload():
resource['url'] = Dataset.temporary_url
|
Helper method to add new resource from dataset including filestore.
Args:
new_resource (hdx.data.Resource): New resource from dataset
ignore_fields (List[str]): List of fields to ignore when checking resource
filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
Returns:
None
def _dataset_merge_filestore_newresource(self, new_resource, ignore_fields, filestore_resources):
# type: (hdx.data.Resource, List[str], List[hdx.data.Resource]) -> None
"""Helper method to add new resource from dataset including filestore.
Args:
new_resource (hdx.data.Resource): New resource from dataset
ignore_fields (List[str]): List of fields to ignore when checking resource
filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
Returns:
None
"""
new_resource.check_required_fields(ignore_fields=ignore_fields)
self.resources.append(new_resource)
if new_resource.get_file_to_upload():
filestore_resources.append(new_resource)
new_resource['url'] = Dataset.temporary_url
|
Helper method to create files in filestore by updating resources.
Args:
filestore_resources (List[hdx.data.Resource]): List of resources that use the filestore (to be processed)
create_default_views (bool): Whether to call package_create_default_resource_views.
hxl_update (bool): Whether to call package_hxl_update.
Returns:
None
def _add_filestore_resources(self, filestore_resources, create_default_views, hxl_update):
# type: (List[hdx.data.Resource], bool, bool) -> None
"""Helper method to create files in filestore by updating resources.
Args:
filestore_resources (List[hdx.data.Resource]): List of resources that use the filestore (to be processed)
create_default_views (bool): Whether to call package_create_default_resource_views.
hxl_update (bool): Whether to call package_hxl_update.
Returns:
None
"""
for resource in filestore_resources:
for created_resource in self.data['resources']:
if resource['name'] == created_resource['name']:
merge_two_dictionaries(resource.data, created_resource)
del resource['url']
resource.update_in_hdx()
merge_two_dictionaries(created_resource, resource.data)
break
self.init_resources()
self.separate_resources()
if create_default_views:
self.create_default_views()
if hxl_update:
self.hxl_update()
|
Helper method to check if dataset or its resources exist and update them
Args:
update_resources (bool): Whether to update resources
update_resources_by_name (bool): Compare resource names rather than position in list
remove_additional_resources (bool): Remove additional resources found in dataset (if updating)
create_default_views (bool): Whether to call package_create_default_resource_views.
hxl_update (bool): Whether to call package_hxl_update.
Returns:
None
def _dataset_merge_hdx_update(self, update_resources, update_resources_by_name,
remove_additional_resources, create_default_views, hxl_update):
# type: (bool, bool, bool, bool, bool) -> None
"""Helper method to check if dataset or its resources exist and update them
Args:
update_resources (bool): Whether to update resources
update_resources_by_name (bool): Compare resource names rather than position in list
remove_additional_resources (bool): Remove additional resources found in dataset (if updating)
create_default_views (bool): Whether to call package_create_default_resource_views.
hxl_update (bool): Whether to call package_hxl_update.
Returns:
None
"""
# 'old_data' here is the data we want to use for updating while 'data' is the data read from HDX
merge_two_dictionaries(self.data, self.old_data)
if 'resources' in self.data:
del self.data['resources']
updated_resources = self.old_data.get('resources', None)
filestore_resources = list()
if update_resources and updated_resources:
ignore_fields = ['package_id']
if update_resources_by_name:
resource_names = set()
for resource in self.resources:
resource_name = resource['name']
resource_names.add(resource_name)
for updated_resource in updated_resources:
if resource_name == updated_resource['name']:
logger.warning('Resource exists. Updating %s' % resource_name)
self._dataset_merge_filestore_resource(resource, updated_resource,
filestore_resources, ignore_fields)
break
updated_resource_names = set()
for updated_resource in updated_resources:
updated_resource_name = updated_resource['name']
updated_resource_names.add(updated_resource_name)
if not updated_resource_name in resource_names:
self._dataset_merge_filestore_newresource(updated_resource, ignore_fields, filestore_resources)
if remove_additional_resources:
resources_to_delete = list()
for i, resource in enumerate(self.resources):
resource_name = resource['name']
if resource_name not in updated_resource_names:
logger.warning('Removing additional resource %s!' % resource_name)
resources_to_delete.append(i)
for i in sorted(resources_to_delete, reverse=True):
del self.resources[i]
else: # update resources by position
for i, updated_resource in enumerate(updated_resources):
if len(self.resources) > i:
updated_resource_name = updated_resource['name']
resource = self.resources[i]
resource_name = resource['name']
logger.warning('Resource exists. Updating %s' % resource_name)
if resource_name != updated_resource_name:
logger.warning('Changing resource name to: %s' % updated_resource_name)
self._dataset_merge_filestore_resource(resource, updated_resource,
filestore_resources, ignore_fields)
else:
self._dataset_merge_filestore_newresource(updated_resource, ignore_fields, filestore_resources)
if remove_additional_resources:
resources_to_delete = list()
for i, resource in enumerate(self.resources):
if len(updated_resources) <= i:
logger.warning('Removing additional resource %s!' % resource['name'])
resources_to_delete.append(i)
for i in sorted(resources_to_delete, reverse=True):
del self.resources[i]
if self.resources:
self.data['resources'] = self._convert_hdxobjects(self.resources)
ignore_field = self.configuration['dataset'].get('ignore_on_update')
self.check_required_fields(ignore_fields=[ignore_field])
self._save_to_hdx('update', 'id')
self._add_filestore_resources(filestore_resources, create_default_views, hxl_update)
|